535.104.05

commit a8e01be6b2
parent 12c0739352
CHANGELOG.md

@@ -2,6 +2,8 @@
 ## Release 535 Entries
 
+### [535.104.05] 2023-08-22
+
 ### [535.98] 2023-08-08
 
 ### [535.86.10] 2023-07-31
README.md (20 changed lines)
@@ -1,7 +1,7 @@
 # NVIDIA Linux Open GPU Kernel Module Source
 
 This is the source release of the NVIDIA Linux open GPU kernel modules,
-version 535.98.
+version 535.104.05.
 
 
 ## How to Build
@@ -17,7 +17,7 @@ as root:
 
 Note that the kernel modules built here must be used with GSP
 firmware and user-space NVIDIA GPU driver components from a corresponding
-535.98 driver release. This can be achieved by installing
+535.104.05 driver release. This can be achieved by installing
 the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
 option. E.g.,
 
@@ -180,7 +180,7 @@ software applications.
 ## Compatible GPUs
 
 The open-gpu-kernel-modules can be used on any Turing or later GPU
-(see the table below). However, in the 535.98 release,
+(see the table below). However, in the 535.104.05 release,
 GeForce and Workstation support is still considered alpha-quality.
 
 To enable use of the open kernel modules on GeForce and Workstation GPUs,
@@ -188,7 +188,7 @@ set the "NVreg_OpenRmEnableUnsupportedGpus" nvidia.ko kernel module
 parameter to 1. For more details, see the NVIDIA GPU driver end user
 README here:
 
-https://us.download.nvidia.com/XFree86/Linux-x86_64/535.98/README/kernel_open.html
+https://us.download.nvidia.com/XFree86/Linux-x86_64/535.104.05/README/kernel_open.html
 
 In the below table, if three IDs are listed, the first is the PCI Device
 ID, the second is the PCI Subsystem Vendor ID, and the third is the PCI
@@ -665,6 +665,7 @@ Subsystem Device ID.
 | NVIDIA PG506-232 | 20B6 10DE 1492 |
 | NVIDIA A30 | 20B7 10DE 1532 |
 | NVIDIA A30 | 20B7 10DE 1804 |
+| NVIDIA A800-SXM4-40GB | 20BD 10DE 17F4 |
 | NVIDIA A100-PCIE-40GB | 20F1 10DE 145F |
 | NVIDIA A800-SXM4-80GB | 20F3 10DE 179B |
 | NVIDIA A800-SXM4-80GB | 20F3 10DE 179C |
@@ -676,6 +677,10 @@ Subsystem Device ID.
 | NVIDIA A800-SXM4-80GB | 20F3 10DE 17A2 |
 | NVIDIA A800 80GB PCIe | 20F5 10DE 1799 |
 | NVIDIA A800 80GB PCIe LC | 20F5 10DE 179A |
+| NVIDIA A800 40GB Active | 20F6 1028 180A |
+| NVIDIA A800 40GB Active | 20F6 103C 180A |
+| NVIDIA A800 40GB Active | 20F6 10DE 180A |
+| NVIDIA A800 40GB Active | 20F6 17AA 180A |
 | NVIDIA GeForce GTX 1660 Ti | 2182 |
 | NVIDIA GeForce GTX 1660 | 2184 |
 | NVIDIA GeForce GTX 1650 SUPER | 2187 |
@@ -734,6 +739,7 @@ Subsystem Device ID.
 | NVIDIA A10 | 2236 10DE 1482 |
 | NVIDIA A10G | 2237 10DE 152F |
 | NVIDIA A10M | 2238 10DE 1677 |
+| NVIDIA H100 NVL | 2321 10DE 1839 |
 | NVIDIA H800 PCIe | 2322 10DE 17A4 |
 | NVIDIA H800 | 2324 10DE 17A6 |
 | NVIDIA H800 | 2324 10DE 17A8 |
@@ -741,6 +747,7 @@ Subsystem Device ID.
 | NVIDIA H100 80GB HBM3 | 2330 10DE 16C1 |
 | NVIDIA H100 PCIe | 2331 10DE 1626 |
 | NVIDIA H100 | 2339 10DE 17FC |
+| NVIDIA H800 NVL | 233A 10DE 183A |
 | NVIDIA GeForce RTX 3060 Ti | 2414 |
 | NVIDIA GeForce RTX 3080 Ti Laptop GPU | 2420 |
 | NVIDIA RTX A5500 Laptop GPU | 2438 |
@@ -835,10 +842,13 @@ Subsystem Device ID.
 | NVIDIA RTX 5000 Ada Generation | 26B2 17AA 17FA |
 | NVIDIA L40 | 26B5 10DE 169D |
 | NVIDIA L40 | 26B5 10DE 17DA |
+| NVIDIA L40S | 26B9 10DE 1851 |
+| NVIDIA L40S | 26B9 10DE 18CF |
 | NVIDIA GeForce RTX 4080 | 2704 |
 | NVIDIA GeForce RTX 4090 Laptop GPU | 2717 |
 | NVIDIA RTX 5000 Ada Generation Laptop GPU | 2730 |
 | NVIDIA GeForce RTX 4090 Laptop GPU | 2757 |
+| NVIDIA RTX 5000 Ada Generation Embedded GPU | 2770 |
 | NVIDIA GeForce RTX 4070 Ti | 2782 |
 | NVIDIA GeForce RTX 4070 | 2786 |
 | NVIDIA GeForce RTX 4080 Laptop GPU | 27A0 |
@@ -855,6 +865,7 @@ Subsystem Device ID.
 | NVIDIA RTX 4000 Ada Generation Laptop GPU | 27BA |
 | NVIDIA RTX 3500 Ada Generation Laptop GPU | 27BB |
 | NVIDIA GeForce RTX 4080 Laptop GPU | 27E0 |
+| NVIDIA RTX 3500 Ada Generation Embedded GPU | 27FB |
 | NVIDIA GeForce RTX 4060 Ti | 2803 |
 | NVIDIA GeForce RTX 4060 Ti | 2805 |
 | NVIDIA GeForce RTX 4070 Laptop GPU | 2820 |
@@ -866,3 +877,4 @@ Subsystem Device ID.
 | NVIDIA RTX 2000 Ada Generation Laptop GPU | 28B8 |
 | NVIDIA GeForce RTX 4060 Laptop GPU | 28E0 |
 | NVIDIA GeForce RTX 4050 Laptop GPU | 28E1 |
+| NVIDIA RTX 2000 Ada Generation Embedded GPU | 28F8 |
Kbuild

@@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
 EXTRA_CFLAGS += -I$(src)
 EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args
 EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
-EXTRA_CFLAGS += -DNV_VERSION_STRING=\"535.98\"
+EXTRA_CFLAGS += -DNV_VERSION_STRING=\"535.104.05\"
 
 ifneq ($(SYSSRCHOST1X),)
 EXTRA_CFLAGS += -I$(SYSSRCHOST1X)
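For context (not part of the commit): the `-DNV_VERSION_STRING=...` flag above is how the build stamps the driver version into the C sources as a string literal. A minimal sketch of a hypothetical consumer — `example_log_driver_version` is illustrative only and does not exist in the driver:

```c
#include <linux/printk.h>

/* Hypothetical consumer of the NV_VERSION_STRING define set above; with this
 * commit the macro expands to the string literal "535.104.05". */
static void example_log_driver_version(void)
{
    pr_info("NVRM: loaded driver version %s\n", NV_VERSION_STRING);
}
```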
nv_uvm_types.h

@@ -566,8 +566,11 @@ typedef struct UvmPlatformInfo_tag
     // Out: ATS (Address Translation Services) is supported
     NvBool atsSupported;
 
-    // Out: AMD SEV (Secure Encrypted Virtualization) is enabled
-    NvBool sevEnabled;
+    // Out: True if HW trusted execution, such as AMD's SEV-SNP or Intel's TDX,
+    // is enabled in the VM, indicating that Confidential Computing must be
+    // also enabled in the GPU(s); these two security features are either both
+    // enabled, or both disabled.
+    NvBool confComputingEnabled;
 } UvmPlatformInfo;
 
 typedef struct UvmGpuClientInfo_tag
conftest.sh

@@ -6341,6 +6341,21 @@ compile_test() {
             compile_check_conftest "$CODE" "NV_MEMPOLICY_HAS_HOME_NODE" "" "types"
         ;;
 
+        mmu_interval_notifier)
+            #
+            # Determine if mmu_interval_notifier struct is present or not
+            #
+            # Added by commit 99cb252f5 ("mm/mmu_notifier: add an interval tree
+            # notifier") in v5.10 (2019-11-12).
+            #
+            CODE="
+            #include <linux/mmu_notifier.h>
+            struct mmu_interval_notifier interval_notifier;
+            "
+
+            compile_check_conftest "$CODE" "NV_MMU_INTERVAL_NOTIFIER" "" "types"
+        ;;
+
         # When adding a new conftest entry, please use the correct format for
         # specifying the relevant upstream Linux kernel commit.
         #
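The conftest above only probes for the type; the generated `NV_MMU_INTERVAL_NOTIFIER` macro then gates the interval-notifier code added later in this commit. For readers unfamiliar with the upstream API, here is a minimal, illustrative sketch of the pattern being enabled — all `example_*` names are hypothetical, not from the commit:

```c
#include <linux/mmu_notifier.h>

#if defined(NV_MMU_INTERVAL_NOTIFIER)
/* Called on the invalidation path; publish the new sequence number so that
 * readers polling with mmu_interval_read_retry() know to retry. */
static bool example_invalidate(struct mmu_interval_notifier *mni,
                               const struct mmu_notifier_range *range,
                               unsigned long cur_seq)
{
    mmu_interval_set_seq(mni, cur_seq);
    return true;
}

static const struct mmu_interval_notifier_ops example_ops = {
    .invalidate = example_invalidate,
};

/* Track invalidations of [start, start + length) in the given mm. */
static int example_track_range(struct mmu_interval_notifier *mni,
                               struct mm_struct *mm,
                               unsigned long start,
                               unsigned long length)
{
    return mmu_interval_notifier_insert(mni, mm, start, length, &example_ops);
}
#endif
```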
nvidia-uvm.Kbuild

@@ -110,5 +110,6 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_mm_arg
 NV_CONFTEST_TYPE_COMPILE_TESTS += handle_mm_fault_has_pt_regs_arg
 NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_unified_nodes
 NV_CONFTEST_TYPE_COMPILE_TESTS += mempolicy_has_home_node
+NV_CONFTEST_TYPE_COMPILE_TESTS += mmu_interval_notifier
 
 NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_int_active_memcg
uvm_ats.c

@@ -44,6 +44,8 @@ void uvm_ats_init(const UvmPlatformInfo *platform_info)
 
 void uvm_ats_init_va_space(uvm_va_space_t *va_space)
 {
+    uvm_init_rwsem(&va_space->ats.lock, UVM_LOCK_ORDER_LEAF);
+
     if (UVM_ATS_IBM_SUPPORTED())
         uvm_ats_ibm_init_va_space(va_space);
 }
uvm_ats.h

@@ -28,17 +28,32 @@
 #include "uvm_forward_decl.h"
 #include "uvm_ats_ibm.h"
 #include "nv_uvm_types.h"
+#include "uvm_lock.h"
 
 #include "uvm_ats_sva.h"
 
 #define UVM_ATS_SUPPORTED() (UVM_ATS_IBM_SUPPORTED() || UVM_ATS_SVA_SUPPORTED())
 
+// ATS prefetcher uses hmm_range_fault() to query residency information.
+// hmm_range_fault() needs CONFIG_HMM_MIRROR. To detect racing CPU invalidates
+// of memory regions while hmm_range_fault() is being called, MMU interval
+// notifiers are needed.
+#if defined(CONFIG_HMM_MIRROR) && defined(NV_MMU_INTERVAL_NOTIFIER)
+#define UVM_ATS_PREFETCH_SUPPORTED() 1
+#else
+#define UVM_ATS_PREFETCH_SUPPORTED() 0
+#endif
+
 typedef struct
 {
     // Mask of gpu_va_spaces which are registered for ATS access. The mask is
     // indexed by gpu->id. This mask is protected by the VA space lock.
     uvm_processor_mask_t registered_gpu_va_spaces;
 
+    // Protects racing invalidates in the VA space while hmm_range_fault() is
+    // being called in ats_compute_residency_mask().
+    uvm_rw_semaphore_t lock;
+
     union
     {
         uvm_ibm_va_space_t ibm;
uvm_ats_faults.c

@@ -20,60 +20,19 @@
     DEALINGS IN THE SOFTWARE.
 
 *******************************************************************************/
 
 #include "uvm_api.h"
 #include "uvm_tools.h"
 #include "uvm_va_range.h"
 #include "uvm_ats.h"
 #include "uvm_ats_faults.h"
 #include "uvm_migrate_pageable.h"
+#include <linux/nodemask.h>
+#include <linux/mempolicy.h>
+#include <linux/mmu_notifier.h>
 
-// TODO: Bug 2103669: Implement a real prefetching policy and remove or adapt
-// these experimental parameters. These are intended to help guide that policy.
-static unsigned int uvm_exp_perf_prefetch_ats_order_replayable = 0;
-module_param(uvm_exp_perf_prefetch_ats_order_replayable, uint, 0644);
-MODULE_PARM_DESC(uvm_exp_perf_prefetch_ats_order_replayable,
-                 "Max order of pages (2^N) to prefetch on replayable ATS faults");
-
-static unsigned int uvm_exp_perf_prefetch_ats_order_non_replayable = 0;
-module_param(uvm_exp_perf_prefetch_ats_order_non_replayable, uint, 0644);
-MODULE_PARM_DESC(uvm_exp_perf_prefetch_ats_order_non_replayable,
-                 "Max order of pages (2^N) to prefetch on non-replayable ATS faults");
-
-// Expand the fault region to the naturally-aligned region with order given by
-// the module parameters, clamped to the vma containing fault_addr (if any).
-// Note that this means the region contains fault_addr but may not begin at
-// fault_addr.
-static void expand_fault_region(struct vm_area_struct *vma,
-                                NvU64 start,
-                                size_t length,
-                                uvm_fault_client_type_t client_type,
-                                unsigned long *migrate_start,
-                                unsigned long *migrate_length)
-{
-    unsigned int order;
-    unsigned long outer, aligned_start, aligned_size;
-
-    *migrate_start = start;
-    *migrate_length = length;
-
-    if (client_type == UVM_FAULT_CLIENT_TYPE_HUB)
-        order = uvm_exp_perf_prefetch_ats_order_non_replayable;
-    else
-        order = uvm_exp_perf_prefetch_ats_order_replayable;
-
-    if (order == 0)
-        return;
-
-    UVM_ASSERT(vma);
-    UVM_ASSERT(order < BITS_PER_LONG - PAGE_SHIFT);
-
-    aligned_size = (1UL << order) * PAGE_SIZE;
-
-    aligned_start = start & ~(aligned_size - 1);
-
-    *migrate_start = max(vma->vm_start, aligned_start);
-    outer = min(vma->vm_end, aligned_start + aligned_size);
-    *migrate_length = outer - *migrate_start;
-}
+#if UVM_ATS_PREFETCH_SUPPORTED()
+#include <linux/hmm.h>
+#endif
 
 static NV_STATUS service_ats_faults(uvm_gpu_va_space_t *gpu_va_space,
                                     struct vm_area_struct *vma,
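To make the alignment arithmetic of the deleted expand_fault_region() concrete before its replacement appears below: with order 4 and 4KiB pages, aligned_size is 16 pages (64KiB), and the fault address is masked down to a 64KiB boundary. A standalone sketch with hypothetical values, not part of the driver:

```c
#include <stdio.h>

/* Standalone illustration of the arithmetic the removed expand_fault_region()
 * performed (hypothetical values; the real code also clamped to the VMA). */
#define EXAMPLE_PAGE_SIZE 4096UL

int main(void)
{
    unsigned long fault_addr    = 0x10007000UL; /* faulting address */
    unsigned int  order         = 4;            /* prefetch 2^4 pages */
    unsigned long aligned_size  = (1UL << order) * EXAMPLE_PAGE_SIZE; /* 64 KiB */
    unsigned long aligned_start = fault_addr & ~(aligned_size - 1);

    /* Prints: region [0x10000000, 0x10010000) contains fault 0x10007000 */
    printf("region [0x%lx, 0x%lx) contains fault 0x%lx\n",
           aligned_start, aligned_start + aligned_size, fault_addr);
    return 0;
}
```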
@@ -122,6 +81,8 @@ static NV_STATUS service_ats_faults(uvm_gpu_va_space_t *gpu_va_space,
         .mm = mm,
         .dst_id = ats_context->residency_id,
         .dst_node_id = ats_context->residency_node,
+        .start = start,
+        .length = length,
         .populate_permissions = write ? UVM_POPULATE_PERMISSIONS_WRITE : UVM_POPULATE_PERMISSIONS_ANY,
         .touch = true,
         .skip_mapped = true,
@@ -132,13 +93,6 @@ static NV_STATUS service_ats_faults(uvm_gpu_va_space_t *gpu_va_space,
 
     UVM_ASSERT(uvm_ats_can_service_faults(gpu_va_space, mm));
 
-    expand_fault_region(vma,
-                        start,
-                        length,
-                        ats_context->client_type,
-                        &uvm_migrate_args.start,
-                        &uvm_migrate_args.length);
-
     // We are trying to use migrate_vma API in the kernel (if it exists) to
     // populate and map the faulting region on the GPU. We want to do this only
     // on the first touch. That is, pages which are not already mapped. So, we
@@ -184,6 +138,12 @@ static void ats_batch_select_residency(uvm_gpu_va_space_t *gpu_va_space,
     struct mempolicy *vma_policy = vma_policy(vma);
     unsigned short mode;
 
+    ats_context->prefetch_state.has_preferred_location = false;
+
+    // It's safe to read vma_policy since the mmap_lock is held in at least read
+    // mode in this path.
+    uvm_assert_mmap_lock_locked(vma->vm_mm);
+
     if (!vma_policy)
         goto done;
 
@@ -212,6 +172,9 @@ static void ats_batch_select_residency(uvm_gpu_va_space_t *gpu_va_space,
         else
             residency = first_node(vma_policy->nodes);
     }
 
+    if (!nodes_empty(vma_policy->nodes))
+        ats_context->prefetch_state.has_preferred_location = true;
+
 }
 
     // Update gpu if residency is not the faulting gpu.
@@ -219,12 +182,253 @@ static void ats_batch_select_residency(uvm_gpu_va_space_t *gpu_va_space,
         gpu = uvm_va_space_find_gpu_with_memory_node_id(gpu_va_space->va_space, residency);
 
 done:
+#else
+    ats_context->prefetch_state.has_preferred_location = false;
 #endif
 
     ats_context->residency_id = gpu ? gpu->parent->id : UVM_ID_CPU;
     ats_context->residency_node = residency;
 }
 
+static void get_range_in_vma(struct vm_area_struct *vma, NvU64 base, NvU64 *start, NvU64 *end)
+{
+    *start = max(vma->vm_start, (unsigned long) base);
+    *end = min(vma->vm_end, (unsigned long) (base + UVM_VA_BLOCK_SIZE));
+}
+
+static uvm_page_index_t uvm_ats_cpu_page_index(NvU64 base, NvU64 addr)
+{
+    UVM_ASSERT(addr >= base);
+    UVM_ASSERT(addr <= (base + UVM_VA_BLOCK_SIZE));
+
+    return (addr - base) / PAGE_SIZE;
+}
+
+// start and end must be aligned to PAGE_SIZE and must fall within
+// [base, base + UVM_VA_BLOCK_SIZE]
+static uvm_va_block_region_t uvm_ats_region_from_start_end(NvU64 start, NvU64 end)
+{
+    // base can be greater than, less than or equal to the start of a VMA.
+    NvU64 base = UVM_VA_BLOCK_ALIGN_DOWN(start);
+
+    UVM_ASSERT(start < end);
+    UVM_ASSERT(PAGE_ALIGNED(start));
+    UVM_ASSERT(PAGE_ALIGNED(end));
+    UVM_ASSERT(IS_ALIGNED(base, UVM_VA_BLOCK_SIZE));
+
+    return uvm_va_block_region(uvm_ats_cpu_page_index(base, start), uvm_ats_cpu_page_index(base, end));
+}
+
+static uvm_va_block_region_t uvm_ats_region_from_vma(struct vm_area_struct *vma, NvU64 base)
+{
+    NvU64 start;
+    NvU64 end;
+
+    get_range_in_vma(vma, base, &start, &end);
+
+    return uvm_ats_region_from_start_end(start, end);
+}
+
+#if UVM_ATS_PREFETCH_SUPPORTED()
+
+static bool uvm_ats_invalidate_notifier(struct mmu_interval_notifier *mni, unsigned long cur_seq)
+{
+    uvm_ats_fault_context_t *ats_context = container_of(mni, uvm_ats_fault_context_t, prefetch_state.notifier);
+    uvm_va_space_t *va_space = ats_context->prefetch_state.va_space;
+
+    // The following write lock protects against concurrent invalidates while
+    // hmm_range_fault() is being called in ats_compute_residency_mask().
+    uvm_down_write(&va_space->ats.lock);
+
+    mmu_interval_set_seq(mni, cur_seq);
+
+    uvm_up_write(&va_space->ats.lock);
+
+    return true;
+}
+
+static bool uvm_ats_invalidate_notifier_entry(struct mmu_interval_notifier *mni,
+                                              const struct mmu_notifier_range *range,
+                                              unsigned long cur_seq)
+{
+    UVM_ENTRY_RET(uvm_ats_invalidate_notifier(mni, cur_seq));
+}
+
+static const struct mmu_interval_notifier_ops uvm_ats_notifier_ops =
+{
+    .invalidate = uvm_ats_invalidate_notifier_entry,
+};
+
+#endif
+
+static NV_STATUS ats_compute_residency_mask(uvm_gpu_va_space_t *gpu_va_space,
+                                            struct vm_area_struct *vma,
+                                            NvU64 base,
+                                            uvm_ats_fault_context_t *ats_context)
+{
+    NV_STATUS status = NV_OK;
+
+#if UVM_ATS_PREFETCH_SUPPORTED()
+    int ret;
+    NvU64 start;
+    NvU64 end;
+    uvm_page_mask_t *residency_mask = &ats_context->prefetch_state.residency_mask;
+    struct hmm_range range;
+    uvm_page_index_t page_index;
+    uvm_va_block_region_t vma_region;
+    uvm_va_space_t *va_space = gpu_va_space->va_space;
+    struct mm_struct *mm = va_space->va_space_mm.mm;
+
+    uvm_assert_rwsem_locked_read(&va_space->lock);
+
+    ats_context->prefetch_state.first_touch = true;
+
+    uvm_page_mask_zero(residency_mask);
+
+    get_range_in_vma(vma, base, &start, &end);
+
+    vma_region = uvm_ats_region_from_start_end(start, end);
+
+    range.notifier = &ats_context->prefetch_state.notifier;
+    range.start = start;
+    range.end = end;
+    range.hmm_pfns = ats_context->prefetch_state.pfns;
+    range.default_flags = 0;
+    range.pfn_flags_mask = 0;
+    range.dev_private_owner = NULL;
+
+    ats_context->prefetch_state.va_space = va_space;
+
+    // mmu_interval_notifier_insert() will try to acquire mmap_lock for write
+    // and will deadlock since mmap_lock is already held for read in this path.
+    // This is prevented by calling __mmu_notifier_register() during va_space
+    // creation. See the comment in uvm_mmu_notifier_register() for more
+    // details.
+    ret = mmu_interval_notifier_insert(range.notifier, mm, start, end - start, &uvm_ats_notifier_ops);
+    if (ret)
+        return errno_to_nv_status(ret);
+
+    while (true) {
+        range.notifier_seq = mmu_interval_read_begin(range.notifier);
+        ret = hmm_range_fault(&range);
+        if (ret == -EBUSY)
+            continue;
+        if (ret) {
+            status = errno_to_nv_status(ret);
+            UVM_ASSERT(status != NV_OK);
+            break;
+        }
+
+        uvm_down_read(&va_space->ats.lock);
+
+        // Pages may have been freed or re-allocated after hmm_range_fault() is
+        // called. So the PTE might point to a different page or nothing. In the
+        // memory hot-unplug case it is not safe to call page_to_nid() on the
+        // page as the struct page itself may have been freed. To protect
+        // against these cases, uvm_ats_invalidate_entry() blocks on va_space
+        // ATS write lock for concurrent invalidates since va_space ATS lock is
+        // held for read in this path.
+        if (!mmu_interval_read_retry(range.notifier, range.notifier_seq))
+            break;
+
+        uvm_up_read(&va_space->ats.lock);
+    }
+
+    if (status == NV_OK) {
+        for_each_va_block_page_in_region(page_index, vma_region) {
+            unsigned long pfn = ats_context->prefetch_state.pfns[page_index - vma_region.first];
+
+            if (pfn & HMM_PFN_VALID) {
+                struct page *page = hmm_pfn_to_page(pfn);
+
+                if (page_to_nid(page) == ats_context->residency_node)
+                    uvm_page_mask_set(residency_mask, page_index);
+
+                ats_context->prefetch_state.first_touch = false;
+            }
+        }
+
+        uvm_up_read(&va_space->ats.lock);
+    }
+
+    mmu_interval_notifier_remove(range.notifier);
+
+#endif
+
+    return status;
+}
+
+static void ats_expand_fault_region(uvm_gpu_va_space_t *gpu_va_space,
+                                    struct vm_area_struct *vma,
+                                    uvm_ats_fault_context_t *ats_context,
+                                    uvm_va_block_region_t max_prefetch_region,
+                                    uvm_page_mask_t *faulted_mask)
+{
+    uvm_page_mask_t *read_fault_mask = &ats_context->read_fault_mask;
+    uvm_page_mask_t *write_fault_mask = &ats_context->write_fault_mask;
+    uvm_page_mask_t *residency_mask = &ats_context->prefetch_state.residency_mask;
+    uvm_page_mask_t *prefetch_mask = &ats_context->prefetch_state.prefetch_pages_mask;
+    uvm_perf_prefetch_bitmap_tree_t *bitmap_tree = &ats_context->prefetch_state.bitmap_tree;
+
+    if (uvm_page_mask_empty(faulted_mask))
+        return;
+
+    uvm_perf_prefetch_compute_ats(gpu_va_space->va_space,
+                                  faulted_mask,
+                                  uvm_va_block_region_from_mask(NULL, faulted_mask),
+                                  max_prefetch_region,
+                                  residency_mask,
+                                  bitmap_tree,
+                                  prefetch_mask);
+
+    uvm_page_mask_or(read_fault_mask, read_fault_mask, prefetch_mask);
+
+    if (vma->vm_flags & VM_WRITE)
+        uvm_page_mask_or(write_fault_mask, write_fault_mask, prefetch_mask);
+}
+
+static NV_STATUS ats_fault_prefetch(uvm_gpu_va_space_t *gpu_va_space,
+                                    struct vm_area_struct *vma,
+                                    NvU64 base,
+                                    uvm_ats_fault_context_t *ats_context)
+{
+    NV_STATUS status = NV_OK;
+    uvm_page_mask_t *read_fault_mask = &ats_context->read_fault_mask;
+    uvm_page_mask_t *write_fault_mask = &ats_context->write_fault_mask;
+    uvm_page_mask_t *faulted_mask = &ats_context->faulted_mask;
+    uvm_page_mask_t *prefetch_mask = &ats_context->prefetch_state.prefetch_pages_mask;
+    uvm_va_block_region_t max_prefetch_region = uvm_ats_region_from_vma(vma, base);
+
+    if (!uvm_perf_prefetch_enabled(gpu_va_space->va_space))
+        return status;
+
+    if (uvm_page_mask_empty(faulted_mask))
+        return status;
+
+    status = ats_compute_residency_mask(gpu_va_space, vma, base, ats_context);
+    if (status != NV_OK)
+        return status;
+
+    // Prefetch the entire region if none of the pages are resident on any node
+    // and if preferred_location is the faulting GPU.
+    if (ats_context->prefetch_state.has_preferred_location &&
+        ats_context->prefetch_state.first_touch &&
+        uvm_id_equal(ats_context->residency_id, gpu_va_space->gpu->parent->id)) {
+
+        uvm_page_mask_init_from_region(prefetch_mask, max_prefetch_region, NULL);
+        uvm_page_mask_or(read_fault_mask, read_fault_mask, prefetch_mask);
+
+        if (vma->vm_flags & VM_WRITE)
+            uvm_page_mask_or(write_fault_mask, write_fault_mask, prefetch_mask);
+
+        return status;
+    }
+
+    ats_expand_fault_region(gpu_va_space, vma, ats_context, max_prefetch_region, faulted_mask);
+
+    return status;
+}
+
 NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
                                  struct vm_area_struct *vma,
                                  NvU64 base,
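The heart of the new ats_compute_residency_mask() above is the seqcount-style handshake between hmm_range_fault() and the interval notifier. Stripped of the UVM locking and mask bookkeeping, the canonical loop looks roughly like this — an illustrative skeleton only, assuming `range` is a pre-populated struct hmm_range whose notifier is already inserted and that mmap_lock is held for read:

```c
#include <linux/hmm.h>
#include <linux/mmu_notifier.h>

/* Illustrative skeleton of the snapshot/validate loop used above. The real
 * code additionally takes va_space->ats.lock to serialize with invalidates. */
static int example_snapshot(struct hmm_range *range)
{
    int ret;

    while (true) {
        /* Begin a read-side critical section against invalidations. */
        range->notifier_seq = mmu_interval_read_begin(range->notifier);

        ret = hmm_range_fault(range);
        if (ret == -EBUSY)
            continue;          /* transient; take a fresh snapshot */
        if (ret)
            return ret;        /* hard failure */

        /* If no invalidation raced with the walk, pfns[] is consistent. */
        if (!mmu_interval_read_retry(range->notifier, range->notifier_seq))
            return 0;
    }
}
```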
@@ -267,6 +471,8 @@ NV_STATUS uvm_ats_service_faults(uvm_gpu_va_space_t *gpu_va_space,
 
     ats_batch_select_residency(gpu_va_space, vma, ats_context);
 
+    ats_fault_prefetch(gpu_va_space, vma, base, ats_context);
+
     for_each_va_block_subregion_in_mask(subregion, write_fault_mask, region) {
         NvU64 start = base + (subregion.first * PAGE_SIZE);
         size_t length = uvm_va_block_region_num_pages(subregion) * PAGE_SIZE;
uvm_conf_computing.c

@@ -1,5 +1,5 @@
 /*******************************************************************************
-    Copyright (c) 2021 NVIDIA Corporation
+    Copyright (c) 2021-2023 NVIDIA Corporation
 
     Permission is hereby granted, free of charge, to any person obtaining a copy
     of this software and associated documentation files (the "Software"), to
@@ -54,23 +54,26 @@ bool uvm_conf_computing_mode_is_hcc(const uvm_gpu_t *gpu)
     return uvm_conf_computing_get_mode(gpu->parent) == UVM_GPU_CONF_COMPUTE_MODE_HCC;
 }
 
-NV_STATUS uvm_conf_computing_init_parent_gpu(const uvm_parent_gpu_t *parent)
+void uvm_conf_computing_check_parent_gpu(const uvm_parent_gpu_t *parent)
 {
-    UvmGpuConfComputeMode cc, sys_cc;
-    uvm_gpu_t *first;
+    uvm_gpu_t *first_gpu;
 
     uvm_assert_mutex_locked(&g_uvm_global.global_lock);
 
+    // The Confidential Computing state of the GPU should match that of the
+    // system.
+    UVM_ASSERT(uvm_conf_computing_mode_enabled_parent(parent) == g_uvm_global.conf_computing_enabled);
+
     // TODO: Bug 2844714: since we have no routine to traverse parent GPUs,
     // find first child GPU and get its parent.
-    first = uvm_global_processor_mask_find_first_gpu(&g_uvm_global.retained_gpus);
-    if (!first)
-        return NV_OK;
+    first_gpu = uvm_global_processor_mask_find_first_gpu(&g_uvm_global.retained_gpus);
+    if (first_gpu == NULL)
+        return;
 
-    sys_cc = uvm_conf_computing_get_mode(first->parent);
-    cc = uvm_conf_computing_get_mode(parent);
-
-    return cc == sys_cc ? NV_OK : NV_ERR_NOT_SUPPORTED;
+    // All GPUs derive Confidential Computing status from their parent. By
+    // current policy all parent GPUs have identical Confidential Computing
+    // status.
+    UVM_ASSERT(uvm_conf_computing_get_mode(parent) == uvm_conf_computing_get_mode(first_gpu->parent));
 }
 
 static void dma_buffer_destroy_locked(uvm_conf_computing_dma_buffer_pool_t *dma_buffer_pool,
uvm_conf_computing.h

@@ -60,10 +60,8 @@
 // UVM_METHOD_SIZE * 2 * 10 = 80.
 #define UVM_CONF_COMPUTING_SIGN_BUF_MAX_SIZE 80
 
-// All GPUs derive confidential computing status from their parent.
-// By current policy all parent GPUs have identical confidential
-// computing status.
-NV_STATUS uvm_conf_computing_init_parent_gpu(const uvm_parent_gpu_t *parent);
+void uvm_conf_computing_check_parent_gpu(const uvm_parent_gpu_t *parent);
 
 bool uvm_conf_computing_mode_enabled_parent(const uvm_parent_gpu_t *parent);
 bool uvm_conf_computing_mode_enabled(const uvm_gpu_t *gpu);
 bool uvm_conf_computing_mode_is_hcc(const uvm_gpu_t *gpu);
uvm_global.c

@@ -71,11 +71,6 @@ static void uvm_unregister_callbacks(void)
     }
 }
 
-static void sev_init(const UvmPlatformInfo *platform_info)
-{
-    g_uvm_global.sev_enabled = platform_info->sevEnabled;
-}
-
 NV_STATUS uvm_global_init(void)
 {
     NV_STATUS status;
@@ -124,8 +119,7 @@ NV_STATUS uvm_global_init(void)
 
     uvm_ats_init(&platform_info);
     g_uvm_global.num_simulated_devices = 0;
-
-    sev_init(&platform_info);
+    g_uvm_global.conf_computing_enabled = platform_info.confComputingEnabled;
 
     status = uvm_gpu_init();
     if (status != NV_OK) {
uvm_global.h

@@ -1,5 +1,5 @@
 /*******************************************************************************
-    Copyright (c) 2015-2021 NVIDIA Corporation
+    Copyright (c) 2015-2023 NVIDIA Corporation
 
     Permission is hereby granted, free of charge, to any person obtaining a copy
     of this software and associated documentation files (the "Software"), to
@@ -143,11 +143,16 @@ struct uvm_global_struct
         struct page *page;
     } unload_state;
 
-    // AMD Secure Encrypted Virtualization (SEV) status. True if VM has SEV
-    // enabled. This field is set once during global initialization
-    // (uvm_global_init), and can be read afterwards without acquiring any
-    // locks.
-    bool sev_enabled;
+    // True if the VM has AMD's SEV, or equivalent HW security extensions such
+    // as Intel's TDX, enabled. The flag is always false on the host.
+    //
+    // This value moves in tandem with that of Confidential Computing in the
+    // GPU(s) in all supported configurations, so it is used as a proxy for the
+    // Confidential Computing state.
+    //
+    // This field is set once during global initialization (uvm_global_init),
+    // and can be read afterwards without acquiring any locks.
+    bool conf_computing_enabled;
 };
 
 // Initialize global uvm state
uvm_gpu.c

@@ -1099,12 +1099,7 @@ static NV_STATUS init_parent_gpu(uvm_parent_gpu_t *parent_gpu,
         return status;
     }
 
-    status = uvm_conf_computing_init_parent_gpu(parent_gpu);
-    if (status != NV_OK) {
-        UVM_ERR_PRINT("Confidential computing: %s, GPU %s\n",
-                      nvstatusToString(status), parent_gpu->name);
-        return status;
-    }
+    uvm_conf_computing_check_parent_gpu(parent_gpu);
 
     parent_gpu->pci_dev = gpu_platform_info->pci_dev;
     parent_gpu->closest_cpu_numa_node = dev_to_node(&parent_gpu->pci_dev->dev);
uvm_gpu.h

@@ -46,6 +46,7 @@
 #include "uvm_rb_tree.h"
 #include "uvm_perf_prefetch.h"
 #include "nv-kthread-q.h"
+#include <linux/mmu_notifier.h>
 #include "uvm_conf_computing.h"
 
 // Buffer length to store uvm gpu id, RM device name and gpu uuid.
@@ -192,9 +193,9 @@ typedef struct
     // Mask of successfully serviced read faults on pages in write_fault_mask.
     uvm_page_mask_t reads_serviced_mask;
 
-    // Temporary mask used for uvm_page_mask_or_equal. This is used since
-    // bitmap_or_equal() isn't present in all linux kernel versions.
-    uvm_page_mask_t tmp_mask;
+    // Mask of all faulted pages in a UVM_VA_BLOCK_SIZE aligned region of a
+    // SAM VMA. This is used as input to the prefetcher.
+    uvm_page_mask_t faulted_mask;
 
     // Client type of the service requestor.
     uvm_fault_client_type_t client_type;
@@ -204,6 +205,40 @@ typedef struct
 
     // New residency NUMA node ID of the faulting region.
     int residency_node;
 
+    struct
+    {
+        // True if preferred_location was set on this faulting region.
+        // The UVM_VA_BLOCK_SIZE sized region in the faulting region bound by
+        // the VMA is prefetched if preferred_location was set and if
+        // first_touch is true.
+        bool has_preferred_location;
+
+        // True if the UVM_VA_BLOCK_SIZE sized region isn't resident on any
+        // node. False if any page in the region is resident somewhere.
+        bool first_touch;
+
+        // Mask of prefetched pages in a UVM_VA_BLOCK_SIZE aligned region of a
+        // SAM VMA.
+        uvm_page_mask_t prefetch_pages_mask;
+
+        // PFN info of the faulting region
+        unsigned long pfns[PAGES_PER_UVM_VA_BLOCK];
+
+        // Faulting/preferred processor residency mask of the faulting region.
+        uvm_page_mask_t residency_mask;
+
+#if defined(NV_MMU_INTERVAL_NOTIFIER)
+        // MMU notifier used to compute residency of this faulting region.
+        struct mmu_interval_notifier notifier;
+#endif
+
+        uvm_va_space_t *va_space;
+
+        // Prefetch temporary state.
+        uvm_perf_prefetch_bitmap_tree_t bitmap_tree;
+    } prefetch_state;
+
 } uvm_ats_fault_context_t;
 
 struct uvm_fault_service_batch_context_struct
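A note on the footprint of the new prefetch_state (a back-of-the-envelope check under assumptions not stated in this hunk: the usual UVM_VA_BLOCK_SIZE of 2MiB, 4KiB pages, and a 64-bit kernel): PAGES_PER_UVM_VA_BLOCK is then 512, so the pfns[] array alone adds 512 × 8 bytes = 4KiB to every fault context.

```c
/* Back-of-the-envelope check of the pfns[] footprint, under the assumed
 * constants named above (2MiB VA block, 4KiB pages, LP64 longs). */
#define EXAMPLE_UVM_VA_BLOCK_SIZE (2UL * 1024 * 1024)
#define EXAMPLE_PAGE_SIZE         4096UL
#define EXAMPLE_PAGES_PER_BLOCK   (EXAMPLE_UVM_VA_BLOCK_SIZE / EXAMPLE_PAGE_SIZE)

_Static_assert(EXAMPLE_PAGES_PER_BLOCK == 512, "2MiB / 4KiB = 512 pages");
_Static_assert(EXAMPLE_PAGES_PER_BLOCK * sizeof(unsigned long) == 4096,
               "pfns[] occupies one 4KiB page on LP64");
```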
uvm_gpu_access_counters.c

@@ -1009,6 +1009,7 @@ static NV_STATUS service_va_block_locked(uvm_processor_id_t processor,
         NvU64 address = uvm_va_block_cpu_page_address(va_block, page_index);
         bool read_duplicate = false;
         uvm_processor_id_t new_residency;
+        const uvm_va_policy_t *policy;
 
         // Ensure that the migratability iterator covers the current address
         while (iter.end < address)
@@ -1035,21 +1036,23 @@ static NV_STATUS service_va_block_locked(uvm_processor_id_t processor,
 
         // If the underlying VMA is gone, skip HMM migrations.
         if (uvm_va_block_is_hmm(va_block)) {
-            status = uvm_hmm_find_vma(&service_context->block_context, address);
+            status = uvm_hmm_find_vma(service_context->block_context.mm,
+                                      &service_context->block_context.hmm.vma,
+                                      address);
             if (status == NV_ERR_INVALID_ADDRESS)
                 continue;
 
             UVM_ASSERT(status == NV_OK);
         }
 
-        service_context->block_context.policy = uvm_va_policy_get(va_block, address);
+        policy = uvm_va_policy_get(va_block, address);
 
         new_residency = uvm_va_block_select_residency(va_block,
                                                       &service_context->block_context,
                                                       page_index,
                                                       processor,
                                                       uvm_fault_access_type_mask_bit(UVM_FAULT_ACCESS_TYPE_PREFETCH),
-                                                      service_context->block_context.policy,
+                                                      policy,
                                                       &thrashing_hint,
                                                       UVM_SERVICE_OPERATION_ACCESS_COUNTERS,
                                                       &read_duplicate);
@@ -1094,12 +1097,17 @@ static NV_STATUS service_va_block_locked(uvm_processor_id_t processor,
     if (!uvm_processor_mask_empty(&service_context->resident_processors)) {
         while (first_page_index <= last_page_index) {
             uvm_page_index_t outer = last_page_index + 1;
+            const uvm_va_policy_t *policy;
 
             if (uvm_va_block_is_hmm(va_block)) {
-                status = uvm_hmm_find_policy_vma_and_outer(va_block,
-                                                           &service_context->block_context,
-                                                           first_page_index,
-                                                           &outer);
+                status = NV_ERR_INVALID_ADDRESS;
+                if (service_context->block_context.mm) {
+                    status = uvm_hmm_find_policy_vma_and_outer(va_block,
+                                                               &service_context->block_context.hmm.vma,
+                                                               first_page_index,
+                                                               &policy,
+                                                               &outer);
+                }
                 if (status != NV_OK)
                     break;
             }
uvm_gpu_non_replayable_faults.c

@@ -343,6 +343,7 @@ static NV_STATUS service_managed_fault_in_block_locked(uvm_gpu_t *gpu,
     bool read_duplicate;
     uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block);
     uvm_non_replayable_fault_buffer_info_t *non_replayable_faults = &gpu->parent->fault_buffer_info.non_replayable;
+    const uvm_va_policy_t *policy;
 
     UVM_ASSERT(!fault_entry->is_fatal);
@@ -352,7 +353,7 @@ static NV_STATUS service_managed_fault_in_block_locked(uvm_gpu_t *gpu,
     UVM_ASSERT(fault_entry->fault_address >= va_block->start);
     UVM_ASSERT(fault_entry->fault_address <= va_block->end);
 
-    service_context->block_context.policy = uvm_va_policy_get(va_block, fault_entry->fault_address);
+    policy = uvm_va_policy_get(va_block, fault_entry->fault_address);
 
     if (service_context->num_retries == 0) {
         // notify event to tools/performance heuristics. For now we use a
@@ -361,7 +362,7 @@ static NV_STATUS service_managed_fault_in_block_locked(uvm_gpu_t *gpu,
         uvm_perf_event_notify_gpu_fault(&va_space->perf_events,
                                         va_block,
                                         gpu->id,
-                                        service_context->block_context.policy->preferred_location,
+                                        policy->preferred_location,
                                         fault_entry,
                                         ++non_replayable_faults->batch_id,
                                         false);
@@ -396,7 +397,7 @@ static NV_STATUS service_managed_fault_in_block_locked(uvm_gpu_t *gpu,
                                                   page_index,
                                                   gpu->id,
                                                   fault_entry->access_type_mask,
-                                                  service_context->block_context.policy,
+                                                  policy,
                                                   &thrashing_hint,
                                                   UVM_SERVICE_OPERATION_NON_REPLAYABLE_FAULTS,
                                                   &read_duplicate);
@@ -678,10 +679,17 @@ static NV_STATUS service_fault(uvm_gpu_t *gpu, uvm_fault_buffer_entry_t *fault_e
         fault_entry->fault_source.channel_id = user_channel->hw_channel_id;
 
     if (!fault_entry->is_fatal) {
-        status = uvm_va_block_find_create(fault_entry->va_space,
-                                          fault_entry->fault_address,
-                                          va_block_context,
-                                          &va_block);
+        if (mm) {
+            status = uvm_va_block_find_create(fault_entry->va_space,
+                                              fault_entry->fault_address,
+                                              &va_block_context->hmm.vma,
+                                              &va_block);
+        }
+        else {
+            status = uvm_va_block_find_create_managed(fault_entry->va_space,
+                                                      fault_entry->fault_address,
+                                                      &va_block);
+        }
         if (status == NV_OK)
             status = service_managed_fault_in_block(gpu_va_space->gpu, va_block, fault_entry);
         else
@@ -734,8 +742,6 @@ void uvm_gpu_service_non_replayable_fault_buffer(uvm_gpu_t *gpu)
     // Differently to replayable faults, we do not batch up and preprocess
     // non-replayable faults since getting multiple faults on the same
     // memory region is not very likely
-    //
-    // TODO: Bug 2103669: [UVM/ATS] Optimize ATS fault servicing
     for (i = 0; i < cached_faults; ++i) {
         status = service_fault(gpu, &gpu->parent->fault_buffer_info.non_replayable.fault_cache[i]);
         if (status != NV_OK)
uvm_gpu_replayable_faults.c

@@ -1322,6 +1322,7 @@ static NV_STATUS service_fault_batch_block_locked(uvm_gpu_t *gpu,
     uvm_fault_buffer_entry_t **ordered_fault_cache = batch_context->ordered_fault_cache;
     uvm_service_block_context_t *block_context = &replayable_faults->block_service_context;
     uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block);
+    const uvm_va_policy_t *policy;
     NvU64 end;
 
     // Check that all uvm_fault_access_type_t values can fit into an NvU8
@@ -1347,13 +1348,13 @@ static NV_STATUS service_fault_batch_block_locked(uvm_gpu_t *gpu,
     UVM_ASSERT(ordered_fault_cache[first_fault_index]->fault_address <= va_block->end);
 
     if (uvm_va_block_is_hmm(va_block)) {
-        uvm_hmm_find_policy_end(va_block,
-                                &block_context->block_context,
-                                ordered_fault_cache[first_fault_index]->fault_address,
-                                &end);
+        policy = uvm_hmm_find_policy_end(va_block,
+                                         block_context->block_context.hmm.vma,
+                                         ordered_fault_cache[first_fault_index]->fault_address,
+                                         &end);
     }
     else {
-        block_context->block_context.policy = uvm_va_range_get_policy(va_block->va_range);
+        policy = uvm_va_range_get_policy(va_block->va_range);
         end = va_block->end;
     }
 
@@ -1393,7 +1394,7 @@ static NV_STATUS service_fault_batch_block_locked(uvm_gpu_t *gpu,
             update_batch_and_notify_fault(gpu,
                                           batch_context,
                                           va_block,
-                                          block_context->block_context.policy->preferred_location,
+                                          policy->preferred_location,
                                           current_entry,
                                           is_duplicate);
         }
@@ -1473,7 +1474,7 @@ static NV_STATUS service_fault_batch_block_locked(uvm_gpu_t *gpu,
                                                           page_index,
                                                           gpu->id,
                                                           service_access_type_mask,
-                                                          block_context->block_context.policy,
+                                                          policy,
                                                           &thrashing_hint,
                                                           UVM_SERVICE_OPERATION_REPLAYABLE_FAULTS,
                                                           &read_duplicate);
@@ -1625,21 +1626,25 @@ static NV_STATUS service_fault_batch_ats_sub_vma(uvm_gpu_va_space_t *gpu_va_spac
     uvm_ats_fault_context_t *ats_context = &batch_context->ats_context;
     const uvm_page_mask_t *read_fault_mask = &ats_context->read_fault_mask;
     const uvm_page_mask_t *write_fault_mask = &ats_context->write_fault_mask;
-    const uvm_page_mask_t *faults_serviced_mask = &ats_context->faults_serviced_mask;
     const uvm_page_mask_t *reads_serviced_mask = &ats_context->reads_serviced_mask;
-    uvm_page_mask_t *tmp_mask = &ats_context->tmp_mask;
+    uvm_page_mask_t *faults_serviced_mask = &ats_context->faults_serviced_mask;
+    uvm_page_mask_t *faulted_mask = &ats_context->faulted_mask;
 
     UVM_ASSERT(vma);
 
     ats_context->client_type = UVM_FAULT_CLIENT_TYPE_GPC;
 
-    uvm_page_mask_or(tmp_mask, write_fault_mask, read_fault_mask);
+    uvm_page_mask_or(faulted_mask, write_fault_mask, read_fault_mask);
 
     status = uvm_ats_service_faults(gpu_va_space, vma, base, &batch_context->ats_context);
 
-    UVM_ASSERT(uvm_page_mask_subset(faults_serviced_mask, tmp_mask));
+    // Remove prefetched pages from the serviced mask since fault servicing
+    // failures belonging to prefetch pages need to be ignored.
+    uvm_page_mask_and(faults_serviced_mask, faults_serviced_mask, faulted_mask);
 
-    if ((status != NV_OK) || uvm_page_mask_equal(faults_serviced_mask, tmp_mask)) {
+    UVM_ASSERT(uvm_page_mask_subset(faults_serviced_mask, faulted_mask));
+
+    if ((status != NV_OK) || uvm_page_mask_equal(faults_serviced_mask, faulted_mask)) {
         (*block_faults) += (fault_index_end - fault_index_start);
         return status;
     }
@@ -1867,7 +1872,13 @@ static NV_STATUS service_fault_batch_dispatch(uvm_va_space_t *va_space,
         va_range_next = uvm_va_space_iter_next(va_range_next, ~0ULL);
     }
 
-    status = uvm_va_block_find_create_in_range(va_space, va_range, fault_address, va_block_context, &va_block);
+    if (va_range)
+        status = uvm_va_block_find_create_in_range(va_space, va_range, fault_address, &va_block);
+    else if (mm)
+        status = uvm_hmm_va_block_find_create(va_space, fault_address, &va_block_context->hmm.vma, &va_block);
+    else
+        status = NV_ERR_INVALID_ADDRESS;
 
     if (status == NV_OK) {
         status = service_fault_batch_block(gpu, va_block, batch_context, fault_index, block_faults);
     }
uvm_hmm.c

@@ -110,7 +110,20 @@ typedef struct
 
 bool uvm_hmm_is_enabled_system_wide(void)
 {
-    return !uvm_disable_hmm && !g_uvm_global.ats.enabled && uvm_va_space_mm_enabled_system();
+    if (uvm_disable_hmm)
+        return false;
+
+    if (g_uvm_global.ats.enabled)
+        return false;
+
+    // Confidential Computing and HMM impose mutually exclusive constraints. In
+    // Confidential Computing the GPU can only access pages resident in vidmem,
+    // but in HMM pages may be required to be resident in sysmem: file backed
+    // VMAs, huge pages, etc.
+    if (g_uvm_global.conf_computing_enabled)
+        return false;
+
+    return uvm_va_space_mm_enabled_system();
 }
 
 bool uvm_hmm_is_enabled(uvm_va_space_t *va_space)
@@ -127,32 +140,17 @@ static uvm_va_block_t *hmm_va_block_from_node(uvm_range_tree_node_t *node)
     return container_of(node, uvm_va_block_t, hmm.node);
 }
 
-NV_STATUS uvm_hmm_va_space_initialize(uvm_va_space_t *va_space)
+void uvm_hmm_va_space_initialize(uvm_va_space_t *va_space)
 {
     uvm_hmm_va_space_t *hmm_va_space = &va_space->hmm;
-    struct mm_struct *mm = va_space->va_space_mm.mm;
-    int ret;
 
     if (!uvm_hmm_is_enabled(va_space))
-        return NV_OK;
-
-    uvm_assert_mmap_lock_locked_write(mm);
-    uvm_assert_rwsem_locked_write(&va_space->lock);
+        return;
 
     uvm_range_tree_init(&hmm_va_space->blocks);
     uvm_mutex_init(&hmm_va_space->blocks_lock, UVM_LOCK_ORDER_LEAF);
 
-    // Initialize MMU interval notifiers for this process.
-    // This allows mmu_interval_notifier_insert() to be called without holding
-    // the mmap_lock for write.
-    // Note: there is no __mmu_notifier_unregister(), this call just allocates
-    // memory which is attached to the mm_struct and freed when the mm_struct
-    // is freed.
-    ret = __mmu_notifier_register(NULL, mm);
-    if (ret)
-        return errno_to_nv_status(ret);
-
-    return NV_OK;
+    return;
 }
 
 void uvm_hmm_va_space_destroy(uvm_va_space_t *va_space)
@@ -325,7 +323,6 @@ static bool hmm_invalidate(uvm_va_block_t *va_block,
     region = uvm_va_block_region_from_start_end(va_block, start, end);
 
     va_block_context->hmm.vma = NULL;
-    va_block_context->policy = NULL;
 
     // We only need to unmap GPUs since Linux handles the CPUs.
     for_each_gpu_id_in_mask(id, &va_block->mapped) {
@@ -444,11 +441,11 @@ static void hmm_va_block_init(uvm_va_block_t *va_block,
 static NV_STATUS hmm_va_block_find_create(uvm_va_space_t *va_space,
                                           NvU64 addr,
                                           bool allow_unreadable_vma,
-                                          uvm_va_block_context_t *va_block_context,
+                                          struct vm_area_struct **vma_out,
                                           uvm_va_block_t **va_block_ptr)
 {
-    struct mm_struct *mm = va_space->va_space_mm.mm;
-    struct vm_area_struct *vma;
+    struct mm_struct *mm;
+    struct vm_area_struct *va_block_vma;
     uvm_va_block_t *va_block;
     NvU64 start, end;
     NV_STATUS status;
@@ -457,15 +454,14 @@ static NV_STATUS hmm_va_block_find_create(uvm_va_space_t *va_space,
     if (!uvm_hmm_is_enabled(va_space))
         return NV_ERR_INVALID_ADDRESS;
 
-    UVM_ASSERT(mm);
-    UVM_ASSERT(!va_block_context || va_block_context->mm == mm);
+    mm = va_space->va_space_mm.mm;
     uvm_assert_mmap_lock_locked(mm);
     uvm_assert_rwsem_locked(&va_space->lock);
     UVM_ASSERT(PAGE_ALIGNED(addr));
 
     // Note that we have to allow PROT_NONE VMAs so that policies can be set.
-    vma = find_vma(mm, addr);
-    if (!uvm_hmm_vma_is_valid(vma, addr, allow_unreadable_vma))
+    va_block_vma = find_vma(mm, addr);
+    if (!uvm_hmm_vma_is_valid(va_block_vma, addr, allow_unreadable_vma))
         return NV_ERR_INVALID_ADDRESS;
 
     // Since we only hold the va_space read lock, there can be multiple
@@ -517,8 +513,8 @@ static NV_STATUS hmm_va_block_find_create(uvm_va_space_t *va_space,
 
 done:
     uvm_mutex_unlock(&va_space->hmm.blocks_lock);
-    if (va_block_context)
-        va_block_context->hmm.vma = vma;
+    if (vma_out)
+        *vma_out = va_block_vma;
     *va_block_ptr = va_block;
     return NV_OK;
 
@@ -532,43 +528,36 @@ err_unlock:
 
 NV_STATUS uvm_hmm_va_block_find_create(uvm_va_space_t *va_space,
                                        NvU64 addr,
-                                       uvm_va_block_context_t *va_block_context,
+                                       struct vm_area_struct **vma,
                                        uvm_va_block_t **va_block_ptr)
 {
-    return hmm_va_block_find_create(va_space, addr, false, va_block_context, va_block_ptr);
+    return hmm_va_block_find_create(va_space, addr, false, vma, va_block_ptr);
 }
 
-NV_STATUS uvm_hmm_find_vma(uvm_va_block_context_t *va_block_context, NvU64 addr)
+NV_STATUS uvm_hmm_find_vma(struct mm_struct *mm, struct vm_area_struct **vma_out, NvU64 addr)
 {
-    struct mm_struct *mm = va_block_context->mm;
-    struct vm_area_struct *vma;
-
     if (!mm)
        return NV_ERR_INVALID_ADDRESS;
 
     uvm_assert_mmap_lock_locked(mm);
 
-    vma = find_vma(mm, addr);
-    if (!uvm_hmm_vma_is_valid(vma, addr, false))
+    *vma_out = find_vma(mm, addr);
+    if (!uvm_hmm_vma_is_valid(*vma_out, addr, false))
        return NV_ERR_INVALID_ADDRESS;
 
-    va_block_context->hmm.vma = vma;
-
     return NV_OK;
 }
 
 bool uvm_hmm_check_context_vma_is_valid(uvm_va_block_t *va_block,
-                                        uvm_va_block_context_t *va_block_context,
+                                        struct vm_area_struct *vma,
                                         uvm_va_block_region_t region)
 {
     uvm_assert_mutex_locked(&va_block->lock);
 
     if (uvm_va_block_is_hmm(va_block)) {
-        struct vm_area_struct *vma = va_block_context->hmm.vma;
-
         UVM_ASSERT(vma);
-        UVM_ASSERT(va_block_context->mm == vma->vm_mm);
-        uvm_assert_mmap_lock_locked(va_block_context->mm);
+        UVM_ASSERT(va_block->hmm.va_space->va_space_mm.mm == vma->vm_mm);
+        uvm_assert_mmap_lock_locked(va_block->hmm.va_space->va_space_mm.mm);
         UVM_ASSERT(vma->vm_start <= uvm_va_block_region_start(va_block, region));
         UVM_ASSERT(vma->vm_end > uvm_va_block_region_end(va_block, region));
     }
@@ -619,8 +608,6 @@ static NV_STATUS hmm_migrate_range(uvm_va_block_t *va_block,
     uvm_mutex_lock(&va_block->lock);
 
     uvm_for_each_va_policy_in(policy, va_block, start, end, node, region) {
-        va_block_context->policy = policy;
-
         // Even though UVM_VA_BLOCK_RETRY_LOCKED() may unlock and relock the
         // va_block lock, the policy remains valid because we hold the mmap
         // lock so munmap can't remove the policy, and the va_space lock so the
@@ -670,7 +657,6 @@ void uvm_hmm_evict_va_blocks(uvm_va_space_t *va_space)
             continue;
 
         block_context->hmm.vma = vma;
-        block_context->policy = &uvm_va_policy_default;
         uvm_hmm_va_block_migrate_locked(va_block,
                                         NULL,
                                         block_context,
@@ -1046,11 +1032,7 @@ static NV_STATUS hmm_set_preferred_location_locked(uvm_va_block_t *va_block,
         uvm_processor_mask_test(&old_policy->accessed_by, old_policy->preferred_location))
         uvm_processor_mask_set(&set_accessed_by_processors, old_policy->preferred_location);
 
-    va_block_context->policy = uvm_va_policy_set_preferred_location(va_block,
-                                                                    region,
-                                                                    preferred_location,
-                                                                    old_policy);
-    if (!va_block_context->policy)
+    if (!uvm_va_policy_set_preferred_location(va_block, region, preferred_location, old_policy))
         return NV_ERR_NO_MEMORY;
 
     // Establish new remote mappings if the old preferred location had
@@ -1109,7 +1091,7 @@ NV_STATUS uvm_hmm_set_preferred_location(uvm_va_space_t *va_space,
     for (addr = base; addr < last_address; addr = va_block->end + 1) {
         NvU64 end;
 
-        status = hmm_va_block_find_create(va_space, addr, true, va_block_context, &va_block);
+        status = hmm_va_block_find_create(va_space, addr, true, &va_block_context->hmm.vma, &va_block);
         if (status != NV_OK)
             break;
 
@@ -1151,7 +1133,6 @@ static NV_STATUS hmm_set_accessed_by_start_end_locked(uvm_va_block_t *va_block,
         if (uvm_va_policy_is_read_duplicate(&node->policy, va_space))
             continue;
 
-        va_block_context->policy = &node->policy;
         region = uvm_va_block_region_from_start_end(va_block,
                                                     max(start, node->node.start),
                                                     min(end, node->node.end));
@@ -1196,7 +1177,7 @@ NV_STATUS uvm_hmm_set_accessed_by(uvm_va_space_t *va_space,
     for (addr = base; addr < last_address; addr = va_block->end + 1) {
         NvU64 end;
 
-        status = hmm_va_block_find_create(va_space, addr, true, va_block_context, &va_block);
+        status = hmm_va_block_find_create(va_space, addr, true, &va_block_context->hmm.vma, &va_block);
         if (status != NV_OK)
             break;
 
@@ -1249,8 +1230,6 @@ void uvm_hmm_block_add_eviction_mappings(uvm_va_space_t *va_space,
     uvm_mutex_lock(&va_block->lock);
 
     uvm_for_each_va_policy_node_in(node, va_block, va_block->start, va_block->end) {
-        block_context->policy = &node->policy;
-
         for_each_id_in_mask(id, &node->policy.accessed_by) {
             status = hmm_set_accessed_by_start_end_locked(va_block,
                                                           block_context,
@@ -1309,13 +1288,13 @@ void uvm_hmm_block_add_eviction_mappings(uvm_va_space_t *va_space,
     }
 }
 
-void uvm_hmm_find_policy_end(uvm_va_block_t *va_block,
-                             uvm_va_block_context_t *va_block_context,
-                             unsigned long addr,
-                             NvU64 *endp)
+const uvm_va_policy_t *uvm_hmm_find_policy_end(uvm_va_block_t *va_block,
+                                               struct vm_area_struct *vma,
+                                               unsigned long addr,
+                                               NvU64 *endp)
 {
-    struct vm_area_struct *vma = va_block_context->hmm.vma;
     const uvm_va_policy_node_t *node;
+    const uvm_va_policy_t *policy;
     NvU64 end = va_block->end;
 
     uvm_assert_mmap_lock_locked(vma->vm_mm);
@@ -1326,40 +1305,45 @@ void uvm_hmm_find_policy_end(uvm_va_block_t *va_block,
 
     node = uvm_va_policy_node_find(va_block, addr);
     if (node) {
-        va_block_context->policy = &node->policy;
+        policy = &node->policy;
         if (end > node->node.end)
             end = node->node.end;
     }
     else {
-        va_block_context->policy = &uvm_va_policy_default;
+        policy = &uvm_va_policy_default;
     }
 
     *endp = end;
+
+    return policy;
 }
 
 NV_STATUS uvm_hmm_find_policy_vma_and_outer(uvm_va_block_t *va_block,
-                                            uvm_va_block_context_t *va_block_context,
+                                            struct vm_area_struct **vma_out,
                                             uvm_page_index_t page_index,
+                                            const uvm_va_policy_t **policy,
                                             uvm_page_index_t *outerp)
 {
-    struct vm_area_struct *vma;
     unsigned long addr;
     NvU64 end;
     uvm_page_index_t outer;
+    uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block);
+    struct mm_struct *mm = va_space->va_space_mm.mm;
+
+    if (!mm)
+        return NV_ERR_INVALID_ADDRESS;
 
     UVM_ASSERT(uvm_va_block_is_hmm(va_block));
-    uvm_assert_mmap_lock_locked(va_block_context->mm);
+    uvm_assert_mmap_lock_locked(mm);
     uvm_assert_mutex_locked(&va_block->lock);
 
     addr = uvm_va_block_cpu_page_address(va_block, page_index);
 
-    vma = vma_lookup(va_block_context->mm, addr);
-    if (!vma || !(vma->vm_flags & VM_READ))
+    *vma_out = vma_lookup(mm, addr);
+    if (!*vma_out || !((*vma_out)->vm_flags & VM_READ))
        return NV_ERR_INVALID_ADDRESS;
 
-    va_block_context->hmm.vma = vma;
-
-    uvm_hmm_find_policy_end(va_block, va_block_context, addr, &end);
+    *policy = uvm_hmm_find_policy_end(va_block, *vma_out, addr, &end);
 
     outer = uvm_va_block_cpu_page_index(va_block, end) + 1;
     if (*outerp > outer)
@@ -1379,8 +1363,6 @@ static NV_STATUS hmm_clear_thrashing_policy(uvm_va_block_t *va_block,
     uvm_mutex_lock(&va_block->lock);
 
     uvm_for_each_va_policy_in(policy, va_block, va_block->start, va_block->end, node, region) {
-        block_context->policy = policy;
-
         // Unmap may split PTEs and require a retry. Needs to be called
         // before the pinned pages information is destroyed.
         status = UVM_VA_BLOCK_RETRY_LOCKED(va_block,
@@ -1424,11 +1406,10 @@ NV_STATUS uvm_hmm_clear_thrashing_policy(uvm_va_space_t *va_space)
 }
 
 uvm_va_block_region_t uvm_hmm_get_prefetch_region(uvm_va_block_t *va_block,
-                                                  uvm_va_block_context_t *va_block_context,
+                                                  struct vm_area_struct *vma,
+                                                  const uvm_va_policy_t *policy,
                                                   NvU64 address)
 {
-    struct vm_area_struct *vma = va_block_context->hmm.vma;
-    const uvm_va_policy_t *policy = va_block_context->policy;
     NvU64 start, end;
 
     UVM_ASSERT(uvm_va_block_is_hmm(va_block));
@@ -1457,13 +1438,11 @@ uvm_va_block_region_t uvm_hmm_get_prefetch_region(uvm_va_block_t *va_block,
 }
 
 uvm_prot_t uvm_hmm_compute_logical_prot(uvm_va_block_t *va_block,
-                                        uvm_va_block_context_t *va_block_context,
+                                        struct vm_area_struct *vma,
                                         NvU64 addr)
 {
-    struct vm_area_struct *vma = va_block_context->hmm.vma;
-
     UVM_ASSERT(uvm_va_block_is_hmm(va_block));
-    uvm_assert_mmap_lock_locked(va_block_context->mm);
+    uvm_assert_mmap_lock_locked(va_block->hmm.va_space->va_space_mm.mm);
     UVM_ASSERT(vma && addr >= vma->vm_start && addr < vma->vm_end);
 
     if (!(vma->vm_flags & VM_READ))
@@ -2907,8 +2886,6 @@ static NV_STATUS uvm_hmm_migrate_alloc_and_copy(struct vm_area_struct *vma,
     if (status != NV_OK)
         return status;
 
-    UVM_ASSERT(!uvm_va_policy_is_read_duplicate(va_block_context->policy, va_block->hmm.va_space));
-
     status = uvm_va_block_make_resident_copy(va_block,
                                              va_block_retry,
                                              va_block_context,
@@ -3140,7 +3117,7 @@ NV_STATUS uvm_hmm_migrate_ranges(uvm_va_space_t *va_space,
     for (addr = base; addr < last_address; addr = end + 1) {
         struct vm_area_struct *vma;
 
-        status = hmm_va_block_find_create(va_space, addr, false, va_block_context, &va_block);
+        status = hmm_va_block_find_create(va_space, addr, false, &va_block_context->hmm.vma, &va_block);
         if (status != NV_OK)
             return status;
 
@@ -3232,7 +3209,6 @@ static NV_STATUS hmm_va_block_evict_chunks(uvm_va_block_t *va_block,
     uvm_for_each_va_policy_in(policy, va_block, start, end, node, region) {
         npages = uvm_va_block_region_num_pages(region);
 
-        va_block_context->policy = policy;
         if (out_accessed_by_set && uvm_processor_mask_get_count(&policy->accessed_by) > 0)
             *out_accessed_by_set = true;
 
uvm_hmm.h

@@ -49,9 +49,7 @@ typedef struct
 bool uvm_hmm_is_enabled_system_wide(void);
 
 // Initialize HMM for the given the va_space.
-// Locking: the va_space->va_space_mm.mm mmap_lock must be write locked
-// and the va_space lock must be held in write mode.
-NV_STATUS uvm_hmm_va_space_initialize(uvm_va_space_t *va_space);
+void uvm_hmm_va_space_initialize(uvm_va_space_t *va_space);
 
 // Destroy any HMM state for the given the va_space.
 // Locking: va_space lock must be held in write mode.
@@ -90,31 +88,30 @@ typedef struct
// address 'addr' or the VMA does not have at least PROT_READ permission.
// The caller is also responsible for checking that there is no UVM
// va_range covering the given address before calling this function.
// If va_block_context is not NULL, the VMA is cached in
// va_block_context->hmm.vma.
// The VMA is returned in vma_out if it's not NULL.
// Locking: This function must be called with mm retained and locked for
// at least read and the va_space lock at least for read.
NV_STATUS uvm_hmm_va_block_find_create(uvm_va_space_t *va_space,
NvU64 addr,
uvm_va_block_context_t *va_block_context,
struct vm_area_struct **vma_out,
uvm_va_block_t **va_block_ptr);

// Find the VMA for the given address and set va_block_context->hmm.vma.
// Return NV_ERR_INVALID_ADDRESS if va_block_context->mm is NULL or there
// is no VMA associated with the address 'addr' or the VMA does not have at
// least PROT_READ permission.
// Find the VMA for the given address and return it in vma_out. Return
// NV_ERR_INVALID_ADDRESS if mm is NULL or there is no VMA associated with
// the address 'addr' or the VMA does not have at least PROT_READ
// permission.
// Locking: This function must be called with mm retained and locked for
// at least read or mm equal to NULL.
NV_STATUS uvm_hmm_find_vma(uvm_va_block_context_t *va_block_context, NvU64 addr);
NV_STATUS uvm_hmm_find_vma(struct mm_struct *mm, struct vm_area_struct **vma_out, NvU64 addr);

// If va_block is a HMM va_block, check that va_block_context->hmm.vma is
// not NULL and covers the given region. This always returns true and is
// intended to only be used with UVM_ASSERT().
// If va_block is a HMM va_block, check that vma is not NULL and covers the
// given region. This always returns true and is intended to only be used
// with UVM_ASSERT().
// Locking: This function must be called with the va_block lock held and if
// va_block is a HMM block, va_block_context->mm must be retained and
// locked for at least read.
// va_block is a HMM block, va_space->va_space_mm.mm->mmap_lock must be
// retained and locked for at least read.
bool uvm_hmm_check_context_vma_is_valid(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
struct vm_area_struct *vma,
uvm_va_block_region_t region);

// Initialize the HMM portion of the service_context.

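A short sketch of the reworked uvm_hmm_find_vma() contract, assuming a caller
that already holds mm's mmap_lock for read (the wrapper name is hypothetical):

static NV_STATUS example_lookup_vma(struct mm_struct *mm, NvU64 addr)
{
    struct vm_area_struct *vma;
    NV_STATUS status = uvm_hmm_find_vma(mm, &vma, addr);

    // NV_ERR_INVALID_ADDRESS covers mm == NULL, no VMA at addr, or a VMA
    // without PROT_READ, per the comment above.
    if (status != NV_OK)
        return status;

    // ... clamp the serviced range to [vma->vm_start, vma->vm_end) ...
    return NV_OK;
}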
@@ -225,31 +222,29 @@ typedef struct
return NV_OK;
}

// This function assigns va_block_context->policy to the policy covering
// the given address 'addr' and assigns the ending address '*endp' to the
// minimum of va_block->end, va_block_context->hmm.vma->vm_end - 1, and the
// ending address of the policy range. Note that va_block_context->hmm.vma
// is expected to be initialized before calling this function.
// Locking: This function must be called with
// va_block_context->hmm.vma->vm_mm retained and locked for at least read
// and the va_block lock held.
void uvm_hmm_find_policy_end(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
unsigned long addr,
NvU64 *endp);
// This function returns the policy covering the given address 'addr' and
// assigns the ending address '*endp' to the minimum of va_block->end,
// vma->vm_end - 1, and the ending address of the policy range.
// Locking: This function must be called with vma->vm_mm retained and
// locked for at least read and the va_block and va_space lock held.
const uvm_va_policy_t *uvm_hmm_find_policy_end(uvm_va_block_t *va_block,
struct vm_area_struct *vma,
unsigned long addr,
NvU64 *endp);

// This function finds the VMA for the page index 'page_index' and assigns
// it to va_block_context->vma, sets va_block_context->policy to the policy
// covering the given address, and sets the ending page range '*outerp'
// to the minimum of *outerp, va_block_context->hmm.vma->vm_end - 1, the
// ending address of the policy range, and va_block->end.
// Return NV_ERR_INVALID_ADDRESS if no VMA is found; otherwise, NV_OK.
// Locking: This function must be called with
// va_block_context->hmm.vma->vm_mm retained and locked for at least read
// and the va_block lock held.
// This function finds the VMA for the page index 'page_index' and returns
// it in vma_out, which must not be NULL. Returns the policy covering the
// given address, and sets the ending page range '*outerp' to the minimum of
// *outerp, vma->vm_end - 1, the ending address of the policy range, and
// va_block->end.
// Return NV_ERR_INVALID_ADDRESS if no VMA is found; otherwise sets *vma
// and returns NV_OK.
// Locking: This function must be called with mm retained and locked for at
// least read and the va_block and va_space lock held.
NV_STATUS uvm_hmm_find_policy_vma_and_outer(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
struct vm_area_struct **vma,
uvm_page_index_t page_index,
const uvm_va_policy_t **policy,
uvm_page_index_t *outerp);

// Clear thrashing policy information from all HMM va_blocks.

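Sketch of a policy walk under the new return-value contract, where the policy
comes back from uvm_hmm_find_policy_end() instead of being stashed on the
context (the loop shape is illustrative only):

NvU64 addr = uvm_va_block_region_start(va_block, region);
NvU64 last = uvm_va_block_region_end(va_block, region);
NvU64 end;

while (addr <= last) {
    const uvm_va_policy_t *policy = uvm_hmm_find_policy_end(va_block, vma, addr, &end);

    // ... apply 'policy' to the subrange [addr, end] ...
    addr = end + 1;
}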
@@ -258,24 +253,21 @@ typedef struct

// Return the expanded region around 'address' limited to the intersection
// of va_block start/end, vma start/end, and policy start/end.
// va_block_context must not be NULL, va_block_context->hmm.vma must be
// valid (this is usually set by uvm_hmm_va_block_find_create()), and
// va_block_context->policy must be valid.
// Locking: the caller must hold mm->mmap_lock in at least read mode, the
// va_space lock must be held in at least read mode, and the va_block lock
// held.
// Locking: the caller must hold va_space->va_space_mm.mm->mmap_lock in at
// least read mode, the va_space lock must be held in at least read mode,
// and the va_block lock held.
uvm_va_block_region_t uvm_hmm_get_prefetch_region(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
struct vm_area_struct *vma,
const uvm_va_policy_t *policy,
NvU64 address);

// Return the logical protection allowed of a HMM va_block for the page at
// the given address.
// va_block_context must not be NULL and va_block_context->hmm.vma must be
// valid (this is usually set by uvm_hmm_va_block_find_create()).
// Locking: the caller must hold va_block_context->mm mmap_lock in at least
// read mode.
// the given address within the vma, which must be valid. This is usually
// obtained from uvm_hmm_va_block_find_create().
// Locking: the caller must hold va_space->va_space_mm.mm mmap_lock in at
// least read mode.
uvm_prot_t uvm_hmm_compute_logical_prot(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
struct vm_area_struct *vma,
NvU64 addr);

// This is called to service a GPU fault.

@@ -288,9 +280,9 @@ typedef struct
uvm_service_block_context_t *service_context);

// This is called to migrate a region within a HMM va_block.
// va_block_context must not be NULL and va_block_context->policy and
// va_block_context->hmm.vma must be valid.
// Locking: the va_block_context->mm must be retained, mmap_lock must be
// va_block_context must not be NULL and va_block_context->hmm.vma
// must be valid.
// Locking: the va_space->va_space_mm.mm must be retained, mmap_lock must be
// locked, and the va_block lock held.
NV_STATUS uvm_hmm_va_block_migrate_locked(uvm_va_block_t *va_block,
uvm_va_block_retry_t *va_block_retry,

@@ -303,7 +295,7 @@ typedef struct
// UvmMigrate().
//
// va_block_context must not be NULL. The caller is not required to set
// va_block_context->policy or va_block_context->hmm.vma.
// va_block_context->hmm.vma.
//
// Locking: the va_space->va_space_mm.mm mmap_lock must be locked and
// the va_space read lock must be held.

@@ -412,9 +404,8 @@ typedef struct
return false;
}

static NV_STATUS uvm_hmm_va_space_initialize(uvm_va_space_t *va_space)
static void uvm_hmm_va_space_initialize(uvm_va_space_t *va_space)
{
return NV_OK;
}

static void uvm_hmm_va_space_destroy(uvm_va_space_t *va_space)

@@ -440,19 +431,19 @@ typedef struct

static NV_STATUS uvm_hmm_va_block_find_create(uvm_va_space_t *va_space,
NvU64 addr,
uvm_va_block_context_t *va_block_context,
struct vm_area_struct **vma,
uvm_va_block_t **va_block_ptr)
{
return NV_ERR_INVALID_ADDRESS;
}

static NV_STATUS uvm_hmm_find_vma(uvm_va_block_context_t *va_block_context, NvU64 addr)
static NV_STATUS uvm_hmm_find_vma(struct mm_struct *mm, struct vm_area_struct **vma, NvU64 addr)
{
return NV_OK;
}

static bool uvm_hmm_check_context_vma_is_valid(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
struct vm_area_struct *vma,
uvm_va_block_region_t region)
{
return true;

@@ -533,16 +524,19 @@ typedef struct
return NV_ERR_INVALID_ADDRESS;
}

static void uvm_hmm_find_policy_end(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
unsigned long addr,
NvU64 *endp)
static const uvm_va_policy_t *uvm_hmm_find_policy_end(uvm_va_block_t *va_block,
struct vm_area_struct *vma,
unsigned long addr,
NvU64 *endp)
{
UVM_ASSERT(0);
return NULL;
}

static NV_STATUS uvm_hmm_find_policy_vma_and_outer(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
struct vm_area_struct **vma,
uvm_page_index_t page_index,
const uvm_va_policy_t **policy,
uvm_page_index_t *outerp)
{
return NV_OK;

@@ -554,14 +548,15 @@ typedef struct
}

static uvm_va_block_region_t uvm_hmm_get_prefetch_region(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
struct vm_area_struct *vma,
const uvm_va_policy_t *policy,
NvU64 address)
{
return (uvm_va_block_region_t){};
}

static uvm_prot_t uvm_hmm_compute_logical_prot(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
struct vm_area_struct *vma,
NvU64 addr)
{
return UVM_PROT_NONE;
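The static stubs above back the same declarations for kernels built without
HMM support; a condensed sketch of the header's selection pattern, assuming
the existing UVM_IS_CONFIG_HMM() guard:

#if UVM_IS_CONFIG_HMM()
NV_STATUS uvm_hmm_find_vma(struct mm_struct *mm, struct vm_area_struct **vma_out, NvU64 addr);
#else
static NV_STATUS uvm_hmm_find_vma(struct mm_struct *mm, struct vm_area_struct **vma_out, NvU64 addr)
{
    return NV_OK; // no-op when HMM is compiled out
}
#endif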
@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2016-2022 NVIDIA Corporation
Copyright (c) 2016-2023 NVIDIA Corporation

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to

@@ -93,8 +93,9 @@ static bool sysmem_can_be_mapped_on_gpu(uvm_mem_t *sysmem)
{
UVM_ASSERT(uvm_mem_is_sysmem(sysmem));

// If SEV is enabled, only unprotected memory can be mapped
if (g_uvm_global.sev_enabled)
// In Confidential Computing, only unprotected memory can be mapped on the
// GPU
if (g_uvm_global.conf_computing_enabled)
return uvm_mem_is_sysmem_dma(sysmem);

return true;

@@ -737,7 +738,7 @@ static NV_STATUS mem_map_cpu_to_sysmem_kernel(uvm_mem_t *mem)
pages[page_index] = mem_cpu_page(mem, page_index * PAGE_SIZE);
}

if (g_uvm_global.sev_enabled && uvm_mem_is_sysmem_dma(mem))
if (g_uvm_global.conf_computing_enabled && uvm_mem_is_sysmem_dma(mem))
prot = uvm_pgprot_decrypted(PAGE_KERNEL_NOENC);

mem->kernel.cpu_addr = vmap(pages, num_pages, VM_MAP, prot);
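Background for the PAGE_KERNEL_NOENC usage above: in Confidential Computing,
DMA-able sysmem is shared with the device, so its kernel CPU alias must be
mapped decrypted. A stand-alone sketch using only stock kernel APIs (the
cc_platform_has() check stands in for the driver's own mode flag):

#include <linux/vmalloc.h>
#include <linux/cc_platform.h>

static void *example_map_shared_pages(struct page **pages, unsigned int count)
{
    pgprot_t prot = PAGE_KERNEL;

    if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
        prot = pgprot_decrypted(PAGE_KERNEL_NOENC);

    // VM_MAP mirrors the driver's vmap() call above.
    return vmap(pages, count, VM_MAP, prot);
}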
@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2016-2021 NVIDIA Corporation
Copyright (c) 2016-2023 NVIDIA Corporation

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to

@@ -44,10 +44,10 @@ static NvU32 first_page_size(NvU32 page_sizes)

static inline NV_STATUS __alloc_map_sysmem(NvU64 size, uvm_gpu_t *gpu, uvm_mem_t **sys_mem)
{
if (g_uvm_global.sev_enabled)
if (g_uvm_global.conf_computing_enabled)
return uvm_mem_alloc_sysmem_dma_and_map_cpu_kernel(size, gpu, current->mm, sys_mem);
else
return uvm_mem_alloc_sysmem_and_map_cpu_kernel(size, current->mm, sys_mem);

return uvm_mem_alloc_sysmem_and_map_cpu_kernel(size, current->mm, sys_mem);
}

static NV_STATUS check_accessible_from_gpu(uvm_gpu_t *gpu, uvm_mem_t *mem)

@@ -335,9 +335,6 @@ error:

static bool should_test_page_size(size_t alloc_size, NvU32 page_size)
{
if (g_uvm_global.sev_enabled)
return false;

if (g_uvm_global.num_simulated_devices == 0)
return true;

@@ -223,7 +223,7 @@ NV_STATUS uvm_va_block_migrate_locked(uvm_va_block_t *va_block,
NV_STATUS status, tracker_status = NV_OK;

uvm_assert_mutex_locked(&va_block->lock);
UVM_ASSERT(uvm_hmm_check_context_vma_is_valid(va_block, va_block_context, region));
UVM_ASSERT(uvm_hmm_check_context_vma_is_valid(va_block, va_block_context->hmm.vma, region));

if (uvm_va_block_is_hmm(va_block)) {
status = uvm_hmm_va_block_migrate_locked(va_block,

@@ -234,9 +234,9 @@ NV_STATUS uvm_va_block_migrate_locked(uvm_va_block_t *va_block,
UVM_MAKE_RESIDENT_CAUSE_API_MIGRATE);
}
else {
va_block_context->policy = uvm_va_range_get_policy(va_block->va_range);
uvm_va_policy_t *policy = uvm_va_range_get_policy(va_block->va_range);

if (uvm_va_policy_is_read_duplicate(va_block_context->policy, va_space)) {
if (uvm_va_policy_is_read_duplicate(policy, va_space)) {
status = uvm_va_block_make_resident_read_duplicate(va_block,
va_block_retry,
va_block_context,

@@ -371,8 +371,6 @@ static bool va_block_should_do_cpu_preunmap(uvm_va_block_t *va_block,
if (!va_block)
return true;

UVM_ASSERT(va_range_should_do_cpu_preunmap(va_block_context->policy, uvm_va_block_get_va_space(va_block)));

region = uvm_va_block_region_from_start_end(va_block, max(start, va_block->start), min(end, va_block->end));

uvm_mutex_lock(&va_block->lock);

@@ -496,11 +494,9 @@ static NV_STATUS uvm_va_range_migrate(uvm_va_range_t *va_range,
uvm_tracker_t *out_tracker)
{
NvU64 preunmap_range_start = start;
uvm_va_policy_t *policy = uvm_va_range_get_policy(va_range);

UVM_ASSERT(va_block_context->policy == uvm_va_range_get_policy(va_range));

should_do_cpu_preunmap = should_do_cpu_preunmap && va_range_should_do_cpu_preunmap(va_block_context->policy,
va_range->va_space);
should_do_cpu_preunmap = should_do_cpu_preunmap && va_range_should_do_cpu_preunmap(policy, va_range->va_space);

// Divide migrations into groups of contiguous VA blocks. This is to trigger
// CPU unmaps for that region before the migration starts.

@@ -577,8 +573,6 @@ static NV_STATUS uvm_migrate_ranges(uvm_va_space_t *va_space,
break;
}

va_block_context->policy = uvm_va_range_get_policy(va_range);

// For UVM-Lite GPUs, the CUDA driver may suballocate a single va_range
// into many range groups. For this reason, we iterate over each va_range
// first, then through the range groups within.

@@ -653,6 +647,8 @@ static NV_STATUS uvm_migrate(uvm_va_space_t *va_space,

if (mm)
uvm_assert_mmap_lock_locked(mm);
else if (!first_va_range)
return NV_ERR_INVALID_ADDRESS;

va_block_context = uvm_va_block_context_alloc(mm);
if (!va_block_context)
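The common thread in this file's hunks, sketched: the block context no longer
carries a cached policy, so each path derives it locally from the va_range
(the helper name below is hypothetical):

static bool example_should_read_duplicate(uvm_va_range_t *va_range, uvm_va_space_t *va_space)
{
    uvm_va_policy_t *policy = uvm_va_range_get_policy(va_range);

    return uvm_va_policy_is_read_duplicate(policy, va_space);
}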
@@ -672,6 +672,14 @@ static NV_STATUS nv_migrate_vma(struct migrate_vma *args, migrate_vma_state_t *s
.finalize_and_map = uvm_migrate_vma_finalize_and_map_helper,
};

// WAR for Bug 4130089: [GH180][r535] WAR for kernel not issuing SMMU TLB
// invalidates on read-only to read-write upgrades
//
// This code path isn't used on GH180 but we need to maintain consistent
// behaviour on systems that do use it.
if (!vma_is_anonymous(args->vma))
return NV_WARN_NOTHING_TO_DO;

ret = migrate_vma(&uvm_migrate_vma_ops, args->vma, args->start, args->end, args->src, args->dst, state);
if (ret < 0)
return errno_to_nv_status(ret);

@@ -685,6 +693,24 @@ static NV_STATUS nv_migrate_vma(struct migrate_vma *args, migrate_vma_state_t *s
if (ret < 0)
return errno_to_nv_status(ret);

// TODO: Bug 2419180: support file-backed pages in migrate_vma, when
// support for it is added to the Linux kernel
//
// A side-effect of migrate_vma_setup() is that it calls MMU notifiers
// even if a page can't be migrated (e.g. because it's a non-anonymous
// mapping). We need this side-effect for SMMU on GH180 to ensure any
// cached read-only entries are flushed from SMMU on permission upgrade.
//
// TODO: Bug 4130089: [GH180][r535] WAR for kernel not issuing SMMU TLB
// invalidates on read-only to read-write upgrades
//
// The above WAR doesn't work for HugeTLBfs mappings because
// migrate_vma_setup() will fail in that case.
if (!vma_is_anonymous(args->vma)) {
migrate_vma_finalize(args);
return NV_WARN_NOTHING_TO_DO;
}

uvm_migrate_vma_alloc_and_copy(args, state);
if (state->status == NV_OK) {
migrate_vma_pages(args);

@@ -858,9 +884,13 @@ static NV_STATUS migrate_pageable_vma(struct vm_area_struct *vma,
start = max(start, vma->vm_start);
outer = min(outer, vma->vm_end);

// TODO: Bug 2419180: support file-backed pages in migrate_vma, when
// support for it is added to the Linux kernel
if (!vma_is_anonymous(vma))
// migrate_vma only supports anonymous VMAs. We check for those after
// calling migrate_vma_setup() to work around Bug 4130089. We need to
// check for HugeTLB VMAs here because migrate_vma_setup() will return
// a fatal error for those.
// TODO: Bug 4130089: [GH180][r535] WAR for kernel not issuing SMMU TLB
// invalidates on read-only to read-write upgrades
if (is_vm_hugetlb_page(vma))
return NV_WARN_NOTHING_TO_DO;

if (uvm_processor_mask_empty(&va_space->registered_gpus))
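The ordering the two migrate_vma hunks establish, sketched end to end with the
stock kernel API (simplified; error handling and the copy step are elided):

#include <linux/migrate.h>
#include <linux/hugetlb.h>

static int example_migrate(struct migrate_vma *args)
{
    // HugeTLB VMAs must be rejected up front: migrate_vma_setup() fails on them.
    if (is_vm_hugetlb_page(args->vma))
        return 0; // nothing to do

    // Run setup even for VMAs that cannot be migrated: its MMU-notifier
    // side effect flushes stale read-only SMMU entries (Bug 4130089 WAR).
    if (migrate_vma_setup(args))
        return -EINVAL;

    if (!vma_is_anonymous(args->vma)) {
        migrate_vma_finalize(args); // undo setup; nothing to migrate
        return 0;
    }

    // ... allocate and copy destination pages ...
    migrate_vma_pages(args);
    migrate_vma_finalize(args);
    return 0;
}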
@@ -34,8 +34,8 @@ typedef struct
{
uvm_va_space_t *va_space;
struct mm_struct *mm;
unsigned long start;
unsigned long length;
const unsigned long start;
const unsigned long length;
uvm_processor_id_t dst_id;

// dst_node_id may be clobbered by uvm_migrate_pageable().
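Making start and length const means they can only be set when the argument
struct is built, which pins them for the whole operation while dst_node_id
stays mutable. Sketch of the resulting initialization style (the typedef name
uvm_migrate_args_t is assumed from context):

uvm_migrate_args_t uvm_migrate_args = {
    .va_space = va_space,
    .mm       = mm,
    .start    = start,  // fixed for the lifetime of the operation
    .length   = length, // likewise
    .dst_id   = dest_id,
};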
@@ -906,11 +906,10 @@ error:
// --------------|-------------------------||----------------|----------------
// vidmem        | -                       || vidmem         | false
// sysmem        | -                       || sysmem         | false
// default       | <not set>               || vidmem         | true (1)
// default       | <not set>               || vidmem         | true
// default       | vidmem                  || vidmem         | false
// default       | sysmem                  || sysmem         | false
//
// (1) When SEV mode is enabled, the fallback path is disabled.
//
// In SR-IOV heavy the page tree must be in vidmem, to prevent guest drivers
// from updating GPU page tables without hypervisor knowledge.

@@ -926,28 +925,27 @@ error:
//
static void page_tree_set_location(uvm_page_tree_t *tree, uvm_aperture_t location)
{
bool should_location_be_vidmem;
UVM_ASSERT(tree->gpu != NULL);
UVM_ASSERT_MSG((location == UVM_APERTURE_VID) ||
(location == UVM_APERTURE_SYS) ||
(location == UVM_APERTURE_DEFAULT),
"Invalid location %s (%d)\n", uvm_aperture_string(location), (int)location);

should_location_be_vidmem = uvm_gpu_is_virt_mode_sriov_heavy(tree->gpu)
|| uvm_conf_computing_mode_enabled(tree->gpu);

// The page tree of a "fake" GPU used during page tree testing can be in
// sysmem even if should_location_be_vidmem is true. A fake GPU can be
// identified by having no channel manager.
if ((tree->gpu->channel_manager != NULL) && should_location_be_vidmem)
UVM_ASSERT(location == UVM_APERTURE_VID);
// sysmem in scenarios where a "real" GPU must be in vidmem. Fake GPUs can
// be identified by having no channel manager.
if (tree->gpu->channel_manager != NULL) {

if (uvm_gpu_is_virt_mode_sriov_heavy(tree->gpu))
UVM_ASSERT(location == UVM_APERTURE_VID);
else if (uvm_conf_computing_mode_enabled(tree->gpu))
UVM_ASSERT(location == UVM_APERTURE_VID);
}

if (location == UVM_APERTURE_DEFAULT) {
if (page_table_aperture == UVM_APERTURE_DEFAULT) {
tree->location = UVM_APERTURE_VID;

// See the comment (1) above.
tree->location_sys_fallback = !g_uvm_global.sev_enabled;
tree->location_sys_fallback = true;
}
else {
tree->location = page_table_aperture;
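The location rules above, condensed into one decision helper (a sketch that
mirrors the table and assertions rather than adding behavior):

static uvm_aperture_t example_expected_tree_location(uvm_page_tree_t *tree, uvm_aperture_t requested)
{
    // Real GPUs (those with a channel manager) must keep the page tree in
    // vidmem under SR-IOV heavy and in Confidential Computing.
    if (tree->gpu->channel_manager != NULL &&
        (uvm_gpu_is_virt_mode_sriov_heavy(tree->gpu) || uvm_conf_computing_mode_enabled(tree->gpu)))
        return UVM_APERTURE_VID;

    // DEFAULT resolves to vidmem, now with the sysmem fallback always allowed.
    return (requested == UVM_APERTURE_DEFAULT) ? UVM_APERTURE_VID : requested;
}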
@@ -218,57 +218,11 @@ static void grow_fault_granularity(uvm_perf_prefetch_bitmap_tree_t *bitmap_tree,
}
}

// Within a block we only allow prefetching to a single processor. Therefore,
// if two processors are accessing non-overlapping regions within the same
// block they won't benefit from prefetching.
//
// TODO: Bug 1778034: [uvm] Explore prefetching to different processors within
// a VA block.
static NvU32 uvm_perf_prefetch_prenotify_fault_migrations(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
uvm_processor_id_t new_residency,
const uvm_page_mask_t *faulted_pages,
uvm_va_block_region_t faulted_region,
uvm_page_mask_t *prefetch_pages,
uvm_perf_prefetch_bitmap_tree_t *bitmap_tree)
static void init_bitmap_tree_from_region(uvm_perf_prefetch_bitmap_tree_t *bitmap_tree,
uvm_va_block_region_t max_prefetch_region,
const uvm_page_mask_t *resident_mask,
const uvm_page_mask_t *faulted_pages)
{
uvm_page_index_t page_index;
const uvm_page_mask_t *resident_mask = NULL;
const uvm_page_mask_t *thrashing_pages = NULL;
uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block);
const uvm_va_policy_t *policy = va_block_context->policy;
uvm_va_block_region_t max_prefetch_region;
NvU32 big_page_size;
uvm_va_block_region_t big_pages_region;

if (!uvm_id_equal(va_block->prefetch_info.last_migration_proc_id, new_residency)) {
va_block->prefetch_info.last_migration_proc_id = new_residency;
va_block->prefetch_info.fault_migrations_to_last_proc = 0;
}

// Compute the expanded region that prefetching is allowed from.
if (uvm_va_block_is_hmm(va_block)) {
max_prefetch_region = uvm_hmm_get_prefetch_region(va_block,
va_block_context,
uvm_va_block_region_start(va_block, faulted_region));
}
else {
max_prefetch_region = uvm_va_block_region_from_block(va_block);
}

uvm_page_mask_zero(prefetch_pages);

if (UVM_ID_IS_CPU(new_residency) || va_block->gpus[uvm_id_gpu_index(new_residency)] != NULL)
resident_mask = uvm_va_block_resident_mask_get(va_block, new_residency);

// If this is a first-touch fault and the destination processor is the
// preferred location, populate the whole max_prefetch_region.
if (uvm_processor_mask_empty(&va_block->resident) &&
uvm_id_equal(new_residency, policy->preferred_location)) {
uvm_page_mask_region_fill(prefetch_pages, max_prefetch_region);
goto done;
}

if (resident_mask)
uvm_page_mask_or(&bitmap_tree->pages, resident_mask, faulted_pages);
else

@@ -277,6 +231,29 @@ static NvU32 uvm_perf_prefetch_prenotify_fault_migrations(uvm_va_blo
// If we are using a subregion of the va_block, align bitmap_tree
uvm_page_mask_shift_right(&bitmap_tree->pages, &bitmap_tree->pages, max_prefetch_region.first);

bitmap_tree->offset = 0;
bitmap_tree->leaf_count = uvm_va_block_region_num_pages(max_prefetch_region);
bitmap_tree->level_count = ilog2(roundup_pow_of_two(bitmap_tree->leaf_count)) + 1;
}

static void update_bitmap_tree_from_va_block(uvm_perf_prefetch_bitmap_tree_t *bitmap_tree,
uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
uvm_processor_id_t new_residency,
const uvm_page_mask_t *faulted_pages,
uvm_va_block_region_t max_prefetch_region)
{
NvU32 big_page_size;
uvm_va_block_region_t big_pages_region;
uvm_va_space_t *va_space;
const uvm_page_mask_t *thrashing_pages;

UVM_ASSERT(va_block);
UVM_ASSERT(va_block_context);

va_space = uvm_va_block_get_va_space(va_block);

// Get the big page size for the new residency.
// Assume 64K size if the new residency is the CPU or no GPU va space is
// registered in the current process for this GPU.

@@ -302,13 +279,9 @@ static NvU32 uvm_perf_prefetch_prenotify_fault_migrations(uvm_va_blo
UVM_ASSERT(bitmap_tree->leaf_count <= PAGES_PER_UVM_VA_BLOCK);

uvm_page_mask_shift_left(&bitmap_tree->pages, &bitmap_tree->pages, bitmap_tree->offset);
}
else {
bitmap_tree->offset = 0;
bitmap_tree->leaf_count = uvm_va_block_region_num_pages(max_prefetch_region);
}

bitmap_tree->level_count = ilog2(roundup_pow_of_two(bitmap_tree->leaf_count)) + 1;
bitmap_tree->level_count = ilog2(roundup_pow_of_two(bitmap_tree->leaf_count)) + 1;
}

thrashing_pages = uvm_perf_thrashing_get_thrashing_pages(va_block);


@@ -320,25 +293,99 @@ static NvU32 uvm_perf_prefetch_prenotify_fault_migrations(uvm_va_blo
max_prefetch_region,
faulted_pages,
thrashing_pages);
}

// Do not compute prefetch regions with faults on pages that are thrashing
if (thrashing_pages)
uvm_page_mask_andnot(&va_block_context->scratch_page_mask, faulted_pages, thrashing_pages);
else
uvm_page_mask_copy(&va_block_context->scratch_page_mask, faulted_pages);
static void compute_prefetch_mask(uvm_va_block_region_t faulted_region,
uvm_va_block_region_t max_prefetch_region,
uvm_perf_prefetch_bitmap_tree_t *bitmap_tree,
const uvm_page_mask_t *faulted_pages,
uvm_page_mask_t *out_prefetch_mask)
{
uvm_page_index_t page_index;

// Update the tree using the scratch mask to compute the pages to prefetch
for_each_va_block_page_in_region_mask(page_index, &va_block_context->scratch_page_mask, faulted_region) {
uvm_page_mask_zero(out_prefetch_mask);

// Update the tree using the faulted mask to compute the pages to prefetch.
for_each_va_block_page_in_region_mask(page_index, faulted_pages, faulted_region) {
uvm_va_block_region_t region = compute_prefetch_region(page_index, bitmap_tree, max_prefetch_region);

uvm_page_mask_region_fill(prefetch_pages, region);
uvm_page_mask_region_fill(out_prefetch_mask, region);

// Early out if we have already prefetched until the end of the VA block
if (region.outer == max_prefetch_region.outer)
break;
}
}

// Within a block we only allow prefetching to a single processor. Therefore,
// if two processors are accessing non-overlapping regions within the same
// block they won't benefit from prefetching.
//
// TODO: Bug 1778034: [uvm] Explore prefetching to different processors within
// a VA block.
static NvU32 uvm_perf_prefetch_prenotify_fault_migrations(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
uvm_processor_id_t new_residency,
const uvm_page_mask_t *faulted_pages,
uvm_va_block_region_t faulted_region,
uvm_page_mask_t *prefetch_pages,
uvm_perf_prefetch_bitmap_tree_t *bitmap_tree)
{
const uvm_page_mask_t *resident_mask = NULL;
const uvm_va_policy_t *policy = uvm_va_policy_get_region(va_block, faulted_region);
uvm_va_block_region_t max_prefetch_region;
const uvm_page_mask_t *thrashing_pages = uvm_perf_thrashing_get_thrashing_pages(va_block);

if (!uvm_id_equal(va_block->prefetch_info.last_migration_proc_id, new_residency)) {
va_block->prefetch_info.last_migration_proc_id = new_residency;
va_block->prefetch_info.fault_migrations_to_last_proc = 0;
}

// Compute the expanded region that prefetching is allowed from.
if (uvm_va_block_is_hmm(va_block)) {
max_prefetch_region = uvm_hmm_get_prefetch_region(va_block,
va_block_context->hmm.vma,
policy,
uvm_va_block_region_start(va_block, faulted_region));
}
else {
max_prefetch_region = uvm_va_block_region_from_block(va_block);
}

uvm_page_mask_zero(prefetch_pages);

if (UVM_ID_IS_CPU(new_residency) || va_block->gpus[uvm_id_gpu_index(new_residency)] != NULL)
resident_mask = uvm_va_block_resident_mask_get(va_block, new_residency);

// If this is a first-touch fault and the destination processor is the
// preferred location, populate the whole max_prefetch_region.
if (uvm_processor_mask_empty(&va_block->resident) &&
uvm_id_equal(new_residency, policy->preferred_location)) {
uvm_page_mask_region_fill(prefetch_pages, max_prefetch_region);
}
else {
init_bitmap_tree_from_region(bitmap_tree, max_prefetch_region, resident_mask, faulted_pages);

update_bitmap_tree_from_va_block(bitmap_tree,
va_block,
va_block_context,
new_residency,
faulted_pages,
max_prefetch_region);

// Do not compute prefetch regions with faults on pages that are thrashing
if (thrashing_pages)
uvm_page_mask_andnot(&va_block_context->scratch_page_mask, faulted_pages, thrashing_pages);
else
uvm_page_mask_copy(&va_block_context->scratch_page_mask, faulted_pages);

compute_prefetch_mask(faulted_region,
max_prefetch_region,
bitmap_tree,
&va_block_context->scratch_page_mask,
prefetch_pages);
}

done:
// Do not prefetch pages that are going to be migrated/populated due to a
// fault
uvm_page_mask_andnot(prefetch_pages, prefetch_pages, faulted_pages);
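Worked example of the split above (a sketch; both helpers are static to this
file): servicing a fault first seeds the occupancy tree, then grows each
faulted page into the largest sufficiently dense aligned region:

uvm_perf_prefetch_bitmap_tree_t tree;
uvm_page_mask_t prefetch_mask;

// Seed tree.pages with resident | faulted pages, clamped and shifted to
// max_prefetch_region, and derive leaf_count/level_count from its size.
init_bitmap_tree_from_region(&tree, max_prefetch_region, resident_mask, faulted_pages);

// For each faulted page, compute_prefetch_region() walks the tree upward;
// the resulting regions are accumulated into prefetch_mask.
compute_prefetch_mask(faulted_region, max_prefetch_region, &tree, faulted_pages, &prefetch_mask);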
@@ -364,31 +411,58 @@ done:
return uvm_page_mask_weight(prefetch_pages);
}

void uvm_perf_prefetch_get_hint(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
uvm_processor_id_t new_residency,
const uvm_page_mask_t *faulted_pages,
uvm_va_block_region_t faulted_region,
uvm_perf_prefetch_bitmap_tree_t *bitmap_tree,
uvm_perf_prefetch_hint_t *out_hint)
bool uvm_perf_prefetch_enabled(uvm_va_space_t *va_space)
{
if (!g_uvm_perf_prefetch_enable)
return false;

UVM_ASSERT(va_space);

return va_space->test.page_prefetch_enabled;
}

void uvm_perf_prefetch_compute_ats(uvm_va_space_t *va_space,
const uvm_page_mask_t *faulted_pages,
uvm_va_block_region_t faulted_region,
uvm_va_block_region_t max_prefetch_region,
const uvm_page_mask_t *residency_mask,
uvm_perf_prefetch_bitmap_tree_t *bitmap_tree,
uvm_page_mask_t *out_prefetch_mask)
{
UVM_ASSERT(faulted_pages);
UVM_ASSERT(bitmap_tree);
UVM_ASSERT(out_prefetch_mask);

uvm_page_mask_zero(out_prefetch_mask);

if (!uvm_perf_prefetch_enabled(va_space))
return;

init_bitmap_tree_from_region(bitmap_tree, max_prefetch_region, residency_mask, faulted_pages);

compute_prefetch_mask(faulted_region, max_prefetch_region, bitmap_tree, faulted_pages, out_prefetch_mask);
}

void uvm_perf_prefetch_get_hint_va_block(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
uvm_processor_id_t new_residency,
const uvm_page_mask_t *faulted_pages,
uvm_va_block_region_t faulted_region,
uvm_perf_prefetch_bitmap_tree_t *bitmap_tree,
uvm_perf_prefetch_hint_t *out_hint)
{
const uvm_va_policy_t *policy = va_block_context->policy;
uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block);
uvm_page_mask_t *prefetch_pages = &out_hint->prefetch_pages_mask;
NvU32 pending_prefetch_pages;

uvm_assert_rwsem_locked(&va_space->lock);
uvm_assert_mutex_locked(&va_block->lock);
UVM_ASSERT(uvm_va_block_check_policy_is_valid(va_block, policy, faulted_region));
UVM_ASSERT(uvm_hmm_check_context_vma_is_valid(va_block, va_block_context, faulted_region));
UVM_ASSERT(uvm_hmm_check_context_vma_is_valid(va_block, va_block_context->hmm.vma, faulted_region));

out_hint->residency = UVM_ID_INVALID;
uvm_page_mask_zero(prefetch_pages);

if (!g_uvm_perf_prefetch_enable)
return;

if (!va_space->test.page_prefetch_enabled)
if (!uvm_perf_prefetch_enabled(va_space))
return;

pending_prefetch_pages = uvm_perf_prefetch_prenotify_fault_migrations(va_block,

@@ -61,21 +61,41 @@ typedef struct
// Global initialization function (no clean up needed).
NV_STATUS uvm_perf_prefetch_init(void);

// Returns whether prefetching is enabled in the VA space.
// va_space cannot be NULL.
bool uvm_perf_prefetch_enabled(uvm_va_space_t *va_space);

// Return the prefetch mask with the pages that may be prefetched in an ATS
// block. An ATS block is a system-allocated memory block with base aligned to
// UVM_VA_BLOCK_SIZE and a maximum size of UVM_VA_BLOCK_SIZE. The faulted_pages
// mask and faulted_region are the pages being faulted on the given residency.
//
// Only residency_mask can be NULL.
//
// Locking: The caller must hold the va_space lock.
void uvm_perf_prefetch_compute_ats(uvm_va_space_t *va_space,
const uvm_page_mask_t *faulted_pages,
uvm_va_block_region_t faulted_region,
uvm_va_block_region_t max_prefetch_region,
const uvm_page_mask_t *residency_mask,
uvm_perf_prefetch_bitmap_tree_t *bitmap_tree,
uvm_page_mask_t *out_prefetch_mask);

// Return a hint with the pages that may be prefetched in the block.
// The faulted_pages mask and faulted_region are the pages being migrated to
// the given residency.
// va_block_context must not be NULL, va_block_context->policy must be valid,
// and if the va_block is a HMM block, va_block_context->hmm.vma must be valid
// which also means the va_block_context->mm is not NULL, retained, and locked
// for at least read.
// va_block_context must not be NULL, and if the va_block is a HMM
// block, va_block_context->hmm.vma must be valid, which also means the
// va_block_context->mm is not NULL, retained, and locked for at least
// read.
// Locking: The caller must hold the va_space lock and va_block lock.
void uvm_perf_prefetch_get_hint(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
uvm_processor_id_t new_residency,
const uvm_page_mask_t *faulted_pages,
uvm_va_block_region_t faulted_region,
uvm_perf_prefetch_bitmap_tree_t *bitmap_tree,
uvm_perf_prefetch_hint_t *out_hint);
void uvm_perf_prefetch_get_hint_va_block(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
uvm_processor_id_t new_residency,
const uvm_page_mask_t *faulted_pages,
uvm_va_block_region_t faulted_region,
uvm_perf_prefetch_bitmap_tree_t *bitmap_tree,
uvm_perf_prefetch_hint_t *out_hint);

void uvm_perf_prefetch_bitmap_tree_iter_init(const uvm_perf_prefetch_bitmap_tree_t *bitmap_tree,
uvm_page_index_t page_index,
|
|||
NV_STATUS tracker_status;
|
||||
uvm_tracker_t local_tracker = UVM_TRACKER_INIT();
|
||||
uvm_processor_id_t processor_id;
|
||||
const uvm_va_policy_t *policy = va_block_context->policy;
|
||||
const uvm_va_policy_t *policy = uvm_va_policy_get(va_block, uvm_va_block_region_start(va_block, region));
|
||||
|
||||
uvm_assert_mutex_locked(&va_block->lock);
|
||||
|
||||
|
@ -1141,10 +1141,9 @@ NV_STATUS uvm_perf_thrashing_unmap_remote_pinned_pages_all(uvm_va_block_t *va_bl
|
|||
{
|
||||
block_thrashing_info_t *block_thrashing;
|
||||
uvm_processor_mask_t unmap_processors;
|
||||
const uvm_va_policy_t *policy = va_block_context->policy;
|
||||
const uvm_va_policy_t *policy = uvm_va_policy_get_region(va_block, region);
|
||||
|
||||
uvm_assert_mutex_locked(&va_block->lock);
|
||||
UVM_ASSERT(uvm_va_block_check_policy_is_valid(va_block, policy, region));
|
||||
|
||||
block_thrashing = thrashing_info_get(va_block);
|
||||
if (!block_thrashing || !block_thrashing->pages)
|
||||
|
@ -1867,8 +1866,6 @@ static void thrashing_unpin_pages(struct work_struct *work)
|
|||
UVM_ASSERT(uvm_page_mask_test(&block_thrashing->pinned_pages.mask, page_index));
|
||||
|
||||
uvm_va_block_context_init(va_block_context, NULL);
|
||||
va_block_context->policy =
|
||||
uvm_va_policy_get(va_block, uvm_va_block_cpu_page_address(va_block, page_index));
|
||||
|
||||
uvm_perf_thrashing_unmap_remote_pinned_pages_all(va_block,
|
||||
va_block_context,
|
||||
|
@ -2123,8 +2120,6 @@ NV_STATUS uvm_test_set_page_thrashing_policy(UVM_TEST_SET_PAGE_THRASHING_POLICY_
|
|||
uvm_va_block_region_t va_block_region = uvm_va_block_region_from_block(va_block);
|
||||
uvm_va_block_context_t *block_context = uvm_va_space_block_context(va_space, NULL);
|
||||
|
||||
block_context->policy = uvm_va_range_get_policy(va_range);
|
||||
|
||||
uvm_mutex_lock(&va_block->lock);
|
||||
|
||||
// Unmap may split PTEs and require a retry. Needs to be called
|
||||
|
|
|
@@ -103,11 +103,11 @@ void uvm_perf_thrashing_unload(uvm_va_space_t *va_space);
// Destroy the thrashing detection struct for the given block.
void uvm_perf_thrashing_info_destroy(uvm_va_block_t *va_block);

// Unmap remote mappings from all processors on the pinned pages
// described by region and block_thrashing->pinned pages.
// va_block_context must not be NULL and va_block_context->policy must be valid.
// See the comments for uvm_va_block_check_policy_is_valid() in uvm_va_block.h.
// Locking: the va_block lock must be held.
// Unmap remote mappings from all processors on the pinned pages described by
// region and block_thrashing->pinned pages. va_block_context must not be NULL
// and the policy for the region must match. See the comments for
// uvm_va_block_check_policy_is_valid() in uvm_va_block.h. Locking: the
// va_block lock must be held.
NV_STATUS uvm_perf_thrashing_unmap_remote_pinned_pages_all(uvm_va_block_t *va_block,
uvm_va_block_context_t *va_block_context,
uvm_va_block_region_t region);

@@ -3820,18 +3820,11 @@ NV_STATUS uvm_test_evict_chunk(UVM_TEST_EVICT_CHUNK_PARAMS *params, struct file
// For virtual mode, look up and retain the block first so that eviction can
// be started without the VA space lock held.
if (params->eviction_mode == UvmTestEvictModeVirtual) {
uvm_va_block_context_t *block_context;
if (mm)
status = uvm_va_block_find_create(va_space, params->address, NULL, &block);
else
status = uvm_va_block_find_create_managed(va_space, params->address, &block);

block_context = uvm_va_block_context_alloc(mm);
if (!block_context) {
status = NV_ERR_NO_MEMORY;
uvm_va_space_up_read(va_space);
uvm_va_space_mm_release_unlock(va_space, mm);
goto out;
}

status = uvm_va_block_find_create(va_space, params->address, block_context, &block);
uvm_va_block_context_free(block_context);
if (status != NV_OK) {
uvm_va_space_up_read(va_space);
uvm_va_space_mm_or_current_release_unlock(va_space, mm);
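The lookup split used above and in later hunks, sketched: callers pass a VMA
out-pointer when they need it, NULL when they only need the block, or use the
managed-only variant when no mm is available:

struct vm_area_struct *vma;

status = uvm_va_block_find_create(va_space, addr, &vma, &block);   // HMM-capable, VMA returned
status = uvm_va_block_find_create(va_space, addr, NULL, &block);   // VMA not needed
status = uvm_va_block_find_create_managed(va_space, addr, &block); // managed ranges only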
@@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2015-2022 NVIDIA Corporation
Copyright (c) 2015-2023 NVIDIA Corporation

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to

@@ -324,7 +324,7 @@ static NV_STATUS gpu_mem_check(uvm_gpu_t *gpu,

// TODO: Bug 3839176: [UVM][HCC][uvm_test] Update tests that assume GPU
// engines can directly access sysmem
// Skip this test for now. To enable this test under SEV,
// Skip this test for now. To enable this test in Confidential Computing,
// the GPU->CPU CE copy needs to be updated so it uses encryption when
// CC is enabled.
if (uvm_conf_computing_mode_enabled(gpu))

@@ -1223,8 +1223,6 @@ static NV_STATUS test_indirect_peers(uvm_gpu_t *owning_gpu, uvm_gpu_t *accessing
if (!chunks)
return NV_ERR_NO_MEMORY;

UVM_ASSERT(!g_uvm_global.sev_enabled);

TEST_NV_CHECK_GOTO(uvm_mem_alloc_sysmem_and_map_cpu_kernel(UVM_CHUNK_SIZE_MAX, current->mm, &verif_mem), out);
TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(verif_mem, owning_gpu), out);
TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(verif_mem, accessing_gpu), out);
@@ -160,7 +160,7 @@ static NV_STATUS preferred_location_unmap_remote_pages(uvm_va_block_t *va_block,
NV_STATUS status = NV_OK;
NV_STATUS tracker_status;
uvm_tracker_t local_tracker = UVM_TRACKER_INIT();
const uvm_va_policy_t *policy = va_block_context->policy;
const uvm_va_policy_t *policy = uvm_va_policy_get_region(va_block, region);
uvm_processor_id_t preferred_location = policy->preferred_location;
uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block);
const uvm_page_mask_t *mapped_mask;

@@ -279,6 +279,9 @@ static NV_STATUS preferred_location_set(uvm_va_space_t *va_space,
return NV_OK;
}

if (!mm)
return NV_ERR_INVALID_ADDRESS;

return uvm_hmm_set_preferred_location(va_space, preferred_location, base, last_address, out_tracker);
}


@@ -445,7 +448,6 @@ NV_STATUS uvm_va_block_set_accessed_by_locked(uvm_va_block_t *va_block,
NV_STATUS tracker_status;

uvm_assert_mutex_locked(&va_block->lock);
UVM_ASSERT(uvm_va_block_check_policy_is_valid(va_block, va_block_context->policy, region));

status = uvm_va_block_add_mappings(va_block,
va_block_context,

@@ -467,13 +469,13 @@ NV_STATUS uvm_va_block_set_accessed_by(uvm_va_block_t *va_block,
uvm_va_block_region_t region = uvm_va_block_region_from_block(va_block);
NV_STATUS status;
uvm_tracker_t local_tracker = UVM_TRACKER_INIT();
uvm_va_policy_t *policy = uvm_va_range_get_policy(va_block->va_range);

UVM_ASSERT(!uvm_va_block_is_hmm(va_block));
UVM_ASSERT(va_block_context->policy == uvm_va_range_get_policy(va_block->va_range));

// Read duplication takes precedence over SetAccessedBy. Do not add mappings
// if read duplication is enabled.
if (uvm_va_policy_is_read_duplicate(va_block_context->policy, va_space))
if (uvm_va_policy_is_read_duplicate(policy, va_space))
return NV_OK;

status = UVM_VA_BLOCK_LOCK_RETRY(va_block,

@@ -592,8 +594,15 @@ static NV_STATUS accessed_by_set(uvm_va_space_t *va_space,
UVM_ASSERT(va_range_last->node.end >= last_address);
}
else {
// NULL mm case already filtered by uvm_api_range_type_check()
UVM_ASSERT(mm);
UVM_ASSERT(type == UVM_API_RANGE_TYPE_HMM);
status = uvm_hmm_set_accessed_by(va_space, processor_id, set_bit, base, last_address, &local_tracker);
status = uvm_hmm_set_accessed_by(va_space,
processor_id,
set_bit,
base,
last_address,
&local_tracker);
}

done:

@@ -656,7 +665,6 @@ NV_STATUS uvm_va_block_set_read_duplication(uvm_va_block_t *va_block,

// TODO: Bug 3660922: need to implement HMM read duplication support.
UVM_ASSERT(!uvm_va_block_is_hmm(va_block));
UVM_ASSERT(va_block_context->policy == uvm_va_range_get_policy(va_block->va_range));

status = UVM_VA_BLOCK_LOCK_RETRY(va_block, &va_block_retry,
va_block_set_read_duplication_locked(va_block,

@@ -675,7 +683,7 @@ static NV_STATUS va_block_unset_read_duplication_locked(uvm_va_block
uvm_processor_id_t processor_id;
uvm_va_block_region_t block_region = uvm_va_block_region_from_block(va_block);
uvm_page_mask_t *break_read_duplication_pages = &va_block_context->caller_page_mask;
const uvm_va_policy_t *policy = va_block_context->policy;
const uvm_va_policy_t *policy = uvm_va_range_get_policy(va_block->va_range);
uvm_processor_id_t preferred_location = policy->preferred_location;
uvm_processor_mask_t accessed_by = policy->accessed_by;


@@ -757,7 +765,6 @@ NV_STATUS uvm_va_block_unset_read_duplication(uvm_va_block_t *va_block,
uvm_tracker_t local_tracker = UVM_TRACKER_INIT();

UVM_ASSERT(!uvm_va_block_is_hmm(va_block));
UVM_ASSERT(va_block_context->policy == uvm_va_range_get_policy(va_block->va_range));

// Restore all SetAccessedBy mappings
status = UVM_VA_BLOCK_LOCK_RETRY(va_block, &va_block_retry,

@@ -915,7 +922,6 @@ static NV_STATUS system_wide_atomics_set(uvm_va_space_t *va_space, const NvProce
if (va_range->type != UVM_VA_RANGE_TYPE_MANAGED)
continue;

va_block_context->policy = uvm_va_range_get_policy(va_range);
for_each_va_block_in_va_range(va_range, va_block) {
uvm_page_mask_t *non_resident_pages = &va_block_context->caller_page_mask;

@@ -264,7 +264,6 @@ NV_STATUS uvm_range_group_va_range_migrate(uvm_va_range_t *va_range,
return NV_ERR_NO_MEMORY;

uvm_assert_rwsem_locked(&va_range->va_space->lock);
va_block_context->policy = uvm_va_range_get_policy(va_range);

// Iterate over blocks, populating them if necessary
for (i = uvm_va_range_block_index(va_range, start); i <= uvm_va_range_block_index(va_range, end); ++i) {
@@ -2069,7 +2069,11 @@ static NV_STATUS tools_access_process_memory(uvm_va_space_t *va_space,

// The RM flavor of the lock is needed to perform ECC checks.
uvm_va_space_down_read_rm(va_space);
status = uvm_va_block_find_create(va_space, UVM_PAGE_ALIGN_DOWN(target_va_start), block_context, &block);
if (mm)
status = uvm_va_block_find_create(va_space, UVM_PAGE_ALIGN_DOWN(target_va_start), &block_context->hmm.vma, &block);
else
status = uvm_va_block_find_create_managed(va_space, UVM_PAGE_ALIGN_DOWN(target_va_start), &block);

if (status != NV_OK)
goto unlock_and_exit;

@ -106,36 +106,6 @@ uvm_va_space_t *uvm_va_block_get_va_space(uvm_va_block_t *va_block)
|
|||
return va_space;
|
||||
}
|
||||
|
||||
bool uvm_va_block_check_policy_is_valid(uvm_va_block_t *va_block,
|
||||
const uvm_va_policy_t *policy,
|
||||
uvm_va_block_region_t region)
|
||||
{
|
||||
uvm_assert_mutex_locked(&va_block->lock);
|
||||
|
||||
if (uvm_va_block_is_hmm(va_block)) {
|
||||
const uvm_va_policy_node_t *node;
|
||||
|
||||
if (uvm_va_policy_is_default(policy)) {
|
||||
// There should only be the default policy within the region.
|
||||
node = uvm_va_policy_node_iter_first(va_block,
|
||||
uvm_va_block_region_start(va_block, region),
|
||||
uvm_va_block_region_end(va_block, region));
|
||||
UVM_ASSERT(!node);
|
||||
}
|
||||
else {
|
||||
// The policy node should cover the region.
|
||||
node = uvm_va_policy_node_from_policy(policy);
|
||||
UVM_ASSERT(node->node.start <= uvm_va_block_region_start(va_block, region));
|
||||
UVM_ASSERT(node->node.end >= uvm_va_block_region_end(va_block, region));
|
||||
}
|
||||
}
|
||||
else {
|
||||
UVM_ASSERT(policy == uvm_va_range_get_policy(va_block->va_range));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static NvU64 block_gpu_pte_flag_cacheable(uvm_va_block_t *block, uvm_gpu_t *gpu, uvm_processor_id_t resident_id)
|
||||
{
|
||||
uvm_va_space_t *va_space = uvm_va_block_get_va_space(block);
|
||||
|
@ -3697,7 +3667,6 @@ NV_STATUS uvm_va_block_make_resident_copy(uvm_va_block_t *va_block,
|
|||
|
||||
uvm_assert_mutex_locked(&va_block->lock);
|
||||
UVM_ASSERT(uvm_va_block_is_hmm(va_block) || va_block->va_range->type == UVM_VA_RANGE_TYPE_MANAGED);
|
||||
UVM_ASSERT(uvm_va_block_check_policy_is_valid(va_block, va_block_context->policy, region));
|
||||
|
||||
resident_mask = block_resident_mask_get_alloc(va_block, dest_id);
|
||||
if (!resident_mask)
|
||||
|
@ -3944,7 +3913,6 @@ NV_STATUS uvm_va_block_make_resident_read_duplicate(uvm_va_block_t *va_block,
|
|||
|
||||
// TODO: Bug 3660922: need to implement HMM read duplication support.
|
||||
UVM_ASSERT(!uvm_va_block_is_hmm(va_block));
|
||||
UVM_ASSERT(va_block_context->policy == uvm_va_range_get_policy(va_block->va_range));
|
||||
|
||||
va_block_context->make_resident.dest_id = dest_id;
|
||||
va_block_context->make_resident.cause = cause;
|
||||
|
@ -4742,7 +4710,7 @@ static void block_unmap_cpu(uvm_va_block_t *block, uvm_va_block_region_t region,
|
|||
// Given a mask of mapped pages, returns true if any of the pages in the mask
|
||||
// are mapped remotely by the given GPU.
|
||||
static bool block_has_remote_mapping_gpu(uvm_va_block_t *block,
|
||||
uvm_va_block_context_t *block_context,
|
||||
uvm_page_mask_t *scratch_page_mask,
|
||||
uvm_gpu_id_t gpu_id,
|
||||
const uvm_page_mask_t *mapped_pages)
|
||||
{
|
||||
|
@ -4764,7 +4732,7 @@ static bool block_has_remote_mapping_gpu(uvm_va_block_t *block,
|
|||
}
|
||||
|
||||
// Remote pages are pages which are mapped but not resident locally
|
||||
return uvm_page_mask_andnot(&block_context->scratch_page_mask, mapped_pages, &gpu_state->resident);
|
||||
return uvm_page_mask_andnot(scratch_page_mask, mapped_pages, &gpu_state->resident);
|
||||
}
|
||||
|
||||
// Writes pte_clear_val to the 4k PTEs covered by clear_page_mask. If
|
||||
|
@ -6659,7 +6627,7 @@ static NV_STATUS block_unmap_gpu(uvm_va_block_t *block,
|
|||
if (status != NV_OK)
|
||||
return status;
|
||||
|
||||
only_local_mappings = !block_has_remote_mapping_gpu(block, block_context, gpu->id, pages_to_unmap);
|
||||
only_local_mappings = !block_has_remote_mapping_gpu(block, &block_context->scratch_page_mask, gpu->id, pages_to_unmap);
|
||||
tlb_membar = uvm_hal_downgrade_membar_type(gpu, only_local_mappings);
|
||||
|
||||
status = uvm_push_begin_acquire(gpu->channel_manager,
|
||||
|
@ -6794,16 +6762,15 @@ static NV_STATUS uvm_cpu_insert_page(struct vm_area_struct *vma,
|
|||
}
|
||||
|
||||
static uvm_prot_t compute_logical_prot(uvm_va_block_t *va_block,
|
||||
uvm_va_block_context_t *va_block_context,
|
||||
struct vm_area_struct *hmm_vma,
|
||||
uvm_page_index_t page_index)
|
||||
{
|
||||
struct vm_area_struct *vma;
|
||||
uvm_prot_t logical_prot;
|
||||
|
||||
if (uvm_va_block_is_hmm(va_block)) {
|
||||
NvU64 addr = uvm_va_block_cpu_page_address(va_block, page_index);
|
||||
|
||||
logical_prot = uvm_hmm_compute_logical_prot(va_block, va_block_context, addr);
|
||||
logical_prot = uvm_hmm_compute_logical_prot(va_block, hmm_vma, addr);
|
||||
}
|
||||
else {
|
||||
uvm_va_range_t *va_range = va_block->va_range;
|
||||
|
@ -6815,6 +6782,8 @@ static uvm_prot_t compute_logical_prot(uvm_va_block_t *va_block,
|
|||
logical_prot = UVM_PROT_NONE;
|
||||
}
|
||||
else {
|
||||
struct vm_area_struct *vma;
|
||||
|
||||
vma = uvm_va_range_vma(va_range);
|
||||
|
||||
if (!(vma->vm_flags & VM_READ))
|
||||
|
@ -6864,13 +6833,15 @@ static struct page *block_page_get(uvm_va_block_t *block, block_phys_page_t bloc
|
|||
// with new_prot permissions
|
||||
// - Guarantee that vm_insert_page is safe to use (vma->vm_mm has a reference
|
||||
// and mmap_lock is held in at least read mode)
|
||||
// - For HMM blocks that vma is valid and safe to use, vma->vm_mm has a
|
||||
// reference and mmap_lock is held in at least read mode
|
||||
// - Ensure that the struct page corresponding to the physical memory being
|
||||
// mapped exists
|
||||
// - Manage the block's residency bitmap
|
||||
// - Ensure that the block hasn't been killed (block->va_range is present)
|
||||
// - Update the pte/mapping tracking state on success
|
||||
static NV_STATUS block_map_cpu_page_to(uvm_va_block_t *block,
|
||||
uvm_va_block_context_t *va_block_context,
|
||||
struct vm_area_struct *hmm_vma,
|
||||
uvm_processor_id_t resident_id,
|
||||
uvm_page_index_t page_index,
|
||||
uvm_prot_t new_prot)
|
||||
|
@ -6883,7 +6854,7 @@ static NV_STATUS block_map_cpu_page_to(uvm_va_block_t *block,
|
|||
NvU64 addr;
|
||||
struct page *page;
|
||||
|
||||
UVM_ASSERT(uvm_va_block_is_hmm(block) || va_range->type == UVM_VA_RANGE_TYPE_MANAGED);
|
||||
UVM_ASSERT((uvm_va_block_is_hmm(block) && hmm_vma) || va_range->type == UVM_VA_RANGE_TYPE_MANAGED);
|
||||
UVM_ASSERT(new_prot != UVM_PROT_NONE);
|
||||
UVM_ASSERT(new_prot < UVM_PROT_MAX);
|
||||
UVM_ASSERT(uvm_processor_mask_test(&va_space->accessible_from[uvm_id_value(resident_id)], UVM_ID_CPU));
|
||||
|
@ -6904,7 +6875,7 @@ static NV_STATUS block_map_cpu_page_to(uvm_va_block_t *block,
|
|||
|
||||
// Check for existing VMA permissions. They could have been modified after
|
||||
// the initial mmap by mprotect.
|
||||
if (new_prot > compute_logical_prot(block, va_block_context, page_index))
|
||||
if (new_prot > compute_logical_prot(block, hmm_vma, page_index))
|
||||
return NV_ERR_INVALID_ACCESS_TYPE;
|
||||
|
||||
if (uvm_va_block_is_hmm(block)) {
|
||||
|
@ -7001,7 +6972,7 @@ static NV_STATUS block_map_cpu_to(uvm_va_block_t *block,
|
|||
|
||||
for_each_va_block_page_in_region_mask(page_index, pages_to_map, region) {
|
||||
status = block_map_cpu_page_to(block,
|
||||
block_context,
|
||||
block_context->hmm.vma,
|
||||
resident_id,
|
||||
page_index,
|
||||
new_prot);
|
||||
|
@@ -7234,13 +7205,13 @@ NV_STATUS uvm_va_block_map(uvm_va_block_t *va_block,
    const uvm_page_mask_t *pte_mask;
    uvm_page_mask_t *running_page_mask = &va_block_context->mapping.map_running_page_mask;
    NV_STATUS status;
    const uvm_va_policy_t *policy = uvm_va_policy_get_region(va_block, region);

    va_block_context->mapping.cause = cause;

    UVM_ASSERT(new_prot != UVM_PROT_NONE);
    UVM_ASSERT(new_prot < UVM_PROT_MAX);
    uvm_assert_mutex_locked(&va_block->lock);
    UVM_ASSERT(uvm_va_block_check_policy_is_valid(va_block, va_block_context->policy, region));

    // Mapping is not supported on the eviction path that doesn't hold the VA
    // space lock.

@@ -7282,7 +7253,7 @@ NV_STATUS uvm_va_block_map(uvm_va_block_t *va_block,

    // Map per resident location so we can more easily detect physically-
    // contiguous mappings.
    map_get_allowed_destinations(va_block, va_block_context, va_block_context->policy, id, &allowed_destinations);
    map_get_allowed_destinations(va_block, va_block_context, policy, id, &allowed_destinations);

    for_each_closest_id(resident_id, &allowed_destinations, id, va_space) {
        if (UVM_ID_IS_CPU(id)) {

@@ -7588,8 +7559,6 @@ NV_STATUS uvm_va_block_map_mask(uvm_va_block_t *va_block,
    NV_STATUS tracker_status;
    uvm_processor_id_t id;

    UVM_ASSERT(uvm_va_block_check_policy_is_valid(va_block, va_block_context->policy, region));

    for_each_id_in_mask(id, map_processor_mask) {
        status = uvm_va_block_map(va_block,
                                  va_block_context,
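Note the contract shift in uvm_va_block_map() above: instead of asserting that the caller pre-loaded va_block_context->policy, the function now resolves the policy itself via uvm_va_policy_get_region() at entry. A rough stand-alone model of that lookup-at-entry pattern, with hypothetical stub types rather than the driver's own:

#include <stddef.h>

struct stub_policy { int read_duplication; };
struct stub_block  { struct stub_policy range_policy; };
struct stub_region { size_t first, outer; };

/* Stand-in for uvm_va_policy_get_region(): the callee resolves the policy
 * covering the region itself, so callers cannot hand it a stale or
 * mismatched pointer. */
static const struct stub_policy *policy_get_region(const struct stub_block *block,
                                                   struct stub_region region)
{
    (void)region; /* a real lookup would walk a per-region policy tree */
    return &block->range_policy;
}

static int block_map(struct stub_block *block, struct stub_region region)
{
    const struct stub_policy *policy = policy_get_region(block, region);
    /* ... the rest of the operation consumes 'policy' ... */
    return policy->read_duplication;
}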
@@ -9573,7 +9542,7 @@ static bool block_region_might_read_duplicate(uvm_va_block_t *va_block,
// could be changed in the future to optimize multiple faults/counters on
// contiguous pages.
static uvm_prot_t compute_new_permission(uvm_va_block_t *va_block,
                                         uvm_va_block_context_t *va_block_context,
                                         struct vm_area_struct *hmm_vma,
                                         uvm_page_index_t page_index,
                                         uvm_processor_id_t fault_processor_id,
                                         uvm_processor_id_t new_residency,

@@ -9586,7 +9555,7 @@ static uvm_prot_t compute_new_permission(uvm_va_block_t *va_block,
    // query_promote: upgrade access privileges to avoid future faults IF
    // they don't trigger further revocations.
    new_prot = uvm_fault_access_type_to_prot(access_type);
    logical_prot = compute_logical_prot(va_block, va_block_context, page_index);
    logical_prot = compute_logical_prot(va_block, hmm_vma, page_index);

    UVM_ASSERT(logical_prot >= new_prot);

@@ -9729,11 +9698,10 @@ NV_STATUS uvm_va_block_add_mappings_after_migration(uvm_va_block_t *va_block,
    uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block);
    const uvm_page_mask_t *final_page_mask = map_page_mask;
    uvm_tracker_t local_tracker = UVM_TRACKER_INIT();
    const uvm_va_policy_t *policy = va_block_context->policy;
    const uvm_va_policy_t *policy = uvm_va_policy_get_region(va_block, region);
    uvm_processor_id_t preferred_location;

    uvm_assert_mutex_locked(&va_block->lock);
    UVM_ASSERT(uvm_va_block_check_policy_is_valid(va_block, policy, region));

    // Read duplication takes precedence over SetAccessedBy.
    //

@@ -9959,8 +9927,6 @@ NV_STATUS uvm_va_block_add_mappings(uvm_va_block_t *va_block,
    uvm_range_group_range_iter_t iter;
    uvm_prot_t prot_to_map;

    UVM_ASSERT(uvm_va_block_check_policy_is_valid(va_block, va_block_context->policy, region));

    if (UVM_ID_IS_CPU(processor_id) && !uvm_va_block_is_hmm(va_block)) {
        if (!uvm_va_range_vma_check(va_range, va_block_context->mm))
            return NV_OK;

@@ -10207,11 +10173,8 @@ uvm_processor_id_t uvm_va_block_select_residency(uvm_va_block_t *va_block,
{
    uvm_processor_id_t id;

    UVM_ASSERT(uvm_va_block_check_policy_is_valid(va_block,
                                                  va_block_context->policy,
                                                  uvm_va_block_region_for_page(page_index)));
    UVM_ASSERT(uvm_hmm_check_context_vma_is_valid(va_block,
                                                  va_block_context,
                                                  va_block_context->hmm.vma,
                                                  uvm_va_block_region_for_page(page_index)));

    id = block_select_residency(va_block,

@@ -10255,6 +10218,7 @@ static bool check_access_counters_dont_revoke(uvm_va_block_t *block,
// Update service_context->prefetch_hint, service_context->per_processor_masks,
// and service_context->region.
static void uvm_va_block_get_prefetch_hint(uvm_va_block_t *va_block,
                                           const uvm_va_policy_t *policy,
                                           uvm_service_block_context_t *service_context)
{
    uvm_processor_id_t new_residency;

@@ -10265,20 +10229,19 @@ static void uvm_va_block_get_prefetch_hint(uvm_va_block_t *va_block,
    if (uvm_processor_mask_get_count(&service_context->resident_processors) == 1) {
        uvm_page_index_t page_index;
        uvm_page_mask_t *new_residency_mask;
        const uvm_va_policy_t *policy = service_context->block_context.policy;

        new_residency = uvm_processor_mask_find_first_id(&service_context->resident_processors);
        new_residency_mask = &service_context->per_processor_masks[uvm_id_value(new_residency)].new_residency;

        // Update prefetch tracking structure with the pages that will migrate
        // due to faults
        uvm_perf_prefetch_get_hint(va_block,
                                   &service_context->block_context,
                                   new_residency,
                                   new_residency_mask,
                                   service_context->region,
                                   &service_context->prefetch_bitmap_tree,
                                   &service_context->prefetch_hint);
        uvm_perf_prefetch_get_hint_va_block(va_block,
                                            &service_context->block_context,
                                            new_residency,
                                            new_residency_mask,
                                            service_context->region,
                                            &service_context->prefetch_bitmap_tree,
                                            &service_context->prefetch_hint);

        // Obtain the prefetch hint and give a fake fault access type to the
        // prefetched pages
@@ -10463,7 +10426,7 @@ NV_STATUS uvm_va_block_service_finish(uvm_processor_id_t processor_id,

    for_each_va_block_page_in_region_mask(page_index, new_residency_mask, service_context->region) {
        new_prot = compute_new_permission(va_block,
                                          &service_context->block_context,
                                          service_context->block_context.hmm.vma,
                                          page_index,
                                          processor_id,
                                          new_residency,

@@ -10706,11 +10669,8 @@ NV_STATUS uvm_va_block_service_locked(uvm_processor_id_t processor_id,
    NV_STATUS status = NV_OK;

    uvm_assert_mutex_locked(&va_block->lock);
    UVM_ASSERT(uvm_va_block_check_policy_is_valid(va_block,
                                                  service_context->block_context.policy,
                                                  service_context->region));
    UVM_ASSERT(uvm_hmm_check_context_vma_is_valid(va_block,
                                                  &service_context->block_context,
                                                  service_context->block_context.hmm.vma,
                                                  service_context->region));

    // GPU fault servicing must be done under the VA space read lock. GPU fault

@@ -10724,7 +10684,9 @@ NV_STATUS uvm_va_block_service_locked(uvm_processor_id_t processor_id,
    else
        uvm_assert_rwsem_locked_read(&va_space->lock);

    uvm_va_block_get_prefetch_hint(va_block, service_context);
    uvm_va_block_get_prefetch_hint(va_block,
                                   uvm_va_policy_get_region(va_block, service_context->region),
                                   service_context);

    for_each_id_in_mask(new_residency, &service_context->resident_processors) {
        if (uvm_va_block_is_hmm(va_block)) {

@@ -10757,11 +10719,8 @@ NV_STATUS uvm_va_block_check_logical_permissions(uvm_va_block_t *va_block,
    uvm_va_range_t *va_range = va_block->va_range;
    uvm_prot_t access_prot = uvm_fault_access_type_to_prot(access_type);

    UVM_ASSERT(uvm_va_block_check_policy_is_valid(va_block,
                                                  va_block_context->policy,
                                                  uvm_va_block_region_for_page(page_index)));
    UVM_ASSERT(uvm_hmm_check_context_vma_is_valid(va_block,
                                                  va_block_context,
                                                  va_block_context->hmm.vma,
                                                  uvm_va_block_region_for_page(page_index)));

    // CPU permissions are checked later by block_map_cpu_page.

@@ -10779,8 +10738,8 @@ NV_STATUS uvm_va_block_check_logical_permissions(uvm_va_block_t *va_block,
        // vm_flags at any moment (for example on mprotect) and here we are not
        // guaranteed to have vma->vm_mm->mmap_lock. During tests we ensure that
        // this scenario does not happen.
        if ((va_block_context->mm || uvm_enable_builtin_tests) &&
            (access_prot > compute_logical_prot(va_block, va_block_context, page_index)))
        if (((va_block->hmm.va_space && va_block->hmm.va_space->va_space_mm.mm) || uvm_enable_builtin_tests) &&
            (access_prot > compute_logical_prot(va_block, va_block_context->hmm.vma, page_index)))
            return NV_ERR_INVALID_ACCESS_TYPE;
    }

@@ -10866,6 +10825,7 @@ static NV_STATUS block_cpu_fault_locked(uvm_va_block_t *va_block,
    uvm_perf_thrashing_hint_t thrashing_hint;
    uvm_processor_id_t new_residency;
    bool read_duplicate;
    const uvm_va_policy_t *policy;

    uvm_assert_rwsem_locked(&va_space->lock);

@@ -10874,13 +10834,13 @@ static NV_STATUS block_cpu_fault_locked(uvm_va_block_t *va_block,

    uvm_assert_mmap_lock_locked(service_context->block_context.mm);

    service_context->block_context.policy = uvm_va_policy_get(va_block, fault_addr);
    policy = uvm_va_policy_get(va_block, fault_addr);

    if (service_context->num_retries == 0) {
        // notify event to tools/performance heuristics
        uvm_perf_event_notify_cpu_fault(&va_space->perf_events,
                                        va_block,
                                        service_context->block_context.policy->preferred_location,
                                        policy->preferred_location,
                                        fault_addr,
                                        fault_access_type > UVM_FAULT_ACCESS_TYPE_READ,
                                        KSTK_EIP(current));

@@ -10925,7 +10885,7 @@ static NV_STATUS block_cpu_fault_locked(uvm_va_block_t *va_block,
                                                      page_index,
                                                      UVM_ID_CPU,
                                                      uvm_fault_access_type_mask_bit(fault_access_type),
                                                      service_context->block_context.policy,
                                                      policy,
                                                      &thrashing_hint,
                                                      UVM_SERVICE_OPERATION_REPLAYABLE_FAULTS,
                                                      &read_duplicate);
@@ -11025,7 +10985,6 @@ NV_STATUS uvm_va_block_find(uvm_va_space_t *va_space, NvU64 addr, uvm_va_block_t
NV_STATUS uvm_va_block_find_create_in_range(uvm_va_space_t *va_space,
                                            uvm_va_range_t *va_range,
                                            NvU64 addr,
                                            uvm_va_block_context_t *va_block_context,
                                            uvm_va_block_t **out_block)
{
    size_t index;

@@ -11033,12 +10992,7 @@ NV_STATUS uvm_va_block_find_create_in_range(uvm_va_space_t *va_space,
    if (uvm_enable_builtin_tests && atomic_dec_if_positive(&va_space->test.va_block_allocation_fail_nth) == 0)
        return NV_ERR_NO_MEMORY;

    if (!va_range) {
        if (!va_block_context || !va_block_context->mm)
            return NV_ERR_INVALID_ADDRESS;
        return uvm_hmm_va_block_find_create(va_space, addr, va_block_context, out_block);
    }

    UVM_ASSERT(va_range);
    UVM_ASSERT(addr >= va_range->node.start);
    UVM_ASSERT(addr <= va_range->node.end);

@@ -11052,14 +11006,32 @@ NV_STATUS uvm_va_block_find_create_in_range(uvm_va_space_t *va_space,
    return uvm_va_range_block_create(va_range, index, out_block);
}

NV_STATUS uvm_va_block_find_create(uvm_va_space_t *va_space,
NV_STATUS uvm_va_block_find_create_managed(uvm_va_space_t *va_space,
                                           NvU64 addr,
                                           uvm_va_block_context_t *va_block_context,
                                           uvm_va_block_t **out_block)
{
    uvm_va_range_t *va_range = uvm_va_range_find(va_space, addr);

    return uvm_va_block_find_create_in_range(va_space, va_range, addr, va_block_context, out_block);
    if (va_range)
        return uvm_va_block_find_create_in_range(va_space, va_range, addr, out_block);
    else
        return NV_ERR_INVALID_ADDRESS;
}

NV_STATUS uvm_va_block_find_create(uvm_va_space_t *va_space,
                                   NvU64 addr,
                                   struct vm_area_struct **hmm_vma,
                                   uvm_va_block_t **out_block)
{
    uvm_va_range_t *va_range = uvm_va_range_find(va_space, addr);

    if (hmm_vma)
        *hmm_vma = NULL;

    if (va_range)
        return uvm_va_block_find_create_in_range(va_space, va_range, addr, out_block);
    else
        return uvm_hmm_va_block_find_create(va_space, addr, hmm_vma, out_block);
}

// Launch a synchronous, encrypted copy between GPU and CPU.
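The split above leaves two entry points: uvm_va_block_find_create() can fall back to HMM block creation and reports the covering VMA through hmm_vma, while uvm_va_block_find_create_managed() succeeds only inside a managed va_range and needs no mm. A sketch of the caller-side choice, mirroring the test-ioctl call sites later in this change (uvm_test_change_pte_mapping and uvm_test_va_block_inject_error):

// With a retained and locked mm, the generic lookup may create an HMM
// block and report the VMA covering the address.
if (mm)
    status = uvm_va_block_find_create(va_space, addr, &block_context->hmm.vma, &va_block);
// Without an mm, only managed va_ranges are legal.
else
    status = uvm_va_block_find_create_managed(va_space, addr, &va_block);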
@@ -11236,8 +11208,6 @@ NV_STATUS uvm_va_block_write_from_cpu(uvm_va_block_t *va_block,
    if (UVM_ID_IS_INVALID(proc))
        proc = UVM_ID_CPU;

    block_context->policy = uvm_va_policy_get(va_block, dst);

    // Use make_resident() in all cases to break read-duplication, but
    // block_retry can be NULL as if the page is not resident yet we will make
    // it resident on the CPU.

@@ -11406,7 +11376,6 @@ static void block_add_eviction_mappings(void *args)
    uvm_va_range_t *va_range = va_block->va_range;
    NV_STATUS status = NV_OK;

    block_context->policy = uvm_va_range_get_policy(va_range);
    for_each_id_in_mask(id, &uvm_va_range_get_policy(va_range)->accessed_by) {
        status = uvm_va_block_set_accessed_by(va_block, block_context, id);
        if (status != NV_OK)

@@ -11557,8 +11526,8 @@ NV_STATUS uvm_va_block_evict_chunks(uvm_va_block_t *va_block,
                                    &accessed_by_set);
    }
    else {
        block_context->policy = uvm_va_range_get_policy(va_block->va_range);
        accessed_by_set = uvm_processor_mask_get_count(&block_context->policy->accessed_by) > 0;
        const uvm_va_policy_t *policy = uvm_va_range_get_policy(va_block->va_range);
        accessed_by_set = uvm_processor_mask_get_count(&policy->accessed_by) > 0;

        // TODO: Bug 1765193: make_resident() breaks read-duplication, but it's
        // not necessary to do so for eviction. Add a version that unmaps only

@@ -11749,19 +11718,16 @@ NV_STATUS uvm_test_va_block_inject_error(UVM_TEST_VA_BLOCK_INJECT_ERROR_PARAMS *
    struct mm_struct *mm;
    uvm_va_block_t *va_block;
    uvm_va_block_test_t *va_block_test;
    uvm_va_block_context_t *block_context = NULL;
    NV_STATUS status = NV_OK;

    mm = uvm_va_space_mm_or_current_retain_lock(va_space);
    uvm_va_space_down_read(va_space);

    block_context = uvm_va_block_context_alloc(mm);
    if (!block_context) {
        status = NV_ERR_NO_MEMORY;
        goto out;
    }
    if (mm)
        status = uvm_va_block_find_create(va_space, params->lookup_address, NULL, &va_block);
    else
        status = uvm_va_block_find_create_managed(va_space, params->lookup_address, &va_block);

    status = uvm_va_block_find_create(va_space, params->lookup_address, block_context, &va_block);
    if (status != NV_OK)
        goto out;

@@ -11801,7 +11767,6 @@ block_unlock:
out:
    uvm_va_space_up_read(va_space);
    uvm_va_space_mm_or_current_release_unlock(va_space, mm);
    uvm_va_block_context_free(block_context);
    return status;
}

@@ -11872,7 +11837,11 @@ NV_STATUS uvm_test_change_pte_mapping(UVM_TEST_CHANGE_PTE_MAPPING_PARAMS *params
        goto out;
    }

    status = uvm_va_block_find_create(va_space, params->va, block_context, &block);
    if (mm)
        status = uvm_va_block_find_create(va_space, params->va, &block_context->hmm.vma, &block);
    else
        status = uvm_va_block_find_create_managed(va_space, params->va, &block);

    if (status != NV_OK)
        goto out;

@@ -11899,8 +11868,6 @@ NV_STATUS uvm_test_change_pte_mapping(UVM_TEST_CHANGE_PTE_MAPPING_PARAMS *params
        goto out_block;
    }

    block_context->policy = uvm_va_policy_get(block, params->va);

    if (new_prot == UVM_PROT_NONE) {
        status = uvm_va_block_unmap(block, block_context, id, region, NULL, &block->tracker);
    }
@@ -453,11 +453,12 @@ struct uvm_va_block_struct
        NvU16 fault_migrations_to_last_proc;
    } prefetch_info;

#if UVM_IS_CONFIG_HMM()
    struct
    {
#if UVM_IS_CONFIG_HMM()
        // The MMU notifier is registered per va_block.
        struct mmu_interval_notifier notifier;
#endif

        // This is used to serialize migrations between CPU and GPU while
        // allowing the va_block lock to be dropped.

@@ -487,7 +488,6 @@ struct uvm_va_block_struct
        // Storage node for range tree of va_blocks.
        uvm_range_tree_node_t node;
    } hmm;
#endif
};

// We define additional per-VA Block fields for testing. When

@@ -678,18 +678,8 @@ static void uvm_va_block_context_init(uvm_va_block_context_t *va_block_context,
    memset(va_block_context, 0xff, sizeof(*va_block_context));

    va_block_context->mm = mm;
#if UVM_IS_CONFIG_HMM()
    va_block_context->hmm.vma = NULL;
#endif
}

// Check that a single policy covers the given region for the given va_block.
// This always returns true and is intended to only be used with UVM_ASSERT().
// Locking: the va_block lock must be held.
bool uvm_va_block_check_policy_is_valid(uvm_va_block_t *va_block,
                                        const uvm_va_policy_t *policy,
                                        uvm_va_block_region_t region);

// TODO: Bug 1766480: Using only page masks instead of a combination of regions
//       and page masks could simplify the below APIs and their implementations
//       at the cost of having to scan the whole mask for small regions.
@@ -734,15 +724,15 @@ bool uvm_va_block_check_policy_is_valid(uvm_va_block_t *va_block,
// user memory is guaranteed not to happen. Allocation-retry of GPU page tables
// can still occur though.
//
// va_block_context must not be NULL. This function will set a bit in
// va_block_context->make_resident.pages_changed_residency for each page that
// changed residency (due to a migration or first population) as a result of the
// operation and va_block_context->make_resident.all_involved_processors for
// each processor involved in the copy. This function only sets bits in those
// masks. It is the caller's responsibility to zero the masks or not first.
//
// va_block_context->policy must also be set by the caller for the given region.
// See the comments for uvm_va_block_check_policy_is_valid().
// va_block_context must not be NULL and policy for the region must
// match. This function will set a bit in
// va_block_context->make_resident.pages_changed_residency for each
// page that changed residency (due to a migration or first
// population) as a result of the operation and
// va_block_context->make_resident.all_involved_processors for each
// processor involved in the copy. This function only sets bits in
// those masks. It is the caller's responsibility to zero the masks or
// not first.
//
// Notably any status other than NV_OK indicates that the block's lock might
// have been unlocked and relocked.

@@ -839,7 +829,7 @@ void uvm_va_block_make_resident_finish(uvm_va_block_t *va_block,
// pages because the earlier operation can cause a PTE split or merge which is
// assumed by the later operation.
//
// va_block_context must not be NULL and va_block_context->policy must be valid.
// va_block_context must not be NULL and policy for the region must match.
// See the comments for uvm_va_block_check_policy_is_valid().
//
// If allocation-retry was required as part of the operation and was successful,

@@ -896,7 +886,7 @@ NV_STATUS uvm_va_block_map_mask(uvm_va_block_t *va_block,
// pages because the earlier operation can cause a PTE split or merge which is
// assumed by the later operation.
//
// va_block_context must not be NULL. The va_block_context->policy is unused.
// va_block_context must not be NULL.
//
// If allocation-retry was required as part of the operation and was successful,
// NV_ERR_MORE_PROCESSING_REQUIRED is returned. In this case, the entries in the

@@ -929,7 +919,7 @@ NV_STATUS uvm_va_block_unmap_mask(uvm_va_block_t *va_block,
// - Unmap the preferred location's processor from any pages in this region
//   which are not resident on the preferred location.
//
// va_block_context must not be NULL and va_block_context->policy must be valid.
// va_block_context must not be NULL and policy for the region must match.
// See the comments for uvm_va_block_check_policy_is_valid().
//
// LOCKING: The caller must hold the VA block lock.

@@ -941,7 +931,7 @@ NV_STATUS uvm_va_block_set_preferred_location_locked(uvm_va_block_t *va_block,
// location and policy. Waits for the operation to complete before returning.
// This function should only be called with managed va_blocks.
//
// va_block_context must not be NULL and va_block_context->policy must be valid.
// va_block_context must not be NULL and policy for the region must match.
// See the comments for uvm_va_block_check_policy_is_valid().
//
// LOCKING: This takes and releases the VA block lock. If va_block_context->mm

@@ -956,7 +946,7 @@ NV_STATUS uvm_va_block_set_accessed_by(uvm_va_block_t *va_block,
// the tracker after all mappings have been started.
// This function can be called with HMM and managed va_blocks.
//
// va_block_context must not be NULL and va_block_context->policy must be valid.
// va_block_context must not be NULL and policy for the region must match.
// See the comments for uvm_va_block_check_policy_is_valid().
//
// LOCKING: The caller must hold the va_block lock and

@@ -970,7 +960,7 @@ NV_STATUS uvm_va_block_set_accessed_by_locked(uvm_va_block_t *va_block,
// Breaks SetAccessedBy and remote mappings
// This function should only be called with managed va_blocks.
//
// va_block_context must not be NULL and va_block_context->policy must be valid.
// va_block_context must not be NULL and policy for the region must match.
// See the comments for uvm_va_block_check_policy_is_valid().
//
// LOCKING: This takes and releases the VA block lock. If va_block_context->mm

@@ -982,7 +972,7 @@ NV_STATUS uvm_va_block_set_read_duplication(uvm_va_block_t *va_block,
// Restores SetAccessedBy mappings
// This function should only be called with managed va_blocks.
//
// va_block_context must not be NULL and va_block_context->policy must be valid.
// va_block_context must not be NULL and policy for the region must match.
// See the comments for uvm_va_block_check_policy_is_valid().
//
// LOCKING: This takes and releases the VA block lock. If va_block_context->mm

@@ -1002,10 +992,9 @@ NV_STATUS uvm_va_block_unset_read_duplication(uvm_va_block_t *va_block,
// NV_ERR_INVALID_OPERATION    The access would violate the policies specified
//                             by UvmPreventMigrationRangeGroups.
//
// va_block_context must not be NULL, va_block_context->policy must be valid,
// and if the va_block is a HMM block, va_block_context->hmm.vma must be valid
// which also means the va_block_context->mm is not NULL, retained, and locked
// for at least read.
// va_block_context must not be NULL, policy must match, and if the va_block is
// a HMM block, va_block_context->hmm.vma must be valid which also means the
// va_block_context->mm is not NULL, retained, and locked for at least read.
// Locking: the va_block lock must be held.
NV_STATUS uvm_va_block_check_logical_permissions(uvm_va_block_t *va_block,
                                                 uvm_va_block_context_t *va_block_context,

@@ -1041,7 +1030,7 @@ NV_STATUS uvm_va_block_check_logical_permissions(uvm_va_block_t *va_block,
// different pages because the earlier operation can cause a PTE split or merge
// which is assumed by the later operation.
//
// va_block_context must not be NULL. The va_block_context->policy is unused.
// va_block_context must not be NULL.
//
// If allocation-retry was required as part of the operation and was successful,
// NV_ERR_MORE_PROCESSING_REQUIRED is returned. In this case, the entries in the

@@ -1081,7 +1070,7 @@ NV_STATUS uvm_va_block_revoke_prot_mask(uvm_va_block_t *va_block,
// processor_id, which triggered the migration and should have already been
// mapped).
//
// va_block_context must not be NULL and va_block_context->policy must be valid.
// va_block_context must not be NULL and policy for the region must match.
// See the comments for uvm_va_block_check_policy_is_valid().
//
// This function acquires/waits for the va_block tracker and updates that

@@ -1112,7 +1101,7 @@ NV_STATUS uvm_va_block_add_mappings_after_migration(uvm_va_block_t *va_block,
// Note that this can return NV_ERR_MORE_PROCESSING_REQUIRED just like
// uvm_va_block_map() indicating that the operation needs to be retried.
//
// va_block_context must not be NULL and va_block_context->policy must be valid.
// va_block_context must not be NULL and policy for the region must match.
// See the comments for uvm_va_block_check_policy_is_valid().
//
// LOCKING: The caller must hold the va block lock. If va_block_context->mm !=

@@ -1134,7 +1123,7 @@ NV_STATUS uvm_va_block_add_gpu_va_space(uvm_va_block_t *va_block, uvm_gpu_va_spa
// If mm != NULL, that mm is used for any CPU mappings which may be created as
// a result of this call. See uvm_va_block_context_t::mm for details.
//
// va_block_context must not be NULL. The va_block_context->policy is unused.
// va_block_context must not be NULL.
//
// LOCKING: The caller must hold the va_block lock. If block_context->mm is not
//          NULL, the caller must hold mm->mmap_lock in at least read mode.

@@ -1225,7 +1214,6 @@ NV_STATUS uvm_va_block_split_locked(uvm_va_block_t *existing_va_block,
// - va_space lock must be held in at least read mode
//
// service_context->block_context.mm is ignored and vma->vm_mm is used instead.
// service_context->block_context.policy is set by this function.
//
// Returns NV_ERR_INVALID_ACCESS_TYPE if a CPU mapping to fault_addr cannot be
// accessed, for example because it's within a range group which is non-

@@ -1239,10 +1227,10 @@ NV_STATUS uvm_va_block_cpu_fault(uvm_va_block_t *va_block,
// (migrations, cache invalidates, etc.) in response to the given service block
// context.
//
// service_context must not be NULL and service_context->block_context.policy
// must be valid. See the comments for uvm_va_block_check_policy_is_valid().
// If va_block is a HMM block, va_block_context->hmm.vma must be valid.
// See the comments for uvm_hmm_check_context_vma_is_valid() in uvm_hmm.h.
// service_context must not be NULL and policy for service_context->region must
// match. See the comments for uvm_va_block_check_policy_is_valid(). If
// va_block is a HMM block, va_block_context->hmm.vma must be valid. See the
// comments for uvm_hmm_check_context_vma_is_valid() in uvm_hmm.h.
// service_context->prefetch_hint is set by this function.
//
// Locking:

@@ -1267,10 +1255,10 @@ NV_STATUS uvm_va_block_service_locked(uvm_processor_id_t processor_id,
// Performs population of the destination pages, unmapping and copying source
// pages to new_residency.
//
// service_context must not be NULL and service_context->block_context.policy
// must be valid. See the comments for uvm_va_block_check_policy_is_valid().
// If va_block is a HMM block, va_block_context->hmm.vma must be valid.
// See the comments for uvm_hmm_check_context_vma_is_valid() in uvm_hmm.h.
// service_context must not be NULL and policy for service_context->region must
// match. See the comments for uvm_va_block_check_policy_is_valid(). If
// va_block is a HMM block, va_block_context->hmm.vma must be valid. See the
// comments for uvm_hmm_check_context_vma_is_valid() in uvm_hmm.h.
// service_context->prefetch_hint should be set before calling this function.
//
// Locking:

@@ -1296,10 +1284,10 @@ NV_STATUS uvm_va_block_service_copy(uvm_processor_id_t processor_id,
// This updates the va_block residency state and maps the faulting processor_id
// to the new residency (which may be remote).
//
// service_context must not be NULL and service_context->block_context.policy
// must be valid. See the comments for uvm_va_block_check_policy_is_valid().
// If va_block is a HMM block, va_block_context->hmm.vma must be valid.
// See the comments for uvm_hmm_check_context_vma_is_valid() in uvm_hmm.h.
// service_context must not be NULL and policy for service_context->region must
// match. See the comments for uvm_va_block_check_policy_is_valid(). If
// va_block is a HMM block, va_block_context->hmm.vma must be valid. See the
// comments for uvm_hmm_check_context_vma_is_valid() in uvm_hmm.h.
// service_context must be initialized by calling uvm_va_block_service_copy()
// before calling this function.
//
@@ -1428,40 +1416,34 @@ const uvm_page_mask_t *uvm_va_block_map_mask_get(uvm_va_block_t *block, uvm_proc
NV_STATUS uvm_va_block_find(uvm_va_space_t *va_space, NvU64 addr, uvm_va_block_t **out_block);

// Same as uvm_va_block_find except that the block is created if not found.
// If addr is covered by a UVM_VA_RANGE_TYPE_MANAGED va_range, a managed block
// will be created. Otherwise, if addr is not covered by any va_range, HMM is
// enabled in the va_space, and va_block_context and va_block_context->mm are
// non-NULL, then a HMM block will be created and va_block_context->hmm.vma is
// set to the VMA covering 'addr'. The va_block_context->policy field is left
// unchanged.
// In either case, if va_block_context->mm is non-NULL, it must be retained and
// locked in at least read mode. Return values:
// If addr is covered by a UVM_VA_RANGE_TYPE_MANAGED va_range a managed block
// will be created. If addr is not covered by any va_range and HMM is
// enabled in the va_space then a HMM block will be created and hmm_vma is
// set to the VMA covering 'addr'. The va_space_mm must be retained and locked.
// Otherwise hmm_vma is set to NULL.
// Return values:
// NV_ERR_INVALID_ADDRESS addr is not a UVM_VA_RANGE_TYPE_MANAGED va_range nor
//                        a HMM enabled VMA.
// NV_ERR_NO_MEMORY       memory could not be allocated.
NV_STATUS uvm_va_block_find_create(uvm_va_space_t *va_space,
                                   NvU64 addr,
                                   uvm_va_block_context_t *va_block_context,
                                   struct vm_area_struct **hmm_vma,
                                   uvm_va_block_t **out_block);

// Same as uvm_va_block_find_create except that va_range lookup was already done
// by the caller. If the supplied va_range is NULL, this function behaves just
// like when the va_range lookup in uvm_va_block_find_create is NULL.
// Same as uvm_va_block_find_create except that only managed va_blocks are
// created if not already present in the VA range. Does not require va_space_mm
// to be locked or retained.
NV_STATUS uvm_va_block_find_create_managed(uvm_va_space_t *va_space,
                                           NvU64 addr,
                                           uvm_va_block_t **out_block);

// Same as uvm_va_block_find_create_managed except that va_range lookup was
// already done by the caller. The supplied va_range must not be NULL.
NV_STATUS uvm_va_block_find_create_in_range(uvm_va_space_t *va_space,
                                            uvm_va_range_t *va_range,
                                            NvU64 addr,
                                            uvm_va_block_context_t *va_block_context,
                                            uvm_va_block_t **out_block);

// Same as uvm_va_block_find_create except that only managed va_blocks are
// created if not already present in the VA range.
static NV_STATUS uvm_va_block_find_create_managed(uvm_va_space_t *va_space,
                                                  NvU64 addr,
                                                  uvm_va_block_t **out_block)
{
    return uvm_va_block_find_create(va_space, addr, NULL, out_block);
}

// Look up a chunk backing a specific address within the VA block.
// Returns NULL if none.
uvm_gpu_chunk_t *uvm_va_block_lookup_gpu_chunk(uvm_va_block_t *va_block, uvm_gpu_t *gpu, NvU64 address);
@@ -1476,10 +1458,10 @@ uvm_gpu_chunk_t *uvm_va_block_lookup_gpu_chunk(uvm_va_block_t *va_block, uvm_gpu
// The caller needs to handle allocation-retry. va_block_retry can be NULL if
// the destination is the CPU.
//
// va_block_context must not be NULL and va_block_context->policy must be valid.
// See the comments for uvm_va_block_check_policy_is_valid().
// If va_block is a HMM block, va_block_context->hmm.vma must be valid.
// See the comments for uvm_hmm_check_context_vma_is_valid() in uvm_hmm.h.
// va_block_context must not be NULL and policy for the region must match. See
// the comments for uvm_va_block_check_policy_is_valid(). If va_block is a HMM
// block, va_block_context->hmm.vma must be valid. See the comments for
// uvm_hmm_check_context_vma_is_valid() in uvm_hmm.h.
//
// LOCKING: The caller must hold the va_block lock. If va_block_context->mm !=
//          NULL, va_block_context->mm->mmap_lock must be held in at least

@@ -1497,7 +1479,7 @@ NV_STATUS uvm_va_block_migrate_locked(uvm_va_block_t *va_block,
// The [dst, dst + size) range has to fit within a single PAGE_SIZE page.
//
// va_block_context must not be NULL. The caller is not required to set
// va_block_context->policy or va_block_context->hmm.vma.
// va_block_context->hmm.vma.
//
// The caller needs to support allocation-retry of page tables.
//

@@ -1569,7 +1551,7 @@ void uvm_va_block_mark_cpu_dirty(uvm_va_block_t *va_block);
// successful, NV_ERR_MORE_PROCESSING_REQUIRED is returned. In this case the
// block's lock was unlocked and relocked.
//
// va_block_context must not be NULL. The va_block_context->policy is unused.
// va_block_context must not be NULL.
//
// LOCKING: The caller must hold the va_block lock.
NV_STATUS uvm_va_block_set_cancel(uvm_va_block_t *va_block, uvm_va_block_context_t *block_context, uvm_gpu_t *gpu);

@@ -1650,12 +1632,18 @@ static uvm_va_block_region_t uvm_va_block_region_from_block(uvm_va_block_t *va_b
    return uvm_va_block_region(0, uvm_va_block_num_cpu_pages(va_block));
}

// Create a block region from a va block and page mask. Note that the region
// Create a block region from a va block and page mask. If va_block is NULL, the
// region is assumed to cover the maximum va_block size. Note that the region
// covers the first through the last set bit and may have unset bits in between.
static uvm_va_block_region_t uvm_va_block_region_from_mask(uvm_va_block_t *va_block, const uvm_page_mask_t *page_mask)
{
    uvm_va_block_region_t region;
    uvm_page_index_t outer = uvm_va_block_num_cpu_pages(va_block);
    uvm_page_index_t outer;

    if (va_block)
        outer = uvm_va_block_num_cpu_pages(va_block);
    else
        outer = PAGES_PER_UVM_VA_BLOCK;

    region.first = find_first_bit(page_mask->bitmap, outer);
    if (region.first >= outer) {
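Since uvm_va_block_region_from_mask() now tolerates a NULL va_block, a caller that only holds a page mask can still turn it into a region; the bound simply falls back to the maximum block size. A short usage sketch based directly on the function above (va_block, mask and region are assumed locals):

// Bounded by the block's actual CPU page count.
region = uvm_va_block_region_from_mask(va_block, &mask);

// No block available: bounded by PAGES_PER_UVM_VA_BLOCK, i.e. the
// maximum 2MB va_block.
region = uvm_va_block_region_from_mask(NULL, &mask);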
@@ -2140,15 +2128,14 @@ uvm_va_block_region_t uvm_va_block_big_page_region_subset(uvm_va_blo
// MAX_BIG_PAGES_PER_UVM_VA_BLOCK is returned.
size_t uvm_va_block_big_page_index(uvm_va_block_t *va_block, uvm_page_index_t page_index, NvU32 big_page_size);

// Returns the new residency for a page that faulted or triggered access
// counter notifications. The read_duplicate output parameter indicates if the
// page meets the requirements to be read-duplicated
// va_block_context must not be NULL, va_block_context->policy must be valid,
// and if the va_block is a HMM block, va_block_context->hmm.vma must be valid
// which also means the va_block_context->mm is not NULL, retained, and locked
// for at least read. See the comments for uvm_va_block_check_policy_is_valid()
// and uvm_hmm_check_context_vma_is_valid() in uvm_hmm.h.
// Locking: the va_block lock must be held.
// Returns the new residency for a page that faulted or triggered access counter
// notifications. The read_duplicate output parameter indicates if the page
// meets the requirements to be read-duplicated. va_block_context must not be
// NULL, and if the va_block is a HMM block, va_block_context->hmm.vma must be
// valid which also means the va_block_context->mm is not NULL, retained, and
// locked for at least read. See the comments for
// uvm_va_block_check_policy_is_valid() and uvm_hmm_check_context_vma_is_valid()
// in uvm_hmm.h. Locking: the va_block lock must be held.
uvm_processor_id_t uvm_va_block_select_residency(uvm_va_block_t *va_block,
                                                 uvm_va_block_context_t *va_block_context,
                                                 uvm_page_index_t page_index,
@@ -29,9 +29,7 @@
#include "uvm_tlb_batch.h"
#include "uvm_forward_decl.h"

#if UVM_IS_CONFIG_HMM()
#include <linux/migrate.h>
#endif

// UVM_VA_BLOCK_BITS is 21, meaning the maximum block size is 2MB. Rationale:
// - 2MB matches the largest Pascal GPU page size so it's a natural fit

@@ -234,9 +232,6 @@ typedef struct
    // the mm, such as creating CPU mappings.
    struct mm_struct *mm;

    const uvm_va_policy_t *policy;

#if UVM_IS_CONFIG_HMM()
    struct
    {
        // These are used for migrate_vma_*(), hmm_range_fault(), and

@@ -257,10 +252,11 @@ typedef struct
        // Cached VMA pointer. This is only valid while holding the mmap_lock.
        struct vm_area_struct *vma;

#if UVM_IS_CONFIG_HMM()
        // Used for migrate_vma_*() to migrate pages to/from GPU/CPU.
        struct migrate_vma migrate_vma_args;
    } hmm;
#endif
    } hmm;

    // Convenience buffer for page mask prints
    char page_mask_string_buffer[UVM_PAGE_MASK_PRINT_MIN_BUFFER_SIZE];
@@ -54,6 +54,52 @@ const uvm_va_policy_t *uvm_va_policy_get(uvm_va_block_t *va_block, NvU64 addr)
    }
}

// HMM va_blocks can have different policies for different regions within the
// va_block. This function checks the given region is covered by the same policy
// and asserts if the region is covered by different policies.
// This always returns true and is intended to only be used with UVM_ASSERT() to
// avoid calling it on release builds.
// Locking: the va_block lock must be held.
static bool uvm_hmm_va_block_assert_policy_is_valid(uvm_va_block_t *va_block,
                                                    const uvm_va_policy_t *policy,
                                                    uvm_va_block_region_t region)
{
    const uvm_va_policy_node_t *node;

    if (uvm_va_policy_is_default(policy)) {
        // There should only be the default policy within the region.
        node = uvm_va_policy_node_iter_first(va_block,
                                             uvm_va_block_region_start(va_block, region),
                                             uvm_va_block_region_end(va_block, region));
        UVM_ASSERT(!node);
    }
    else {
        // The policy node should cover the region.
        node = uvm_va_policy_node_from_policy(policy);
        UVM_ASSERT(node->node.start <= uvm_va_block_region_start(va_block, region));
        UVM_ASSERT(node->node.end >= uvm_va_block_region_end(va_block, region));
    }

    return true;
}

const uvm_va_policy_t *uvm_va_policy_get_region(uvm_va_block_t *va_block, uvm_va_block_region_t region)
{
    uvm_assert_mutex_locked(&va_block->lock);

    if (uvm_va_block_is_hmm(va_block)) {
        const uvm_va_policy_t *policy;
        const uvm_va_policy_node_t *node = uvm_va_policy_node_find(va_block, uvm_va_block_region_start(va_block, region));

        policy = node ? &node->policy : &uvm_va_policy_default;
        UVM_ASSERT(uvm_hmm_va_block_assert_policy_is_valid(va_block, policy, region));
        return policy;
    }
    else {
        return uvm_va_range_get_policy(va_block->va_range);
    }
}

#if UVM_IS_CONFIG_HMM()

static struct kmem_cache *g_uvm_va_policy_node_cache __read_mostly;
@@ -100,6 +100,9 @@ bool uvm_va_policy_is_read_duplicate(const uvm_va_policy_t *policy, uvm_va_space
// Locking: The va_block lock must be held.
const uvm_va_policy_t *uvm_va_policy_get(uvm_va_block_t *va_block, NvU64 addr);

// Same as above but asserts the policy covers the whole region
const uvm_va_policy_t *uvm_va_policy_get_region(uvm_va_block_t *va_block, uvm_va_block_region_t region);

// Return a uvm_va_policy_node_t given a uvm_va_policy_t pointer.
static const uvm_va_policy_node_t *uvm_va_policy_node_from_policy(const uvm_va_policy_t *policy)
{
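uvm_va_policy_get_region(), defined in the previous file, resolves an HMM region's policy by finding the covering policy node and falling back to the default policy when none exists. A self-contained model of that node-or-default pattern, using stub types rather than the real policy tree:

#include <stddef.h>

struct stub_policy { int preferred_location; };

static const struct stub_policy stub_default_policy = { -1 };

/* Stand-in for uvm_va_policy_node_find(): returns the policy covering
 * 'start', or NULL when no explicit node exists there. */
static const struct stub_policy *node_find(unsigned long start)
{
    (void)start;
    return NULL; /* pretend no explicit node covers this region */
}

static const struct stub_policy *policy_get_region(unsigned long region_start)
{
    const struct stub_policy *node = node_find(region_start);

    /* Same shape as the driver's ternary: an explicit node wins, otherwise
     * the address is governed by the default policy. */
    return node ? node : &stub_default_policy;
}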
@@ -1,5 +1,5 @@
/*******************************************************************************
    Copyright (c) 2015-2022 NVIDIA Corporation
    Copyright (c) 2015-2023 NVIDIA Corporation

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to

@@ -376,7 +376,7 @@ NV_STATUS uvm_va_range_create_semaphore_pool(uvm_va_space_t *va_space,
        if (status != NV_OK)
            goto error;

        if (i == 0 && g_uvm_global.sev_enabled)
        if (i == 0 && g_uvm_global.conf_computing_enabled)
            mem_alloc_params.dma_owner = gpu;

        if (attrs.is_cacheable) {

@@ -608,7 +608,6 @@ static NV_STATUS va_range_add_gpu_va_space_managed(uvm_va_range_t *va_range,
    uvm_va_block_t *va_block;
    uvm_va_block_context_t *va_block_context = uvm_va_space_block_context(va_space, mm);

    va_block_context->policy = uvm_va_range_get_policy(va_range);

    // TODO: Bug 2090378. Consolidate all per-VA block operations within
    //       uvm_va_block_add_gpu_va_space so we only need to take the VA block

@@ -687,7 +686,6 @@ static void va_range_remove_gpu_va_space_managed(uvm_va_range_t *va_range,
    bool should_enable_read_duplicate;
    uvm_va_block_context_t *va_block_context = uvm_va_space_block_context(va_space, mm);

    va_block_context->policy = uvm_va_range_get_policy(va_range);
    should_enable_read_duplicate =
        uvm_va_range_get_policy(va_range)->read_duplication == UVM_READ_DUPLICATION_ENABLED &&
        uvm_va_space_can_read_duplicate(va_space, NULL) != uvm_va_space_can_read_duplicate(va_space, gpu_va_space->gpu);

@@ -769,7 +767,6 @@ static NV_STATUS uvm_va_range_enable_peer_managed(uvm_va_range_t *va_range, uvm_
    uvm_va_space_t *va_space = va_range->va_space;
    uvm_va_block_context_t *va_block_context = uvm_va_space_block_context(va_space, NULL);

    va_block_context->policy = uvm_va_range_get_policy(va_range);

    for_each_va_block_in_va_range(va_range, va_block) {
        // TODO: Bug 1767224: Refactor the uvm_va_block_set_accessed_by logic

@@ -1322,7 +1319,6 @@ static NV_STATUS range_unmap_mask(uvm_va_range_t *va_range,
    if (uvm_processor_mask_empty(mask))
        return NV_OK;

    block_context->policy = uvm_va_range_get_policy(va_range);

    for_each_va_block_in_va_range(va_range, block) {
        NV_STATUS status;

@@ -1364,7 +1360,6 @@ static NV_STATUS range_map_uvm_lite_gpus(uvm_va_range_t *va_range, uvm_tracker_t
    if (uvm_processor_mask_empty(&va_range->uvm_lite_gpus))
        return NV_OK;

    va_block_context->policy = uvm_va_range_get_policy(va_range);

    for_each_va_block_in_va_range(va_range, va_block) {
        // UVM-Lite GPUs always map with RWA

@@ -1528,7 +1523,6 @@ NV_STATUS uvm_va_range_set_preferred_location(uvm_va_range_t *va_range,
    uvm_processor_mask_copy(&va_range->uvm_lite_gpus, &new_uvm_lite_gpus);

    va_block_context = uvm_va_space_block_context(va_space, mm);
    va_block_context->policy = uvm_va_range_get_policy(va_range);

    for_each_va_block_in_va_range(va_range, va_block) {
        uvm_processor_id_t id;

@@ -1610,7 +1604,6 @@ NV_STATUS uvm_va_range_set_accessed_by(uvm_va_range_t *va_range,

    uvm_processor_mask_copy(&va_range->uvm_lite_gpus, &new_uvm_lite_gpus);
    va_block_context = uvm_va_space_block_context(va_space, mm);
    va_block_context->policy = policy;

    for_each_va_block_in_va_range(va_range, va_block) {
        status = uvm_va_block_set_accessed_by(va_block, va_block_context, processor_id);

@@ -1657,7 +1650,6 @@ NV_STATUS uvm_va_range_set_read_duplication(uvm_va_range_t *va_range, struct mm_
        return NV_OK;

    va_block_context = uvm_va_space_block_context(va_range->va_space, mm);
    va_block_context->policy = uvm_va_range_get_policy(va_range);

    for_each_va_block_in_va_range(va_range, va_block) {
        NV_STATUS status = uvm_va_block_set_read_duplication(va_block, va_block_context);

@@ -1679,7 +1671,6 @@ NV_STATUS uvm_va_range_unset_read_duplication(uvm_va_range_t *va_range, struct m
        return NV_OK;

    va_block_context = uvm_va_space_block_context(va_range->va_space, mm);
    va_block_context->policy = uvm_va_range_get_policy(va_range);

    for_each_va_block_in_va_range(va_range, va_block) {
        status = uvm_va_block_unset_read_duplication(va_block, va_block_context);

@@ -1816,7 +1807,7 @@ NV_STATUS uvm_api_alloc_semaphore_pool(UVM_ALLOC_SEMAPHORE_POOL_PARAMS *params,
    if (params->gpuAttributesCount > UVM_MAX_GPUS)
        return NV_ERR_INVALID_ARGUMENT;

    if (g_uvm_global.sev_enabled && params->gpuAttributesCount == 0)
    if (g_uvm_global.conf_computing_enabled && params->gpuAttributesCount == 0)
        return NV_ERR_INVALID_ARGUMENT;

    // The mm needs to be locked in order to remove stale HMM va_blocks.
@@ -242,9 +242,7 @@ NV_STATUS uvm_va_space_create(struct address_space *mapping, uvm_va_space_t **va
    if (status != NV_OK)
        goto fail;

    status = uvm_hmm_va_space_initialize(va_space);
    if (status != NV_OK)
        goto fail;
    uvm_hmm_va_space_initialize(va_space);

    uvm_va_space_up_write(va_space);
    uvm_up_write_mmap_lock(current->mm);

@@ -2226,11 +2224,12 @@ static vm_fault_t uvm_va_space_cpu_fault(uvm_va_space_t *va_space,
        // address with mremap() so create a new va_block if needed.
        status = uvm_hmm_va_block_find_create(va_space,
                                              fault_addr,
                                              &service_context->block_context,
                                              &service_context->block_context.hmm.vma,
                                              &va_block);
        if (status != NV_OK)
            break;

        UVM_ASSERT(service_context->block_context.hmm.vma == vma);
        status = uvm_hmm_migrate_begin(va_block);
        if (status != NV_OK)
            break;
@@ -274,6 +274,22 @@ NV_STATUS uvm_va_space_mm_register(uvm_va_space_t *va_space)
        }
    }

    if ((UVM_IS_CONFIG_HMM() || UVM_ATS_PREFETCH_SUPPORTED()) && uvm_va_space_pageable_mem_access_supported(va_space)) {
#if UVM_CAN_USE_MMU_NOTIFIERS()
        // Initialize MMU interval notifiers for this process. This allows
        // mmu_interval_notifier_insert() to be called without holding the
        // mmap_lock for write.
        // Note: there is no __mmu_notifier_unregister(), this call just
        // allocates memory which is attached to the mm_struct and freed
        // when the mm_struct is freed.
        ret = __mmu_notifier_register(NULL, current->mm);
        if (ret)
            return errno_to_nv_status(ret);
#else
        UVM_ASSERT(0);
#endif
    }

    return NV_OK;
}
@@ -0,0 +1,33 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __DETECT_SELF_HOSTED_H__
#define __DETECT_SELF_HOSTED_H__

// PCI devIds 0x2340-0x237f are for Self-Hosted Hopper
static inline int pci_devid_is_self_hosted(unsigned short devid)
{
    return devid >= 0x2340 && devid <= 0x237f;
}

#endif
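Because the new helper is a pure predicate over the PCI device ID, its range logic is easy to sanity-check in isolation. A tiny self-contained test; the values are just the boundaries from the comment above, not claims about particular products:

#include <assert.h>

// Same definition as in detect-self-hosted.h above.
static inline int pci_devid_is_self_hosted(unsigned short devid)
{
    return devid >= 0x2340 && devid <= 0x237f;
}

int main(void)
{
    assert(!pci_devid_is_self_hosted(0x233f)); // just below the range
    assert( pci_devid_is_self_hosted(0x2340)); // first ID in the range
    assert( pci_devid_is_self_hosted(0x237f)); // last ID in the range
    assert(!pci_devid_is_self_hosted(0x2380)); // just above the range
    return 0;
}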
@@ -96,6 +96,8 @@

#include "conftest/patches.h"

#include "detect-self-hosted.h"

#define RM_THRESHOLD_TOTAL_IRQ_COUNT     100000
#define RM_THRESHOLD_UNAHNDLED_IRQ_COUNT 99900
#define RM_UNHANDLED_TIMEOUT_US          100000
@@ -209,7 +209,7 @@ NV_STATUS nvUvmInterfaceSessionCreate(uvmGpuSessionHandle *session,
    memset(platformInfo, 0, sizeof(*platformInfo));
    platformInfo->atsSupported = nv_ats_supported;

    platformInfo->sevEnabled = os_cc_enabled;
    platformInfo->confComputingEnabled = os_cc_enabled;

    status = rm_gpu_ops_create_session(sp, (gpuSessionHandle *)session);
@@ -50,22 +50,6 @@

using namespace DisplayPort;

// These wrappers are specifically for DSC PPS library malloc and free callbacks
// Pointer to these functions are populated to dscMalloc/dscFree in DSC_InitializeCallBack and it is initialized from both DPLib and HDMiPacketLib.
// In HDMI case, callback function for malloc/free needs client handle so to match function prototype, in DP case, adding these wrappers.
extern "C" void * dpMallocCb(const void *clientHandle, NvLength size);
extern "C" void dpFreeCb(const void *clientHandle, void *pMemPtr);

extern "C" void * dpMallocCb(const void *clientHandle, NvLength size)
{
    return dpMalloc(size);
}

extern "C" void dpFreeCb(const void *clientHandle, void *pMemPtr)
{
    dpFree(pMemPtr);
}

ConnectorImpl::ConnectorImpl(MainLink * main, AuxBus * auxBus, Timer * timer, Connector::EventSink * sink)
    : main(main),
      auxBus(auxBus),

@@ -158,14 +142,6 @@ ConnectorImpl::ConnectorImpl(MainLink * main, AuxBus * auxBus, Timer * timer, Co
    hal->applyRegkeyOverrides(dpRegkeyDatabase);

    highestAssessedLC = getMaxLinkConfig();

    // Initialize DSC callbacks
    DSC_CALLBACK callback;
    callback.clientHandle = NULL;
    callback.dscPrint     = NULL;
    callback.dscMalloc    = dpMallocCb;
    callback.dscFree      = dpFreeCb;
    DSC_InitializeCallback(callback);
}

void ConnectorImpl::applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatabase)

@@ -1309,10 +1285,13 @@ bool ConnectorImpl::compoundQueryAttach(Group * target,
                warData.dpData.hBlank = modesetParams.modesetInfo.rasterWidth - modesetParams.modesetInfo.surfaceWidth;
                warData.connectorType = DSC_DP;

                DSC_GENERATE_PPS_OPAQUE_WORKAREA *pScratchBuffer = nullptr;
                pScratchBuffer = (DSC_GENERATE_PPS_OPAQUE_WORKAREA*) dpMalloc(sizeof(DSC_GENERATE_PPS_OPAQUE_WORKAREA));

                result = DSC_GeneratePPS(&dscInfo, &modesetInfoDSC,
                                         &warData, availableBandwidthBitsPerSecond,
                                         (NvU32*)(PPS),
                                         (NvU32*)(&bitsPerPixelX16));
                                         (NvU32*)(&bitsPerPixelX16), pScratchBuffer);

                // Try max dsc compression bpp = 8 once to check if that can support that mode.
                if (result != NVT_STATUS_SUCCESS && !bDscBppForced)

@@ -1324,7 +1303,13 @@ bool ConnectorImpl::compoundQueryAttach(Group * target,
                    result = DSC_GeneratePPS(&dscInfo, &modesetInfoDSC,
                                             &warData, availableBandwidthBitsPerSecond,
                                             (NvU32*)(PPS),
                                             (NvU32*)(&bitsPerPixelX16));
                                             (NvU32*)(&bitsPerPixelX16), pScratchBuffer);
                }

                if (pScratchBuffer)
                {
                    dpFree(pScratchBuffer);
                    pScratchBuffer = nullptr;
                }

                if (result != NVT_STATUS_SUCCESS)
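DSC_GeneratePPS() now takes a caller-provided opaque workarea instead of allocating scratch space internally, so each call site in this change follows the same allocate, call, free shape. A condensed sketch of that pattern, taken from the hunks above (error handling elided; dpMalloc/dpFree are the DP library allocators already shown, and NULL is used here in place of nullptr):

DSC_GENERATE_PPS_OPAQUE_WORKAREA *pScratchBuffer =
    (DSC_GENERATE_PPS_OPAQUE_WORKAREA*)dpMalloc(sizeof(DSC_GENERATE_PPS_OPAQUE_WORKAREA));

result = DSC_GeneratePPS(&dscInfo, &modesetInfoDSC,
                         &warData, availableBandwidthBitsPerSecond,
                         (NvU32*)(PPS),
                         (NvU32*)(&bitsPerPixelX16), pScratchBuffer);

// The workarea is only needed for the duration of the call(s).
if (pScratchBuffer)
{
    dpFree(pScratchBuffer);
    pScratchBuffer = NULL;
}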
@@ -1614,10 +1599,21 @@ nonDscDpIMP:
            warData.dpData.dpMode = DSC_DP_SST;
            warData.connectorType = DSC_DP;

            if ((DSC_GeneratePPS(&dscInfo, &modesetInfoDSC,
                                 &warData, availableBandwidthBitsPerSecond,
                                 (NvU32*)(PPS),
                                 (NvU32*)(&bitsPerPixelX16))) != NVT_STATUS_SUCCESS)
            DSC_GENERATE_PPS_OPAQUE_WORKAREA *pScratchBuffer = nullptr;
            pScratchBuffer = (DSC_GENERATE_PPS_OPAQUE_WORKAREA*)dpMalloc(sizeof(DSC_GENERATE_PPS_OPAQUE_WORKAREA));

            bool bPpsFailure = ((DSC_GeneratePPS(&dscInfo, &modesetInfoDSC,
                                                 &warData, availableBandwidthBitsPerSecond,
                                                 (NvU32*)(PPS),
                                                 (NvU32*)(&bitsPerPixelX16),
                                                 pScratchBuffer)) != NVT_STATUS_SUCCESS);
            if (pScratchBuffer)
            {
                dpFree(pScratchBuffer);
                pScratchBuffer = nullptr;
            }

            if (bPpsFailure)
            {
                compoundQueryResult = false;
                pDscParams->bEnableDsc = false;
@@ -36,25 +36,25 @@
// and then checked back in. You cannot make changes to these sections without
// corresponding changes to the buildmeister script
#ifndef NV_BUILD_BRANCH
#define NV_BUILD_BRANCH r535_00
#define NV_BUILD_BRANCH r537_13
#endif
#ifndef NV_PUBLIC_BRANCH
#define NV_PUBLIC_BRANCH r535_00
#define NV_PUBLIC_BRANCH r537_13
#endif

#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r535/r535_00-239"
#define NV_BUILD_CHANGELIST_NUM (33134228)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r535/r537_13-260"
#define NV_BUILD_CHANGELIST_NUM (33206197)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "rel/gpu_drv/r535/r535_00-239"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33134228)
#define NV_BUILD_NAME "rel/gpu_drv/r535/r537_13-260"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33206197)

#else /* Windows builds */
#define NV_BUILD_BRANCH_VERSION "r535_00-254"
#define NV_BUILD_CHANGELIST_NUM (33134228)
#define NV_BUILD_BRANCH_VERSION "r537_13-1"
#define NV_BUILD_CHANGELIST_NUM (33194057)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "536.92"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33134228)
#define NV_BUILD_NAME "537.17"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33194057)
#define NV_BUILD_BRANCH_BASE_VERSION R535
#endif
// End buildmeister python edited section
@@ -158,6 +158,7 @@ static const PNPVendorId PNPVendorIds[] =
    { "CSE", _VENDOR_NAME_ENTRY("Compu Shack") },
    { "CSI", _VENDOR_NAME_ENTRY("Cabletron") },
    { "CSS", _VENDOR_NAME_ENTRY("CSS Laboratories") },
    { "CSW", _VENDOR_NAME_ENTRY("China Star Optoelectronics Technology Co., Ltd") },
    { "CTN", _VENDOR_NAME_ENTRY("Computone") },
    { "CTX", _VENDOR_NAME_ENTRY("Chuntex/CTX") },
    { "CUB", _VENDOR_NAME_ENTRY("Cubix") },
@@ -4,7 +4,7 @@
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
    (defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)

#define NV_VERSION_STRING "535.98"
#define NV_VERSION_STRING "535.104.05"

#else
@@ -110,7 +110,18 @@
#define NV_PFALCON_FALCON_DMEMC_AINCW                          24:24   /* RWIVF */
#define NV_PFALCON_FALCON_DMEMC_AINCW_TRUE                0x00000001   /* RW--V */
#define NV_PFALCON_FALCON_DMEMC_AINCW_FALSE               0x00000000   /* RW--V */
#define NV_PFALCON_FALCON_DMEMC_AINCR                          25:25   /* RWIVF */
#define NV_PFALCON_FALCON_DMEMC_AINCR_TRUE                0x00000001   /* RW--V */
#define NV_PFALCON_FALCON_DMEMC_AINCR_FALSE               0x00000000   /* RW--V */
#define NV_PFALCON_FALCON_DMEMD(i)                  (0x000001c4+(i)*8)   /* RW-4A */
#define NV_PFALCON_FALCON_DMEMD_DATA                            31:0   /* RW-VF */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_0(i) (0x00000300+(i)*4)   /* RW-4A */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_0__SIZE_1           4   /*       */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_1(i) (0x00000310+(i)*4)   /* RW-4A */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_1__SIZE_1           4   /*       */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_2(i) (0x00000320+(i)*4)   /* RW-4A */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_2__SIZE_1           4   /*       */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_3(i) (0x00000330+(i)*4)   /* RW-4A */
#define NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_3__SIZE_1           4   /*       */
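These are indexed register macros: each scratch group is an array of four 32-bit registers spaced 4 bytes apart, so the offset arithmetic is direct. A one-line illustration (the variable name is hypothetical):

```c
// Scratch group 1, register 2: 0x00000310 + 2*4 = 0x00000318.
NvU32 regOffset = NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_1(2);  // == 0x00000318
```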

#endif // __tu102_dev_falcon_v4_h__
@@ -181,24 +181,6 @@ static const NVHDMIPKT_CLASS_HIERARCHY hierarchy[] =
    },
};

#if defined(DSC_CALLBACK_MODIFIED)
// Callbacks for DSC PPS library
void *hdmipktMallocCb(const void *clientHandle, NvLength size);
void hdmipktFreeCb(const void *clientHandle, void *pMemPtr);

void *hdmipktMallocCb(const void *clientHandle, NvLength size)
{
    const NVHDMIPKT_CLASS *pClass = (const NVHDMIPKT_CLASS*)(clientHandle);
    return pClass->callback.malloc(pClass->cbHandle, size);
}

void hdmipktFreeCb(const void *clientHandle, void *pMemPtr)
{
    const NVHDMIPKT_CLASS *pClass = (const NVHDMIPKT_CLASS*)(clientHandle);
    pClass->callback.free(pClass->cbHandle, pMemPtr);
}
#endif // DSC_CALLBACK_MODIFIED

/********************************** HDMI Library interfaces *************************************/
/*
 * NvHdmiPkt_PacketCtrl

@@ -581,15 +563,6 @@ NvHdmiPkt_InitializeLibrary(NvU32 const hwClass,
    // 2. Constructor calls
    result = NvHdmiPkt_CallConstructors(thisClassId, pClass);

#if defined(DSC_CALLBACK_MODIFIED)
    DSC_CALLBACK callbacks;
    NVMISC_MEMSET(&callbacks, 0, sizeof(DSC_CALLBACK));
    callbacks.clientHandle = pClass;
    callbacks.dscMalloc = hdmipktMallocCb;
    callbacks.dscFree = hdmipktFreeCb;
    DSC_InitializeCallback(callbacks);
#endif // DSC_CALLBACK_MODIFIED

NvHdmiPkt_InitializeLibrary_exit:
    if (result)
    {
@@ -1168,18 +1168,29 @@ frlQuery_Success:
        NvU64 availableLinkBw = (NvU64)(frlBitRateGbps) * (NvU64)(numLanes) * MULTIPLIER_1G;
        warData.connectorType = DSC_HDMI;

        DSC_GENERATE_PPS_OPAQUE_WORKAREA *pDscScratchBuffer = NULL;
        pDscScratchBuffer = (DSC_GENERATE_PPS_OPAQUE_WORKAREA*)pThis->callback.malloc(pThis->cbHandle,
                                                                  sizeof(DSC_GENERATE_PPS_OPAQUE_WORKAREA));

        if ((DSC_GeneratePPS(&dscInfo,
                             &dscModesetInfo,
                             &warData,
                             availableLinkBw,
                             pFRLConfig->dscInfo.pps,
                             &bitsPerPixelX16)) != NVT_STATUS_SUCCESS)
                             &bitsPerPixelX16,
                             pDscScratchBuffer)) != NVT_STATUS_SUCCESS)
        {
            NvHdmiPkt_Print(pThis, "ERROR - DSC PPS calculation failed.");
            NvHdmiPkt_Assert(0);
            result = NVHDMIPKT_FAIL;
        }

        if (pDscScratchBuffer != NULL)
        {
            pThis->callback.free(pThis->cbHandle, pDscScratchBuffer);
            pDscScratchBuffer = NULL;
        }

        // DSC lib should honor the bpp setting passed from the client; assert here just in case
        NvHdmiPkt_Assert(bitsPerPixelX16 == pFRLConfig->dscInfo.bitsPerPixelX16);
    }
@@ -33,20 +33,19 @@
#include "nvt_dsc_pps.h"
#include "nvmisc.h"
#include "displayport/displayport.h"
#include "nvctassert.h"
#include <stddef.h>

/* ------------------------ Macros ----------------------------------------- */

#if defined (DEBUG)
#define DSC_Print(...) \
    do { \
        if (callbacks.dscPrint) { \
            callbacks.dscPrint("DSC: " __VA_ARGS__); \
        } \
    } while(0)
#else
//
// The DSC_Print macro was for debugging purposes in early development of the
// DSC PPS library. The print statements no longer get logged by any client
// logger, but the print lines in this file are useful when browsing the code,
// so DSC_Print is intentionally left as a stub definition to help the reader
// understand the PPS code.
//
#define DSC_Print(...) do { } while(0)
#endif

#define MIN_CHECK(s,a,b) { if((a)<(b)) { DSC_Print("%s (=%u) needs to be larger than %u",s,a,b); return (NVT_STATUS_ERR);} }
#define RANGE_CHECK(s,a,b,c) { if((((NvS32)(a))<(NvS32)(b))||(((NvS32)(a))>(NvS32)(c))) { DSC_Print("%s (=%u) needs to be between %u and %u",s,a,b,c); return (NVT_STATUS_ERR);} }

@@ -171,9 +170,21 @@ typedef struct
    NvU32 flatness_det_thresh;
} DSC_OUTPUT_PARAMS;

/* ------------------------ Global Variables ------------------------------- */
//
// Opaque scratch space is passed by the client for DSC calculation usage.
// An internal struct is used to cast the input buffer
// into in/out params for the DSC PPS calculation functions to work with.
//
typedef struct _DSC_GENERATE_PPS_WORKAREA
{
    DSC_INPUT_PARAMS  in;
    DSC_OUTPUT_PARAMS out;
} DSC_GENERATE_PPS_WORKAREA;

DSC_CALLBACK callbacks;
// Compile-time check to ensure the opaque workarea buffer size always covers the required work area.
ct_assert(sizeof(DSC_GENERATE_PPS_OPAQUE_WORKAREA) >= sizeof(DSC_GENERATE_PPS_WORKAREA));

/* ------------------------ Global Variables ------------------------------- */

static const NvU8 minqp444_8b[15][37]={
    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
@@ -396,8 +407,6 @@ static const NvU32 rcBufThresh[] = { 896, 1792, 2688, 3584, 4480, 5376, 6272, 67
/* ------------------------ Static Variables ------------------------------- */
/* ------------------------ Private Functions Prototype--------------------- */

static void * DSC_Malloc(NvLength size);
static void DSC_Free(void * ptr);
static NvU32
DSC_GetHigherSliceCount
(
@@ -1586,19 +1595,11 @@ static NVT_STATUS
DSC_PpsDataGen
(
    const DSC_INPUT_PARAMS *in,
    NvU32 out[DSC_MAX_PPS_SIZE_DWORD]
    DSC_OUTPUT_PARAMS *pPpsOut,
    NvU32 out[DSC_MAX_PPS_SIZE_DWORD]
)
{
    NVT_STATUS ret;
    DSC_OUTPUT_PARAMS *pPpsOut;

    pPpsOut = (DSC_OUTPUT_PARAMS *)DSC_Malloc(sizeof(DSC_OUTPUT_PARAMS));
    if (pPpsOut == NULL)
    {
        DSC_Print("ERROR - Memory allocation error.");
        ret = NVT_STATUS_NO_MEMORY;
        goto done;
    }

    NVMISC_MEMSET(pPpsOut, 0, sizeof(DSC_OUTPUT_PARAMS));
    ret = DSC_PpsCalc(in, pPpsOut);
@@ -1612,44 +1613,9 @@ DSC_PpsDataGen

    /* fall through */
done:
    DSC_Free(pPpsOut);

    return ret;
}

/*
 * @brief Allocates memory for the requested size
 *
 * @param[in] size Size to be allocated
 *
 * @returns Pointer to the allocated memory
 */
static void *
DSC_Malloc(NvLength size)
{
#if defined(DSC_CALLBACK_MODIFIED)
    return (callbacks.dscMalloc)(callbacks.clientHandle, size);
#else
    return (callbacks.dscMalloc)(size);
#endif // DSC_CALLBACK_MODIFIED
}

/*
 * @brief Frees dynamically allocated memory
 *
 * @param[in] ptr Pointer to the memory to be deallocated
 *
 */
static void
DSC_Free(void * ptr)
{
#if defined(DSC_CALLBACK_MODIFIED)
    (callbacks.dscFree)(callbacks.clientHandle, ptr);
#else
    (callbacks.dscFree)(ptr);
#endif // DSC_CALLBACK_MODIFIED
}

/*
 * @brief Validate the input parameters we got from the caller of this function
 *
@@ -1992,19 +1958,26 @@ DSC_GeneratePPS
    const WAR_DATA *pWARData,
    NvU64 availableBandwidthBitsPerSecond,
    NvU32 pps[DSC_MAX_PPS_SIZE_DWORD],
    NvU32 *pBitsPerPixelX16
    NvU32 *pBitsPerPixelX16,
    DSC_GENERATE_PPS_OPAQUE_WORKAREA *pOpaqueWorkarea
)
{
    DSC_INPUT_PARAMS *in = NULL;
    DSC_INPUT_PARAMS  *in  = NULL;
    DSC_OUTPUT_PARAMS *out = NULL;
    DSC_GENERATE_PPS_WORKAREA *pWorkarea = NULL;
    NVT_STATUS ret = NVT_STATUS_ERR;

    if ((!pDscInfo) || (!pModesetInfo) || (!pBitsPerPixelX16))
    if ((!pDscInfo) || (!pModesetInfo) || (!pBitsPerPixelX16) || (!pOpaqueWorkarea))
    {
        DSC_Print("ERROR - Invalid parameter.");
        ret = NVT_STATUS_INVALID_PARAMETER;
        goto done;
    }

    pWorkarea = (DSC_GENERATE_PPS_WORKAREA*)(pOpaqueWorkarea);
    in  = &pWorkarea->in;
    out = &pWorkarea->out;

    ret = _validateInput(pDscInfo, pModesetInfo, pWARData, availableBandwidthBitsPerSecond);
    if (ret != NVT_STATUS_SUCCESS)
    {
@@ -2013,14 +1986,6 @@ DSC_GeneratePPS
        goto done;
    }

    in = (DSC_INPUT_PARAMS *)DSC_Malloc(sizeof(DSC_INPUT_PARAMS));
    if (in == NULL)
    {
        DSC_Print("ERROR - Memory allocation error.");
        ret = NVT_STATUS_NO_MEMORY;
        goto done;
    }

    NVMISC_MEMSET(in, 0, sizeof(DSC_INPUT_PARAMS));

    in->bits_per_component = pModesetInfo->bitsPerComponent;
@@ -2277,42 +2242,11 @@ DSC_GeneratePPS
        }
    }

    ret = DSC_PpsDataGen(in, pps);
    ret = DSC_PpsDataGen(in, out, pps);

    *pBitsPerPixelX16 = in->bits_per_pixel;

    /* fall through */
done:
    DSC_Free(in);

    return ret;
}

/*
 * @brief Initializes callbacks for print and assert
 *
 * @param[in] callback DSC callbacks
 *
 * @returns NVT_STATUS_SUCCESS if successful;
 *          NVT_STATUS_ERR if unsuccessful;
 */
NVT_STATUS DSC_InitializeCallback(DSC_CALLBACK callback)
{
    // If the callbacks are already initialized, there is nothing to do.
    if (callbacks.dscMalloc && callbacks.dscFree)
    {
        return NVT_STATUS_SUCCESS;
    }

#if defined(DSC_CALLBACK_MODIFIED)
    callbacks.clientHandle = callback.clientHandle;
#endif // DSC_CALLBACK_MODIFIED
    callbacks.dscPrint = NULL;
    callbacks.dscMalloc = callback.dscMalloc;
    callbacks.dscFree = callback.dscFree;
#if defined (DEBUG)
    callbacks.dscPrint = callback.dscPrint;
#endif

    return NVT_STATUS_SUCCESS;
}
@@ -43,27 +43,6 @@

/* ------------------------ Datatypes -------------------------------------- */

#define DSC_CALLBACK_MODIFIED 1

#if defined(DSC_CALLBACK_MODIFIED)
typedef struct
{
    // DSC - Callbacks
    const void* clientHandle;   // clientHandle is only used when calling into the HDMI lib's mallocCb/freeCb
    void (*dscPrint) (const char* fmtstring, ...);
    void *(*dscMalloc)(const void *clientHandle, NvLength size);
    void (*dscFree)  (const void *clientHandle, void * ptr);
} DSC_CALLBACK;
#else
typedef struct
{
    // DSC - Callbacks
    void (*dscPrint) (const char* fmtstring, ...);
    void *(*dscMalloc)(NvLength size);
    void (*dscFree)  (void * ptr);
} DSC_CALLBACK;
#endif // DSC_CALLBACK_MODIFIED

typedef struct
{
    NvU32 versionMajor;

@@ -278,6 +257,16 @@ typedef struct
    }dpData;
} WAR_DATA;

//
// DSC PPS calculations need a large scratch buffer to work with, which can be too
// big for some platforms. These buffers need to be allocated on the heap rather
// than as local stack variables. Clients are expected to pre-allocate
// this buffer and pass it in to the DSC PPS interface.
//
typedef struct {
    NvU8 data[512U];   // an upper bound on the combined size of DSC_INPUT_PARAMS and DSC_OUTPUT_PARAMS
} DSC_GENERATE_PPS_OPAQUE_WORKAREA;
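With this change the caller owns the scratch space, which is exactly what the displayport and HDMI call sites earlier in this diff now do. A minimal sketch of the new contract, with `clientAlloc`/`clientFree` standing in for whatever heap allocator the client uses (hypothetical names; the DSC types, the argument variables, and the `DSC_GeneratePPS` signature come from this header and its callers):

```c
// Allocate the opaque workarea on the heap (it is too large for some stacks),
// run the PPS calculation, then release the buffer regardless of the result.
DSC_GENERATE_PPS_OPAQUE_WORKAREA *pWorkarea =
    (DSC_GENERATE_PPS_OPAQUE_WORKAREA *)clientAlloc(sizeof(*pWorkarea)); // hypothetical allocator
if (pWorkarea == NULL)
    return NVT_STATUS_NO_MEMORY;

NVT_STATUS status = DSC_GeneratePPS(pDscInfo, pModesetInfo, pWARData,
                                    availableBandwidthBitsPerSecond,
                                    pps, pBitsPerPixelX16, pWorkarea);
clientFree(pWorkarea); // hypothetical; free whether or not PPS generation succeeded
```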

/*
 * Windows testbed compiles are done with warnings as errors
 * with the maximum warning level. Here we turn off some

@@ -292,16 +281,6 @@ typedef struct
#ifdef __cplusplus
extern "C" {
#endif
/*
 * @brief Initializes callbacks for print and assert
 *
 * @param[in] callback DSC callbacks
 *
 * @returns NVT_STATUS_SUCCESS if successful;
 *          NVT_STATUS_ERR if unsuccessful;
 */
NVT_STATUS DSC_InitializeCallback(DSC_CALLBACK callback);

/*
 * @brief Calculate PPS parameters based on passed down Sink,
 *        GPU capability and modeset info
@@ -323,7 +302,8 @@ NVT_STATUS DSC_GeneratePPS(const DSC_INFO *pDscInfo,
                           const WAR_DATA *pWARData,
                           NvU64 availableBandwidthBitsPerSecond,
                           NvU32 pps[DSC_MAX_PPS_SIZE_DWORD],
                           NvU32 *pBitsPerPixelX16);
                           NvU32 *pBitsPerPixelX16,
                           DSC_GENERATE_PPS_OPAQUE_WORKAREA *pOpaqueWorkarea);

#ifdef __cplusplus
}
@@ -136,6 +136,15 @@ typedef volatile struct _clcba2_tag0 {
#define NVCBA2_ERROR_OS_APPLICATION                                   (0x0000000D)
#define NVCBA2_ERROR_INVALID_CTXSW_REQUEST                            (0x0000000E)
#define NVCBA2_ERROR_BUFFER_OVERFLOW                                  (0x0000000F)
#define NVCBA2_ERROR_IV_OVERFLOW                                      (0x00000010)
#define NVCBA2_ERROR_INTERNAL_SETUP_FAILURE                           (0x00000011)
#define NVCBA2_ERROR_DECRYPT_COPY_INTERNAL_DMA_FAILURE                (0x00000012)
#define NVCBA2_ERROR_METHOD_STREAM_AUTH_TAG_ADDR_INTERNAL_DMA_FAILURE (0x00000013)
#define NVCBA2_ERROR_METHOD_STREAM_AUTH_TAG_HMAC_CALC_FAILURE         (0x00000014)
#define NVCBA2_ERROR_NONCE_OVERFLOW                                   (0x00000015)
#define NVCBA2_ERROR_AES_GCM_DECRYPTION_FAILURE                       (0x00000016)
#define NVCBA2_ERROR_SEMAPHORE_RELEASE_INTERNAL_DMA_FAILURE           (0x00000017)
#define NVCBA2_ERROR_KEY_DERIVATION_FAILURE                           (0x00000018)
#define NVCBA2_ERROR_SCRUBBER_FAILURE                                 (0x00000019)
#define NVCBA2_ERROR_SCRUBBER_INVALD_ADDRESS                          (0x0000001a)
#define NVCBA2_ERROR_SCRUBBER_INSUFFICIENT_PERMISSIONS                (0x0000001b)
@@ -793,6 +793,37 @@ typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
    NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS];
} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS;

typedef struct NV2080_CTRL_INTERNAL_NV_RANGE {
    NV_DECLARE_ALIGNED(NvU64 lo, 8);
    NV_DECLARE_ALIGNED(NvU64 hi, 8);
} NV2080_CTRL_INTERNAL_NV_RANGE;

/*!
 * NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS
 *
 * This structure specifies a target swizz-id and mem_range to update
 *
 *   swizzId[IN]
 *      - Targeted swizz-id for which the memRange is being set
 *
 *   memAddrRange[IN]
 *      - Memory Range for the given GPU instance
 */
#define NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS_MESSAGE_ID (0x43U)

typedef struct NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS {
    NvU32 swizzId;
    NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_NV_RANGE memAddrRange, 8);
} NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS;

#define NV2080_CTRL_CMD_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE (0x20800a44) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS_MESSAGE_ID" */

#define NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS_MESSAGE_ID (0x44U)

typedef NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS;

#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE (0x20800a43) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS_MESSAGE_ID" */
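As a sketch only (the control-call plumbing, handles, and field values below are assumptions, not part of this diff), promoting a memory range for a GPU instance amounts to filling in the structure documented above and issuing the control command:

```c
NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS params = { 0 };
NV_STATUS status;

params.swizzId         = 2;            // target GPU instance (illustrative value)
params.memAddrRange.lo = 0x0;          // start of the memory range for the instance
params.memAddrRange.hi = 0x3fffffff;   // end of the memory range for the instance

// pRmApi, hClient, and hSubdevice are assumed to come from the surrounding RM context.
status = pRmApi->Control(pRmApi, hClient, hSubdevice,
                         NV2080_CTRL_CMD_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE,
                         &params, sizeof(params));
```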

/**
 * Get GR PDB properties synchronized between Kernel and Physical
 *

@@ -1512,11 +1543,6 @@ typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS {

#define NV2080_CTRL_INTERNAL_MAX_SWIZZ_ID 15

typedef struct NV2080_CTRL_INTERNAL_NV_RANGE {
    NV_DECLARE_ALIGNED(NvU64 lo, 8);
    NV_DECLARE_ALIGNED(NvU64 hi, 8);
} NV2080_CTRL_INTERNAL_NV_RANGE;

#define NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS_MESSAGE_ID (0x60U)

typedef struct NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS {
@@ -60,9 +60,6 @@ typedef struct RM_GSP_SPDM_CC_INIT_CTX {

    NvU64_ALIGN32 dmaAddr;        // The address RM allocates in SYS memory or FB memory.

    NvU32 rmBufferSizeInByte;     // The memory size allocated by RM (excluding NV_SPDM_DESC_HEADER)

} RM_GSP_SPDM_CC_INIT_CTX;
typedef struct RM_GSP_SPDM_CC_INIT_CTX *PRM_GSP_SPDM_CC_INIT_CTX;
@@ -120,7 +120,8 @@
#define ALI_TRAINING_FAIL              (136)
#define NVLINK_FLA_PRIV_ERR            (137)
#define ROBUST_CHANNEL_DLA_ERROR       (138)
#define ROBUST_CHANNEL_LAST_ERROR      (ROBUST_CHANNEL_DLA_ERROR)
#define ROBUST_CHANNEL_FAST_PATH_ERROR (139)
#define ROBUST_CHANNEL_LAST_ERROR      (ROBUST_CHANNEL_FAST_PATH_ERROR)


// Indexed CE reference
@@ -14,7 +14,10 @@ static inline void _get_chip_id_for_alias_pgpu(NvU32 *dev_id, NvU32 *subdev_id)
    } vgpu_aliases[] = {
        { 0x20B5, 0x1642, 0x20B5, 0x1533 },
        { 0x20B8, 0x1581, 0x20B5, 0x1533 },
        { 0x20B7, 0x1804, 0x20B7, 0x1532 },
        { 0x20B7, 0x1852, 0x20B7, 0x1532 },
        { 0x20B9, 0x157F, 0x20B7, 0x1532 },
        { 0x20FD, 0x17F8, 0x20F5, 0x0 },
        { 0x2330, 0x16C0, 0x2330, 0x16C1 },
    };
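The body of `_get_chip_id_for_alias_pgpu` is not shown in this hunk. Under the assumption that it does a linear walk of the table, and with hypothetical field names for the four columns (aliased dev/subdev ID followed by the canonical pair), the canonicalization would look roughly like:

```c
NvU32 i;
for (i = 0; i < sizeof(vgpu_aliases) / sizeof(vgpu_aliases[0]); i++)
{
    // First two columns: the aliased IDs; last two: the canonical IDs.
    if ((vgpu_aliases[i].alias_devid == *dev_id) &&
        (vgpu_aliases[i].alias_subdevid == *subdev_id))
    {
        *dev_id    = vgpu_aliases[i].devid;
        *subdev_id = vgpu_aliases[i].subdevid;
        break;
    }
}
```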
@@ -0,0 +1,147 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifndef LIBOS_V2_CRASHCAT_H
#define LIBOS_V2_CRASHCAT_H

#include "nv-crashcat.h"
#include "nv-crashcat-decoder.h"

// libosv2 implements the CrashCat V1 protocol with the following implementation-defined bits

typedef enum
{
    LibosPanicReasonUnspecified            = 0x00,
    LibosPanicReasonUnrecoverableTaskCrash = 0x01,
    LibosPanicReasonUnhandledState         = 0x02,
    LibosPanicReasonInvalidConfiguration   = 0x03,
    LibosPanicReasonFatalHardwareError     = 0x04,
    LibosPanicReasonInsufficientResources  = 0x05,
    LibosPanicReasonTimeout                = 0x06,
    LibosPanicReasonEnvCallFailed          = 0x07,
    LibosPanicReasonSspStackCheckFailed    = 0x08,
    LibosPanicReasonAsanMemoryError        = 0x09,
    LibosPanicReasonTest                   = 0x0a,
    LibosPanicReasonProgrammingError       = 0x0b,
    LibosPanicReasonDebugAssertionFailed   = 0x0c,
    LibosPanicReasonCount
} LibosPanicReason;

// NV_CRASHCAT_REPORT_IMPLEMENTER_SIGNATURE (bits 63:0) - "LIBOS2.0"
#define NV_CRASHCAT_REPORT_IMPLEMENTER_SIGNATURE_LIBOS2 (0x4C49424F53322E30ull)

// NV_CRASHCAT_REPORT_V1_REPORTER_ID_IMPL_DEF (bits 63:24)
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_LIBOS2_TASK_ID               31:24
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_LIBOS2_TASK_ID_UNSPECIFIED   0xFF
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_LIBOS2_RESERVED              63:32

static NV_INLINE
void crashcatReportV1SetReporterLibos2TaskId(NvCrashCatReport_V1 *pReport, NvU8 task_id)
{
    pReport->reporterId = FLD_SET_DRF_NUM64(_CRASHCAT, _REPORT_V1_REPORTER_ID, _LIBOS2_TASK_ID,
                                            task_id, pReport->reporterId);
}

static NV_INLINE
NvU8 crashcatReportV1ReporterLibos2TaskId(NvCrashCatReport_V1 *pReport)
{
    return (NvU8)DRF_VAL64(_CRASHCAT, _REPORT_V1_REPORTER_ID, _LIBOS2_TASK_ID, pReport->reporterId);
}

// NV_CRASHCAT_REPORT_V1_REPORTER_DATA_VERSION (bits 31:0)
#define NV_CRASHCAT_REPORT_V1_REPORTER_DATA_VERSION_LIBOS2_CL      23:0
#define NV_CRASHCAT_REPORT_V1_REPORTER_DATA_VERSION_LIBOS2_MINOR   27:24
#define NV_CRASHCAT_REPORT_V1_REPORTER_DATA_VERSION_LIBOS2_MAJOR   31:28

static NV_INLINE
void crashcatReportV1SetReporterVersionLibos2(NvCrashCatReport_V1 *pReport, NvU32 cl)
{
    pReport->reporterData = FLD_SET_DRF_NUM64(_CRASHCAT, _REPORT_V1_REPORTER_DATA,
                                              _VERSION_LIBOS2_MAJOR, 2, pReport->reporterData);
    pReport->reporterData = FLD_SET_DRF_NUM64(_CRASHCAT, _REPORT_V1_REPORTER_DATA,
                                              _VERSION_LIBOS2_MINOR, 0, pReport->reporterData);
    pReport->reporterData = FLD_SET_DRF_NUM64(_CRASHCAT, _REPORT_V1_REPORTER_DATA,
                                              _VERSION_LIBOS2_CL, cl, pReport->reporterData);
}

static NV_INLINE
NvU32 crashcatReportV1ReporterVersionLibos2Cl(NvCrashCatReport_V1 *pReport)
{
    return DRF_VAL(_CRASHCAT, _REPORT_V1_REPORTER_DATA_VERSION, _LIBOS2_CL,
                   crashcatReportV1ReporterVersion(pReport));
}

static NV_INLINE
NvU8 crashcatReportV1ReporterVersionLibos2Minor(NvCrashCatReport_V1 *pReport)
{
    return (NvU8)DRF_VAL(_CRASHCAT, _REPORT_V1_REPORTER_DATA_VERSION, _LIBOS2_MINOR,
                         crashcatReportV1ReporterVersion(pReport));
}

static NV_INLINE
NvU8 crashcatReportV1ReporterVersionLibos2Major(NvCrashCatReport_V1 *pReport)
{
    return (NvU8)DRF_VAL(_CRASHCAT, _REPORT_V1_REPORTER_DATA_VERSION, _LIBOS2_MAJOR,
                         crashcatReportV1ReporterVersion(pReport));
}
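A worked example of the version packing (the changelist value is illustrative): the setter stores major 2, minor 0, and the given changelist into `reporterData`, and the accessors above recover each piece:

```c
NvCrashCatReport_V1 report = { 0 };
crashcatReportV1SetReporterVersionLibos2(&report, 0x123456 /* changelist */);
// crashcatReportV1ReporterVersionLibos2Major(&report) == 2
// crashcatReportV1ReporterVersionLibos2Minor(&report) == 0
// crashcatReportV1ReporterVersionLibos2Cl(&report)    == 0x123456
```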

// NV_CRASHCAT_REPORT_V1_SOURCE_ID_IMPL_DEF (63:24)
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_LIBOS2_TASK_ID               31:24
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_LIBOS2_TASK_ID_UNSPECIFIED   0xFF
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_LIBOS2_RESERVED              63:32

static NV_INLINE
void crashcatReportV1SetSourceLibos2TaskId(NvCrashCatReport_V1 *pReport, NvU8 task_id)
{
    pReport->sourceId = FLD_SET_DRF_NUM64(_CRASHCAT, _REPORT_V1_SOURCE_ID, _LIBOS2_TASK_ID, task_id,
                                          pReport->sourceId);
}

static NV_INLINE
NvU8 crashcatReportV1SourceLibos2TaskId(NvCrashCatReport_V1 *pReport)
{
    return (NvU8)DRF_VAL64(_CRASHCAT, _REPORT_V1_SOURCE_ID, _LIBOS2_TASK_ID, pReport->sourceId);
}

// NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_IMPL_DEF (63:32)
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_LIBOS2_REASON     39:32
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_LIBOS2_RESERVED   63:40

ct_assert(LibosPanicReasonCount <=
          NVBIT(DRF_SIZE(NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_LIBOS2_REASON)));

static NV_INLINE
void crashcatReportV1SetSourceCauseLibos2Reason(NvCrashCatReport_V1 *pReport,
                                                LibosPanicReason reason)
{
    pReport->sourceCause = FLD_SET_DRF_NUM64(_CRASHCAT, _REPORT_V1_SOURCE_CAUSE, _LIBOS2_REASON,
                                             reason, pReport->sourceCause);
}

static NV_INLINE
LibosPanicReason crashcatReportV1SourceCauseLibos2Reason(NvCrashCatReport_V1 *pReport)
{
    return (LibosPanicReason)DRF_VAL64(_CRASHCAT, _REPORT_V1_SOURCE_CAUSE, _LIBOS2_REASON,
                                       pReport->sourceCause);
}

#endif // LIBOS_V2_CRASHCAT_H
@@ -0,0 +1,244 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NV_CRASHCAT_DECODER_H
#define NV_CRASHCAT_DECODER_H

#include "nv-crashcat.h"

// This header defines decoder/consumer-side implementation helpers for the CrashCat protocol

//
// Wayfinder L0 Bitfield Accessors
//
static NV_INLINE
NvBool crashcatWayfinderL0Valid(NvCrashCatWayfinderL0_V1 wfl0)
{
    return FLD_TEST_DRF(_CRASHCAT, _WAYFINDER_L0, _SIGNATURE, _VALID, wfl0);
}

static NV_INLINE
NV_CRASHCAT_WAYFINDER_VERSION crashcatWayfinderL0Version(NvCrashCatWayfinderL0_V1 wfl0)
{
    return (NV_CRASHCAT_WAYFINDER_VERSION)DRF_VAL(_CRASHCAT, _WAYFINDER_L0, _VERSION, wfl0);
}

static NV_INLINE
NV_CRASHCAT_SCRATCH_GROUP_ID crashcatWayfinderL0V1Wfl1Location(NvCrashCatWayfinderL0_V1 wfl0)
{
    return (NV_CRASHCAT_SCRATCH_GROUP_ID)DRF_VAL(_CRASHCAT, _WAYFINDER_L0_V1, _WFL1_LOCATION, wfl0);
}

//
// Wayfinder L1 Bitfield Accessors
//
static NV_INLINE
NV_CRASHCAT_MEM_APERTURE crashcatWayfinderL1V1QueueAperture(NvCrashCatWayfinderL1_V1 wfl1)
{
    return (NV_CRASHCAT_MEM_APERTURE)DRF_VAL64(_CRASHCAT, _WAYFINDER_L1_V1, _QUEUE_APERTURE, wfl1);
}

static NV_INLINE
NvLength crashcatWayfinderL1V1QueueSize(NvCrashCatWayfinderL1_V1 wfl1)
{
    NvU8 unitShift;
    NV_CRASHCAT_MEM_UNIT_SIZE unitSize =
        (NV_CRASHCAT_MEM_UNIT_SIZE)DRF_VAL64(_CRASHCAT, _WAYFINDER_L1_V1, _QUEUE_UNIT_SIZE, wfl1);
    switch (unitSize)
    {
        case NV_CRASHCAT_MEM_UNIT_SIZE_1KB:  unitShift = 10; break;
        case NV_CRASHCAT_MEM_UNIT_SIZE_4KB:  unitShift = 12; break;
        case NV_CRASHCAT_MEM_UNIT_SIZE_64KB: unitShift = 16; break;
        default: return 0;
    }

    // Increment size, since the size in the header is size - 1 (queue of 0 size is not encodable)
    return (NvLength)((DRF_VAL64(_CRASHCAT, _WAYFINDER_L1_V1, _QUEUE_SIZE, wfl1) + 1) << unitShift);
}
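For example (assembled with the DRF helpers from nvmisc.h; the values are illustrative): a WFL1 word encoding `_QUEUE_UNIT_SIZE = _1KB` and `_QUEUE_SIZE = 3` decodes to (3 + 1) << 10 = 4096 bytes:

```c
NvCrashCatWayfinderL1_V1 wfl1 =
    DRF_NUM64(_CRASHCAT, _WAYFINDER_L1_V1, _QUEUE_UNIT_SIZE, NV_CRASHCAT_MEM_UNIT_SIZE_1KB) |
    DRF_NUM64(_CRASHCAT, _WAYFINDER_L1_V1, _QUEUE_SIZE, 3);
// crashcatWayfinderL1V1QueueSize(wfl1) == 4096
```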

static NV_INLINE
NvU64 crashcatWayfinderL1V1QueueOffset(NvCrashCatWayfinderL1_V1 wfl1)
{
    return DRF_VAL64(_CRASHCAT, _WAYFINDER_L1_V1, _QUEUE_OFFSET_1KB, wfl1) << 10;
}

//
// CrashCat Packet Header (Unversioned) Bitfield Accessors
//
static NV_INLINE
NV_CRASHCAT_PACKET_FORMAT_VERSION crashcatPacketHeaderFormatVersion(NvCrashCatPacketHeader hdr)
{
    return (NV_CRASHCAT_PACKET_FORMAT_VERSION)DRF_VAL64(_CRASHCAT, _PACKET_HEADER, _FORMAT_VERSION,
                                                        hdr);
}

static NV_INLINE
NvLength crashcatPacketHeaderPayloadSize(NvCrashCatPacketHeader hdr)
{
    NvU8 unitShift;
    NV_CRASHCAT_MEM_UNIT_SIZE unitSize =
        (NV_CRASHCAT_MEM_UNIT_SIZE)DRF_VAL64(_CRASHCAT, _PACKET_HEADER, _PAYLOAD_UNIT_SIZE, hdr);
    switch (unitSize)
    {
        case NV_CRASHCAT_MEM_UNIT_SIZE_8B:   unitShift = 3;  break;
        case NV_CRASHCAT_MEM_UNIT_SIZE_1KB:  unitShift = 10; break;
        case NV_CRASHCAT_MEM_UNIT_SIZE_4KB:  unitShift = 12; break;
        case NV_CRASHCAT_MEM_UNIT_SIZE_64KB: unitShift = 16; break;
        default: return 0;
    }

    // Increment size, since the size in the header is size - 1 (payload of 0 size is not encodable)
    return (NvLength)((DRF_VAL64(_CRASHCAT, _PACKET_HEADER, _PAYLOAD_SIZE, hdr) + 1) << unitShift);
}

static NV_INLINE
NvBool crashcatPacketHeaderValid(NvCrashCatPacketHeader hdr)
{
    return (FLD_TEST_DRF64(_CRASHCAT, _PACKET_HEADER, _SIGNATURE, _VALID, hdr) &&
            (crashcatPacketHeaderFormatVersion(hdr) > 0) &&
            (crashcatPacketHeaderFormatVersion(hdr) <= NV_CRASHCAT_PACKET_FORMAT_VERSION_LAST) &&
            (crashcatPacketHeaderPayloadSize(hdr) > 0));
}

//
// CrashCat Packet Header (V1) Bitfield Accessors
//
static NV_INLINE
NV_CRASHCAT_PACKET_TYPE crashcatPacketHeaderV1Type(NvCrashCatPacketHeader_V1 hdr)
{
    return (NV_CRASHCAT_PACKET_TYPE)DRF_VAL64(_CRASHCAT, _PACKET_HEADER, _V1_TYPE, hdr);
}

//
// CrashCat Report V1 Bitfield Accessors
//
static NV_INLINE
NvCrashCatNvriscvPartition crashcatReportV1ReporterPartition(NvCrashCatReport_V1 *pReport)
{
    return (NvCrashCatNvriscvPartition)DRF_VAL64(_CRASHCAT, _REPORT_V1_REPORTER_ID,
                                                 _NVRISCV_PARTITION, pReport->reporterId);
}

static NV_INLINE
NvCrashCatNvriscvUcodeId crashcatReportV1ReporterUcodeId(NvCrashCatReport_V1 *pReport)
{
    return (NvCrashCatNvriscvUcodeId)DRF_VAL64(_CRASHCAT, _REPORT_V1_REPORTER_ID, _NVRISCV_UCODE_ID,
                                               pReport->reporterId);
}

static NV_INLINE
NV_CRASHCAT_RISCV_MODE crashcatReportV1ReporterMode(NvCrashCatReport_V1 *pReport)
{
    return (NV_CRASHCAT_RISCV_MODE)DRF_VAL64(_CRASHCAT, _REPORT_V1_REPORTER_ID, _RISCV_MODE,
                                             pReport->reporterId);
}

static NV_INLINE
NvU32 crashcatReportV1ReporterVersion(NvCrashCatReport_V1 *pReport)
{
    return (NvU32)DRF_VAL64(_CRASHCAT, _REPORT_V1_REPORTER_DATA, _VERSION, pReport->reporterData);
}

static NV_INLINE
NvU32 crashcatReportV1ReporterTimestamp(NvCrashCatReport_V1 *pReport)
{
    return (NvU32)DRF_VAL64(_CRASHCAT, _REPORT_V1_REPORTER_DATA, _TIMESTAMP, pReport->reporterData);
}


static NV_INLINE
NvCrashCatNvriscvPartition crashcatReportV1SourcePartition(NvCrashCatReport_V1 *pReport)
{
    return (NvCrashCatNvriscvPartition)DRF_VAL64(_CRASHCAT, _REPORT_V1_SOURCE_ID,
                                                 _NVRISCV_PARTITION, pReport->sourceId);
}

static NV_INLINE
NvCrashCatNvriscvUcodeId crashcatReportV1SourceUcodeId(NvCrashCatReport_V1 *pReport)
{
    return (NvCrashCatNvriscvUcodeId)DRF_VAL64(_CRASHCAT, _REPORT_V1_SOURCE_ID, _NVRISCV_UCODE_ID,
                                               pReport->sourceId);
}

static NV_INLINE
NV_CRASHCAT_RISCV_MODE crashcatReportV1SourceMode(NvCrashCatReport_V1 *pReport)
{
    return (NV_CRASHCAT_RISCV_MODE)DRF_VAL64(_CRASHCAT, _REPORT_V1_SOURCE_ID, _RISCV_MODE,
                                             pReport->sourceId);
}

static NV_INLINE
NV_CRASHCAT_CAUSE_TYPE crashcatReportV1SourceCauseType(NvCrashCatReport_V1 *pReport)
{
    return (NV_CRASHCAT_CAUSE_TYPE)DRF_VAL64(_CRASHCAT, _REPORT_V1_SOURCE_CAUSE, _TYPE,
                                             pReport->sourceCause);
}

//
// CrashCat RISC-V 64-bit CSR State V1 Bitfield Accessors
//
static NV_INLINE
NV_CRASHCAT_RISCV_MODE crashcatRiscv64CsrStateV1Mode(NvCrashCatRiscv64CsrState_V1 *pRiscv64CsrState)
{
    return (NV_CRASHCAT_RISCV_MODE)DRF_VAL64(_CRASHCAT, _RISCV64_CSR_STATE_V1_HEADER, _RISCV_MODE,
                                             pRiscv64CsrState->header);
}

//
// CrashCat RISC-V 64-bit GPR State V1 Bitfield Accessors
//
static NV_INLINE
NV_CRASHCAT_RISCV_MODE crashcatRiscv64GprStateV1Mode(NvCrashCatRiscv64GprState_V1 *pRiscv64GprState)
{
    return (NV_CRASHCAT_RISCV_MODE)DRF_VAL64(_CRASHCAT, _RISCV64_GPR_STATE_V1_HEADER, _RISCV_MODE,
                                             pRiscv64GprState->header);
}

//
// CrashCat RISC-V 64-bit Trace V1 Bitfield Accessors
//
static NV_INLINE
NV_CRASHCAT_RISCV_MODE crashcatRiscv64TraceV1Mode(NvCrashCatRiscv64Trace_V1 *pRiscv64Trace)
{
    return (NV_CRASHCAT_RISCV_MODE)DRF_VAL64(_CRASHCAT, _RISCV64_TRACE_V1_HEADER, _RISCV_MODE,
                                             pRiscv64Trace->header);
}

static NV_INLINE
NV_CRASHCAT_TRACE_TYPE crashcatRiscv64TraceV1Type(NvCrashCatRiscv64Trace_V1 *pRiscv64Trace)
{
    return (NV_CRASHCAT_TRACE_TYPE)DRF_VAL64(_CRASHCAT, _RISCV64_TRACE_V1_HEADER, _TRACE_TYPE,
                                             pRiscv64Trace->header);
}

//
// CrashCat 32-bit I/O State V1 Bitfield Accessors
//
static NV_INLINE
NV_CRASHCAT_IO_APERTURE crashcatIo32StateV1Aperture(NvCrashCatIo32State_V1 *pIo32State)
{
    return (NV_CRASHCAT_IO_APERTURE)DRF_VAL64(_CRASHCAT, _IO32_STATE_V1_HEADER, _APERTURE,
                                              pIo32State->header);
}

#endif // NV_CRASHCAT_DECODER_H
@@ -0,0 +1,861 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NV_CRASHCAT_H
#define NV_CRASHCAT_H

#include "nvtypes.h"
#include "nvmisc.h"
#include "nvctassert.h"

//
// NVIDIA CrashCat - Crash Reporting And Signaling Helpers for Peregrine
//
// When a crash occurs on a Peregrine core, NVIDIA firmware may report additional data for post-
// mortem analysis of the crash. The protocol is described in greater detail elsewhere, but in
// general is defined to be a multi-producer/single-consumer flow as follows:
//
// 1. Peregrine writes a wayfinder to a set of predefined registers to indicate a crash report is
//    in progress, and its general location (first crash only)
// 2. Peregrine writes crash report data in a tagged format to a circular queue accessible to both
//    reporter and consumer of the crash reports.
// 3. Peregrine completes the wayfinder by updating a put pointer to indicate the crash report is
//    complete.
// 4. Peregrine raises a beacon interrupt to the consumer to signal the presence of a crash report.
//
// This header contains the shared type and bitfield definitions that are common to both producer
// and consumer sides of the CrashCat protocol.
//

#define NV_CRASHCAT_SIGNATURE 0xdead

//
// CrashCat Wayfinder Protocol Versions
// A new version is created when backward-incompatible changes are made (the wayfinders and queue
// control cannot be handled by software written for a prior version).
//
// This version indicates the handling sequence and format of the wayfinder registers, except for
// the 16-bit signature and (this) 4-bit version number in the L0 wayfinder.
//
typedef enum {
    NV_CRASHCAT_WAYFINDER_VERSION_1    = 0x01,
    NV_CRASHCAT_WAYFINDER_VERSION_LAST = 0x01,
} NV_CRASHCAT_WAYFINDER_VERSION;

//
// CrashCat Packet Format Versions
// A new version is created when backward-incompatible changes are made (packets cannot be handled
// by software written for a prior version).
//
// This version indicates the format of the upper 32 bits of the packet header, and, along with the
// NV_CRASHCAT_PACKET_TYPE, the format of the payload.
//
// The lower 32 bits of the packet header, which include the 16-bit signature, (this) 4-bit
// version number, and 2-bit payload size unit and 10-bit payload size, are not covered by this
// version number and their format must not change.
//
typedef enum {
    NV_CRASHCAT_PACKET_FORMAT_VERSION_1    = 0x01,
    NV_CRASHCAT_PACKET_FORMAT_VERSION_LAST = 0x01
} NV_CRASHCAT_PACKET_FORMAT_VERSION;

// Utility macro for ensuring the maximum enum value will fit in a DRF bitfield
#define STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(e, bf) \
    ct_assert(e ## _LAST < NVBIT(DRF_SIZE(bf)))
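To make the expansion concrete, here is one of the invocations used later in this header, unfolded by hand:

```c
// STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_SCRATCH_GROUP_ID,
//                                     NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION)
// expands to:
ct_assert(NV_CRASHCAT_SCRATCH_GROUP_ID_LAST < NVBIT(DRF_SIZE(22:20)));
// i.e. 0x7 < 8: the 3-bit WFL1 location field can represent every scratch group ID.
```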
|
||||
|
||||
//
|
||||
// The below enum definitions are generally unversioned, and so new values must only be added to
|
||||
// the end, and existing values cannot be changed or removed (except for the _LAST values).
|
||||
// Note that adding a new value may require a new version of the wayfinder protocol or packet
|
||||
// formats that use the enum to accommodate a new maximum value.
|
||||
//
|
||||
|
||||
//
|
||||
// CrashCat Scratch Group Identifier
|
||||
// Each enum value represents an ordered set of one or more scratch registers in the Peregrine IP.
|
||||
// See NV_CRASHCAT_DEF_SCRATCH_GROUP_V1_REGMAP_TABLE for the canonical list of registers in each
|
||||
// scratch group for version 1 of the wayfinder protocol.
|
||||
//
|
||||
// This enum is used by the wayfinder protocol (version 1).
|
||||
//
|
||||
typedef enum {
|
||||
NV_CRASHCAT_SCRATCH_GROUP_ID_NONE = 0x0,
|
||||
NV_CRASHCAT_SCRATCH_GROUP_ID_A = 0x1,
|
||||
NV_CRASHCAT_SCRATCH_GROUP_ID_B = 0x2,
|
||||
NV_CRASHCAT_SCRATCH_GROUP_ID_C = 0x3,
|
||||
NV_CRASHCAT_SCRATCH_GROUP_ID_D = 0x4,
|
||||
NV_CRASHCAT_SCRATCH_GROUP_ID_E = 0x5,
|
||||
NV_CRASHCAT_SCRATCH_GROUP_ID_F = 0x6,
|
||||
|
||||
//
|
||||
// Note: NV_CRASHCAT_SCRATCH_GROUP_ID_IMPL_DEF represents a set of registers which are defined
|
||||
// by the implementer instead of the protocol specification - producer and consumer must
|
||||
// agree on this set definition.
|
||||
//
|
||||
NV_CRASHCAT_SCRATCH_GROUP_ID_IMPL_DEF = 0x7,
|
||||
NV_CRASHCAT_SCRATCH_GROUP_ID_LAST = 0x7
|
||||
} NV_CRASHCAT_SCRATCH_GROUP_ID;
|
||||
|
||||
//
|
||||
// Canonical CrashCat Scratch Group Register Mappings (V1)
|
||||
// This macro defines a designated-initializer table mapping NV_CRASHCAT_SCRATCH_GROUP_ID values to
|
||||
// 0-terminated arrays of register offsets (relative to the NV_PFALCON register space base offset).
|
||||
// This mapping is defined for version 1 of the wayfinder protocol; future versions may use a
|
||||
// different mapping.
|
||||
//
|
||||
// This header does not define, or include any header that defines, the register offset macros used
|
||||
// in the table entries. The caller should include the appropriate header defining these register
|
||||
// offsets before invoking this macro.
|
||||
//
|
||||
// If the implementation intends to use the NV_CRASCHCAT_SCRATCH_GROUP_ID_IMPL_DEF group, it can
|
||||
// invoke NV_CRASHCAT_DEF_SCRATCH_GROUP_V1_REGMAP_TABLE_WITH_IMPL_DEF() macro with the list of
|
||||
// registers to be used for the IMPL_DEF group (up to 4). Example:
|
||||
//
|
||||
// NV_CRASHCAT_DEF_SCRATCH_GROUP_V1_REGMAP_TABLE_WITH_IMPL_DEF(scratchOffsetTable,
|
||||
// NV_PUNIT_REG0, NV_PUNIT_REG1, NV_PUNIT_REG2, NV_PUNIT_REG3);
|
||||
//
|
||||
|
||||
// Maximum number of registers in a scratch group for now
|
||||
#define NV_CRASHCAT_SCRATCH_GROUP_V1_MAX_NUM_REGISTERS 4
|
||||
|
||||
#define NV_CRASHCAT_DEF_SCRATCH_GROUP_V1_REGMAP_TABLE(tblName) \
|
||||
NV_CRASHCAT_DEF_SCRATCH_GROUP_V1_REGMAP_TABLE_WITH_IMPL_DEF(tblName, 0)
|
||||
|
||||
#define NV_CRASHCAT_DEF_SCRATCH_GROUP_V1_REGMAP_TABLE_WITH_IMPL_DEF(tblName, ...) \
|
||||
static const NvU32 tblName[][NV_CRASHCAT_SCRATCH_GROUP_V1_MAX_NUM_REGISTERS + 1] = { \
|
||||
[NV_CRASHCAT_SCRATCH_GROUP_ID_NONE] = {0}, \
|
||||
[NV_CRASHCAT_SCRATCH_GROUP_ID_A] = { \
|
||||
NV_PFALCON_FALCON_MAILBOX0, NV_PFALCON_FALCON_MAILBOX1, 0 }, \
|
||||
[NV_CRASHCAT_SCRATCH_GROUP_ID_B] = { \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_0(0), \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_0(1), \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_0(2), \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_0(3), 0}, \
|
||||
[NV_CRASHCAT_SCRATCH_GROUP_ID_C] = { \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_1(0), \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_1(1), \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_1(2), \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_1(3), 0}, \
|
||||
[NV_CRASHCAT_SCRATCH_GROUP_ID_D] = { \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_2(0), \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_2(1), \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_2(2), \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_2(3), 0}, \
|
||||
[NV_CRASHCAT_SCRATCH_GROUP_ID_E] = { \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_3(0), \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_3(1), \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_3(2), \
|
||||
NV_PFALCON_FALCON_COMMON_SCRATCH_GROUP_3(3), 0}, \
|
||||
[NV_CRASHCAT_SCRATCH_GROUP_ID_F] = {0}, \
|
||||
[NV_CRASHCAT_SCRATCH_GROUP_ID_IMPL_DEF] = { __VA_ARGS__, 0 } \
|
||||
}
|
||||
|
||||
//
|
||||
// CrashCat Memory Aperture Identifier
|
||||
// Each enum value represents a target aperture through which a CrashCat memory buffer can be
|
||||
// accessed.
|
||||
//
|
||||
typedef enum {
|
||||
NV_CRASHCAT_MEM_APERTURE_SYSGPA = 0x0,
|
||||
NV_CRASHCAT_MEM_APERTURE_FBGPA = 0x1,
|
||||
NV_CRASHCAT_MEM_APERTURE_DMEM = 0x2,
|
||||
NV_CRASHCAT_MEM_APERTURE_EMEM = 0x3,
|
||||
NV_CRASHCAT_MEM_APERTURE_LAST = 0x3,
|
||||
NV_CRASHCAT_MEM_APERTURE_UNKNOWN = 0xFF, // Used for error checking/translation failures
|
||||
} NV_CRASHCAT_MEM_APERTURE;
|
||||
|
||||
//
|
||||
// CrashCat Memory Unit Size
|
||||
// Each enum value represents a fixed unit size for a corresponding size field. This is used to
|
||||
// encode buffer sizes in compact register space.
|
||||
//
|
||||
typedef enum {
|
||||
NV_CRASHCAT_MEM_UNIT_SIZE_8B = 0, // 8-byte units
|
||||
NV_CRASHCAT_MEM_UNIT_SIZE_1KB = 1, // 1-kilobyte units
|
||||
NV_CRASHCAT_MEM_UNIT_SIZE_4KB = 2, // 4-kilobyte units
|
||||
NV_CRASHCAT_MEM_UNIT_SIZE_64KB = 3, // 64-kilobyte units
|
||||
NV_CRASHCAT_MEM_UNIT_SIZE_LAST = 3,
|
||||
} NV_CRASHCAT_MEM_UNIT_SIZE;
|
||||
|
||||
//
|
||||
// CrashCat Packet Type
|
||||
// Encoded in the CrashCat packet header to indicate the format of the data.
|
||||
//
|
||||
typedef enum {
|
||||
NV_CRASHCAT_PACKET_TYPE_REPORT = 0x00, // Base CrashCat report packet (required)
|
||||
NV_CRASHCAT_PACKET_TYPE_RISCV64_CSR_STATE = 0x01, // Each 8-byte value is a RISC-V 64-bit CSR
|
||||
NV_CRASHCAT_PACKET_TYPE_RISCV64_GPR_STATE = 0x02, // Each 8-byte value is a RISC-V 64-bit GPR
|
||||
NV_CRASHCAT_PACKET_TYPE_RISCV64_TRACE = 0x03, // Each 8-byte value is a program counter/
|
||||
// virtual address from a RISC-V 64-bit trace
|
||||
NV_CRASHCAT_PACKET_TYPE_IO32_STATE = 0x04, // Each 8-byte value is a 32-bit register
|
||||
// address in the upper bytes combined with
|
||||
// a 32-bit value in the lower bytes
|
||||
NV_CRASHCAT_PACKET_TYPE_LAST = 0x04
|
||||
} NV_CRASHCAT_PACKET_TYPE;
|
||||
|
||||
//
|
||||
// CrashCat RISC-V Mode
|
||||
// Indicates the execution mode of the Peregrine core.
|
||||
// Note: this does not include all RISC-V standard modes, only the ones supported by NVRISC-V.
|
||||
//
|
||||
typedef enum {
|
||||
NV_CRASHCAT_RISCV_MODE_UNSPECIFIED = 0x0,
|
||||
NV_CRASHCAT_RISCV_MODE_M = 0x1, // Machine Mode
|
||||
NV_CRASHCAT_RISCV_MODE_S = 0x2, // Supervisor Mode
|
||||
NV_CRASHCAT_RISCV_MODE_U = 0x3, // User Mode
|
||||
NV_CRASHCAT_RISCV_MODE_LAST = 0x3,
|
||||
} NV_CRASHCAT_RISCV_MODE;
|
||||
|
||||
//
|
||||
// CrashCat Partition
|
||||
// Represents a NVRISC-V microcode partition index
|
||||
//
|
||||
typedef NvU8 NvCrashCatNvriscvPartition;
|
||||
#define NV_CRASHCAT_NVRISCV_PARTITION_UNSPECIFIED NV_U8_MAX
|
||||
|
||||
//
|
||||
// CrashCat Ucode ID
|
||||
// Represents an NVRISC-V microcode ID
|
||||
//
|
||||
typedef NvU8 NvCrashCatNvriscvUcodeId;
|
||||
#define NV_CRASHCAT_NVRISCV_UCODE_ID_UNSPECIFIED NV_U8_MAX
|
||||
|
||||
//
|
||||
// CrashCat Crash Cause Type
|
||||
// Indicates the general nature of the crash cause.
|
||||
//
|
||||
typedef enum {
|
||||
NV_CRASHCAT_CAUSE_TYPE_EXCEPTION = 0x0, // Crash observed via Peregrine trap (exception or
|
||||
// unhandled interrupt)
|
||||
NV_CRASHCAT_CAUSE_TYPE_TIMEOUT = 0x1, // Crash observed via timeout or hang condition
|
||||
NV_CRASHCAT_CAUSE_TYPE_PANIC = 0x2, // Crash observed via direct panic condition
|
||||
NV_CRASHCAT_CAUSE_TYPE_LAST = 0x2
|
||||
} NV_CRASHCAT_CAUSE_TYPE;
|
||||
|
||||
//
|
||||
// CrashCat I/O Aperture Identifier
|
||||
// Indicates the Peregrine MMIO aperture through which register offsets are accessed.
|
||||
//
|
||||
typedef enum {
|
||||
NV_CRASHCAT_IO_APERTURE_NONE = 0x00, // Register offsets are not relative
|
||||
NV_CRASHCAT_IO_APERTURE_INTIO = 0x01, // Register offsets are relative to local I/O base
|
||||
NV_CRASHCAT_IO_APERTURE_EXTIO = 0x02, // Register offsets are relative to external I/O base
|
||||
NV_CRASHCAT_IO_APERTURE_LAST = 0x02
|
||||
} NV_CRASHCAT_IO_APERTURE;
|
||||
|
||||
//
|
||||
// CrashCat Trace Type
|
||||
// Indicates the source of trace data (PC values)
|
||||
//
|
||||
typedef enum {
|
||||
NV_CRASHCAT_TRACE_TYPE_STACK = 0x00, // The PC values are return addresses on a stack, walked
|
||||
// by CrashCat implementation
|
||||
NV_CRASHCAT_TRACE_TYPE_NVRVTB = 0x01, // The PC values are entries from the NVRISC-V PC trace
|
||||
// buffer
|
||||
NV_CRASHCAT_TRACE_TYPE_LAST = 0x01
|
||||
} NV_CRASHCAT_TRACE_TYPE;
|
||||
|
||||
//
|
||||
// CrashCat Wayfinder Protocol is a mechanism for locating crash-reports in a programmatic way,
|
||||
// since available memory for reports may vary across different Peregrines. In V1, the wayfinder
|
||||
// protocol uses a single common scratch register (level 0, A.K.A. WFL0) to point to a secondary
|
||||
// group of scratch registers (level 1, A.K.A. WFL1), which point to the full crash report queue.
|
||||
// The queue is implemented as a circular buffer with classic put/get semantics, controlled through
|
||||
// the wayfinder L1 registers.
|
||||
//
|
||||
// Crash Report Wayfinder Level 0 (NV_CRASHCAT_WAYFINDER_L0)
|
||||
// _SIGNATURE : Initialized to NV_CRASHCAT_SIGNATURE after the level 1 wayfinder is
|
||||
// initialized.
|
||||
// _VERSION : NV_CRASHCAT_WAYFINDER_VERSION value of the protocol implemented for the
|
||||
// crash report wayfinder on this Peregrine (must be consistent with all
|
||||
// implementers on a Peregrine).
|
||||
//
|
||||
// Version 1 Fields:
|
||||
// _V1_WFL1_LOCATION : Contains an NV_CRASHCAT_SCRATCH_GROUP_ID identifying the registers
|
||||
// containing the level 1 wayfinder
|
||||
// _V1_RESERVED : Reserved for future use (currently 0).
|
||||
//
|
||||
typedef NvU32 NvCrashCatWayfinderL0_V1;
|
||||
#define NV_CRASHCAT_WAYFINDER_L0_SIGNATURE 15:0
|
||||
#define NV_CRASHCAT_WAYFINDER_L0_SIGNATURE_VALID NV_CRASHCAT_SIGNATURE
|
||||
#define NV_CRASHCAT_WAYFINDER_L0_VERSION 19:16
|
||||
#define NV_CRASHCAT_WAYFINDER_L0_VERSION_1 NV_CRASHCAT_WAYFINDER_VERSION_1
|
||||
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION 22:20
|
||||
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_NONE NV_CRASHCAT_SCRATCH_GROUP_ID_NONE
|
||||
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_A NV_CRASHCAT_SCRATCH_GROUP_ID_A
|
||||
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_B NV_CRASHCAT_SCRATCH_GROUP_ID_B
|
||||
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_C NV_CRASHCAT_SCRATCH_GROUP_ID_C
|
||||
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_D NV_CRASHCAT_SCRATCH_GROUP_ID_D
|
||||
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_E NV_CRASHCAT_SCRATCH_GROUP_ID_E
|
||||
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_F NV_CRASHCAT_SCRATCH_GROUP_ID_F
|
||||
#define NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION_IMPL_DEF NV_CRASHCAT_SCRATCH_GROUP_ID_IMPL_DEF
|
||||
#define NV_CRASHCAT_WAYFINDER_L0_V1_RESERVED 31:23
|
||||
|
||||
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_WAYFINDER_VERSION,
|
||||
NV_CRASHCAT_WAYFINDER_L0_VERSION);
|
||||
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_SCRATCH_GROUP_ID,
|
||||
NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION);
|
||||
|
||||
//
|
||||
// Crash Report Wayfinder Level 1 and Queue Control Notes
|
||||
// Depending on how many scratch registers are in the set specified by the level 0 wayfinder, the
|
||||
// registers used for the level 1 wayfinder may need to be reused for the queue control registers.
|
||||
//
|
||||
// The first two scratch registers in the set are used to compose the NvCrashCatWayfinderL1_V1
|
||||
// value, with the register with the lower address providing the bottom 32 bits and the register
|
||||
// with the higher address providing the upper 32 bits.
|
||||
//
|
||||
// If four scratch registers are available, the last two are used for the queue put and get
|
||||
// control, respectively. The producer implementation should ensure these are initialized to zero,
|
||||
// and may update the put pointer without any synchronization with the consumer.
|
||||
//
|
||||
// If only two scratch registers are available, the WFL1 registers are reclaimed after they are
|
||||
// decoded by the consumer and used for the queue put and get points. The producer must wait for
|
||||
// the consumer to set the NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION bits of the level 0 wayfinder
|
||||
// to _NONE before writing the put pointer. It is the responsibility of the consumer to clear the
|
||||
// WFL1 registers before updating the level 0 wayfinder - after the producer sees the
|
||||
// NV_CRASHCAT_WAYFINDER_L0_V1_WFL1_LOCATION change to _NONE, it may update the put pointer.
|
||||
//
|
||||
// CrashCat Wayfinder Locking Notes for Implementers
|
||||
// Due to the multi-producer nature of the crash report buffer, accesses to the following registers
|
||||
// must be synchronized when writing to avoid stomping other crash reports or otherwise corrupting
|
||||
// the queue:
|
||||
// - NV_CRASHCAT_WAYFINDER_L0
|
||||
// If the signature is already present when the reporter initializes, the reporter should panic
|
||||
// if the wayfinder protocol version is not the same as what the reporter implements.
|
||||
// Where possible, it is recommended to compile-time assert reporter version consistency.
|
||||
// - NV_CRASHCAT_WAYFINDER_L1
|
||||
// Writes to these registers must be synchronized during initialization by the reporter, to
|
||||
// ensure that only one chooses the value and writes it. If they are already initialized, the
|
||||
// reporter should not need to update them, and should instead queue its crash reports in the
|
||||
// buffer pointed to by these registers.
|
||||
// - NV_CRASHCAT_QUEUE_PUT
|
||||
// This register must be synchronized on during initialization and update by the reporter. The
|
||||
// interface should be locked before the start of writing the crash report and released after
|
||||
// this register is updated.
|
||||
// - NV_CRASHCAT_QUEUE_GET
|
||||
// The (single) consumer controls this register, so no explicit synchronization is needed.
|
||||
// The implementation should initialize to 0 when the level 0 wayfinder is initialized, and not
|
||||
// touch it afterward.
|
||||
//
|
||||
// If no preemption is possible, then it is sufficient for a producer to push packets into the
|
||||
// queue one by one, and only update the put pointer once all packets from the report have been
|
||||
// queued. If the producer can be preempted while queuing report packets, it must hold a lock
|
||||
// synchronizing access to the CrashCat queue while it it pushes all report packets in the queue,
|
||||
// to prevent potential interleaving with packets from other reports.
|
||||
//
|
||||
// It may be advantageous for the Peregrine FMC to receive the report queue location as a boot
|
||||
// argument and initialize the wayfinders accordingly during boot, rather than when a crash is
|
||||
// observed.
|
||||
//

//
// Crash Report Wayfinder Level 1 (NV_CRASHCAT_WAYFINDER_L1) V1
// _QUEUE_APERTURE   : NV_CRASHCAT_MEM_APERTURE value of the aperture through which the queue can
//                     be accessed
// _QUEUE_UNIT_SIZE  : NV_CRASHCAT_MEM_UNIT_SIZE value indicating the units of the _SIZE field
//                     (1KB or greater)
// _RESERVED         : Reserved for future use (currently 0)
// _QUEUE_SIZE       : Size of the queue in _UNIT_SIZE minus 1 (_SIZE = 0 -> queue size is 1 unit)
// _QUEUE_OFFSET_1KB : 1KB-aligned offset of the start of the queue in _QUEUE_APERTURE
//
typedef NvU64 NvCrashCatWayfinderL1_V1;
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_APERTURE          2:0
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_APERTURE_SYSGPA   NV_CRASHCAT_MEM_APERTURE_SYSGPA
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_APERTURE_FBGPA    NV_CRASHCAT_MEM_APERTURE_FBGPA
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_APERTURE_DMEM     NV_CRASHCAT_MEM_APERTURE_DMEM
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_APERTURE_EMEM     NV_CRASHCAT_MEM_APERTURE_EMEM
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_UNIT_SIZE         4:3
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_UNIT_SIZE_1KB     NV_CRASHCAT_MEM_UNIT_SIZE_1KB
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_UNIT_SIZE_4KB     NV_CRASHCAT_MEM_UNIT_SIZE_4KB
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_UNIT_SIZE_64KB    NV_CRASHCAT_MEM_UNIT_SIZE_64KB
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_RESERVED          5:5
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_SIZE              9:6
#define NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_OFFSET_1KB        63:10

STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_MEM_APERTURE,
                                    NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_APERTURE);
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_MEM_UNIT_SIZE,
                                    NV_CRASHCAT_WAYFINDER_L1_V1_QUEUE_UNIT_SIZE);
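
//
// Illustrative decode helpers (a sketch, not part of the protocol header): expand the
// L1 wayfinder fields to byte values using the bit positions documented above. The
// unit-size shifts assume the 1KB/4KB/64KB meanings of NV_CRASHCAT_MEM_UNIT_SIZE; a
// real decoder would typically use the DRF macros from nvmisc.h instead of raw shifts.
//
static NV_INLINE NvU64 crashcatWayfinderL1QueueOffsetBytes(NvCrashCatWayfinderL1_V1 wfl1)
{
    return (wfl1 >> 10) << 10; // _QUEUE_OFFSET_1KB (63:10) is stored in 1KB units
}

static NV_INLINE NvU64 crashcatWayfinderL1QueueSizeBytes(NvCrashCatWayfinderL1_V1 wfl1)
{
    NvU64 sizeMinus1 = (wfl1 >> 6) & 0xF;   // _QUEUE_SIZE (9:6) is stored minus 1

    switch ((NV_CRASHCAT_MEM_UNIT_SIZE)((wfl1 >> 3) & 0x3)) // _QUEUE_UNIT_SIZE (4:3)
    {
        case NV_CRASHCAT_MEM_UNIT_SIZE_1KB:  return (sizeMinus1 + 1) << 10;
        case NV_CRASHCAT_MEM_UNIT_SIZE_4KB:  return (sizeMinus1 + 1) << 12;
        case NV_CRASHCAT_MEM_UNIT_SIZE_64KB: return (sizeMinus1 + 1) << 16;
        default:                             return 0; // 8B is not a valid queue unit
    }
}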

//
// CrashCat Queue Put Pointer
// Offset in bytes into the CrashCat circular queue at which the next crash report will be written.
// Reports may wrap around the end of the buffer to the start.
//
// The implementation should only update the put pointer once all packets from the report have been
// queued. This simplifies the consumer implementation, as it can assume that the report is
// complete once the put pointer is updated.
//

//
// CrashCat Queue Get Pointer
// Offset in bytes into the CrashCat circular queue at which the next crash report will be read by
// the consumer (when get is behind put). The consumer advances this pointer to allow queue memory
// to be reused by subsequent reports.
//

static NV_INLINE NvU32 crashcatQueueFreeBytes(NvU32 put, NvU32 get, NvU32 size)
{
    return (put >= get) ? (size - (put - get)) : (get - put);
}

static NV_INLINE NvU32 crashcatQueueBytesToRead(NvU32 put, NvU32 get, NvU32 size)
{
    return (put >= get) ? (put - get) : (size - (get - put));
}
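
//
// Illustrative producer-side usage (a sketch, not part of the protocol header): check
// for space with crashcatQueueFreeBytes(), copy with wraparound, then return the new
// put offset for the caller to publish in NV_CRASHCAT_QUEUE_PUT. Note the strict '<'
// space check: filling the queue completely would make put == get look empty again.
//
static NV_INLINE NvBool crashcatQueueTryWrite(NvU8 *pQueue, NvU32 size, NvU32 put, NvU32 get,
                                              const NvU8 *pData, NvU32 bytes, NvU32 *pNewPut)
{
    NvU32 i;

    if (!(bytes < crashcatQueueFreeBytes(put, get, size)))
        return NV_FALSE;

    for (i = 0; i < bytes; i++)
        pQueue[(put + i) % size] = pData[i]; // reports may wrap to the start

    *pNewPut = (put + bytes) % size;
    return NV_TRUE;
}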

//
// CrashCat Packet Header (NV_CRASHCAT_PACKET_HEADER)
// _SIGNATURE         : NV_CRASHCAT_SIGNATURE value to indicate the start of a new packet
// _FORMAT_VERSION    : NV_CRASHCAT_PACKET_FORMAT_VERSION value
// _PAYLOAD_UNIT_SIZE : NV_CRASHCAT_MEM_UNIT_SIZE value indicating the units of the
//                      _PAYLOAD_SIZE field
// _PAYLOAD_SIZE      : Size of the packet payload (excluding header) in _PAYLOAD_UNIT_SIZE
//                      minus 1 (_PAYLOAD_SIZE = 0 -> payload size is 1 unit)
// _V1_TYPE           : NV_CRASHCAT_PACKET_TYPE value
// _V1_META           : Additional packet metadata bits specific to the packet type
//
typedef NvU64 NvCrashCatPacketHeader;
typedef NvU64 NvCrashCatPacketHeader_V1;
#define NV_CRASHCAT_PACKET_HEADER_SIGNATURE                 15:0
#define NV_CRASHCAT_PACKET_HEADER_SIGNATURE_VALID           NV_CRASHCAT_SIGNATURE
#define NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION            19:16
#define NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION_1          NV_CRASHCAT_PACKET_FORMAT_VERSION_1
#define NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE         21:20
#define NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_8B      NV_CRASHCAT_MEM_UNIT_SIZE_8B
#define NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_1KB     NV_CRASHCAT_MEM_UNIT_SIZE_1KB
#define NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_4KB     NV_CRASHCAT_MEM_UNIT_SIZE_4KB
#define NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_64KB    NV_CRASHCAT_MEM_UNIT_SIZE_64KB
#define NV_CRASHCAT_PACKET_HEADER_PAYLOAD_SIZE              31:22
#define NV_CRASHCAT_PACKET_HEADER_V1_TYPE                   39:32
#define NV_CRASHCAT_PACKET_HEADER_V1_TYPE_REPORT            NV_CRASHCAT_PACKET_TYPE_REPORT
#define NV_CRASHCAT_PACKET_HEADER_V1_TYPE_RISCV64_CSR_STATE \
    NV_CRASHCAT_PACKET_TYPE_RISCV64_CSR_STATE
#define NV_CRASHCAT_PACKET_HEADER_V1_TYPE_RISCV64_GPR_STATE \
    NV_CRASHCAT_PACKET_TYPE_RISCV64_GPR_STATE
#define NV_CRASHCAT_PACKET_HEADER_V1_TYPE_IO32_STATE        NV_CRASHCAT_PACKET_TYPE_IO32_STATE
#define NV_CRASHCAT_PACKET_HEADER_V1_TYPE_RISCV64_TRACE     NV_CRASHCAT_PACKET_TYPE_RISCV64_TRACE
#define NV_CRASHCAT_PACKET_HEADER_V1_META                   63:40

STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_PACKET_FORMAT_VERSION,
                                    NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION);
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_MEM_UNIT_SIZE,
                                    NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE);
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_PACKET_TYPE,
                                    NV_CRASHCAT_PACKET_HEADER_V1_TYPE);
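
//
// Illustrative sketch (not part of the protocol header): recover a V1 packet's payload
// size in bytes from its header using the bit positions documented above. A real
// decoder would typically use the DRF macros from nvmisc.h rather than raw shifts.
//
static NV_INLINE NvU64 crashcatPacketPayloadBytes(NvCrashCatPacketHeader_V1 header)
{
    NvU64 sizeMinus1 = (header >> 22) & 0x3FF; // _PAYLOAD_SIZE (31:22), stored minus 1

    switch ((NV_CRASHCAT_MEM_UNIT_SIZE)((header >> 20) & 0x3)) // _PAYLOAD_UNIT_SIZE (21:20)
    {
        case NV_CRASHCAT_MEM_UNIT_SIZE_8B:   return (sizeMinus1 + 1) << 3;
        case NV_CRASHCAT_MEM_UNIT_SIZE_1KB:  return (sizeMinus1 + 1) << 10;
        case NV_CRASHCAT_MEM_UNIT_SIZE_4KB:  return (sizeMinus1 + 1) << 12;
        case NV_CRASHCAT_MEM_UNIT_SIZE_64KB: return (sizeMinus1 + 1) << 16;
        default:                             return 0; // not reachable for a 2-bit field
    }
}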

//
// CrashCat Report (NV_CRASHCAT_PACKET_TYPE_REPORT) V1
// A fixed-size packet including
//  8 bytes: packet header (see NV_CRASHCAT_PACKET_HEADER)
//  8 bytes: implementer signature
// 16 bytes: reporter information
// 32 bytes: crash source information
//
// A report packet sets the context for the remaining packets that come after it (until the next
// NV_CRASHCAT_PACKET_TYPE_REPORT packet).
//
typedef struct NvCrashCatReport_V1 {
    //
    // CrashCat Report V1 Header (NV_CRASHCAT_REPORT_V1_HEADER)
    // _SIGNATURE         : NV_CRASHCAT_SIGNATURE value to indicate the start of a new packet
    // _FORMAT_VERSION    : NV_CRASHCAT_PACKET_FORMAT_VERSION_1
    // _PAYLOAD_UNIT_SIZE : NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_8B
    // _PAYLOAD_SIZE      : 6 (56 bytes)
    // _TYPE              : NV_CRASHCAT_PACKET_TYPE_REPORT value
    // _RESERVED          : Reserved for future use (currently 0)
    //
    NvCrashCatPacketHeader_V1 header;
#define NV_CRASHCAT_REPORT_V1_HEADER_SIGNATURE \
    NV_CRASHCAT_PACKET_HEADER_SIGNATURE
#define NV_CRASHCAT_REPORT_V1_HEADER_SIGNATURE_VALID \
    NV_CRASHCAT_PACKET_HEADER_SIGNATURE_VALID
#define NV_CRASHCAT_REPORT_V1_HEADER_FORMAT_VERSION \
    NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION
#define NV_CRASHCAT_REPORT_V1_HEADER_FORMAT_VERSION_VALID \
    NV_CRASHCAT_PACKET_FORMAT_VERSION_1
#define NV_CRASHCAT_REPORT_V1_HEADER_PAYLOAD_UNIT_SIZE \
    NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE
#define NV_CRASHCAT_REPORT_V1_HEADER_PAYLOAD_UNIT_SIZE_VALID \
    NV_CRASHCAT_MEM_UNIT_SIZE_8B
#define NV_CRASHCAT_REPORT_V1_HEADER_PAYLOAD_SIZE \
    NV_CRASHCAT_PACKET_HEADER_PAYLOAD_SIZE
#define NV_CRASHCAT_REPORT_V1_HEADER_PAYLOAD_SIZE_VALID \
    (((sizeof(NvCrashCatReport_V1) - sizeof(NvCrashCatPacketHeader_V1)) >> 3) - 1)
#define NV_CRASHCAT_REPORT_V1_HEADER_PACKET_TYPE \
    NV_CRASHCAT_PACKET_HEADER_V1_TYPE
#define NV_CRASHCAT_REPORT_V1_HEADER_PACKET_TYPE_VALID \
    NV_CRASHCAT_PACKET_TYPE_REPORT
#define NV_CRASHCAT_REPORT_V1_HEADER_RESERVED               63:40

    //
    // CrashCat Report V1 Implementer Signature
    // Provides a unique 64-bit identifier for the decoder to use to interpret the
    // implementation-defined bits
    //
    NvU64 implementerSignature;

    //
    // CrashCat Report V1 Reporter ID (NV_CRASHCAT_REPORT_V1_REPORTER_ID)
    // _NVRISCV_PARTITION : Partition index of the crash reporter (depends on FMC configuration)
    // _NVRISCV_UCODE_ID  : Ucode ID of the crash reporter (read from the relevant curruid
    //                      CSR/field)
    // _RISCV_MODE        : Current RISC-V mode of the crash reporter
    // _IMPL_DEF          : Implementation-defined identifier
    //
    NvU64 reporterId;
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_NVRISCV_PARTITION 7:0
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_NVRISCV_PARTITION_UNSPECIFIED \
    NV_CRASHCAT_NVRISCV_PARTITION_UNSPECIFIED
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_NVRISCV_UCODE_ID  15:8
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_NVRISCV_UCODE_ID_UNSPECIFIED \
    NV_CRASHCAT_NVRISCV_UCODE_ID_UNSPECIFIED
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_RISCV_MODE        18:16
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_RISCV_MODE_M \
    NV_CRASHCAT_RISCV_MODE_M
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_RISCV_MODE_S \
    NV_CRASHCAT_RISCV_MODE_S
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_RISCV_MODE_U \
    NV_CRASHCAT_RISCV_MODE_U
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_RISCV_MODE_UNSPECIFIED \
    NV_CRASHCAT_RISCV_MODE_UNSPECIFIED
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_RESERVED          23:19
#define NV_CRASHCAT_REPORT_V1_REPORTER_ID_IMPL_DEF          63:24

    //
    // CrashCat Report V1 Reporter Data (NV_CRASHCAT_REPORT_V1_REPORTER_DATA)
    // _VERSION   : Implementation-defined version identifier (recommend CL number)
    // _TIMESTAMP : Seconds since epoch (Jan 1, 1970) or cold reset of when the crash report was
    //              generated. Since this value is read from a local clock source, the consumer
    //              is responsible for adjusting this value to a relevant reference point.
    //
    NvU64 reporterData;
#define NV_CRASHCAT_REPORT_V1_REPORTER_DATA_VERSION         31:0
#define NV_CRASHCAT_REPORT_V1_REPORTER_DATA_TIMESTAMP       63:32

    //
    // CrashCat Report V1 Source ID (NV_CRASHCAT_REPORT_V1_SOURCE_ID)
    // _NVRISCV_PARTITION : Partition ID of the crashing code (depends on FMC configuration)
    // _NVRISCV_UCODE_ID  : Ucode ID of the crashing code (read from the relevant curruid
    //                      CSR/field)
    // _RISCV_MODE        : RISC-V mode of the crashing code
    // _IMPL_DEF          : Implementation-defined identifier
    //
    NvU64 sourceId;
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_NVRISCV_PARTITION   7:0
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_NVRISCV_PARTITION_UNSPECIFIED \
    NV_CRASHCAT_NVRISCV_PARTITION_UNSPECIFIED
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_NVRISCV_UCODE_ID    15:8
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_NVRISCV_UCODE_ID_UNSPECIFIED \
    NV_CRASHCAT_NVRISCV_UCODE_ID_UNSPECIFIED
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_RISCV_MODE          18:16
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_RISCV_MODE_M        NV_CRASHCAT_RISCV_MODE_M
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_RISCV_MODE_S        NV_CRASHCAT_RISCV_MODE_S
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_RISCV_MODE_U        NV_CRASHCAT_RISCV_MODE_U
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_RISCV_MODE_UNSPECIFIED \
    NV_CRASHCAT_RISCV_MODE_UNSPECIFIED
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_RESERVED            23:19
#define NV_CRASHCAT_REPORT_V1_SOURCE_ID_IMPL_DEF            63:24

    //
    // CrashCat Report V1 Source Cause (NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE)
    // _TYPE    : CrashCat general failure type for the crash (i.e., how the crash was observed)
    // _RESERVED: Reserved for future use (currently 0)
    // _IMPL_DEF: Implementation-defined reason code for the crash
    //
    NvU64 sourceCause;
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE             3:0
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE_EXCEPTION   NV_CRASHCAT_CAUSE_TYPE_EXCEPTION
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE_TIMEOUT     NV_CRASHCAT_CAUSE_TYPE_TIMEOUT
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE_PANIC       NV_CRASHCAT_CAUSE_TYPE_PANIC
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_RESERVED         31:4
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_IMPL_DEF         63:32

    //
    // CrashCat Report V1 Source PC
    // Program counter of the instruction where the crash occurred
    //
    NvU64 sourcePc;

    //
    // CrashCat Report V1 Source Data
    // Additional crash source data (implementation-defined)
    //
    NvU64 sourceData;
} NvCrashCatReport_V1;

STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_RISCV_MODE,
                                    NV_CRASHCAT_REPORT_V1_REPORTER_ID_RISCV_MODE);
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_RISCV_MODE,
                                    NV_CRASHCAT_REPORT_V1_SOURCE_ID_RISCV_MODE);
STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_CAUSE_TYPE,
                                    NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE);
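
//
// Worked check of the minus-one size encoding above: sizeof(NvCrashCatReport_V1) is
// 64 bytes, the header is 8, so the payload is 56 bytes = 7 units of 8B, and
// _PAYLOAD_SIZE_VALID evaluates to 7 - 1 = 6, matching the "_PAYLOAD_SIZE : 6
// (56 bytes)" documented in the header comment.
//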

//
// CrashCat RISC-V CSR State (NV_CRASHCAT_PACKET_TYPE_RISCV64_CSR_STATE) V1
// A fixed-size packet containing values for RISC-V Control and Status Registers (CSRs) that are
// commonly relevant in debugging crashes.
//
// Note: all CSRs in this structure must be defined in the standard RISC-V specification.
// Do not add NVRISC-V-specific CSRs to this packet.
//
typedef struct NvCrashCatRiscv64CsrState_V1 {
    //
    // CrashCat RISC-V CSR State Header (NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER)
    // _SIGNATURE         : NV_CRASHCAT_SIGNATURE value to indicate the start of a new packet
    // _FORMAT_VERSION    : NV_CRASHCAT_PACKET_FORMAT_VERSION_1
    // _PAYLOAD_UNIT_SIZE : NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_8B
    // _PAYLOAD_SIZE      : 6 (56 bytes)
    // _TYPE              : NV_CRASHCAT_PACKET_TYPE_RISCV64_CSR_STATE
    // _RISCV_MODE        : NV_CRASHCAT_RISCV_MODE value indicating the RISC-V mode in which the
    //                      CSR values were captured
    // _RESERVED          : Reserved for future use (currently 0)
    //
    NvCrashCatPacketHeader_V1 header;
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_SIGNATURE \
    NV_CRASHCAT_PACKET_HEADER_SIGNATURE
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_SIGNATURE_VALID \
    NV_CRASHCAT_PACKET_HEADER_SIGNATURE_VALID
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_FORMAT_VERSION \
    NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_FORMAT_VERSION_VALID \
    NV_CRASHCAT_PACKET_FORMAT_VERSION_1
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_PAYLOAD_UNIT_SIZE \
    NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_PAYLOAD_UNIT_SIZE_VALID \
    NV_CRASHCAT_MEM_UNIT_SIZE_8B
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_PAYLOAD_SIZE \
    NV_CRASHCAT_PACKET_HEADER_PAYLOAD_SIZE
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_PAYLOAD_SIZE_VALID \
    (((sizeof(NvCrashCatRiscv64CsrState_V1) - sizeof(NvCrashCatPacketHeader_V1)) >> 3) \
     - 1)
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_PACKET_TYPE \
    NV_CRASHCAT_PACKET_HEADER_V1_TYPE
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_PACKET_TYPE_VALID \
    NV_CRASHCAT_PACKET_TYPE_RISCV64_CSR_STATE
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_RISCV_MODE              42:40
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_RISCV_MODE_M            NV_CRASHCAT_RISCV_MODE_M
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_RISCV_MODE_S            NV_CRASHCAT_RISCV_MODE_S
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_RISCV_MODE_U            NV_CRASHCAT_RISCV_MODE_U
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_RISCV_MODE_UNSPECIFIED \
    NV_CRASHCAT_RISCV_MODE_UNSPECIFIED
#define NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_RESERVED                63:43

    NvU64 xstatus;  // mstatus or sstatus
    NvU64 xie;      // mie or sie
    NvU64 xip;      // mip or sip
    NvU64 xepc;     // mepc or sepc
    NvU64 xtval;    // mbadaddr, mtval or stval
    NvU64 xcause;   // mcause or scause
    NvU64 xscratch; // mscratch or sscratch
} NvCrashCatRiscv64CsrState_V1;

STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_RISCV_MODE,
                                    NV_CRASHCAT_RISCV64_CSR_STATE_V1_HEADER_RISCV_MODE);

//
// CrashCat RISC-V GPR State (NV_CRASHCAT_PACKET_TYPE_RISCV64_GPR_STATE) V1
// A fixed-size packet containing values for RISC-V general purpose registers (GPRs).
//
// These are defined to match the RISC-V standard calling convention for x1-x31.
// x0 is hardwired to 0, so we don't include it in dumps, and the packet header takes its place.
//
typedef struct NvCrashCatRiscv64GprState_V1 {
    //
    // CrashCat RISC-V GPR State Header (NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER)
    // _SIGNATURE         : NV_CRASHCAT_SIGNATURE value to indicate the start of a new packet
    // _FORMAT_VERSION    : NV_CRASHCAT_PACKET_FORMAT_VERSION_1
    // _PAYLOAD_UNIT_SIZE : NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_8B
    // _PAYLOAD_SIZE      : 30 (248 bytes, i.e., 31 registers of 8 bytes each)
    // _TYPE              : NV_CRASHCAT_PACKET_TYPE_RISCV64_GPR_STATE
    // _RISCV_MODE        : NV_CRASHCAT_RISCV_MODE value indicating the RISC-V mode in which the
    //                      GPR values were captured
    // _RESERVED          : Reserved for future use (currently 0)
    //
    NvCrashCatPacketHeader_V1 header;
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_SIGNATURE \
    NV_CRASHCAT_PACKET_HEADER_SIGNATURE
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_SIGNATURE_VALID \
    NV_CRASHCAT_PACKET_HEADER_SIGNATURE_VALID
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_FORMAT_VERSION \
    NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_FORMAT_VERSION_VALID \
    NV_CRASHCAT_PACKET_FORMAT_VERSION_1
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_PAYLOAD_UNIT_SIZE \
    NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_PAYLOAD_UNIT_SIZE_VALID \
    NV_CRASHCAT_MEM_UNIT_SIZE_8B
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_PAYLOAD_SIZE \
    NV_CRASHCAT_PACKET_HEADER_PAYLOAD_SIZE
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_PAYLOAD_SIZE_VALID \
    (((sizeof(NvCrashCatRiscv64GprState_V1) - sizeof(NvCrashCatPacketHeader_V1)) >> 3) \
     - 1)
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_PACKET_TYPE \
    NV_CRASHCAT_PACKET_HEADER_V1_TYPE
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_PACKET_TYPE_VALID \
    NV_CRASHCAT_PACKET_TYPE_RISCV64_GPR_STATE
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_RISCV_MODE              42:40
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_RISCV_MODE_M            NV_CRASHCAT_RISCV_MODE_M
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_RISCV_MODE_S            NV_CRASHCAT_RISCV_MODE_S
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_RISCV_MODE_U            NV_CRASHCAT_RISCV_MODE_U
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_RISCV_MODE_UNSPECIFIED \
    NV_CRASHCAT_RISCV_MODE_UNSPECIFIED
#define NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_RESERVED                63:43

    NvU64 ra;   // Return address
    NvU64 sp;   // Stack pointer
    NvU64 gp;   // Global pointer
    NvU64 tp;   // Thread pointer
    NvU64 t0;   // Temporary register 0
    NvU64 t1;   // Temporary register 1
    NvU64 t2;   // Temporary register 2
    NvU64 s0;   // Saved register 0
    NvU64 s1;   // Saved register 1
    NvU64 a0;   // Argument/return value register 0
    NvU64 a1;   // Argument/return value register 1
    NvU64 a2;   // Argument register 2
    NvU64 a3;   // Argument register 3
    NvU64 a4;   // Argument register 4
    NvU64 a5;   // Argument register 5
    NvU64 a6;   // Argument register 6
    NvU64 a7;   // Argument register 7
    NvU64 s2;   // Saved register 2
    NvU64 s3;   // Saved register 3
    NvU64 s4;   // Saved register 4
    NvU64 s5;   // Saved register 5
    NvU64 s6;   // Saved register 6
    NvU64 s7;   // Saved register 7
    NvU64 s8;   // Saved register 8
    NvU64 s9;   // Saved register 9
    NvU64 s10;  // Saved register 10
    NvU64 s11;  // Saved register 11
    NvU64 t3;   // Temporary register 3
    NvU64 t4;   // Temporary register 4
    NvU64 t5;   // Temporary register 5
    NvU64 t6;   // Temporary register 6
} NvCrashCatRiscv64GprState_V1;

STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_RISCV_MODE,
                                    NV_CRASHCAT_RISCV64_GPR_STATE_V1_HEADER_RISCV_MODE);

//
// CrashCat Trace (NV_CRASHCAT_PACKET_TYPE_RISCV64_TRACE) V1
// A variable-size packet in which each 64-bit payload value is a virtual address from a trace
// (such as from a stack or PC trace buffer). The packet header metadata includes details to help
// differentiate types of traces.
//
typedef struct NvCrashCatRiscv64Trace_V1 {
    //
    // CrashCat Stack Trace Header (NV_CRASHCAT_RISCV64_TRACE_V1_HEADER)
    // _SIGNATURE         : NV_CRASHCAT_SIGNATURE value to indicate the start of a new packet
    // _FORMAT_VERSION    : NV_CRASHCAT_PACKET_FORMAT_VERSION_1
    // _PAYLOAD_UNIT_SIZE : NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_8B
    // _PAYLOAD_SIZE      : Variable
    // _TYPE              : NV_CRASHCAT_PACKET_TYPE_RISCV64_TRACE
    // _RISCV_MODE        : The NV_CRASHCAT_RISCV_MODE context of the trace (i.e., the RISC-V
    //                      mode in which the trace addresses are relevant)
    // _TRACE_TYPE        : NV_CRASHCAT_TRACE_TYPE value differentiating the trace source
    // _RESERVED          : Reserved for future use (currently 0)
    //
    NvCrashCatPacketHeader_V1 header;
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_SIGNATURE \
    NV_CRASHCAT_PACKET_HEADER_SIGNATURE
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_SIGNATURE_VALID \
    NV_CRASHCAT_PACKET_HEADER_SIGNATURE_VALID
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_FORMAT_VERSION \
    NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_FORMAT_VERSION_VALID \
    NV_CRASHCAT_PACKET_FORMAT_VERSION_1
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_PAYLOAD_UNIT_SIZE \
    NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_PAYLOAD_SIZE \
    NV_CRASHCAT_PACKET_HEADER_PAYLOAD_SIZE
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_PACKET_TYPE \
    NV_CRASHCAT_PACKET_HEADER_V1_TYPE
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_PACKET_TYPE_VALID \
    NV_CRASHCAT_PACKET_TYPE_RISCV64_TRACE
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_RISCV_MODE              42:40
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_RISCV_MODE_M            NV_CRASHCAT_RISCV_MODE_M
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_RISCV_MODE_S            NV_CRASHCAT_RISCV_MODE_S
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_RISCV_MODE_U            NV_CRASHCAT_RISCV_MODE_U
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_RISCV_MODE_UNSPECIFIED \
    NV_CRASHCAT_RISCV_MODE_UNSPECIFIED
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_TRACE_TYPE              43:43
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_TRACE_TYPE_STACK        NV_CRASHCAT_TRACE_TYPE_STACK
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_TRACE_TYPE_NVRVTB       NV_CRASHCAT_TRACE_TYPE_NVRVTB
#define NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_RESERVED                63:44

    NvU64 addr[];
} NvCrashCatRiscv64Trace_V1;

STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_RISCV_MODE,
                                    NV_CRASHCAT_RISCV64_TRACE_V1_HEADER_RISCV_MODE);
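
//
// Illustrative sketch: the number of addresses in a trace packet follows from the
// header's payload size (crashcatPacketPayloadBytes is the illustrative helper
// sketched earlier), since each entry is one 64-bit virtual address.
//
static NV_INLINE NvU64 crashcatRiscv64TraceEntries(const NvCrashCatRiscv64Trace_V1 *pTrace)
{
    return crashcatPacketPayloadBytes(pTrace->header) / sizeof(pTrace->addr[0]);
}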

//
// CrashCat 32-bit I/O State (NV_CRASHCAT_PACKET_TYPE_IO32_STATE) V1
// A variable-size packet in which a 32-bit address and a 32-bit value are encoded into each 64-bit
// payload value.
//
typedef struct NvCrashCatIo32State_V1 {
    //
    // CrashCat 32-bit I/O State Header (NV_CRASHCAT_IO32_STATE_V1_HEADER)
    // _SIGNATURE         : NV_CRASHCAT_SIGNATURE value to indicate the start of a new packet
    // _FORMAT_VERSION    : NV_CRASHCAT_PACKET_FORMAT_VERSION_1
    // _PAYLOAD_UNIT_SIZE : NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE_8B
    // _PAYLOAD_SIZE      : Variable
    // _TYPE              : NV_CRASHCAT_PACKET_TYPE_IO32_STATE
    // _APERTURE          : NV_CRASHCAT_IO_APERTURE value identifying the aperture that the
    //                      offset is relative to
    // _RESERVED          : Reserved for future use (currently 0)
    //
    NvCrashCatPacketHeader_V1 header;
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_SIGNATURE \
    NV_CRASHCAT_PACKET_HEADER_SIGNATURE
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_SIGNATURE_VALID \
    NV_CRASHCAT_PACKET_HEADER_SIGNATURE_VALID
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_FORMAT_VERSION \
    NV_CRASHCAT_PACKET_HEADER_FORMAT_VERSION
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_FORMAT_VERSION_VALID \
    NV_CRASHCAT_PACKET_FORMAT_VERSION_1
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_PAYLOAD_UNIT_SIZE \
    NV_CRASHCAT_PACKET_HEADER_PAYLOAD_UNIT_SIZE
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_PAYLOAD_SIZE \
    NV_CRASHCAT_PACKET_HEADER_PAYLOAD_SIZE
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_PACKET_TYPE \
    NV_CRASHCAT_PACKET_HEADER_V1_TYPE
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_PACKET_TYPE_VALID \
    NV_CRASHCAT_PACKET_TYPE_IO32_STATE
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_APERTURE                   41:40
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_APERTURE_NONE              NV_CRASHCAT_IO_APERTURE_NONE
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_APERTURE_INTIO             NV_CRASHCAT_IO_APERTURE_INTIO
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_APERTURE_EXTIO             NV_CRASHCAT_IO_APERTURE_EXTIO
#define NV_CRASHCAT_IO32_STATE_V1_HEADER_RESERVED                   63:42

    struct {
        NvU32 value;
        NvU32 offset;
    } regs[];
} NvCrashCatIo32State_V1;

STATIC_ASSERT_ENUM_FITS_IN_BITFIELD(NV_CRASHCAT_IO_APERTURE,
                                    NV_CRASHCAT_IO32_STATE_V1_HEADER_APERTURE);
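
//
// Illustrative sketch: each 8-byte payload unit of an I/O state packet holds one
// {value, offset} pair, so the entry count is the payload size in NvU64 units
// (crashcatPacketPayloadBytes is the illustrative helper sketched earlier). Entries
// are then read as pState->regs[i].offset (relative to the header's _APERTURE) and
// pState->regs[i].value.
//
static NV_INLINE NvU64 crashcatIo32StateEntries(const NvCrashCatIo32State_V1 *pState)
{
    return crashcatPacketPayloadBytes(pState->header) / sizeof(pState->regs[0]);
}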

#endif // NV_CRASHCAT_H

@ -137,28 +137,51 @@ typedef struct
    // Boot count. Used to determine whether to load the firmware image.
    NvU64 bootCount;

    // This union is organized the way it is to start at an 8-byte boundary and achieve natural
    // packing of the internal struct fields.
    union
    {
        struct
        {
            // TODO: the partitionRpc* fields below do not really belong in this
            // structure. The values are patched in by the partition bootstrapper
            // when GSP-RM is booted in a partition, and this structure was a
            // convenient place for the bootstrapper to access them. These should
            // be moved to a different comm. mechanism between the bootstrapper
            // and the GSP-RM tasks.

            // Shared partition RPC memory (physical address)
            NvU64 partitionRpcAddr;

            // Offsets relative to partitionRpcAddr
            NvU16 partitionRpcRequestOffset;
            NvU16 partitionRpcReplyOffset;

            // Code section and dataSection offset and size.
            NvU32 elfCodeOffset;
            NvU32 elfDataOffset;
            NvU32 elfCodeSize;
            NvU32 elfDataSize;

            // Used during GSP-RM resume to check for revocation
            NvU32 lsUcodeVersion;
        };

        struct
        {
            // Pad for the partitionRpc* fields, plus 4 bytes
            NvU32 partitionRpcPadding[4];

            // CrashCat (contiguous) buffer size/location - occupies same bytes as the
            // elf(Code|Data)(Offset|Size) fields above.
            // TODO: move to GSP_FMC_INIT_PARAMS
            NvU64 sysmemAddrOfCrashReportQueue;
            NvU32 sizeOfCrashReportQueue;

            // Pad for the lsUcodeVersion field
            NvU32 lsUcodeVersionPadding[1];
        };
    };

    // Number of VF partitions allocating sub-heaps from the WPR heap
    // Used during boot to ensure the heap is adequately sized
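
    // Layout arithmetic (editorial note, illustrative only): both union members span
    // 32 bytes. First member: 8 (partitionRpcAddr) + 2 + 2 (NvU16 offsets) + 4 * 4
    // (elf* fields) + 4 (lsUcodeVersion). Second member: 16 (partitionRpcPadding[4],
    // i.e., the 12 bytes of partitionRpc* fields "plus 4 bytes") + 8 + 4 (crash report
    // queue address/size) + 4 (lsUcodeVersionPadding), which also places the NvU64
    // queue address at offset 16, satisfying its natural 8-byte alignment.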
@ -5332,14 +5332,16 @@ NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle(
)
{
    THREAD_STATE_NODE threadState;
    NV_STATUS rmStatus = NV_OK;
    NV_STATUS rmStatus = NV_ERR_INVALID_ARGUMENT;
    OBJGPU *pGpu;
    void *fp;

    NV_ENTER_RM_RUNTIME(sp,fp);
    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);

    NV_ASSERT_OR_GOTO(((ppRanges != NULL) && (pRangeCount != NULL)), Done);
    NV_ASSERT_OR_GOTO(((ppRanges != NULL) &&
                       (pRangeCount != NULL) &&
                       (pStaticMemInfo != NULL)), Done);

    pGpu = NV_GET_NV_PRIV_PGPU(nv);

@ -5347,12 +5349,54 @@ NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle(
    {
        KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu);
        MEMORY_DESCRIPTOR *pMemDesc = (MEMORY_DESCRIPTOR *) pStaticMemInfo;
        NvU32 pageSize = 0;
        NvU32 memdescPageSize = memdescGetPageSize(pMemDesc, AT_GPU);
        NvU64 prologueOffset = offset;
        NvU64 prologueSize = 0;
        NvU64 epilogueOffset = offset;
        NvU64 epilogueSize = 0;
        NvU64 mainOffset = offset;
        NvU64 mainSize = 0;
        NvU32 mainPageCount = 0;
        NvU64 alignedOffset;
        NvU32 pageCount = 0;
        NvU32 i = 0;
        NvU32 index = 0;

        pageSize = memdescGetPageSize(pMemDesc, AT_GPU);
        pageCount = size / pageSize;
        alignedOffset = NV_ALIGN_UP64(offset, memdescPageSize);

        if ((size > 0) && offset != alignedOffset)
        {
            prologueOffset = offset;
            prologueSize = NV_MIN(alignedOffset - offset, size);
            pageCount++;

            size -= prologueSize;
        }

        if (size > 0)
        {
            mainOffset = prologueOffset + prologueSize;
            mainSize = NV_ALIGN_DOWN64(size, memdescPageSize);
            mainPageCount = mainSize / memdescPageSize;
            pageCount += mainPageCount;

            size -= mainSize;
        }

        if (size > 0)
        {
            epilogueOffset = mainOffset + mainSize;
            epilogueSize = size;
            pageCount++;

            size -= epilogueSize;
        }

        if ((pageCount == 0) || (size != 0))
        {
            NV_ASSERT(0);
            rmStatus = NV_ERR_INVALID_STATE;
            goto Done;
        }

        rmStatus = os_alloc_mem((void **) ppRanges,
                                pageCount * sizeof(nv_phys_addr_range_t));

@ -5361,15 +5405,39 @@ NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle(
            goto Done;
        }

        for (i = 0; i < pageCount; i++)
        // Fill the first unaligned segment
        if (prologueSize > 0)
        {
            NvU64 physAddr = memdescGetPhysAddr(pMemDesc, AT_CPU, offset);
            NvU64 physAddr = memdescGetPhysAddr(pMemDesc, AT_CPU, prologueOffset);
            (*ppRanges)[0].addr = pKernelMemorySystem->coherentCpuFbBase + physAddr;
            (*ppRanges)[0].len = prologueSize;

            (*ppRanges)[i].addr = pKernelMemorySystem->coherentCpuFbBase + physAddr;
            (*ppRanges)[i].len = pageSize;

            offset += pageSize;
            index = 1;
        }

        // Fill the aligned segments between first and last entries
        while (mainPageCount != 0)
        {
            NvU64 physAddr = memdescGetPhysAddr(pMemDesc, AT_CPU, alignedOffset);
            (*ppRanges)[index].addr = pKernelMemorySystem->coherentCpuFbBase + physAddr;
            (*ppRanges)[index].len = memdescPageSize;
            index++;

            alignedOffset += memdescPageSize;
            mainPageCount--;
        }

        // Fill the last unaligned segment
        if (epilogueSize > 0)
        {
            NvU64 physAddr = memdescGetPhysAddr(pMemDesc, AT_CPU, epilogueOffset);
            (*ppRanges)[index].addr = pKernelMemorySystem->coherentCpuFbBase + physAddr;
            (*ppRanges)[index].len = epilogueSize;
            index++;
        }

        NV_ASSERT(index == pageCount);

        *pRangeCount = pageCount;
    }
    else
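
// Worked example of the three-segment split above (illustrative values): with
// memdescPageSize = 0x10000 (64KB), offset = 0x18000, size = 0x52000:
// alignedOffset = 0x20000, so the prologue covers 0x8000 bytes up to the first
// page boundary, the main segment covers NV_ALIGN_DOWN64(0x4A000, 0x10000) =
// 0x40000 bytes (4 full pages), and the epilogue covers the remaining 0xA000
// bytes; pageCount = 1 + 4 + 1 = 6 ranges, and size drains to 0 as required by
// the (pageCount == 0) || (size != 0) sanity check.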

File diff suppressed because it is too large
File diff suppressed because it is too large

@ -0,0 +1,123 @@
#define NVOC_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_crashcat_engine_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x654166 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine;

void __nvoc_init_CrashCatEngine(CrashCatEngine*);
void __nvoc_init_funcTable_CrashCatEngine(CrashCatEngine*);
NV_STATUS __nvoc_ctor_CrashCatEngine(CrashCatEngine*);
void __nvoc_init_dataField_CrashCatEngine(CrashCatEngine*);
void __nvoc_dtor_CrashCatEngine(CrashCatEngine*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatEngine;

static const struct NVOC_RTTI __nvoc_rtti_CrashCatEngine_CrashCatEngine = {
    /*pClassDef=*/          &__nvoc_class_def_CrashCatEngine,
    /*dtor=*/               (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_CrashCatEngine,
    /*offset=*/             0,
};

static const struct NVOC_CASTINFO __nvoc_castinfo_CrashCatEngine = {
    /*numRelatives=*/       1,
    /*relatives=*/ {
        &__nvoc_rtti_CrashCatEngine_CrashCatEngine,
    },
};

// Not instantiable because it's not derived from class "Object"
// Not instantiable because it's an abstract class with following pure virtual functions:
//  crashcatEngineConfigured
//  crashcatEngineVprintf
//  crashcatEnginePriRead
//  crashcatEnginePriWrite
//  crashcatEngineMapBufferDescriptor
//  crashcatEngineUnmapBufferDescriptor
//  crashcatEngineSyncBufferDescriptor
//  crashcatEngineGetScratchOffsets
//  crashcatEngineGetWFL0Offset
const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine =
{
    /*classInfo=*/ {
        /*size=*/               sizeof(CrashCatEngine),
        /*classId=*/            classId(CrashCatEngine),
        /*providerId=*/         &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/               "CrashCatEngine",
#endif
    },
    /*objCreatefn=*/        (NVOC_DYNAMIC_OBJ_CREATE) NULL,
    /*pCastInfo=*/          &__nvoc_castinfo_CrashCatEngine,
    /*pExportInfo=*/        &__nvoc_export_info_CrashCatEngine
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatEngine =
{
    /*numEntries=*/         0,
    /*pExportEntries=*/     0
};

void __nvoc_dtor_CrashCatEngine(CrashCatEngine *pThis) {
    __nvoc_crashcatEngineDestruct(pThis);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_CrashCatEngine(CrashCatEngine *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_CrashCatEngine(CrashCatEngine *pThis) {
    NV_STATUS status = NV_OK;
    __nvoc_init_dataField_CrashCatEngine(pThis);

    status = __nvoc_crashcatEngineConstruct(pThis);
    if (status != NV_OK) goto __nvoc_ctor_CrashCatEngine_fail__init;
    goto __nvoc_ctor_CrashCatEngine_exit; // Success

__nvoc_ctor_CrashCatEngine_fail__init:
__nvoc_ctor_CrashCatEngine_exit:

    return status;
}

static void __nvoc_init_funcTable_CrashCatEngine_1(CrashCatEngine *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__crashcatEngineUnload__ = &crashcatEngineUnload_IMPL;

    pThis->__crashcatEngineConfigured__ = NULL;

    pThis->__crashcatEngineVprintf__ = NULL;

    pThis->__crashcatEnginePriRead__ = NULL;

    pThis->__crashcatEnginePriWrite__ = NULL;

    pThis->__crashcatEngineMapBufferDescriptor__ = NULL;

    pThis->__crashcatEngineUnmapBufferDescriptor__ = NULL;

    pThis->__crashcatEngineSyncBufferDescriptor__ = NULL;

    pThis->__crashcatEngineGetScratchOffsets__ = NULL;

    pThis->__crashcatEngineGetWFL0Offset__ = NULL;
}

void __nvoc_init_funcTable_CrashCatEngine(CrashCatEngine *pThis) {
    __nvoc_init_funcTable_CrashCatEngine_1(pThis);
}

void __nvoc_init_CrashCatEngine(CrashCatEngine *pThis) {
    pThis->__nvoc_pbase_CrashCatEngine = pThis;
    __nvoc_init_funcTable_CrashCatEngine(pThis);
}
@ -0,0 +1,287 @@
#ifndef _G_CRASHCAT_ENGINE_NVOC_H_
#define _G_CRASHCAT_ENGINE_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "g_crashcat_engine_nvoc.h"

#ifndef CRASHCAT_ENGINE_H
#define CRASHCAT_ENGINE_H

#include "containers/map.h"
#include "nvoc/object.h"
#include "nvport/inline/util_valist.h"
#include "nv-crashcat.h"

struct CrashCatReport;

#ifndef __NVOC_CLASS_CrashCatReport_TYPEDEF__
#define __NVOC_CLASS_CrashCatReport_TYPEDEF__
typedef struct CrashCatReport CrashCatReport;
#endif /* __NVOC_CLASS_CrashCatReport_TYPEDEF__ */

#ifndef __nvoc_class_id_CrashCatReport
#define __nvoc_class_id_CrashCatReport 0xde4777
#endif /* __nvoc_class_id_CrashCatReport */


struct CrashCatWayfinder;

#ifndef __NVOC_CLASS_CrashCatWayfinder_TYPEDEF__
#define __NVOC_CLASS_CrashCatWayfinder_TYPEDEF__
typedef struct CrashCatWayfinder CrashCatWayfinder;
#endif /* __NVOC_CLASS_CrashCatWayfinder_TYPEDEF__ */

#ifndef __nvoc_class_id_CrashCatWayfinder
#define __nvoc_class_id_CrashCatWayfinder 0x085e32
#endif /* __nvoc_class_id_CrashCatWayfinder */


typedef struct {
    void *pEngPriv;
    void *pMapping;
    NvBool bRegistered;
    NV_CRASHCAT_MEM_APERTURE aperture;
    NvU64 physOffset;
    NvLength size;
    MapNode registeredBufferMapNode;
    MapNode mappedBufferMapNode;
} CrashCatBufferDescriptor;

MAKE_INTRUSIVE_MAP(CrashCatRegisteredBufferMap, CrashCatBufferDescriptor, registeredBufferMapNode);
MAKE_INTRUSIVE_MAP(CrashCatMappedBufferMap, CrashCatBufferDescriptor, mappedBufferMapNode);

// Base class for engine-specific accessors - must be implemented by the host codebase.
#ifdef NVOC_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct CrashCatEngine {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct CrashCatEngine *__nvoc_pbase_CrashCatEngine;
    void (*__crashcatEngineUnload__)(struct CrashCatEngine *);
    NvBool (*__crashcatEngineConfigured__)(struct CrashCatEngine *);
    void (*__crashcatEngineVprintf__)(struct CrashCatEngine *, NvBool, const char *, va_list);
    NvU32 (*__crashcatEnginePriRead__)(struct CrashCatEngine *, NvU32);
    void (*__crashcatEnginePriWrite__)(struct CrashCatEngine *, NvU32, NvU32);
    void *(*__crashcatEngineMapBufferDescriptor__)(struct CrashCatEngine *, CrashCatBufferDescriptor *);
    void (*__crashcatEngineUnmapBufferDescriptor__)(struct CrashCatEngine *, CrashCatBufferDescriptor *);
    void (*__crashcatEngineSyncBufferDescriptor__)(struct CrashCatEngine *, CrashCatBufferDescriptor *, NvU32, NvU32);
    const NvU32 *(*__crashcatEngineGetScratchOffsets__)(struct CrashCatEngine *, NV_CRASHCAT_SCRATCH_GROUP_ID);
    NvU32 (*__crashcatEngineGetWFL0Offset__)(struct CrashCatEngine *);
    NvBool PRIVATE_FIELD(bEnabled);
    struct CrashCatWayfinder *PRIVATE_FIELD(pWayfinder);
    CrashCatRegisteredBufferMap PRIVATE_FIELD(registeredCrashBuffers);
    CrashCatMappedBufferMap PRIVATE_FIELD(mappedCrashBuffers);
};

#ifndef __NVOC_CLASS_CrashCatEngine_TYPEDEF__
#define __NVOC_CLASS_CrashCatEngine_TYPEDEF__
typedef struct CrashCatEngine CrashCatEngine;
#endif /* __NVOC_CLASS_CrashCatEngine_TYPEDEF__ */

#ifndef __nvoc_class_id_CrashCatEngine
#define __nvoc_class_id_CrashCatEngine 0x654166
#endif /* __nvoc_class_id_CrashCatEngine */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine;

#define __staticCast_CrashCatEngine(pThis) \
    ((pThis)->__nvoc_pbase_CrashCatEngine)

#ifdef __nvoc_crashcat_engine_h_disabled
#define __dynamicCast_CrashCatEngine(pThis) ((CrashCatEngine*)NULL)
#else //__nvoc_crashcat_engine_h_disabled
#define __dynamicCast_CrashCatEngine(pThis) \
    ((CrashCatEngine*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(CrashCatEngine)))
#endif //__nvoc_crashcat_engine_h_disabled


NV_STATUS __nvoc_objCreateDynamic_CrashCatEngine(CrashCatEngine**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_CrashCatEngine(CrashCatEngine**, Dynamic*, NvU32);
#define __objCreate_CrashCatEngine(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_CrashCatEngine((ppNewObj), staticCast((pParent), Dynamic), (createFlags))

#define crashcatEngineUnload(arg0) crashcatEngineUnload_DISPATCH(arg0)
#define crashcatEngineConfigured(arg0) crashcatEngineConfigured_DISPATCH(arg0)
#define crashcatEngineVprintf(arg0, bReportStart, fmt, args) crashcatEngineVprintf_DISPATCH(arg0, bReportStart, fmt, args)
#define crashcatEnginePriRead(arg0, offset) crashcatEnginePriRead_DISPATCH(arg0, offset)
#define crashcatEnginePriWrite(arg0, offset, data) crashcatEnginePriWrite_DISPATCH(arg0, offset, data)
#define crashcatEngineMapBufferDescriptor(arg0, pBufDesc) crashcatEngineMapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define crashcatEngineUnmapBufferDescriptor(arg0, pBufDesc) crashcatEngineUnmapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define crashcatEngineSyncBufferDescriptor(arg0, pBufDesc, offset, size) crashcatEngineSyncBufferDescriptor_DISPATCH(arg0, pBufDesc, offset, size)
#define crashcatEngineGetScratchOffsets(arg0, scratchId) crashcatEngineGetScratchOffsets_DISPATCH(arg0, scratchId)
#define crashcatEngineGetWFL0Offset(arg0) crashcatEngineGetWFL0Offset_DISPATCH(arg0)
void crashcatEngineUnload_IMPL(struct CrashCatEngine *arg0);

static inline void crashcatEngineUnload_DISPATCH(struct CrashCatEngine *arg0) {
    arg0->__crashcatEngineUnload__(arg0);
}

static inline NvBool crashcatEngineConfigured_DISPATCH(struct CrashCatEngine *arg0) {
    return arg0->__crashcatEngineConfigured__(arg0);
}

static inline void crashcatEngineVprintf_DISPATCH(struct CrashCatEngine *arg0, NvBool bReportStart, const char *fmt, va_list args) {
    arg0->__crashcatEngineVprintf__(arg0, bReportStart, fmt, args);
}

static inline NvU32 crashcatEnginePriRead_DISPATCH(struct CrashCatEngine *arg0, NvU32 offset) {
    return arg0->__crashcatEnginePriRead__(arg0, offset);
}

static inline void crashcatEnginePriWrite_DISPATCH(struct CrashCatEngine *arg0, NvU32 offset, NvU32 data) {
    arg0->__crashcatEnginePriWrite__(arg0, offset, data);
}

static inline void *crashcatEngineMapBufferDescriptor_DISPATCH(struct CrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc) {
    return arg0->__crashcatEngineMapBufferDescriptor__(arg0, pBufDesc);
}

static inline void crashcatEngineUnmapBufferDescriptor_DISPATCH(struct CrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc) {
    arg0->__crashcatEngineUnmapBufferDescriptor__(arg0, pBufDesc);
}

static inline void crashcatEngineSyncBufferDescriptor_DISPATCH(struct CrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
    arg0->__crashcatEngineSyncBufferDescriptor__(arg0, pBufDesc, offset, size);
}

static inline const NvU32 *crashcatEngineGetScratchOffsets_DISPATCH(struct CrashCatEngine *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchId) {
    return arg0->__crashcatEngineGetScratchOffsets__(arg0, scratchId);
}

static inline NvU32 crashcatEngineGetWFL0Offset_DISPATCH(struct CrashCatEngine *arg0) {
    return arg0->__crashcatEngineGetWFL0Offset__(arg0);
}
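
// Illustrative usage (not part of the generated file): callers invoke the virtuals
// through the dispatch macros above. For example, a decoder reading a 64-bit scratch
// value might do the following (pEngine and scratchId are hypothetical):
//
//     const NvU32 *pOffsets = crashcatEngineGetScratchOffsets(pEngine, scratchId);
//     NvU32 lo = crashcatEnginePriRead(pEngine, pOffsets[0]);
//     NvU32 hi = crashcatEnginePriRead(pEngine, pOffsets[1]);
//     NvU64 value = ((NvU64)hi << 32) | lo;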

NV_STATUS crashcatEngineConstruct_IMPL(struct CrashCatEngine *arg_);

#define __nvoc_crashcatEngineConstruct(arg_) crashcatEngineConstruct_IMPL(arg_)
void crashcatEngineDestruct_IMPL(struct CrashCatEngine *arg0);

#define __nvoc_crashcatEngineDestruct(arg0) crashcatEngineDestruct_IMPL(arg0)
struct CrashCatReport *crashcatEngineGetNextCrashReport_IMPL(struct CrashCatEngine *arg0);

#ifdef __nvoc_crashcat_engine_h_disabled
static inline struct CrashCatReport *crashcatEngineGetNextCrashReport(struct CrashCatEngine *arg0) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatEngine was disabled!");
    return NULL;
}
#else //__nvoc_crashcat_engine_h_disabled
#define crashcatEngineGetNextCrashReport(arg0) crashcatEngineGetNextCrashReport_IMPL(arg0)
#endif //__nvoc_crashcat_engine_h_disabled

NV_STATUS crashcatEngineRegisterCrashBuffer_IMPL(struct CrashCatEngine *arg0, NV_CRASHCAT_MEM_APERTURE aperture, NvU64 offset, NvU64 size, void *pEngPriv);

#ifdef __nvoc_crashcat_engine_h_disabled
static inline NV_STATUS crashcatEngineRegisterCrashBuffer(struct CrashCatEngine *arg0, NV_CRASHCAT_MEM_APERTURE aperture, NvU64 offset, NvU64 size, void *pEngPriv) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatEngine was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_crashcat_engine_h_disabled
#define crashcatEngineRegisterCrashBuffer(arg0, aperture, offset, size, pEngPriv) crashcatEngineRegisterCrashBuffer_IMPL(arg0, aperture, offset, size, pEngPriv)
#endif //__nvoc_crashcat_engine_h_disabled

void crashcatEngineUnregisterCrashBuffer_IMPL(struct CrashCatEngine *arg0, NV_CRASHCAT_MEM_APERTURE aperture, NvU64 offset, NvU64 size);

#ifdef __nvoc_crashcat_engine_h_disabled
static inline void crashcatEngineUnregisterCrashBuffer(struct CrashCatEngine *arg0, NV_CRASHCAT_MEM_APERTURE aperture, NvU64 offset, NvU64 size) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatEngine was disabled!");
}
#else //__nvoc_crashcat_engine_h_disabled
#define crashcatEngineUnregisterCrashBuffer(arg0, aperture, offset, size) crashcatEngineUnregisterCrashBuffer_IMPL(arg0, aperture, offset, size)
#endif //__nvoc_crashcat_engine_h_disabled

void *crashcatEngineMapCrashBuffer_IMPL(struct CrashCatEngine *arg0, NV_CRASHCAT_MEM_APERTURE aperture, NvU64 offset, NvU64 size);

#ifdef __nvoc_crashcat_engine_h_disabled
static inline void *crashcatEngineMapCrashBuffer(struct CrashCatEngine *arg0, NV_CRASHCAT_MEM_APERTURE aperture, NvU64 offset, NvU64 size) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatEngine was disabled!");
    return NULL;
}
#else //__nvoc_crashcat_engine_h_disabled
#define crashcatEngineMapCrashBuffer(arg0, aperture, offset, size) crashcatEngineMapCrashBuffer_IMPL(arg0, aperture, offset, size)
#endif //__nvoc_crashcat_engine_h_disabled

void crashcatEngineUnmapCrashBuffer_IMPL(struct CrashCatEngine *arg0, void *ptr);

#ifdef __nvoc_crashcat_engine_h_disabled
static inline void crashcatEngineUnmapCrashBuffer(struct CrashCatEngine *arg0, void *ptr) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatEngine was disabled!");
}
#else //__nvoc_crashcat_engine_h_disabled
#define crashcatEngineUnmapCrashBuffer(arg0, ptr) crashcatEngineUnmapCrashBuffer_IMPL(arg0, ptr)
#endif //__nvoc_crashcat_engine_h_disabled

void crashcatEngineSyncCrashBuffer_IMPL(struct CrashCatEngine *arg0, void *ptr, NvU32 offset, NvU32 size);

#ifdef __nvoc_crashcat_engine_h_disabled
static inline void crashcatEngineSyncCrashBuffer(struct CrashCatEngine *arg0, void *ptr, NvU32 offset, NvU32 size) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatEngine was disabled!");
}
#else //__nvoc_crashcat_engine_h_disabled
#define crashcatEngineSyncCrashBuffer(arg0, ptr, offset, size) crashcatEngineSyncCrashBuffer_IMPL(arg0, ptr, offset, size)
#endif //__nvoc_crashcat_engine_h_disabled

NV_STATUS crashcatEngineLoadWayfinder_IMPL(struct CrashCatEngine *arg0);

#ifdef __nvoc_crashcat_engine_h_disabled
static inline NV_STATUS crashcatEngineLoadWayfinder(struct CrashCatEngine *arg0) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatEngine was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_crashcat_engine_h_disabled
#define crashcatEngineLoadWayfinder(arg0) crashcatEngineLoadWayfinder_IMPL(arg0)
#endif //__nvoc_crashcat_engine_h_disabled

#undef PRIVATE_FIELD

#ifndef NVOC_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#ifndef __nvoc_crashcat_engine_h_disabled
#undef crashcatEngineLoadWayfinder
NV_STATUS NVOC_PRIVATE_FUNCTION(crashcatEngineLoadWayfinder)(struct CrashCatEngine *arg0);
#endif //__nvoc_crashcat_engine_h_disabled

#endif // NVOC_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED


#define CRASHCAT_GET_ENGINE(pCrashCatObj) objFindAncestorOfType(CrashCatEngine, pCrashCatObj)

// Non-NVOC wrapper for handling variadic arguments
void crashcatEnginePrintf(struct CrashCatEngine *, NvBool, const char *, ...);

#endif // CRASHCAT_ENGINE_H

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_CRASHCAT_ENGINE_NVOC_H_
@ -0,0 +1,179 @@
#define NVOC_CRASHCAT_QUEUE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_crashcat_queue_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xbaa900 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatQueue;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

void __nvoc_init_CrashCatQueue(CrashCatQueue*, CrashCatWayfinder* );
void __nvoc_init_funcTable_CrashCatQueue(CrashCatQueue*, CrashCatWayfinder* );
NV_STATUS __nvoc_ctor_CrashCatQueue(CrashCatQueue*, CrashCatWayfinder* , CrashCatQueueConfig * arg_pQueueConfig);
void __nvoc_init_dataField_CrashCatQueue(CrashCatQueue*, CrashCatWayfinder* );
void __nvoc_dtor_CrashCatQueue(CrashCatQueue*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatQueue;

static const struct NVOC_RTTI __nvoc_rtti_CrashCatQueue_CrashCatQueue = {
    /*pClassDef=*/          &__nvoc_class_def_CrashCatQueue,
    /*dtor=*/               (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_CrashCatQueue,
    /*offset=*/             0,
};

static const struct NVOC_RTTI __nvoc_rtti_CrashCatQueue_Object = {
    /*pClassDef=*/          &__nvoc_class_def_Object,
    /*dtor=*/               &__nvoc_destructFromBase,
    /*offset=*/             NV_OFFSETOF(CrashCatQueue, __nvoc_base_Object),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_CrashCatQueue = {
    /*numRelatives=*/       2,
    /*relatives=*/ {
        &__nvoc_rtti_CrashCatQueue_CrashCatQueue,
        &__nvoc_rtti_CrashCatQueue_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatQueue =
{
    /*classInfo=*/ {
        /*size=*/               sizeof(CrashCatQueue),
        /*classId=*/            classId(CrashCatQueue),
        /*providerId=*/         &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/               "CrashCatQueue",
#endif
    },
    /*objCreatefn=*/        (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_CrashCatQueue,
    /*pCastInfo=*/          &__nvoc_castinfo_CrashCatQueue,
    /*pExportInfo=*/        &__nvoc_export_info_CrashCatQueue
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatQueue =
{
    /*numEntries=*/         0,
    /*pExportEntries=*/     0
};

void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_CrashCatQueue(CrashCatQueue *pThis) {
    __nvoc_crashcatQueueDestruct(pThis);
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_CrashCatQueue(CrashCatQueue *pThis, CrashCatWayfinder *pCrashcatWayfinder) {
    CrashCatWayfinderHal *wayfinderHal = &pCrashcatWayfinder->wayfinderHal;
    const unsigned long wayfinderHal_HalVarIdx = (unsigned long)wayfinderHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pCrashcatWayfinder);
    PORT_UNREFERENCED_VARIABLE(wayfinderHal);
    PORT_UNREFERENCED_VARIABLE(wayfinderHal_HalVarIdx);
}

NV_STATUS __nvoc_ctor_Object(Object* );
NV_STATUS __nvoc_ctor_CrashCatQueue(CrashCatQueue *pThis, CrashCatWayfinder *pCrashcatWayfinder, CrashCatQueueConfig * arg_pQueueConfig) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
    if (status != NV_OK) goto __nvoc_ctor_CrashCatQueue_fail_Object;
    __nvoc_init_dataField_CrashCatQueue(pThis, pCrashcatWayfinder);

    status = __nvoc_crashcatQueueConstruct(pThis, arg_pQueueConfig);
    if (status != NV_OK) goto __nvoc_ctor_CrashCatQueue_fail__init;
    goto __nvoc_ctor_CrashCatQueue_exit; // Success

__nvoc_ctor_CrashCatQueue_fail__init:
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_CrashCatQueue_fail_Object:
__nvoc_ctor_CrashCatQueue_exit:

    return status;
}

static void __nvoc_init_funcTable_CrashCatQueue_1(CrashCatQueue *pThis, CrashCatWayfinder *pCrashcatWayfinder) {
    CrashCatWayfinderHal *wayfinderHal = &pCrashcatWayfinder->wayfinderHal;
    const unsigned long wayfinderHal_HalVarIdx = (unsigned long)wayfinderHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pCrashcatWayfinder);
    PORT_UNREFERENCED_VARIABLE(wayfinderHal);
    PORT_UNREFERENCED_VARIABLE(wayfinderHal_HalVarIdx);
}

void __nvoc_init_funcTable_CrashCatQueue(CrashCatQueue *pThis, CrashCatWayfinder *pCrashcatWayfinder) {
    __nvoc_init_funcTable_CrashCatQueue_1(pThis, pCrashcatWayfinder);
}

void __nvoc_init_Object(Object*);
void __nvoc_init_CrashCatQueue(CrashCatQueue *pThis, CrashCatWayfinder *pCrashcatWayfinder) {
    pThis->__nvoc_pbase_CrashCatQueue = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
    __nvoc_init_Object(&pThis->__nvoc_base_Object);
    __nvoc_init_funcTable_CrashCatQueue(pThis, pCrashcatWayfinder);
}

NV_STATUS __nvoc_objCreate_CrashCatQueue(CrashCatQueue **ppThis, Dynamic *pParent, NvU32 createFlags, CrashCatQueueConfig * arg_pQueueConfig) {
    NV_STATUS status;
    Object *pParentObj;
    CrashCatQueue *pThis;
    CrashCatWayfinder *pCrashcatWayfinder;

    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(CrashCatQueue), (void**)&pThis, (void**)ppThis);
    if (status != NV_OK)
        return status;

    portMemSet(pThis, 0, sizeof(CrashCatQueue));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_CrashCatQueue);

    pThis->__nvoc_base_Object.createFlags = createFlags;

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }

    if ((pCrashcatWayfinder = dynamicCast(pParent, CrashCatWayfinder)) == NULL)
        pCrashcatWayfinder = objFindAncestorOfType(CrashCatWayfinder, pParent);
    NV_ASSERT_OR_RETURN(pCrashcatWayfinder != NULL, NV_ERR_INVALID_ARGUMENT);

    __nvoc_init_CrashCatQueue(pThis, pCrashcatWayfinder);
    status = __nvoc_ctor_CrashCatQueue(pThis, pCrashcatWayfinder, arg_pQueueConfig);
    if (status != NV_OK) goto __nvoc_objCreate_CrashCatQueue_cleanup;

    *ppThis = pThis;

    return NV_OK;

__nvoc_objCreate_CrashCatQueue_cleanup:
    // do not call destructors here since the constructor already called them
    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
        portMemSet(pThis, 0, sizeof(CrashCatQueue));
    else
        portMemFree(pThis);

    // coverity[leaked_storage:FALSE]
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_CrashCatQueue(CrashCatQueue **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    CrashCatQueueConfig * arg_pQueueConfig = va_arg(args, CrashCatQueueConfig *);

    status = __nvoc_objCreate_CrashCatQueue(ppThis, pParent, createFlags, arg_pQueueConfig);

    return status;
}
|
||||
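
// NOTE: this va_list factory is what the class definition registers as
// objCreatefn. It must pull arguments off the va_list with the same types and
// order as the typed __nvoc_objCreate_CrashCatQueue() parameters; here that
// is the single CrashCatQueueConfig pointer.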
@ -0,0 +1,147 @@
#ifndef _G_CRASHCAT_QUEUE_NVOC_H_
#define _G_CRASHCAT_QUEUE_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "g_crashcat_queue_nvoc.h"

#ifndef CRASHCAT_QUEUE_H
#define CRASHCAT_QUEUE_H

#include "nvoc/object.h"
#include "nv-crashcat.h"
#include "crashcat/crashcat_wayfinder.h" // for CrashCatWayfinderHal spec

struct CrashCatEngine;

#ifndef __NVOC_CLASS_CrashCatEngine_TYPEDEF__
#define __NVOC_CLASS_CrashCatEngine_TYPEDEF__
typedef struct CrashCatEngine CrashCatEngine;
#endif /* __NVOC_CLASS_CrashCatEngine_TYPEDEF__ */

#ifndef __nvoc_class_id_CrashCatEngine
#define __nvoc_class_id_CrashCatEngine 0x654166
#endif /* __nvoc_class_id_CrashCatEngine */


struct CrashCatReport;

#ifndef __NVOC_CLASS_CrashCatReport_TYPEDEF__
#define __NVOC_CLASS_CrashCatReport_TYPEDEF__
typedef struct CrashCatReport CrashCatReport;
#endif /* __NVOC_CLASS_CrashCatReport_TYPEDEF__ */

#ifndef __nvoc_class_id_CrashCatReport
#define __nvoc_class_id_CrashCatReport 0xde4777
#endif /* __nvoc_class_id_CrashCatReport */


typedef struct
{
    NV_CRASHCAT_MEM_APERTURE aperture;
    NvU32 size;
    NvU64 offset;

    NvU32 putRegOffset;
    NvU32 getRegOffset;
} CrashCatQueueConfig;

#ifdef NVOC_CRASHCAT_QUEUE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
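
// NOTE: PRIVATE_FIELD gates access to the "private" members of the generated
// struct below. Translation units that define
// NVOC_CRASHCAT_QUEUE_H_PRIVATE_ACCESS_ALLOWED (the class's own
// implementation files) see the plain field name; elsewhere the name is
// mangled via NVOC_PRIVATE_FIELD, so direct access by the public name should
// fail to compile.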
struct CrashCatQueue {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct Object __nvoc_base_Object;
    struct Object *__nvoc_pbase_Object;
    struct CrashCatQueue *__nvoc_pbase_CrashCatQueue;
    CrashCatQueueConfig PRIVATE_FIELD(config);
    struct CrashCatEngine *PRIVATE_FIELD(pEngine);
    void *PRIVATE_FIELD(pMapping);
};

#ifndef __NVOC_CLASS_CrashCatQueue_TYPEDEF__
#define __NVOC_CLASS_CrashCatQueue_TYPEDEF__
typedef struct CrashCatQueue CrashCatQueue;
#endif /* __NVOC_CLASS_CrashCatQueue_TYPEDEF__ */

#ifndef __nvoc_class_id_CrashCatQueue
#define __nvoc_class_id_CrashCatQueue 0xbaa900
#endif /* __nvoc_class_id_CrashCatQueue */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatQueue;

#define __staticCast_CrashCatQueue(pThis) \
    ((pThis)->__nvoc_pbase_CrashCatQueue)

#ifdef __nvoc_crashcat_queue_h_disabled
#define __dynamicCast_CrashCatQueue(pThis) ((CrashCatQueue*)NULL)
#else //__nvoc_crashcat_queue_h_disabled
#define __dynamicCast_CrashCatQueue(pThis) \
    ((CrashCatQueue*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(CrashCatQueue)))
#endif //__nvoc_crashcat_queue_h_disabled


NV_STATUS __nvoc_objCreateDynamic_CrashCatQueue(CrashCatQueue**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_CrashCatQueue(CrashCatQueue**, Dynamic*, NvU32, CrashCatQueueConfig * arg_pQueueConfig);
#define __objCreate_CrashCatQueue(ppNewObj, pParent, createFlags, arg_pQueueConfig) \
    __nvoc_objCreate_CrashCatQueue((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pQueueConfig)
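
// Illustrative sketch (not part of the generated header): creating a queue
// through the macro above, given a CrashCatWayfinder parent. The config
// values are placeholders and error handling is elided.
//
//     CrashCatQueue *pQueue = NULL;
//     CrashCatQueueConfig config;
//     portMemSet(&config, 0, sizeof(config));
//     config.aperture = aperture;          // an NV_CRASHCAT_MEM_APERTURE value
//     config.size = queueSize;             // queue buffer size
//     config.offset = queueOffset;         // queue buffer offset
//     config.putRegOffset = putRegOffset;  // PUT pointer register offset
//     config.getRegOffset = getRegOffset;  // GET pointer register offset
//     NV_STATUS status = __objCreate_CrashCatQueue(&pQueue, pWayfinder,
//                                                  0 /* createFlags */, &config);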

struct CrashCatReport *crashcatQueueConsumeNextReport_V1(struct CrashCatQueue *arg0);


#ifdef __nvoc_crashcat_queue_h_disabled
static inline struct CrashCatReport *crashcatQueueConsumeNextReport(struct CrashCatQueue *arg0) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatQueue was disabled!");
    return NULL;
}
#else //__nvoc_crashcat_queue_h_disabled
#define crashcatQueueConsumeNextReport(arg0) crashcatQueueConsumeNextReport_V1(arg0)
#endif //__nvoc_crashcat_queue_h_disabled

#define crashcatQueueConsumeNextReport_HAL(arg0) crashcatQueueConsumeNextReport(arg0)
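
// NOTE: the __nvoc_crashcat_queue_h_disabled pattern above recurs throughout
// these generated headers. When the class is compiled out, calls to its
// methods resolve to static inline stubs that assert via
// NV_ASSERT_FAILED_PRECOMP rather than failing at link time; otherwise the
// public name is a macro aliasing the concrete _V1 implementation, and the
// _HAL alias routes through the same resolution.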

NV_STATUS crashcatQueueConstruct_IMPL(struct CrashCatQueue *arg_, CrashCatQueueConfig *arg_pQueueConfig);

#define __nvoc_crashcatQueueConstruct(arg_, arg_pQueueConfig) crashcatQueueConstruct_IMPL(arg_, arg_pQueueConfig)
void crashcatQueueDestruct_IMPL(struct CrashCatQueue *arg0);

#define __nvoc_crashcatQueueDestruct(arg0) crashcatQueueDestruct_IMPL(arg0)
#undef PRIVATE_FIELD


#endif // CRASHCAT_QUEUE_H

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_CRASHCAT_QUEUE_NVOC_H_
@ -0,0 +1,218 @@
#define NVOC_CRASHCAT_REPORT_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_crashcat_report_nvoc.h"

void __nvoc_init_halspec_CrashCatReportHal(CrashCatReportHal *pCrashCatReportHal, NV_CRASHCAT_PACKET_FORMAT_VERSION version, CrashCatImplementer implementer)
{
    // V1_GENERIC
    if(version == 0x1 && implementer == 0x0)
    {
        pCrashCatReportHal->__nvoc_HalVarIdx = 0;
    }
    // V1_LIBOS2
    else if(version == 0x1 && implementer == 0x4C49424F53322E30)
    {
        pCrashCatReportHal->__nvoc_HalVarIdx = 1;
    }
    // V1_LIBOS3
    else if(version == 0x1 && implementer == 0x4C49424F53332E31)
    {
        pCrashCatReportHal->__nvoc_HalVarIdx = 2;
    }
}
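
// NOTE: the implementer values compared above are 64-bit ASCII encodings:
// 0x4C49424F53322E30 is "LIBOS2.0" and 0x4C49424F53332E31 is "LIBOS3.1",
// matching the CRASHCAT_IMPLEMENTER_LIBOS2/LIBOS3 constants declared in
// g_crashcat_report_nvoc.h. For unrecognized (version, implementer)
// combinations, __nvoc_HalVarIdx keeps its zero-initialized value, which
// selects the V1_GENERIC variant.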

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xde4777 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatReport;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

void __nvoc_init_CrashCatReport(CrashCatReport*,
        NV_CRASHCAT_PACKET_FORMAT_VERSION CrashCatReportHal_version, CrashCatImplementer CrashCatReportHal_implementer);
void __nvoc_init_funcTable_CrashCatReport(CrashCatReport*);
NV_STATUS __nvoc_ctor_CrashCatReport(CrashCatReport*, void ** arg_ppReportBytes, NvLength arg_bytesRemaining);
void __nvoc_init_dataField_CrashCatReport(CrashCatReport*);
void __nvoc_dtor_CrashCatReport(CrashCatReport*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatReport;

static const struct NVOC_RTTI __nvoc_rtti_CrashCatReport_CrashCatReport = {
    /*pClassDef=*/ &__nvoc_class_def_CrashCatReport,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_CrashCatReport,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_CrashCatReport_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(CrashCatReport, __nvoc_base_Object),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_CrashCatReport = {
    /*numRelatives=*/ 2,
    /*relatives=*/ {
        &__nvoc_rtti_CrashCatReport_CrashCatReport,
        &__nvoc_rtti_CrashCatReport_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatReport =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(CrashCatReport),
        /*classId=*/ classId(CrashCatReport),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "CrashCatReport",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_CrashCatReport,
    /*pCastInfo=*/ &__nvoc_castinfo_CrashCatReport,
    /*pExportInfo=*/ &__nvoc_export_info_CrashCatReport
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatReport =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_CrashCatReport(CrashCatReport *pThis) {
    __nvoc_crashcatReportDestruct(pThis);
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_CrashCatReport(CrashCatReport *pThis) {
    CrashCatReportHal *reportHal = &pThis->reportHal;
    const unsigned long reportHal_HalVarIdx = (unsigned long)reportHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(reportHal);
    PORT_UNREFERENCED_VARIABLE(reportHal_HalVarIdx);
}

NV_STATUS __nvoc_ctor_Object(Object* );
NV_STATUS __nvoc_ctor_CrashCatReport(CrashCatReport *pThis, void ** arg_ppReportBytes, NvLength arg_bytesRemaining) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
    if (status != NV_OK) goto __nvoc_ctor_CrashCatReport_fail_Object;
    __nvoc_init_dataField_CrashCatReport(pThis);

    status = __nvoc_crashcatReportConstruct(pThis, arg_ppReportBytes, arg_bytesRemaining);
    if (status != NV_OK) goto __nvoc_ctor_CrashCatReport_fail__init;
    goto __nvoc_ctor_CrashCatReport_exit; // Success

__nvoc_ctor_CrashCatReport_fail__init:
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_CrashCatReport_fail_Object:
__nvoc_ctor_CrashCatReport_exit:

    return status;
}

static void __nvoc_init_funcTable_CrashCatReport_1(CrashCatReport *pThis) {
    CrashCatReportHal *reportHal = &pThis->reportHal;
    const unsigned long reportHal_HalVarIdx = (unsigned long)reportHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(reportHal);
    PORT_UNREFERENCED_VARIABLE(reportHal_HalVarIdx);

    // Hal function -- crashcatReportLogReporter
    if (( ((reportHal_HalVarIdx >> 5) == 0UL) && ((1UL << (reportHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* CrashCatReportHal: V1_GENERIC */
    {
        pThis->__crashcatReportLogReporter__ = &crashcatReportLogReporter_V1_GENERIC;
    }
    else
    {
        pThis->__crashcatReportLogReporter__ = &crashcatReportLogReporter_V1_LIBOS2;
    }

    // Hal function -- crashcatReportLogSource
    if (( ((reportHal_HalVarIdx >> 5) == 0UL) && ((1UL << (reportHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* CrashCatReportHal: V1_GENERIC */
    {
        pThis->__crashcatReportLogSource__ = &crashcatReportLogSource_V1_GENERIC;
    }
    else
    {
        pThis->__crashcatReportLogSource__ = &crashcatReportLogSource_V1_LIBOS2;
    }
}
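
// NOTE: the variant checks above are bitmask membership tests. With
// reportHal_HalVarIdx in [0, 31], (HalVarIdx >> 5) selects a 32-bit mask word
// and (1UL << (HalVarIdx & 0x1f)) the bit within it; mask 0x00000001UL thus
// matches only variant index 0 (V1_GENERIC). The other variants (V1_LIBOS2
// and V1_LIBOS3) fall through to the _V1_LIBOS2 implementations.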

void __nvoc_init_funcTable_CrashCatReport(CrashCatReport *pThis) {
    __nvoc_init_funcTable_CrashCatReport_1(pThis);
}

void __nvoc_init_Object(Object*);
void __nvoc_init_CrashCatReport(CrashCatReport *pThis,
        NV_CRASHCAT_PACKET_FORMAT_VERSION CrashCatReportHal_version, CrashCatImplementer CrashCatReportHal_implementer) {
    pThis->__nvoc_pbase_CrashCatReport = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
    __nvoc_init_Object(&pThis->__nvoc_base_Object);
    __nvoc_init_halspec_CrashCatReportHal(&pThis->reportHal, CrashCatReportHal_version, CrashCatReportHal_implementer);
    __nvoc_init_funcTable_CrashCatReport(pThis);
}

NV_STATUS __nvoc_objCreate_CrashCatReport(CrashCatReport **ppThis, Dynamic *pParent, NvU32 createFlags,
        NV_CRASHCAT_PACKET_FORMAT_VERSION CrashCatReportHal_version, CrashCatImplementer CrashCatReportHal_implementer, void ** arg_ppReportBytes, NvLength arg_bytesRemaining) {
    NV_STATUS status;
    Object *pParentObj;
    CrashCatReport *pThis;

    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(CrashCatReport), (void**)&pThis, (void**)ppThis);
    if (status != NV_OK)
        return status;

    portMemSet(pThis, 0, sizeof(CrashCatReport));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_CrashCatReport);

    pThis->__nvoc_base_Object.createFlags = createFlags;

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_CrashCatReport(pThis, CrashCatReportHal_version, CrashCatReportHal_implementer);
    status = __nvoc_ctor_CrashCatReport(pThis, arg_ppReportBytes, arg_bytesRemaining);
    if (status != NV_OK) goto __nvoc_objCreate_CrashCatReport_cleanup;

    *ppThis = pThis;

    return NV_OK;

__nvoc_objCreate_CrashCatReport_cleanup:
    // do not call destructors here since the constructor already called them
    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
        portMemSet(pThis, 0, sizeof(CrashCatReport));
    else
        portMemFree(pThis);

    // coverity[leaked_storage:FALSE]
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_CrashCatReport(CrashCatReport **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    NV_CRASHCAT_PACKET_FORMAT_VERSION CrashCatReportHal_version = va_arg(args, NV_CRASHCAT_PACKET_FORMAT_VERSION);
    CrashCatImplementer CrashCatReportHal_implementer = va_arg(args, CrashCatImplementer);
    void ** arg_ppReportBytes = va_arg(args, void **);
    NvLength arg_bytesRemaining = va_arg(args, NvLength);

    status = __nvoc_objCreate_CrashCatReport(ppThis, pParent, createFlags, CrashCatReportHal_version, CrashCatReportHal_implementer, arg_ppReportBytes, arg_bytesRemaining);

    return status;
}
@ -0,0 +1,388 @@
#ifndef _G_CRASHCAT_REPORT_NVOC_H_
#define _G_CRASHCAT_REPORT_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "g_crashcat_report_nvoc.h"

#ifndef CRASHCAT_REPORT_H
#define CRASHCAT_REPORT_H

#include "nv-crashcat.h"
#include "nvoc/object.h"

struct CrashCatEngine;

#ifndef __NVOC_CLASS_CrashCatEngine_TYPEDEF__
#define __NVOC_CLASS_CrashCatEngine_TYPEDEF__
typedef struct CrashCatEngine CrashCatEngine;
#endif /* __NVOC_CLASS_CrashCatEngine_TYPEDEF__ */

#ifndef __nvoc_class_id_CrashCatEngine
#define __nvoc_class_id_CrashCatEngine 0x654166
#endif /* __nvoc_class_id_CrashCatEngine */


typedef NvU64 CrashCatImplementer;

#define CRASHCAT_IMPLEMENTER_UNSPECIFIED 0ull
#define CRASHCAT_IMPLEMENTER_LIBOS2 0x4C49424F53322E30ull // "LIBOS2.0"
#define CRASHCAT_IMPLEMENTER_LIBOS3 0x4C49424F53332E31ull // "LIBOS3.1"

struct CrashCatReportHal {
    unsigned short __nvoc_HalVarIdx;
};
typedef struct CrashCatReportHal CrashCatReportHal;
void __nvoc_init_halspec_CrashCatReportHal(CrashCatReportHal*, NV_CRASHCAT_PACKET_FORMAT_VERSION, CrashCatImplementer);

#ifdef NVOC_CRASHCAT_REPORT_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct __nvoc_inner_struc_CrashCatReport_1__ {
    NvCrashCatReport_V1 report;
    NvCrashCatRiscv64CsrState_V1 riscv64CsrState;
    NvCrashCatRiscv64GprState_V1 riscv64GprState;
    NvCrashCatRiscv64Trace_V1 *pRiscv64Trace;
    NvCrashCatIo32State_V1 *pIo32State;
};


struct CrashCatReport {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct Object __nvoc_base_Object;
    struct Object *__nvoc_pbase_Object;
    struct CrashCatReport *__nvoc_pbase_CrashCatReport;
    void (*__crashcatReportLogReporter__)(struct CrashCatReport *);
    void (*__crashcatReportLogSource__)(struct CrashCatReport *);
    struct CrashCatReportHal reportHal;
    struct CrashCatEngine *PRIVATE_FIELD(pEngine);
    NvU32 PRIVATE_FIELD(validTags);
    struct __nvoc_inner_struc_CrashCatReport_1__ PRIVATE_FIELD(v1);
};

#ifndef __NVOC_CLASS_CrashCatReport_TYPEDEF__
#define __NVOC_CLASS_CrashCatReport_TYPEDEF__
typedef struct CrashCatReport CrashCatReport;
#endif /* __NVOC_CLASS_CrashCatReport_TYPEDEF__ */

#ifndef __nvoc_class_id_CrashCatReport
#define __nvoc_class_id_CrashCatReport 0xde4777
#endif /* __nvoc_class_id_CrashCatReport */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatReport;

#define __staticCast_CrashCatReport(pThis) \
    ((pThis)->__nvoc_pbase_CrashCatReport)

#ifdef __nvoc_crashcat_report_h_disabled
#define __dynamicCast_CrashCatReport(pThis) ((CrashCatReport*)NULL)
#else //__nvoc_crashcat_report_h_disabled
#define __dynamicCast_CrashCatReport(pThis) \
    ((CrashCatReport*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(CrashCatReport)))
#endif //__nvoc_crashcat_report_h_disabled


NV_STATUS __nvoc_objCreateDynamic_CrashCatReport(CrashCatReport**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_CrashCatReport(CrashCatReport**, Dynamic*, NvU32,
        NV_CRASHCAT_PACKET_FORMAT_VERSION CrashCatReportHal_version, CrashCatImplementer CrashCatReportHal_implementer, void ** arg_ppReportBytes, NvLength arg_bytesRemaining);
#define __objCreate_CrashCatReport(ppNewObj, pParent, createFlags, CrashCatReportHal_version, CrashCatReportHal_implementer, arg_ppReportBytes, arg_bytesRemaining) \
    __nvoc_objCreate_CrashCatReport((ppNewObj), staticCast((pParent), Dynamic), (createFlags), CrashCatReportHal_version, CrashCatReportHal_implementer, arg_ppReportBytes, arg_bytesRemaining)

#define crashcatReportLogReporter(arg0) crashcatReportLogReporter_DISPATCH(arg0)
#define crashcatReportLogReporter_HAL(arg0) crashcatReportLogReporter_DISPATCH(arg0)
#define crashcatReportLogSource(arg0) crashcatReportLogSource_DISPATCH(arg0)
#define crashcatReportLogSource_HAL(arg0) crashcatReportLogSource_DISPATCH(arg0)
void crashcatReportDestruct_V1(struct CrashCatReport *arg0);


#define __nvoc_crashcatReportDestruct(arg0) crashcatReportDestruct_V1(arg0)
void *crashcatReportExtract_V1(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);


#ifdef __nvoc_crashcat_report_h_disabled
static inline void *crashcatReportExtract(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
    return NULL;
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtract(arg0, pReportBytes, bytesRemaining) crashcatReportExtract_V1(arg0, pReportBytes, bytesRemaining)
#endif //__nvoc_crashcat_report_h_disabled

#define crashcatReportExtract_HAL(arg0, pReportBytes, bytesRemaining) crashcatReportExtract(arg0, pReportBytes, bytesRemaining)

void *crashcatReportExtractReport_V1(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);


#ifdef __nvoc_crashcat_report_h_disabled
static inline void *crashcatReportExtractReport(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
    return NULL;
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtractReport(arg0, pReportBytes, bytesRemaining) crashcatReportExtractReport_V1(arg0, pReportBytes, bytesRemaining)
#endif //__nvoc_crashcat_report_h_disabled

#define crashcatReportExtractReport_HAL(arg0, pReportBytes, bytesRemaining) crashcatReportExtractReport(arg0, pReportBytes, bytesRemaining)

void *crashcatReportExtractRiscv64CsrState_V1(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);


#ifdef __nvoc_crashcat_report_h_disabled
static inline void *crashcatReportExtractRiscv64CsrState(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
    return NULL;
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtractRiscv64CsrState(arg0, pReportBytes, bytesRemaining) crashcatReportExtractRiscv64CsrState_V1(arg0, pReportBytes, bytesRemaining)
#endif //__nvoc_crashcat_report_h_disabled

#define crashcatReportExtractRiscv64CsrState_HAL(arg0, pReportBytes, bytesRemaining) crashcatReportExtractRiscv64CsrState(arg0, pReportBytes, bytesRemaining)

void *crashcatReportExtractRiscv64GprState_V1(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);


#ifdef __nvoc_crashcat_report_h_disabled
static inline void *crashcatReportExtractRiscv64GprState(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
    return NULL;
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtractRiscv64GprState(arg0, pReportBytes, bytesRemaining) crashcatReportExtractRiscv64GprState_V1(arg0, pReportBytes, bytesRemaining)
#endif //__nvoc_crashcat_report_h_disabled

#define crashcatReportExtractRiscv64GprState_HAL(arg0, pReportBytes, bytesRemaining) crashcatReportExtractRiscv64GprState(arg0, pReportBytes, bytesRemaining)

void *crashcatReportExtractRiscv64Trace_V1(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);


#ifdef __nvoc_crashcat_report_h_disabled
static inline void *crashcatReportExtractRiscv64Trace(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
    return NULL;
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtractRiscv64Trace(arg0, pReportBytes, bytesRemaining) crashcatReportExtractRiscv64Trace_V1(arg0, pReportBytes, bytesRemaining)
#endif //__nvoc_crashcat_report_h_disabled

#define crashcatReportExtractRiscv64Trace_HAL(arg0, pReportBytes, bytesRemaining) crashcatReportExtractRiscv64Trace(arg0, pReportBytes, bytesRemaining)

void *crashcatReportExtractIo32State_V1(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);


#ifdef __nvoc_crashcat_report_h_disabled
static inline void *crashcatReportExtractIo32State(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
    return NULL;
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportExtractIo32State(arg0, pReportBytes, bytesRemaining) crashcatReportExtractIo32State_V1(arg0, pReportBytes, bytesRemaining)
#endif //__nvoc_crashcat_report_h_disabled

#define crashcatReportExtractIo32State_HAL(arg0, pReportBytes, bytesRemaining) crashcatReportExtractIo32State(arg0, pReportBytes, bytesRemaining)

void crashcatReportLogRiscv64CsrState_V1(struct CrashCatReport *arg0);


#ifdef __nvoc_crashcat_report_h_disabled
static inline void crashcatReportLogRiscv64CsrState(struct CrashCatReport *arg0) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportLogRiscv64CsrState(arg0) crashcatReportLogRiscv64CsrState_V1(arg0)
#endif //__nvoc_crashcat_report_h_disabled

#define crashcatReportLogRiscv64CsrState_HAL(arg0) crashcatReportLogRiscv64CsrState(arg0)

void crashcatReportLogRiscv64GprState_V1(struct CrashCatReport *arg0);


#ifdef __nvoc_crashcat_report_h_disabled
static inline void crashcatReportLogRiscv64GprState(struct CrashCatReport *arg0) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportLogRiscv64GprState(arg0) crashcatReportLogRiscv64GprState_V1(arg0)
#endif //__nvoc_crashcat_report_h_disabled

#define crashcatReportLogRiscv64GprState_HAL(arg0) crashcatReportLogRiscv64GprState(arg0)

void crashcatReportLogRiscv64Trace_V1(struct CrashCatReport *arg0);


#ifdef __nvoc_crashcat_report_h_disabled
static inline void crashcatReportLogRiscv64Trace(struct CrashCatReport *arg0) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportLogRiscv64Trace(arg0) crashcatReportLogRiscv64Trace_V1(arg0)
#endif //__nvoc_crashcat_report_h_disabled

#define crashcatReportLogRiscv64Trace_HAL(arg0) crashcatReportLogRiscv64Trace(arg0)

void crashcatReportLogIo32State_V1(struct CrashCatReport *arg0);


#ifdef __nvoc_crashcat_report_h_disabled
static inline void crashcatReportLogIo32State(struct CrashCatReport *arg0) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportLogIo32State(arg0) crashcatReportLogIo32State_V1(arg0)
#endif //__nvoc_crashcat_report_h_disabled

#define crashcatReportLogIo32State_HAL(arg0) crashcatReportLogIo32State(arg0)

void crashcatReportLogReporter_V1_GENERIC(struct CrashCatReport *arg0);

void crashcatReportLogReporter_V1_LIBOS2(struct CrashCatReport *arg0);

static inline void crashcatReportLogReporter_DISPATCH(struct CrashCatReport *arg0) {
    arg0->__crashcatReportLogReporter__(arg0);
}

void crashcatReportLogSource_V1_GENERIC(struct CrashCatReport *arg0);

void crashcatReportLogSource_V1_LIBOS2(struct CrashCatReport *arg0);

static inline void crashcatReportLogSource_DISPATCH(struct CrashCatReport *arg0) {
    arg0->__crashcatReportLogSource__(arg0);
}
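
// NOTE: crashcatReportLogReporter() and crashcatReportLogSource() expand to
// the _DISPATCH inlines above, which call through per-object function
// pointers installed by __nvoc_init_funcTable_CrashCatReport_1() according to
// the (version, implementer) HAL variant. This is NVOC's C rendering of
// virtual dispatch.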

NV_STATUS crashcatReportConstruct_IMPL(struct CrashCatReport *arg_, void **arg_ppReportBytes, NvLength arg_bytesRemaining);

#define __nvoc_crashcatReportConstruct(arg_, arg_ppReportBytes, arg_bytesRemaining) crashcatReportConstruct_IMPL(arg_, arg_ppReportBytes, arg_bytesRemaining)
void crashcatReportLog_IMPL(struct CrashCatReport *arg0);

#ifdef __nvoc_crashcat_report_h_disabled
static inline void crashcatReportLog(struct CrashCatReport *arg0) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatReport was disabled!");
}
#else //__nvoc_crashcat_report_h_disabled
#define crashcatReportLog(arg0) crashcatReportLog_IMPL(arg0)
#endif //__nvoc_crashcat_report_h_disabled

#undef PRIVATE_FIELD

#ifndef NVOC_CRASHCAT_REPORT_H_PRIVATE_ACCESS_ALLOWED
#undef crashcatReportLogReporter
void NVOC_PRIVATE_FUNCTION(crashcatReportLogReporter)(struct CrashCatReport *arg0);

#undef crashcatReportLogReporter_HAL
void NVOC_PRIVATE_FUNCTION(crashcatReportLogReporter_HAL)(struct CrashCatReport *arg0);

#undef crashcatReportLogSource
void NVOC_PRIVATE_FUNCTION(crashcatReportLogSource)(struct CrashCatReport *arg0);

#undef crashcatReportLogSource_HAL
void NVOC_PRIVATE_FUNCTION(crashcatReportLogSource_HAL)(struct CrashCatReport *arg0);

#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportExtract
void *NVOC_PRIVATE_FUNCTION(crashcatReportExtract)(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#endif //__nvoc_crashcat_report_h_disabled

#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportExtractReport
void *NVOC_PRIVATE_FUNCTION(crashcatReportExtractReport)(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#endif //__nvoc_crashcat_report_h_disabled

#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportExtractRiscv64CsrState
void *NVOC_PRIVATE_FUNCTION(crashcatReportExtractRiscv64CsrState)(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#endif //__nvoc_crashcat_report_h_disabled

#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportExtractRiscv64GprState
void *NVOC_PRIVATE_FUNCTION(crashcatReportExtractRiscv64GprState)(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#endif //__nvoc_crashcat_report_h_disabled

#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportExtractRiscv64Trace
void *NVOC_PRIVATE_FUNCTION(crashcatReportExtractRiscv64Trace)(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#endif //__nvoc_crashcat_report_h_disabled

#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportExtractIo32State
void *NVOC_PRIVATE_FUNCTION(crashcatReportExtractIo32State)(struct CrashCatReport *arg0, void *pReportBytes, NvLength bytesRemaining);
#endif //__nvoc_crashcat_report_h_disabled

#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportLogRiscv64CsrState
void NVOC_PRIVATE_FUNCTION(crashcatReportLogRiscv64CsrState)(struct CrashCatReport *arg0);
#endif //__nvoc_crashcat_report_h_disabled

#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportLogRiscv64GprState
void NVOC_PRIVATE_FUNCTION(crashcatReportLogRiscv64GprState)(struct CrashCatReport *arg0);
#endif //__nvoc_crashcat_report_h_disabled

#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportLogRiscv64Trace
void NVOC_PRIVATE_FUNCTION(crashcatReportLogRiscv64Trace)(struct CrashCatReport *arg0);
#endif //__nvoc_crashcat_report_h_disabled

#ifndef __nvoc_crashcat_report_h_disabled
#undef crashcatReportLogIo32State
void NVOC_PRIVATE_FUNCTION(crashcatReportLogIo32State)(struct CrashCatReport *arg0);
#endif //__nvoc_crashcat_report_h_disabled

#endif // NVOC_CRASHCAT_REPORT_H_PRIVATE_ACCESS_ALLOWED


// Utility to convert a cause code to a user-friendly string
const char *crashcatReportRiscvCauseToString(NvU64 xcause);

// Log indentation used for multi-line reports
#define CRASHCAT_LOG_INDENT " "

// Prefix used for multi-line reports
#if defined(NVRM)
#define CRASHCAT_LOG_PREFIX "NVRM: "
#else
#define CRASHCAT_LOG_PREFIX
#endif

#define CRASHCAT_REPORT_LOG_PACKET_TYPE(pReport, fmt, ...) \
    crashcatEnginePrintf(pReport->pEngine, NV_FALSE, \
        CRASHCAT_LOG_PREFIX CRASHCAT_LOG_INDENT fmt, ##__VA_ARGS__)
#define CRASHCAT_REPORT_LOG_DATA(pReport, fmt, ...) \
    crashcatEnginePrintf(pReport->pEngine, NV_FALSE, \
        CRASHCAT_LOG_PREFIX CRASHCAT_LOG_INDENT CRASHCAT_LOG_INDENT fmt, \
        ##__VA_ARGS__)

#endif // CRASHCAT_REPORT_H

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_CRASHCAT_REPORT_NVOC_H_
@ -0,0 +1,186 @@
#define NVOC_CRASHCAT_WAYFINDER_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_crashcat_wayfinder_nvoc.h"

void __nvoc_init_halspec_CrashCatWayfinderHal(CrashCatWayfinderHal *pCrashCatWayfinderHal, NV_CRASHCAT_WAYFINDER_VERSION version)
{
    // V1
    if(version == 0x1)
    {
        pCrashCatWayfinderHal->__nvoc_HalVarIdx = 0;
    }
}

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x085e32 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatWayfinder;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

void __nvoc_init_CrashCatWayfinder(CrashCatWayfinder*,
        NV_CRASHCAT_WAYFINDER_VERSION CrashCatWayfinderHal_version);
void __nvoc_init_funcTable_CrashCatWayfinder(CrashCatWayfinder*);
NV_STATUS __nvoc_ctor_CrashCatWayfinder(CrashCatWayfinder*, NvU32 arg_wfl0);
void __nvoc_init_dataField_CrashCatWayfinder(CrashCatWayfinder*);
void __nvoc_dtor_CrashCatWayfinder(CrashCatWayfinder*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatWayfinder;

static const struct NVOC_RTTI __nvoc_rtti_CrashCatWayfinder_CrashCatWayfinder = {
    /*pClassDef=*/ &__nvoc_class_def_CrashCatWayfinder,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_CrashCatWayfinder,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_CrashCatWayfinder_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(CrashCatWayfinder, __nvoc_base_Object),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_CrashCatWayfinder = {
    /*numRelatives=*/ 2,
    /*relatives=*/ {
        &__nvoc_rtti_CrashCatWayfinder_CrashCatWayfinder,
        &__nvoc_rtti_CrashCatWayfinder_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatWayfinder =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(CrashCatWayfinder),
        /*classId=*/ classId(CrashCatWayfinder),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "CrashCatWayfinder",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_CrashCatWayfinder,
    /*pCastInfo=*/ &__nvoc_castinfo_CrashCatWayfinder,
    /*pExportInfo=*/ &__nvoc_export_info_CrashCatWayfinder
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_CrashCatWayfinder =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_CrashCatWayfinder(CrashCatWayfinder *pThis) {
    __nvoc_crashcatWayfinderDestruct(pThis);
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_CrashCatWayfinder(CrashCatWayfinder *pThis) {
    CrashCatWayfinderHal *wayfinderHal = &pThis->wayfinderHal;
    const unsigned long wayfinderHal_HalVarIdx = (unsigned long)wayfinderHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(wayfinderHal);
    PORT_UNREFERENCED_VARIABLE(wayfinderHal_HalVarIdx);
}

NV_STATUS __nvoc_ctor_Object(Object* );
NV_STATUS __nvoc_ctor_CrashCatWayfinder(CrashCatWayfinder *pThis, NvU32 arg_wfl0) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
    if (status != NV_OK) goto __nvoc_ctor_CrashCatWayfinder_fail_Object;
    __nvoc_init_dataField_CrashCatWayfinder(pThis);

    status = __nvoc_crashcatWayfinderConstruct(pThis, arg_wfl0);
    if (status != NV_OK) goto __nvoc_ctor_CrashCatWayfinder_fail__init;
    goto __nvoc_ctor_CrashCatWayfinder_exit; // Success

__nvoc_ctor_CrashCatWayfinder_fail__init:
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_CrashCatWayfinder_fail_Object:
__nvoc_ctor_CrashCatWayfinder_exit:

    return status;
}

static void __nvoc_init_funcTable_CrashCatWayfinder_1(CrashCatWayfinder *pThis) {
    CrashCatWayfinderHal *wayfinderHal = &pThis->wayfinderHal;
    const unsigned long wayfinderHal_HalVarIdx = (unsigned long)wayfinderHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(wayfinderHal);
    PORT_UNREFERENCED_VARIABLE(wayfinderHal_HalVarIdx);
}

void __nvoc_init_funcTable_CrashCatWayfinder(CrashCatWayfinder *pThis) {
    __nvoc_init_funcTable_CrashCatWayfinder_1(pThis);
}

void __nvoc_init_Object(Object*);
void __nvoc_init_CrashCatWayfinder(CrashCatWayfinder *pThis,
        NV_CRASHCAT_WAYFINDER_VERSION CrashCatWayfinderHal_version) {
    pThis->__nvoc_pbase_CrashCatWayfinder = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
    __nvoc_init_Object(&pThis->__nvoc_base_Object);
    __nvoc_init_halspec_CrashCatWayfinderHal(&pThis->wayfinderHal, CrashCatWayfinderHal_version);
    __nvoc_init_funcTable_CrashCatWayfinder(pThis);
}

NV_STATUS __nvoc_objCreate_CrashCatWayfinder(CrashCatWayfinder **ppThis, Dynamic *pParent, NvU32 createFlags,
        NV_CRASHCAT_WAYFINDER_VERSION CrashCatWayfinderHal_version, NvU32 arg_wfl0) {
    NV_STATUS status;
    Object *pParentObj;
    CrashCatWayfinder *pThis;

    status = __nvoc_handleObjCreateMemAlloc(createFlags, sizeof(CrashCatWayfinder), (void**)&pThis, (void**)ppThis);
    if (status != NV_OK)
        return status;

    portMemSet(pThis, 0, sizeof(CrashCatWayfinder));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_CrashCatWayfinder);

    pThis->__nvoc_base_Object.createFlags = createFlags;

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_CrashCatWayfinder(pThis, CrashCatWayfinderHal_version);
    status = __nvoc_ctor_CrashCatWayfinder(pThis, arg_wfl0);
    if (status != NV_OK) goto __nvoc_objCreate_CrashCatWayfinder_cleanup;

    *ppThis = pThis;

    return NV_OK;

__nvoc_objCreate_CrashCatWayfinder_cleanup:
    // do not call destructors here since the constructor already called them
    if (createFlags & NVOC_OBJ_CREATE_FLAGS_IN_PLACE_CONSTRUCT)
        portMemSet(pThis, 0, sizeof(CrashCatWayfinder));
    else
        portMemFree(pThis);

    // coverity[leaked_storage:FALSE]
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_CrashCatWayfinder(CrashCatWayfinder **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    NV_CRASHCAT_WAYFINDER_VERSION CrashCatWayfinderHal_version = va_arg(args, NV_CRASHCAT_WAYFINDER_VERSION);
    NvU32 arg_wfl0 = va_arg(args, NvU32);

    status = __nvoc_objCreate_CrashCatWayfinder(ppThis, pParent, createFlags, CrashCatWayfinderHal_version, arg_wfl0);

    return status;
}
@ -0,0 +1,170 @@
#ifndef _G_CRASHCAT_WAYFINDER_NVOC_H_
#define _G_CRASHCAT_WAYFINDER_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "g_crashcat_wayfinder_nvoc.h"

#ifndef CRASHCAT_WAYFINDER_H
#define CRASHCAT_WAYFINDER_H

#include "nvoc/object.h"
#include "nv-crashcat.h"

struct CrashCatQueue;

#ifndef __NVOC_CLASS_CrashCatQueue_TYPEDEF__
#define __NVOC_CLASS_CrashCatQueue_TYPEDEF__
typedef struct CrashCatQueue CrashCatQueue;
#endif /* __NVOC_CLASS_CrashCatQueue_TYPEDEF__ */

#ifndef __nvoc_class_id_CrashCatQueue
#define __nvoc_class_id_CrashCatQueue 0xbaa900
#endif /* __nvoc_class_id_CrashCatQueue */


struct CrashCatReport;

#ifndef __NVOC_CLASS_CrashCatReport_TYPEDEF__
#define __NVOC_CLASS_CrashCatReport_TYPEDEF__
typedef struct CrashCatReport CrashCatReport;
#endif /* __NVOC_CLASS_CrashCatReport_TYPEDEF__ */

#ifndef __nvoc_class_id_CrashCatReport
#define __nvoc_class_id_CrashCatReport 0xde4777
#endif /* __nvoc_class_id_CrashCatReport */


struct CrashCatWayfinderHal {
    unsigned short __nvoc_HalVarIdx;
};
typedef struct CrashCatWayfinderHal CrashCatWayfinderHal;
void __nvoc_init_halspec_CrashCatWayfinderHal(CrashCatWayfinderHal*, NV_CRASHCAT_WAYFINDER_VERSION);

#ifdef NVOC_CRASHCAT_WAYFINDER_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct __nvoc_inner_struc_CrashCatWayfinder_1__ {
    NvCrashCatWayfinderL0_V1 wfl0;
    NvCrashCatWayfinderL1_V1 wfl1;
};


struct CrashCatWayfinder {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct Object __nvoc_base_Object;
    struct Object *__nvoc_pbase_Object;
    struct CrashCatWayfinder *__nvoc_pbase_CrashCatWayfinder;
    struct CrashCatWayfinderHal wayfinderHal;
    struct CrashCatQueue *PRIVATE_FIELD(pQueue);
    struct __nvoc_inner_struc_CrashCatWayfinder_1__ PRIVATE_FIELD(v1);
};

#ifndef __NVOC_CLASS_CrashCatWayfinder_TYPEDEF__
#define __NVOC_CLASS_CrashCatWayfinder_TYPEDEF__
typedef struct CrashCatWayfinder CrashCatWayfinder;
#endif /* __NVOC_CLASS_CrashCatWayfinder_TYPEDEF__ */

#ifndef __nvoc_class_id_CrashCatWayfinder
#define __nvoc_class_id_CrashCatWayfinder 0x085e32
#endif /* __nvoc_class_id_CrashCatWayfinder */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatWayfinder;

#define __staticCast_CrashCatWayfinder(pThis) \
    ((pThis)->__nvoc_pbase_CrashCatWayfinder)

#ifdef __nvoc_crashcat_wayfinder_h_disabled
#define __dynamicCast_CrashCatWayfinder(pThis) ((CrashCatWayfinder*)NULL)
#else //__nvoc_crashcat_wayfinder_h_disabled
#define __dynamicCast_CrashCatWayfinder(pThis) \
    ((CrashCatWayfinder*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(CrashCatWayfinder)))
#endif //__nvoc_crashcat_wayfinder_h_disabled


NV_STATUS __nvoc_objCreateDynamic_CrashCatWayfinder(CrashCatWayfinder**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_CrashCatWayfinder(CrashCatWayfinder**, Dynamic*, NvU32,
        NV_CRASHCAT_WAYFINDER_VERSION CrashCatWayfinderHal_version, NvU32 arg_wfl0);
#define __objCreate_CrashCatWayfinder(ppNewObj, pParent, createFlags, CrashCatWayfinderHal_version, arg_wfl0) \
    __nvoc_objCreate_CrashCatWayfinder((ppNewObj), staticCast((pParent), Dynamic), (createFlags), CrashCatWayfinderHal_version, arg_wfl0)

struct CrashCatQueue *crashcatWayfinderGetReportQueue_V1(struct CrashCatWayfinder *arg0);


#ifdef __nvoc_crashcat_wayfinder_h_disabled
static inline struct CrashCatQueue *crashcatWayfinderGetReportQueue(struct CrashCatWayfinder *arg0) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatWayfinder was disabled!");
    return NULL;
}
#else //__nvoc_crashcat_wayfinder_h_disabled
#define crashcatWayfinderGetReportQueue(arg0) crashcatWayfinderGetReportQueue_V1(arg0)
#endif //__nvoc_crashcat_wayfinder_h_disabled

#define crashcatWayfinderGetReportQueue_HAL(arg0) crashcatWayfinderGetReportQueue(arg0)

void crashcatWayfinderSetWFL0_V1(struct CrashCatWayfinder *arg0, NvU32 wfl0);


#ifdef __nvoc_crashcat_wayfinder_h_disabled
static inline void crashcatWayfinderSetWFL0(struct CrashCatWayfinder *arg0, NvU32 wfl0) {
    NV_ASSERT_FAILED_PRECOMP("CrashCatWayfinder was disabled!");
}
#else //__nvoc_crashcat_wayfinder_h_disabled
#define crashcatWayfinderSetWFL0(arg0, wfl0) crashcatWayfinderSetWFL0_V1(arg0, wfl0)
#endif //__nvoc_crashcat_wayfinder_h_disabled

#define crashcatWayfinderSetWFL0_HAL(arg0, wfl0) crashcatWayfinderSetWFL0(arg0, wfl0)

NV_STATUS crashcatWayfinderConstruct_IMPL(struct CrashCatWayfinder *arg_, NvU32 arg_wfl0);

#define __nvoc_crashcatWayfinderConstruct(arg_, arg_wfl0) crashcatWayfinderConstruct_IMPL(arg_, arg_wfl0)
void crashcatWayfinderDestruct_IMPL(struct CrashCatWayfinder *arg0);

#define __nvoc_crashcatWayfinderDestruct(arg0) crashcatWayfinderDestruct_IMPL(arg0)
#undef PRIVATE_FIELD

#ifndef NVOC_CRASHCAT_WAYFINDER_H_PRIVATE_ACCESS_ALLOWED
#ifndef __nvoc_crashcat_wayfinder_h_disabled
#undef crashcatWayfinderSetWFL0
void NVOC_PRIVATE_FUNCTION(crashcatWayfinderSetWFL0)(struct CrashCatWayfinder *arg0, NvU32 wfl0);
#endif //__nvoc_crashcat_wayfinder_h_disabled

#endif // NVOC_CRASHCAT_WAYFINDER_H_PRIVATE_ACCESS_ALLOWED

#endif // CRASHCAT_WAYFINDER_H

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_CRASHCAT_WAYFINDER_NVOC_H_
@ -266,6 +266,7 @@ void __nvoc_init_dataField_OBJGPU(OBJGPU *pThis) {
    {
        pThis->setProperty(pThis, PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF, ((NvBool)(0 == 0)));
    }
    pThis->setProperty(pThis, PDB_PROP_GPU_FASTPATH_SEQ_ENABLED, ((NvBool)(0 != 0)));

    pThis->boardId = ~0;

@ -992,6 +992,7 @@ struct OBJGPU {
    NvBool PDB_PROP_GPU_SKIP_TABLE_CE_MAP;
    NvBool PDB_PROP_GPU_CHIP_SUPPORTS_RTD3_DEF;
    NvBool PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL;
    NvBool PDB_PROP_GPU_FASTPATH_SEQ_ENABLED;
    OS_GPU_INFO *pOsGpuInfo;
    OS_RM_CAPS *pOsRmCaps;
    NvU32 halImpl;
@ -1268,6 +1269,8 @@ extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU;
#define PDB_PROP_GPU_RM_UNLINKED_SLI_BASE_NAME PDB_PROP_GPU_RM_UNLINKED_SLI
#define PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL_BASE_CAST
#define PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL_BASE_NAME PDB_PROP_GPU_FORCE_PERF_BIOS_LEVEL
#define PDB_PROP_GPU_FASTPATH_SEQ_ENABLED_BASE_CAST
#define PDB_PROP_GPU_FASTPATH_SEQ_ENABLED_BASE_NAME PDB_PROP_GPU_FASTPATH_SEQ_ENABLED
#define PDB_PROP_GPU_IS_UEFI_BASE_CAST
#define PDB_PROP_GPU_IS_UEFI_BASE_NAME PDB_PROP_GPU_IS_UEFI
#define PDB_PROP_GPU_SKIP_TABLE_CE_MAP_BASE_CAST
@ -625,40 +625,6 @@ static inline void intrClearLeafVector(OBJGPU *pGpu, struct Intr *pIntr, NvU32 v

#define intrClearLeafVector_HAL(pGpu, pIntr, vector, pThreadState) intrClearLeafVector(pGpu, pIntr, vector, pThreadState)

static inline void intrClearCpuLeafVector_b3696a(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, struct THREAD_STATE_NODE *pThreadState) {
    return;
}

void intrClearCpuLeafVector_GH100(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, struct THREAD_STATE_NODE *pThreadState);


#ifdef __nvoc_intr_h_disabled
static inline void intrClearCpuLeafVector(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, struct THREAD_STATE_NODE *pThreadState) {
    NV_ASSERT_FAILED_PRECOMP("Intr was disabled!");
}
#else //__nvoc_intr_h_disabled
#define intrClearCpuLeafVector(pGpu, pIntr, vector, pThreadState) intrClearCpuLeafVector_b3696a(pGpu, pIntr, vector, pThreadState)
#endif //__nvoc_intr_h_disabled

#define intrClearCpuLeafVector_HAL(pGpu, pIntr, vector, pThreadState) intrClearCpuLeafVector(pGpu, pIntr, vector, pThreadState)

static inline void intrWriteCpuRegLeaf_b3696a(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2) {
    return;
}

void intrWriteCpuRegLeaf_GH100(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2);


#ifdef __nvoc_intr_h_disabled
static inline void intrWriteCpuRegLeaf(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2) {
    NV_ASSERT_FAILED_PRECOMP("Intr was disabled!");
}
#else //__nvoc_intr_h_disabled
#define intrWriteCpuRegLeaf(pGpu, pIntr, arg0, arg1, arg2) intrWriteCpuRegLeaf_b3696a(pGpu, pIntr, arg0, arg1, arg2)
#endif //__nvoc_intr_h_disabled

#define intrWriteCpuRegLeaf_HAL(pGpu, pIntr, arg0, arg1, arg2) intrWriteCpuRegLeaf(pGpu, pIntr, arg0, arg1, arg2)

NvBool intrIsVectorPending_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, struct THREAD_STATE_NODE *pThreadState);

@ -0,0 +1,213 @@
#define NVOC_KERNEL_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_kernel_crashcat_engine_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xc37aef = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCrashCatEngine;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine;

void __nvoc_init_KernelCrashCatEngine(KernelCrashCatEngine*, RmHalspecOwner* );
void __nvoc_init_funcTable_KernelCrashCatEngine(KernelCrashCatEngine*, RmHalspecOwner* );
NV_STATUS __nvoc_ctor_KernelCrashCatEngine(KernelCrashCatEngine*, RmHalspecOwner* );
void __nvoc_init_dataField_KernelCrashCatEngine(KernelCrashCatEngine*, RmHalspecOwner* );
void __nvoc_dtor_KernelCrashCatEngine(KernelCrashCatEngine*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelCrashCatEngine;

static const struct NVOC_RTTI __nvoc_rtti_KernelCrashCatEngine_KernelCrashCatEngine = {
    /*pClassDef=*/ &__nvoc_class_def_KernelCrashCatEngine,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelCrashCatEngine,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine = {
    /*pClassDef=*/ &__nvoc_class_def_CrashCatEngine,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(KernelCrashCatEngine, __nvoc_base_CrashCatEngine),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_KernelCrashCatEngine = {
    /*numRelatives=*/ 2,
    /*relatives=*/ {
        &__nvoc_rtti_KernelCrashCatEngine_KernelCrashCatEngine,
        &__nvoc_rtti_KernelCrashCatEngine_CrashCatEngine,
    },
};

// Not instantiable because it's not derived from class "Object"
// Not instantiable because it's an abstract class with following pure virtual functions:
//  kcrashcatEngineRegRead
//  kcrashcatEngineRegWrite
//  kcrashcatEngineMaskDmemAddr
const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCrashCatEngine =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(KernelCrashCatEngine),
        /*classId=*/ classId(KernelCrashCatEngine),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "KernelCrashCatEngine",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL,
    /*pCastInfo=*/ &__nvoc_castinfo_KernelCrashCatEngine,
    /*pExportInfo=*/ &__nvoc_export_info_KernelCrashCatEngine
};

static NvBool __nvoc_thunk_KernelCrashCatEngine_crashcatEngineConfigured(struct CrashCatEngine *arg0) {
    return kcrashcatEngineConfigured((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset));
}
|
||||
|
||||
static void __nvoc_thunk_KernelCrashCatEngine_crashcatEngineUnload(struct CrashCatEngine *arg0) {
|
||||
kcrashcatEngineUnload((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset));
|
||||
}
|
||||
|
||||
static void __nvoc_thunk_KernelCrashCatEngine_crashcatEngineVprintf(struct CrashCatEngine *arg0, NvBool bReportStart, const char *fmt, va_list args) {
|
||||
kcrashcatEngineVprintf((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset), bReportStart, fmt, args);
|
||||
}
|
||||
|
||||
static NvU32 __nvoc_thunk_KernelCrashCatEngine_crashcatEnginePriRead(struct CrashCatEngine *arg0, NvU32 offset) {
|
||||
return kcrashcatEnginePriRead((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset), offset);
|
||||
}
|
||||
|
||||
static void __nvoc_thunk_KernelCrashCatEngine_crashcatEnginePriWrite(struct CrashCatEngine *arg0, NvU32 offset, NvU32 data) {
|
||||
kcrashcatEnginePriWrite((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset), offset, data);
|
||||
}
|
||||
|
||||
static void *__nvoc_thunk_KernelCrashCatEngine_crashcatEngineMapBufferDescriptor(struct CrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc) {
|
||||
return kcrashcatEngineMapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset), pBufDesc);
|
||||
}
|
||||
|
||||
static void __nvoc_thunk_KernelCrashCatEngine_crashcatEngineUnmapBufferDescriptor(struct CrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc) {
|
||||
kcrashcatEngineUnmapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset), pBufDesc);
|
||||
}
|
||||
|
||||
static void __nvoc_thunk_KernelCrashCatEngine_crashcatEngineSyncBufferDescriptor(struct CrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
|
||||
kcrashcatEngineSyncBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset), pBufDesc, offset, size);
|
||||
}
|
||||
|
||||
static const NvU32 *__nvoc_thunk_KernelCrashCatEngine_crashcatEngineGetScratchOffsets(struct CrashCatEngine *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
|
||||
return kcrashcatEngineGetScratchOffsets((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset), scratchGroupId);
|
||||
}
|
||||
|
||||
static NvU32 __nvoc_thunk_KernelCrashCatEngine_crashcatEngineGetWFL0Offset(struct CrashCatEngine *arg0) {
|
||||
return kcrashcatEngineGetWFL0Offset((struct KernelCrashCatEngine *)(((unsigned char *)arg0) - __nvoc_rtti_KernelCrashCatEngine_CrashCatEngine.offset));
|
||||
}
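
/*
 * A minimal standalone model of the pointer math used by the thunks above,
 * assuming an embedded base at a nonzero offset; the ExampleBase and
 * ExampleDerived names here are hypothetical, not NVOC types. Each thunk
 * recovers the derived object from a base-class pointer by subtracting the
 * offset recorded in the corresponding NVOC_RTTI entry.
 */
#include <stddef.h>

typedef struct ExampleBase { int baseField; } ExampleBase;

typedef struct ExampleDerived {
    int derivedField;
    ExampleBase base; /* embedded base, like CrashCatEngine inside KernelCrashCatEngine */
} ExampleDerived;

static inline ExampleDerived *exampleDerivedFromBase(ExampleBase *pBase)
{
    /* Mirrors: (KernelCrashCatEngine *)((unsigned char *)arg0 - rtti.offset) */
    return (ExampleDerived *)((unsigned char *)pBase - offsetof(ExampleDerived, base));
}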

const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelCrashCatEngine =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_CrashCatEngine(CrashCatEngine*);
void __nvoc_dtor_KernelCrashCatEngine(KernelCrashCatEngine *pThis) {
    __nvoc_dtor_CrashCatEngine(&pThis->__nvoc_base_CrashCatEngine);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_KernelCrashCatEngine(KernelCrashCatEngine *pThis, RmHalspecOwner *pRmhalspecowner) {
    ChipHal *chipHal = &pRmhalspecowner->chipHal;
    const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
    PORT_UNREFERENCED_VARIABLE(chipHal);
    PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
}

NV_STATUS __nvoc_ctor_CrashCatEngine(CrashCatEngine* );
NV_STATUS __nvoc_ctor_KernelCrashCatEngine(KernelCrashCatEngine *pThis, RmHalspecOwner *pRmhalspecowner) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_CrashCatEngine(&pThis->__nvoc_base_CrashCatEngine);
    if (status != NV_OK) goto __nvoc_ctor_KernelCrashCatEngine_fail_CrashCatEngine;
    __nvoc_init_dataField_KernelCrashCatEngine(pThis, pRmhalspecowner);
    goto __nvoc_ctor_KernelCrashCatEngine_exit; // Success

__nvoc_ctor_KernelCrashCatEngine_fail_CrashCatEngine:
__nvoc_ctor_KernelCrashCatEngine_exit:

    return status;
}

static void __nvoc_init_funcTable_KernelCrashCatEngine_1(KernelCrashCatEngine *pThis, RmHalspecOwner *pRmhalspecowner) {
    ChipHal *chipHal = &pRmhalspecowner->chipHal;
    const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
    PORT_UNREFERENCED_VARIABLE(chipHal);
    PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);

    pThis->__kcrashcatEngineConfigured__ = &kcrashcatEngineConfigured_IMPL;

    pThis->__kcrashcatEngineUnload__ = &kcrashcatEngineUnload_IMPL;

    pThis->__kcrashcatEngineVprintf__ = &kcrashcatEngineVprintf_IMPL;

    pThis->__kcrashcatEngineRegRead__ = NULL;

    pThis->__kcrashcatEngineRegWrite__ = NULL;

    pThis->__kcrashcatEngineMaskDmemAddr__ = NULL;

    pThis->__kcrashcatEnginePriRead__ = &kcrashcatEnginePriRead_IMPL;

    pThis->__kcrashcatEnginePriWrite__ = &kcrashcatEnginePriWrite_IMPL;

    pThis->__kcrashcatEngineMapBufferDescriptor__ = &kcrashcatEngineMapBufferDescriptor_IMPL;

    pThis->__kcrashcatEngineUnmapBufferDescriptor__ = &kcrashcatEngineUnmapBufferDescriptor_IMPL;

    pThis->__kcrashcatEngineSyncBufferDescriptor__ = &kcrashcatEngineSyncBufferDescriptor_IMPL;

    // Hal function -- kcrashcatEngineReadDmem
    pThis->__kcrashcatEngineReadDmem__ = &kcrashcatEngineReadDmem_TU102;

    pThis->__kcrashcatEngineReadEmem__ = &kcrashcatEngineReadEmem_2fced3;

    // Hal function -- kcrashcatEngineGetScratchOffsets
    pThis->__kcrashcatEngineGetScratchOffsets__ = &kcrashcatEngineGetScratchOffsets_TU102;

    // Hal function -- kcrashcatEngineGetWFL0Offset
    pThis->__kcrashcatEngineGetWFL0Offset__ = &kcrashcatEngineGetWFL0Offset_TU102;

    pThis->__nvoc_base_CrashCatEngine.__crashcatEngineConfigured__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineConfigured;

    pThis->__nvoc_base_CrashCatEngine.__crashcatEngineUnload__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineUnload;

    pThis->__nvoc_base_CrashCatEngine.__crashcatEngineVprintf__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineVprintf;

    pThis->__nvoc_base_CrashCatEngine.__crashcatEnginePriRead__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEnginePriRead;

    pThis->__nvoc_base_CrashCatEngine.__crashcatEnginePriWrite__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEnginePriWrite;

    pThis->__nvoc_base_CrashCatEngine.__crashcatEngineMapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineMapBufferDescriptor;

    pThis->__nvoc_base_CrashCatEngine.__crashcatEngineUnmapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineUnmapBufferDescriptor;

    pThis->__nvoc_base_CrashCatEngine.__crashcatEngineSyncBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineSyncBufferDescriptor;

    pThis->__nvoc_base_CrashCatEngine.__crashcatEngineGetScratchOffsets__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineGetScratchOffsets;

    pThis->__nvoc_base_CrashCatEngine.__crashcatEngineGetWFL0Offset__ = &__nvoc_thunk_KernelCrashCatEngine_crashcatEngineGetWFL0Offset;
}

void __nvoc_init_funcTable_KernelCrashCatEngine(KernelCrashCatEngine *pThis, RmHalspecOwner *pRmhalspecowner) {
    __nvoc_init_funcTable_KernelCrashCatEngine_1(pThis, pRmhalspecowner);
}

void __nvoc_init_CrashCatEngine(CrashCatEngine*);
void __nvoc_init_KernelCrashCatEngine(KernelCrashCatEngine *pThis, RmHalspecOwner *pRmhalspecowner) {
    pThis->__nvoc_pbase_KernelCrashCatEngine = pThis;
    pThis->__nvoc_pbase_CrashCatEngine = &pThis->__nvoc_base_CrashCatEngine;
    __nvoc_init_CrashCatEngine(&pThis->__nvoc_base_CrashCatEngine);
    __nvoc_init_funcTable_KernelCrashCatEngine(pThis, pRmhalspecowner);
}
@@ -0,0 +1,274 @@
#ifndef _G_KERNEL_CRASHCAT_ENGINE_NVOC_H_
#define _G_KERNEL_CRASHCAT_ENGINE_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "g_kernel_crashcat_engine_nvoc.h"

#ifndef KERNEL_CRASHCAT_ENGINE_H
#define KERNEL_CRASHCAT_ENGINE_H

#include "containers/map.h"
#include "core/core.h"
#include "crashcat/crashcat_engine.h"
#include "gpu/gpu_halspec.h"
#include "gpu/mem_mgr/mem_desc.h"

typedef struct KernelCrashCatEngineConfig
{
    const char *pName; // Engine name passed to nvErrorLog_va() for crash reports
    NvU32 allocQueueSize; // Size of the system memory buffer to allocate for the CrashCat queue
    NvU32 errorId; // Error ID passed to nvErrorLog_va() for crash reports
    NvU8 dmemPort; // DMEM port allocated for CrashCat usage
    NvBool bEnable; // Enable CrashCat monitoring for the engine
} KernelCrashCatEngineConfig;
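
/*
 * A hedged sketch of how an engine might fill in this config and hand it to
 * kcrashcatEngineConfigure(), declared further below in this header. The
 * name, error ID, queue size, and DMEM port are hypothetical placeholder
 * values, not values taken from the driver:
 *
 *     KernelCrashCatEngineConfig engConfig = {0};
 *     engConfig.bEnable = NV_TRUE;        // opt the engine into CrashCat monitoring
 *     engConfig.pName = "EXAMPLE";        // hypothetical name for crash reports
 *     engConfig.errorId = 0;              // hypothetical error ID for nvErrorLog_va()
 *     engConfig.allocQueueSize = 0x1000;  // hypothetical 4KB system-memory queue
 *     engConfig.dmemPort = 0;             // hypothetical DMEM port for CrashCat reads
 *     status = kcrashcatEngineConfigure(pKernelCrashCatEng, &engConfig);
 */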

/*!
 * Base implementation of CrashCatEngine in RM.
 */
#ifdef NVOC_KERNEL_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct KernelCrashCatEngine {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct CrashCatEngine __nvoc_base_CrashCatEngine;
    struct CrashCatEngine *__nvoc_pbase_CrashCatEngine;
    struct KernelCrashCatEngine *__nvoc_pbase_KernelCrashCatEngine;
    NvBool (*__kcrashcatEngineConfigured__)(struct KernelCrashCatEngine *);
    void (*__kcrashcatEngineUnload__)(struct KernelCrashCatEngine *);
    void (*__kcrashcatEngineVprintf__)(struct KernelCrashCatEngine *, NvBool, const char *, va_list);
    NvU32 (*__kcrashcatEngineRegRead__)(struct OBJGPU *, struct KernelCrashCatEngine *, NvU32);
    void (*__kcrashcatEngineRegWrite__)(struct OBJGPU *, struct KernelCrashCatEngine *, NvU32, NvU32);
    NvU32 (*__kcrashcatEngineMaskDmemAddr__)(struct OBJGPU *, struct KernelCrashCatEngine *, NvU32);
    NvU32 (*__kcrashcatEnginePriRead__)(struct KernelCrashCatEngine *, NvU32);
    void (*__kcrashcatEnginePriWrite__)(struct KernelCrashCatEngine *, NvU32, NvU32);
    void *(*__kcrashcatEngineMapBufferDescriptor__)(struct KernelCrashCatEngine *, CrashCatBufferDescriptor *);
    void (*__kcrashcatEngineUnmapBufferDescriptor__)(struct KernelCrashCatEngine *, CrashCatBufferDescriptor *);
    void (*__kcrashcatEngineSyncBufferDescriptor__)(struct KernelCrashCatEngine *, CrashCatBufferDescriptor *, NvU32, NvU32);
    void (*__kcrashcatEngineReadDmem__)(struct KernelCrashCatEngine *, NvU32, NvU32, void *);
    void (*__kcrashcatEngineReadEmem__)(struct KernelCrashCatEngine *, NvU64, NvU64, void *);
    const NvU32 *(*__kcrashcatEngineGetScratchOffsets__)(struct KernelCrashCatEngine *, NV_CRASHCAT_SCRATCH_GROUP_ID);
    NvU32 (*__kcrashcatEngineGetWFL0Offset__)(struct KernelCrashCatEngine *);
    NvBool PRIVATE_FIELD(bConfigured);
    MEMORY_DESCRIPTOR *PRIVATE_FIELD(pQueueMemDesc);
    const char *PRIVATE_FIELD(pName);
    NvU32 PRIVATE_FIELD(errorId);
    struct OBJGPU *PRIVATE_FIELD(pGpu);
    NvU32 PRIVATE_FIELD(dmemPort);
    char PRIVATE_FIELD(printBuffer)[512];
    char PRIVATE_FIELD(fmtBuffer)[512];
};
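
/*
 * A short sketch of how the PRIVATE_FIELD() wrapper above appears to behave:
 * fields such as bConfigured are only directly usable by translation units
 * that define the *_PRIVATE_ACCESS_ALLOWED macro before including this header
 * (the generated g_kernel_crashcat_engine_nvoc.c does exactly that); in other
 * translation units NVOC_PRIVATE_FIELD() stands in and the state is reached
 * through the dispatchers instead. Hypothetical usage:
 *
 *     // With private access (mirrors the generated .c file):
 *     #define NVOC_KERNEL_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
 *     #include "g_kernel_crashcat_engine_nvoc.h"
 *     // ... pKernelCrashCatEng->bConfigured ...
 *
 *     // Without private access, query via the virtual interface:
 *     NvBool bUp = kcrashcatEngineConfigured(pKernelCrashCatEng);
 */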

#ifndef __NVOC_CLASS_KernelCrashCatEngine_TYPEDEF__
#define __NVOC_CLASS_KernelCrashCatEngine_TYPEDEF__
typedef struct KernelCrashCatEngine KernelCrashCatEngine;
#endif /* __NVOC_CLASS_KernelCrashCatEngine_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelCrashCatEngine
#define __nvoc_class_id_KernelCrashCatEngine 0xc37aef
#endif /* __nvoc_class_id_KernelCrashCatEngine */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCrashCatEngine;

#define __staticCast_KernelCrashCatEngine(pThis) \
    ((pThis)->__nvoc_pbase_KernelCrashCatEngine)

#ifdef __nvoc_kernel_crashcat_engine_h_disabled
#define __dynamicCast_KernelCrashCatEngine(pThis) ((KernelCrashCatEngine*)NULL)
#else //__nvoc_kernel_crashcat_engine_h_disabled
#define __dynamicCast_KernelCrashCatEngine(pThis) \
    ((KernelCrashCatEngine*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelCrashCatEngine)))
#endif //__nvoc_kernel_crashcat_engine_h_disabled


NV_STATUS __nvoc_objCreateDynamic_KernelCrashCatEngine(KernelCrashCatEngine**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_KernelCrashCatEngine(KernelCrashCatEngine**, Dynamic*, NvU32);
#define __objCreate_KernelCrashCatEngine(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_KernelCrashCatEngine((ppNewObj), staticCast((pParent), Dynamic), (createFlags))

#define kcrashcatEngineConfigured(arg0) kcrashcatEngineConfigured_DISPATCH(arg0)
#define kcrashcatEngineUnload(arg0) kcrashcatEngineUnload_DISPATCH(arg0)
#define kcrashcatEngineVprintf(arg0, bReportStart, fmt, args) kcrashcatEngineVprintf_DISPATCH(arg0, bReportStart, fmt, args)
#define kcrashcatEngineRegRead(pGpu, arg0, offset) kcrashcatEngineRegRead_DISPATCH(pGpu, arg0, offset)
#define kcrashcatEngineRegWrite(pGpu, arg0, offset, data) kcrashcatEngineRegWrite_DISPATCH(pGpu, arg0, offset, data)
#define kcrashcatEngineMaskDmemAddr(pGpu, arg0, addr) kcrashcatEngineMaskDmemAddr_DISPATCH(pGpu, arg0, addr)
#define kcrashcatEnginePriRead(arg0, offset) kcrashcatEnginePriRead_DISPATCH(arg0, offset)
#define kcrashcatEnginePriWrite(arg0, offset, data) kcrashcatEnginePriWrite_DISPATCH(arg0, offset, data)
#define kcrashcatEngineMapBufferDescriptor(arg0, pBufDesc) kcrashcatEngineMapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define kcrashcatEngineUnmapBufferDescriptor(arg0, pBufDesc) kcrashcatEngineUnmapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define kcrashcatEngineSyncBufferDescriptor(arg0, pBufDesc, offset, size) kcrashcatEngineSyncBufferDescriptor_DISPATCH(arg0, pBufDesc, offset, size)
#define kcrashcatEngineReadDmem(arg0, offset, size, pBuf) kcrashcatEngineReadDmem_DISPATCH(arg0, offset, size, pBuf)
#define kcrashcatEngineReadDmem_HAL(arg0, offset, size, pBuf) kcrashcatEngineReadDmem_DISPATCH(arg0, offset, size, pBuf)
#define kcrashcatEngineReadEmem(arg0, offset, size, pBuf) kcrashcatEngineReadEmem_DISPATCH(arg0, offset, size, pBuf)
#define kcrashcatEngineReadEmem_HAL(arg0, offset, size, pBuf) kcrashcatEngineReadEmem_DISPATCH(arg0, offset, size, pBuf)
#define kcrashcatEngineGetScratchOffsets(arg0, scratchGroupId) kcrashcatEngineGetScratchOffsets_DISPATCH(arg0, scratchGroupId)
#define kcrashcatEngineGetScratchOffsets_HAL(arg0, scratchGroupId) kcrashcatEngineGetScratchOffsets_DISPATCH(arg0, scratchGroupId)
#define kcrashcatEngineGetWFL0Offset(arg0) kcrashcatEngineGetWFL0Offset_DISPATCH(arg0)
#define kcrashcatEngineGetWFL0Offset_HAL(arg0) kcrashcatEngineGetWFL0Offset_DISPATCH(arg0)
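
/*
 * The macros above route every call through the per-object function pointer
 * installed by __nvoc_init_funcTable_KernelCrashCatEngine_1(), so HAL
 * selection happens once at object initialization rather than at each call
 * site. A sketch of what a call expands to (pKernelCrashCatEng is a
 * hypothetical instance pointer):
 *
 *     kcrashcatEngineGetWFL0Offset(pKernelCrashCatEng)
 *     // expands to kcrashcatEngineGetWFL0Offset_DISPATCH(pKernelCrashCatEng)
 *     // which calls pKernelCrashCatEng->__kcrashcatEngineGetWFL0Offset__(...)
 *     // bound to kcrashcatEngineGetWFL0Offset_TU102 by the funcTable init
 *
 * The _HAL variants alias the same dispatch, which is why both map to
 * *_DISPATCH here.
 */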

NvBool kcrashcatEngineConfigured_IMPL(struct KernelCrashCatEngine *arg0);

static inline NvBool kcrashcatEngineConfigured_DISPATCH(struct KernelCrashCatEngine *arg0) {
    return arg0->__kcrashcatEngineConfigured__(arg0);
}

void kcrashcatEngineUnload_IMPL(struct KernelCrashCatEngine *arg0);

static inline void kcrashcatEngineUnload_DISPATCH(struct KernelCrashCatEngine *arg0) {
    arg0->__kcrashcatEngineUnload__(arg0);
}

void kcrashcatEngineVprintf_IMPL(struct KernelCrashCatEngine *arg0, NvBool bReportStart, const char *fmt, va_list args);

static inline void kcrashcatEngineVprintf_DISPATCH(struct KernelCrashCatEngine *arg0, NvBool bReportStart, const char *fmt, va_list args) {
    arg0->__kcrashcatEngineVprintf__(arg0, bReportStart, fmt, args);
}

static inline NvU32 kcrashcatEngineRegRead_DISPATCH(struct OBJGPU *pGpu, struct KernelCrashCatEngine *arg0, NvU32 offset) {
    return arg0->__kcrashcatEngineRegRead__(pGpu, arg0, offset);
}

static inline void kcrashcatEngineRegWrite_DISPATCH(struct OBJGPU *pGpu, struct KernelCrashCatEngine *arg0, NvU32 offset, NvU32 data) {
    arg0->__kcrashcatEngineRegWrite__(pGpu, arg0, offset, data);
}

static inline NvU32 kcrashcatEngineMaskDmemAddr_DISPATCH(struct OBJGPU *pGpu, struct KernelCrashCatEngine *arg0, NvU32 addr) {
    return arg0->__kcrashcatEngineMaskDmemAddr__(pGpu, arg0, addr);
}

NvU32 kcrashcatEnginePriRead_IMPL(struct KernelCrashCatEngine *arg0, NvU32 offset);

static inline NvU32 kcrashcatEnginePriRead_DISPATCH(struct KernelCrashCatEngine *arg0, NvU32 offset) {
    return arg0->__kcrashcatEnginePriRead__(arg0, offset);
}

void kcrashcatEnginePriWrite_IMPL(struct KernelCrashCatEngine *arg0, NvU32 offset, NvU32 data);

static inline void kcrashcatEnginePriWrite_DISPATCH(struct KernelCrashCatEngine *arg0, NvU32 offset, NvU32 data) {
    arg0->__kcrashcatEnginePriWrite__(arg0, offset, data);
}

void *kcrashcatEngineMapBufferDescriptor_IMPL(struct KernelCrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc);

static inline void *kcrashcatEngineMapBufferDescriptor_DISPATCH(struct KernelCrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc) {
    return arg0->__kcrashcatEngineMapBufferDescriptor__(arg0, pBufDesc);
}

void kcrashcatEngineUnmapBufferDescriptor_IMPL(struct KernelCrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc);

static inline void kcrashcatEngineUnmapBufferDescriptor_DISPATCH(struct KernelCrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc) {
    arg0->__kcrashcatEngineUnmapBufferDescriptor__(arg0, pBufDesc);
}

void kcrashcatEngineSyncBufferDescriptor_IMPL(struct KernelCrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size);

static inline void kcrashcatEngineSyncBufferDescriptor_DISPATCH(struct KernelCrashCatEngine *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
    arg0->__kcrashcatEngineSyncBufferDescriptor__(arg0, pBufDesc, offset, size);
}

void kcrashcatEngineReadDmem_TU102(struct KernelCrashCatEngine *arg0, NvU32 offset, NvU32 size, void *pBuf);

static inline void kcrashcatEngineReadDmem_DISPATCH(struct KernelCrashCatEngine *arg0, NvU32 offset, NvU32 size, void *pBuf) {
    arg0->__kcrashcatEngineReadDmem__(arg0, offset, size, pBuf);
}

static inline void kcrashcatEngineReadEmem_2fced3(struct KernelCrashCatEngine *arg0, NvU64 offset, NvU64 size, void *pBuf) {
    NV_ASSERT_PRECOMP(0);
}

static inline void kcrashcatEngineReadEmem_DISPATCH(struct KernelCrashCatEngine *arg0, NvU64 offset, NvU64 size, void *pBuf) {
    arg0->__kcrashcatEngineReadEmem__(arg0, offset, size, pBuf);
}

const NvU32 *kcrashcatEngineGetScratchOffsets_TU102(struct KernelCrashCatEngine *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId);

static inline const NvU32 *kcrashcatEngineGetScratchOffsets_DISPATCH(struct KernelCrashCatEngine *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
    return arg0->__kcrashcatEngineGetScratchOffsets__(arg0, scratchGroupId);
}

NvU32 kcrashcatEngineGetWFL0Offset_TU102(struct KernelCrashCatEngine *arg0);

static inline NvU32 kcrashcatEngineGetWFL0Offset_DISPATCH(struct KernelCrashCatEngine *arg0) {
    return arg0->__kcrashcatEngineGetWFL0Offset__(arg0);
}

NV_STATUS kcrashcatEngineConfigure_IMPL(struct KernelCrashCatEngine *arg0, KernelCrashCatEngineConfig *pEngConfig);

#ifdef __nvoc_kernel_crashcat_engine_h_disabled
static inline NV_STATUS kcrashcatEngineConfigure(struct KernelCrashCatEngine *arg0, KernelCrashCatEngineConfig *pEngConfig) {
    NV_ASSERT_FAILED_PRECOMP("KernelCrashCatEngine was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_crashcat_engine_h_disabled
#define kcrashcatEngineConfigure(arg0, pEngConfig) kcrashcatEngineConfigure_IMPL(arg0, pEngConfig)
#endif //__nvoc_kernel_crashcat_engine_h_disabled

MEMORY_DESCRIPTOR *kcrashcatEngineGetQueueMemDesc_IMPL(struct KernelCrashCatEngine *arg0);

#ifdef __nvoc_kernel_crashcat_engine_h_disabled
static inline MEMORY_DESCRIPTOR *kcrashcatEngineGetQueueMemDesc(struct KernelCrashCatEngine *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelCrashCatEngine was disabled!");
    return NULL;
}
#else //__nvoc_kernel_crashcat_engine_h_disabled
#define kcrashcatEngineGetQueueMemDesc(arg0) kcrashcatEngineGetQueueMemDesc_IMPL(arg0)
#endif //__nvoc_kernel_crashcat_engine_h_disabled

NV_STATUS kcrashcatEngineRegisterCrashBuffer_IMPL(struct KernelCrashCatEngine *arg0, MEMORY_DESCRIPTOR *arg1);

#ifdef __nvoc_kernel_crashcat_engine_h_disabled
static inline NV_STATUS kcrashcatEngineRegisterCrashBuffer(struct KernelCrashCatEngine *arg0, MEMORY_DESCRIPTOR *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelCrashCatEngine was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_crashcat_engine_h_disabled
#define kcrashcatEngineRegisterCrashBuffer(arg0, arg1) kcrashcatEngineRegisterCrashBuffer_IMPL(arg0, arg1)
#endif //__nvoc_kernel_crashcat_engine_h_disabled

void kcrashcatEngineUnregisterCrashBuffer_IMPL(struct KernelCrashCatEngine *arg0, MEMORY_DESCRIPTOR *arg1);

#ifdef __nvoc_kernel_crashcat_engine_h_disabled
static inline void kcrashcatEngineUnregisterCrashBuffer(struct KernelCrashCatEngine *arg0, MEMORY_DESCRIPTOR *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelCrashCatEngine was disabled!");
}
#else //__nvoc_kernel_crashcat_engine_h_disabled
#define kcrashcatEngineUnregisterCrashBuffer(arg0, arg1) kcrashcatEngineUnregisterCrashBuffer_IMPL(arg0, arg1)
#endif //__nvoc_kernel_crashcat_engine_h_disabled

#undef PRIVATE_FIELD


#endif // KERNEL_CRASHCAT_ENGINE_H

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_KERNEL_CRASHCAT_ENGINE_NVOC_H_
@@ -13,6 +13,10 @@ char __nvoc_class_id_uniqueness_check_0xb6b1af = 1;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCrashCatEngine;

void __nvoc_init_KernelFalcon(KernelFalcon*, RmHalspecOwner* );
void __nvoc_init_funcTable_KernelFalcon(KernelFalcon*, RmHalspecOwner* );
NV_STATUS __nvoc_ctor_KernelFalcon(KernelFalcon*, RmHalspecOwner* );
@@ -26,10 +30,24 @@ static const struct NVOC_RTTI __nvoc_rtti_KernelFalcon_KernelFalcon = {
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_KernelFalcon_CrashCatEngine = {
    /*pClassDef=*/ &__nvoc_class_def_CrashCatEngine,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(KernelFalcon, __nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine),
};

static const struct NVOC_RTTI __nvoc_rtti_KernelFalcon_KernelCrashCatEngine = {
    /*pClassDef=*/ &__nvoc_class_def_KernelCrashCatEngine,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(KernelFalcon, __nvoc_base_KernelCrashCatEngine),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_KernelFalcon = {
    /*numRelatives=*/ 1,
    /*numRelatives=*/ 3,
    /*relatives=*/ {
        &__nvoc_rtti_KernelFalcon_KernelFalcon,
        &__nvoc_rtti_KernelFalcon_KernelCrashCatEngine,
        &__nvoc_rtti_KernelFalcon_CrashCatEngine,
    },
};
@@ -51,13 +69,75 @@ const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon =
    /*pExportInfo=*/ &__nvoc_export_info_KernelFalcon
};

static NvU32 __nvoc_thunk_KernelFalcon_kcrashcatEngineRegRead(struct OBJGPU *pGpu, struct KernelCrashCatEngine *pKernelFlcn, NvU32 offset) {
    return kflcnRegRead(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) - __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), offset);
}

static void __nvoc_thunk_KernelFalcon_kcrashcatEngineRegWrite(struct OBJGPU *pGpu, struct KernelCrashCatEngine *pKernelFlcn, NvU32 offset, NvU32 data) {
    kflcnRegWrite(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) - __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), offset, data);
}

static NvU32 __nvoc_thunk_KernelFalcon_kcrashcatEngineMaskDmemAddr(struct OBJGPU *pGpu, struct KernelCrashCatEngine *pKernelFlcn, NvU32 addr) {
    return kflcnMaskDmemAddr(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) - __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), addr);
}

static void __nvoc_thunk_KernelCrashCatEngine_kflcnReadEmem(struct KernelFalcon *arg0, NvU64 offset, NvU64 size, void *pBuf) {
    kcrashcatEngineReadEmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), offset, size, pBuf);
}

static NvU32 __nvoc_thunk_KernelCrashCatEngine_kflcnGetWFL0Offset(struct KernelFalcon *arg0) {
    return kcrashcatEngineGetWFL0Offset((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset));
}

static const NvU32 *__nvoc_thunk_KernelCrashCatEngine_kflcnGetScratchOffsets(struct KernelFalcon *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
    return kcrashcatEngineGetScratchOffsets((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), scratchGroupId);
}

static void __nvoc_thunk_KernelCrashCatEngine_kflcnUnload(struct KernelFalcon *arg0) {
    kcrashcatEngineUnload((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset));
}

static NvBool __nvoc_thunk_KernelCrashCatEngine_kflcnConfigured(struct KernelFalcon *arg0) {
    return kcrashcatEngineConfigured((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset));
}

static NvU32 __nvoc_thunk_KernelCrashCatEngine_kflcnPriRead(struct KernelFalcon *arg0, NvU32 offset) {
    return kcrashcatEnginePriRead((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), offset);
}

static void __nvoc_thunk_KernelCrashCatEngine_kflcnVprintf(struct KernelFalcon *arg0, NvBool bReportStart, const char *fmt, va_list args) {
    kcrashcatEngineVprintf((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), bReportStart, fmt, args);
}

static void __nvoc_thunk_KernelCrashCatEngine_kflcnPriWrite(struct KernelFalcon *arg0, NvU32 offset, NvU32 data) {
    kcrashcatEnginePriWrite((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), offset, data);
}

static void __nvoc_thunk_KernelCrashCatEngine_kflcnSyncBufferDescriptor(struct KernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
    kcrashcatEngineSyncBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), pBufDesc, offset, size);
}

static void *__nvoc_thunk_KernelCrashCatEngine_kflcnMapBufferDescriptor(struct KernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
    return kcrashcatEngineMapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), pBufDesc);
}

static void __nvoc_thunk_KernelCrashCatEngine_kflcnUnmapBufferDescriptor(struct KernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
    kcrashcatEngineUnmapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), pBufDesc);
}

static void __nvoc_thunk_KernelCrashCatEngine_kflcnReadDmem(struct KernelFalcon *arg0, NvU32 offset, NvU32 size, void *pBuf) {
    kcrashcatEngineReadDmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset), offset, size, pBuf);
}
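
/*
 * Note the two thunk directions above, both using the same RTTI offset. The
 * downward thunks (base-to-derived, e.g.
 * __nvoc_thunk_KernelFalcon_kcrashcatEngineRegRead) subtract the offset so
 * the abstract register hooks of KernelCrashCatEngine land on the
 * KernelFalcon implementation; the upward thunks (derived-to-base, e.g.
 * __nvoc_thunk_KernelCrashCatEngine_kflcnReadDmem) add the offset so
 * KernelFalcon inherits the base-class implementation. In sketch form:
 *
 *     pKernelFlcn == (struct KernelFalcon *)((unsigned char *)pKernelCrashCatEng
 *                        - __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset);
 *     pKernelCrashCatEng == (struct KernelCrashCatEngine *)((unsigned char *)pKernelFlcn
 *                        + __nvoc_rtti_KernelFalcon_KernelCrashCatEngine.offset);
 */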

const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelFalcon =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_KernelCrashCatEngine(KernelCrashCatEngine*);
void __nvoc_dtor_KernelFalcon(KernelFalcon *pThis) {
    __nvoc_dtor_KernelCrashCatEngine(&pThis->__nvoc_base_KernelCrashCatEngine);
    PORT_UNREFERENCED_VARIABLE(pThis);
}
@@ -74,11 +154,15 @@ void __nvoc_init_dataField_KernelFalcon(KernelFalcon *pThis, RmHalspecOwner *pRm
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
}

NV_STATUS __nvoc_ctor_KernelCrashCatEngine(KernelCrashCatEngine* , RmHalspecOwner* );
NV_STATUS __nvoc_ctor_KernelFalcon(KernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_KernelCrashCatEngine(&pThis->__nvoc_base_KernelCrashCatEngine, pRmhalspecowner);
    if (status != NV_OK) goto __nvoc_ctor_KernelFalcon_fail_KernelCrashCatEngine;
    __nvoc_init_dataField_KernelFalcon(pThis, pRmhalspecowner);
    goto __nvoc_ctor_KernelFalcon_exit; // Success

__nvoc_ctor_KernelFalcon_fail_KernelCrashCatEngine:
__nvoc_ctor_KernelFalcon_exit:

    return status;
@@ -96,6 +180,12 @@ static void __nvoc_init_funcTable_KernelFalcon_1(KernelFalcon *pThis, RmHalspecO
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);

    // Hal function -- kflcnRegRead
    pThis->__kflcnRegRead__ = &kflcnRegRead_TU102;

    // Hal function -- kflcnRegWrite
    pThis->__kflcnRegWrite__ = &kflcnRegWrite_TU102;

    // Hal function -- kflcnIsRiscvActive
    if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */
    {
@@ -188,14 +278,48 @@ static void __nvoc_init_funcTable_KernelFalcon_1(KernelFalcon *pThis, RmHalspecO
    {
        pThis->__kflcnMaskDmemAddr__ = &kflcnMaskDmemAddr_GA100;
    }

    pThis->__nvoc_base_KernelCrashCatEngine.__kcrashcatEngineRegRead__ = &__nvoc_thunk_KernelFalcon_kcrashcatEngineRegRead;

    pThis->__nvoc_base_KernelCrashCatEngine.__kcrashcatEngineRegWrite__ = &__nvoc_thunk_KernelFalcon_kcrashcatEngineRegWrite;

    pThis->__nvoc_base_KernelCrashCatEngine.__kcrashcatEngineMaskDmemAddr__ = &__nvoc_thunk_KernelFalcon_kcrashcatEngineMaskDmemAddr;

    pThis->__kflcnReadEmem__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnReadEmem;

    pThis->__kflcnGetWFL0Offset__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnGetWFL0Offset;

    pThis->__kflcnGetScratchOffsets__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnGetScratchOffsets;

    pThis->__kflcnUnload__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnUnload;

    pThis->__kflcnConfigured__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnConfigured;

    pThis->__kflcnPriRead__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnPriRead;

    pThis->__kflcnVprintf__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnVprintf;

    pThis->__kflcnPriWrite__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnPriWrite;

    pThis->__kflcnSyncBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnSyncBufferDescriptor;

    pThis->__kflcnMapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnMapBufferDescriptor;

    pThis->__kflcnUnmapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnUnmapBufferDescriptor;

    pThis->__kflcnReadDmem__ = &__nvoc_thunk_KernelCrashCatEngine_kflcnReadDmem;
}

void __nvoc_init_funcTable_KernelFalcon(KernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) {
    __nvoc_init_funcTable_KernelFalcon_1(pThis, pRmhalspecowner);
}

void __nvoc_init_KernelCrashCatEngine(KernelCrashCatEngine*, RmHalspecOwner* );
void __nvoc_init_KernelFalcon(KernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) {
    pThis->__nvoc_pbase_KernelFalcon = pThis;
    pThis->__nvoc_pbase_CrashCatEngine = &pThis->__nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine;
    pThis->__nvoc_pbase_KernelCrashCatEngine = &pThis->__nvoc_base_KernelCrashCatEngine;
    __nvoc_init_KernelCrashCatEngine(&pThis->__nvoc_base_KernelCrashCatEngine, pRmhalspecowner);
    __nvoc_init_funcTable_KernelFalcon(pThis, pRmhalspecowner);
}
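
/*
 * After this init runs, each __nvoc_pbase_* pointer caches the address of the
 * corresponding base within the object, so a static cast is a simple field
 * read with no runtime search. A hedged sketch (pKernelFlcn is a
 * hypothetical, already-initialized instance):
 *
 *     struct KernelCrashCatEngine *pKernelCrashCatEng =
 *         pKernelFlcn->__nvoc_pbase_KernelCrashCatEngine;
 *     struct CrashCatEngine *pCrashCatEng =
 *         pKernelFlcn->__nvoc_pbase_CrashCatEngine;
 *
 * Base initialization also runs innermost-first: KernelCrashCatEngine (and,
 * within it, CrashCatEngine) is initialized before KernelFalcon's own
 * funcTable overrides are installed.
 */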
@@ -205,6 +329,10 @@ char __nvoc_class_id_uniqueness_check_0xabcf08 = 1;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_GenericKernelFalcon;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCrashCatEngine;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService;
@@ -224,6 +352,18 @@ static const struct NVOC_RTTI __nvoc_rtti_GenericKernelFalcon_GenericKernelFalco
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_GenericKernelFalcon_CrashCatEngine = {
    /*pClassDef=*/ &__nvoc_class_def_CrashCatEngine,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(GenericKernelFalcon, __nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine),
};

static const struct NVOC_RTTI __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine = {
    /*pClassDef=*/ &__nvoc_class_def_KernelCrashCatEngine,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(GenericKernelFalcon, __nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine),
};

static const struct NVOC_RTTI __nvoc_rtti_GenericKernelFalcon_KernelFalcon = {
    /*pClassDef=*/ &__nvoc_class_def_KernelFalcon,
    /*dtor=*/ &__nvoc_destructFromBase,
@@ -243,12 +383,14 @@ static const struct NVOC_RTTI __nvoc_rtti_GenericKernelFalcon_Object = {
};

static const struct NVOC_CASTINFO __nvoc_castinfo_GenericKernelFalcon = {
    /*numRelatives=*/ 4,
    /*numRelatives=*/ 6,
    /*relatives=*/ {
        &__nvoc_rtti_GenericKernelFalcon_GenericKernelFalcon,
        &__nvoc_rtti_GenericKernelFalcon_Object,
        &__nvoc_rtti_GenericKernelFalcon_IntrService,
        &__nvoc_rtti_GenericKernelFalcon_KernelFalcon,
        &__nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine,
        &__nvoc_rtti_GenericKernelFalcon_CrashCatEngine,
    },
};
@@ -279,14 +421,74 @@ static NV_STATUS __nvoc_thunk_GenericKernelFalcon_intrservServiceNotificationInt
    return gkflcnServiceNotificationInterrupt(arg0, (struct GenericKernelFalcon *)(((unsigned char *)arg1) - __nvoc_rtti_GenericKernelFalcon_IntrService.offset), arg2);
}

static void __nvoc_thunk_KernelCrashCatEngine_gkflcnReadEmem(struct GenericKernelFalcon *arg0, NvU64 offset, NvU64 size, void *pBuf) {
    kcrashcatEngineReadEmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), offset, size, pBuf);
}

static NvU32 __nvoc_thunk_KernelCrashCatEngine_gkflcnGetWFL0Offset(struct GenericKernelFalcon *arg0) {
    return kcrashcatEngineGetWFL0Offset((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset));
}

static void __nvoc_thunk_KernelCrashCatEngine_gkflcnUnload(struct GenericKernelFalcon *arg0) {
    kcrashcatEngineUnload((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset));
}

static NvBool __nvoc_thunk_KernelCrashCatEngine_gkflcnConfigured(struct GenericKernelFalcon *arg0) {
    return kcrashcatEngineConfigured((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset));
}

static NvU32 __nvoc_thunk_KernelCrashCatEngine_gkflcnPriRead(struct GenericKernelFalcon *arg0, NvU32 offset) {
    return kcrashcatEnginePriRead((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), offset);
}

static const NvU32 *__nvoc_thunk_KernelCrashCatEngine_gkflcnGetScratchOffsets(struct GenericKernelFalcon *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
    return kcrashcatEngineGetScratchOffsets((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), scratchGroupId);
}

static void __nvoc_thunk_KernelFalcon_gkflcnRegWrite(struct OBJGPU *pGpu, struct GenericKernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data) {
    kflcnRegWrite(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_GenericKernelFalcon_KernelFalcon.offset), offset, data);
}

static NvU32 __nvoc_thunk_KernelFalcon_gkflcnMaskDmemAddr(struct OBJGPU *pGpu, struct GenericKernelFalcon *pKernelFlcn, NvU32 addr) {
    return kflcnMaskDmemAddr(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_GenericKernelFalcon_KernelFalcon.offset), addr);
}

static void __nvoc_thunk_KernelCrashCatEngine_gkflcnVprintf(struct GenericKernelFalcon *arg0, NvBool bReportStart, const char *fmt, va_list args) {
    kcrashcatEngineVprintf((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), bReportStart, fmt, args);
}

static NvBool __nvoc_thunk_IntrService_gkflcnClearInterrupt(struct OBJGPU *pGpu, struct GenericKernelFalcon *pIntrService, IntrServiceClearInterruptArguments *pParams) {
    return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_GenericKernelFalcon_IntrService.offset), pParams);
}

static void __nvoc_thunk_KernelCrashCatEngine_gkflcnPriWrite(struct GenericKernelFalcon *arg0, NvU32 offset, NvU32 data) {
    kcrashcatEnginePriWrite((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), offset, data);
}

static void *__nvoc_thunk_KernelCrashCatEngine_gkflcnMapBufferDescriptor(struct GenericKernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
    return kcrashcatEngineMapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), pBufDesc);
}

static void __nvoc_thunk_KernelCrashCatEngine_gkflcnSyncBufferDescriptor(struct GenericKernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
    kcrashcatEngineSyncBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), pBufDesc, offset, size);
}

static NvU32 __nvoc_thunk_KernelFalcon_gkflcnRegRead(struct OBJGPU *pGpu, struct GenericKernelFalcon *pKernelFlcn, NvU32 offset) {
    return kflcnRegRead(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_GenericKernelFalcon_KernelFalcon.offset), offset);
}

static void __nvoc_thunk_KernelCrashCatEngine_gkflcnUnmapBufferDescriptor(struct GenericKernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
    kcrashcatEngineUnmapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), pBufDesc);
}

static NvU32 __nvoc_thunk_IntrService_gkflcnServiceInterrupt(struct OBJGPU *pGpu, struct GenericKernelFalcon *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
    return intrservServiceInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_GenericKernelFalcon_IntrService.offset), pParams);
}

static void __nvoc_thunk_KernelCrashCatEngine_gkflcnReadDmem(struct GenericKernelFalcon *arg0, NvU32 offset, NvU32 size, void *pBuf) {
    kcrashcatEngineReadDmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_GenericKernelFalcon_KernelCrashCatEngine.offset), offset, size, pBuf);
}

const struct NVOC_EXPORT_INFO __nvoc_export_info_GenericKernelFalcon =
{
    /*numEntries=*/ 0,
@@ -351,9 +553,39 @@ static void __nvoc_init_funcTable_GenericKernelFalcon_1(GenericKernelFalcon *pTh

    pThis->__nvoc_base_IntrService.__intrservServiceNotificationInterrupt__ = &__nvoc_thunk_GenericKernelFalcon_intrservServiceNotificationInterrupt;

    pThis->__gkflcnReadEmem__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnReadEmem;

    pThis->__gkflcnGetWFL0Offset__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnGetWFL0Offset;

    pThis->__gkflcnUnload__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnUnload;

    pThis->__gkflcnConfigured__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnConfigured;

    pThis->__gkflcnPriRead__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnPriRead;

    pThis->__gkflcnGetScratchOffsets__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnGetScratchOffsets;

    pThis->__gkflcnRegWrite__ = &__nvoc_thunk_KernelFalcon_gkflcnRegWrite;

    pThis->__gkflcnMaskDmemAddr__ = &__nvoc_thunk_KernelFalcon_gkflcnMaskDmemAddr;

    pThis->__gkflcnVprintf__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnVprintf;

    pThis->__gkflcnClearInterrupt__ = &__nvoc_thunk_IntrService_gkflcnClearInterrupt;

    pThis->__gkflcnPriWrite__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnPriWrite;

    pThis->__gkflcnMapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnMapBufferDescriptor;

    pThis->__gkflcnSyncBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnSyncBufferDescriptor;

    pThis->__gkflcnRegRead__ = &__nvoc_thunk_KernelFalcon_gkflcnRegRead;

    pThis->__gkflcnUnmapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnUnmapBufferDescriptor;

    pThis->__gkflcnServiceInterrupt__ = &__nvoc_thunk_IntrService_gkflcnServiceInterrupt;

    pThis->__gkflcnReadDmem__ = &__nvoc_thunk_KernelCrashCatEngine_gkflcnReadDmem;
}

void __nvoc_init_funcTable_GenericKernelFalcon(GenericKernelFalcon *pThis) {
@@ -365,6 +597,8 @@ void __nvoc_init_IntrService(IntrService*);
void __nvoc_init_Object(Object*);
void __nvoc_init_GenericKernelFalcon(GenericKernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) {
    pThis->__nvoc_pbase_GenericKernelFalcon = pThis;
    pThis->__nvoc_pbase_CrashCatEngine = &pThis->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine;
    pThis->__nvoc_pbase_KernelCrashCatEngine = &pThis->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine;
    pThis->__nvoc_pbase_KernelFalcon = &pThis->__nvoc_base_KernelFalcon;
    pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
@@ -41,6 +41,7 @@ extern "C" {

#include "core/core.h"
#include "gpu/falcon/falcon_common.h"
#include "gpu/falcon/kernel_crashcat_engine.h"
#include "gpu/intr/intr_service.h"

struct KernelChannel;
@@ -67,6 +68,8 @@ typedef struct KernelFalconEngineConfig {
    NvU32 ctxAttr; // Memory attributes used for context buffers
    NvU32 ctxBufferSize; // Context buffer size in bytes
    NvU32 addrSpaceList; // index into ADDRLIST array in mem_desc.h

    KernelCrashCatEngineConfig crashcatEngConfig;
} KernelFalconEngineConfig;

/*!
@@ -79,7 +82,12 @@ typedef struct KernelFalconEngineConfig {
#endif
struct KernelFalcon {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct KernelCrashCatEngine __nvoc_base_KernelCrashCatEngine;
    struct CrashCatEngine *__nvoc_pbase_CrashCatEngine;
    struct KernelCrashCatEngine *__nvoc_pbase_KernelCrashCatEngine;
    struct KernelFalcon *__nvoc_pbase_KernelFalcon;
    NvU32 (*__kflcnRegRead__)(struct OBJGPU *, struct KernelFalcon *, NvU32);
    void (*__kflcnRegWrite__)(struct OBJGPU *, struct KernelFalcon *, NvU32, NvU32);
    NvBool (*__kflcnIsRiscvActive__)(struct OBJGPU *, struct KernelFalcon *);
    void (*__kflcnRiscvProgramBcr__)(struct OBJGPU *, struct KernelFalcon *, NvBool);
    void (*__kflcnSwitchToFalcon__)(struct OBJGPU *, struct KernelFalcon *);
@@ -90,6 +98,18 @@ struct KernelFalcon {
    void (*__kflcnIntrRetrigger__)(struct OBJGPU *, struct KernelFalcon *);
    NvU32 (*__kflcnMaskImemAddr__)(struct OBJGPU *, struct KernelFalcon *, NvU32);
    NvU32 (*__kflcnMaskDmemAddr__)(struct OBJGPU *, struct KernelFalcon *, NvU32);
    void (*__kflcnReadEmem__)(struct KernelFalcon *, NvU64, NvU64, void *);
    NvU32 (*__kflcnGetWFL0Offset__)(struct KernelFalcon *);
    const NvU32 *(*__kflcnGetScratchOffsets__)(struct KernelFalcon *, NV_CRASHCAT_SCRATCH_GROUP_ID);
    void (*__kflcnUnload__)(struct KernelFalcon *);
    NvBool (*__kflcnConfigured__)(struct KernelFalcon *);
    NvU32 (*__kflcnPriRead__)(struct KernelFalcon *, NvU32);
    void (*__kflcnVprintf__)(struct KernelFalcon *, NvBool, const char *, va_list);
    void (*__kflcnPriWrite__)(struct KernelFalcon *, NvU32, NvU32);
    void (*__kflcnSyncBufferDescriptor__)(struct KernelFalcon *, CrashCatBufferDescriptor *, NvU32, NvU32);
    void *(*__kflcnMapBufferDescriptor__)(struct KernelFalcon *, CrashCatBufferDescriptor *);
    void (*__kflcnUnmapBufferDescriptor__)(struct KernelFalcon *, CrashCatBufferDescriptor *);
    void (*__kflcnReadDmem__)(struct KernelFalcon *, NvU32, NvU32, void *);
    NvU32 registerBase;
    NvU32 riscvRegisterBase;
    NvU32 fbifBase;
@@ -130,6 +150,10 @@ NV_STATUS __nvoc_objCreate_KernelFalcon(KernelFalcon**, Dynamic*, NvU32);
#define __objCreate_KernelFalcon(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_KernelFalcon((ppNewObj), staticCast((pParent), Dynamic), (createFlags))

#define kflcnRegRead(pGpu, pKernelFlcn, offset) kflcnRegRead_DISPATCH(pGpu, pKernelFlcn, offset)
#define kflcnRegRead_HAL(pGpu, pKernelFlcn, offset) kflcnRegRead_DISPATCH(pGpu, pKernelFlcn, offset)
#define kflcnRegWrite(pGpu, pKernelFlcn, offset, data) kflcnRegWrite_DISPATCH(pGpu, pKernelFlcn, offset, data)
#define kflcnRegWrite_HAL(pGpu, pKernelFlcn, offset, data) kflcnRegWrite_DISPATCH(pGpu, pKernelFlcn, offset, data)
#define kflcnIsRiscvActive(pGpu, pKernelFlcn) kflcnIsRiscvActive_DISPATCH(pGpu, pKernelFlcn)
#define kflcnIsRiscvActive_HAL(pGpu, pKernelFlcn) kflcnIsRiscvActive_DISPATCH(pGpu, pKernelFlcn)
#define kflcnRiscvProgramBcr(pGpu, pKernelFlcn, bBRFetch) kflcnRiscvProgramBcr_DISPATCH(pGpu, pKernelFlcn, bBRFetch)
@@ -149,33 +173,18 @@ NV_STATUS __nvoc_objCreate_KernelFalcon(KernelFalcon**, Dynamic*, NvU32);
#define kflcnMaskImemAddr_HAL(pGpu, pKernelFlcn, addr) kflcnMaskImemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define kflcnMaskDmemAddr(pGpu, pKernelFlcn, addr) kflcnMaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define kflcnMaskDmemAddr_HAL(pGpu, pKernelFlcn, addr) kflcnMaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
NvU32 kflcnRegRead_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset);


#ifdef __nvoc_kernel_falcon_h_disabled
static inline NvU32 kflcnRegRead(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset) {
    NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
    return 0;
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnRegRead(pGpu, pKernelFlcn, offset) kflcnRegRead_TU102(pGpu, pKernelFlcn, offset)
#endif //__nvoc_kernel_falcon_h_disabled

#define kflcnRegRead_HAL(pGpu, pKernelFlcn, offset) kflcnRegRead(pGpu, pKernelFlcn, offset)

void kflcnRegWrite_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data);


#ifdef __nvoc_kernel_falcon_h_disabled
static inline void kflcnRegWrite(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data) {
    NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!");
}
#else //__nvoc_kernel_falcon_h_disabled
#define kflcnRegWrite(pGpu, pKernelFlcn, offset, data) kflcnRegWrite_TU102(pGpu, pKernelFlcn, offset, data)
#endif //__nvoc_kernel_falcon_h_disabled

#define kflcnRegWrite_HAL(pGpu, pKernelFlcn, offset, data) kflcnRegWrite(pGpu, pKernelFlcn, offset, data)

#define kflcnReadEmem(arg0, offset, size, pBuf) kflcnReadEmem_DISPATCH(arg0, offset, size, pBuf)
#define kflcnGetWFL0Offset(arg0) kflcnGetWFL0Offset_DISPATCH(arg0)
#define kflcnGetScratchOffsets(arg0, scratchGroupId) kflcnGetScratchOffsets_DISPATCH(arg0, scratchGroupId)
#define kflcnUnload(arg0) kflcnUnload_DISPATCH(arg0)
#define kflcnConfigured(arg0) kflcnConfigured_DISPATCH(arg0)
#define kflcnPriRead(arg0, offset) kflcnPriRead_DISPATCH(arg0, offset)
#define kflcnVprintf(arg0, bReportStart, fmt, args) kflcnVprintf_DISPATCH(arg0, bReportStart, fmt, args)
#define kflcnPriWrite(arg0, offset, data) kflcnPriWrite_DISPATCH(arg0, offset, data)
#define kflcnSyncBufferDescriptor(arg0, pBufDesc, offset, size) kflcnSyncBufferDescriptor_DISPATCH(arg0, pBufDesc, offset, size)
#define kflcnMapBufferDescriptor(arg0, pBufDesc) kflcnMapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define kflcnUnmapBufferDescriptor(arg0, pBufDesc) kflcnUnmapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define kflcnReadDmem(arg0, offset, size, pBuf) kflcnReadDmem_DISPATCH(arg0, offset, size, pBuf)
NvU32 kflcnRiscvRegRead_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset);
@@ -296,6 +305,18 @@ static inline NV_STATUS kflcnWaitForHalt(struct OBJGPU *pGpu, struct KernelFalco

#define kflcnWaitForHalt_HAL(pGpu, pKernelFlcn, timeoutUs, flags) kflcnWaitForHalt(pGpu, pKernelFlcn, timeoutUs, flags)

NvU32 kflcnRegRead_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset);

static inline NvU32 kflcnRegRead_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset) {
    return pKernelFlcn->__kflcnRegRead__(pGpu, pKernelFlcn, offset);
}

void kflcnRegWrite_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data);

static inline void kflcnRegWrite_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data) {
    pKernelFlcn->__kflcnRegWrite__(pGpu, pKernelFlcn, offset, data);
}

NvBool kflcnIsRiscvActive_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);

NvBool kflcnIsRiscvActive_GA10X(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn);
@@ -380,6 +401,54 @@ static inline NvU32 kflcnMaskDmemAddr_DISPATCH(struct OBJGPU *pGpu, struct Kerne
return pKernelFlcn->__kflcnMaskDmemAddr__(pGpu, pKernelFlcn, addr);
|
||||
}
|
||||
|
||||
static inline void kflcnReadEmem_DISPATCH(struct KernelFalcon *arg0, NvU64 offset, NvU64 size, void *pBuf) {
|
||||
arg0->__kflcnReadEmem__(arg0, offset, size, pBuf);
|
||||
}
|
||||
|
||||
static inline NvU32 kflcnGetWFL0Offset_DISPATCH(struct KernelFalcon *arg0) {
|
||||
return arg0->__kflcnGetWFL0Offset__(arg0);
|
||||
}
|
||||
|
||||
static inline const NvU32 *kflcnGetScratchOffsets_DISPATCH(struct KernelFalcon *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
|
||||
return arg0->__kflcnGetScratchOffsets__(arg0, scratchGroupId);
|
||||
}
|
||||
|
||||
static inline void kflcnUnload_DISPATCH(struct KernelFalcon *arg0) {
|
||||
arg0->__kflcnUnload__(arg0);
|
||||
}
|
||||
|
||||
static inline NvBool kflcnConfigured_DISPATCH(struct KernelFalcon *arg0) {
|
||||
return arg0->__kflcnConfigured__(arg0);
|
||||
}
|
||||
|
||||
static inline NvU32 kflcnPriRead_DISPATCH(struct KernelFalcon *arg0, NvU32 offset) {
|
||||
return arg0->__kflcnPriRead__(arg0, offset);
|
||||
}
|
||||
|
||||
static inline void kflcnVprintf_DISPATCH(struct KernelFalcon *arg0, NvBool bReportStart, const char *fmt, va_list args) {
|
||||
arg0->__kflcnVprintf__(arg0, bReportStart, fmt, args);
|
||||
}
|
||||
|
||||
static inline void kflcnPriWrite_DISPATCH(struct KernelFalcon *arg0, NvU32 offset, NvU32 data) {
|
||||
arg0->__kflcnPriWrite__(arg0, offset, data);
|
||||
}
|
||||
|
||||
static inline void kflcnSyncBufferDescriptor_DISPATCH(struct KernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
|
||||
arg0->__kflcnSyncBufferDescriptor__(arg0, pBufDesc, offset, size);
|
||||
}
|
||||
|
||||
static inline void *kflcnMapBufferDescriptor_DISPATCH(struct KernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
|
||||
return arg0->__kflcnMapBufferDescriptor__(arg0, pBufDesc);
|
||||
}
|
||||
|
||||
static inline void kflcnUnmapBufferDescriptor_DISPATCH(struct KernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
|
||||
arg0->__kflcnUnmapBufferDescriptor__(arg0, pBufDesc);
|
||||
}
|
||||
|
||||
static inline void kflcnReadDmem_DISPATCH(struct KernelFalcon *arg0, NvU32 offset, NvU32 size, void *pBuf) {
|
||||
arg0->__kflcnReadDmem__(arg0, offset, size, pBuf);
|
||||
}
|
||||
|
||||
void kflcnConfigureEngine_IMPL(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFalcon, KernelFalconEngineConfig *pFalconConfig);
|
||||
|
||||
#ifdef __nvoc_kernel_falcon_h_disabled
|
||||
|
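/*
 * Illustrative sketch (not from the driver): the map/sync/unmap trio above
 * manages CPU visibility of a CrashCat buffer. A hedged model of the expected
 * calling order, with a dummy descriptor type standing in for the opaque
 * CrashCatBufferDescriptor:
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
    uint8_t backing[64];                 /* stand-in for device-visible memory */
} BufDesc;

static void *bufMap(BufDesc *d) { return d->backing; }
static void bufSync(BufDesc *d, size_t off, size_t n) { (void)d; (void)off; (void)n; }
static void bufUnmap(BufDesc *d) { (void)d; }

int main(void) {
    BufDesc d;
    memset(d.backing, 0xAB, sizeof d.backing);

    void *p = bufMap(&d);                /* like kflcnMapBufferDescriptor   */
    bufSync(&d, 0, 16);                  /* like kflcnSyncBufferDescriptor  */
    printf("first byte: 0x%02x\n", (unsigned)((uint8_t *)p)[0]);
    bufUnmap(&d);                        /* like kflcnUnmapBufferDescriptor */
    return 0;
}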
@@ -429,6 +498,8 @@ struct GenericKernelFalcon {
    struct KernelFalcon __nvoc_base_KernelFalcon;
    struct IntrService __nvoc_base_IntrService;
    struct Object __nvoc_base_Object;
    struct CrashCatEngine *__nvoc_pbase_CrashCatEngine;
    struct KernelCrashCatEngine *__nvoc_pbase_KernelCrashCatEngine;
    struct KernelFalcon *__nvoc_pbase_KernelFalcon;
    struct IntrService *__nvoc_pbase_IntrService;
    struct Object *__nvoc_pbase_Object;
@@ -436,8 +507,23 @@ struct GenericKernelFalcon {
    NV_STATUS (*__gkflcnResetHw__)(struct OBJGPU *, struct GenericKernelFalcon *);
    void (*__gkflcnRegisterIntrService__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceRecord *);
    NV_STATUS (*__gkflcnServiceNotificationInterrupt__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceServiceNotificationInterruptArguments *);
    void (*__gkflcnReadEmem__)(struct GenericKernelFalcon *, NvU64, NvU64, void *);
    NvU32 (*__gkflcnGetWFL0Offset__)(struct GenericKernelFalcon *);
    void (*__gkflcnUnload__)(struct GenericKernelFalcon *);
    NvBool (*__gkflcnConfigured__)(struct GenericKernelFalcon *);
    NvU32 (*__gkflcnPriRead__)(struct GenericKernelFalcon *, NvU32);
    const NvU32 *(*__gkflcnGetScratchOffsets__)(struct GenericKernelFalcon *, NV_CRASHCAT_SCRATCH_GROUP_ID);
    void (*__gkflcnRegWrite__)(struct OBJGPU *, struct GenericKernelFalcon *, NvU32, NvU32);
    NvU32 (*__gkflcnMaskDmemAddr__)(struct OBJGPU *, struct GenericKernelFalcon *, NvU32);
    void (*__gkflcnVprintf__)(struct GenericKernelFalcon *, NvBool, const char *, va_list);
    NvBool (*__gkflcnClearInterrupt__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceClearInterruptArguments *);
    void (*__gkflcnPriWrite__)(struct GenericKernelFalcon *, NvU32, NvU32);
    void *(*__gkflcnMapBufferDescriptor__)(struct GenericKernelFalcon *, CrashCatBufferDescriptor *);
    void (*__gkflcnSyncBufferDescriptor__)(struct GenericKernelFalcon *, CrashCatBufferDescriptor *, NvU32, NvU32);
    NvU32 (*__gkflcnRegRead__)(struct OBJGPU *, struct GenericKernelFalcon *, NvU32);
    void (*__gkflcnUnmapBufferDescriptor__)(struct GenericKernelFalcon *, CrashCatBufferDescriptor *);
    NvU32 (*__gkflcnServiceInterrupt__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceServiceInterruptArguments *);
    void (*__gkflcnReadDmem__)(struct GenericKernelFalcon *, NvU32, NvU32, void *);
};

#ifndef __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__
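/*
 * Illustrative sketch (not from the driver): GenericKernelFalcon above models
 * multiple inheritance by value. Every base class is embedded as a
 * __nvoc_base_* member, and the __nvoc_pbase_* pointers are precomputed
 * shortcuts to each base. Minimal model with two invented bases:
 */
#include <stdio.h>

typedef struct { int engineId; }  BaseEngine;
typedef struct { int irqVector; } BaseIntrService;

typedef struct {
    BaseEngine       __base_Engine;       /* like __nvoc_base_KernelFalcon */
    BaseIntrService  __base_IntrService;  /* like __nvoc_base_IntrService  */
    BaseEngine      *__pbase_Engine;      /* shortcut, set once at init    */
    BaseIntrService *__pbase_IntrService;
} Derived;

static void derivedInit(Derived *d) {
    d->__pbase_Engine      = &d->__base_Engine;
    d->__pbase_IntrService = &d->__base_IntrService;
}

int main(void) {
    Derived d = {0};
    derivedInit(&d);
    d.__pbase_Engine->engineId = 7;
    printf("engineId via base pointer: %d\n", d.__base_Engine.engineId);
    return 0;
}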
@@ -471,8 +557,23 @@ NV_STATUS __nvoc_objCreate_GenericKernelFalcon(GenericKernelFalcon**, Dynamic*,
#define gkflcnResetHw(pGpu, pGenKernFlcn) gkflcnResetHw_DISPATCH(pGpu, pGenKernFlcn)
#define gkflcnRegisterIntrService(arg0, arg1, arg2) gkflcnRegisterIntrService_DISPATCH(arg0, arg1, arg2)
#define gkflcnServiceNotificationInterrupt(arg0, arg1, arg2) gkflcnServiceNotificationInterrupt_DISPATCH(arg0, arg1, arg2)
#define gkflcnReadEmem(arg0, offset, size, pBuf) gkflcnReadEmem_DISPATCH(arg0, offset, size, pBuf)
#define gkflcnGetWFL0Offset(arg0) gkflcnGetWFL0Offset_DISPATCH(arg0)
#define gkflcnUnload(arg0) gkflcnUnload_DISPATCH(arg0)
#define gkflcnConfigured(arg0) gkflcnConfigured_DISPATCH(arg0)
#define gkflcnPriRead(arg0, offset) gkflcnPriRead_DISPATCH(arg0, offset)
#define gkflcnGetScratchOffsets(arg0, scratchGroupId) gkflcnGetScratchOffsets_DISPATCH(arg0, scratchGroupId)
#define gkflcnRegWrite(pGpu, pKernelFlcn, offset, data) gkflcnRegWrite_DISPATCH(pGpu, pKernelFlcn, offset, data)
#define gkflcnMaskDmemAddr(pGpu, pKernelFlcn, addr) gkflcnMaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define gkflcnVprintf(arg0, bReportStart, fmt, args) gkflcnVprintf_DISPATCH(arg0, bReportStart, fmt, args)
#define gkflcnClearInterrupt(pGpu, pIntrService, pParams) gkflcnClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define gkflcnPriWrite(arg0, offset, data) gkflcnPriWrite_DISPATCH(arg0, offset, data)
#define gkflcnMapBufferDescriptor(arg0, pBufDesc) gkflcnMapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define gkflcnSyncBufferDescriptor(arg0, pBufDesc, offset, size) gkflcnSyncBufferDescriptor_DISPATCH(arg0, pBufDesc, offset, size)
#define gkflcnRegRead(pGpu, pKernelFlcn, offset) gkflcnRegRead_DISPATCH(pGpu, pKernelFlcn, offset)
#define gkflcnUnmapBufferDescriptor(arg0, pBufDesc) gkflcnUnmapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define gkflcnServiceInterrupt(pGpu, pIntrService, pParams) gkflcnServiceInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define gkflcnReadDmem(arg0, offset, size, pBuf) gkflcnReadDmem_DISPATCH(arg0, offset, size, pBuf)

NV_STATUS gkflcnResetHw_IMPL(struct OBJGPU *pGpu, struct GenericKernelFalcon *pGenKernFlcn);

static inline NV_STATUS gkflcnResetHw_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pGenKernFlcn) {
@@ -491,14 +592,74 @@ static inline NV_STATUS gkflcnServiceNotificationInterrupt_DISPATCH(struct OBJGP
    return arg1->__gkflcnServiceNotificationInterrupt__(arg0, arg1, arg2);
}

static inline void gkflcnReadEmem_DISPATCH(struct GenericKernelFalcon *arg0, NvU64 offset, NvU64 size, void *pBuf) {
    arg0->__gkflcnReadEmem__(arg0, offset, size, pBuf);
}

static inline NvU32 gkflcnGetWFL0Offset_DISPATCH(struct GenericKernelFalcon *arg0) {
    return arg0->__gkflcnGetWFL0Offset__(arg0);
}

static inline void gkflcnUnload_DISPATCH(struct GenericKernelFalcon *arg0) {
    arg0->__gkflcnUnload__(arg0);
}

static inline NvBool gkflcnConfigured_DISPATCH(struct GenericKernelFalcon *arg0) {
    return arg0->__gkflcnConfigured__(arg0);
}

static inline NvU32 gkflcnPriRead_DISPATCH(struct GenericKernelFalcon *arg0, NvU32 offset) {
    return arg0->__gkflcnPriRead__(arg0, offset);
}

static inline const NvU32 *gkflcnGetScratchOffsets_DISPATCH(struct GenericKernelFalcon *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
    return arg0->__gkflcnGetScratchOffsets__(arg0, scratchGroupId);
}

static inline void gkflcnRegWrite_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data) {
    pKernelFlcn->__gkflcnRegWrite__(pGpu, pKernelFlcn, offset, data);
}

static inline NvU32 gkflcnMaskDmemAddr_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pKernelFlcn, NvU32 addr) {
    return pKernelFlcn->__gkflcnMaskDmemAddr__(pGpu, pKernelFlcn, addr);
}

static inline void gkflcnVprintf_DISPATCH(struct GenericKernelFalcon *arg0, NvBool bReportStart, const char *fmt, va_list args) {
    arg0->__gkflcnVprintf__(arg0, bReportStart, fmt, args);
}

static inline NvBool gkflcnClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pIntrService, IntrServiceClearInterruptArguments *pParams) {
    return pIntrService->__gkflcnClearInterrupt__(pGpu, pIntrService, pParams);
}

static inline void gkflcnPriWrite_DISPATCH(struct GenericKernelFalcon *arg0, NvU32 offset, NvU32 data) {
    arg0->__gkflcnPriWrite__(arg0, offset, data);
}

static inline void *gkflcnMapBufferDescriptor_DISPATCH(struct GenericKernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
    return arg0->__gkflcnMapBufferDescriptor__(arg0, pBufDesc);
}

static inline void gkflcnSyncBufferDescriptor_DISPATCH(struct GenericKernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
    arg0->__gkflcnSyncBufferDescriptor__(arg0, pBufDesc, offset, size);
}

static inline NvU32 gkflcnRegRead_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pKernelFlcn, NvU32 offset) {
    return pKernelFlcn->__gkflcnRegRead__(pGpu, pKernelFlcn, offset);
}

static inline void gkflcnUnmapBufferDescriptor_DISPATCH(struct GenericKernelFalcon *arg0, CrashCatBufferDescriptor *pBufDesc) {
    arg0->__gkflcnUnmapBufferDescriptor__(arg0, pBufDesc);
}

static inline NvU32 gkflcnServiceInterrupt_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
    return pIntrService->__gkflcnServiceInterrupt__(pGpu, pIntrService, pParams);
}

static inline void gkflcnReadDmem_DISPATCH(struct GenericKernelFalcon *arg0, NvU32 offset, NvU32 size, void *pBuf) {
    arg0->__gkflcnReadDmem__(arg0, offset, size, pBuf);
}

NV_STATUS gkflcnConstruct_IMPL(struct GenericKernelFalcon *arg_pGenKernFlcn, struct OBJGPU *arg_pGpu, KernelFalconEngineConfig *arg_pFalconConfig);

#define __nvoc_gkflcnConstruct(arg_pGenKernFlcn, arg_pGpu, arg_pFalconConfig) gkflcnConstruct_IMPL(arg_pGenKernFlcn, arg_pGpu, arg_pFalconConfig)
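/*
 * Illustrative sketch (not from the driver): gkflcnConstruct_IMPL above is the
 * real constructor body, and the __nvoc_gkflcnConstruct macro is the name the
 * generated object-creation path invokes. Obj/ObjConfig are invented stand-ins
 * for GenericKernelFalcon and KernelFalconEngineConfig.
 */
#include <stdio.h>

typedef struct { int engineBase; } ObjConfig;
typedef struct { int engineBase; } Obj;

static int objConstruct_IMPL(Obj *pObj, const ObjConfig *pCfg) {
    pObj->engineBase = pCfg->engineBase;    /* copy config into the object */
    return 0;                               /* 0 plays the role of NV_OK   */
}

/* creation code reaches the constructor through this generated alias */
#define __nvoc_objConstruct(pObj, pCfg) objConstruct_IMPL(pObj, pCfg)

int main(void) {
    ObjConfig cfg = { .engineBase = 0x110000 };
    Obj obj;
    if (__nvoc_objConstruct(&obj, &cfg) == 0)
        printf("constructed, base=0x%x\n", (unsigned)obj.engineBase);
    return 0;
}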
@@ -19,6 +19,10 @@ extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCrashCatEngine;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon;

void __nvoc_init_KernelGsp(KernelGsp*, RmHalspecOwner* );
@@ -52,6 +56,18 @@ static const struct NVOC_RTTI __nvoc_rtti_KernelGsp_IntrService = {
    /*offset=*/ NV_OFFSETOF(KernelGsp, __nvoc_base_IntrService),
};

static const struct NVOC_RTTI __nvoc_rtti_KernelGsp_CrashCatEngine = {
    /*pClassDef=*/ &__nvoc_class_def_CrashCatEngine,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(KernelGsp, __nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine),
};

static const struct NVOC_RTTI __nvoc_rtti_KernelGsp_KernelCrashCatEngine = {
    /*pClassDef=*/ &__nvoc_class_def_KernelCrashCatEngine,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(KernelGsp, __nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine),
};

static const struct NVOC_RTTI __nvoc_rtti_KernelGsp_KernelFalcon = {
    /*pClassDef=*/ &__nvoc_class_def_KernelFalcon,
    /*dtor=*/ &__nvoc_destructFromBase,
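/*
 * Illustrative sketch (not from the driver): each NVOC_RTTI record above pairs
 * a class definition with the byte offset of that base inside the derived
 * object. Invented types; the real table also carries the destructor hook.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct { int x; } BaseA;
typedef struct { int y; } BaseB;
typedef struct { BaseA a; BaseB b; } Derived2;

typedef struct {
    const char *name;
    size_t offset;                  /* plays the role of the offset= fields */
} Rtti;

static const Rtti rttiDerivedBaseB = { "BaseB", offsetof(Derived2, b) };

int main(void) {
    Derived2 d = { {1}, {2} };
    /* the same arithmetic the generated thunks use: base = object + offset */
    BaseB *pB = (BaseB *)((unsigned char *)&d + rttiDerivedBaseB.offset);
    printf("%s.y = %d\n", rttiDerivedBaseB.name, pB->y);
    return 0;
}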
@@ -59,10 +75,12 @@ static const struct NVOC_RTTI __nvoc_rtti_KernelGsp_KernelFalcon = {
};

static const struct NVOC_CASTINFO __nvoc_castinfo_KernelGsp = {
    /*numRelatives=*/ 5,
    /*numRelatives=*/ 7,
    /*relatives=*/ {
        &__nvoc_rtti_KernelGsp_KernelGsp,
        &__nvoc_rtti_KernelGsp_KernelFalcon,
        &__nvoc_rtti_KernelGsp_KernelCrashCatEngine,
        &__nvoc_rtti_KernelGsp_CrashCatEngine,
        &__nvoc_rtti_KernelGsp_IntrService,
        &__nvoc_rtti_KernelGsp_OBJENGSTATE,
        &__nvoc_rtti_KernelGsp_Object,
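/*
 * The numRelatives bump from 5 to 7 above reflects the two CrashCat ancestors
 * (KernelCrashCatEngine, CrashCatEngine) newly advertised for KernelGsp.
 * Illustrative sketch (not from the driver) of a dynamic-cast lookup over such
 * a relatives table; dynCast and FakeGsp are invented stand-ins:
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef struct { const char *name; size_t offset; } RttiRec;
typedef struct { int gspData; int crashcatData; } FakeGsp;

static void *dynCast(void *pObj, const RttiRec *rel, size_t n, const char *want) {
    for (size_t i = 0; i < n; i++)
        if (strcmp(rel[i].name, want) == 0)
            return (unsigned char *)pObj + rel[i].offset;
    return NULL;                        /* requested class is not a relative */
}

int main(void) {
    FakeGsp obj = { 1, 2 };
    const RttiRec relatives[] = {
        { "KernelGsp",      0 },
        { "CrashCatEngine", offsetof(FakeGsp, crashcatData) },
    };
    int *p = dynCast(&obj, relatives, 2, "CrashCatEngine");
    printf("%d\n", p ? *p : -1);
    return 0;
}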
@@ -100,10 +118,70 @@ static NV_STATUS __nvoc_thunk_KernelGsp_kflcnResetHw(struct OBJGPU *pGpu, struct
    return kgspResetHw(pGpu, (struct KernelGsp *)(((unsigned char *)pKernelGsp) - __nvoc_rtti_KernelGsp_KernelFalcon.offset));
}

static NvBool __nvoc_thunk_KernelCrashCatEngine_kgspConfigured(struct KernelGsp *arg0) {
    return kcrashcatEngineConfigured((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset));
}

static NvU32 __nvoc_thunk_KernelCrashCatEngine_kgspPriRead(struct KernelGsp *arg0, NvU32 offset) {
    return kcrashcatEnginePriRead((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), offset);
}

static void __nvoc_thunk_KernelFalcon_kgspRegWrite(struct OBJGPU *pGpu, struct KernelGsp *pKernelFlcn, NvU32 offset, NvU32 data) {
    kflcnRegWrite(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_KernelGsp_KernelFalcon.offset), offset, data);
}

static NvU32 __nvoc_thunk_KernelFalcon_kgspMaskDmemAddr(struct OBJGPU *pGpu, struct KernelGsp *pKernelFlcn, NvU32 addr) {
    return kflcnMaskDmemAddr(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_KernelGsp_KernelFalcon.offset), addr);
}

static void __nvoc_thunk_OBJENGSTATE_kgspStateDestroy(POBJGPU pGpu, struct KernelGsp *pEngstate) {
    engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset));
}

static void __nvoc_thunk_KernelCrashCatEngine_kgspVprintf(struct KernelGsp *arg0, NvBool bReportStart, const char *fmt, va_list args) {
    kcrashcatEngineVprintf((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), bReportStart, fmt, args);
}

static NvBool __nvoc_thunk_IntrService_kgspClearInterrupt(struct OBJGPU *pGpu, struct KernelGsp *pIntrService, IntrServiceClearInterruptArguments *pParams) {
    return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelGsp_IntrService.offset), pParams);
}

static void __nvoc_thunk_KernelCrashCatEngine_kgspPriWrite(struct KernelGsp *arg0, NvU32 offset, NvU32 data) {
    kcrashcatEnginePriWrite((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), offset, data);
}

static void *__nvoc_thunk_KernelCrashCatEngine_kgspMapBufferDescriptor(struct KernelGsp *arg0, CrashCatBufferDescriptor *pBufDesc) {
    return kcrashcatEngineMapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), pBufDesc);
}

static void __nvoc_thunk_KernelCrashCatEngine_kgspSyncBufferDescriptor(struct KernelGsp *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
    kcrashcatEngineSyncBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), pBufDesc, offset, size);
}

static NvU32 __nvoc_thunk_KernelFalcon_kgspRegRead(struct OBJGPU *pGpu, struct KernelGsp *pKernelFlcn, NvU32 offset) {
    return kflcnRegRead(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_KernelGsp_KernelFalcon.offset), offset);
}

static NvBool __nvoc_thunk_OBJENGSTATE_kgspIsPresent(POBJGPU pGpu, struct KernelGsp *pEngstate) {
    return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset));
}

static void __nvoc_thunk_KernelCrashCatEngine_kgspReadEmem(struct KernelGsp *arg0, NvU64 offset, NvU64 size, void *pBuf) {
    kcrashcatEngineReadEmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), offset, size, pBuf);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStateLoad(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
    return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0);
}

static const NvU32 *__nvoc_thunk_KernelCrashCatEngine_kgspGetScratchOffsets(struct KernelGsp *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
    return kcrashcatEngineGetScratchOffsets((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), scratchGroupId);
}

static void __nvoc_thunk_KernelCrashCatEngine_kgspUnload(struct KernelGsp *arg0) {
    kcrashcatEngineUnload((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStateUnload(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
    return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0);
}
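/*
 * Illustrative sketch (not from the driver): every __nvoc_thunk_* above does
 * the same two-step dance: shift the object pointer by the recorded base
 * offset, then call the base-class routine. Upcast thunks add the offset, as
 * here; the kflcnResetHw thunk at the top of this hunk subtracts it to go the
 * other way. Gsp/Engstate are invented stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct { int present; } Engstate;

typedef struct {
    int gspOnly;
    Engstate base;              /* embedded base, like __nvoc_base_OBJENGSTATE */
} Gsp;

static int engstateIsPresent(Engstate *e) { return e->present; }

/* adjusts Gsp* to Engstate* exactly as the generated thunks do */
static int thunk_gspIsPresent(Gsp *pGsp) {
    return engstateIsPresent(
        (Engstate *)((unsigned char *)pGsp + offsetof(Gsp, base)));
}

int main(void) {
    Gsp g = { .gspOnly = 0, .base = { .present = 1 } };
    printf("present: %d\n", thunk_gspIsPresent(&g));
    return 0;
}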
@@ -112,6 +190,10 @@ static NV_STATUS __nvoc_thunk_IntrService_kgspServiceNotificationInterrupt(struc
    return intrservServiceNotificationInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelGsp_IntrService.offset), pParams);
}

static NvU32 __nvoc_thunk_KernelCrashCatEngine_kgspGetWFL0Offset(struct KernelGsp *arg0) {
    return kcrashcatEngineGetWFL0Offset((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStateInitLocked(POBJGPU pGpu, struct KernelGsp *pEngstate) {
    return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset));
}
@@ -124,10 +206,6 @@ static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStatePostUnload(POBJGPU pGpu, stru
    return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0);
}

static void __nvoc_thunk_OBJENGSTATE_kgspStateDestroy(POBJGPU pGpu, struct KernelGsp *pEngstate) {
    engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStatePreUnload(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
    return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0);
}
@@ -148,16 +226,16 @@ static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStatePreInitUnlocked(POBJGPU pGpu,
    return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset));
}

static NvBool __nvoc_thunk_IntrService_kgspClearInterrupt(struct OBJGPU *pGpu, struct KernelGsp *pIntrService, IntrServiceClearInterruptArguments *pParams) {
    return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelGsp_IntrService.offset), pParams);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStatePostLoad(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
    return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0);
}

static NvBool __nvoc_thunk_OBJENGSTATE_kgspIsPresent(POBJGPU pGpu, struct KernelGsp *pEngstate) {
    return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset));
}

static void __nvoc_thunk_KernelCrashCatEngine_kgspUnmapBufferDescriptor(struct KernelGsp *arg0, CrashCatBufferDescriptor *pBufDesc) {
    kcrashcatEngineUnmapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), pBufDesc);
}

static void __nvoc_thunk_KernelCrashCatEngine_kgspReadDmem(struct KernelGsp *arg0, NvU32 offset, NvU32 size, void *pBuf) {
    kcrashcatEngineReadDmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelGsp_KernelCrashCatEngine.offset), offset, size, pBuf);
}

const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGsp =
@@ -709,20 +787,50 @@ static void __nvoc_init_funcTable_KernelGsp_1(KernelGsp *pThis, RmHalspecOwner *

    pThis->__nvoc_base_KernelFalcon.__kflcnResetHw__ = &__nvoc_thunk_KernelGsp_kflcnResetHw;

    pThis->__kgspConfigured__ = &__nvoc_thunk_KernelCrashCatEngine_kgspConfigured;

    pThis->__kgspPriRead__ = &__nvoc_thunk_KernelCrashCatEngine_kgspPriRead;

    pThis->__kgspRegWrite__ = &__nvoc_thunk_KernelFalcon_kgspRegWrite;

    pThis->__kgspMaskDmemAddr__ = &__nvoc_thunk_KernelFalcon_kgspMaskDmemAddr;

    pThis->__kgspStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kgspStateDestroy;

    pThis->__kgspVprintf__ = &__nvoc_thunk_KernelCrashCatEngine_kgspVprintf;

    pThis->__kgspClearInterrupt__ = &__nvoc_thunk_IntrService_kgspClearInterrupt;

    pThis->__kgspPriWrite__ = &__nvoc_thunk_KernelCrashCatEngine_kgspPriWrite;

    pThis->__kgspMapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_kgspMapBufferDescriptor;

    pThis->__kgspSyncBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_kgspSyncBufferDescriptor;

    pThis->__kgspRegRead__ = &__nvoc_thunk_KernelFalcon_kgspRegRead;

    pThis->__kgspIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kgspIsPresent;

    pThis->__kgspReadEmem__ = &__nvoc_thunk_KernelCrashCatEngine_kgspReadEmem;

    pThis->__kgspStateLoad__ = &__nvoc_thunk_OBJENGSTATE_kgspStateLoad;

    pThis->__kgspGetScratchOffsets__ = &__nvoc_thunk_KernelCrashCatEngine_kgspGetScratchOffsets;

    pThis->__kgspUnload__ = &__nvoc_thunk_KernelCrashCatEngine_kgspUnload;

    pThis->__kgspStateUnload__ = &__nvoc_thunk_OBJENGSTATE_kgspStateUnload;

    pThis->__kgspServiceNotificationInterrupt__ = &__nvoc_thunk_IntrService_kgspServiceNotificationInterrupt;

    pThis->__kgspGetWFL0Offset__ = &__nvoc_thunk_KernelCrashCatEngine_kgspGetWFL0Offset;

    pThis->__kgspStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kgspStateInitLocked;

    pThis->__kgspStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePreLoad;

    pThis->__kgspStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePostUnload;

    pThis->__kgspStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kgspStateDestroy;

    pThis->__kgspStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePreUnload;

    pThis->__kgspStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kgspStateInitUnlocked;

@@ -733,11 +841,11 @@ static void __nvoc_init_funcTable_KernelGsp_1(KernelGsp *pThis, RmHalspecOwner *

    pThis->__kgspStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePreInitUnlocked;

    pThis->__kgspClearInterrupt__ = &__nvoc_thunk_IntrService_kgspClearInterrupt;

    pThis->__kgspStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePostLoad;

    pThis->__kgspIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kgspIsPresent;
    pThis->__kgspUnmapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_kgspUnmapBufferDescriptor;

    pThis->__kgspReadDmem__ = &__nvoc_thunk_KernelCrashCatEngine_kgspReadDmem;
}

void __nvoc_init_funcTable_KernelGsp(KernelGsp *pThis, RmHalspecOwner *pRmhalspecowner) {
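/*
 * Illustrative sketch (not from the driver): __nvoc_init_funcTable_KernelGsp_1
 * above fills every virtual slot with either a class-local override or a thunk
 * into a base class, one assignment per slot. Invented two-slot model:
 */
#include <stdio.h>

typedef struct Gsp2 Gsp2;
struct Gsp2 {
    int  (*__gspIsPresent__)(Gsp2 *);
    void (*__gspUnload__)(Gsp2 *);
};

static int  thunk_engstateIsPresent2(Gsp2 *g) { (void)g; return 1; }
static void thunk_crashcatUnload2(Gsp2 *g)    { (void)g; puts("unload"); }

static void initFuncTable(Gsp2 *g) {
    /* mirrors the pThis->__kgsp*__ = &__nvoc_thunk_* lines above */
    g->__gspIsPresent__ = thunk_engstateIsPresent2;
    g->__gspUnload__    = thunk_crashcatUnload2;
}

int main(void) {
    Gsp2 g;
    initFuncTable(&g);
    printf("%d\n", g.__gspIsPresent__(&g));
    g.__gspUnload__(&g);
    return 0;
}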
@@ -752,6 +860,8 @@ void __nvoc_init_KernelGsp(KernelGsp *pThis, RmHalspecOwner *pRmhalspecowner) {
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object;
    pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE;
    pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService;
    pThis->__nvoc_pbase_CrashCatEngine = &pThis->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine;
    pThis->__nvoc_pbase_KernelCrashCatEngine = &pThis->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine;
    pThis->__nvoc_pbase_KernelFalcon = &pThis->__nvoc_base_KernelFalcon;
    __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    __nvoc_init_IntrService(&pThis->__nvoc_base_IntrService);
@@ -253,6 +253,8 @@ struct KernelGsp {
    struct Object *__nvoc_pbase_Object;
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
    struct IntrService *__nvoc_pbase_IntrService;
    struct CrashCatEngine *__nvoc_pbase_CrashCatEngine;
    struct KernelCrashCatEngine *__nvoc_pbase_KernelCrashCatEngine;
    struct KernelFalcon *__nvoc_pbase_KernelFalcon;
    struct KernelGsp *__nvoc_pbase_KernelGsp;
    NV_STATUS (*__kgspConstructEngine__)(struct OBJGPU *, struct KernelGsp *, ENGDESCRIPTOR);
@@ -294,21 +296,36 @@ struct KernelGsp {
    NV_STATUS (*__kgspFreeVgpuPartitionLogging__)(struct OBJGPU *, struct KernelGsp *, NvU32);
    const char *(*__kgspGetSignatureSectionNamePrefix__)(struct OBJGPU *, struct KernelGsp *);
    NV_STATUS (*__kgspSetupGspFmcArgs__)(struct OBJGPU *, struct KernelGsp *, GSP_FIRMWARE *);
    NvBool (*__kgspConfigured__)(struct KernelGsp *);
    NvU32 (*__kgspPriRead__)(struct KernelGsp *, NvU32);
    void (*__kgspRegWrite__)(struct OBJGPU *, struct KernelGsp *, NvU32, NvU32);
    NvU32 (*__kgspMaskDmemAddr__)(struct OBJGPU *, struct KernelGsp *, NvU32);
    void (*__kgspStateDestroy__)(POBJGPU, struct KernelGsp *);
    void (*__kgspVprintf__)(struct KernelGsp *, NvBool, const char *, va_list);
    NvBool (*__kgspClearInterrupt__)(struct OBJGPU *, struct KernelGsp *, IntrServiceClearInterruptArguments *);
    void (*__kgspPriWrite__)(struct KernelGsp *, NvU32, NvU32);
    void *(*__kgspMapBufferDescriptor__)(struct KernelGsp *, CrashCatBufferDescriptor *);
    void (*__kgspSyncBufferDescriptor__)(struct KernelGsp *, CrashCatBufferDescriptor *, NvU32, NvU32);
    NvU32 (*__kgspRegRead__)(struct OBJGPU *, struct KernelGsp *, NvU32);
    NvBool (*__kgspIsPresent__)(POBJGPU, struct KernelGsp *);
    void (*__kgspReadEmem__)(struct KernelGsp *, NvU64, NvU64, void *);
    NV_STATUS (*__kgspStateLoad__)(POBJGPU, struct KernelGsp *, NvU32);
    const NvU32 *(*__kgspGetScratchOffsets__)(struct KernelGsp *, NV_CRASHCAT_SCRATCH_GROUP_ID);
    void (*__kgspUnload__)(struct KernelGsp *);
    NV_STATUS (*__kgspStateUnload__)(POBJGPU, struct KernelGsp *, NvU32);
    NV_STATUS (*__kgspServiceNotificationInterrupt__)(struct OBJGPU *, struct KernelGsp *, IntrServiceServiceNotificationInterruptArguments *);
    NvU32 (*__kgspGetWFL0Offset__)(struct KernelGsp *);
    NV_STATUS (*__kgspStateInitLocked__)(POBJGPU, struct KernelGsp *);
    NV_STATUS (*__kgspStatePreLoad__)(POBJGPU, struct KernelGsp *, NvU32);
    NV_STATUS (*__kgspStatePostUnload__)(POBJGPU, struct KernelGsp *, NvU32);
    void (*__kgspStateDestroy__)(POBJGPU, struct KernelGsp *);
    NV_STATUS (*__kgspStatePreUnload__)(POBJGPU, struct KernelGsp *, NvU32);
    NV_STATUS (*__kgspStateInitUnlocked__)(POBJGPU, struct KernelGsp *);
    void (*__kgspInitMissing__)(POBJGPU, struct KernelGsp *);
    NV_STATUS (*__kgspStatePreInitLocked__)(POBJGPU, struct KernelGsp *);
    NV_STATUS (*__kgspStatePreInitUnlocked__)(POBJGPU, struct KernelGsp *);
    NvBool (*__kgspClearInterrupt__)(struct OBJGPU *, struct KernelGsp *, IntrServiceClearInterruptArguments *);
    NV_STATUS (*__kgspStatePostLoad__)(POBJGPU, struct KernelGsp *, NvU32);
    NvBool (*__kgspIsPresent__)(POBJGPU, struct KernelGsp *);
    void (*__kgspUnmapBufferDescriptor__)(struct KernelGsp *, CrashCatBufferDescriptor *);
    void (*__kgspReadDmem__)(struct KernelGsp *, NvU32, NvU32, void *);
    struct MESSAGE_QUEUE_COLLECTION *pMQCollection;
    struct OBJRPC *pRpc;
    struct OBJRPC *pLocklessRpc;
@@ -351,6 +368,7 @@ struct KernelGsp {
    NvBool bInInit;
    NvBool bInLockdown;
    NvBool bPollingForRpcResponse;
    NvBool bFatalError;
    MEMORY_DESCRIPTOR *pMemDesc_simAccessBuf;
    SimAccessBuffer *pSimAccessBuf;
    NvP64 pSimAccessBufPriv;
@@ -470,21 +488,36 @@ NV_STATUS __nvoc_objCreate_KernelGsp(KernelGsp**, Dynamic*, NvU32);
#define kgspGetSignatureSectionNamePrefix_HAL(pGpu, pKernelGsp) kgspGetSignatureSectionNamePrefix_DISPATCH(pGpu, pKernelGsp)
#define kgspSetupGspFmcArgs(pGpu, pKernelGsp, pGspFw) kgspSetupGspFmcArgs_DISPATCH(pGpu, pKernelGsp, pGspFw)
#define kgspSetupGspFmcArgs_HAL(pGpu, pKernelGsp, pGspFw) kgspSetupGspFmcArgs_DISPATCH(pGpu, pKernelGsp, pGspFw)
#define kgspConfigured(arg0) kgspConfigured_DISPATCH(arg0)
#define kgspPriRead(arg0, offset) kgspPriRead_DISPATCH(arg0, offset)
#define kgspRegWrite(pGpu, pKernelFlcn, offset, data) kgspRegWrite_DISPATCH(pGpu, pKernelFlcn, offset, data)
#define kgspMaskDmemAddr(pGpu, pKernelFlcn, addr) kgspMaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
#define kgspStateDestroy(pGpu, pEngstate) kgspStateDestroy_DISPATCH(pGpu, pEngstate)
#define kgspVprintf(arg0, bReportStart, fmt, args) kgspVprintf_DISPATCH(arg0, bReportStart, fmt, args)
#define kgspClearInterrupt(pGpu, pIntrService, pParams) kgspClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define kgspPriWrite(arg0, offset, data) kgspPriWrite_DISPATCH(arg0, offset, data)
#define kgspMapBufferDescriptor(arg0, pBufDesc) kgspMapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define kgspSyncBufferDescriptor(arg0, pBufDesc, offset, size) kgspSyncBufferDescriptor_DISPATCH(arg0, pBufDesc, offset, size)
#define kgspRegRead(pGpu, pKernelFlcn, offset) kgspRegRead_DISPATCH(pGpu, pKernelFlcn, offset)
#define kgspIsPresent(pGpu, pEngstate) kgspIsPresent_DISPATCH(pGpu, pEngstate)
#define kgspReadEmem(arg0, offset, size, pBuf) kgspReadEmem_DISPATCH(arg0, offset, size, pBuf)
#define kgspStateLoad(pGpu, pEngstate, arg0) kgspStateLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kgspGetScratchOffsets(arg0, scratchGroupId) kgspGetScratchOffsets_DISPATCH(arg0, scratchGroupId)
#define kgspUnload(arg0) kgspUnload_DISPATCH(arg0)
#define kgspStateUnload(pGpu, pEngstate, arg0) kgspStateUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kgspServiceNotificationInterrupt(pGpu, pIntrService, pParams) kgspServiceNotificationInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define kgspGetWFL0Offset(arg0) kgspGetWFL0Offset_DISPATCH(arg0)
#define kgspStateInitLocked(pGpu, pEngstate) kgspStateInitLocked_DISPATCH(pGpu, pEngstate)
#define kgspStatePreLoad(pGpu, pEngstate, arg0) kgspStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kgspStatePostUnload(pGpu, pEngstate, arg0) kgspStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kgspStateDestroy(pGpu, pEngstate) kgspStateDestroy_DISPATCH(pGpu, pEngstate)
#define kgspStatePreUnload(pGpu, pEngstate, arg0) kgspStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kgspStateInitUnlocked(pGpu, pEngstate) kgspStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kgspInitMissing(pGpu, pEngstate) kgspInitMissing_DISPATCH(pGpu, pEngstate)
#define kgspStatePreInitLocked(pGpu, pEngstate) kgspStatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define kgspStatePreInitUnlocked(pGpu, pEngstate) kgspStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kgspClearInterrupt(pGpu, pIntrService, pParams) kgspClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
#define kgspStatePostLoad(pGpu, pEngstate, arg0) kgspStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kgspIsPresent(pGpu, pEngstate) kgspIsPresent_DISPATCH(pGpu, pEngstate)
#define kgspUnmapBufferDescriptor(arg0, pBufDesc) kgspUnmapBufferDescriptor_DISPATCH(arg0, pBufDesc)
#define kgspReadDmem(arg0, offset, size, pBuf) kgspReadDmem_DISPATCH(arg0, offset, size, pBuf)

void kgspProgramLibosBootArgsAddr_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp);
@@ -512,12 +545,13 @@ static inline NV_STATUS kgspSetCmdQueueHead(struct OBJGPU *pGpu, struct KernelGs

#define kgspSetCmdQueueHead_HAL(pGpu, pKernelGsp, queueIdx, value) kgspSetCmdQueueHead(pGpu, pKernelGsp, queueIdx, value)

void kgspHealthCheck_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp);
NvBool kgspHealthCheck_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp);

#ifdef __nvoc_kernel_gsp_h_disabled
static inline void kgspHealthCheck(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
static inline NvBool kgspHealthCheck(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
    NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_gsp_h_disabled
#define kgspHealthCheck(pGpu, pKernelGsp) kgspHealthCheck_TU102(pGpu, pKernelGsp)
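/*
 * Illustrative sketch (not from the driver): when the class is compiled out,
 * the header above substitutes a failing stub so callers still build, and the
 * kgspHealthCheck change from void to NvBool means the stub must now also
 * return a value (NV_FALSE). Stand-in macros and names:
 */
#include <stdbool.h>
#include <stdio.h>

/* #define KERNEL_GSP_DISABLED 1    -- uncomment to take the stub path */

#ifdef KERNEL_GSP_DISABLED
static inline bool gspHealthCheck(void) {
    fprintf(stderr, "KernelGsp was disabled!\n");  /* asserts in the real code */
    return false;
}
#else
static bool gspHealthCheck_TU102(void) { return true; }
#define gspHealthCheck() gspHealthCheck_TU102()
#endif

int main(void) {
    printf("healthy: %d\n", (int)gspHealthCheck());
    return 0;
}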
@@ -984,10 +1018,70 @@ static inline NV_STATUS kgspSetupGspFmcArgs_DISPATCH(struct OBJGPU *pGpu, struct
    return pKernelGsp->__kgspSetupGspFmcArgs__(pGpu, pKernelGsp, pGspFw);
}

static inline NvBool kgspConfigured_DISPATCH(struct KernelGsp *arg0) {
    return arg0->__kgspConfigured__(arg0);
}

static inline NvU32 kgspPriRead_DISPATCH(struct KernelGsp *arg0, NvU32 offset) {
    return arg0->__kgspPriRead__(arg0, offset);
}

static inline void kgspRegWrite_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelFlcn, NvU32 offset, NvU32 data) {
    pKernelFlcn->__kgspRegWrite__(pGpu, pKernelFlcn, offset, data);
}

static inline NvU32 kgspMaskDmemAddr_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelFlcn, NvU32 addr) {
    return pKernelFlcn->__kgspMaskDmemAddr__(pGpu, pKernelFlcn, addr);
}

static inline void kgspStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) {
    pEngstate->__kgspStateDestroy__(pGpu, pEngstate);
}

static inline void kgspVprintf_DISPATCH(struct KernelGsp *arg0, NvBool bReportStart, const char *fmt, va_list args) {
    arg0->__kgspVprintf__(arg0, bReportStart, fmt, args);
}

static inline NvBool kgspClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pIntrService, IntrServiceClearInterruptArguments *pParams) {
    return pIntrService->__kgspClearInterrupt__(pGpu, pIntrService, pParams);
}

static inline void kgspPriWrite_DISPATCH(struct KernelGsp *arg0, NvU32 offset, NvU32 data) {
    arg0->__kgspPriWrite__(arg0, offset, data);
}

static inline void *kgspMapBufferDescriptor_DISPATCH(struct KernelGsp *arg0, CrashCatBufferDescriptor *pBufDesc) {
    return arg0->__kgspMapBufferDescriptor__(arg0, pBufDesc);
}

static inline void kgspSyncBufferDescriptor_DISPATCH(struct KernelGsp *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
    arg0->__kgspSyncBufferDescriptor__(arg0, pBufDesc, offset, size);
}

static inline NvU32 kgspRegRead_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelFlcn, NvU32 offset) {
    return pKernelFlcn->__kgspRegRead__(pGpu, pKernelFlcn, offset);
}

static inline NvBool kgspIsPresent_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) {
    return pEngstate->__kgspIsPresent__(pGpu, pEngstate);
}

static inline void kgspReadEmem_DISPATCH(struct KernelGsp *arg0, NvU64 offset, NvU64 size, void *pBuf) {
    arg0->__kgspReadEmem__(arg0, offset, size, pBuf);
}

static inline NV_STATUS kgspStateLoad_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
    return pEngstate->__kgspStateLoad__(pGpu, pEngstate, arg0);
}

static inline const NvU32 *kgspGetScratchOffsets_DISPATCH(struct KernelGsp *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
    return arg0->__kgspGetScratchOffsets__(arg0, scratchGroupId);
}

static inline void kgspUnload_DISPATCH(struct KernelGsp *arg0) {
    arg0->__kgspUnload__(arg0);
}

static inline NV_STATUS kgspStateUnload_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
    return pEngstate->__kgspStateUnload__(pGpu, pEngstate, arg0);
}
@@ -996,6 +1090,10 @@ static inline NV_STATUS kgspServiceNotificationInterrupt_DISPATCH(struct OBJGPU
    return pIntrService->__kgspServiceNotificationInterrupt__(pGpu, pIntrService, pParams);
}

static inline NvU32 kgspGetWFL0Offset_DISPATCH(struct KernelGsp *arg0) {
    return arg0->__kgspGetWFL0Offset__(arg0);
}

static inline NV_STATUS kgspStateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) {
    return pEngstate->__kgspStateInitLocked__(pGpu, pEngstate);
}
@@ -1008,10 +1106,6 @@ static inline NV_STATUS kgspStatePostUnload_DISPATCH(POBJGPU pGpu, struct Kernel
    return pEngstate->__kgspStatePostUnload__(pGpu, pEngstate, arg0);
}

static inline void kgspStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) {
    pEngstate->__kgspStateDestroy__(pGpu, pEngstate);
}

static inline NV_STATUS kgspStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
    return pEngstate->__kgspStatePreUnload__(pGpu, pEngstate, arg0);
}
@@ -1032,16 +1126,16 @@ static inline NV_STATUS kgspStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct K
    return pEngstate->__kgspStatePreInitUnlocked__(pGpu, pEngstate);
}

static inline NvBool kgspClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pIntrService, IntrServiceClearInterruptArguments *pParams) {
    return pIntrService->__kgspClearInterrupt__(pGpu, pIntrService, pParams);
}

static inline NV_STATUS kgspStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) {
    return pEngstate->__kgspStatePostLoad__(pGpu, pEngstate, arg0);
}

static inline NvBool kgspIsPresent_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) {
    return pEngstate->__kgspIsPresent__(pGpu, pEngstate);
}

static inline void kgspUnmapBufferDescriptor_DISPATCH(struct KernelGsp *arg0, CrashCatBufferDescriptor *pBufDesc) {
    arg0->__kgspUnmapBufferDescriptor__(arg0, pBufDesc);
}

static inline void kgspReadDmem_DISPATCH(struct KernelGsp *arg0, NvU32 offset, NvU32 size, void *pBuf) {
    arg0->__kgspReadDmem__(arg0, offset, size, pBuf);
}

void kgspDestruct_IMPL(struct KernelGsp *pKernelGsp);
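/*
 * Illustrative sketch (not from the driver), putting the pieces together: a
 * kgspReadDmem(...) call site expands to kgspReadDmem_DISPATCH, which jumps
 * through __kgspReadDmem__; that slot was set to the KernelCrashCatEngine
 * thunk, which offsets the pointer and lands in kcrashcatEngineReadDmem.
 * Condensed standalone model with invented types:
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef struct { unsigned char dmem[16]; } CrashCat;

typedef struct Gsp3 Gsp3;
struct Gsp3 {
    int      gspState;
    CrashCat base;                                   /* embedded ancestor */
    void   (*__readDmem__)(Gsp3 *, size_t, size_t, void *);
};

static void crashcatReadDmem(CrashCat *c, size_t off, size_t n, void *out) {
    memcpy(out, c->dmem + off, n);                   /* leaf implementation */
}

static void thunk_gspReadDmem(Gsp3 *g, size_t off, size_t n, void *out) {
    crashcatReadDmem((CrashCat *)((unsigned char *)g + offsetof(Gsp3, base)),
                     off, n, out);                   /* pointer adjustment */
}

static inline void gspReadDmem_DISPATCH(Gsp3 *g, size_t off, size_t n, void *out) {
    g->__readDmem__(g, off, n, out);                 /* virtual dispatch */
}
#define gspReadDmem(g, off, n, out) gspReadDmem_DISPATCH(g, off, n, out)

int main(void) {
    Gsp3 g = { .__readDmem__ = thunk_gspReadDmem };
    memcpy(g.base.dmem, "GSP!", 4);
    char buf[5] = {0};
    gspReadDmem(&g, 0, 4, buf);
    printf("%s\n", buf);
    return 0;
}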
@@ -19,6 +19,10 @@ extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_CrashCatEngine;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCrashCatEngine;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon;

void __nvoc_init_KernelSec2(KernelSec2*, RmHalspecOwner* );
@@ -52,6 +56,18 @@ static const struct NVOC_RTTI __nvoc_rtti_KernelSec2_IntrService = {
    /*offset=*/ NV_OFFSETOF(KernelSec2, __nvoc_base_IntrService),
};

static const struct NVOC_RTTI __nvoc_rtti_KernelSec2_CrashCatEngine = {
    /*pClassDef=*/ &__nvoc_class_def_CrashCatEngine,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(KernelSec2, __nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine),
};

static const struct NVOC_RTTI __nvoc_rtti_KernelSec2_KernelCrashCatEngine = {
    /*pClassDef=*/ &__nvoc_class_def_KernelCrashCatEngine,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(KernelSec2, __nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine),
};

static const struct NVOC_RTTI __nvoc_rtti_KernelSec2_KernelFalcon = {
    /*pClassDef=*/ &__nvoc_class_def_KernelFalcon,
    /*dtor=*/ &__nvoc_destructFromBase,
@@ -59,10 +75,12 @@ static const struct NVOC_RTTI __nvoc_rtti_KernelSec2_KernelFalcon = {
};

static const struct NVOC_CASTINFO __nvoc_castinfo_KernelSec2 = {
    /*numRelatives=*/ 5,
    /*numRelatives=*/ 7,
    /*relatives=*/ {
        &__nvoc_rtti_KernelSec2_KernelSec2,
        &__nvoc_rtti_KernelSec2_KernelFalcon,
        &__nvoc_rtti_KernelSec2_KernelCrashCatEngine,
        &__nvoc_rtti_KernelSec2_CrashCatEngine,
        &__nvoc_rtti_KernelSec2_IntrService,
        &__nvoc_rtti_KernelSec2_OBJENGSTATE,
        &__nvoc_rtti_KernelSec2_Object,
@@ -104,10 +122,78 @@ static NV_STATUS __nvoc_thunk_KernelSec2_engstateStateLoad(struct OBJGPU *pGpu,
    return ksec2StateLoad(pGpu, (struct KernelSec2 *)(((unsigned char *)pKernelSec2) - __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0);
}

static NvBool __nvoc_thunk_KernelCrashCatEngine_ksec2Configured(struct KernelSec2 *arg0) {
    return kcrashcatEngineConfigured((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset));
}

static NvU32 __nvoc_thunk_KernelCrashCatEngine_ksec2PriRead(struct KernelSec2 *arg0, NvU32 offset) {
    return kcrashcatEnginePriRead((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), offset);
}

static void __nvoc_thunk_KernelFalcon_ksec2RegWrite(struct OBJGPU *pGpu, struct KernelSec2 *pKernelFlcn, NvU32 offset, NvU32 data) {
    kflcnRegWrite(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_KernelSec2_KernelFalcon.offset), offset, data);
}

static NvU32 __nvoc_thunk_KernelFalcon_ksec2MaskDmemAddr(struct OBJGPU *pGpu, struct KernelSec2 *pKernelFlcn, NvU32 addr) {
    return kflcnMaskDmemAddr(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_KernelSec2_KernelFalcon.offset), addr);
}

static void __nvoc_thunk_OBJENGSTATE_ksec2StateDestroy(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
    engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset));
}

static void __nvoc_thunk_KernelCrashCatEngine_ksec2Vprintf(struct KernelSec2 *arg0, NvBool bReportStart, const char *fmt, va_list args) {
    kcrashcatEngineVprintf((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), bReportStart, fmt, args);
}

static NvBool __nvoc_thunk_IntrService_ksec2ClearInterrupt(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceClearInterruptArguments *pParams) {
    return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelSec2_IntrService.offset), pParams);
}

static void __nvoc_thunk_KernelCrashCatEngine_ksec2PriWrite(struct KernelSec2 *arg0, NvU32 offset, NvU32 data) {
    kcrashcatEnginePriWrite((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), offset, data);
}

static void *__nvoc_thunk_KernelCrashCatEngine_ksec2MapBufferDescriptor(struct KernelSec2 *arg0, CrashCatBufferDescriptor *pBufDesc) {
    return kcrashcatEngineMapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), pBufDesc);
}

static void __nvoc_thunk_KernelCrashCatEngine_ksec2SyncBufferDescriptor(struct KernelSec2 *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
    kcrashcatEngineSyncBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), pBufDesc, offset, size);
}

static NvU32 __nvoc_thunk_KernelFalcon_ksec2RegRead(struct OBJGPU *pGpu, struct KernelSec2 *pKernelFlcn, NvU32 offset) {
    return kflcnRegRead(pGpu, (struct KernelFalcon *)(((unsigned char *)pKernelFlcn) + __nvoc_rtti_KernelSec2_KernelFalcon.offset), offset);
}

static NvBool __nvoc_thunk_OBJENGSTATE_ksec2IsPresent(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
    return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset));
}

static NvU32 __nvoc_thunk_IntrService_ksec2ServiceInterrupt(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
    return intrservServiceInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelSec2_IntrService.offset), pParams);
}

static void __nvoc_thunk_KernelCrashCatEngine_ksec2ReadEmem(struct KernelSec2 *arg0, NvU64 offset, NvU64 size, void *pBuf) {
    kcrashcatEngineReadEmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), offset, size, pBuf);
}

static const NvU32 *__nvoc_thunk_KernelCrashCatEngine_ksec2GetScratchOffsets(struct KernelSec2 *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
    return kcrashcatEngineGetScratchOffsets((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), scratchGroupId);
}

static void __nvoc_thunk_KernelCrashCatEngine_ksec2Unload(struct KernelSec2 *arg0) {
    kcrashcatEngineUnload((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StateUnload(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) {
    return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0);
}

static NvU32 __nvoc_thunk_KernelCrashCatEngine_ksec2GetWFL0Offset(struct KernelSec2 *arg0) {
    return kcrashcatEngineGetWFL0Offset((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StateInitLocked(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
    return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset));
}
@@ -120,10 +206,6 @@ static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StatePostUnload(POBJGPU pGpu, str
    return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0);
}

static void __nvoc_thunk_OBJENGSTATE_ksec2StateDestroy(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
    engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StatePreUnload(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) {
    return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0);
}
@@ -144,20 +226,16 @@ static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StatePreInitUnlocked(POBJGPU pGpu
    return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset));
}

static NvBool __nvoc_thunk_IntrService_ksec2ClearInterrupt(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceClearInterruptArguments *pParams) {
    return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelSec2_IntrService.offset), pParams);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StatePostLoad(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) {
    return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0);
}

static NvBool __nvoc_thunk_OBJENGSTATE_ksec2IsPresent(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
    return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset));
}

static void __nvoc_thunk_KernelCrashCatEngine_ksec2UnmapBufferDescriptor(struct KernelSec2 *arg0, CrashCatBufferDescriptor *pBufDesc) {
    kcrashcatEngineUnmapBufferDescriptor((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), pBufDesc);
}

static NvU32 __nvoc_thunk_IntrService_ksec2ServiceInterrupt(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
    return intrservServiceInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelSec2_IntrService.offset), pParams);
}

static void __nvoc_thunk_KernelCrashCatEngine_ksec2ReadDmem(struct KernelSec2 *arg0, NvU32 offset, NvU32 size, void *pBuf) {
    kcrashcatEngineReadDmem((struct KernelCrashCatEngine *)(((unsigned char *)arg0) + __nvoc_rtti_KernelSec2_KernelCrashCatEngine.offset), offset, size, pBuf);
}

const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelSec2 =
@@ -313,16 +391,48 @@ static void __nvoc_init_funcTable_KernelSec2_1(KernelSec2 *pThis, RmHalspecOwner

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelSec2_engstateStateLoad;

    pThis->__ksec2Configured__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2Configured;

    pThis->__ksec2PriRead__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2PriRead;

    pThis->__ksec2RegWrite__ = &__nvoc_thunk_KernelFalcon_ksec2RegWrite;

    pThis->__ksec2MaskDmemAddr__ = &__nvoc_thunk_KernelFalcon_ksec2MaskDmemAddr;

    pThis->__ksec2StateDestroy__ = &__nvoc_thunk_OBJENGSTATE_ksec2StateDestroy;

    pThis->__ksec2Vprintf__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2Vprintf;

    pThis->__ksec2ClearInterrupt__ = &__nvoc_thunk_IntrService_ksec2ClearInterrupt;

    pThis->__ksec2PriWrite__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2PriWrite;

    pThis->__ksec2MapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2MapBufferDescriptor;

    pThis->__ksec2SyncBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2SyncBufferDescriptor;

    pThis->__ksec2RegRead__ = &__nvoc_thunk_KernelFalcon_ksec2RegRead;

    pThis->__ksec2IsPresent__ = &__nvoc_thunk_OBJENGSTATE_ksec2IsPresent;

    pThis->__ksec2ServiceInterrupt__ = &__nvoc_thunk_IntrService_ksec2ServiceInterrupt;

    pThis->__ksec2ReadEmem__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2ReadEmem;

    pThis->__ksec2GetScratchOffsets__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2GetScratchOffsets;

    pThis->__ksec2Unload__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2Unload;

    pThis->__ksec2StateUnload__ = &__nvoc_thunk_OBJENGSTATE_ksec2StateUnload;

    pThis->__ksec2GetWFL0Offset__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2GetWFL0Offset;

    pThis->__ksec2StateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_ksec2StateInitLocked;

    pThis->__ksec2StatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePreLoad;

    pThis->__ksec2StatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePostUnload;

    pThis->__ksec2StateDestroy__ = &__nvoc_thunk_OBJENGSTATE_ksec2StateDestroy;

    pThis->__ksec2StatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePreUnload;

    pThis->__ksec2StateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_ksec2StateInitUnlocked;

@@ -333,13 +443,11 @@ static void __nvoc_init_funcTable_KernelSec2_1(KernelSec2 *pThis, RmHalspecOwner

    pThis->__ksec2StatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePreInitUnlocked;

    pThis->__ksec2ClearInterrupt__ = &__nvoc_thunk_IntrService_ksec2ClearInterrupt;

    pThis->__ksec2StatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePostLoad;

    pThis->__ksec2IsPresent__ = &__nvoc_thunk_OBJENGSTATE_ksec2IsPresent;
    pThis->__ksec2UnmapBufferDescriptor__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2UnmapBufferDescriptor;

    pThis->__ksec2ServiceInterrupt__ = &__nvoc_thunk_IntrService_ksec2ServiceInterrupt;
    pThis->__ksec2ReadDmem__ = &__nvoc_thunk_KernelCrashCatEngine_ksec2ReadDmem;
}

void __nvoc_init_funcTable_KernelSec2(KernelSec2 *pThis, RmHalspecOwner *pRmhalspecowner) {
@@ -354,6 +462,8 @@ void __nvoc_init_KernelSec2(KernelSec2 *pThis, RmHalspecOwner *pRmhalspecowner)
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object;
    pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE;
    pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService;
    pThis->__nvoc_pbase_CrashCatEngine = &pThis->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine.__nvoc_base_CrashCatEngine;
    pThis->__nvoc_pbase_KernelCrashCatEngine = &pThis->__nvoc_base_KernelFalcon.__nvoc_base_KernelCrashCatEngine;
    pThis->__nvoc_pbase_KernelFalcon = &pThis->__nvoc_base_KernelFalcon;
    __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    __nvoc_init_IntrService(&pThis->__nvoc_base_IntrService);
@@ -57,6 +57,8 @@ struct KernelSec2 {
    struct Object *__nvoc_pbase_Object;
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
    struct IntrService *__nvoc_pbase_IntrService;
    struct CrashCatEngine *__nvoc_pbase_CrashCatEngine;
    struct KernelCrashCatEngine *__nvoc_pbase_KernelCrashCatEngine;
    struct KernelFalcon *__nvoc_pbase_KernelFalcon;
    struct KernelSec2 *__nvoc_pbase_KernelSec2;
    NV_STATUS (*__ksec2ConstructEngine__)(struct OBJGPU *, struct KernelSec2 *, ENGDESCRIPTOR);
@@ -69,20 +71,35 @@ struct KernelSec2 {
    const BINDATA_ARCHIVE *(*__ksec2GetBinArchiveBlUcode__)(struct OBJGPU *, struct KernelSec2 *);
    NV_STATUS (*__ksec2GetGenericBlUcode__)(struct OBJGPU *, struct KernelSec2 *, const RM_FLCN_BL_DESC **, const NvU8 **);
    const BINDATA_ARCHIVE *(*__ksec2GetBinArchiveSecurescrubUcode__)(struct OBJGPU *, struct KernelSec2 *);
    NvBool (*__ksec2Configured__)(struct KernelSec2 *);
    NvU32 (*__ksec2PriRead__)(struct KernelSec2 *, NvU32);
    void (*__ksec2RegWrite__)(struct OBJGPU *, struct KernelSec2 *, NvU32, NvU32);
    NvU32 (*__ksec2MaskDmemAddr__)(struct OBJGPU *, struct KernelSec2 *, NvU32);
    void (*__ksec2StateDestroy__)(POBJGPU, struct KernelSec2 *);
    void (*__ksec2Vprintf__)(struct KernelSec2 *, NvBool, const char *, va_list);
    NvBool (*__ksec2ClearInterrupt__)(struct OBJGPU *, struct KernelSec2 *, IntrServiceClearInterruptArguments *);
    void (*__ksec2PriWrite__)(struct KernelSec2 *, NvU32, NvU32);
    void *(*__ksec2MapBufferDescriptor__)(struct KernelSec2 *, CrashCatBufferDescriptor *);
    void (*__ksec2SyncBufferDescriptor__)(struct KernelSec2 *, CrashCatBufferDescriptor *, NvU32, NvU32);
    NvU32 (*__ksec2RegRead__)(struct OBJGPU *, struct KernelSec2 *, NvU32);
    NvBool (*__ksec2IsPresent__)(POBJGPU, struct KernelSec2 *);
    NvU32 (*__ksec2ServiceInterrupt__)(struct OBJGPU *, struct KernelSec2 *, IntrServiceServiceInterruptArguments *);
    void (*__ksec2ReadEmem__)(struct KernelSec2 *, NvU64, NvU64, void *);
    const NvU32 *(*__ksec2GetScratchOffsets__)(struct KernelSec2 *, NV_CRASHCAT_SCRATCH_GROUP_ID);
    void (*__ksec2Unload__)(struct KernelSec2 *);
    NV_STATUS (*__ksec2StateUnload__)(POBJGPU, struct KernelSec2 *, NvU32);
    NvU32 (*__ksec2GetWFL0Offset__)(struct KernelSec2 *);
    NV_STATUS (*__ksec2StateInitLocked__)(POBJGPU, struct KernelSec2 *);
    NV_STATUS (*__ksec2StatePreLoad__)(POBJGPU, struct KernelSec2 *, NvU32);
    NV_STATUS (*__ksec2StatePostUnload__)(POBJGPU, struct KernelSec2 *, NvU32);
    void (*__ksec2StateDestroy__)(POBJGPU, struct KernelSec2 *);
    NV_STATUS (*__ksec2StatePreUnload__)(POBJGPU, struct KernelSec2 *, NvU32);
    NV_STATUS (*__ksec2StateInitUnlocked__)(POBJGPU, struct KernelSec2 *);
    void (*__ksec2InitMissing__)(POBJGPU, struct KernelSec2 *);
    NV_STATUS (*__ksec2StatePreInitLocked__)(POBJGPU, struct KernelSec2 *);
    NV_STATUS (*__ksec2StatePreInitUnlocked__)(POBJGPU, struct KernelSec2 *);
    NvBool (*__ksec2ClearInterrupt__)(struct OBJGPU *, struct KernelSec2 *, IntrServiceClearInterruptArguments *);
    NV_STATUS (*__ksec2StatePostLoad__)(POBJGPU, struct KernelSec2 *, NvU32);
    NvBool (*__ksec2IsPresent__)(POBJGPU, struct KernelSec2 *);
    NvU32 (*__ksec2ServiceInterrupt__)(struct OBJGPU *, struct KernelSec2 *, IntrServiceServiceInterruptArguments *);
    void (*__ksec2UnmapBufferDescriptor__)(struct KernelSec2 *, CrashCatBufferDescriptor *);
    void (*__ksec2ReadDmem__)(struct KernelSec2 *, NvU32, NvU32, void *);
    const RM_FLCN_BL_DESC *pGenericBlUcodeDesc;
    const NvU8 *pGenericBlUcodeImg;
};
@@ -137,20 +154,35 @@ NV_STATUS __nvoc_objCreate_KernelSec2(KernelSec2**, Dynamic*, NvU32);
 #define ksec2GetGenericBlUcode_HAL(pGpu, pKernelSec2, ppDesc, ppImg) ksec2GetGenericBlUcode_DISPATCH(pGpu, pKernelSec2, ppDesc, ppImg)
 #define ksec2GetBinArchiveSecurescrubUcode(pGpu, pKernelSec2) ksec2GetBinArchiveSecurescrubUcode_DISPATCH(pGpu, pKernelSec2)
 #define ksec2GetBinArchiveSecurescrubUcode_HAL(pGpu, pKernelSec2) ksec2GetBinArchiveSecurescrubUcode_DISPATCH(pGpu, pKernelSec2)
+#define ksec2Configured(arg0) ksec2Configured_DISPATCH(arg0)
+#define ksec2PriRead(arg0, offset) ksec2PriRead_DISPATCH(arg0, offset)
+#define ksec2RegWrite(pGpu, pKernelFlcn, offset, data) ksec2RegWrite_DISPATCH(pGpu, pKernelFlcn, offset, data)
+#define ksec2MaskDmemAddr(pGpu, pKernelFlcn, addr) ksec2MaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr)
+#define ksec2StateDestroy(pGpu, pEngstate) ksec2StateDestroy_DISPATCH(pGpu, pEngstate)
+#define ksec2Vprintf(arg0, bReportStart, fmt, args) ksec2Vprintf_DISPATCH(arg0, bReportStart, fmt, args)
+#define ksec2ClearInterrupt(pGpu, pIntrService, pParams) ksec2ClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
+#define ksec2PriWrite(arg0, offset, data) ksec2PriWrite_DISPATCH(arg0, offset, data)
+#define ksec2MapBufferDescriptor(arg0, pBufDesc) ksec2MapBufferDescriptor_DISPATCH(arg0, pBufDesc)
+#define ksec2SyncBufferDescriptor(arg0, pBufDesc, offset, size) ksec2SyncBufferDescriptor_DISPATCH(arg0, pBufDesc, offset, size)
+#define ksec2RegRead(pGpu, pKernelFlcn, offset) ksec2RegRead_DISPATCH(pGpu, pKernelFlcn, offset)
+#define ksec2IsPresent(pGpu, pEngstate) ksec2IsPresent_DISPATCH(pGpu, pEngstate)
+#define ksec2ServiceInterrupt(pGpu, pIntrService, pParams) ksec2ServiceInterrupt_DISPATCH(pGpu, pIntrService, pParams)
+#define ksec2ReadEmem(arg0, offset, size, pBuf) ksec2ReadEmem_DISPATCH(arg0, offset, size, pBuf)
+#define ksec2GetScratchOffsets(arg0, scratchGroupId) ksec2GetScratchOffsets_DISPATCH(arg0, scratchGroupId)
+#define ksec2Unload(arg0) ksec2Unload_DISPATCH(arg0)
 #define ksec2StateUnload(pGpu, pEngstate, arg0) ksec2StateUnload_DISPATCH(pGpu, pEngstate, arg0)
+#define ksec2GetWFL0Offset(arg0) ksec2GetWFL0Offset_DISPATCH(arg0)
 #define ksec2StateInitLocked(pGpu, pEngstate) ksec2StateInitLocked_DISPATCH(pGpu, pEngstate)
 #define ksec2StatePreLoad(pGpu, pEngstate, arg0) ksec2StatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
 #define ksec2StatePostUnload(pGpu, pEngstate, arg0) ksec2StatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
-#define ksec2StateDestroy(pGpu, pEngstate) ksec2StateDestroy_DISPATCH(pGpu, pEngstate)
 #define ksec2StatePreUnload(pGpu, pEngstate, arg0) ksec2StatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
 #define ksec2StateInitUnlocked(pGpu, pEngstate) ksec2StateInitUnlocked_DISPATCH(pGpu, pEngstate)
 #define ksec2InitMissing(pGpu, pEngstate) ksec2InitMissing_DISPATCH(pGpu, pEngstate)
 #define ksec2StatePreInitLocked(pGpu, pEngstate) ksec2StatePreInitLocked_DISPATCH(pGpu, pEngstate)
 #define ksec2StatePreInitUnlocked(pGpu, pEngstate) ksec2StatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
-#define ksec2ClearInterrupt(pGpu, pIntrService, pParams) ksec2ClearInterrupt_DISPATCH(pGpu, pIntrService, pParams)
 #define ksec2StatePostLoad(pGpu, pEngstate, arg0) ksec2StatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
-#define ksec2IsPresent(pGpu, pEngstate) ksec2IsPresent_DISPATCH(pGpu, pEngstate)
-#define ksec2ServiceInterrupt(pGpu, pIntrService, pParams) ksec2ServiceInterrupt_DISPATCH(pGpu, pIntrService, pParams)
+#define ksec2UnmapBufferDescriptor(arg0, pBufDesc) ksec2UnmapBufferDescriptor_DISPATCH(arg0, pBufDesc)
+#define ksec2ReadDmem(arg0, offset, size, pBuf) ksec2ReadDmem_DISPATCH(arg0, offset, size, pBuf)
 NV_STATUS ksec2ConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, ENGDESCRIPTOR arg0);
 
 static inline NV_STATUS ksec2ConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, ENGDESCRIPTOR arg0) {

@@ -235,10 +267,78 @@ static inline const BINDATA_ARCHIVE *ksec2GetBinArchiveSecurescrubUcode_DISPATCH
     return pKernelSec2->__ksec2GetBinArchiveSecurescrubUcode__(pGpu, pKernelSec2);
 }
 
+static inline NvBool ksec2Configured_DISPATCH(struct KernelSec2 *arg0) {
+    return arg0->__ksec2Configured__(arg0);
+}
+
+static inline NvU32 ksec2PriRead_DISPATCH(struct KernelSec2 *arg0, NvU32 offset) {
+    return arg0->__ksec2PriRead__(arg0, offset);
+}
+
+static inline void ksec2RegWrite_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelFlcn, NvU32 offset, NvU32 data) {
+    pKernelFlcn->__ksec2RegWrite__(pGpu, pKernelFlcn, offset, data);
+}
+
+static inline NvU32 ksec2MaskDmemAddr_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelFlcn, NvU32 addr) {
+    return pKernelFlcn->__ksec2MaskDmemAddr__(pGpu, pKernelFlcn, addr);
+}
+
+static inline void ksec2StateDestroy_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
+    pEngstate->__ksec2StateDestroy__(pGpu, pEngstate);
+}
+
+static inline void ksec2Vprintf_DISPATCH(struct KernelSec2 *arg0, NvBool bReportStart, const char *fmt, va_list args) {
+    arg0->__ksec2Vprintf__(arg0, bReportStart, fmt, args);
+}
+
+static inline NvBool ksec2ClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceClearInterruptArguments *pParams) {
+    return pIntrService->__ksec2ClearInterrupt__(pGpu, pIntrService, pParams);
+}
+
+static inline void ksec2PriWrite_DISPATCH(struct KernelSec2 *arg0, NvU32 offset, NvU32 data) {
+    arg0->__ksec2PriWrite__(arg0, offset, data);
+}
+
+static inline void *ksec2MapBufferDescriptor_DISPATCH(struct KernelSec2 *arg0, CrashCatBufferDescriptor *pBufDesc) {
+    return arg0->__ksec2MapBufferDescriptor__(arg0, pBufDesc);
+}
+
+static inline void ksec2SyncBufferDescriptor_DISPATCH(struct KernelSec2 *arg0, CrashCatBufferDescriptor *pBufDesc, NvU32 offset, NvU32 size) {
+    arg0->__ksec2SyncBufferDescriptor__(arg0, pBufDesc, offset, size);
+}
+
+static inline NvU32 ksec2RegRead_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelFlcn, NvU32 offset) {
+    return pKernelFlcn->__ksec2RegRead__(pGpu, pKernelFlcn, offset);
+}
+
+static inline NvBool ksec2IsPresent_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
+    return pEngstate->__ksec2IsPresent__(pGpu, pEngstate);
+}
+
+static inline NvU32 ksec2ServiceInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
+    return pIntrService->__ksec2ServiceInterrupt__(pGpu, pIntrService, pParams);
+}
+
+static inline void ksec2ReadEmem_DISPATCH(struct KernelSec2 *arg0, NvU64 offset, NvU64 size, void *pBuf) {
+    arg0->__ksec2ReadEmem__(arg0, offset, size, pBuf);
+}
+
+static inline const NvU32 *ksec2GetScratchOffsets_DISPATCH(struct KernelSec2 *arg0, NV_CRASHCAT_SCRATCH_GROUP_ID scratchGroupId) {
+    return arg0->__ksec2GetScratchOffsets__(arg0, scratchGroupId);
+}
+
+static inline void ksec2Unload_DISPATCH(struct KernelSec2 *arg0) {
+    arg0->__ksec2Unload__(arg0);
+}
+
 static inline NV_STATUS ksec2StateUnload_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) {
     return pEngstate->__ksec2StateUnload__(pGpu, pEngstate, arg0);
 }
 
+static inline NvU32 ksec2GetWFL0Offset_DISPATCH(struct KernelSec2 *arg0) {
+    return arg0->__ksec2GetWFL0Offset__(arg0);
+}
+
 static inline NV_STATUS ksec2StateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
     return pEngstate->__ksec2StateInitLocked__(pGpu, pEngstate);
 }
@@ -251,10 +351,6 @@ static inline NV_STATUS ksec2StatePostUnload_DISPATCH(POBJGPU pGpu, struct Kerne
     return pEngstate->__ksec2StatePostUnload__(pGpu, pEngstate, arg0);
 }
 
-static inline void ksec2StateDestroy_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
-    pEngstate->__ksec2StateDestroy__(pGpu, pEngstate);
-}
-
 static inline NV_STATUS ksec2StatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) {
     return pEngstate->__ksec2StatePreUnload__(pGpu, pEngstate, arg0);
 }
@@ -275,20 +371,16 @@ static inline NV_STATUS ksec2StatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct
     return pEngstate->__ksec2StatePreInitUnlocked__(pGpu, pEngstate);
 }
 
-static inline NvBool ksec2ClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceClearInterruptArguments *pParams) {
-    return pIntrService->__ksec2ClearInterrupt__(pGpu, pIntrService, pParams);
-}
-
 static inline NV_STATUS ksec2StatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) {
     return pEngstate->__ksec2StatePostLoad__(pGpu, pEngstate, arg0);
 }
 
-static inline NvBool ksec2IsPresent_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) {
-    return pEngstate->__ksec2IsPresent__(pGpu, pEngstate);
+static inline void ksec2UnmapBufferDescriptor_DISPATCH(struct KernelSec2 *arg0, CrashCatBufferDescriptor *pBufDesc) {
+    arg0->__ksec2UnmapBufferDescriptor__(arg0, pBufDesc);
 }
 
-static inline NvU32 ksec2ServiceInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pIntrService, IntrServiceServiceInterruptArguments *pParams) {
-    return pIntrService->__ksec2ServiceInterrupt__(pGpu, pIntrService, pParams);
+static inline void ksec2ReadDmem_DISPATCH(struct KernelSec2 *arg0, NvU32 offset, NvU32 size, void *pBuf) {
+    arg0->__ksec2ReadDmem__(arg0, offset, size, pBuf);
 }
 
 void ksec2Destruct_IMPL(struct KernelSec2 *pKernelSec2);
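
// A sketch (hypothetical names, not RM code) of the NVOC virtual-dispatch
// pattern that all of the generated KernelSec2 definitions above follow: a
// per-object table of __name__ function pointers wired up at init time, a
// *_DISPATCH inline that calls through the pointer, and a call-site macro
// that hides the dispatch.
#include <stdbool.h>
#include <stdio.h>

typedef struct Engine Engine;
struct Engine {
    bool (*__engIsPresent__)(Engine *);           // virtual slot, set at init
};

static bool engIsPresent_IMPL(Engine *pEng) { return true; }

static inline bool engIsPresent_DISPATCH(Engine *pEng) {
    return pEng->__engIsPresent__(pEng);          // indirect call through slot
}
#define engIsPresent(p) engIsPresent_DISPATCH(p)  // call-site macro

int main(void) {
    Engine eng = { .__engIsPresent__ = engIsPresent_IMPL }; // __nvoc_init-style wiring
    printf("%d\n", engIsPresent(&eng));           // prints 1
    return 0;
}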
@@ -815,6 +815,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
     { 0x20B6, 0x1492, 0x10de, "NVIDIA PG506-232" },
     { 0x20B7, 0x1532, 0x10de, "NVIDIA A30" },
     { 0x20B7, 0x1804, 0x10de, "NVIDIA A30" },
+    { 0x20BD, 0x17f4, 0x10de, "NVIDIA A800-SXM4-40GB" },
     { 0x20F1, 0x145f, 0x10de, "NVIDIA A100-PCIE-40GB" },
     { 0x20F3, 0x179b, 0x10de, "NVIDIA A800-SXM4-80GB" },
     { 0x20F3, 0x179c, 0x10de, "NVIDIA A800-SXM4-80GB" },
@@ -826,6 +827,10 @@ static const CHIPS_RELEASED sChipsReleased[] = {
     { 0x20F3, 0x17a2, 0x10de, "NVIDIA A800-SXM4-80GB" },
     { 0x20F5, 0x1799, 0x10de, "NVIDIA A800 80GB PCIe" },
     { 0x20F5, 0x179a, 0x10de, "NVIDIA A800 80GB PCIe LC" },
+    { 0x20F6, 0x180a, 0x1028, "NVIDIA A800 40GB Active" },
+    { 0x20F6, 0x180a, 0x103c, "NVIDIA A800 40GB Active" },
+    { 0x20F6, 0x180a, 0x10de, "NVIDIA A800 40GB Active" },
+    { 0x20F6, 0x180a, 0x17aa, "NVIDIA A800 40GB Active" },
     { 0x2182, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660 Ti" },
     { 0x2184, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660" },
     { 0x2187, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650 SUPER" },
@@ -885,6 +890,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
     { 0x2236, 0x1482, 0x10de, "NVIDIA A10" },
     { 0x2237, 0x152f, 0x10de, "NVIDIA A10G" },
     { 0x2238, 0x1677, 0x10de, "NVIDIA A10M" },
+    { 0x2321, 0x1839, 0x10de, "NVIDIA H100 NVL" },
     { 0x2322, 0x17a4, 0x10de, "NVIDIA H800 PCIe" },
     { 0x2324, 0x17a6, 0x10de, "NVIDIA H800" },
     { 0x2324, 0x17a8, 0x10de, "NVIDIA H800" },
@@ -892,6 +898,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
     { 0x2330, 0x16c1, 0x10de, "NVIDIA H100 80GB HBM3" },
     { 0x2331, 0x1626, 0x10de, "NVIDIA H100 PCIe" },
     { 0x2339, 0x17fc, 0x10de, "NVIDIA H100" },
+    { 0x233A, 0x183a, 0x10de, "NVIDIA H800 NVL" },
     { 0x2414, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" },
     { 0x2420, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" },
     { 0x2438, 0x0000, 0x0000, "NVIDIA RTX A5500 Laptop GPU" },
@@ -986,10 +993,13 @@ static const CHIPS_RELEASED sChipsReleased[] = {
     { 0x26B2, 0x17fa, 0x17aa, "NVIDIA RTX 5000 Ada Generation" },
     { 0x26B5, 0x169d, 0x10de, "NVIDIA L40" },
     { 0x26B5, 0x17da, 0x10de, "NVIDIA L40" },
+    { 0x26B9, 0x1851, 0x10de, "NVIDIA L40S" },
+    { 0x26B9, 0x18cf, 0x10de, "NVIDIA L40S" },
     { 0x2704, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080" },
     { 0x2717, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090 Laptop GPU" },
     { 0x2730, 0x0000, 0x0000, "NVIDIA RTX 5000 Ada Generation Laptop GPU" },
     { 0x2757, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090 Laptop GPU" },
+    { 0x2770, 0x0000, 0x0000, "NVIDIA RTX 5000 Ada Generation Embedded GPU" },
     { 0x2782, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Ti" },
     { 0x2786, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070" },
     { 0x27A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080 Laptop GPU" },
@@ -1006,6 +1016,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
     { 0x27BA, 0x0000, 0x0000, "NVIDIA RTX 4000 Ada Generation Laptop GPU" },
     { 0x27BB, 0x0000, 0x0000, "NVIDIA RTX 3500 Ada Generation Laptop GPU" },
     { 0x27E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080 Laptop GPU" },
+    { 0x27FB, 0x0000, 0x0000, "NVIDIA RTX 3500 Ada Generation Embedded GPU" },
     { 0x2803, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Ti" },
     { 0x2805, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Ti" },
     { 0x2820, 0x0000, 0x0000, "NVIDIA GeForce RTX 4070 Laptop GPU" },
@@ -1017,6 +1028,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
     { 0x28B8, 0x0000, 0x0000, "NVIDIA RTX 2000 Ada Generation Laptop GPU" },
     { 0x28E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 4060 Laptop GPU" },
     { 0x28E1, 0x0000, 0x0000, "NVIDIA GeForce RTX 4050 Laptop GPU" },
+    { 0x28F8, 0x0000, 0x0000, "NVIDIA RTX 2000 Ada Generation Embedded GPU" },
     { 0x13BD, 0x11cc, 0x10DE, "GRID M10-0B" },
     { 0x13BD, 0x11cd, 0x10DE, "GRID M10-1B" },
     { 0x13BD, 0x11ce, 0x10DE, "GRID M10-0Q" },
@@ -1534,6 +1546,19 @@ static const CHIPS_RELEASED sChipsReleased[] = {
     { 0x20F5, 0x17ca, 0x10DE, "GRID A800D-40C" },
     { 0x20F5, 0x17cb, 0x10DE, "GRID A800D-80C" },
     { 0x20F5, 0x183f, 0x10DE, "GRID A800D-1-20C" },
+    { 0x20F6, 0x17cc, 0x10DE, "GRID A800-1-5CME" },
+    { 0x20F6, 0x17cd, 0x10DE, "GRID A800-1-5C" },
+    { 0x20F6, 0x17ce, 0x10DE, "GRID A800-2-10C" },
+    { 0x20F6, 0x17cf, 0x10DE, "GRID A800-3-20C" },
+    { 0x20F6, 0x17d0, 0x10DE, "GRID A800-4-20C" },
+    { 0x20F6, 0x17d1, 0x10DE, "GRID A800-7-40C" },
+    { 0x20F6, 0x17d2, 0x10DE, "GRID A800-4C" },
+    { 0x20F6, 0x17d3, 0x10DE, "GRID A800-5C" },
+    { 0x20F6, 0x17d4, 0x10DE, "GRID A800-8C" },
+    { 0x20F6, 0x17d5, 0x10DE, "GRID A800-10C" },
+    { 0x20F6, 0x17d6, 0x10DE, "GRID A800-20C" },
+    { 0x20F6, 0x17d7, 0x10DE, "GRID A800-40C" },
+    { 0x20F6, 0x1843, 0x10DE, "GRID A800-1-10C" },
     { 0x2230, 0x14fa, 0x10DE, "NVIDIA RTXA6000-1B" },
     { 0x2230, 0x14fb, 0x10DE, "NVIDIA RTXA6000-2B" },
     { 0x2230, 0x14fc, 0x10DE, "NVIDIA RTXA6000-1Q" },
@@ -1772,21 +1797,6 @@ static const CHIPS_RELEASED sChipsReleased[] = {
     { 0x2322, 0x17ee, 0x10DE, "NVIDIA H800-40C" },
     { 0x2322, 0x17ef, 0x10DE, "NVIDIA H800-80C" },
     { 0x2322, 0x1845, 0x10DE, "NVIDIA H800-1-20C" },
-    { 0x2330, 0x187a, 0x10DE, "NVIDIA H100XM-1-10CME" },
-    { 0x2330, 0x187b, 0x10DE, "NVIDIA H100XM-1-10C" },
-    { 0x2330, 0x187c, 0x10DE, "NVIDIA H100XM-1-20C" },
-    { 0x2330, 0x187d, 0x10DE, "NVIDIA H100XM-2-20C" },
-    { 0x2330, 0x187e, 0x10DE, "NVIDIA H100XM-3-40C" },
-    { 0x2330, 0x187f, 0x10DE, "NVIDIA H100XM-4-40C" },
-    { 0x2330, 0x1880, 0x10DE, "NVIDIA H100XM-7-80C" },
-    { 0x2330, 0x1881, 0x10DE, "NVIDIA H100XM-4C" },
-    { 0x2330, 0x1882, 0x10DE, "NVIDIA H100XM-5C" },
-    { 0x2330, 0x1883, 0x10DE, "NVIDIA H100XM-8C" },
-    { 0x2330, 0x1884, 0x10DE, "NVIDIA H100XM-10C" },
-    { 0x2330, 0x1885, 0x10DE, "NVIDIA H100XM-16C" },
-    { 0x2330, 0x1886, 0x10DE, "NVIDIA H100XM-20C" },
-    { 0x2330, 0x1887, 0x10DE, "NVIDIA H100XM-40C" },
-    { 0x2330, 0x1888, 0x10DE, "NVIDIA H100XM-80C" },
     { 0x2331, 0x16d3, 0x10DE, "NVIDIA H100-1-10C" },
     { 0x2331, 0x16d4, 0x10DE, "NVIDIA H100-2-20C" },
     { 0x2331, 0x16d5, 0x10DE, "NVIDIA H100-3-40C" },
@@ -1888,6 +1898,30 @@ static const CHIPS_RELEASED sChipsReleased[] = {
     { 0x26B1, 0x172c, 0x10DE, "NVIDIA RTX6000-Ada-16C" },
     { 0x26B1, 0x172d, 0x10DE, "NVIDIA RTX6000-Ada-24C" },
     { 0x26B1, 0x172e, 0x10DE, "NVIDIA RTX6000-Ada-48C" },
+    { 0x26B2, 0x1821, 0x10DE, "NVIDIA RTX5000-Ada-1B" },
+    { 0x26B2, 0x1822, 0x10DE, "NVIDIA RTX5000-Ada-2B" },
+    { 0x26B2, 0x1823, 0x10DE, "NVIDIA RTX5000-Ada-1Q" },
+    { 0x26B2, 0x1824, 0x10DE, "NVIDIA RTX5000-Ada-2Q" },
+    { 0x26B2, 0x1825, 0x10DE, "NVIDIA RTX5000-Ada-4Q" },
+    { 0x26B2, 0x1826, 0x10DE, "NVIDIA RTX5000-Ada-8Q" },
+    { 0x26B2, 0x1827, 0x10DE, "NVIDIA RTX5000-Ada-16Q" },
+    { 0x26B2, 0x1828, 0x10DE, "NVIDIA RTX5000-Ada-32Q" },
+    { 0x26B2, 0x1829, 0x10DE, "NVIDIA RTX5000-Ada-1A" },
+    { 0x26B2, 0x182a, 0x10DE, "NVIDIA RTX5000-Ada-2A" },
+    { 0x26B2, 0x182b, 0x10DE, "NVIDIA RTX5000-Ada-4A" },
+    { 0x26B2, 0x182c, 0x10DE, "NVIDIA RTX5000-Ada-8A" },
+    { 0x26B2, 0x182d, 0x10DE, "NVIDIA RTX5000-Ada-16A" },
+    { 0x26B2, 0x182e, 0x10DE, "NVIDIA RTX5000-Ada-32A" },
+    { 0x26B2, 0x182f, 0x10DE, "NVIDIA RTX5000-Ada-1" },
+    { 0x26B2, 0x1830, 0x10DE, "NVIDIA RTX5000-Ada-2" },
+    { 0x26B2, 0x1831, 0x10DE, "NVIDIA RTX5000-Ada-4" },
+    { 0x26B2, 0x1832, 0x10DE, "NVIDIA RTX5000-Ada-8" },
+    { 0x26B2, 0x1833, 0x10DE, "NVIDIA RTX5000-Ada-16" },
+    { 0x26B2, 0x1834, 0x10DE, "NVIDIA RTX5000-Ada-32" },
+    { 0x26B2, 0x1835, 0x10DE, "NVIDIA RTX5000-Ada-4C" },
+    { 0x26B2, 0x1836, 0x10DE, "NVIDIA RTX5000-Ada-8C" },
+    { 0x26B2, 0x1837, 0x10DE, "NVIDIA RTX5000-Ada-16C" },
+    { 0x26B2, 0x1838, 0x10DE, "NVIDIA RTX5000-Ada-32C" },
     { 0x26B5, 0x176d, 0x10DE, "NVIDIA L40-1B" },
     { 0x26B5, 0x176e, 0x10DE, "NVIDIA L40-2B" },
     { 0x26B5, 0x176f, 0x10DE, "NVIDIA L40-1Q" },
@@ -1962,6 +1996,47 @@ static const CHIPS_RELEASED sChipsReleased[] = {
     { 0x26B8, 0x176c, 0x10DE, "NVIDIA L40G-24C" },
     { 0x26B8, 0x181c, 0x10DE, "NVIDIA GeForce RTX 3060" },
     { 0x26B8, 0x181e, 0x10DE, "NVIDIA GeForce RTX 3050" },
+    { 0x26B9, 0x1889, 0x10DE, "NVIDIA L40S-1B" },
+    { 0x26B9, 0x188a, 0x10DE, "NVIDIA L40S-2B" },
+    { 0x26B9, 0x188b, 0x10DE, "NVIDIA L40S-1Q" },
+    { 0x26B9, 0x188c, 0x10DE, "NVIDIA L40S-2Q" },
+    { 0x26B9, 0x188d, 0x10DE, "NVIDIA L40S-3Q" },
+    { 0x26B9, 0x188e, 0x10DE, "NVIDIA L40S-4Q" },
+    { 0x26B9, 0x188f, 0x10DE, "NVIDIA L40S-6Q" },
+    { 0x26B9, 0x1890, 0x10DE, "NVIDIA L40S-8Q" },
+    { 0x26B9, 0x1891, 0x10DE, "NVIDIA L40S-12Q" },
+    { 0x26B9, 0x1892, 0x10DE, "NVIDIA L40S-16Q" },
+    { 0x26B9, 0x1893, 0x10DE, "NVIDIA L40S-24Q" },
+    { 0x26B9, 0x1894, 0x10DE, "NVIDIA L40S-48Q" },
+    { 0x26B9, 0x1895, 0x10DE, "NVIDIA L40S-1A" },
+    { 0x26B9, 0x1896, 0x10DE, "NVIDIA L40S-2A" },
+    { 0x26B9, 0x1897, 0x10DE, "NVIDIA L40S-3A" },
+    { 0x26B9, 0x1898, 0x10DE, "NVIDIA L40S-4A" },
+    { 0x26B9, 0x1899, 0x10DE, "NVIDIA L40S-6A" },
+    { 0x26B9, 0x189a, 0x10DE, "NVIDIA L40S-8A" },
+    { 0x26B9, 0x189b, 0x10DE, "NVIDIA L40S-12A" },
+    { 0x26B9, 0x189c, 0x10DE, "NVIDIA L40S-16A" },
+    { 0x26B9, 0x189d, 0x10DE, "NVIDIA L40S-24A" },
+    { 0x26B9, 0x189e, 0x10DE, "NVIDIA L40S-48A" },
+    { 0x26B9, 0x189f, 0x10DE, "GeForce RTX 3050" },
+    { 0x26B9, 0x18a0, 0x10DE, "GeForce RTX 3060" },
+    { 0x26B9, 0x18a1, 0x10DE, "NVIDIA L40S-1" },
+    { 0x26B9, 0x18a2, 0x10DE, "NVIDIA L40S-2" },
+    { 0x26B9, 0x18a3, 0x10DE, "NVIDIA L40S-3" },
+    { 0x26B9, 0x18a4, 0x10DE, "NVIDIA L40S-4" },
+    { 0x26B9, 0x18a5, 0x10DE, "NVIDIA L40S-6" },
+    { 0x26B9, 0x18a6, 0x10DE, "NVIDIA L40S-8" },
+    { 0x26B9, 0x18a7, 0x10DE, "NVIDIA L40S-12" },
+    { 0x26B9, 0x18a8, 0x10DE, "NVIDIA L40S-16" },
+    { 0x26B9, 0x18a9, 0x10DE, "NVIDIA L40S-24" },
+    { 0x26B9, 0x18aa, 0x10DE, "NVIDIA L40S-48" },
+    { 0x26B9, 0x18ab, 0x10DE, "NVIDIA L40S-4C" },
+    { 0x26B9, 0x18ac, 0x10DE, "NVIDIA L40S-6C" },
+    { 0x26B9, 0x18ad, 0x10DE, "NVIDIA L40S-8C" },
+    { 0x26B9, 0x18ae, 0x10DE, "NVIDIA L40S-12C" },
+    { 0x26B9, 0x18af, 0x10DE, "NVIDIA L40S-16C" },
+    { 0x26B9, 0x18b0, 0x10DE, "NVIDIA L40S-24C" },
+    { 0x26B9, 0x18b1, 0x10DE, "NVIDIA L40S-48C" },
     { 0x27B8, 0x172f, 0x10DE, "NVIDIA L4-1B" },
     { 0x27B8, 0x1730, 0x10DE, "NVIDIA L4-2B" },
     { 0x27B8, 0x1731, 0x10DE, "NVIDIA L4-1Q" },

[File diff suppressed because it is too large.]

@@ -489,6 +489,7 @@ struct Subdevice {
     NV_STATUS (*__subdeviceCtrlCmdInternalStaticKMIGmgrGetComputeInstanceProfiles__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_COMPUTE_PROFILES_PARAMS *);
     NV_STATUS (*__subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance__)(struct Subdevice *, NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS *);
     NV_STATUS (*__subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance__)(struct Subdevice *, NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS *);
+    NV_STATUS (*__subdeviceCtrlCmdInternalKMIGmgrPromoteGpuInstanceMemRange__)(struct Subdevice *, NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS *);
     NV_STATUS (*__subdeviceCtrlCmdOsUnixGc6BlockerRefCnt__)(struct Subdevice *, NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *);
     NV_STATUS (*__subdeviceCtrlCmdOsUnixAllowDisallowGcoff__)(struct Subdevice *, NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *);
     NV_STATUS (*__subdeviceCtrlCmdOsUnixAudioDynamicPower__)(struct Subdevice *, NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *);
@@ -1082,6 +1083,7 @@ NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_C
 #define subdeviceCtrlCmdInternalStaticKMIGmgrGetComputeInstanceProfiles(pSubdevice, pParams) subdeviceCtrlCmdInternalStaticKMIGmgrGetComputeInstanceProfiles_DISPATCH(pSubdevice, pParams)
 #define subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance(pSubdevice, pParams) subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance_DISPATCH(pSubdevice, pParams)
 #define subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance(pSubdevice, pParams) subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdInternalKMIGmgrPromoteGpuInstanceMemRange(pSubdevice, pParams) subdeviceCtrlCmdInternalKMIGmgrPromoteGpuInstanceMemRange_DISPATCH(pSubdevice, pParams)
 #define subdeviceCtrlCmdOsUnixGc6BlockerRefCnt(pSubdevice, pParams) subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_DISPATCH(pSubdevice, pParams)
 #define subdeviceCtrlCmdOsUnixAllowDisallowGcoff(pSubdevice, pParams) subdeviceCtrlCmdOsUnixAllowDisallowGcoff_DISPATCH(pSubdevice, pParams)
 #define subdeviceCtrlCmdOsUnixAudioDynamicPower(pSubdevice, pParams) subdeviceCtrlCmdOsUnixAudioDynamicPower_DISPATCH(pSubdevice, pParams)
@@ -3481,6 +3483,12 @@ static inline NV_STATUS subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance_DISPATC
     return pSubdevice->__subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance__(pSubdevice, pParams);
 }
 
+NV_STATUS subdeviceCtrlCmdInternalKMIGmgrPromoteGpuInstanceMemRange_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS *pParams);
+
+static inline NV_STATUS subdeviceCtrlCmdInternalKMIGmgrPromoteGpuInstanceMemRange_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_KMIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS *pParams) {
+    return pSubdevice->__subdeviceCtrlCmdInternalKMIGmgrPromoteGpuInstanceMemRange__(pSubdevice, pParams);
+}
+
 NV_STATUS subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams);
 
 static inline NV_STATUS subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams) {
@@ -0,0 +1,3 @@

#include "g_kernel_crashcat_engine_nvoc.h"

@@ -0,0 +1,3 @@

#include "g_crashcat_engine_nvoc.h"

@@ -0,0 +1,3 @@

#include "g_crashcat_queue_nvoc.h"

@@ -0,0 +1,3 @@

#include "g_crashcat_report_nvoc.h"

@@ -0,0 +1,3 @@

#include "g_crashcat_wayfinder_nvoc.h"
@@ -566,8 +566,11 @@ typedef struct UvmPlatformInfo_tag
     // Out: ATS (Address Translation Services) is supported
     NvBool atsSupported;
 
-    // Out: AMD SEV (Secure Encrypted Virtualization) is enabled
-    NvBool sevEnabled;
+    // Out: True if HW trusted execution, such as AMD's SEV-SNP or Intel's TDX,
+    // is enabled in the VM, indicating that Confidential Computing must be
+    // also enabled in the GPU(s); these two security features are either both
+    // enabled, or both disabled.
+    NvBool confComputingEnabled;
 } UvmPlatformInfo;
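
// A sketch (hypothetical caller, not UVM code) of what this rename means for
// consumers of UvmPlatformInfo: code that previously branched on SEV alone
// now branches on the combined Confidential Computing flag, since SEV-SNP/TDX
// in the VM and CC in the GPU(s) are enabled or disabled together.
static NvBool platformRequiresProtectedDma(const UvmPlatformInfo *pInfo)
{
    // Previously: return pInfo->sevEnabled;
    return pInfo->confComputingEnabled;
}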
 
 typedef struct UvmGpuClientInfo_tag
@@ -154,6 +154,7 @@ static NV_STATUS _issueRpcAndWait(OBJGPU *pGpu, OBJRPC *pRpc)
 
     // should not be called in broadcast mode
     NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE);
+    NV_CHECK(LEVEL_ERROR, rmDeviceGpuLockIsOwner(pGpu->gpuInstance));
 
     if (bProfileRPC)
     {
@@ -88,6 +88,7 @@ confComputeConstructEngine_IMPL(OBJGPU *pGpu,
     else if (pGpu->getProperty(pGpu, PDB_PROP_GPU_CC_FEATURE_CAPABLE))
     {
         pConfCompute->setProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_CC_FEATURE_ENABLED, NV_TRUE);
+        pGpu->setProperty(pGpu, PDB_PROP_GPU_FASTPATH_SEQ_ENABLED, NV_TRUE);
     }
     else
     {
@@ -235,7 +235,8 @@ confComputeApiCtrlCmdGetGpuCertificate_IMPL
     pGpu = GPU_RES_GET_GPU(pSubdevice);
     pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
 
-    if (pConfCompute != NULL)
+    if (pConfCompute != NULL && pConfCompute->pSpdm != NULL &&
+        pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_SPDM_ENABLED))
     {
         // Set max size of certificate buffers before calling SPDM.
         pParams->certChainSize = NV_CONF_COMPUTE_CERT_CHAIN_MAX_SIZE;
@@ -271,7 +272,8 @@ confComputeApiCtrlCmdGetGpuAttestationReport_IMPL
     pGpu = GPU_RES_GET_GPU(pSubdevice);
     pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
 
-    if (pConfCompute != NULL)
+    if (pConfCompute != NULL && pConfCompute->pSpdm != NULL &&
+        pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_SPDM_ENABLED))
     {
         // Set max size of report buffers before calling SPDM.
         pParams->attestationReportSize = NV_CONF_COMPUTE_GPU_ATTESTATION_REPORT_MAX_SIZE;
@@ -0,0 +1,74 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#define NVOC_KERNEL_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#include "gpu/falcon/kernel_crashcat_engine.h"
#include "gpu/gpu.h"
#include "utils/nvprintf.h"

#include "published/turing/tu102/dev_falcon_v4.h"
void kcrashcatEngineReadDmem_TU102
(
    KernelCrashCatEngine *pKernelCrashCatEng,
    NvU32 offset,
    NvU32 size,
    void *pBuf
)
{
    // This implementation only supports 32-bit-aligned accesses
    NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR, (offset & (sizeof(NvU32) - 1)) == 0);
    NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR, (size & (sizeof(NvU32) - 1)) == 0);

    NvU8 port = pKernelCrashCatEng->dmemPort;
    NvU32 dmemc = kcrashcatEngineMaskDmemAddr(pKernelCrashCatEng->pGpu, pKernelCrashCatEng, offset);
    dmemc = FLD_SET_DRF(_PFALCON, _FALCON_DMEMC, _AINCR, _TRUE, dmemc);
    kcrashcatEngineRegWrite(pKernelCrashCatEng->pGpu, pKernelCrashCatEng,
                            NV_PFALCON_FALCON_DMEMC(port), dmemc);

    NvU32 *pWordBuf = (NvU32 *)pBuf;
    for (NvU32 i = 0; i < (size >> 2); i++)
        pWordBuf[i] = kcrashcatEngineRegRead(pKernelCrashCatEng->pGpu, pKernelCrashCatEng,
                                             NV_PFALCON_FALCON_DMEMD(port));
}
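
// Usage sketch (hypothetical offsets and sizes): with _AINCR set in DMEMC,
// each read of DMEMD returns the next 32-bit word and advances the window,
// so a block can be pulled out of DMEM with one DMEMC write followed by N
// DMEMD reads.
//
//     NvU32 words[8];
//     kcrashcatEngineReadDmem_HAL(pKernelCrashCatEng,
//                                 0x100,          // 32-bit-aligned DMEM offset
//                                 sizeof(words),  // multiple of 4 bytes
//                                 words);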

NvU32 kcrashcatEngineGetWFL0Offset_TU102(KernelCrashCatEngine *pKernelCrashCatEng)
{
    return NV_PFALCON_FALCON_DEBUGINFO;
}

const NvU32 *kcrashcatEngineGetScratchOffsets_TU102
(
    KernelCrashCatEngine *pKernelCrashCatEng,
    NV_CRASHCAT_SCRATCH_GROUP_ID scratchId
)
{
    NV_CRASHCAT_DEF_SCRATCH_GROUP_V1_REGMAP_TABLE(scratchOffsetTable);

    if (scratchId <= NV_CRASHCAT_SCRATCH_GROUP_ID_LAST)
        return scratchOffsetTable[scratchId];

    NV_PRINTF(LEVEL_ERROR, "unknown CrashCat scratch ID %u\n", scratchId);

    return NULL;
}
@@ -0,0 +1,328 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#define NVOC_KERNEL_CRASHCAT_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#include "gpu/falcon/kernel_crashcat_engine.h"
#include "gpu/gpu.h"
#include "core/printf.h"
#include "os/nv_memory_type.h"
NV_STATUS kcrashcatEngineConfigure_IMPL
(
    KernelCrashCatEngine *pKernelCrashCatEng,
    KernelCrashCatEngineConfig *pEngConfig
)
{
    if (!pEngConfig->bEnable)
        return NV_OK;

    NV_CHECK_OR_RETURN(LEVEL_ERROR, pEngConfig->pName != NULL, NV_ERR_INVALID_ARGUMENT);
    NV_CHECK_OR_RETURN(LEVEL_ERROR, pEngConfig->errorId != 0, NV_ERR_INVALID_ARGUMENT);

    pKernelCrashCatEng->bConfigured = NV_TRUE;
    pKernelCrashCatEng->pName = pEngConfig->pName;
    pKernelCrashCatEng->errorId = pEngConfig->errorId;
    pKernelCrashCatEng->pGpu = ENG_GET_GPU(pKernelCrashCatEng);
    pKernelCrashCatEng->dmemPort = pEngConfig->dmemPort;

    if (pEngConfig->allocQueueSize > 0)
    {
        const NvU32 CRASHCAT_QUEUE_ALIGNMENT = 1u << 10;
        pEngConfig->allocQueueSize = NV_ALIGN_UP(pEngConfig->allocQueueSize,
                                                 CRASHCAT_QUEUE_ALIGNMENT);
        NV_STATUS status;

        //
        // The queue must be contiguous and 1KB aligned in both size and offset.
        // Typically the queue will be a single page to satisfy these requirements.
        //
        NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
            memdescCreate(&pKernelCrashCatEng->pQueueMemDesc, pKernelCrashCatEng->pGpu,
                          pEngConfig->allocQueueSize, CRASHCAT_QUEUE_ALIGNMENT, NV_TRUE,
                          ADDR_SYSMEM, NV_MEMORY_CACHED, MEMDESC_FLAGS_NONE));

        NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
            memdescAlloc(pKernelCrashCatEng->pQueueMemDesc),
            memdescCleanup);

        //
        // After kcrashcatEngineRegisterCrashBuffer(), the CrashCat library should be able to map
        // and access the queue buffer when it shows up in a wayfinder.
        //
        NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
            kcrashcatEngineRegisterCrashBuffer(pKernelCrashCatEng,
                                               pKernelCrashCatEng->pQueueMemDesc),
            memdescCleanup);

memdescCleanup:
        if (status != NV_OK)
        {
            kcrashcatEngineUnload(pKernelCrashCatEng);
            return status;
        }
    }

    return NV_OK;
}
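
// A note on the round-up above: assuming NV_ALIGN_UP has the usual
// power-of-two semantics, it rounds a size up to the next multiple of the
// alignment, e.g. a requested 0x500-byte queue becomes 0x800 bytes:
//
//     #define ALIGN_UP(v, gran) (((v) + ((gran) - 1)) & ~((gran) - 1))
//     _Static_assert(ALIGN_UP(0x500u, 0x400u) == 0x800u, "rounds up to 1KB");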

NvBool kcrashcatEngineConfigured_IMPL(KernelCrashCatEngine *pKernelCrashCatEng)
{
    return pKernelCrashCatEng->bConfigured;
}

MEMORY_DESCRIPTOR *kcrashcatEngineGetQueueMemDesc_IMPL(KernelCrashCatEngine *pKernelCrashCatEng)
{
    return pKernelCrashCatEng->pQueueMemDesc;
}

void kcrashcatEngineUnload_IMPL(KernelCrashCatEngine *pKernelCrashCatEng)
{
    if (pKernelCrashCatEng->pQueueMemDesc != NULL)
    {
        kcrashcatEngineUnregisterCrashBuffer(pKernelCrashCatEng, pKernelCrashCatEng->pQueueMemDesc);
        memdescFree(pKernelCrashCatEng->pQueueMemDesc);
        memdescDestroy(pKernelCrashCatEng->pQueueMemDesc);
        pKernelCrashCatEng->pQueueMemDesc = NULL;
    }

    crashcatEngineUnload_IMPL(staticCast(pKernelCrashCatEng, CrashCatEngine));
}

NvU32 kcrashcatEnginePriRead_IMPL
(
    KernelCrashCatEngine *pKernelCrashCatEng,
    NvU32 offset
)
{
    return kcrashcatEngineRegRead(pKernelCrashCatEng->pGpu, pKernelCrashCatEng, offset);
}

void kcrashcatEnginePriWrite_IMPL
(
    KernelCrashCatEngine *pKernelCrashCatEng,
    NvU32 offset,
    NvU32 data
)
{
    kcrashcatEngineRegWrite(pKernelCrashCatEng->pGpu, pKernelCrashCatEng, offset, data);
}

void kcrashcatEngineVprintf_IMPL
(
    KernelCrashCatEngine *pKernelCrashCatEng,
    NvBool bReportStart,
    const char *fmt,
    va_list args
)
{
    //
    // The first line logs an Xid - subsequent crash report lines are printed via
    // portDbgPrintString() so that they are in dmesg, but don't cause additional Xid "events".
    //
    if (bReportStart)
    {
        va_list argsCopy;

        //
        // Prefix the engine name to the format string.
        // nvErrorLog() appends a newline, so we don't add one here.
        //
        nvDbgSnprintf(pKernelCrashCatEng->fmtBuffer, MAX_ERROR_STRING, "%s %s",
                      pKernelCrashCatEng->pName, fmt);

        va_copy(argsCopy, args);
        nvErrorLog(pKernelCrashCatEng->pGpu, pKernelCrashCatEng->errorId,
                   pKernelCrashCatEng->fmtBuffer, argsCopy);
        va_end(argsCopy);
    }

    // portDbgPrintString/NVLOG_PRINTF don't add a newline, so add one here
    const char *newline = "\n";
    const NvLength fmtSize = portStringLength(fmt) + 1;
    const NvLength newlineSize = 3; // Two chars plus terminating null
    const NvLength newFmtSize = fmtSize + newlineSize - 1; // terminating null is shared

    portMemCopy(pKernelCrashCatEng->fmtBuffer, MAX_ERROR_STRING, fmt, fmtSize);
    portStringCat(pKernelCrashCatEng->fmtBuffer, newFmtSize, newline, newlineSize);
    nvDbgVsnprintf(pKernelCrashCatEng->printBuffer, MAX_ERROR_STRING,
                   pKernelCrashCatEng->fmtBuffer, args);

    // The report-starting line was already printed by nvErrorLog above
    if (!bReportStart)
        portDbgPrintString(pKernelCrashCatEng->printBuffer, MAX_ERROR_STRING);

    //
    // Also print the formatted string to NvLog - avoid direct NV_PRINTF calls so as not to
    // duplicate output in dmesg.
    //
    NVLOG_PRINTF(NV_PRINTF_MODULE, NVLOG_ROUTE_RM, LEVEL_ERROR, pKernelCrashCatEng->printBuffer);
}
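
// Why the va_copy above: a va_list may only be traversed once, and this
// function consumes 'args' again in nvDbgVsnprintf() below the bReportStart
// block, so the nvErrorLog() call gets its own copy. A standalone C
// illustration of the same rule:
//
//     #include <stdarg.h>
//     #include <stdio.h>
//     void logTwice(const char *fmt, ...)
//     {
//         va_list args, argsCopy;
//         va_start(args, fmt);
//         va_copy(argsCopy, args);   // second traversal needs its own copy
//         vprintf(fmt, args);
//         vprintf(fmt, argsCopy);    // reusing 'args' here would be undefined
//         va_end(argsCopy);
//         va_end(args);
//     }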

static NV_INLINE
NV_CRASHCAT_MEM_APERTURE _addressSpaceToCrashcatAperture(NV_ADDRESS_SPACE addrSpace)
{
    switch (addrSpace)
    {
        case ADDR_SYSMEM: return NV_CRASHCAT_MEM_APERTURE_SYSGPA;
        case ADDR_FBMEM:  return NV_CRASHCAT_MEM_APERTURE_FBGPA;
        default: NV_ASSERT_OR_RETURN(0, NV_CRASHCAT_MEM_APERTURE_UNKNOWN);
    }
}

NV_STATUS kcrashcatEngineRegisterCrashBuffer_IMPL
(
    KernelCrashCatEngine *pKernelCrashCatEng,
    MEMORY_DESCRIPTOR *pMemDesc
)
{
    return crashcatEngineRegisterCrashBuffer(staticCast(pKernelCrashCatEng, CrashCatEngine),
        _addressSpaceToCrashcatAperture(memdescGetAddressSpace(pMemDesc)),
        memdescGetPhysAddr(pMemDesc, AT_GPU, 0), memdescGetSize(pMemDesc),
        pMemDesc);
}

void kcrashcatEngineUnregisterCrashBuffer_IMPL
(
    KernelCrashCatEngine *pKernelCrashCatEng,
    MEMORY_DESCRIPTOR *pMemDesc
)
{
    crashcatEngineUnregisterCrashBuffer(staticCast(pKernelCrashCatEng, CrashCatEngine),
        _addressSpaceToCrashcatAperture(memdescGetAddressSpace(pMemDesc)),
        memdescGetPhysAddr(pMemDesc, AT_GPU, 0), memdescGetSize(pMemDesc));
}

static NV_INLINE NV_ADDRESS_SPACE _crashcatApertureToAddressSpace(NV_CRASHCAT_MEM_APERTURE aper)
{
    switch (aper)
    {
        case NV_CRASHCAT_MEM_APERTURE_FBGPA:  return ADDR_FBMEM;
        case NV_CRASHCAT_MEM_APERTURE_SYSGPA: return ADDR_SYSMEM;
        default: return ADDR_UNKNOWN;
    }
}

static MEMORY_DESCRIPTOR *_kcrashcatEngineCreateBufferMemDesc
(
    KernelCrashCatEngine *pKernelCrashCatEng,
    CrashCatBufferDescriptor *pBufDesc
)
{
    // Convert the buffer descriptor to a set of memdesc parameters
    MEMORY_DESCRIPTOR *pMemDesc;
    NV_STATUS status;
    NV_ADDRESS_SPACE bufAddrSpace = _crashcatApertureToAddressSpace(pBufDesc->aperture);

    NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR,
        memdescCreate(&pMemDesc, pKernelCrashCatEng->pGpu, pBufDesc->size, 0,
                      NV_TRUE, bufAddrSpace, NV_MEMORY_CACHED, MEMDESC_FLAGS_NONE),
        return NULL;);

    memdescDescribe(pMemDesc, bufAddrSpace, pBufDesc->physOffset, pBufDesc->size);
    return pMemDesc;
}

void *kcrashcatEngineMapBufferDescriptor_IMPL
(
    KernelCrashCatEngine *pKernelCrashCatEng,
    CrashCatBufferDescriptor *pBufDesc
)
{
    MEMORY_DESCRIPTOR *pMemDesc;

    if (pBufDesc->bRegistered)
        pMemDesc = pBufDesc->pEngPriv;
    else
        pMemDesc = _kcrashcatEngineCreateBufferMemDesc(pKernelCrashCatEng, pBufDesc);

    NV_CHECK_OR_RETURN(LEVEL_ERROR, pMemDesc != NULL, NULL);

    NvP64 pBuf, pPriv;
    NV_STATUS status;

    // CrashCat buffers are read-only
    NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR,
        memdescMap(pMemDesc, 0, memdescGetSize(pMemDesc), NV_TRUE,
                   NV_PROTECT_READABLE, &pBuf, &pPriv),
        {
            if (pBufDesc->pEngPriv == NULL)
                memdescDestroy(pMemDesc);
            return NULL;
        });

    memdescSetKernelMapping(pMemDesc, pBuf);
    memdescSetKernelMappingPriv(pMemDesc, pPriv);
    pBufDesc->pEngPriv = pMemDesc;
    return NvP64_VALUE(pBuf);
}

void kcrashcatEngineUnmapBufferDescriptor_IMPL
(
    KernelCrashCatEngine *pKernelCrashCatEng,
    CrashCatBufferDescriptor *pBufDesc
)
{
    MEMORY_DESCRIPTOR *pMemDesc = pBufDesc->pEngPriv;
    NvP64 pBuf = memdescGetKernelMapping(pMemDesc);
    NvP64 pPriv = memdescGetKernelMappingPriv(pMemDesc);

    memdescUnmap(pMemDesc, NV_TRUE, 0, pBuf, pPriv);
    memdescSetKernelMapping(pMemDesc, NULL);
    memdescSetKernelMappingPriv(pMemDesc, NULL);

    if (!pBufDesc->bRegistered)
        memdescDestroy(pMemDesc);
}

void kcrashcatEngineSyncBufferDescriptor_IMPL
(
    KernelCrashCatEngine *pKernelCrashCatEng,
    CrashCatBufferDescriptor *pBufDesc,
    NvU32 offset,
    NvU32 size
)
{
    //
    // The buffers which support the "sync" operation don't have a memdesc - they are accessed
    // through ports, so we copy the data out into a local buffer instead of direct map.
    //
    NV_ASSERT_CHECKED(NvU64_HI32(pBufDesc->physOffset) == 0);
    NV_ASSERT_CHECKED(NvU64_HI32(pBufDesc->size) == 0);

    switch (pBufDesc->aperture)
    {
        case NV_CRASHCAT_MEM_APERTURE_DMEM:
            kcrashcatEngineReadDmem_HAL(pKernelCrashCatEng,
                                        NvU64_LO32(pBufDesc->physOffset) + offset,
                                        size,
                                        (void *)((NvUPtr)pBufDesc->pMapping + offset));
            return;
        case NV_CRASHCAT_MEM_APERTURE_EMEM:
            kcrashcatEngineReadEmem_HAL(pKernelCrashCatEng,
                                        NvU64_LO32(pBufDesc->physOffset) + offset,
                                        size,
                                        (void *)((NvUPtr)pBufDesc->pMapping + offset));
            return;
        default:
            NV_ASSERT_CHECKED(0);
    }
}

[Some files were not shown because too many files have changed in this diff.]