525.60.11

Andy Ritger 2022-11-28 13:39:27 -08:00
parent 758b4ee818
commit 5f40a5aee5
No known key found for this signature in database
GPG Key ID: 6D466BB75E006CFC
113 changed files with 1408 additions and 723 deletions

View File

@ -2,6 +2,8 @@
## Release 525 Entries
### [525.60.11] 2022-11-28
### [525.53] 2022-11-10
#### Changed
@ -19,6 +21,8 @@
## Release 520 Entries
### [520.61.07] 2022-10-20
### [520.56.06] 2022-10-12
#### Added

View File

@ -1,7 +1,7 @@
# NVIDIA Linux Open GPU Kernel Module Source
This is the source release of the NVIDIA Linux open GPU kernel modules,
version 525.53.
version 525.60.11.
## How to Build
@ -17,7 +17,7 @@ as root:
Note that the kernel modules built here must be used with GSP
firmware and user-space NVIDIA GPU driver components from a corresponding
525.53 driver release. This can be achieved by installing
525.60.11 driver release. This can be achieved by installing
the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
option. E.g.,
@ -167,7 +167,7 @@ for the target kernel.
## Compatible GPUs
The open-gpu-kernel-modules can be used on any Turing or later GPU
(see the table below). However, in the 525.53 release,
(see the table below). However, in the 525.60.11 release,
GeForce and Workstation support is still considered alpha-quality.
To enable use of the open kernel modules on GeForce and Workstation GPUs,
@ -175,7 +175,7 @@ set the "NVreg_OpenRmEnableUnsupportedGpus" nvidia.ko kernel module
parameter to 1. For more details, see the NVIDIA GPU driver end user
README here:
https://us.download.nvidia.com/XFree86/Linux-x86_64/525.53/README/kernel_open.html
https://us.download.nvidia.com/XFree86/Linux-x86_64/525.60.11/README/kernel_open.html
In the below table, if three IDs are listed, the first is the PCI Device
ID, the second is the PCI Subsystem Vendor ID, and the third is the PCI
@ -662,7 +662,6 @@ Subsystem Device ID.
| NVIDIA A800-SXM4-80GB | 20F3 10DE 17A2 |
| NVIDIA A800 80GB PCIe | 20F5 10DE 1799 |
| NVIDIA A800 80GB PCIe LC | 20F5 10DE 179A |
| NVIDIA A800 40GB PCIe | 20F6 10DE 17A3 |
| NVIDIA GeForce GTX 1660 Ti | 2182 |
| NVIDIA GeForce GTX 1660 | 2184 |
| NVIDIA GeForce GTX 1650 SUPER | 2187 |
@ -795,3 +794,4 @@ Subsystem Device ID.
| NVIDIA GeForce RTX 3050 Laptop GPU | 25E5 |
| NVIDIA RTX A1000 Embedded GPU | 25F9 |
| NVIDIA RTX A2000 Embedded GPU | 25FA |
| NVIDIA RTX A500 Embedded GPU | 25FB |

View File

@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
EXTRA_CFLAGS += -I$(src)
EXTRA_CFLAGS += -Wall -MD $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args
EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"525.53\"
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"525.60.11\"
EXTRA_CFLAGS += -Wno-unused-function

View File

@ -643,12 +643,14 @@ typedef enum {
static inline NvBool IS_REG_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
return ((offset >= nv->regs->cpu_address) &&
((offset + (length - 1)) >= offset) &&
((offset + (length - 1)) <= (nv->regs->cpu_address + (nv->regs->size - 1))));
}
static inline NvBool IS_FB_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
return ((nv->fb) && (offset >= nv->fb->cpu_address) &&
((offset + (length - 1)) >= offset) &&
((offset + (length - 1)) <= (nv->fb->cpu_address + (nv->fb->size - 1))));
}
@ -656,6 +658,7 @@ static inline NvBool IS_UD_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
return ((nv->ud.cpu_address != 0) && (nv->ud.size != 0) &&
(offset >= nv->ud.cpu_address) &&
((offset + (length - 1)) >= offset) &&
((offset + (length - 1)) <= (nv->ud.cpu_address + (nv->ud.size - 1))));
}
@ -664,6 +667,7 @@ static inline NvBool IS_IMEM_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
return ((nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address != 0) &&
(nv->bars[NV_GPU_BAR_INDEX_IMEM].size != 0) &&
(offset >= nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address) &&
((offset + (length - 1)) >= offset) &&
((offset + (length - 1)) <= (nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address +
(nv->bars[NV_GPU_BAR_INDEX_IMEM].size - 1))));
}
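
The new middle comparison in each helper guards against 64-bit wraparound: if offset + length overflows, offset + (length - 1) becomes smaller than offset and the request is rejected. A minimal standalone sketch of the shared pattern, with C99 fixed-width types standing in for NvU64/NvBool:

#include <stdbool.h>
#include <stdint.h>

/*
 * True only if [offset, offset + length - 1] lies entirely inside
 * [base, base + size - 1]. The middle comparison rejects requests
 * whose end-of-range computation wraps around 2^64; without it, an
 * out-of-range (offset, length) pair could pass the final check.
 */
static bool range_is_inside(uint64_t base, uint64_t size,
                            uint64_t offset, uint64_t length)
{
    return (offset >= base) &&
           ((offset + (length - 1)) >= offset) &&
           ((offset + (length - 1)) <= (base + (size - 1)));
}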

View File

@ -1364,8 +1364,6 @@ void nvUvmInterfacePagingChannelsUnmap(uvmGpuAddressSpaceHandle srcVaSpace,
a. pre-allocated stack
b. the fact that internal RPC infrastructure doesn't acquire GPU lock.
Therefore, locking is the caller's responsibility.
- This function DOES NOT sleep (does not allocate memory or acquire locks)
so it can be invoked while holding a spinlock.
Arguments:
channel[IN] - paging channel handle obtained via

View File

@ -830,6 +830,8 @@ typedef struct UvmGpuFaultInfo_tag
NvHandle faultBufferHandle;
} UvmGpuFaultInfo;
struct Device;
typedef struct UvmGpuPagingChannel_tag
{
struct gpuDevice *device;
@ -837,6 +839,7 @@ typedef struct UvmGpuPagingChannel_tag
NvHandle channelHandle;
NvHandle errorNotifierHandle;
void *pushStreamSp;
struct Device *pDevice;
} UvmGpuPagingChannel, *UvmGpuPagingChannelHandle;
typedef struct UvmGpuAccessCntrInfo_tag

View File

@ -95,7 +95,7 @@ static vm_fault_t __nv_drm_gem_nvkms_handle_vma_fault(
pfn >>= PAGE_SHIFT;
pfn += page_offset;
} else {
BUG_ON(page_offset > nv_nvkms_memory->pages_count);
BUG_ON(page_offset >= nv_nvkms_memory->pages_count);
pfn = page_to_pfn(nv_nvkms_memory->pages[page_offset]);
}

View File

@ -112,7 +112,7 @@ static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault(
page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node);
BUG_ON(page_offset > nv_user_memory->pages_count);
BUG_ON(page_offset >= nv_user_memory->pages_count);
ret = vm_insert_page(vma, address, nv_user_memory->pages[page_offset]);
switch (ret) {
case 0:

View File

@ -47,6 +47,14 @@ module_param_named(modeset, nv_drm_modeset_module_param, bool, 0400);
void *nv_drm_calloc(size_t nmemb, size_t size)
{
size_t total_size = nmemb * size;
//
// Check for overflow.
//
if ((nmemb != 0) && ((total_size / nmemb) != size))
{
return NULL;
}
return kzalloc(nmemb * size, GFP_KERNEL);
}
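
Because total_size is computed with wrapping unsigned arithmetic, the division test detects whether nmemb * size overflowed SIZE_MAX. A hypothetical userspace analogue of the same check (names are illustrative, not from the driver):

#include <stdlib.h>
#include <string.h>

/* If nmemb * size wrapped around, total / nmemb != size, and the
 * allocation is refused rather than silently undersized. */
static void *checked_calloc(size_t nmemb, size_t size)
{
    size_t total = nmemb * size;

    if ((nmemb != 0) && ((total / nmemb) != size))
        return NULL;

    void *p = malloc(total);
    if (p != NULL)
        memset(p, 0, total);
    return p;
}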

View File

@ -34,6 +34,7 @@
#include <linux/file.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/freezer.h>
#include <acpi/video.h>
@ -182,7 +183,10 @@ static inline int nvkms_read_trylock_pm_lock(void)
static inline void nvkms_read_lock_pm_lock(void)
{
down_read(&nvkms_pm_lock);
while (!down_read_trylock(&nvkms_pm_lock)) {
try_to_freeze();
cond_resched();
}
}
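
The loop replaces a plain down_read() so the task never sleeps uninterruptibly on the PM lock across suspend: spinning on the trylock leaves a window where the freezer can freeze the task. A sketch of the same pattern for a generic rwsem (helper name is illustrative):

#include <linux/freezer.h>
#include <linux/rwsem.h>
#include <linux/sched.h>

static void freezable_down_read(struct rw_semaphore *sem)
{
    while (!down_read_trylock(sem)) {
        try_to_freeze();   /* allow suspend/hibernate to freeze us here */
        cond_resched();    /* avoid hogging the CPU while spinning */
    }
}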
static inline void nvkms_read_unlock_pm_lock(void)
@ -1086,7 +1090,7 @@ failed:
return NULL;
}
void nvkms_close_common(struct nvkms_per_open *popen)
void nvkms_close_pm_locked(struct nvkms_per_open *popen)
{
/*
* Don't use down_interruptible(): we need to free resources
@ -1124,13 +1128,13 @@ void nvkms_close_common(struct nvkms_per_open *popen)
nvkms_free(popen, sizeof(*popen));
}
static void nvkms_close_deferred(void *data)
static void nvkms_close_pm_unlocked(void *data)
{
struct nvkms_per_open *popen = data;
nvkms_read_lock_pm_lock();
nvkms_close_common(popen);
nvkms_close_pm_locked(popen);
nvkms_read_unlock_pm_lock();
}
@ -1138,11 +1142,11 @@ static void nvkms_close_deferred(void *data)
static void nvkms_close_popen(struct nvkms_per_open *popen)
{
if (nvkms_read_trylock_pm_lock() == 0) {
nvkms_close_common(popen);
nvkms_close_pm_locked(popen);
nvkms_read_unlock_pm_lock();
} else {
nv_kthread_q_item_init(&popen->deferred_close_q_item,
nvkms_close_deferred,
nvkms_close_pm_unlocked,
popen);
nvkms_queue_work(&nvkms_deferred_close_kthread_q,
&popen->deferred_close_q_item);
@ -1195,7 +1199,7 @@ struct nvkms_per_open* nvkms_open_from_kapi
void nvkms_close_from_kapi(struct nvkms_per_open *popen)
{
nvkms_close_popen(popen);
nvkms_close_pm_unlocked(popen);
}
NvBool nvkms_ioctl_from_kapi

View File

@ -3382,7 +3382,7 @@ NV_STATUS uvm_va_block_make_resident_read_duplicate(uvm_va_block_t *va_block,
// TODO: Bug 3660922: need to implement HMM read duplication support.
UVM_ASSERT(!uvm_va_block_is_hmm(va_block));
UVM_ASSERT(va_block_context->policy = uvm_va_range_get_policy(va_block->va_range));
UVM_ASSERT(va_block_context->policy == uvm_va_range_get_policy(va_block->va_range));
va_block_context->make_resident.dest_id = dest_id;
va_block_context->make_resident.cause = cause;

View File

@ -808,6 +808,14 @@ nvswitch_os_strncmp
NvLength length
);
char*
nvswitch_os_strncat
(
char *s1,
const char *s2,
NvLength length
);
void *
nvswitch_os_memset
(

View File

@ -2393,6 +2393,17 @@ nvswitch_os_strncmp
return strncmp(s1, s2, length);
}
char*
nvswitch_os_strncat
(
char *s1,
const char *s2,
NvLength length
)
{
return strncat(s1, s2, length);
}
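
One caution worth noting for callers of this new wrapper: strncat()'s length argument bounds the bytes appended (plus a terminating NUL), not the destination size. A small illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char buf[16] = "SXID";

    /* Leave room for the NUL that strncat() always appends. */
    strncat(buf, ": fatal error", sizeof(buf) - strlen(buf) - 1);
    printf("%s\n", buf);   /* prints "SXID: fatal err" (truncated) */
    return 0;
}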
void *
nvswitch_os_memset
(

View File

@ -857,7 +857,8 @@ nv_dma_buf_reuse(
goto cleanup_dmabuf;
}
if (params->index > (priv->total_objects - params->numObjects))
if ((priv->total_objects < params->numObjects) ||
(params->index > (priv->total_objects - params->numObjects)))
{
status = NV_ERR_INVALID_ARGUMENT;
goto unlock_priv;
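
The extra condition closes an unsigned-underflow hole: when numObjects exceeded total_objects, the subtraction wrapped to a huge value and the old index check always passed. The same guard in isolation (illustrative helper name):

#include <stdbool.h>
#include <stdint.h>

static bool window_is_valid(uint32_t total, uint32_t index, uint32_t num)
{
    if (total < num)                  /* total - num would underflow */
        return false;
    return index <= (total - num);    /* [index, index + num) fits */
}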

View File

@ -132,6 +132,11 @@ nvidia_vma_access(
pageIndex = ((addr - vma->vm_start) >> PAGE_SHIFT);
pageOffset = (addr & ~PAGE_MASK);
if (length < 0)
{
return -EINVAL;
}
if (!mmap_context->valid)
{
nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap context\n");
@ -210,8 +215,12 @@ static vm_fault_t nvidia_fault(
NvU64 page;
NvU64 num_pages = NV_VMA_SIZE(vma) >> PAGE_SHIFT;
NvU64 pfn_start =
(nvlfp->mmap_context.mmap_start >> PAGE_SHIFT) + vma->vm_pgoff;
NvU64 pfn_start = (nvlfp->mmap_context.mmap_start >> PAGE_SHIFT);
if (vma->vm_pgoff != 0)
{
return VM_FAULT_SIGBUS;
}
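
With nonzero vm_pgoff now rejected up front, the faulting PFN reduces to the mapping's base PFN plus the page index within the VMA. A standalone sketch of that arithmetic (PAGE_SHIFT value assumed):

#include <stdint.h>

#define PAGE_SHIFT 12   /* 4 KiB pages assumed for this sketch */

static uint64_t fault_pfn(uint64_t mmap_start, uint64_t vma_start,
                          uint64_t fault_addr)
{
    uint64_t page_index = (fault_addr - vma_start) >> PAGE_SHIFT;
    return (mmap_start >> PAGE_SHIFT) + page_index;
}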
// Mapping revocation is only supported for GPU mappings.
if (NV_IS_CTL_DEVICE(nv))
@ -484,6 +493,11 @@ int nvidia_mmap_helper(
return -EINVAL;
}
if (vma->vm_pgoff != 0)
{
return -EINVAL;
}
NV_PRINT_VMA(NV_DBG_MEMINFO, vma);
status = nv_check_gpu_state(nv);
@ -510,6 +524,11 @@ int nvidia_mmap_helper(
NvU64 access_start = mmap_context->access_start;
NvU64 access_len = mmap_context->access_size;
// validate the size
if (NV_VMA_SIZE(vma) != mmap_length)
{
return -ENXIO;
}
if (IS_REG_OFFSET(nv, access_start, access_len))
{
if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED,

View File

@ -1432,6 +1432,9 @@ static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp)
return -ENODEV;
}
if (unlikely(NV_ATOMIC_READ(nvl->usage_count) >= NV_S32_MAX))
return -EMFILE;
if ( ! (nv->flags & NV_FLAG_OPEN))
{
/* Sanity check: !NV_FLAG_OPEN requires usage_count == 0 */

View File

@ -207,6 +207,9 @@ static int nvlink_fops_release(struct inode *inode, struct file *filp)
nvlink_print(NVLINK_DBG_INFO, "nvlink driver close\n");
if (private == NULL)
return -ENOMEM;
mutex_lock(&nvlink_drvctx.lock);
if (private->capability_fds.fabric_mgmt > 0)

View File

@ -1224,11 +1224,27 @@ void NV_API_CALL os_get_screen_info(
*pFbHeight = registered_fb[i]->var.yres;
*pFbDepth = registered_fb[i]->var.bits_per_pixel;
*pFbPitch = registered_fb[i]->fix.line_length;
break;
return;
}
}
}
#elif NV_IS_EXPORT_SYMBOL_PRESENT_screen_info
#endif
/*
* If the screen info is not found in the registered FBs, then fall back
* to the screen_info structure.
*
* The SYSFB_SIMPLEFB option, if enabled, marks VGA/VBE/EFI framebuffers as
* generic framebuffers so the new generic system-framebuffer drivers can
* be used instead. DRM_SIMPLEDRM drives the generic system-framebuffers
* device created by SYSFB_SIMPLEFB.
*
* SYSFB_SIMPLEFB registers a dummy framebuffer which does not contain the
* information required by os_get_screen_info(); therefore, fall back to
* the screen_info structure.
*/
#if NV_IS_EXPORT_SYMBOL_PRESENT_screen_info
/*
* If there is not a framebuffer console, return 0 size.
*

View File

@ -203,6 +203,8 @@ namespace DisplayPort
virtual bool getSDPExtnForColorimetrySupported() = 0;
virtual bool getPanelFwRevision(NvU16 *revision) = 0;
virtual bool getIgnoreMSACap() = 0;
virtual AuxRetry::status setIgnoreMSAEnable(bool msaTimingParamIgnoreEn) = 0;

View File

@ -381,6 +381,8 @@ namespace DisplayPort
virtual bool getSDPExtnForColorimetrySupported();
virtual bool getPanelFwRevision(NvU16 *revision);
virtual bool isPowerSuspended();
virtual void setPanelPowerParams(bool bSinkPowerStateD0, bool bPanelPowerStateOn);

View File

@ -1043,6 +1043,46 @@ bool DeviceImpl::getSDPExtnForColorimetrySupported()
return (this->bSdpExtCapable == True);
}
bool DeviceImpl::getPanelFwRevision(NvU16 *revision)
{
NvU8 fwRevisionMajor = 0;
NvU8 fwRevisionMinor = 0;
unsigned size = 0;
unsigned nakReason = NakUndefined;
if (!revision)
{
return false;
}
*revision = 0;
//
// On faked mux devices, we cannot check if the device has
// the capability as we don't have access to aux.
//
if (this->isFakedMuxDevice())
{
return false;
}
if (AuxBus::success != this->getDpcdData(NV_DPCD14_FW_SW_REVISION_MAJOR,
&fwRevisionMajor, sizeof(fwRevisionMajor), &size, &nakReason))
{
return false;
}
if (AuxBus::success != this->getDpcdData(NV_DPCD14_FW_SW_REVISION_MINOR,
&fwRevisionMinor, sizeof(fwRevisionMinor), &size, &nakReason))
{
return false;
}
*revision = (fwRevisionMajor << 8) | fwRevisionMinor;
return true;
}
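
The two DPCD bytes are combined into the NvU16 out-parameter with the major revision in the high byte. In isolation:

#include <stdint.h>

static uint16_t pack_fw_revision(uint8_t major, uint8_t minor)
{
    return (uint16_t)(((uint16_t)major << 8) | minor);   /* 0xMMmm */
}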
bool DeviceImpl::isPowerSuspended()
{
bool bPanelPowerOn, bDPCDPowerStateD0;

View File

@ -330,6 +330,10 @@
#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_VALID_NO (0x00000000) /* R-XUV */
#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_VALID_YES (0x00000001) /* R-XUV */
// Field definitions for FW/SW Revision
#define NV_DPCD14_FW_SW_REVISION_MAJOR (0x0000040A) /* R-XUR */
#define NV_DPCD14_FW_SW_REVISION_MINOR (0x0000040B) /* R-XUR */
// Field definition for 0x0200E (LANE_ALIGN_STATUS_UPDATED_ESI), used only when DP2.0+ 128b/132b is enabled.
#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI (0x0000200E) /* R-XUR */
#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_DPRX_EQ_INTERLANE_ALIGN_DONE 2:2 /* R-XUF */

View File

@ -36,25 +36,25 @@
// and then checked back in. You cannot make changes to these sections without
// corresponding changes to the buildmeister script
#ifndef NV_BUILD_BRANCH
#define NV_BUILD_BRANCH r525_00
#define NV_BUILD_BRANCH r526_91
#endif
#ifndef NV_PUBLIC_BRANCH
#define NV_PUBLIC_BRANCH r525_00
#define NV_PUBLIC_BRANCH r526_91
#endif
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r525/r525_00-154"
#define NV_BUILD_CHANGELIST_NUM (31993960)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r525/r526_91-181"
#define NV_BUILD_CHANGELIST_NUM (32108895)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "rel/gpu_drv/r525/r525_00-154"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (31993960)
#define NV_BUILD_NAME "rel/gpu_drv/r525/r526_91-181"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32108895)
#else /* Windows builds */
#define NV_BUILD_BRANCH_VERSION "r525_00-178"
#define NV_BUILD_CHANGELIST_NUM (31990457)
#define NV_BUILD_BRANCH_VERSION "r526_91-9"
#define NV_BUILD_CHANGELIST_NUM (32103636)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "526.52"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (31990457)
#define NV_BUILD_NAME "527.27"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32103636)
#define NV_BUILD_BRANCH_BASE_VERSION R525
#endif
// End buildmeister python edited section

View File

@ -4,7 +4,7 @@
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
(defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
#define NV_VERSION_STRING "525.53"
#define NV_VERSION_STRING "525.60.11"
#else

View File

@ -28,6 +28,9 @@
#define NV_MINION_DL_CMD_DATA_RXCAL_EN_ALARM 0x50
#define NV_MINION_DL_CMD_DATA_INIT_CAL_DONE 0x26
#define NV_MINION_NVLINK_DL_CMD_COMMAND_SETUPTC 0x00000094
#define NV_MINION_NVLINK_DL_CMD_COMMAND_CLR_TLC_MISC_REGS 0x00000026
// filtering dev_minion_dlstat.ref for pattern (CMINION|MINION|NVLSTAT|PMINION|SWMINION)
#define NV_NVLSTAT 0x00000103:0x00000000 /* RW--D */
#define NV_NVLSTAT_UC01 0x00000001 /* R--4R */

View File

@ -1,5 +1,5 @@
/*******************************************************************************
Copyright (c) 2019-2020 NVidia Corporation
Copyright (c) 2019-2022 NVidia Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
@ -1047,6 +1047,7 @@ nvlink_core_powerdown_floorswept_conns_to_off
if (numConnsToShutdown != 0)
{
nvlink_core_powerdown_intranode_conns_from_active_to_off(connsToShutdown, numConnsToShutdown, 0);
nvlink_core_reset_intranode_conns(connsToShutdown, numConnsToShutdown, NVLINK_STATE_CHANGE_ASYNC);
for (j = 0; j < numConnsToShutdown; ++j)
{

View File

@ -175,4 +175,11 @@ enum
RM_SOE_DMAIDX_GUEST_PHYS_SYS_NCOH_BOUND = 7
};
/*!
* SOE debug buffer size.
* This is a copy of SOE_DMESG_BUFFER_SIZE from soe/memmap.h; that header
* conflicts with sec2/memmap.h and cannot be included in the driver.
*/
#define SOE_DEBUG_BUFFER_SIZE 0x1000
#endif // _GSOEIFCMN_H_

View File

@ -802,6 +802,7 @@ typedef enum nvswitch_err_type
NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_END = 10005,
NVSWITCH_ERR_HW_HOST_THERMAL_SHUTDOWN = 10006,
NVSWITCH_ERR_HW_HOST_IO_FAILURE = 10007,
NVSWITCH_ERR_HW_HOST_FIRMWARE_INITIALIZATION_FAILURE = 10008,
NVSWITCH_ERR_HW_HOST_LAST,

View File

@ -804,6 +804,14 @@ nvswitch_os_strncmp
NvLength length
);
char*
nvswitch_os_strncat
(
char *s1,
const char *s2,
NvLength length
);
void *
nvswitch_os_memset
(

View File

@ -109,7 +109,7 @@ flcnQueueCmdPostBlocking
if (status != NV_OK)
{
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_COMMAND_QUEUE,
"Failed to post command to SOE\n");
"Fatal, Failed to post command to SOE\n");
return status;
}
@ -117,7 +117,7 @@ flcnQueueCmdPostBlocking
if (status == NV_ERR_TIMEOUT)
{
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_TIMEOUT,
"Timed out while waiting for SOE command completion\n");
"Fatal, Timed out while waiting for SOE command completion\n");
flcnQueueCmdCancel(device, pFlcn, *pSeqDesc);
}
@ -691,9 +691,9 @@ flcnSetDmemAddr_HAL
NvU32
flcnRiscvRegRead_HAL
(
struct nvswitch_device *device,
PFLCN pFlcn,
NvU32 offset
nvswitch_device *device,
PFLCN pFlcn,
NvU32 offset
)
{
NVSWITCH_ASSERT(pFlcn->pHal->riscvRegRead != (void *)0);
@ -703,12 +703,60 @@ flcnRiscvRegRead_HAL
void
flcnRiscvRegWrite_HAL
(
struct nvswitch_device *device,
PFLCN pFlcn,
NvU32 offset,
NvU32 data
nvswitch_device *device,
PFLCN pFlcn,
NvU32 offset,
NvU32 data
)
{
NVSWITCH_ASSERT(pFlcn->pHal->riscvRegWrite != (void *)0);
pFlcn->pHal->riscvRegWrite(device, pFlcn, offset, data);
}
NV_STATUS
flcnDebugBufferInit_HAL
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 debugBufferMaxSize,
NvU32 writeRegAddr,
NvU32 readRegAddr
)
{
NVSWITCH_ASSERT(pFlcn->pHal->debugBufferInit != (void *)0);
return pFlcn->pHal->debugBufferInit(device, pFlcn, debugBufferMaxSize, writeRegAddr, readRegAddr);
}
NV_STATUS
flcnDebugBufferDestroy_HAL
(
nvswitch_device *device,
FLCN *pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->debugBufferDestroy != (void *)0);
return pFlcn->pHal->debugBufferDestroy(device, pFlcn);
}
NV_STATUS
flcnDebugBufferDisplay_HAL
(
nvswitch_device *device,
FLCN *pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->debugBufferDisplay != (void *)0);
return pFlcn->pHal->debugBufferDisplay(device, pFlcn);
}
NvBool
flcnDebugBufferIsEmpty_HAL
(
nvswitch_device *device,
FLCN *pFlcn
)
{
NVSWITCH_ASSERT(pFlcn->pHal->debugBufferIsEmpty != (void *)0);
return pFlcn->pHal->debugBufferIsEmpty(device, pFlcn);
}

View File

@ -260,6 +260,8 @@ typedef struct
NvU32 lp_threshold;
NvU32 minion_intr;
NvU32 surpress_link_errors_for_gpu_reset;
NvU32 block_code_mode;
NvU32 reference_clock_mode;
} NVSWITCH_REGKEY_TYPE;
//

View File

@ -282,6 +282,9 @@ typedef struct FLCN
/*! HW arch that is enabled and running on corresponding uproc engine. */
NvU32 engArch;
/*! Flcn debug buffer object */
} FLCN, *PFLCN;
// hal functions
@ -339,6 +342,10 @@ void flcnImemCopyTo_HAL (struct nvswitch_device
NvU32 flcnSetDmemAddr_HAL (struct nvswitch_device *, struct FLCN *, NvU32 dst);
NvU32 flcnRiscvRegRead_HAL (struct nvswitch_device *, PFLCN, NvU32 offset);
void flcnRiscvRegWrite_HAL (struct nvswitch_device *, PFLCN, NvU32 offset, NvU32 data);
NV_STATUS flcnDebugBufferInit_HAL (struct nvswitch_device *, struct FLCN *, NvU32 debugBufferMaxSize, NvU32 writeRegAddr, NvU32 readRegAddr);
NV_STATUS flcnDebugBufferDestroy_HAL (struct nvswitch_device *, struct FLCN *);
NV_STATUS flcnDebugBufferDisplay_HAL (struct nvswitch_device *, struct FLCN *);
NvBool flcnDebugBufferIsEmpty_HAL (struct nvswitch_device *, struct FLCN *);
// Falcon core revision / subversion definitions.
#define NV_FLCN_CORE_REV_3_0 0x30 // 3.0 - Core revision 3 subversion 0.

View File

@ -91,6 +91,10 @@ typedef struct {
NvU32 (*setDmemAddr) (struct nvswitch_device *, struct FLCN *, NvU32 dst);
NvU32 (*riscvRegRead) (struct nvswitch_device *, struct FLCN *, NvU32 offset);
void (*riscvRegWrite) (struct nvswitch_device *, struct FLCN *, NvU32 offset, NvU32 data);
NV_STATUS (*debugBufferInit) (struct nvswitch_device *, struct FLCN *, NvU32 debugBufferMaxSize, NvU32 writeRegAddr, NvU32 readRegAddr);
NV_STATUS (*debugBufferDestroy) (struct nvswitch_device *, struct FLCN *);
NV_STATUS (*debugBufferDisplay) (struct nvswitch_device *, struct FLCN *);
NvBool (*debugBufferIsEmpty) (struct nvswitch_device *, struct FLCN *);
} flcn_hal;
void flcnQueueSetupHal(struct FLCN *pFlcn);

View File

@ -589,4 +589,35 @@
#define NV_SWITCH_REGKEY_SURPRESS_LINK_ERRORS_FOR_GPU_RESET_DISABLE 0x0
#define NV_SWITCH_REGKEY_SURPRESS_LINK_ERRORS_FOR_GPU_RESET_ENABLE 0x1
/*
* NV_SWITCH_REGKEY_BLOCK_CODE_MODE - Indicates the Forward Error Correction Mode
*
* Forward Error Correction Mode (Pre-HS).
* DEFAULT = System Default
* OFF = 0x0
* ECC96_ENABLED = 0x1
* ECC88_ENABLED = 0x2
*/
#define NV_SWITCH_REGKEY_BLOCK_CODE_MODE "BlockCodeMode"
#define NV_SWITCH_REGKEY_BLOCK_CODE_MODE_DEFAULT 0x0
#define NV_SWITCH_REGKEY_BLOCK_CODE_MODE_OFF 0x0
#define NV_SWITCH_REGKEY_BLOCK_CODE_MODE_ECC96_ENABLED 0x1
#define NV_SWITCH_REGKEY_BLOCK_CODE_MODE_ECC88_ENABLED 0x2
/*
* NV_SWITCH_REGKEY_REFERENCE_CLOCK_MODE - Indicates the reference clock mode for
* the system w.r.t. this link.
*
* DEFAULT = System Default
* COMMON = Common reference clock. Spread Spectrum (SS) may or may not be enabled.
* NON_COMMON_NO_SS = Non-common reference clock without SS enabled.
* NON_COMMON_SS = Non-common reference clock with SS enabled.
*/
#define NV_SWITCH_REGKEY_REFERENCE_CLOCK_MODE "ReferenceClockMode"
#define NV_SWITCH_REGKEY_REFERENCE_CLOCK_MODE_DEFAULT 0x0
#define NV_SWITCH_REGKEY_REFERENCE_CLOCK_MODE_COMMON 0x0
#define NV_SWITCH_REGKEY_REFERENCE_CLOCK_MODE_RESERVED 0x1
#define NV_SWITCH_REGKEY_REFERENCE_CLOCK_MODE_NON_COMMON_NO_SS 0x2
#define NV_SWITCH_REGKEY_REFERENCE_CLOCK_MODE_NON_COMMON_SS 0x3
#endif //_REGKEY_NVSWITCH_H_
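
A hypothetical validation helper for the BlockCodeMode values documented above; note that DEFAULT and OFF share the encoding 0x0, so only the distinct values are enumerated:

#include <stdbool.h>
#include <stdint.h>

static bool block_code_mode_is_valid(uint32_t mode)
{
    switch (mode) {
    case 0x0:   /* _DEFAULT / _OFF */
    case 0x1:   /* _ECC96_ENABLED  */
    case 0x2:   /* _ECC88_ENABLED  */
        return true;
    default:
        return false;
    }
}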

View File

@ -211,11 +211,10 @@ _flcnConstruct_LR10
PFLCNABLE pFlcnable = pFlcn->pFlcnable;
PFALCON_QUEUE_INFO pQueueInfo;
pFlcn->bConstructed = NV_TRUE;
if (pFlcn->engArch == NV_UPROC_ENGINE_ARCH_DEFAULT)
{
// Default the arch to Falcon if it's not set
pFlcn->engArch = NV_UPROC_ENGINE_ARCH_FALCON;
}
// Set the arch to Falcon
pFlcn->engArch = NV_UPROC_ENGINE_ARCH_FALCON;
// Allocate the memory for Queue Data Structure if needed.
if (pFlcn->bQueuesEnabled)
{

View File

@ -2076,7 +2076,8 @@ nvswitch_setup_link_system_registers_lr10
nvlink_link *link
)
{
NvU32 regval, fldval;
NvU32 regval = 0;
NvU32 fldval = 0;
NvU32 lineRate = 0;
NVLINK_CONFIG_DATA_LINKENTRY *vbios_link_entry = NULL;
NVSWITCH_BIOS_NVLINK_CONFIG *bios_config;
@ -2113,10 +2114,26 @@ nvswitch_setup_link_system_registers_lr10
_LINE_RATE, lineRate, regval);
NVSWITCH_PRINT(device, SETUP, "%s: LINE_RATE = 0x%x requested by regkey\n",
__FUNCTION__, lineRate);
NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLIPT_LNK,
_NVLIPT_LNK_CTRL_SYSTEM_LINK, _CLK_CTRL, regval);
}
// REFERENCE_CLOCK_MODE SYSTEM register
if (device->regkeys.reference_clock_mode != NV_SWITCH_REGKEY_REFERENCE_CLOCK_MODE_DEFAULT)
{
regval = FLD_SET_DRF_NUM(_NVLIPT_LNK_CTRL_SYSTEM_LINK, _CLK_CTRL,
_REFERENCE_CLOCK_MODE, device->regkeys.reference_clock_mode, regval);
NVSWITCH_PRINT(device, SETUP, "%s: REFERENCE_CLOCK_MODE = 0x%x requested by regkey\n",
__FUNCTION__, device->regkeys.reference_clock_mode);
}
else if (vbios_link_entry != NULL)
{
regval = FLD_SET_DRF_NUM(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_CLK_CTRL, _REFERENCE_CLOCK_MODE,
DRF_VAL(_NVLINK_VBIOS,_PARAM3,_REFERENCE_CLOCK_MODE, vbios_link_entry->nvLinkparam3),
regval);
}
NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLIPT_LNK,
_NVLIPT_LNK_CTRL_SYSTEM_LINK, _CLK_CTRL, regval);
// TXTRAIN SYSTEM register
regval = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT_LNK,
_NVLIPT_LNK_CTRL_SYSTEM_LINK, _CHANNEL_CTRL);
@ -2200,6 +2217,40 @@ nvswitch_setup_link_system_registers_lr10
regval);
}
// AC vs DC mode SYSTEM register
if (link->ac_coupled)
{
//
// In NVL3.0, ACMODE is handled by MINION in the INITPHASE1 command
// Here we just set up the register with the proper info
//
NVSWITCH_PRINT(device, SETUP, "%s: AC_DC_MODE = 0x%x\n",
__FUNCTION__, DRF_VAL(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL,
_AC_DC_MODE, regval));
regval = FLD_SET_DRF(_NVLIPT_LNK_CTRL_SYSTEM_LINK, _CHANNEL_CTRL,
_AC_DC_MODE, _AC, regval);
}
else if (vbios_link_entry != NULL)
{
regval = FLD_SET_DRF_NUM(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL, _AC_DC_MODE,
DRF_VAL(_NVLINK_VBIOS, _PARAM0, _ACDC_MODE, vbios_link_entry->nvLinkparam0),
regval);
}
if (device->regkeys.block_code_mode != NV_SWITCH_REGKEY_BLOCK_CODE_MODE_DEFAULT)
{
NVSWITCH_PRINT(device, SETUP, "%s: BLOCK_CODE_MODE = 0x%x requested by regkey\n",
__FUNCTION__, device->regkeys.block_code_mode);
regval = FLD_SET_DRF_NUM(_NVLIPT_LNK_CTRL_SYSTEM_LINK, _CHANNEL_CTRL,
_BLOCK_CODE_MODE, device->regkeys.block_code_mode, regval);
}
else if (vbios_link_entry != NULL)
{
regval = FLD_SET_DRF_NUM(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL, _BLOCK_CODE_MODE,
DRF_VAL(_NVLINK_VBIOS, _PARAM3, _CLOCK_MODE_BLOCK_CODE, vbios_link_entry->nvLinkparam3),
regval);
}
NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLIPT_LNK,
_NVLIPT_LNK_CTRL_SYSTEM_LINK, _CHANNEL_CTRL, regval);

View File

@ -1112,7 +1112,6 @@ nvswitch_setup_system_registers_lr10
{
nvlink_link *link;
NvU8 i;
NvU32 val;
NvU64 enabledLinkMask;
enabledLinkMask = nvswitch_get_enabled_link_mask(device);
@ -1130,21 +1129,6 @@ nvswitch_setup_system_registers_lr10
continue;
}
// AC vs DC mode SYSTEM register
if (link->ac_coupled)
{
//
// In NVL3.0, ACMODE is handled by MINION in the INITPHASE1 command
// Here we just set up the register with the proper info
//
val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT_LNK,
_NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL);
val = FLD_SET_DRF(_NVLIPT_LNK,
_CTRL_SYSTEM_LINK_CHANNEL_CTRL, _AC_DC_MODE, _AC, val);
NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLIPT_LNK,
_NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL, val);
}
nvswitch_setup_link_system_registers(device, link);
nvswitch_load_link_disable_settings(device, link);
}

View File

@ -430,14 +430,15 @@ _makeNewRecord
pNewRec->recordSize = NV_UNSIGNED_DIV_CEIL(*pRecSize, sizeof(NvU32));
pNewRec->xidId = num;
pNewRec->seqNumber = pFifo->seqNumber++;
pNewRec->timeStamp = nvswitch_os_get_platform_time() / NVSWITCH_NSEC_PER_SEC;
pNewRec->timeStamp = nvswitch_os_get_platform_time_epoch() / NVSWITCH_NSEC_PER_SEC;
if (msglen > NV_MSGBOX_MAX_DRIVER_EVENT_MSG_TXT_SIZE)
if (osErrorString[msglen - 1] != 0)
{
// The text string is too long. Truncate and notify the client.
osErrorString[msglen - 1] = 0;
pNewRec->flags = FLD_SET_DRF(_MSGBOX, _DEM_RECORD_FLAGS,
_TRUNC, _SET, pNewRec->flags);
msglen = NV_MSGBOX_MAX_DRIVER_EVENT_MSG_TXT_SIZE - 1;
msglen = NV_MSGBOX_MAX_DRIVER_EVENT_MSG_TXT_SIZE;
}
nvswitch_os_memcpy(pNewRec->textMessage, osErrorString, msglen);
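
The revised logic keys truncation off the buffer contents rather than the reported length: the producer fills the full buffer and leaves the last byte non-NUL when text was cut off. Detection on the consumer side then reduces to the check below (buffer size is a stand-in):

#include <stdbool.h>

#define MSG_MAX 64   /* stand-in for NV_MSGBOX_MAX_DRIVER_EVENT_MSG_TXT_SIZE */

static bool message_was_truncated(const char msg[MSG_MAX])
{
    return msg[MSG_MAX - 1] != '\0';
}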

View File

@ -63,7 +63,7 @@ _flcnRiscvRegWrite_LS10
/*!
* @brief Retrieve the size of the falcon data memory.
*
* @param[in] pGpu OBJGPU pointer
* @param[in] device nvswitch_device pointer
* @param[in] pFlcn Falcon object pointer
* @param[in] bFalconReachable If set, returns size that can be reached by Falcon
*
@ -105,7 +105,7 @@ _flcnSetImemAddr_LS10
*
* @brief Copy contents of pSrc to IMEM
*
* @param[in] pGpu OBJGPU pointer
* @param[in] device nvswitch_device pointer
* @param[in] pFlcn Falcon object pointer
* @param[in] dst Destination in IMEM
* @param[in] pSrc IMEM contents
@ -156,7 +156,7 @@ _flcnSetDmemAddr_LS10
* Depending on the direction of the copy, copies 'sizeBytes' to/from 'pBuf'
* from/to DMEM offset 'dmemAddr' using DMEM access port 'port'.
*
* @param[in] pGpu GPU object pointer
* @param[in] device nvswitch_device pointer
* @param[in] pFlcn Falcon object pointer
* @param[in] dmemAddr The DMEM offset for the copy
* @param[in] pBuf The pointer to the buffer containing the data to copy
@ -280,6 +280,16 @@ _flcnDbgInfoCaptureRiscvPcTrace_LS10
NvU32 ctl, ridx, widx, count, bufferSize;
NvBool full;
// Only supported on riscv
if (!UPROC_ENG_ARCH_FALCON_RISCV(pFlcn))
{
NVSWITCH_PRINT(device, ERROR, "%s: is not supported on falcon\n",
__FUNCTION__);
NVSWITCH_ASSERT(0);
return;
}
flcnRiscvRegWrite_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACECTL,
DRF_DEF(_PRISCV_RISCV, _TRACECTL, _MODE, _FULL) |
DRF_DEF(_PRISCV_RISCV, _TRACECTL, _UMODE_ENABLE, _TRUE) |
@ -346,6 +356,115 @@ _flcnDbgInfoCaptureRiscvPcTrace_LS10
flcnRiscvRegWrite_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACECTL, ctl);
}
static NV_STATUS
_flcnDebugBufferInit_LS10
(
nvswitch_device *device,
PFLCN pFlcn,
NvU32 debugBufferMaxSize,
NvU32 writeRegAddr,
NvU32 readRegAddr
)
{
return NVL_SUCCESS;
}
static NV_STATUS
_flcnDebugBufferDestroy_LS10
(
nvswitch_device *device,
PFLCN pFlcn
)
{
return NVL_SUCCESS;
}
static NV_STATUS
_flcnDebugBufferDisplay_LS10
(
nvswitch_device *device,
PFLCN pFlcn
)
{
return NVL_SUCCESS;
}
static NvBool
_flcnDebugBufferIsEmpty_LS10
(
nvswitch_device *device,
PFLCN pFlcn
)
{
return NV_TRUE;
}
//
// Store pointers to ucode header and data.
// Preload ucode from registry if available.
//
NV_STATUS
_flcnConstruct_LS10
(
nvswitch_device *device,
PFLCN pFlcn
)
{
NV_STATUS status;
PFLCNABLE pFlcnable = pFlcn->pFlcnable;
PFALCON_QUEUE_INFO pQueueInfo;
pFlcn->bConstructed = NV_TRUE;
// Set arch to Riscv
pFlcn->engArch = NV_UPROC_ENGINE_ARCH_FALCON_RISCV;
// Allocate the memory for Queue Data Structure if needed.
if (pFlcn->bQueuesEnabled)
{
pQueueInfo = pFlcn->pQueueInfo = nvswitch_os_malloc(sizeof(*pQueueInfo));
if (pQueueInfo == NULL)
{
status = NV_ERR_NO_MEMORY;
NVSWITCH_ASSERT(0);
goto _flcnConstruct_LR10_fail;
}
nvswitch_os_memset(pQueueInfo, 0, sizeof(FALCON_QUEUE_INFO));
// Assert if Number of Queues are zero
NVSWITCH_ASSERT(pFlcn->numQueues != 0);
pQueueInfo->pQueues = nvswitch_os_malloc(sizeof(FLCNQUEUE) * pFlcn->numQueues);
if (pQueueInfo->pQueues == NULL)
{
status = NV_ERR_NO_MEMORY;
NVSWITCH_ASSERT(0);
goto _flcnConstruct_LR10_fail;
}
nvswitch_os_memset(pQueueInfo->pQueues, 0, sizeof(FLCNQUEUE) * pFlcn->numQueues);
// Sequences can be optional
if (pFlcn->numSequences != 0)
{
if ((pFlcn->numSequences - 1) > ((NvU32)NV_U8_MAX))
{
status = NV_ERR_OUT_OF_RANGE;
NVSWITCH_PRINT(device, ERROR,
"Max numSequences index = %d cannot fit into byte\n",
(pFlcn->numSequences - 1));
NVSWITCH_ASSERT(0);
goto _flcnConstruct_LR10_fail;
}
flcnQueueSeqInfoStateInit(device, pFlcn);
}
}
// DEBUG
NVSWITCH_PRINT(device, INFO, "Falcon: %s\n", flcnGetName_HAL(device, pFlcn));
NVSWITCH_ASSERT(pFlcnable != NULL);
flcnableGetExternalConfig(device, pFlcnable, &pFlcn->extConfig);
return NV_OK;
_flcnConstruct_LR10_fail:
// call flcnDestruct to free the memory allocated in this construct function
flcnDestruct_HAL(device, pFlcn);
return status;
}
/**
* @brief set hal function pointers for functions defined in
* LS10 (i.e. this file)
@ -372,5 +491,9 @@ flcnSetupHal_LS10
pHal->setImemAddr = _flcnSetImemAddr_LS10;
pHal->dmemSize = _flcnDmemSize_LS10;
pHal->dbgInfoCaptureRiscvPcTrace = _flcnDbgInfoCaptureRiscvPcTrace_LS10;
pHal->debugBufferInit = _flcnDebugBufferInit_LS10;
pHal->debugBufferDestroy = _flcnDebugBufferDestroy_LS10;
pHal->debugBufferDisplay = _flcnDebugBufferDisplay_LS10;
pHal->debugBufferIsEmpty = _flcnDebugBufferIsEmpty_LS10;
pHal->construct = _flcnConstruct_LS10;
}

View File

@ -34,6 +34,7 @@
#include "nvswitch/ls10/dev_nvlphyctl_ip.h"
#include "nvswitch/ls10/dev_nvltlc_ip.h"
#include "nvswitch/ls10/dev_minion_ip.h"
#include "nvswitch/ls10/dev_minion_ip_addendum.h"
#include "nvswitch/ls10/dev_nvlipt_lnk_ip.h"
#include "nvswitch/ls10/dev_nvlipt_ip.h"
#include "nvswitch/ls10/dev_nport_ip.h"
@ -502,27 +503,20 @@ nvswitch_reset_persistent_link_hw_state_ls10
NvU32 linkNumber
)
{
NvU32 regData;
NvU32 nvliptWarmResetDelayUs = (IS_RTLSIM(device) || IS_EMULATION(device)) ? 800:8;
nvlink_link *link = nvswitch_get_link(device, linkNumber);
if (nvswitch_is_link_in_reset(device, link))
{
return;
}
regData = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLIPT_LNK,
_NVLIPT_LNK, _DEBUG_CLEAR);
regData = FLD_SET_DRF_NUM(_NVLIPT_LNK, _DEBUG_CLEAR, _CLEAR,
NV_NVLIPT_LNK_DEBUG_CLEAR_CLEAR_ASSERT, regData);
NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLIPT_LNK,
_NVLIPT_LNK, _DEBUG_CLEAR, regData);
// SETUPTC called with HW Reset
(void)nvswitch_minion_send_command(device, linkNumber, NV_MINION_NVLINK_DL_CMD_COMMAND_SETUPTC , 0x4);
NVSWITCH_NSEC_DELAY(nvliptWarmResetDelayUs * NVSWITCH_INTERVAL_1USEC_IN_NS);
regData = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLIPT_LNK,
_NVLIPT_LNK, _DEBUG_CLEAR);
regData = FLD_SET_DRF_NUM(_NVLIPT_LNK, _DEBUG_CLEAR, _CLEAR,
NV_NVLIPT_LNK_DEBUG_CLEAR_CLEAR_DEASSERT, regData);
NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLIPT_LNK,
_NVLIPT_LNK, _DEBUG_CLEAR, regData);
NVSWITCH_NSEC_DELAY(nvliptWarmResetDelayUs * NVSWITCH_INTERVAL_1USEC_IN_NS);
// clear TLC TP Counters
(void)nvswitch_minion_send_command(device, linkNumber, NV_MINION_NVLINK_DL_CMD_COMMAND_CLR_TLC_MISC_REGS, 0);
// clear DL error counters
(void)nvswitch_minion_send_command(device, linkNumber, NV_MINION_NVLINK_DL_CMD_COMMAND_DLSTAT_CLR_DLERRCNT, 0);
}
NvlStatus

View File

@ -124,25 +124,36 @@ nvswitch_pri_ring_init_ls10
while (keepPolling);
if (!FLD_TEST_DRF(_GFW_GLOBAL, _BOOT_PARTITION_PROGRESS, _VALUE, _SUCCESS, command))
{
NVSWITCH_RAW_ERROR_LOG_TYPE report = {0, { 0 }};
NVSWITCH_RAW_ERROR_LOG_TYPE report_saw = {0, { 0 }};
NvU32 report_idx = 0;
NvU32 i;
report.data[report_idx++] = command;
NVSWITCH_PRINT(device, ERROR, "%s: -- _GFW_GLOBAL, _BOOT_PARTITION_PROGRESS (0x%x) != _SUCCESS --\n",
__FUNCTION__, command);
for (i = 0; i <= 15; i++)
{
command = NVSWITCH_SAW_RD32_LS10(device, _NVLSAW, _SW_SCRATCH(i));
report_saw.data[i] = command;
NVSWITCH_PRINT(device, ERROR, "%s: -- NV_NVLSAW_SW_SCRATCH(%d) = 0x%08x\n",
__FUNCTION__, i, command);
}
for (i = 0; i <= 2; i++)
for (i = 0; i < NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2__SIZE_1; i++)
{
command = NVSWITCH_REG_RD32(device, _PFSP, _FALCON_COMMON_SCRATCH_GROUP_2(i));
NVSWITCH_PRINT(device, ERROR, "%s: -- NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(%d) = 0x%08x\n",
report.data[report_idx++] = command;
NVSWITCH_PRINT(device, ERROR, "%s: -- NV_PFSP_FALCON_COMMON_SCRATCH_GROUP_2(%d) = 0x%08x\n",
__FUNCTION__, i, command);
}
// Include useful scratch information for triage
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_FIRMWARE_INITIALIZATION_FAILURE,
"Fatal, Firmware initialization failure (0x%x/0x%x, 0x%x, 0x%x, 0x%x/0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
report.data[0], report.data[1], report.data[2], report.data[3], report.data[4],
report_saw.data[0], report_saw.data[1], report_saw.data[12], report_saw.data[14], report_saw.data[15]);
return -NVL_INITIALIZATION_TOTAL_FAILURE;
}

View File

@ -25,6 +25,7 @@
#include "flcn/flcn_nvswitch.h"
#include "rmflcncmdif_nvswitch.h"
#include "lr10/smbpbi_lr10.h"
#include "nvVer.h"
NvlStatus
@ -117,7 +118,7 @@ nvswitch_smbpbi_log_message_ls10
pLogCmd->sxidId = num;
pLogCmd->msgLen = msglen;
pLogCmd->timeStamp = nvswitch_os_get_platform_time() / NVSWITCH_NSEC_PER_SEC;
pLogCmd->timeStamp = nvswitch_os_get_platform_time_epoch() / NVSWITCH_NSEC_PER_SEC;
for (offset = 0; msglen > 0; offset += segSize)
{
@ -211,6 +212,7 @@ nvswitch_smbpbi_send_unload_ls10
nvswitch_device *device
)
{
nvswitch_smbpbi_send_unload_lr10(device);
}
void

View File

@ -428,7 +428,7 @@ nvswitch_init_soe_ls10
if (_nvswitch_soe_send_test_cmd(device) != NV_OK)
{
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_BOOTSTRAP,
"SOE init failed(2)\n");
"SOE init failed(4)\n");
status = -NVL_ERR_INVALID_STATE;
goto nvswitch_init_soe_fail;
}
@ -465,6 +465,7 @@ nvswitch_unload_soe_ls10
// Detach driver from SOE Queues
_nvswitch_soe_attach_detach_driver_ls10(device, NV_FALSE);
return NVL_SUCCESS;
}
@ -577,6 +578,7 @@ _soeService_LS10
)
{
NvBool bRecheckMsgQ = NV_FALSE;
NvBool bRecheckPrintQ = NV_FALSE;
NvU32 clearBits = 0;
NvU32 intrStatus;
PFLCN pFlcn = ENG_GET_FLCN(pSoe);
@ -642,6 +644,8 @@ _soeService_LS10
NVSWITCH_PRINT(device, INFO,
"%s: Received a SWGEN1 interrupt\n",
__FUNCTION__);
flcnDebugBufferDisplay_HAL(device, pFlcn);
bRecheckPrintQ = NV_TRUE;
}
// Clear any sources that were serviced and get the new status.
@ -677,6 +681,22 @@ _soeService_LS10
}
}
//
// If we just processed a SWGEN1 interrupt (Debug Buffer interrupt), peek
// into the Debug Buffer and see if any text was missed the last time
// the buffer was displayed (above). If it is not empty, re-generate SWGEN1
// (since it is now cleared) and exit. As long as an interrupt is pending,
// this function will be re-entered and the message(s) will be processed.
//
if (bRecheckPrintQ)
{
if (!flcnDebugBufferIsEmpty_HAL(device, pFlcn))
{
flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_IRQSSET,
DRF_DEF(_PFALCON, _FALCON_IRQSSET, _SWGEN1, _SET));
}
}
flcnIntrRetrigger_HAL(device, pFlcn);
return intrStatus;
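
The recheck avoids a lost-wakeup race: SWGEN1 is level-cleared, so a message queued between draining the buffer and clearing the interrupt would otherwise go unserviced. The shape of the pattern, with hypothetical ops standing in for the flcn HAL calls:

#include <stdbool.h>

struct dev_ops {
    void (*drain_print_buffer)(void *ctx);  /* flcnDebugBufferDisplay_HAL */
    bool (*print_buffer_empty)(void *ctx);  /* flcnDebugBufferIsEmpty_HAL */
    void (*raise_swgen1)(void *ctx);        /* set _IRQSSET SWGEN1 */
};

static void service_debug_buffer(const struct dev_ops *ops, void *ctx)
{
    ops->drain_print_buffer(ctx);          /* display everything queued */

    if (!ops->print_buffer_empty(ctx))     /* raced with a new message? */
        ops->raise_swgen1(ctx);            /* re-trigger; handler re-runs */
}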

View File

@ -535,6 +535,14 @@ _nvswitch_init_device_regkeys
NVSWITCH_INIT_REGKEY(_PUBLIC, minion_intr,
NV_SWITCH_REGKEY_MINION_INTERRUPTS,
NV_SWITCH_REGKEY_MINION_INTERRUPTS_DEFAULT);
NVSWITCH_INIT_REGKEY(_PRIVATE, block_code_mode,
NV_SWITCH_REGKEY_BLOCK_CODE_MODE,
NV_SWITCH_REGKEY_BLOCK_CODE_MODE_DEFAULT);
NVSWITCH_INIT_REGKEY(_PRIVATE, reference_clock_mode,
NV_SWITCH_REGKEY_REFERENCE_CLOCK_MODE,
NV_SWITCH_REGKEY_REFERENCE_CLOCK_MODE_DEFAULT);
}
NvU64
nvswitch_lib_deferred_task_dispatcher
@ -4164,7 +4172,9 @@ nvswitch_lib_smbpbi_log_sxid
{
va_list arglist;
int msglen;
char string[RM_SOE_SMBPBI_CMD_LOG_MESSAGE_MAX_STRING];
char string[RM_SOE_SMBPBI_CMD_LOG_MESSAGE_MAX_STRING + 1];
nvswitch_os_memset(string, 0, (NvLength)sizeof(string));
va_start(arglist, pFormat);
msglen = nvswitch_os_vsnprintf(string, sizeof(string), pFormat, arglist);
@ -4172,7 +4182,11 @@ nvswitch_lib_smbpbi_log_sxid
if (!(msglen < 0))
{
msglen = NV_MIN(msglen + 1, (int) sizeof(string));
//
// HALs will know that the string is being truncated by seeing that the
// last byte in the buffer is not nul.
//
msglen = NV_MIN(msglen + 1, (int)RM_SOE_SMBPBI_CMD_LOG_MESSAGE_MAX_STRING);
device->hal.nvswitch_smbpbi_log_message(device, sxid, msglen, (NvU8 *) string);
}
}
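
The buffer is one byte larger than the wire limit so even a message that exactly fills the limit is detectably truncated: at most RM_SOE_SMBPBI_CMD_LOG_MESSAGE_MAX_STRING bytes are sent, and a non-NUL final byte signals the cut. A userspace sketch of the same sizing (WIRE_MAX is a stand-in):

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define WIRE_MAX 80   /* stand-in for RM_SOE_SMBPBI_CMD_LOG_MESSAGE_MAX_STRING */

/* Returns the byte count to transmit, or -1 on formatting error. */
static int format_for_wire(char buf[WIRE_MAX + 1], const char *fmt, ...)
{
    va_list ap;
    int len;

    memset(buf, 0, WIRE_MAX + 1);
    va_start(ap, fmt);
    len = vsnprintf(buf, WIRE_MAX + 1, fmt, ap);
    va_end(ap);

    if (len < 0)
        return -1;

    /* When the text plus its NUL fits, send it all; otherwise send
     * exactly WIRE_MAX bytes, the last of which is non-NUL. */
    return (len + 1 < WIRE_MAX) ? (len + 1) : WIRE_MAX;
}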
@ -4858,7 +4872,7 @@ nvswitch_lib_ctrl
CTRL_NVSWITCH_RESERVED_2);
NVSWITCH_DEV_CMD_DISPATCH_RESERVED(
CTRL_NVSWITCH_RESERVED_3);
NVSWITCH_DEV_CMD_DISPATCH_RESERVED(
NVSWITCH_DEV_CMD_DISPATCH_RESERVED(
CTRL_NVSWITCH_RESERVED_4);
NVSWITCH_DEV_CMD_DISPATCH_RESERVED(
CTRL_NVSWITCH_RESERVED_5);

View File

@ -2057,8 +2057,6 @@ typedef struct NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS {
NV2080_CTRL_GPU_BRIDGE_VERSION_PARAMS bridgeList[NV2080_CTRL_MAX_PHYSICAL_BRIDGE];
} NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS;
/*
* NV2080_CTRL_CMD_GPU_QUERY_SCRUBBER_STATUS
*

View File

@ -2922,6 +2922,29 @@ typedef struct NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS {
#define NV2080_CTRL_CMD_NVLINK_IS_GPU_DEGRADED (0x20803040U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS_MESSAGE_ID" */
/*
* NV2080_CTRL_CMD_NVLINK_DIRECT_CONNECT_CHECK
*
* This command is used to check for a missing bridge/NVLink
* connection on direct-connect GPUs.
*
* [out] bIsEnoughNvLink
* Whether the number of active NVLinks meets the minimum requirement.
* [out] numBridge
* Number of NVLink bridges.
* [out] bridgePresenceMask
* Bit mask of NVLink bridge presence.
*/
#define NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS_MESSAGE_ID (0x41U)
typedef struct NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS {
NvBool bIsEnoughNvLink;
NvU32 numBridge;
NvU32 bridgePresenceMask;
} NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS;
#define NV2080_CTRL_CMD_NVLINK_DIRECT_CONNECT_CHECK (0x20803041U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS_MESSAGE_ID" */
/* _ctrl2080nvlink_h_ */

View File

@ -548,6 +548,11 @@ typedef struct NVA080_CTRL_UPDATE_SYSMEM_BITMAP_PARAMS {
* This parameter returns mask of UVM enabled features on vGPU. It comprises of
* UVM managed APIs and replayable faults that are enabled or disabled based on
* vGPU version.
* enableKmdSysmemScratch
* This parameter is used to override the guest regkey PreferSystemMemoryScratch.
* Setting the vgpu parameter "vgpu_enable_kmd_sysmem_scratch" in the plugin
* sets this parameter. If the parameter is set, the guest moves shader buffer
* allocation from FB to sysmem.
*
* Possible status values returned are:
* NV_OK
@ -632,19 +637,20 @@ typedef struct NVA080_CTRL_UPDATE_SYSMEM_BITMAP_PARAMS {
#define NVA080_CTRL_VGPU_GET_CONFIG_PARAMS_MESSAGE_ID (0x1U)
typedef struct NVA080_CTRL_VGPU_GET_CONFIG_PARAMS {
NvU32 frameRateLimiter;
NvU32 swVSyncEnabled;
NvU32 cudaEnabled;
NvU32 pluginPteBlitEnabled;
NvU32 disableWddm1xPreemption;
NvU32 debugBufferSize;
NvU32 frameRateLimiter;
NvU32 swVSyncEnabled;
NvU32 cudaEnabled;
NvU32 pluginPteBlitEnabled;
NvU32 disableWddm1xPreemption;
NvU32 debugBufferSize;
NV_DECLARE_ALIGNED(NvP64 debugBuffer, 8);
NV_DECLARE_ALIGNED(NvU64 guestFbOffset, 8);
NV_DECLARE_ALIGNED(NvU64 mappableCpuHostAperture, 8);
NvU32 linuxInterruptOptimization;
NvU32 vgpuDeviceCapsBits;
NvU32 maxPixels;
NvU32 uvmEnabledFeatures;
NvU32 linuxInterruptOptimization;
NvU32 vgpuDeviceCapsBits;
NvU32 maxPixels;
NvU32 uvmEnabledFeatures;
NvBool enableKmdSysmemScratch;
} NVA080_CTRL_VGPU_GET_CONFIG_PARAMS;

View File

@ -195,6 +195,27 @@
(x - ROBUST_CHANNEL_NVENC1_ERROR + 1) : \
(x - ROBUST_CHANNEL_NVENC2_ERROR + 2)))
// Indexed NVJPG reference
#define ROBUST_CHANNEL_NVJPG_ERROR(x) \
((x < 1) ? \
(ROBUST_CHANNEL_NVJPG0_ERROR) : \
(ROBUST_CHANNEL_NVJPG1_ERROR + (x - 1)))
#define ROBUST_CHANNEL_IS_NVJPG_ERROR(x) \
((x == ROBUST_CHANNEL_NVJPG0_ERROR) || \
(x == ROBUST_CHANNEL_NVJPG1_ERROR) || \
(x == ROBUST_CHANNEL_NVJPG2_ERROR) || \
(x == ROBUST_CHANNEL_NVJPG3_ERROR) || \
(x == ROBUST_CHANNEL_NVJPG4_ERROR) || \
(x == ROBUST_CHANNEL_NVJPG5_ERROR) || \
(x == ROBUST_CHANNEL_NVJPG6_ERROR) || \
(x == ROBUST_CHANNEL_NVJPG7_ERROR))
#define ROBUST_CHANNEL_NVJPG_ERROR_IDX(x) \
((x == ROBUST_CHANNEL_NVJPG0_ERROR) ? \
(x - ROBUST_CHANNEL_NVJPG0_ERROR) : \
(x - ROBUST_CHANNEL_NVJPG1_ERROR + 1))
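
The special-casing exists because the NVJPG0 error code is not contiguous with the NVJPG1..7 block. Under that assumption the two macros are inverses over indices 0..7, which a quick self-test can confirm (error-code values below are illustrative stand-ins):

#include <assert.h>

#define NVJPG0_ERR 90    /* stand-in value */
#define NVJPG1_ERR 140   /* stand-in: start of the contiguous 1..7 block */

#define NVJPG_ERROR(x)     ((x) < 1 ? NVJPG0_ERR : NVJPG1_ERR + ((x) - 1))
#define NVJPG_ERROR_IDX(x) ((x) == NVJPG0_ERR ? 0 : (x) - NVJPG1_ERR + 1)

int main(void)
{
    for (int i = 0; i < 8; i++)
        assert(NVJPG_ERROR_IDX(NVJPG_ERROR(i)) == i);   /* round-trips */
    return 0;
}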
// Error Levels
#define ROBUST_CHANNEL_ERROR_RECOVERY_LEVEL_INFO (0)
#define ROBUST_CHANNEL_ERROR_RECOVERY_LEVEL_NON_FATAL (1)

View File

@ -2,58 +2,6 @@
* ################### THIS FILE IS AUTOGENERATED. DO NOT EDIT. ################### *
***********************************************************************************/
#ifdef __VGPU_SRIOV_ENABLED_SKUS__
static inline NvBool isSriovEnabledSKU(NvU16 devid, NvU16 ssid)
{
if (devid == 0x20B0)
return NV_TRUE;
if (devid == 0x20B2)
return NV_TRUE;
if ((devid == 0x20B5) && (ssid == 0x1533))
return NV_TRUE;
if ((devid == 0x20B7) && (ssid == 0x1532))
return NV_TRUE;
if (devid == 0x20BE)
return NV_TRUE;
if (devid == 0x20BF)
return NV_TRUE;
if (devid == 0x20F1)
return NV_TRUE;
if (devid == 0x20F3)
return NV_TRUE;
if (devid == 0x20F5)
return NV_TRUE;
if ((devid == 0x20F6) && (ssid == 0x17A3))
return NV_TRUE;
if (devid == 0x2230)
return NV_TRUE;
if (devid == 0x2231)
return NV_TRUE;
if ((devid == 0x2233) && (ssid == 0x165A))
return NV_TRUE;
if (devid == 0x2235)
return NV_TRUE;
if (devid == 0x2236)
return NV_TRUE;
if (devid == 0x2237)
return NV_TRUE;
if ((devid == 0x2238) && (ssid == 0x1677))
return NV_TRUE;
if ((devid == 0x2322) && (ssid == 0x17A4))
return NV_TRUE;
if ((devid == 0x2331) && (ssid == 0x1626))
return NV_TRUE;
if ((devid == 0x25B6) && (ssid == 0x14A9))
return NV_TRUE;
if ((devid == 0x25B6) && (ssid == 0x157E))
return NV_TRUE;
if ((devid == 0x20B8) && (ssid == 0x1581))
return NV_TRUE;
if ((devid == 0x20B9) && (ssid == 0x157F))
return NV_TRUE;
return NV_FALSE;
}
#endif //__VGPU_SRIOV_ENABLED_SKUS__
#ifdef __VGPU_ALIAS_PGPU_LIST__
static inline void _get_chip_id_for_alias_pgpu(NvU32 *dev_id, NvU32 *subdev_id)
@ -64,6 +12,7 @@ static inline void _get_chip_id_for_alias_pgpu(NvU32 *dev_id, NvU32 *subdev_id)
NvU32 devid;
NvU32 subdevid;
} vgpu_aliases[] = {
{ 0x20B5, 0x1642, 0x20B5, 0x1533 },
{ 0x20B8, 0x1581, 0x20B5, 0x1533 },
{ 0x20B9, 0x157F, 0x20B7, 0x1532 },
};

View File

@ -235,7 +235,6 @@ void nvUpdateCurrentHardwareColorSpaceAndRangeEvo(
NVEvoUpdateState *pUpdateState);
NvBool nvAssignSOREvo(NVConnectorEvoPtr pConnectorEvo, NvU32 sorExcludeMask);
void nvRestoreSORAssigmentsEvo(NVDevEvoRec *pDevEvo);
void nvSetSwapBarrierNotifyEvo(NVDispEvoPtr pDispEvo,
NvBool enable, NvBool isPre);
@ -307,6 +306,9 @@ NvBool nvNeedsTmoLut(NVDevEvoPtr pDevEvo,
NvBool nvIsCscMatrixIdentity(const struct NvKmsCscMatrix *matrix);
void nvSuspendDevEvo(NVDevEvoRec *pDevEvo);
NvBool nvResumeDevEvo(NVDevEvoRec *pDevEvo);
#ifdef __cplusplus
};
#endif

View File

@ -926,11 +926,6 @@ typedef struct _NVEvoDevRec {
NvU32 displayHandle;
/*!
* lastModesettingClient points to the pOpenDev of the client that
* performed the last modeset.
*/
const struct NvKmsPerOpenDev *lastModesettingClient;
/*!
* modesetOwner points to the pOpenDev of the client that called
@ -938,6 +933,12 @@ typedef struct _NVEvoDevRec {
*/
const struct NvKmsPerOpenDev *modesetOwner;
/*!
* Indicates whether modeset ownership has changed since the
* last modeset.
*/
NvBool modesetOwnerChanged;
/*!
* NVEvoDevRec::numSubDevices is the number of GPUs in the SLI
* device. This is the number of NVEvoSubDevPtrs in

View File

@ -27,9 +27,11 @@
#include "nvtypes.h"
#include "nvkms-api.h"
#define NVKMS_KAPI_MAX_EVENT_CHANNELS 3
struct NvKmsKapiPrivAllocateChannelEventParams {
NvU32 hClient;
NvU32 hChannel;
NvU32 hChannels[NVKMS_KAPI_MAX_EVENT_CHANNELS];
};
struct NvKmsKapiPrivSurfaceParams {

View File

@ -39,7 +39,7 @@ struct NvKmsKapiChannelEvent {
struct NvKmsKapiPrivAllocateChannelEventParams nvKmsParams;
NvHandle hCallback;
NvHandle hCallbacks[NVKMS_KAPI_MAX_EVENT_CHANNELS];
NVOS10_EVENT_KERNEL_CALLBACK_EX rmCallback;
};
@ -50,6 +50,34 @@ static void ChannelEventHandler(void *arg1, void *arg2, NvHandle hEvent,
cb->proc(cb->data, 0);
}
void nvKmsKapiFreeChannelEvent
(
struct NvKmsKapiDevice *device,
struct NvKmsKapiChannelEvent *cb
)
{
int i;
if (device == NULL || cb == NULL) {
return;
}
for (i = 0; i < NVKMS_KAPI_MAX_EVENT_CHANNELS; ++i) {
if (!cb->hCallbacks[i]) {
continue;
}
nvRmApiFree(device->hRmClient,
device->hRmClient,
cb->hCallbacks[i]);
nvFreeUnixRmHandle(&device->handleAllocator,
cb->hCallbacks[i]);
}
nvKmsKapiFree(cb);
}
struct NvKmsKapiChannelEvent* nvKmsKapiAllocateChannelEvent
(
struct NvKmsKapiDevice *device,
@ -59,11 +87,8 @@ struct NvKmsKapiChannelEvent* nvKmsKapiAllocateChannelEvent
NvU64 nvKmsParamsSize
)
{
int status;
NvU32 ret;
int status, i;
struct NvKmsKapiChannelEvent *cb = NULL;
NV0005_ALLOC_PARAMETERS eventParams = { };
if (device == NULL || proc == NULL) {
goto fail;
@ -101,50 +126,45 @@ struct NvKmsKapiChannelEvent* nvKmsKapiAllocateChannelEvent
cb->rmCallback.func = ChannelEventHandler;
cb->rmCallback.arg = cb;
cb->hCallback = nvGenerateUnixRmHandle(&device->handleAllocator);
if (cb->hCallback == 0x0) {
nvKmsKapiLogDeviceDebug(device,
"Failed to allocate event callback handle");
goto fail;
}
for (i = 0; i < NVKMS_KAPI_MAX_EVENT_CHANNELS; ++i) {
NV0005_ALLOC_PARAMETERS eventParams = { };
NvU32 ret;
eventParams.hParentClient = cb->nvKmsParams.hClient;
eventParams.hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
eventParams.notifyIndex = 0;
eventParams.data = NV_PTR_TO_NvP64(&cb->rmCallback);
if (!cb->nvKmsParams.hChannels[i]) {
continue;
}
ret = nvRmApiAlloc(device->hRmClient,
cb->nvKmsParams.hChannel,
cb->hCallback,
NV01_EVENT_KERNEL_CALLBACK_EX,
&eventParams);
if (ret != NVOS_STATUS_SUCCESS) {
nvKmsKapiLogDeviceDebug(device, "Failed to allocate event callback");
nvFreeUnixRmHandle(&device->handleAllocator, cb->hCallback);
goto fail;
cb->hCallbacks[i] = nvGenerateUnixRmHandle(&device->handleAllocator);
if (cb->hCallbacks[i] == 0x0) {
nvKmsKapiLogDeviceDebug(device,
"Failed to allocate event callback handle for channel 0x%x",
cb->nvKmsParams.hChannels[i]);
goto fail;
}
eventParams.hParentClient = cb->nvKmsParams.hClient;
eventParams.hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
eventParams.notifyIndex = 0;
eventParams.data = NV_PTR_TO_NvP64(&cb->rmCallback);
ret = nvRmApiAlloc(device->hRmClient,
cb->nvKmsParams.hChannels[i],
cb->hCallbacks[i],
NV01_EVENT_KERNEL_CALLBACK_EX,
&eventParams);
if (ret != NVOS_STATUS_SUCCESS) {
nvKmsKapiLogDeviceDebug(device,
"Failed to allocate event callback for channel 0x%x",
cb->nvKmsParams.hChannels[i]);
nvFreeUnixRmHandle(&device->handleAllocator, cb->hCallbacks[i]);
cb->hCallbacks[i] = 0;
goto fail;
}
}
return cb;
fail:
nvKmsKapiFree(cb);
nvKmsKapiFreeChannelEvent(device, cb);
return NULL;
}
void nvKmsKapiFreeChannelEvent
(
struct NvKmsKapiDevice *device,
struct NvKmsKapiChannelEvent *cb
)
{
if (device == NULL || cb == NULL) {
return;
}
nvRmApiFree(device->hRmClient,
device->hRmClient,
cb->hCallback);
nvFreeUnixRmHandle(&device->handleAllocator, cb->hCallback);
nvKmsKapiFree(cb);
}

View File

@ -3627,8 +3627,6 @@ NvBool nvAllocCoreChannelEvo(NVDevEvoPtr pDevEvo)
NvU32 dispIndex;
NvU32 head;
nvAssert(pDevEvo->lastModesettingClient == NULL);
/* Do nothing if the display was already allocated */
if (pDevEvo->displayHandle != 0) {
return TRUE;
@ -3934,72 +3932,124 @@ NvBool nvAssignSOREvo(NVConnectorEvoPtr pConnectorEvo, NvU32 sorExcludeMask)
return TRUE;
}
void nvRestoreSORAssigmentsEvo(NVDevEvoRec *pDevEvo)
static void CacheSorAssignList(const NVDispEvoRec *pDispEvo,
const NVConnectorEvoRec *sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS])
{
const NVConnectorEvoRec *pConnectorEvo;
FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
NvU32 i;
if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
continue;
}
FOR_EACH_INDEX_IN_MASK(32, i, pConnectorEvo->or.mask) {
/*
* RM populates the same SOR index into more than one connector if
* they are DCC partners; this check makes sure SOR
* assignment happens only for a single connector. The SOR
* assignment call before modeset/dp-link-training makes sure
* assignment happens for the correct connector.
*/
if (sorAssignList[i] != NULL) {
continue;
}
sorAssignList[i] = pConnectorEvo;
} FOR_EACH_INDEX_IN_MASK_END
}
}
static void RestoreSorAssignList(NVDispEvoRec *pDispEvo,
const NVConnectorEvoRec *sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS])
{
NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
NvU32 sorIndex;
for (sorIndex = 0;
sorIndex < NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS; sorIndex++) {
if (sorAssignList[sorIndex] == NULL) {
continue;
}
NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS params = {
.subDeviceInstance = pDispEvo->displayOwner,
.displayId = nvDpyIdToNvU32(sorAssignList[sorIndex]->displayId),
.sorExcludeMask = ~NVBIT(sorIndex),
};
NvU32 ret;
ret = nvRmApiControl(nvEvoGlobal.clientHandle,
pDevEvo->displayCommonHandle,
NV0073_CTRL_CMD_DFP_ASSIGN_SOR,
&params,
sizeof(params));
if (ret != NVOS_STATUS_SUCCESS) {
nvEvoLogDispDebug(pDispEvo,
EVO_LOG_ERROR,
"Failed to restore SOR-%u -> %s assignment.",
sorIndex, sorAssignList[sorIndex]->name);
} else {
RefreshSORAssignments(pDispEvo, &params);
}
}
}
NvBool nvResumeDevEvo(NVDevEvoRec *pDevEvo)
{
struct {
const NVConnectorEvoRec *
sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
} disp[NVKMS_MAX_SUBDEVICES] = { };
NVDispEvoRec *pDispEvo;
NvU32 dispIndex;
if (!NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) {
return;
}
FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
const NVConnectorEvoRec *
sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS] = { };
const NVConnectorEvoRec *pConnectorEvo;
NvU32 sorIndex;
FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
NvU32 i;
if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
continue;
}
FOR_EACH_INDEX_IN_MASK(32, i, pConnectorEvo->or.mask) {
/*
* RM populates the same SOR index into more than one connector if
* they are DCC partners; this check makes sure SOR
* assignment happens only for a single connector. The SOR
* assignment call before modeset/dp-link-training makes sure
* assignment happens for the correct connector.
*/
if (sorAssignList[i] != NULL) {
continue;
}
sorAssignList[i] = pConnectorEvo;
} FOR_EACH_INDEX_IN_MASK_END
}
for (sorIndex = 0; sorIndex < ARRAY_LEN(sorAssignList); sorIndex++) {
if (sorAssignList[sorIndex] == NULL) {
continue;
}
NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS params = {
.subDeviceInstance = pDispEvo->displayOwner,
.displayId = nvDpyIdToNvU32(sorAssignList[sorIndex]->displayId),
.sorExcludeMask = ~NVBIT(sorIndex),
};
NvU32 ret;
ret = nvRmApiControl(nvEvoGlobal.clientHandle,
pDevEvo->displayCommonHandle,
NV0073_CTRL_CMD_DFP_ASSIGN_SOR,
&params,
sizeof(params));
if (ret != NVOS_STATUS_SUCCESS) {
nvEvoLogDispDebug(pDispEvo,
EVO_LOG_ERROR,
"Failed to restore SOR-%u -> %s assigment.",
sorIndex, sorAssignList[sorIndex]->name);
} else {
RefreshSORAssignments(pDispEvo, &params);
}
FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
CacheSorAssignList(pDispEvo, disp[dispIndex].sorAssignList);
}
}
if (!nvAllocCoreChannelEvo(pDevEvo)) {
return FALSE;
}
/*
* During the hibernate-resume cycle, the vbios or GOP driver programs
* the display engine to light up the boot display. In the
* hibernate-resume path, doing the NV0073_CTRL_CMD_DFP_ASSIGN_SOR
* rm-control call before the core channel allocation causes a display
* channel hang, because at that stage RM is not aware of the boot
* display activated by the vbios and ends up unrouting active SOR
* assignments. Therefore, restore the SOR assignments only after the
* core channel allocation.
*/
if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits,
NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) {
/*
* Shut down all heads before restoring the SOR assignments because,
* in the hibernate-resume case, the SOR for which NVKMS is trying to
* restore the assignment might be in use by the boot display set up
* by the vbios/GOP driver.
*/
nvShutDownHeads(pDevEvo, NULL /* pTestFunc, shut down all heads */);
FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
RestoreSorAssignList(pDispEvo, disp[dispIndex].sorAssignList);
}
}
return TRUE;
}
void nvSuspendDevEvo(NVDevEvoRec *pDevEvo)
{
nvFreeCoreChannelEvo(pDevEvo);
}
static void ClearApiHeadState(NVDevEvoRec *pDevEvo)
@ -4088,8 +4138,6 @@ void nvFreeCoreChannelEvo(NVDevEvoPtr pDevEvo)
nvFree(pDevEvo->gpus);
pDevEvo->gpus = NULL;
pDevEvo->lastModesettingClient = NULL;
}

View File

@ -1078,15 +1078,12 @@ void nvHsConfigInitSwapGroup(
}
/*
*
* If (pDevEvo->modesetOwner != NULL) but
* (pDevEvo->lastModesettingClient != pDevEvo->modesetOwner) that
* means the modeset ownership is grabbed by the external client
* but it hasn't performed any modeset and the console is still
* active.
* If pDevEvo->modesetOwnerChanged is TRUE, the modeset ownership
* was grabbed by an external client that hasn't performed any
* modeset yet, and the console is still active.
*/
if ((pDevEvo->modesetOwner != NULL) &&
(pDevEvo->lastModesettingClient != pDevEvo->modesetOwner)) {
if ((pDevEvo->modesetOwner != NULL) && pDevEvo->modesetOwnerChanged) {
continue;
}

View File

@ -145,7 +145,7 @@ InheritPreviousModesetState(const NVDevEvoRec *pDevEvo,
const struct NvKmsPerOpenDev *pCurrentModesetOpenDev)
{
return (pCurrentModesetOpenDev != pDevEvo->pNvKmsOpenDev) &&
(pCurrentModesetOpenDev == pDevEvo->lastModesettingClient);
!pDevEvo->modesetOwnerChanged;
}
/*!
@ -2824,7 +2824,7 @@ NvBool nvSetDispModeEvo(NVDevEvoPtr pDevEvo,
pDevEvo->skipConsoleRestore = FALSE;
pDevEvo->lastModesettingClient = pOpenDev;
pDevEvo->modesetOwnerChanged = FALSE;
FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
NvU32 head;

View File

@ -935,6 +935,7 @@ static NvBool GrabModesetOwnership(struct NvKmsPerOpenDev *pOpenDev)
}
pDevEvo->modesetOwner = pOpenDev;
pDevEvo->modesetOwnerChanged = TRUE;
AssignFullNvKmsFlipPermissions(pDevEvo, &pOpenDev->flipPermissions);
AssignFullNvKmsModesetPermissions(pDevEvo, &pOpenDev->modesetPermissions);
@ -996,14 +997,6 @@ static void RevokePermissionsInternal(
}
}
static void ReallocCoreChannel(NVDevEvoRec *pDevEvo)
{
if (nvAllocCoreChannelEvo(pDevEvo)) {
nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */);
AllocSurfaceCtxDmasForAllOpens(pDevEvo);
}
}
static void RestoreConsole(NVDevEvoPtr pDevEvo)
{
// Try to issue a modeset and flip to the framebuffer console surface.
@ -1016,7 +1009,10 @@ static void RestoreConsole(NVDevEvoPtr pDevEvo)
// Reallocate the core channel right after freeing it. This makes sure
// that it's allocated and ready right away if another NVKMS client is
// started.
ReallocCoreChannel(pDevEvo);
if (nvAllocCoreChannelEvo(pDevEvo)) {
nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */);
AllocSurfaceCtxDmasForAllOpens(pDevEvo);
}
}
}
@ -1039,6 +1035,7 @@ static NvBool ReleaseModesetOwnership(struct NvKmsPerOpenDev *pOpenDev)
FreeSwapGroups(pOpenDev);
pDevEvo->modesetOwner = NULL;
pDevEvo->modesetOwnerChanged = TRUE;
pDevEvo->handleConsoleHotplugs = TRUE;
RestoreConsole(pDevEvo);
@ -1489,7 +1486,6 @@ static void FreeDeviceReference(struct NvKmsPerOpen *pOpen,
ReleaseModesetOwnership(pOpenDev);
nvAssert(pOpenDev->pDevEvo->modesetOwner != pOpenDev);
nvAssert(pOpenDev->pDevEvo->lastModesettingClient != pOpenDev);
}
nvFreePerOpenDev(pOpen, pOpenDev);
@ -5648,7 +5644,7 @@ void nvKmsSuspend(NvU32 gpuId)
FreeSurfaceCtxDmasForAllOpens(pDevEvo);
nvFreeCoreChannelEvo(pDevEvo);
nvSuspendDevEvo(pDevEvo);
}
}
@ -5665,9 +5661,10 @@ void nvKmsResume(NvU32 gpuId)
FOR_ALL_EVO_DEVS(pDevEvo) {
nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Resuming");
nvRestoreSORAssigmentsEvo(pDevEvo);
ReallocCoreChannel(pDevEvo);
if (nvResumeDevEvo(pDevEvo)) {
nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */);
AllocSurfaceCtxDmasForAllOpens(pDevEvo);
}
if (pDevEvo->modesetOwner == NULL) {
// Hardware state was lost, so we need to force a console

View File

@ -643,12 +643,14 @@ typedef enum {
static inline NvBool IS_REG_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
return ((offset >= nv->regs->cpu_address) &&
((offset + (length - 1)) >= offset) &&
((offset + (length - 1)) <= (nv->regs->cpu_address + (nv->regs->size - 1))));
}
static inline NvBool IS_FB_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
return ((nv->fb) && (offset >= nv->fb->cpu_address) &&
((offset + (length - 1)) >= offset) &&
((offset + (length - 1)) <= (nv->fb->cpu_address + (nv->fb->size - 1))));
}
@ -656,6 +658,7 @@ static inline NvBool IS_UD_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
{
return ((nv->ud.cpu_address != 0) && (nv->ud.size != 0) &&
(offset >= nv->ud.cpu_address) &&
((offset + (length - 1)) >= offset) &&
((offset + (length - 1)) <= (nv->ud.cpu_address + (nv->ud.size - 1))));
}
@ -664,6 +667,7 @@ static inline NvBool IS_IMEM_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
return ((nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address != 0) &&
(nv->bars[NV_GPU_BAR_INDEX_IMEM].size != 0) &&
(offset >= nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address) &&
((offset + (length - 1)) >= offset) &&
((offset + (length - 1)) <= (nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address +
(nv->bars[NV_GPU_BAR_INDEX_IMEM].size - 1))));
}
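
The extra `((offset + (length - 1)) >= offset)` term added to each of these predicates guards against wrap-around: with a large `offset` and `length`, `offset + (length - 1)` can overflow the 64-bit type and land back inside the aperture, defeating the upper-bound check. A minimal standalone sketch of the same pattern, using standard C types instead of the NV typedefs and adding a `length != 0` guard that the macros leave to their callers (`struct aperture` and `in_aperture` are illustrative names):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical aperture descriptor standing in for nv->regs / nv->fb. */
struct aperture {
    uint64_t cpu_address;
    uint64_t size;
};

/* Overflow-safe IS_REG_OFFSET-style range check: the (end >= offset)
 * test rejects requests whose end wraps past 2^64. */
static bool in_aperture(const struct aperture *ap, uint64_t offset, uint64_t length)
{
    uint64_t end = offset + (length - 1);   /* may wrap for hostile inputs */

    return (length != 0) &&
           (offset >= ap->cpu_address) &&
           (end >= offset) &&               /* no wrap-around */
           (end <= ap->cpu_address + (ap->size - 1));
}

int main(void)
{
    struct aperture regs = { .cpu_address = 0xf0000000u, .size = 0x1000000u };

    printf("%d\n", in_aperture(&regs, 0xf0001000u, 0x100));      /* 1: in range */
    printf("%d\n", in_aperture(&regs, 0xf0001000u, UINT64_MAX)); /* 0: wraps    */
    return 0;
}
```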

View File

@ -890,7 +890,7 @@ NV_STATUS osAllocPagesInternal(
OBJSYS *pSys = SYS_GET_INSTANCE();
OBJGPU *pGpu = pMemDesc->pGpu;
nv_state_t *nv = NV_GET_NV_STATE(pGpu);
void *pMemData;
void *pMemData = NULL;
NV_STATUS status;
memdescSetAddress(pMemDesc, NvP64_NULL);

View File

@ -775,7 +775,7 @@ static NV_STATUS RmAccessRegistry(
// the passed-in ParmStrLength does not account for '\0'
ParmStrLength++;
if (ParmStrLength > NVOS38_MAX_REGISTRY_STRING_LENGTH)
if ((ParmStrLength == 0) || (ParmStrLength > NVOS38_MAX_REGISTRY_STRING_LENGTH))
{
RmStatus = NV_ERR_INVALID_STRING_LENGTH;
goto done;
@ -788,6 +788,11 @@ static NV_STATUS RmAccessRegistry(
RmStatus = NV_ERR_OPERATING_SYSTEM;
goto done;
}
if (tmpParmStr[ParmStrLength - 1] != '\0')
{
RmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
}
if ((AccessType == NVOS38_ACCESS_TYPE_READ_BINARY) ||
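
Together, the two added checks close a pair of holes in the registry-string path: a `ParmStrLength` that wrapped to zero after the preceding `ParmStrLength++` would otherwise pass the length test, and a user buffer that is not actually NUL-terminated would otherwise be treated as a C string. A hedged sketch of the same validation for a string copied in from an untrusted caller (the names and the 256-byte limit are illustrative, not the RM API):

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_REGISTRY_STRING_LENGTH 256  /* stand-in for NVOS38_MAX_... */

enum status { OK = 0, ERR_INVALID_STRING_LENGTH, ERR_INVALID_ARGUMENT };

/* Validate an untrusted, caller-supplied string of 'len' bytes,
 * where len is expected to include the terminating NUL. */
static enum status validate_user_string(const char *buf, uint32_t len)
{
    if (len == 0 || len > MAX_REGISTRY_STRING_LENGTH)
        return ERR_INVALID_STRING_LENGTH;   /* rejects wrapped or oversized lengths */

    if (buf[len - 1] != '\0')
        return ERR_INVALID_ARGUMENT;        /* caller lied about termination */

    return OK;
}

int main(void)
{
    char good[] = "RMFoo";
    char bad[4] = { 'R', 'M', 'F', 'o' };   /* no NUL inside the claimed length */

    printf("%d\n", validate_user_string(good, sizeof good)); /* 0 */
    printf("%d\n", validate_user_string(bad, sizeof bad));   /* 2 */
    return 0;
}
```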
@ -2121,9 +2126,20 @@ static NV_STATUS RmGetAllocPrivate(
if (rmStatus != NV_OK)
goto done;
endingOffset = pageOffset + length;
if (!portSafeAddU64(pageOffset, length, &endingOffset))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
pageCount = (endingOffset / os_page_size);
pageCount += (*pPageIndex + ((endingOffset % os_page_size) ? 1 : 0));
if (!portSafeAddU64(*pPageIndex + ((endingOffset % os_page_size) ? 1 : 0),
pageCount, &pageCount))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
if (pageCount > NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount))
{
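
`portSafeAddU64` is the NVPORT checked-add primitive: it reports failure instead of silently wrapping, so both the ending-offset and page-count computations above now fail cleanly on hostile inputs rather than producing a small bogus page count. A plausible standalone equivalent built on compiler intrinsics, assuming GCC/Clang `__builtin_add_overflow` (NVPORT's actual implementation may differ):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Checked addition in the spirit of portSafeAddU64: returns false
 * if a + b does not fit in 64 bits. */
static bool safe_add_u64(uint64_t a, uint64_t b, uint64_t *out)
{
    return !__builtin_add_overflow(a, b, out);
}

int main(void)
{
    const uint64_t page_size = 4096;
    uint64_t page_offset = 0xFFFFFFFFFFFFF000ull;  /* hostile offset */
    uint64_t length      = 0x2000;
    uint64_t ending_offset, page_count;

    if (!safe_add_u64(page_offset, length, &ending_offset)) {
        puts("rejected: ending offset overflows");
        return 1;
    }

    page_count = ending_offset / page_size +
                 ((ending_offset % page_size) ? 1 : 0);
    printf("pages: %llu\n", (unsigned long long)page_count);
    return 0;
}
```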

View File

@ -2506,12 +2506,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_DispComm
#endif
},
{ /* [2] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetVblankCounter_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*flags=*/ 0x10u,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x730109u,
/*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS),
@ -4211,7 +4211,7 @@ __nvoc_ctor_DispCommon_exit:
static void __nvoc_init_funcTable_DispCommon_1(DispCommon *pThis) {
PORT_UNREFERENCED_VARIABLE(pThis);
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
pThis->__dispcmnCtrlCmdSystemGetVblankCounter__ = &dispcmnCtrlCmdSystemGetVblankCounter_IMPL;
#endif

View File

@ -70,16 +70,16 @@ void __nvoc_dtor_OBJFBSR(OBJFBSR *pThis) {
}
void __nvoc_init_dataField_OBJFBSR(OBJFBSR *pThis, RmHalspecOwner *pRmhalspecowner) {
ChipHal *chipHal = &pRmhalspecowner->chipHal;
const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
ChipHal *chipHal = &pRmhalspecowner->chipHal;
const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
PORT_UNREFERENCED_VARIABLE(chipHal);
PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
PORT_UNREFERENCED_VARIABLE(chipHal);
PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
}
NV_STATUS __nvoc_ctor_Object(Object* );
@ -97,16 +97,16 @@ __nvoc_ctor_OBJFBSR_exit:
}
static void __nvoc_init_funcTable_OBJFBSR_1(OBJFBSR *pThis, RmHalspecOwner *pRmhalspecowner) {
ChipHal *chipHal = &pRmhalspecowner->chipHal;
const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
ChipHal *chipHal = &pRmhalspecowner->chipHal;
const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
PORT_UNREFERENCED_VARIABLE(pThis);
PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
PORT_UNREFERENCED_VARIABLE(chipHal);
PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
PORT_UNREFERENCED_VARIABLE(chipHal);
PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
// Hal function -- fbsrBegin
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */

View File

@ -158,6 +158,38 @@ NV_STATUS __nvoc_objCreate_OBJFBSR(OBJFBSR**, Dynamic*, NvU32);
#define fbsrBegin_HAL(pGpu, pFbsr, op) fbsrBegin_DISPATCH(pGpu, pFbsr, op)
#define fbsrEnd(pGpu, pFbsr) fbsrEnd_DISPATCH(pGpu, pFbsr)
#define fbsrEnd_HAL(pGpu, pFbsr) fbsrEnd_DISPATCH(pGpu, pFbsr)
static inline NV_STATUS fbsrCreateChannelForCopy_46f6a7(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr) {
return NV_ERR_NOT_SUPPORTED;
}
#ifdef __nvoc_fbsr_h_disabled
static inline NV_STATUS fbsrCreateChannelForCopy(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr) {
NV_ASSERT_FAILED_PRECOMP("OBJFBSR was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_fbsr_h_disabled
#define fbsrCreateChannelForCopy(pGpu, pFbsr) fbsrCreateChannelForCopy_46f6a7(pGpu, pFbsr)
#endif //__nvoc_fbsr_h_disabled
#define fbsrCreateChannelForCopy_HAL(pGpu, pFbsr) fbsrCreateChannelForCopy(pGpu, pFbsr)
static inline NV_STATUS fbsrDestroyChannelForCopy_46f6a7(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr) {
return NV_ERR_NOT_SUPPORTED;
}
#ifdef __nvoc_fbsr_h_disabled
static inline NV_STATUS fbsrDestroyChannelForCopy(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr) {
NV_ASSERT_FAILED_PRECOMP("OBJFBSR was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_fbsr_h_disabled
#define fbsrDestroyChannelForCopy(pGpu, pFbsr) fbsrDestroyChannelForCopy_46f6a7(pGpu, pFbsr)
#endif //__nvoc_fbsr_h_disabled
#define fbsrDestroyChannelForCopy_HAL(pGpu, pFbsr) fbsrDestroyChannelForCopy(pGpu, pFbsr)
NV_STATUS fbsrInit_GM107(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr);

View File

@ -1115,6 +1115,7 @@ struct OBJGPU {
NvBool bComputePolicyTimesliceSupported;
NvBool bGlobalPoisonFuseEnabled;
RmPhysAddr simAccessBufPhysAddr;
NvU32 fabricProbeRegKeyOverride;
NvU8 fabricProbeRetryDelay;
NvU8 fabricProbeSlowdownThreshold;
NvBool bVgpuGspPluginOffloadEnabled;

View File

@ -132,6 +132,8 @@ typedef enum
HAL_IMPL_T194,
HAL_IMPL_T002_TURING_NOT_EXIST,
HAL_IMPL_T234,
HAL_IMPL_T003_HOPPER_NOT_EXIST,
HAL_IMPL_T004_ADA_NOT_EXIST,
HAL_IMPL_T234D,
HAL_IMPL_AMODEL,

View File

@ -91,6 +91,8 @@ const struct ChipID
{ 0x0, 0x0, 0x0 } , // T194 (disabled)
{ 0x0, 0x0, 0x0 } , // T002_TURING_NOT_EXIST (disabled)
{ 0x0, 0x0, 0x0 } , // T234 (disabled)
{ 0x0, 0x0, 0x0 } , // T003_HOPPER_NOT_EXIST (disabled)
{ 0x0, 0x0, 0x0 } , // T004_ADA_NOT_EXIST (disabled)
{ 0x0, 0x0, 0x0 } , // T234D (disabled)
{ 0x0, 0x0, 0x0 } , // AMODEL (disabled)

View File

@ -63,7 +63,7 @@ struct OBJHALMGR {
struct Object __nvoc_base_Object;
struct Object *__nvoc_pbase_Object;
struct OBJHALMGR *__nvoc_pbase_OBJHALMGR;
struct OBJHAL *pHalList[66];
struct OBJHAL *pHalList[68];
};
#ifndef __NVOC_CLASS_OBJHALMGR_TYPEDEF__

View File

@ -273,7 +273,7 @@ static void __nvoc_init_funcTable_KernelDisplay_1(KernelDisplay *pThis, RmHalspe
// Hal function -- kdispServiceInterrupt
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
pThis->__kdispServiceInterrupt__ = &kdispServiceInterrupt_cd2c9e;
pThis->__kdispServiceInterrupt__ = &kdispServiceInterrupt_d3ef2b;
}
// Hal function -- kdispSelectClass

View File

@ -512,8 +512,8 @@ static inline void kdispRegisterIntrService_DISPATCH(OBJGPU *pGpu, struct Kernel
pKernelDisplay->__kdispRegisterIntrService__(pGpu, pKernelDisplay, pRecords);
}
static inline NvU32 kdispServiceInterrupt_cd2c9e(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, IntrServiceServiceInterruptArguments *pParams) {
kdispServiceVblank(pGpu, pKernelDisplay, 0, ((2) | (4)), ((void *)0));
static inline NvU32 kdispServiceInterrupt_d3ef2b(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, IntrServiceServiceInterruptArguments *pParams) {
kdispServiceVblank(pGpu, pKernelDisplay, 0, ((2) | (16)), ((void *)0));
return NV_OK;
}

View File

@ -144,7 +144,7 @@ static void __nvoc_init_funcTable_KernelHead_1(KernelHead *pThis, RmHalspecOwner
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f0fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->__kheadResetPendingVblankForKernel__ = &kheadResetPendingVblankForKernel_1ad688;
pThis->__kheadResetPendingVblankForKernel__ = &kheadResetPendingVblankForKernel_8305c4;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000400UL) )) /* ChipHal: GA100 | GH100 */
{

View File

@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -66,17 +66,7 @@ struct __nvoc_inner_struc_KernelHead_1__ {
struct {
VBLANKCALLBACK *pListLL;
VBLANKCALLBACK *pListNL;
VBLANKCALLBACK gatherInfo;
} Callback;
struct {
struct {
NvU64 Current;
NvU64 Last;
NvU32 Average;
} Time;
NvU32 Count;
NvU32 Timeout;
} Info;
NvU32 IntrState;
};
@ -124,10 +114,10 @@ NV_STATUS __nvoc_objCreate_KernelHead(KernelHead**, Dynamic*, NvU32);
#define kheadProcessVblankCallbacks(pGpu, pKernelHead, arg0) kheadProcessVblankCallbacks_DISPATCH(pGpu, pKernelHead, arg0)
#define kheadProcessVblankCallbacks_HAL(pGpu, pKernelHead, arg0) kheadProcessVblankCallbacks_DISPATCH(pGpu, pKernelHead, arg0)
#define kheadResetPendingVblank(pGpu, pKhead, arg0) kheadResetPendingVblank_DISPATCH(pGpu, pKhead, arg0)
#define kheadResetPendingVblank_HAL(pGpu, pKhead, arg0) kheadResetPendingVblank_DISPATCH(pGpu, pKhead, arg0)
#define kheadResetPendingVblankForKernel(pGpu, pKhead, pThreadState) kheadResetPendingVblankForKernel_DISPATCH(pGpu, pKhead, pThreadState)
#define kheadResetPendingVblankForKernel_HAL(pGpu, pKhead, pThreadState) kheadResetPendingVblankForKernel_DISPATCH(pGpu, pKhead, pThreadState)
#define kheadResetPendingVblank(pGpu, pKernelHead, arg0) kheadResetPendingVblank_DISPATCH(pGpu, pKernelHead, arg0)
#define kheadResetPendingVblank_HAL(pGpu, pKernelHead, arg0) kheadResetPendingVblank_DISPATCH(pGpu, pKernelHead, arg0)
#define kheadResetPendingVblankForKernel(pGpu, pKernelHead, pThreadState) kheadResetPendingVblankForKernel_DISPATCH(pGpu, pKernelHead, pThreadState)
#define kheadResetPendingVblankForKernel_HAL(pGpu, pKernelHead, pThreadState) kheadResetPendingVblankForKernel_DISPATCH(pGpu, pKernelHead, pThreadState)
#define kheadReadPendingVblank(pGpu, pKernelHead, intr) kheadReadPendingVblank_DISPATCH(pGpu, pKernelHead, intr)
#define kheadReadPendingVblank_HAL(pGpu, pKernelHead, intr) kheadReadPendingVblank_DISPATCH(pGpu, pKernelHead, intr)
NvU32 kheadGetVblankTotalCounter_IMPL(struct KernelHead *pKernelHead);
@ -262,33 +252,6 @@ static inline void kheadWriteVblankIntrEnable(struct OBJGPU *pGpu, struct Kernel
#define kheadWriteVblankIntrEnable_HAL(pGpu, pKernelHead, arg0) kheadWriteVblankIntrEnable(pGpu, pKernelHead, arg0)
void kheadSetVblankGatherInfo_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvBool arg0);
#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadSetVblankGatherInfo(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvBool arg0) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadSetVblankGatherInfo(pGpu, pKernelHead, arg0) kheadSetVblankGatherInfo_IMPL(pGpu, pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled
#define kheadSetVblankGatherInfo_HAL(pGpu, pKernelHead, arg0) kheadSetVblankGatherInfo(pGpu, pKernelHead, arg0)
NvU32 kheadTickVblankInfo_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead);
#ifdef __nvoc_kernel_head_h_disabled
static inline NvU32 kheadTickVblankInfo(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) {
NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
return 0;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadTickVblankInfo(pGpu, pKernelHead) kheadTickVblankInfo_IMPL(pGpu, pKernelHead)
#endif //__nvoc_kernel_head_h_disabled
#define kheadTickVblankInfo_HAL(pGpu, pKernelHead) kheadTickVblankInfo(pGpu, pKernelHead)
void kheadProcessVblankCallbacks_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0);
static inline void kheadProcessVblankCallbacks_e426af(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0) {
@ -300,28 +263,28 @@ static inline void kheadProcessVblankCallbacks_DISPATCH(struct OBJGPU *pGpu, str
pKernelHead->__kheadProcessVblankCallbacks__(pGpu, pKernelHead, arg0);
}
void kheadResetPendingVblank_v04_00_KERNEL(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0);
void kheadResetPendingVblank_v04_00_KERNEL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, THREAD_STATE_NODE *arg0);
static inline void kheadResetPendingVblank_e426af(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) {
static inline void kheadResetPendingVblank_e426af(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, THREAD_STATE_NODE *arg0) {
NV_ASSERT_PRECOMP(0);
return;
}
static inline void kheadResetPendingVblank_DISPATCH(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) {
pKhead->__kheadResetPendingVblank__(pGpu, pKhead, arg0);
static inline void kheadResetPendingVblank_DISPATCH(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, THREAD_STATE_NODE *arg0) {
pKernelHead->__kheadResetPendingVblank__(pGpu, pKernelHead, arg0);
}
static inline void kheadResetPendingVblankForKernel_1ad688(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *pThreadState) {
kheadResetPendingVblank(pGpu, pKhead, pThreadState);
static inline void kheadResetPendingVblankForKernel_8305c4(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, THREAD_STATE_NODE *pThreadState) {
kheadResetPendingVblank(pGpu, pKernelHead, pThreadState);
}
static inline void kheadResetPendingVblankForKernel_e426af(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *pThreadState) {
static inline void kheadResetPendingVblankForKernel_e426af(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, THREAD_STATE_NODE *pThreadState) {
NV_ASSERT_PRECOMP(0);
return;
}
static inline void kheadResetPendingVblankForKernel_DISPATCH(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *pThreadState) {
pKhead->__kheadResetPendingVblankForKernel__(pGpu, pKhead, pThreadState);
static inline void kheadResetPendingVblankForKernel_DISPATCH(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, THREAD_STATE_NODE *pThreadState) {
pKernelHead->__kheadResetPendingVblankForKernel__(pGpu, pKernelHead, pThreadState);
}
NvU32 kheadReadPendingVblank_v04_00_KERNEL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 intr);

View File

@ -588,6 +588,19 @@ static void __nvoc_init_funcTable_KernelNvlink_1(KernelNvlink *pThis, RmHalspecO
}
}
// Hal function -- knvlinkDirectConnectCheck
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__knvlinkDirectConnectCheck__ = &knvlinkDirectConnectCheck_GH100;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f0ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->__knvlinkDirectConnectCheck__ = &knvlinkDirectConnectCheck_b3696a;
}
}
pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelNvlink_engstateConstructEngine;
pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreInitLocked__ = &__nvoc_thunk_KernelNvlink_engstateStatePreInitLocked;

View File

@ -233,6 +233,7 @@ struct KernelNvlink {
NV_STATUS (*__knvlinkDiscoverPostRxDetLinks__)(OBJGPU *, struct KernelNvlink *, OBJGPU *);
NV_STATUS (*__knvlinkLogAliDebugMessages__)(OBJGPU *, struct KernelNvlink *);
NvBool (*__knvlinkIsFloorSweepingNeeded__)(OBJGPU *, struct KernelNvlink *, NvU32, NvU32);
void (*__knvlinkDirectConnectCheck__)(OBJGPU *, struct KernelNvlink *);
NV_STATUS (*__knvlinkReconcileTunableState__)(POBJGPU, struct KernelNvlink *, void *);
NV_STATUS (*__knvlinkStateInitLocked__)(POBJGPU, struct KernelNvlink *);
NV_STATUS (*__knvlinkStatePreLoad__)(POBJGPU, struct KernelNvlink *, NvU32);
@ -416,6 +417,8 @@ NV_STATUS __nvoc_objCreate_KernelNvlink(KernelNvlink**, Dynamic*, NvU32);
#define knvlinkLogAliDebugMessages_HAL(pGpu, pKernelNvlink) knvlinkLogAliDebugMessages_DISPATCH(pGpu, pKernelNvlink)
#define knvlinkIsFloorSweepingNeeded(pGpu, pKernelNvlink, numActiveLinksPerIoctrl, numLinksPerIoctrl) knvlinkIsFloorSweepingNeeded_DISPATCH(pGpu, pKernelNvlink, numActiveLinksPerIoctrl, numLinksPerIoctrl)
#define knvlinkIsFloorSweepingNeeded_HAL(pGpu, pKernelNvlink, numActiveLinksPerIoctrl, numLinksPerIoctrl) knvlinkIsFloorSweepingNeeded_DISPATCH(pGpu, pKernelNvlink, numActiveLinksPerIoctrl, numLinksPerIoctrl)
#define knvlinkDirectConnectCheck(pGpu, pKernelNvlink) knvlinkDirectConnectCheck_DISPATCH(pGpu, pKernelNvlink)
#define knvlinkDirectConnectCheck_HAL(pGpu, pKernelNvlink) knvlinkDirectConnectCheck_DISPATCH(pGpu, pKernelNvlink)
#define knvlinkReconcileTunableState(pGpu, pEngstate, pTunableState) knvlinkReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define knvlinkStateInitLocked(pGpu, pEngstate) knvlinkStateInitLocked_DISPATCH(pGpu, pEngstate)
#define knvlinkStatePreLoad(pGpu, pEngstate, arg0) knvlinkStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
@ -1534,6 +1537,16 @@ static inline NvBool knvlinkIsFloorSweepingNeeded_DISPATCH(OBJGPU *pGpu, struct
return pKernelNvlink->__knvlinkIsFloorSweepingNeeded__(pGpu, pKernelNvlink, numActiveLinksPerIoctrl, numLinksPerIoctrl);
}
static inline void knvlinkDirectConnectCheck_b3696a(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) {
return;
}
void knvlinkDirectConnectCheck_GH100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink);
static inline void knvlinkDirectConnectCheck_DISPATCH(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) {
pKernelNvlink->__knvlinkDirectConnectCheck__(pGpu, pKernelNvlink);
}
static inline NV_STATUS knvlinkReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelNvlink *pEngstate, void *pTunableState) {
return pEngstate->__knvlinkReconcileTunableState__(pGpu, pEngstate, pTunableState);
}
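
These generated hunks follow the standard NVOC HAL-dispatch shape: a per-object function pointer (`__knvlinkDirectConnectCheck__`) is bound at init time to either a chip-specific implementation (`_GH100`) or a no-op stub (`_b3696a`), and every caller goes through the `_DISPATCH` inline. Stripped of the generator's bitmask arithmetic, this is ordinary C function-pointer dispatch; a minimal sketch with illustrative names:

```c
#include <stdio.h>

struct kernel_nvlink {
    /* HAL slot, filled in per chip at init time. */
    void (*direct_connect_check)(struct kernel_nvlink *self);
};

/* Chip-specific implementation (GH100 analogue). */
static void direct_connect_check_gh100(struct kernel_nvlink *self)
{
    (void)self;
    puts("GH100: probing for directly connected peers");
}

/* No-op stub for chips without the feature (_b3696a analogue). */
static void direct_connect_check_stub(struct kernel_nvlink *self)
{
    (void)self;
}

/* _DISPATCH analogue: always call through the slot. */
static inline void direct_connect_check(struct kernel_nvlink *self)
{
    self->direct_connect_check(self);
}

int main(void)
{
    struct kernel_nvlink gh100  = { direct_connect_check_gh100 };
    struct kernel_nvlink turing = { direct_connect_check_stub };

    direct_connect_check(&gh100);   /* prints */
    direct_connect_check(&turing);  /* no-op  */
    return 0;
}
```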

View File

@ -825,7 +825,6 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x20F3, 0x17a2, 0x10de, "NVIDIA A800-SXM4-80GB" },
{ 0x20F5, 0x1799, 0x10de, "NVIDIA A800 80GB PCIe" },
{ 0x20F5, 0x179a, 0x10de, "NVIDIA A800 80GB PCIe LC" },
{ 0x20F6, 0x17a3, 0x10de, "NVIDIA A800 40GB PCIe" },
{ 0x2182, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660 Ti" },
{ 0x2184, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660" },
{ 0x2187, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650 SUPER" },
@ -959,7 +958,11 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x25E5, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" },
{ 0x25F9, 0x0000, 0x0000, "NVIDIA RTX A1000 Embedded GPU" },
{ 0x25FA, 0x0000, 0x0000, "NVIDIA RTX A2000 Embedded GPU" },
{ 0x25FB, 0x0000, 0x0000, "NVIDIA RTX A500 Embedded GPU" },
{ 0x2684, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090" },
{ 0x26B1, 0x16a1, 0x10de, "NVIDIA RTX 6000 Ada Generation" },
{ 0x26B5, 0x169d, 0x10de, "NVIDIA L40" },
{ 0x2704, 0x0000, 0x0000, "NVIDIA GeForce RTX 4080" },
{ 0x13BD, 0x11cc, 0x10DE, "GRID M10-0B" },
{ 0x13BD, 0x11cd, 0x10DE, "GRID M10-1B" },
{ 0x13BD, 0x11ce, 0x10DE, "GRID M10-0Q" },
@ -1367,18 +1370,6 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x20F5, 0x17c9, 0x10DE, "GRID A800D-20C" },
{ 0x20F5, 0x17ca, 0x10DE, "GRID A800D-40C" },
{ 0x20F5, 0x17cb, 0x10DE, "GRID A800D-80C" },
{ 0x20F6, 0x17cc, 0x10DE, "GRID A800-1-5CME" },
{ 0x20F6, 0x17cd, 0x10DE, "GRID A800-1-5C" },
{ 0x20F6, 0x17ce, 0x10DE, "GRID A800-2-10C" },
{ 0x20F6, 0x17cf, 0x10DE, "GRID A800-3-20C" },
{ 0x20F6, 0x17d0, 0x10DE, "GRID A800-4-20C" },
{ 0x20F6, 0x17d1, 0x10DE, "GRID A800-7-40C" },
{ 0x20F6, 0x17d2, 0x10DE, "GRID A800-4C" },
{ 0x20F6, 0x17d3, 0x10DE, "GRID A800-5C" },
{ 0x20F6, 0x17d4, 0x10DE, "GRID A800-8C" },
{ 0x20F6, 0x17d5, 0x10DE, "GRID A800-10C" },
{ 0x20F6, 0x17d6, 0x10DE, "GRID A800-20C" },
{ 0x20F6, 0x17d7, 0x10DE, "GRID A800-40C" },
{ 0x2230, 0x14fa, 0x10DE, "NVIDIA RTXA6000-1B" },
{ 0x2230, 0x14fb, 0x10DE, "NVIDIA RTXA6000-2B" },
{ 0x2230, 0x14fc, 0x10DE, "NVIDIA RTXA6000-1Q" },
@ -1528,20 +1519,6 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2238, 0x16b8, 0x10DE, "NVIDIA A10M-10C" },
{ 0x2238, 0x16b9, 0x10DE, "NVIDIA A10M-20C" },
{ 0x2238, 0x16e6, 0x10DE, "NVIDIA A10M-1" },
{ 0x2322, 0x17e2, 0x10DE, "NVIDIA H800-1-10CME" },
{ 0x2322, 0x17e3, 0x10DE, "NVIDIA H800-1-10C" },
{ 0x2322, 0x17e4, 0x10DE, "NVIDIA H800-2-20C" },
{ 0x2322, 0x17e5, 0x10DE, "NVIDIA H800-3-40C" },
{ 0x2322, 0x17e6, 0x10DE, "NVIDIA H800-4-40C" },
{ 0x2322, 0x17e7, 0x10DE, "NVIDIA H800-7-80C" },
{ 0x2322, 0x17e8, 0x10DE, "NVIDIA H800-4C" },
{ 0x2322, 0x17e9, 0x10DE, "NVIDIA H800-5C" },
{ 0x2322, 0x17ea, 0x10DE, "NVIDIA H800-8C" },
{ 0x2322, 0x17eb, 0x10DE, "NVIDIA H800-10C" },
{ 0x2322, 0x17ec, 0x10DE, "NVIDIA H800-16C" },
{ 0x2322, 0x17ed, 0x10DE, "NVIDIA H800-20C" },
{ 0x2322, 0x17ee, 0x10DE, "NVIDIA H800-40C" },
{ 0x2322, 0x17ef, 0x10DE, "NVIDIA H800-80C" },
{ 0x2331, 0x16d3, 0x10DE, "NVIDIA H100-1-10C" },
{ 0x2331, 0x16d4, 0x10DE, "NVIDIA H100-2-20C" },
{ 0x2331, 0x16d5, 0x10DE, "NVIDIA H100-3-40C" },

View File

@ -342,12 +342,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler
#endif
},
{ /* [10] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdReservePmAreaPcSampler_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*flags=*/ 0x2210u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb0cc010bu,
/*paramSize=*/ 0,
@ -357,12 +357,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler
#endif
},
{ /* [11] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdReleasePmAreaPcSampler_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*flags=*/ 0x2210u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb0cc010cu,
/*paramSize=*/ 0,
@ -546,11 +546,11 @@ static void __nvoc_init_funcTable_ProfilerBase_1(ProfilerBase *pThis, RmHalspecO
pThis->__profilerBaseCtrlCmdInternalPermissionsInit__ = &profilerBaseCtrlCmdInternalPermissionsInit_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
pThis->__profilerBaseCtrlCmdReservePmAreaPcSampler__ = &profilerBaseCtrlCmdReservePmAreaPcSampler_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
pThis->__profilerBaseCtrlCmdReleasePmAreaPcSampler__ = &profilerBaseCtrlCmdReleasePmAreaPcSampler_IMPL;
#endif

View File

@ -343,6 +343,12 @@
#define IsT234(pGpu) ((0) && (pGpu))
#define IsT234orBetter(pGpu) ((0) && (pGpu))
#define IsT003_HOPPER_NOT_EXIST(pGpu) ((0) && (pGpu))
#define IsT003_HOPPER_NOT_EXISTorBetter(pGpu) ((0) && (pGpu))
#define IsT004_ADA_NOT_EXIST(pGpu) ((0) && (pGpu))
#define IsT004_ADA_NOT_EXISTorBetter(pGpu) ((0) && (pGpu))
// Any T23XG chip?
#define IsT23XG(pGpu) (0 && (pGpu))
#define IsT23XGorBetter(pGpu) (0 && (pGpu))
@ -607,6 +613,16 @@
#define IsAMPERE_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu))
// Any HOPPER_TEGRA_BIG_GPUS chip?
#define IsHOPPER_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu))
#define IsHOPPER_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu))
// Any ADA_TEGRA_BIG_GPUS chip?
#define IsADA_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu))
#define IsADA_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu))
// Any TEGRA_NVDISP_GPUS chip?
#define IsTEGRA_NVDISP_GPUS(pGpu) (0 && (pGpu))
#define IsTEGRA_NVDISP_GPUSorBetter(pGpu) (0 && (pGpu))
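
Each of these `Is<CHIP>(pGpu)` macros expands to `(0 && (pGpu))` in builds where the chip family is compiled out: the expression is a constant false that the optimizer deletes, yet it still syntactically uses and type-checks `pGpu`, so callers whose only use of the variable is the check don't trip unused-variable warnings. A self-contained illustration with a hypothetical `IsFOO`:

```c
#include <stdio.h>

struct gpu { int id; };

/* Compiled-out chip check: constant false, but (pGpu) is still
 * referenced for type checking, as in the rmconfig macros above. */
#define IsFOO(pGpu) (0 && (pGpu))

static void probe(struct gpu *gpu)   /* gpu counts as used via IsFOO */
{
    if (IsFOO(gpu))
        puts("FOO-specific path");   /* dead code, eliminated */
    else
        puts("generic path");
}

int main(void)
{
    struct gpu g = { 1 };
    probe(&g);
    return 0;
}
```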

View File

@ -1855,6 +1855,23 @@ static void rpc_iGrp_ipVersions_Install_v1E_0B(IGRP_IP_VERSIONS_TABLE_INFO *pInf
pRpcHal += 0;
#endif //
}
// No enabled chips use this variant provider
static void rpc_iGrp_ipVersions_Install_v1E_0C(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
{
#if 0
POBJGPU pGpu = pInfo->pGpu;
OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic;
RPC_HAL_IFACES *pRpcHal = &pRpc->_hal;
// avoid possible unused warnings
pGpu += 0;
pRpcHal += 0;
#endif //
}
@ -1957,23 +1974,6 @@ static void rpc_iGrp_ipVersions_Install_v1F_05(IGRP_IP_VERSIONS_TABLE_INFO *pInf
pRpcHal += 0;
#endif //
}
// No enabled chips use this variant provider
static void rpc_iGrp_ipVersions_Install_v1F_06(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
{
#if 0
POBJGPU pGpu = pInfo->pGpu;
OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic;
RPC_HAL_IFACES *pRpcHal = &pRpc->_hal;
// avoid possible unused warnings
pGpu += 0;
pRpcHal += 0;
#endif //
}
@ -2334,6 +2334,23 @@ static void rpc_iGrp_ipVersions_Install_v21_0A(IGRP_IP_VERSIONS_TABLE_INFO *pInf
#endif //
}
// No enabled chips use this variant provider
static void rpc_iGrp_ipVersions_Install_v21_0B(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
{
#if 0
POBJGPU pGpu = pInfo->pGpu;
OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic;
RPC_HAL_IFACES *pRpcHal = &pRpc->_hal;
// avoid possible unused warnings
pGpu += 0;
pRpcHal += 0;
#endif //
}
@ -2756,6 +2773,9 @@ static NV_STATUS rpc_iGrp_ipVersions_getInfo(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1E_0B[] = {
{ 0x1E0B0000, 0xFFFFFFFF, }, //
};
static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1E_0C[] = {
{ 0x1E0C0000, 0xFFFFFFFF, }, //
};
static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1E_0D[] = {
{ 0x1E0D0000, 0xFFFFFFFF, }, //
};
@ -2774,9 +2794,6 @@ static NV_STATUS rpc_iGrp_ipVersions_getInfo(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_05[] = {
{ 0x1F050000, 0xFFFFFFFF, }, //
};
static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_06[] = {
{ 0x1F060000, 0xFFFFFFFF, }, //
};
static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_07[] = {
{ 0x1F070000, 0xFFFFFFFF, }, //
};
@ -2840,6 +2857,9 @@ static NV_STATUS rpc_iGrp_ipVersions_getInfo(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v21_0A[] = {
{ 0x210A0000, 0xFFFFFFFF, }, //
};
static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v21_0B[] = {
{ 0x210B0000, 0xFFFFFFFF, }, //
};
#define _RPC_HAL_IGRP_ENTRY_INIT(v) \
{ RPC_IGRP_IP_VERSIONS_RANGES_##v, NV_ARRAY_ELEMENTS(RPC_IGRP_IP_VERSIONS_RANGES_##v), rpc_iGrp_ipVersions_Install_##v, }
@ -2947,13 +2967,13 @@ static NV_STATUS rpc_iGrp_ipVersions_getInfo(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
_RPC_HAL_IGRP_ENTRY_INIT(v1E_08), //
_RPC_HAL_IGRP_ENTRY_INIT(v1E_0A), //
_RPC_HAL_IGRP_ENTRY_INIT(v1E_0B), //
_RPC_HAL_IGRP_ENTRY_INIT(v1E_0C), //
_RPC_HAL_IGRP_ENTRY_INIT(v1E_0D), //
_RPC_HAL_IGRP_ENTRY_INIT(v1E_0E), //
_RPC_HAL_IGRP_ENTRY_INIT(v1F_00), //
_RPC_HAL_IGRP_ENTRY_INIT(v1F_03), //
_RPC_HAL_IGRP_ENTRY_INIT(v1F_04), //
_RPC_HAL_IGRP_ENTRY_INIT(v1F_05), //
_RPC_HAL_IGRP_ENTRY_INIT(v1F_06), //
_RPC_HAL_IGRP_ENTRY_INIT(v1F_07), //
_RPC_HAL_IGRP_ENTRY_INIT(v1F_08), //
_RPC_HAL_IGRP_ENTRY_INIT(v1F_0A), //
@ -2975,6 +2995,7 @@ static NV_STATUS rpc_iGrp_ipVersions_getInfo(IGRP_IP_VERSIONS_TABLE_INFO *pInfo)
_RPC_HAL_IGRP_ENTRY_INIT(v21_08), //
_RPC_HAL_IGRP_ENTRY_INIT(v21_09), //
_RPC_HAL_IGRP_ENTRY_INIT(v21_0A), //
_RPC_HAL_IGRP_ENTRY_INIT(v21_0B), //
};
#undef _RPC_HAL_IGRP_ENTRY_INIT

View File

@ -6532,6 +6532,21 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
{ /* [420] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkDirectConnectCheck_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x20803041u,
/*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "subdeviceCtrlCmdNvlinkDirectConnectCheck"
#endif
},
{ /* [421] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlcnGetDmemUsage_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
@ -6544,7 +6559,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetDmemUsage"
#endif
},
{ /* [421] */
{ /* [422] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6559,7 +6574,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetEngineArch"
#endif
},
{ /* [422] */
{ /* [423] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6574,7 +6589,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnUstreamerQueueInfo"
#endif
},
{ /* [423] */
{ /* [424] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6589,7 +6604,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnUstreamerControlGet"
#endif
},
{ /* [424] */
{ /* [425] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6604,7 +6619,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnUstreamerControlSet"
#endif
},
{ /* [425] */
{ /* [426] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6619,7 +6634,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetCtxBufferInfo"
#endif
},
{ /* [426] */
{ /* [427] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6634,7 +6649,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetCtxBufferSize"
#endif
},
{ /* [427] */
{ /* [428] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6649,7 +6664,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdEccGetClientExposedCounters"
#endif
},
{ /* [428] */
{ /* [429] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6664,7 +6679,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaRange"
#endif
},
{ /* [429] */
{ /* [430] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6679,7 +6694,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaSetupInstanceMemBlock"
#endif
},
{ /* [430] */
{ /* [431] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6694,7 +6709,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaGetRange"
#endif
},
{ /* [431] */
{ /* [432] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6709,7 +6724,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaGetFabricMemStats"
#endif
},
{ /* [432] */
{ /* [433] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6724,7 +6739,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGspGetFeatures"
#endif
},
{ /* [433] */
{ /* [434] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6739,7 +6754,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGrmgrGetGrFsInfo"
#endif
},
{ /* [434] */
{ /* [435] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6754,7 +6769,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixGc6BlockerRefCnt"
#endif
},
{ /* [435] */
{ /* [436] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6769,7 +6784,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixAllowDisallowGcoff"
#endif
},
{ /* [436] */
{ /* [437] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6784,7 +6799,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixAudioDynamicPower"
#endif
},
{ /* [437] */
{ /* [438] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6799,7 +6814,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixVidmemPersistenceStatus"
#endif
},
{ /* [438] */
{ /* [439] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6814,7 +6829,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixUpdateTgpStatus"
#endif
},
{ /* [439] */
{ /* [440] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6829,7 +6844,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalBootloadGspVgpuPluginTask"
#endif
},
{ /* [440] */
{ /* [441] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6844,7 +6859,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalShutdownGspVgpuPluginTask"
#endif
},
{ /* [441] */
{ /* [442] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6859,7 +6874,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalPgpuAddVgpuType"
#endif
},
{ /* [442] */
{ /* [443] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6874,7 +6889,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalEnumerateVgpuPerPgpu"
#endif
},
{ /* [443] */
{ /* [444] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6889,7 +6904,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalClearGuestVmInfo"
#endif
},
{ /* [444] */
{ /* [445] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6904,7 +6919,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetVgpuFbUsage"
#endif
},
{ /* [445] */
{ /* [446] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6919,7 +6934,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalSetVgpuEncoderCapacity"
#endif
},
{ /* [446] */
{ /* [447] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6934,7 +6949,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalCleanupGspVgpuPluginResources"
#endif
},
{ /* [447] */
{ /* [448] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6949,7 +6964,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetPgpuFsEncoding"
#endif
},
{ /* [448] */
{ /* [449] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6964,7 +6979,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetPgpuMigrationSupport"
#endif
},
{ /* [449] */
{ /* [450] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6979,7 +6994,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalSetVgpuMgrConfig"
#endif
},
{ /* [450] */
{ /* [451] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6994,7 +7009,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetAvailableHshubMask"
#endif
},
{ /* [451] */
{ /* [452] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7014,7 +7029,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
const struct NVOC_EXPORT_INFO __nvoc_export_info_Subdevice =
{
/*numEntries=*/ 452,
/*numEntries=*/ 453,
/*pExportEntries=*/ __nvoc_exported_method_def_Subdevice
};
@ -7458,6 +7473,10 @@ static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner *
pThis->__subdeviceCtrlCmdNvlinkL1Threshold__ = &subdeviceCtrlCmdNvlinkL1Threshold_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
pThis->__subdeviceCtrlCmdNvlinkDirectConnectCheck__ = &subdeviceCtrlCmdNvlinkDirectConnectCheck_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
pThis->__subdeviceCtrlCmdI2cReadBuffer__ = &subdeviceCtrlCmdI2cReadBuffer_IMPL;
#endif
@ -8121,10 +8140,6 @@ static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner *
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
pThis->__subdeviceCtrlCmdGpuAcquireComputeModeReservation__ = &subdeviceCtrlCmdGpuAcquireComputeModeReservation_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
pThis->__subdeviceCtrlCmdGpuReleaseComputeModeReservation__ = &subdeviceCtrlCmdGpuReleaseComputeModeReservation_IMPL;
#endif
}
static void __nvoc_init_funcTable_Subdevice_2(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) {
@ -8135,6 +8150,10 @@ static void __nvoc_init_funcTable_Subdevice_2(Subdevice *pThis, RmHalspecOwner *
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
pThis->__subdeviceCtrlCmdGpuReleaseComputeModeReservation__ = &subdeviceCtrlCmdGpuReleaseComputeModeReservation_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x142204u)
pThis->__subdeviceCtrlCmdGpuInitializeCtx__ = &subdeviceCtrlCmdGpuInitializeCtx_IMPL;
#endif

View File

@ -215,6 +215,7 @@ struct Subdevice {
NV_STATUS (*__subdeviceCtrlCmdNvlinkInbandSendData__)(struct Subdevice *, NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdNvlinkEomControl__)(struct Subdevice *, NV2080_CTRL_NVLINK_EOM_CONTROL_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdNvlinkL1Threshold__)(struct Subdevice *, NV2080_CTRL_NVLINK_L1_THRESHOLD_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdNvlinkDirectConnectCheck__)(struct Subdevice *, NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdI2cReadBuffer__)(struct Subdevice *, NV2080_CTRL_I2C_READ_BUFFER_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdI2cWriteBuffer__)(struct Subdevice *, NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdI2cReadReg__)(struct Subdevice *, NV2080_CTRL_I2C_RW_REG_PARAMS *);
@ -753,6 +754,7 @@ NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_C
#define subdeviceCtrlCmdNvlinkInbandSendData(pSubdevice, pParams) subdeviceCtrlCmdNvlinkInbandSendData_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdNvlinkEomControl(pSubdevice, pParams) subdeviceCtrlCmdNvlinkEomControl_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdNvlinkL1Threshold(pSubdevice, pParams) subdeviceCtrlCmdNvlinkL1Threshold_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdNvlinkDirectConnectCheck(pSubdevice, pParams) subdeviceCtrlCmdNvlinkDirectConnectCheck_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdI2cReadBuffer(pSubdevice, pI2cParams) subdeviceCtrlCmdI2cReadBuffer_DISPATCH(pSubdevice, pI2cParams)
#define subdeviceCtrlCmdI2cWriteBuffer(pSubdevice, pI2cParams) subdeviceCtrlCmdI2cWriteBuffer_DISPATCH(pSubdevice, pI2cParams)
#define subdeviceCtrlCmdI2cReadReg(pSubdevice, pI2cParams) subdeviceCtrlCmdI2cReadReg_DISPATCH(pSubdevice, pI2cParams)
@ -1732,6 +1734,12 @@ static inline NV_STATUS subdeviceCtrlCmdNvlinkL1Threshold_DISPATCH(struct Subdev
return pSubdevice->__subdeviceCtrlCmdNvlinkL1Threshold__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdNvlinkDirectConnectCheck_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS *pParams);
static inline NV_STATUS subdeviceCtrlCmdNvlinkDirectConnectCheck_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdNvlinkDirectConnectCheck__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdI2cReadBuffer_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_I2C_READ_BUFFER_PARAMS *pI2cParams);
static inline NV_STATUS subdeviceCtrlCmdI2cReadBuffer_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_I2C_READ_BUFFER_PARAMS *pI2cParams) {

View File

@ -127,6 +127,8 @@
#define RMCFG_CHIP_T210 0
#define RMCFG_CHIP_T186 0
#define RMCFG_CHIP_T002_TURING_NOT_EXIST 0
#define RMCFG_CHIP_T003_HOPPER_NOT_EXIST 0
#define RMCFG_CHIP_T004_ADA_NOT_EXIST 0
//
@ -201,6 +203,8 @@
#define RMCFG_CHIP_tAMPERE 0
#define RMCFG_CHIP_TAMPERE 0
#define RMCFG_CHIP_AMPERE_TEGRA_BIG_GPUS 0
#define RMCFG_CHIP_HOPPER_TEGRA_BIG_GPUS 0
#define RMCFG_CHIP_ADA_TEGRA_BIG_GPUS 0
#define RMCFG_CHIP_TEGRA_NVDISP_GPUS 0
#define RMCFG_CHIP_T23X_TEGRA_NVDISP_GPUS 0
#define RMCFG_CHIP_TEGRA_TEGRA_NVDISP_GPUS 0

View File

@ -68,6 +68,7 @@ typedef struct
NvU32 frlEnable;
NvU32 gpuDirectSupported;
NvU32 nvlinkP2PSupported;
NvU32 multiVgpuExclusive;
NvU8 vgpuExtraParams[VGPU_CONFIG_PARAMS_MAX_LENGTH];
NvU8 vgpuSignature[VGPU_SIGNATURE_SIZE];
} VGPU_TYPE;

View File

@ -801,6 +801,8 @@ typedef struct UvmGpuFaultInfo_tag
NvHandle faultBufferHandle;
} UvmGpuFaultInfo;
struct Device;
typedef struct UvmGpuPagingChannel_tag
{
struct gpuDevice *device;
@ -808,6 +810,7 @@ typedef struct UvmGpuPagingChannel_tag
NvHandle channelHandle;
NvHandle errorNotifierHandle;
void *pushStreamSp;
struct Device *pDevice;
} UvmGpuPagingChannel, *UvmGpuPagingChannelHandle;
typedef struct UvmGpuAccessCntrInfo_tag
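
The bare `struct Device;` declaration above lets this UVM-facing header store a `struct Device *` member without pulling in the RM header that defines `Device`: a pointer to an incomplete type is legal C, and only code that dereferences it needs the full definition. The same idea in miniature (names are illustrative):

```c
/* Header side: Device stays opaque here. */
struct Device;                       /* incomplete (forward) declaration */

struct paging_channel {
    struct Device *device;           /* pointer to incomplete type: OK */
};

/* Implementation side: the full definition is visible where needed. */
struct Device {
    int instance;
};

int device_instance(const struct paging_channel *ch)
{
    return ch->device->instance;     /* dereference needs the definition */
}

int main(void)
{
    struct Device dev = { 7 };
    struct paging_channel ch = { &dev };
    return device_instance(&ch) == 7 ? 0 : 1;
}
```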

View File

@ -1634,6 +1634,7 @@
#define NV_REG_STR_RM_GPU_FABRIC_PROBE "RmGpuFabricProbe"
#define NV_REG_STR_RM_GPU_FABRIC_PROBE_DELAY 7:0
#define NV_REG_STR_RM_GPU_FABRIC_PROBE_SLOWDOWN_THRESHOLD 15:8
#define NV_REG_STR_RM_GPU_FABRIC_PROBE_OVERRIDE 31:31
// Type DWORD
// Enable GPU fabric probe
//
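
The new `_OVERRIDE` field claims bit 31 of the same DWORD, alongside the existing delay (bits 7:0) and slowdown-threshold (bits 15:8) fields; the `fabricProbeRegKeyOverride` member added to `OBJGPU` earlier in this commit presumably caches the decoded value. Decoding such a packed regkey is plain shift-and-mask; a sketch using the field positions defined here (macro names are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

/* Field positions from the RmGpuFabricProbe regkey layout above. */
#define PROBE_DELAY(v)     (((v) >> 0)  & 0xffu)  /* bits 7:0   */
#define PROBE_SLOWDOWN(v)  (((v) >> 8)  & 0xffu)  /* bits 15:8  */
#define PROBE_OVERRIDE(v)  (((v) >> 31) & 0x1u)   /* bit  31    */

int main(void)
{
    uint32_t regval = 0x80000305u;  /* override=1, slowdown=3, delay=5 */

    printf("delay=%u slowdown=%u override=%u\n",
           PROBE_DELAY(regval), PROBE_SLOWDOWN(regval), PROBE_OVERRIDE(regval));
    return 0;
}
```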

View File

@ -205,6 +205,7 @@ enum {
X(RM, CTRL_GET_TOTAL_HS_CREDITS) // 196
X(RM, CTRL_GET_HS_CREDITS) // 197
X(RM, CTRL_SET_HS_CREDITS) // 198
X(RM, CTRL_PM_AREA_PC_SAMPLER) // 199
X(RM, NUM_FUNCTIONS) //END
#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
};

View File

@ -31,7 +31,7 @@
#define RPC_VERSION_FROM_VGX_VERSION(major, minor) ( DRF_NUM(_RPC, _VERSION_NUMBER, _MAJOR, major) | \
DRF_NUM(_RPC, _VERSION_NUMBER, _MINOR, minor))
#define VGX_MAJOR_VERSION_NUMBER 0x21
#define VGX_MINOR_VERSION_NUMBER 0x0A
#define VGX_MINOR_VERSION_NUMBER 0x0C
#define VGX_MAJOR_VERSION_NUMBER_VGPU_12_0 0x1A
@ -50,7 +50,7 @@
* 2. This is the first break in migration compatibility after a release.
*/
#define NV_VGPU_GRIDSW_INTERNAL_TO_EXTERNAL_VERSION_MAPPING \
{{0x21, 0x0}, {0x21, 0x0A}, {0x10, 0x1}}, \
{{0x21, 0x0}, {0x21, 0x0C}, {0x10, 0x1}}, \
{{0x20, 0x0}, {0x20, 0x04}, {0xF, 0x1}}, \
{{0x1F, 0x0}, {0x1F, 0xF}, {0xE, 0x1}}, \
{{0x1E, 0x0}, {0x1E, 0xE}, {0xD, 0x1}}, \

View File

@ -464,8 +464,20 @@ nvdDumpDebugBuffers_IMPL
if (status != NV_OK)
break;
dataBuffer = NvP64_VALUE(pUmdBuffer);
dataBuffer = (NvU8 *) portMemAllocStackOrHeap(bufSize);
if (dataBuffer == NULL)
{
status = NV_ERR_NO_MEMORY;
break;
}
// Copy UmdBuffer to prevent data races
portMemCopy(dataBuffer, bufSize, pUmdBuffer, bufSize);
portAtomicMemoryFenceFull();
status = prbAppendSubMsg(pPrbEnc, pCurrent->tag, dataBuffer, bufSize);
portMemFreeStackOrHeap(dataBuffer);
// Unmap DebugBuffer address
memdescUnmap(pCurrent->pMemDesc, NV_TRUE, // Kernel mapping?
osGetCurrentProcess(), pUmdBuffer, priv);
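
The rewritten loop no longer protobuf-encodes directly out of the user-mapped `pUmdBuffer`; it snapshots the buffer into a private allocation first, so user space cannot mutate the data between validation and the `prbAppendSubMsg()` encode (a classic double-fetch / TOCTOU race). A minimal sketch of the snapshot step, with a hypothetical `parse()` standing in for `prbAppendSubMsg`, plain `malloc` in place of `portMemAllocStackOrHeap`, and the memory fence elided:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for prbAppendSubMsg: only ever sees the private copy. */
static int parse(const uint8_t *buf, size_t len)
{
    printf("parsing %zu stable bytes\n", len);
    return 0;
}

/* Snapshot a buffer that another context may modify concurrently,
 * then validate and parse the snapshot only. */
static int parse_shared_buffer(const uint8_t *shared, size_t len)
{
    uint8_t *copy = malloc(len);
    if (copy == NULL)
        return -1;                 /* NV_ERR_NO_MEMORY analogue */

    memcpy(copy, shared, len);     /* single fetch of the user data */
    /* ...a full memory fence would go here on a weakly ordered target... */

    int rc = parse(copy, len);
    free(copy);
    return rc;
}

int main(void)
{
    uint8_t shared[64] = { 0 };
    return parse_shared_buffer(shared, sizeof shared);
}
```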
@ -523,7 +535,24 @@ prbAppendSubMsg
header = (NVDUMP_SUB_ALLOC_HEADER *)pCurrent;
subAlloc = pCurrent + sizeof(NVDUMP_SUB_ALLOC_HEADER);
subMsgLen = header->end - header->start;
// Check for out-of-bounds buffer access
if (pCurrent < buffer || subAlloc > (buffer + size))
{
status = NV_ERR_INVALID_ARGUMENT;
goto done;
}
if (!portSafeSubU16(header->end, header->start, &subMsgLen))
{
status = NV_ERR_INVALID_ARGUMENT;
goto done;
}
if ((subAlloc + subMsgLen) >= (buffer + size))
{
status = NV_ERR_INSUFFICIENT_RESOURCES;
goto done;
}
// If valid, copy contents
if (header->flags & NVDUMP_SUB_ALLOC_VALID)
{
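
`prbAppendSubMsg` now validates every sub-allocation header before trusting it: the header and its payload must lie inside the snapshot, the `end - start` subtraction must not underflow (hence `portSafeSubU16`), and the payload must not run past the buffer. A sketch of that walk over a TLV-style buffer, with hypothetical names (`suballoc_header`, `safe_sub_u16`) standing in for the RM types and assuming the GCC/Clang `__builtin_sub_overflow` intrinsic:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct suballoc_header {          /* NVDUMP_SUB_ALLOC_HEADER analogue */
    uint16_t start;
    uint16_t end;
    uint16_t flags;
};

static bool safe_sub_u16(uint16_t a, uint16_t b, uint16_t *out)
{
    return !__builtin_sub_overflow(a, b, out);   /* portSafeSubU16 analogue */
}

/* Validate one sub-allocation at 'cur' inside [buf, buf + size). */
static bool suballoc_ok(const uint8_t *buf, size_t size, const uint8_t *cur)
{
    const struct suballoc_header *hdr = (const void *)cur;
    const uint8_t *payload = cur + sizeof *hdr;
    uint16_t len;

    if (cur < buf || payload > buf + size)       /* header inside buffer? */
        return false;
    if (!safe_sub_u16(hdr->end, hdr->start, &len))
        return false;                            /* end < start: corrupt  */
    if (payload + len >= buf + size)             /* payload inside buffer? */
        return false;
    return true;
}

int main(void)
{
    uint8_t buf[32] = { 0 };     /* header with start == end: zero payload */
    return suballoc_ok(buf, sizeof buf, buf) ? 0 : 1;
}
```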

View File

@ -865,6 +865,10 @@ kbusMapBar2Aperture_SCRATCH
NvU32 flags
)
{
if (pMemDesc->Size >= NV_U32_MAX)
{
return NULL;
}
return portMemAllocNonPaged((NvU32)pMemDesc->Size);
}
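
The allocation here takes a 32-bit size, so the `(NvU32)pMemDesc->Size` cast would silently truncate any descriptor of 4 GiB or larger and hand back an undersized scratch buffer; the new check fails the mapping instead. The same guard in standard C, with a hypothetical `alloc32` standing in for the 32-bit allocator:

```c
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical allocator with a 32-bit size parameter, as the
 * (NvU32) cast in kbusMapBar2Aperture_SCRATCH implies. */
static void *alloc32(uint32_t size)
{
    return malloc(size);
}

static void *map_scratch(uint64_t desc_size)
{
    if (desc_size >= UINT32_MAX)   /* would truncate in the cast below */
        return NULL;

    return alloc32((uint32_t)desc_size);
}

int main(void)
{
    void *p = map_scratch(1ull << 40);  /* 1 TiB: refused, not truncated */
    return p == NULL ? 0 : 1;
}
```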

View File

@ -157,6 +157,8 @@ NV_STATUS _createThirdPartyP2PMappingExtent
PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO pExtentInfoTmp;
RsClient *pClient;
Device *pDevice;
NvBool bGpuLockTaken = (rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)) ||
rmGpuLockIsOwner());
status = serverGetClientUnderLock(&g_resServ, hClient, &pClient);
NV_ASSERT_OR_RETURN(status == NV_OK, NV_ERR_INVALID_ARGUMENT);
@ -200,12 +202,26 @@ NV_STATUS _createThirdPartyP2PMappingExtent
0,
&fbApertureOffset, status);
}
else if ((status = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
else
{
status = kbusMapFbAperture_HAL(pGpu, pKernelBus, (*ppExtentInfo)->pMemDesc, 0,
&fbApertureOffset, &fbApertureMapLength,
BUS_MAP_FB_FLAGS_MAP_UNICAST, hClient);
rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
if (!bGpuLockTaken)
{
status = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE,
RM_LOCK_MODULES_P2P);
NV_ASSERT_OR_GOTO(status == NV_OK, out);
}
status = kbusMapFbAperture_HAL(pGpu, pKernelBus,
(*ppExtentInfo)->pMemDesc, 0,
&fbApertureOffset,
&fbApertureMapLength,
BUS_MAP_FB_FLAGS_MAP_UNICAST,
hClient);
if (!bGpuLockTaken)
{
rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
}
}
if (status != NV_OK)
{
@ -246,14 +262,32 @@ out:
0,
fbApertureOffset, tmpStatus);
}
else if ((tmpStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
else
{
if (!bGpuLockTaken)
{
tmpStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE,
RM_LOCK_MODULES_P2P);
NV_ASSERT(tmpStatus == NV_OK);
if (tmpStatus != NV_OK)
{
_freeMappingExtentInfo(*ppExtentInfo);
return tmpStatus;
}
}
tmpStatus = kbusUnmapFbAperture_HAL(pGpu, pKernelBus,
(*ppExtentInfo)->pMemDesc,
fbApertureOffset,
fbApertureMapLength,
BUS_MAP_FB_FLAGS_MAP_UNICAST);
rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
if (!bGpuLockTaken)
{
rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
}
}
NV_ASSERT(tmpStatus == NV_OK);
}
@ -338,6 +372,11 @@ NV_STATUS RmThirdPartyP2PMappingFree
PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO pExtentInfoNext = NULL;
RsClient *pClient;
Device *pDevice;
NvBool bGpuLockTaken;
NvBool bVgpuRpc;
bGpuLockTaken = (rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)) ||
rmGpuLockIsOwner());
NV_ASSERT_OR_RETURN((pGpu != NULL), NV_ERR_INVALID_ARGUMENT);
NV_ASSERT_OR_RETURN((pMappingInfo != NULL), NV_ERR_INVALID_ARGUMENT);
@ -357,6 +396,15 @@ NV_STATUS RmThirdPartyP2PMappingFree
length = pMappingInfo->length;
address = pMappingInfo->address;
bVgpuRpc = IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu);
if (!bGpuLockTaken && !bVgpuRpc)
{
status = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE,
RM_LOCK_MODULES_P2P);
NV_ASSERT_OK_OR_RETURN(status);
}
for(pExtentInfo = pMappingInfo->pStart; (pExtentInfo != NULL) && (length != 0);
pExtentInfo = pExtentInfoNext)
{
@ -369,7 +417,7 @@ NV_STATUS RmThirdPartyP2PMappingFree
pExtentInfo->refCount--;
if (pExtentInfo->refCount == 0)
{
if (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))
if (bVgpuRpc)
{
NV_RM_RPC_UNMAP_MEMORY(pGpu, hClient,
RES_GET_HANDLE(pDevice),
@ -377,14 +425,13 @@ NV_STATUS RmThirdPartyP2PMappingFree
0,
pExtentInfo->fbApertureOffset, status);
}
else if ((status = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
else
{
status = kbusUnmapFbAperture_HAL(pGpu, pKernelBus,
pExtentInfo->pMemDesc,
pExtentInfo->fbApertureOffset,
pExtentInfo->length,
BUS_MAP_FB_FLAGS_MAP_UNICAST);
rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
}
NV_ASSERT(status == NV_OK);
@ -394,6 +441,12 @@ NV_STATUS RmThirdPartyP2PMappingFree
_freeMappingExtentInfo(pExtentInfo);
}
}
if (!bGpuLockTaken && !bVgpuRpc)
{
rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
}
NV_ASSERT(length == 0);
pMappingInfo->pStart = NULL;
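
The recurring shape in these third-party P2P hunks is lock-ownership-aware acquisition: record up front whether this thread already holds the device or global GPU lock (`bGpuLockTaken`), acquire only if it does not, and release only what it acquired. The map and unmap paths can then be entered both locked and unlocked without double-acquiring or dropping a caller's lock. A compact sketch of the pattern, with hypothetical `lock_is_owner`/`lock_acquire`/`lock_release` primitives standing in for the RM lock API:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical GPU-lock primitives standing in for
 * rmDeviceGpuLockIsOwner / rmDeviceGpuLocksAcquire / ...Release. */
static bool locked;
static bool lock_is_owner(void)  { return locked; }
static void lock_acquire(void)   { locked = true;  puts("acquired"); }
static void lock_release(void)   { locked = false; puts("released"); }

static void unmap_extent(void)
{
    /* Only take the lock if the caller doesn't already hold it. */
    bool lock_taken = lock_is_owner();

    if (!lock_taken)
        lock_acquire();

    puts("unmapping under lock");

    if (!lock_taken)
        lock_release();      /* release only what we acquired */
}

int main(void)
{
    unmap_extent();          /* caller entered unlocked */

    lock_acquire();
    unmap_extent();          /* caller already held the lock */
    lock_release();
    return 0;
}
```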

View File

@ -894,13 +894,32 @@ static NV_STATUS _thirdpartyp2pDelMappingInfoByKey
NvU64 address;
NvU64 startOffset;
CLI_THIRD_PARTY_P2P_VIDMEM_INFO_MAPIter vidMemMapIter;
NvBool bGpuLockTaken;
NvBool bVgpuRpc;
NV_ASSERT_OR_RETURN((pKey != NULL), NV_ERR_INVALID_ARGUMENT);
bGpuLockTaken = (rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)) ||
rmGpuLockIsOwner());
bVgpuRpc = IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu);
pSubdevice = pThirdPartyP2P->pSubdevice;
GPU_RES_SET_THREAD_BC_STATE(pThirdPartyP2P);
//
// vGPU RPC is being called without GPU lock held.
// So acquire the lock only for non-vGPU case and if
// no locks are held.
//
if (!bVgpuRpc && !bGpuLockTaken)
{
status = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE,
RM_LOCK_MODULES_P2P);
NV_ASSERT_OK_OR_RETURN(status);
}
vidMemMapIter = mapIterAll(&pThirdPartyP2P->vidmemInfoMap);
while (mapIterNext(&vidMemMapIter))
{
@ -930,7 +949,7 @@ static NV_STATUS _thirdpartyp2pDelMappingInfoByKey
"Freeing P2P mapping for gpu VA: 0x%llx, length: 0x%llx\n",
pExtentInfo->address, pExtentInfo->length);
if (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))
if (bVgpuRpc)
{
NV_RM_RPC_UNMAP_MEMORY(pGpu, pThirdPartyP2P->hClient,
RES_GET_PARENT_HANDLE(pSubdevice),
@ -938,14 +957,13 @@ static NV_STATUS _thirdpartyp2pDelMappingInfoByKey
0,
pExtentInfo->fbApertureOffset, status);
}
else if ((status = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK)
else
{
status = kbusUnmapFbAperture_HAL(pGpu, pKernelBus,
pExtentInfo->pMemDesc,
pExtentInfo->fbApertureOffset,
pExtentInfo->length,
BUS_MAP_FB_FLAGS_MAP_UNICAST);
rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
}
NV_ASSERT(status == NV_OK);
@ -982,6 +1000,10 @@ static NV_STATUS _thirdpartyp2pDelMappingInfoByKey
}
}
if (!bVgpuRpc && !bGpuLockTaken)
{
rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL);
}
return NV_OK;
}

View File

@ -34,7 +34,6 @@
#include "os/os.h"
#include "gpu/gpu.h"
#include "gpu/disp/kern_disp.h"
#include "gpu/disp/head/kernel_head.h"
#include "gpu/disp/disp_objs.h"
#include "rmapi/rs_utils.h"
#include "rmapi/rmapi.h"
@ -209,34 +208,3 @@ dispcmnCtrlCmdDpGenerateFakeInterrupt_IMPL
return NV_OK;
}
NV_STATUS
dispcmnCtrlCmdSystemGetVblankCounter_IMPL
(
DispCommon *pDispCommon,
NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS *pVBCounterParams
)
{
KernelDisplay *pKernelDisplay;
KernelHead *pKernelHead;
pKernelDisplay = GPU_GET_KERNEL_DISPLAY(DISPAPI_GET_GPU(pDispCommon));
if (pVBCounterParams->head >= kdispGetNumHeads(pKernelDisplay))
{
return NV_ERR_INVALID_ARGUMENT;
}
pKernelHead = KDISP_GET_HEAD(pKernelDisplay, pVBCounterParams->head);
NV_ASSERT(pKernelHead);
//
// Add a callback to start the vblank interrupt
// which will update the counter.
//
kheadSetVblankGatherInfo(DISPAPI_GET_GPU(pDispCommon), pKernelHead, NV_TRUE);
pVBCounterParams->verticalBlankCounter =
kheadGetVblankNormLatencyCounter_HAL(pKernelHead);
return NV_OK;
}

View File

@ -1,126 +0,0 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "gpu/disp/head/kernel_head.h"
#include "gpu/disp/kern_disp.h"
#include "objtmr.h"
// Callback proc for vblank info gathering
static NV_STATUS
_kheadSetVblankGatherInfoCallback
(
POBJGPU pGpu,
void *Object,
NvU32 param1,
NvV32 BuffNum,
NV_STATUS Status
)
{
KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu);
// We get the head from param1; this is the value we registered with the vblank service to pass back.
KernelHead *pKernelHead = KDISP_GET_HEAD(pKernelDisplay, param1);
NvU32 Timeout = 0;
Timeout = kheadTickVblankInfo(pGpu, pKernelHead);
if (Timeout == 0)
{
// Time to kill off our persistence. The vblank service will remove us when we clear the persistent flag during a callback.
pKernelHead->Vblank.Callback.gatherInfo.Flags &= ~VBLANK_CALLBACK_FLAG_PERSISTENT;
}
return NV_OK;
}
void
kheadSetVblankGatherInfo_IMPL
(
OBJGPU *pGpu,
KernelHead *pKernelHead,
NvBool enable
)
{
if (enable)
{
//
// Update the timeout member to some number of seconds' worth of callbacks.
// Note we're assuming 60 Hz here, which doesn't really matter since
// that's what headAddVblankCallback assumes as well.
//
pKernelHead->Vblank.Info.Timeout = 60 * VBLANK_INFO_GATHER_KEEPALIVE_SECONDS;
//
// Schedule a persistent vblank callback to handle the updates.
// This will enable vblank IRQs if not already running.
//
pKernelHead->Vblank.Callback.gatherInfo.Proc = _kheadSetVblankGatherInfoCallback;
pKernelHead->Vblank.Callback.gatherInfo.pObject = NULL;
pKernelHead->Vblank.Callback.gatherInfo.bObjectIsChannelDescendant = NV_FALSE;
pKernelHead->Vblank.Callback.gatherInfo.Param1 = pKernelHead->PublicId;
pKernelHead->Vblank.Callback.gatherInfo.Param2 = 0;
pKernelHead->Vblank.Callback.gatherInfo.Status = NV_OK;
pKernelHead->Vblank.Callback.gatherInfo.Flags = VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_NEXT | VBLANK_CALLBACK_FLAG_PERSISTENT;
kheadAddVblankCallback(pGpu, pKernelHead, &pKernelHead->Vblank.Callback.gatherInfo);
NV_PRINTF(LEVEL_INFO, "VBlank Gather Info requested,\n"
" vblank service scheduled on head %d.\n",
pKernelHead->PublicId);
}
else
{
kheadDeleteVblankCallback(pGpu, pKernelHead, &pKernelHead->Vblank.Callback.gatherInfo);
}
}
NvU32
kheadTickVblankInfo_IMPL
(
OBJGPU *pGpu,
KernelHead *pKernelHead
)
{
OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
NvU64 Time;
NvU32 Delta;
NvU32 Average;
// Get current time.
Time = tmrGetTime_HAL(pGpu, pTmr);
pKernelHead->Vblank.Info.Time.Current = Time;
if (pKernelHead->Vblank.Info.Count > 2)
{
Delta = NvU64_LO32(pKernelHead->Vblank.Info.Time.Current - pKernelHead->Vblank.Info.Time.Last);
Average = pKernelHead->Vblank.Info.Time.Average;
Average = (((Average << 2) - Average) + Delta) >> 2;
pKernelHead->Vblank.Info.Time.Average = Average;
}
pKernelHead->Vblank.Info.Count++;
pKernelHead->Vblank.Info.Time.Last = Time;
pKernelHead->Vblank.Info.Timeout--;
return pKernelHead->Vblank.Info.Timeout;
}
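Although this diff deletes the file, the averaging step in kheadTickVblankInfo_IMPL is worth decoding: (((Average << 2) - Average) + Delta) >> 2 is integer shorthand for (3*Average + Delta)/4, an exponential moving average that weights history 3:1 against the newest vblank delta. A small self-contained check of that identity:

#include <assert.h>
#include <stdint.h>

/* (((avg << 2) - avg) + delta) >> 2 == (3*avg + delta) / 4 */
static uint32_t vblank_avg(uint32_t avg, uint32_t delta)
{
    return (((avg << 2) - avg) + delta) >> 2;
}

int main(void)
{
    /* e.g. a 16000 us average and one 17000 us frame: (3*16000 + 17000)/4 */
    assert(vblank_avg(16000, 17000) == 16250);
    return 0;
}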


@ -942,6 +942,11 @@ kdispServiceVblank_KERNEL
// DPC/BottomHalf/whatever to service the rest of the
// vblank callback queues
//
for (i = 0; i < OBJ_MAX_HEADS; i++)
{
pKernelHead = KDISP_GET_HEAD(pKernelDisplay, i);
kheadResetPendingVblankForKernel_HAL(pGpu, pKernelHead, pThreadState);
}
}
else
{


@ -181,16 +181,6 @@ kflcnSecureReset_TU102
kflcnSwitchToFalcon_HAL(pGpu, pKernelFlcn);
}
void
_kflcnClearInterrupts(OBJGPU *pGpu, KernelFalcon *pKernelFlcn)
{
// Delay 1us in case the engine is still resetting.
osDelayUs(1);
// Clear Interrupts
kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_IRQMCLR, 0xffffffff);
}
/*!
* Enable or disable the Falcon to FALCON mode.
*/
@ -206,11 +196,6 @@ kflcnEnable_TU102
if (!bEnable)
{
// Switch to Falcon to release lockdown
kflcnSwitchToFalcon_HAL(pGpu, pKernelFlcn);
_kflcnClearInterrupts(pGpu, pKernelFlcn);
// Disable in PMC if engine is present in PMC
if (pKernelFlcn->pmcEnableMask > 0)
{


@ -3734,7 +3734,12 @@ kchannelUpdateWorkSubmitTokenNotifIndex_IMPL
NV_CHECK_OR_RETURN(LEVEL_INFO, index != NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR,
NV_ERR_INVALID_ARGUMENT);
notificationBufferSize = (index + 1) * sizeof(NvNotification);
// Check for integer overflows
if (((index + 1) < index) ||
!portSafeMulU64(index + 1, sizeof(NvNotification), &notificationBufferSize))
{
return NV_ERR_OUT_OF_RANGE;
}
status = deviceGetByInstance(pClient, gpuGetDeviceInstance(pGpu), &pDevice);
if (status != NV_OK)
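The hunk above replaces the unchecked (index + 1) * sizeof(NvNotification) with a wrap check on the increment plus a guarded multiply. A standalone sketch of the same pattern: safe_mul_u64 is a stand-in for RM's portSafeMulU64, and the 16-byte element size is assumed purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for RM's portSafeMulU64: multiply, report wraparound. */
static int safe_mul_u64(uint64_t a, uint64_t b, uint64_t *out)
{
    if (a != 0 && b > UINT64_MAX / a)
        return 0;                       /* product would not fit */
    *out = a * b;
    return 1;
}

int main(void)
{
    uint32_t index = UINT32_MAX;        /* worst case from user space */
    uint64_t size;

    /* index + 1 can wrap to 0 on its own, so check that first, then
     * let the helper guard the multiply (element size of 16 bytes is
     * an assumption for this sketch). */
    if (((index + 1) < index) ||
        !safe_mul_u64((uint64_t)index + 1, 16, &size))
        puts("rejected: out of range");
    else
        printf("buffer size = %llu\n", (unsigned long long)size);
    return 0;
}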


@ -187,8 +187,6 @@ _gpuDetectNvswitchSupport
OBJGPU *pGpu
)
{
NvU32 data32;
//
// A slowdown threshold of 0 leads to a driver crash with a DIVIDE ERROR,
// hence we set it to 1.
@ -222,12 +220,12 @@ _gpuDetectNvswitchSupport
}
}
if (osReadRegistryDword(pGpu, NV_REG_STR_RM_GPU_FABRIC_PROBE, &data32) == NV_OK)
if (pGpu->fabricProbeRegKeyOverride)
{
pGpu->fabricProbeSlowdownThreshold =
NV_MAX(DRF_VAL(_REG_STR, _RM_GPU_FABRIC_PROBE, _SLOWDOWN_THRESHOLD, data32), 1);
NV_MAX(DRF_VAL(_REG_STR, _RM_GPU_FABRIC_PROBE, _SLOWDOWN_THRESHOLD, pGpu->fabricProbeRegKeyOverride), 1);
pGpu->fabricProbeRetryDelay =
DRF_VAL(_REG_STR, _RM_GPU_FABRIC_PROBE, _DELAY, data32);
DRF_VAL(_REG_STR, _RM_GPU_FABRIC_PROBE, _DELAY, pGpu->fabricProbeRegKeyOverride);
if (pGpu->fabricProbeRetryDelay)
{
@ -2485,10 +2483,7 @@ _gpuSetVgpuMgrConfig
RM_API *pPeerRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
NV2080_CTRL_VGPU_MGR_INTERNAL_SET_VGPU_MGR_CONFIG_PARAMS params = {0};
if (osIsVgpuVfioPresent() != NV_OK)
return NV_OK;
params.bSupportHeterogeneousTimeSlicedVgpuTypes = (osIsVgpuVfioPresent() == NV_OK);
params.bSupportHeterogeneousTimeSlicedVgpuTypes = kvgpumgrIsHeterogeneousVgpuSupported();
NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
pPeerRmApi->Control(pPeerRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
@ -4615,6 +4610,7 @@ VGPU_STATIC_INFO *gpuGetStaticInfo(OBJGPU *pGpu)
GspStaticConfigInfo *gpuGetGspStaticInfo(OBJGPU *pGpu)
{
NV_ASSERT_OR_RETURN(GPU_GET_KERNEL_GSP(pGpu) != NULL, NULL);
return &(GPU_GET_KERNEL_GSP(pGpu)->gspStaticInfo);
}
@ -4622,6 +4618,7 @@ OBJRPC *gpuGetGspClientRpc(OBJGPU *pGpu)
{
if (IS_GSP_CLIENT(pGpu))
{
NV_ASSERT_OR_RETURN(GPU_GET_KERNEL_GSP(pGpu) != NULL, NULL);
return GPU_GET_KERNEL_GSP(pGpu)->pRpc;
}
return NULL;


@ -32,10 +32,6 @@
#include "virtualization/hypervisor/hypervisor.h"
#define __VGPU_SRIOV_ENABLED_SKUS__
#include "g_vgpu_resman_specific.h" // isSriovEnabledSKU
#undef __VGPU_SRIOV_ENABLED_SKUS__
static void _gpuInitGlobalSurfaceOverride(OBJGPU *pGpu);
/*!
@ -118,15 +114,7 @@ gpuInitRegistryOverrides_KERNEL
{
if (hypervisorIsVgxHyper() && !RMCFG_FEATURE_PLATFORM_GSP)
{
NvU32 devID = 0;
NvU32 ssID = 0;
gpuReadDeviceId_HAL(pGpu, &devID, &ssID);
devID = DRF_VAL(_PCI, _DEVID, _DEVICE, devID);
ssID = DRF_VAL(_PCI, _DEVID, _DEVICE, ssID);
if (isSriovEnabledSKU(devID, ssID))
if (!IsTURING(pGpu))
{
pGpu->bSriovEnabled = NV_TRUE;
@ -180,6 +168,12 @@ gpuInitRegistryOverrides_KERNEL
pGpu->bSplitVasManagementServerClientRm);
}
if (osReadRegistryDword(pGpu, NV_REG_STR_RM_GPU_FABRIC_PROBE, &pGpu->fabricProbeRegKeyOverride) == NV_OK)
{
pGpu->fabricProbeRegKeyOverride |= DRF_NUM(_REG_STR, _RM_GPU_FABRIC_PROBE, _OVERRIDE, 1);
}
return NV_OK;
}
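Taken together with the _gpuDetectNvswitchSupport hunk above, the pattern is: read NV_REG_STR_RM_GPU_FABRIC_PROBE once at init time, OR in an _OVERRIDE bit (apparently so a raw value of zero still reads back as "key was present"), and let later consumers decode the cached word instead of re-reading the registry. A sketch of that cache-with-presence-bit idea using a made-up field layout; the real DRF fields and osReadRegistryDword are RM-internal:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout for the cached regkey word: low 16 bits carry the
 * threshold field, one spare bit records "a value was present". */
#define PROBE_OVERRIDE_BIT   (1u << 31)
#define PROBE_THRESHOLD(v)   ((v) & 0xffffu)

static uint32_t fabric_probe_override;   /* analogue of pGpu->fabricProbeRegKeyOverride */

static int read_registry(uint32_t *out)  /* stand-in for osReadRegistryDword; 0 == success */
{
    *out = 0x0004;                       /* pretend the key is set to 4 */
    return 0;
}

int main(void)
{
    uint32_t data;

    /* Init time: read once, OR in the presence bit. */
    if (read_registry(&data) == 0)
        fabric_probe_override = data | PROBE_OVERRIDE_BIT;

    /* Later consumers only test the cached word, never the registry. */
    if (fabric_probe_override)
    {
        uint32_t threshold = PROBE_THRESHOLD(fabric_probe_override);
        if (threshold == 0)
            threshold = 1;               /* avoid the divide-by-zero noted above */
        printf("slowdown threshold = %u\n", threshold);
    }
    return 0;
}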


@ -229,7 +229,8 @@ _nv8deCtrlCmdReadWriteSurface
}
else if (traceArg.aperture == ADDR_FBMEM)
{
memdescCreate(&pMemDesc, pGpu, curSize, 0, NV_TRUE, traceArg.aperture, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE);
NV_ASSERT_OK_OR_RETURN(memdescCreate(&pMemDesc, pGpu, curSize, 0, NV_TRUE,
traceArg.aperture, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE));
memdescDescribe(pMemDesc, traceArg.aperture, traceArg.pa, curSize);
}
@ -684,6 +685,10 @@ NV_STATUS ksmdbgssnCtrlCmdDebugExecRegOps_IMPL
NV_STATUS status = NV_OK;
NvBool isClientGspPlugin = NV_FALSE;
NV_CHECK_OR_RETURN(LEVEL_ERROR,
pParams->regOpCount <= NV83DE_CTRL_GPU_EXEC_REG_OPS_MAX_OPS,
NV_ERR_INVALID_ARGUMENT);
// Check if the user has permission to access the register offset
NV_CHECK_OK_OR_RETURN(LEVEL_INFO,
gpuValidateRegOps(pGpu, pParams->regOps, pParams->regOpCount,
@ -725,8 +730,11 @@ ksmdbgssnCtrlCmdDebugReadBatchMemory_IMPL
{
NV_STATUS localStatus = NV_OK;
NvP64 pData = (NvP64)(((NvU8 *)pParams->pData) + pParams->entries[i].dataOffset);
NvU32 endingOffset;
NV_CHECK_OR_ELSE(LEVEL_ERROR,
pParams->entries[i].dataOffset < pParams->dataLength,
portSafeAddU32(pParams->entries[i].dataOffset, pParams->entries[i].length, &endingOffset) &&
(endingOffset <= pParams->dataLength),
localStatus = NV_ERR_INVALID_OFFSET;
goto updateStatus; );
@ -761,12 +769,18 @@ ksmdbgssnCtrlCmdDebugWriteBatchMemory_IMPL
NV_STATUS status = NV_OK;
NvU32 i;
NV_CHECK_OR_RETURN(LEVEL_ERROR, pParams->count <= MAX_ACCESS_MEMORY_OPS,
NV_ERR_INVALID_ARGUMENT);
for (i = 0; i < pParams->count; ++i)
{
NV_STATUS localStatus = NV_OK;
NvP64 pData = (NvP64)(((NvU8 *)pParams->pData) + pParams->entries[i].dataOffset);
NvU32 endingOffset;
NV_CHECK_OR_ELSE(LEVEL_ERROR,
pParams->entries[i].dataOffset + pParams->entries[i].length <= pParams->dataLength,
portSafeAddU32(pParams->entries[i].dataOffset, pParams->entries[i].length, &endingOffset) &&
(endingOffset <= pParams->dataLength),
localStatus = NV_ERR_INVALID_OFFSET;
goto updateStatus; );
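Both batch-memory hunks above converge on the same idiom: validate dataOffset + length with a checked add before comparing against dataLength, since the plain 32-bit sum can wrap and defeat the bound. A minimal sketch, with safe_add_u32 standing in for RM's portSafeAddU32:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Stand-in for RM's portSafeAddU32: add, report wraparound. */
static bool safe_add_u32(uint32_t a, uint32_t b, uint32_t *out)
{
    if (b > UINT32_MAX - a)
        return false;                   /* a + b would wrap */
    *out = a + b;
    return true;
}

/* An entry is valid only if [offset, offset + length) fits in the buffer. */
static bool entry_in_bounds(uint32_t offset, uint32_t length, uint32_t total)
{
    uint32_t end;
    return safe_add_u32(offset, length, &end) && end <= total;
}

int main(void)
{
    /* The naive offset + length <= total test wraps to 0x10 here and
     * wrongly passes; the checked add rejects it. */
    assert(!entry_in_bounds(0xFFFFFFF0u, 0x20u, 0x1000u));
    assert(entry_in_bounds(0x100u, 0x20u, 0x1000u));
    return 0;
}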


@ -26,6 +26,12 @@
#include "os/os.h"
#include "gpu/mem_mgr/mem_desc.h"
#include "gpu/mem_mgr/fbsr.h"
#include "class/cl906f.h"
#include "nvrm_registry.h"
#include "gpu/mem_mgr/mem_mgr.h"
#include "gpu/bus/kern_bus.h"
#include "rmapi/client.h"
#include "vgpu/rpc_headers.h"
NV_STATUS
fbsrObjectInit_IMPL(OBJFBSR *pFbsr, NvU32 type)
@ -96,3 +102,13 @@ fbsrFreeReservedSysMemoryForPowerMgmt_IMPL(OBJFBSR *pFbsr)
pFbsr->pSysReservedMemDesc = NULL;
}
}
/*!
 * @brief Create a channel for FB save/restore.
 *
 * @param[in] pGpu OBJGPU pointer
 * @param[in] pFbsr OBJFBSR pointer
 *
 * @returns NV_STATUS
*/


@ -254,6 +254,10 @@ memdescCreate
if (pMemoryManager && pMemoryManager->sysmemPageSize)
{
allocSize = RM_ALIGN_UP(allocSize, pMemoryManager->sysmemPageSize);
if (allocSize < Size)
{
return NV_ERR_INVALID_ARGUMENT;
}
}
}
@ -278,7 +282,10 @@ memdescCreate
if ((AddressSpace == ADDR_SYSMEM || AddressSpace == ADDR_UNKNOWN) &&
PhysicallyContiguous && (Alignment > RM_PAGE_SIZE))
{
allocSize += (Alignment - RM_PAGE_SIZE);
if (!portSafeAddU64(allocSize, (Alignment - RM_PAGE_SIZE), &allocSize))
{
return NV_ERR_INVALID_ARGUMENT;
}
}
}
}
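memdescCreate now rejects two wrap cases: a page-size round-up whose result comes back smaller than the input (the RM_ALIGN_UP addition overflowed), and alignment padding added through portSafeAddU64. A sketch of the round-up check under the usual power-of-two granularity assumption:

#include <assert.h>
#include <stdint.h>

/* Power-of-two round-up; gran must be a power of two. */
#define ALIGN_UP(v, gran)  (((v) + ((gran) - 1)) & ~((uint64_t)(gran) - 1))

/* After rounding up, a result smaller than the input means the addition
 * inside ALIGN_UP wrapped past 2^64: reject as an invalid argument. */
static int aligned_size(uint64_t size, uint64_t page, uint64_t *out)
{
    uint64_t a = ALIGN_UP(size, page);
    if (a < size)
        return 0;
    *out = a;
    return 1;
}

int main(void)
{
    uint64_t out;
    assert(aligned_size(UINT64_MAX - 1, 4096, &out) == 0);   /* wraps */
    assert(aligned_size(5000, 4096, &out) == 1 && out == 8192);
    return 0;
}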
@ -2717,6 +2724,7 @@ void memdescGetPhysAddrsForGpu(MEMORY_DESCRIPTOR *pMemDesc,
else
{
pageIndex = offset >> RM_PAGE_SHIFT;
NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR, pageIndex < pMemDesc->PageCount);
pAddresses[i] = pteArray[pageIndex] + (offset & RM_PAGE_MASK);
}


@ -1961,7 +1961,7 @@ memmgrFillComprInfo_IMPL
pComprInfo->compPageShift = pMemorySystemConfig->comprPageShift;
pComprInfo->compTagLineMin = compTagStartOffset;
pComprInfo->compPageIndexLo = (NvU32)(surfOffset >> pComprInfo->compPageShift);
pComprInfo->compPageIndexHi = (NvU32)((surfOffset + pageSize * pageCount - 1) >> pComprInfo->compPageShift);
pComprInfo->compPageIndexHi = (NvU32)((surfOffset + (NvU64)pageSize * (NvU64)pageCount - 1) >> pComprInfo->compPageShift);
pComprInfo->compTagLineMultiplier = 1;
return NV_OK;
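The small-looking cast fix above matters: with 32-bit pageSize and pageCount, the product is computed in 32 bits and truncates before it is ever widened, so casting an operand first forces the whole multiply into 64-bit arithmetic. A compilable illustration:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t page_size = 0x10000, page_count = 0x10000;  /* 64 KiB * 65536 = 4 GiB */

    /* 32-bit multiply wraps to 0 before the (too late) widening cast... */
    uint64_t wrong = (uint64_t)(page_size * page_count);
    /* ...casting one operand first keeps all 64 bits of the product. */
    uint64_t right = (uint64_t)page_size * (uint64_t)page_count;

    assert(wrong == 0);
    assert(right == 0x100000000ull);
    return 0;
}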

Some files were not shown because too many files have changed in this diff.