535.161.07

This commit is contained in:
Bernhard Stoeckner 2024-02-22 17:27:30 +01:00
parent ee55481a49
commit 6d33efe502
No known key found for this signature in database
GPG Key ID: 7D23DC2750FAC2E1
79 changed files with 75714 additions and 75417 deletions

View File

@ -2,6 +2,8 @@
## Release 535 Entries
### [535.161.07] 2024-02-22
### [535.154.05] 2024-01-16
### [535.146.02] 2023-12-07

View File

@ -1,7 +1,7 @@
# NVIDIA Linux Open GPU Kernel Module Source
This is the source release of the NVIDIA Linux open GPU kernel modules,
version 535.154.05.
version 535.161.07.
## How to Build
@ -17,7 +17,7 @@ as root:
Note that the kernel modules built here must be used with GSP
firmware and user-space NVIDIA GPU driver components from a corresponding
535.154.05 driver release. This can be achieved by installing
535.161.07 driver release. This can be achieved by installing
the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
option. E.g.,
@ -180,7 +180,7 @@ software applications.
## Compatible GPUs
The open-gpu-kernel-modules can be used on any Turing or later GPU
(see the table below). However, in the 535.154.05 release,
(see the table below). However, in the 535.161.07 release,
GeForce and Workstation support is still considered alpha-quality.
To enable use of the open kernel modules on GeForce and Workstation GPUs,
@ -188,7 +188,7 @@ set the "NVreg_OpenRmEnableUnsupportedGpus" nvidia.ko kernel module
parameter to 1. For more details, see the NVIDIA GPU driver end user
README here:
https://us.download.nvidia.com/XFree86/Linux-x86_64/535.154.05/README/kernel_open.html
https://us.download.nvidia.com/XFree86/Linux-x86_64/535.161.07/README/kernel_open.html
In the below table, if three IDs are listed, the first is the PCI Device
ID, the second is the PCI Subsystem Vendor ID, and the third is the PCI
@ -746,12 +746,15 @@ Subsystem Device ID.
| NVIDIA H800 PCIe | 2322 10DE 17A4 |
| NVIDIA H800 | 2324 10DE 17A6 |
| NVIDIA H800 | 2324 10DE 17A8 |
| NVIDIA H20 | 2329 10DE 198B |
| NVIDIA H20 | 2329 10DE 198C |
| NVIDIA H100 80GB HBM3 | 2330 10DE 16C0 |
| NVIDIA H100 80GB HBM3 | 2330 10DE 16C1 |
| NVIDIA H100 PCIe | 2331 10DE 1626 |
| NVIDIA H100 | 2339 10DE 17FC |
| NVIDIA H800 NVL | 233A 10DE 183A |
| NVIDIA GH200 120GB | 2342 10DE 16EB |
| NVIDIA GH200 120GB | 2342 10DE 1805 |
| NVIDIA GH200 480GB | 2342 10DE 1809 |
| NVIDIA GeForce RTX 3060 Ti | 2414 |
| NVIDIA GeForce RTX 3080 Ti Laptop GPU | 2420 |
@ -805,6 +808,7 @@ Subsystem Device ID.
| NVIDIA RTX A2000 12GB | 2571 10DE 1611 |
| NVIDIA RTX A2000 12GB | 2571 17AA 1611 |
| NVIDIA GeForce RTX 3050 | 2582 |
| NVIDIA GeForce RTX 3050 | 2584 |
| NVIDIA GeForce RTX 3050 Ti Laptop GPU | 25A0 |
| NVIDIA GeForce RTX 3050Ti Laptop GPU | 25A0 103C 8928 |
| NVIDIA GeForce RTX 3050Ti Laptop GPU | 25A0 103C 89F9 |
@ -837,7 +841,6 @@ Subsystem Device ID.
| NVIDIA RTX A2000 Embedded GPU | 25FA |
| NVIDIA RTX A500 Embedded GPU | 25FB |
| NVIDIA GeForce RTX 4090 | 2684 |
| NVIDIA GeForce RTX 4090 D | 2685 |
| NVIDIA RTX 6000 Ada Generation | 26B1 1028 16A1 |
| NVIDIA RTX 6000 Ada Generation | 26B1 103C 16A1 |
| NVIDIA RTX 6000 Ada Generation | 26B1 10DE 16A1 |
@ -846,6 +849,7 @@ Subsystem Device ID.
| NVIDIA RTX 5000 Ada Generation | 26B2 103C 17FA |
| NVIDIA RTX 5000 Ada Generation | 26B2 10DE 17FA |
| NVIDIA RTX 5000 Ada Generation | 26B2 17AA 17FA |
| NVIDIA RTX 5880 Ada Generation | 26B3 103C 1934 |
| NVIDIA RTX 5880 Ada Generation | 26B3 10DE 1934 |
| NVIDIA L40 | 26B5 10DE 169D |
| NVIDIA L40 | 26B5 10DE 17DA |

View File

@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
EXTRA_CFLAGS += -I$(src)
EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args
EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"535.154.05\"
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"535.161.07\"
ifneq ($(SYSSRCHOST1X),)
EXTRA_CFLAGS += -I$(SYSSRCHOST1X)

View File

@ -615,6 +615,14 @@ typedef enum
#define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv) \
(((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0)
/*
* For console setup by EFI GOP, the base address is BAR1.
* For console setup by VBIOS, the base address is BAR2 + 16MB.
*/
#define NV_IS_CONSOLE_MAPPED(nv, addr) \
(((addr) == (nv)->bars[NV_GPU_BAR_INDEX_FB].cpu_address) || \
((addr) == ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000)))
#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv) \
((nv)->iso_iommu_present)
@ -874,6 +882,8 @@ NvBool NV_API_CALL nv_match_gpu_os_info(nv_state_t *, void *);
NvU32 NV_API_CALL nv_get_os_type(void);
void NV_API_CALL nv_get_updated_emu_seg(NvU32 *start, NvU32 *end);
void NV_API_CALL nv_get_screen_info(nv_state_t *, NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64 *);
struct dma_buf;
typedef struct nv_dma_buf nv_dma_buf_t;
struct drm_gem_object;

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -162,10 +162,9 @@ NvBool NV_API_CALL os_is_vgx_hyper (void);
NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32);
NvBool NV_API_CALL os_is_grid_supported (void);
NvU32 NV_API_CALL os_get_grid_csp_support (void);
void NV_API_CALL os_get_screen_info (NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64, NvU64);
void NV_API_CALL os_bug_check (NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **, void**);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *);
NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *);
NV_STATUS NV_API_CALL os_get_euid (NvU32 *);

View File

@ -5213,10 +5213,16 @@ compile_test() {
# Added by commit 7b7b27214bba ("mm/memory_hotplug: introduce
# add_memory_driver_managed()") in v5.8-rc1 (2020-06-05)
#
# Before commit 3a0aaefe4134 ("mm/memory_hotplug: guard more
# declarations by CONFIG_MEMORY_HOTPLUG") in v5.10, the
# add_memory_driver_managed() was not guarded.
#
CODE="
#include <linux/memory_hotplug.h>
void conftest_add_memory_driver_managed() {
#if defined(CONFIG_MEMORY_HOTPLUG)
add_memory_driver_managed();
#endif
}"
compile_check_conftest "$CODE" "NV_ADD_MEMORY_DRIVER_MANAGED_PRESENT" "" "functions"
@ -6396,10 +6402,16 @@ compile_test() {
# DRM_UNLOCKED was removed by commit 2798ffcc1d6a ("drm: Remove
# locking for legacy ioctls and DRM_UNLOCKED") in Linux
# next-20231208.
#
# DRM_UNLOCKED definition was moved from drmP.h to drm_ioctl.h by
# commit 2640981f3600 ("drm: document drm_ioctl.[hc]") in v4.12.
CODE="
#if defined(NV_DRM_DRM_IOCTL_H_PRESENT)
#include <drm/drm_ioctl.h>
#endif
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
int flags = DRM_UNLOCKED;"
compile_check_conftest "$CODE" "NV_DRM_UNLOCKED_IOCTL_FLAG_PRESENT" "" "types"

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -577,12 +577,9 @@ int nvidia_mmap_helper(
//
// This path is similar to the sysmem mapping code.
// TODO: Refactor is needed as part of bug#2001704.
// Use pfn_valid to determine whether the physical address has
// backing struct page. This is used to isolate P8 from P9.
//
if ((nv_get_numa_status(nvl) == NV_NUMA_STATUS_ONLINE) &&
!IS_REG_OFFSET(nv, access_start, access_len) &&
(pfn_valid(PFN_DOWN(mmap_start))))
!IS_REG_OFFSET(nv, access_start, access_len))
{
ret = nvidia_mmap_numa(vma, mmap_context);
if (ret)

View File

@ -5640,3 +5640,128 @@ failed:
NV_DEV_PRINTF(NV_DBG_INFO, nv, "Cannot get EGM info\n");
return NV_ERR_NOT_SUPPORTED;
}
/*
 * nv_get_screen_info() - report the boot console framebuffer parameters if
 * (and only if) the console surface lives in one of this GPU's BARs.
 *
 * All outputs are zeroed first; they stay zero when no console mapped to a
 * GPU BAR is found. Three detection paths are tried in order:
 *   1. registered fbdev devices (CONFIG_FB),
 *   2. the global 'screen_info' structure (when its symbol is exported),
 *   3. child resources of the GPU's PCI BAR resources (fallback when
 *      'screen_info' is not available, e.g. GPL-only export on ARM64 >= v6.7).
 *
 * NV_IS_CONSOLE_MAPPED(nv, addr) checks addr against BAR1 (EFI GOP console)
 * and BAR2 + 16MB (VBIOS console) — see its definition in the header.
 *
 * @param nv                GPU state whose BARs are checked.
 * @param pPhysicalAddress  out: console base physical address (0 if none).
 * @param pFbWidth          out: visible width in pixels.
 * @param pFbHeight         out: visible height in pixels.
 * @param pFbDepth          out: bits per pixel.
 * @param pFbPitch          out: bytes per scanline.
 * @param pFbSize           out: total size (pitch * height), or the full
 *                          resource size on the PCI-resource fallback path.
 */
void NV_API_CALL nv_get_screen_info(
nv_state_t *nv,
NvU64 *pPhysicalAddress,
NvU16 *pFbWidth,
NvU16 *pFbHeight,
NvU16 *pFbDepth,
NvU16 *pFbPitch,
NvU64 *pFbSize
)
{
/* Default: no console found on this GPU. */
*pPhysicalAddress = 0;
*pFbWidth = *pFbHeight = *pFbDepth = *pFbPitch = *pFbSize = 0;
#if defined(CONFIG_FB) && defined(NV_NUM_REGISTERED_FB_PRESENT)
/* Path 1: scan the kernel's registered fbdev consoles. */
if (num_registered_fb > 0)
{
int i;
for (i = 0; i < num_registered_fb; i++)
{
/* registered_fb[] may be sparse. */
if (!registered_fb[i])
continue;
/* Make sure base address is mapped to GPU BAR */
if (NV_IS_CONSOLE_MAPPED(nv, registered_fb[i]->fix.smem_start))
{
*pPhysicalAddress = registered_fb[i]->fix.smem_start;
*pFbWidth = registered_fb[i]->var.xres;
*pFbHeight = registered_fb[i]->var.yres;
*pFbDepth = registered_fb[i]->var.bits_per_pixel;
*pFbPitch = registered_fb[i]->fix.line_length;
*pFbSize = (NvU64)(*pFbHeight) * (NvU64)(*pFbPitch);
return;
}
}
}
#endif
/*
 * If the screen info is not found in the registered FBs then fallback
 * to the screen_info structure.
 *
 * The SYSFB_SIMPLEFB option, if enabled, marks VGA/VBE/EFI framebuffers as
 * generic framebuffers so the new generic system-framebuffer drivers can
 * be used instead. DRM_SIMPLEDRM drives the generic system-framebuffers
 * device created by SYSFB_SIMPLEFB.
 *
 * SYSFB_SIMPLEFB registers a dummy framebuffer which does not contain the
 * information required by nv_get_screen_info(), therefore you need to
 * fall back onto the screen_info structure.
 *
 * After commit b8466fe82b79 ("efi: move screen_info into efi init code")
 * in v6.7, 'screen_info' is exported as GPL licensed symbol for ARM64.
 */
#if NV_CHECK_EXPORT_SYMBOL(screen_info)
/*
 * If there is not a framebuffer console, return 0 size.
 *
 * orig_video_isVGA is set to 1 during early Linux kernel
 * initialization, and then will be set to a value, such as
 * VIDEO_TYPE_VLFB or VIDEO_TYPE_EFI if an fbdev console is used.
 */
if (screen_info.orig_video_isVGA > 1)
{
NvU64 physAddr = screen_info.lfb_base;
#if defined(VIDEO_CAPABILITY_64BIT_BASE)
/* Fold in the upper 32 bits of the linear framebuffer base, if any. */
physAddr |= (NvU64)screen_info.ext_lfb_base << 32;
#endif
/* Make sure base address is mapped to GPU BAR */
if (NV_IS_CONSOLE_MAPPED(nv, physAddr))
{
*pPhysicalAddress = physAddr;
*pFbWidth = screen_info.lfb_width;
*pFbHeight = screen_info.lfb_height;
*pFbDepth = screen_info.lfb_depth;
*pFbPitch = screen_info.lfb_linelength;
*pFbSize = (NvU64)(*pFbHeight) * (NvU64)(*pFbPitch);
}
}
#else
/* Path 3: 'screen_info' symbol unavailable — inspect PCI BAR resources. */
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct pci_dev *pci_dev = nvl->pci_dev;
int i;
/* No PCI device (e.g. SoC display) — nothing to scan on this path. */
if (pci_dev == NULL)
return;
/* The loop below assumes the FB and IMEM BAR indices are adjacent. */
BUILD_BUG_ON(NV_GPU_BAR_INDEX_IMEM != NV_GPU_BAR_INDEX_FB + 1);
for (i = NV_GPU_BAR_INDEX_FB; i <= NV_GPU_BAR_INDEX_IMEM; i++)
{
int bar_index = nv_bar_index_to_os_bar_index(pci_dev, i);
struct resource *gpu_bar_res = &pci_dev->resource[bar_index];
struct resource *res = gpu_bar_res->child;
/*
 * Console resource will become child resource of pci-dev resource.
 * Check if child resource start address matches with expected
 * console start address.
 */
if ((res != NULL) &&
NV_IS_CONSOLE_MAPPED(nv, res->start))
{
NvU32 res_name_len = strlen(res->name);
/*
 * The resource name ends with 'fb' (efifb, vesafb, etc.).
 * For simple-framebuffer, the resource name is 'BOOTFB'.
 * Confirm if the resources name either ends with 'fb' or 'FB'.
 */
if ((res_name_len > 2) &&
!strcasecmp((res->name + res_name_len - 2), "fb"))
{
/*
 * NOTE(review): only address and size are discoverable here;
 * width/height/depth/pitch remain 0 on this path.
 */
*pPhysicalAddress = res->start;
*pFbSize = resource_size(res);
return;
}
}
}
}
#endif
}

View File

@ -1197,90 +1197,6 @@ NvBool NV_API_CALL os_is_efi_enabled(void)
return efi_enabled(EFI_BOOT);
}
/*
 * os_get_screen_info() - report the boot console framebuffer parameters if
 * the console surface matches one of the two caller-supplied GPU BAR
 * console addresses.
 *
 * All outputs are zeroed first and remain zero when no matching console is
 * found. Two detection paths are tried in order: registered fbdev devices
 * (CONFIG_FB), then the global 'screen_info' structure (when its symbol is
 * exported).
 *
 * @param pPhysicalAddress    out: console base physical address (0 if none).
 * @param pFbWidth            out: visible width in pixels.
 * @param pFbHeight           out: visible height in pixels.
 * @param pFbDepth            out: bits per pixel.
 * @param pFbPitch            out: bytes per scanline.
 * @param consoleBar1Address  expected console base for the BAR1 (EFI GOP) case.
 * @param consoleBar2Address  expected console base for the BAR2 (VBIOS) case.
 */
void NV_API_CALL os_get_screen_info(
NvU64 *pPhysicalAddress,
NvU16 *pFbWidth,
NvU16 *pFbHeight,
NvU16 *pFbDepth,
NvU16 *pFbPitch,
NvU64 consoleBar1Address,
NvU64 consoleBar2Address
)
{
/* Default: no console found at either candidate address. */
*pPhysicalAddress = 0;
*pFbWidth = *pFbHeight = *pFbDepth = *pFbPitch = 0;
#if defined(CONFIG_FB) && defined(NV_NUM_REGISTERED_FB_PRESENT)
/* Path 1: scan the kernel's registered fbdev consoles. */
if (num_registered_fb > 0)
{
int i;
for (i = 0; i < num_registered_fb; i++)
{
/* registered_fb[] may be sparse. */
if (!registered_fb[i])
continue;
/* Make sure base address is mapped to GPU BAR */
if ((registered_fb[i]->fix.smem_start == consoleBar1Address) ||
(registered_fb[i]->fix.smem_start == consoleBar2Address))
{
*pPhysicalAddress = registered_fb[i]->fix.smem_start;
*pFbWidth = registered_fb[i]->var.xres;
*pFbHeight = registered_fb[i]->var.yres;
*pFbDepth = registered_fb[i]->var.bits_per_pixel;
*pFbPitch = registered_fb[i]->fix.line_length;
return;
}
}
}
#endif
/*
 * If the screen info is not found in the registered FBs then fallback
 * to the screen_info structure.
 *
 * The SYSFB_SIMPLEFB option, if enabled, marks VGA/VBE/EFI framebuffers as
 * generic framebuffers so the new generic system-framebuffer drivers can
 * be used instead. DRM_SIMPLEDRM drives the generic system-framebuffers
 * device created by SYSFB_SIMPLEFB.
 *
 * SYSFB_SIMPLEFB registers a dummy framebuffer which does not contain the
 * information required by os_get_screen_info(), therefore you need to
 * fall back onto the screen_info structure.
 *
 * After commit b8466fe82b79 ("efi: move screen_info into efi init code")
 * in v6.7, 'screen_info' is exported as GPL licensed symbol for ARM64.
 */
#if NV_CHECK_EXPORT_SYMBOL(screen_info)
/*
 * If there is not a framebuffer console, return 0 size.
 *
 * orig_video_isVGA is set to 1 during early Linux kernel
 * initialization, and then will be set to a value, such as
 * VIDEO_TYPE_VLFB or VIDEO_TYPE_EFI if an fbdev console is used.
 */
if (screen_info.orig_video_isVGA > 1)
{
NvU64 physAddr = screen_info.lfb_base;
#if defined(VIDEO_CAPABILITY_64BIT_BASE)
/* Fold in the upper 32 bits of the linear framebuffer base, if any. */
physAddr |= (NvU64)screen_info.ext_lfb_base << 32;
#endif
/* Make sure base address is mapped to GPU BAR */
if ((physAddr == consoleBar1Address) ||
(physAddr == consoleBar2Address))
{
*pPhysicalAddress = physAddr;
*pFbWidth = screen_info.lfb_width;
*pFbHeight = screen_info.lfb_height;
*pFbDepth = screen_info.lfb_depth;
*pFbPitch = screen_info.lfb_linelength;
}
}
#endif
}
void NV_API_CALL os_dump_stack(void)
{
dump_stack();

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -87,59 +87,10 @@ static NV_STATUS get_io_ptes(struct vm_area_struct *vma,
return NV_OK;
}
/*!
* @brief Pins user IO pages that have been mapped to the user processes virtual
* address space with remap_pfn_range.
*
* @param[in] vma VMA that contains the virtual address range given by the
* start and the page count.
* @param[in] start Beginning of the virtual address range of the IO pages.
* @param[in] page_count Number of pages to pin from start.
* @param[in,out] page_array Storage array for pointers to the pinned pages.
* Must be large enough to contain at least page_count
* pointers.
*
* @return NV_OK if the pages were pinned successfully, error otherwise.
*/
/*
 * Pin page_count page-backed IO pages starting at user virtual address
 * 'start' within 'vma', storing the pinned struct page pointers in
 * page_array. All-or-nothing: on any failure every page pinned so far is
 * released and NV_ERR_INVALID_ADDRESS is returned. See the doxygen block
 * above for the full parameter contract.
 */
static NV_STATUS get_io_pages(struct vm_area_struct *vma,
NvUPtr start,
NvU64 page_count,
struct page **page_array)
{
NV_STATUS rmStatus = NV_OK;
NvU64 i, pinned = 0;
unsigned long pfn;
for (i = 0; i < page_count; i++)
{
/*
 * Resolve the pfn for each page; a lookup failure or a pfn without a
 * backing struct page aborts the loop (partial pins undone below).
 */
if ((nv_follow_pfn(vma, (start + (i * PAGE_SIZE)), &pfn) < 0) ||
(!pfn_valid(pfn)))
{
rmStatus = NV_ERR_INVALID_ADDRESS;
break;
}
// Page-backed memory mapped to userspace with remap_pfn_range
page_array[i] = pfn_to_page(pfn);
get_page(page_array[i]);
pinned++;
}
/* Roll back partial progress so the caller never owns a partial pin. */
if (pinned < page_count)
{
for (i = 0; i < pinned; i++)
put_page(page_array[i]);
rmStatus = NV_ERR_INVALID_ADDRESS;
}
return rmStatus;
}
NV_STATUS NV_API_CALL os_lookup_user_io_memory(
void *address,
NvU64 page_count,
NvU64 **pte_array,
void **page_array
NvU64 **pte_array
)
{
NV_STATUS rmStatus;
@ -187,18 +138,9 @@ NV_STATUS NV_API_CALL os_lookup_user_io_memory(
goto done;
}
if (pfn_valid(pfn))
{
rmStatus = get_io_pages(vma, start, page_count, (struct page **)result_array);
if (rmStatus == NV_OK)
*page_array = (void *)result_array;
}
else
{
rmStatus = get_io_ptes(vma, start, page_count, (NvU64 **)result_array);
if (rmStatus == NV_OK)
*pte_array = (NvU64 *)result_array;
}
rmStatus = get_io_ptes(vma, start, page_count, (NvU64 **)result_array);
if (rmStatus == NV_OK)
*pte_array = (NvU64 *)result_array;
done:
nv_mmap_read_unlock(mm);

View File

@ -163,6 +163,7 @@ namespace DisplayPort
bool _enableFecCheckForDDS;
bool _isLTPhyRepeaterSupported;
bool _isMSTPCONCapsReadDisabled;
//
// LTTPR count reported by RM, it might not be the same with DPLib probe
// For example, some Intel LTTPR might not be ready to response 0xF0000 probe
@ -383,6 +384,7 @@ namespace DisplayPort
bool isActive();
bool isEDP();
bool skipPowerdownEdpPanelWhenHeadDetach();
bool isMSTPCONCapsReadDisabled();
bool supportMSAOverMST();
bool queryAndUpdateDfpParams();
bool controlRateGoverning(NvU32 head, bool enable, bool updateNow);

View File

@ -144,6 +144,9 @@ namespace DisplayPort
// Check if we should skip power down eDP when head detached.
virtual bool skipPowerdownEdpPanelWhenHeadDetach() = 0;
// Check if we should skip reading PCON Caps in MST case.
virtual bool isMSTPCONCapsReadDisabled() = 0;
// Get GPU DSC capabilities
virtual void getDscCaps(bool *pbDscSupported = NULL,
unsigned *pEncoderColorFormatMask = NULL,

View File

@ -77,6 +77,10 @@
//
#define NV_DP_DSC_MST_CAP_BUG_3143315 "DP_DSC_MST_CAP_BUG_3143315"
//
// Bug 4388987 : This regkey will disable reading PCON caps for MST.
//
#define NV_DP_REGKEY_MST_PCON_CAPS_READ_DISABLED "DP_BUG_4388987_WAR"
//
// Data Base used to store all the regkey values.
@ -112,6 +116,7 @@ struct DP_REGKEY_DATABASE
bool bPowerDownPhyBeforeD3;
bool bCheckFECForDynamicMuxDSCPanel;
bool bReassessMaxLink;
bool bMSTPCONCapsReadDisabled;
};
#endif //INCLUDED_DP_REGKEYDATABASE_H

View File

@ -682,8 +682,11 @@ create:
//
else if (newDev->parent && (newDev->parent)->isVirtualPeerDevice())
{
newDev->parent->getPCONCaps(&(newDev->pconCaps));
newDev->connectorType = newDev->parent->getConnectorType();
if (!main->isMSTPCONCapsReadDisabled())
{
newDev->parent->getPCONCaps(&(newDev->pconCaps));
newDev->connectorType = newDev->parent->getConnectorType();
}
}
}

View File

@ -95,7 +95,8 @@ const struct
{NV_DP_DSC_MST_CAP_BUG_3143315, &dpRegkeyDatabase.bDscMstCapBug3143315, DP_REG_VAL_BOOL},
{NV_DP_CHECK_FEC_FOR_DDS_DSC_PANEL, &dpRegkeyDatabase.bCheckFECForDynamicMuxDSCPanel, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_POWER_DOWN_PHY, &dpRegkeyDatabase.bPowerDownPhyBeforeD3, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_REASSESS_MAX_LINK, &dpRegkeyDatabase.bReassessMaxLink, DP_REG_VAL_BOOL}
{NV_DP_REGKEY_REASSESS_MAX_LINK, &dpRegkeyDatabase.bReassessMaxLink, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_MST_PCON_CAPS_READ_DISABLED, &dpRegkeyDatabase.bMSTPCONCapsReadDisabled, DP_REG_VAL_BOOL}
};
EvoMainLink::EvoMainLink(EvoInterface * provider, Timer * timer) :
@ -894,6 +895,7 @@ void EvoMainLink::applyRegkeyOverrides()
_applyLinkBwOverrideWarRegVal = dpRegkeyDatabase.bLinkBwOverrideWarApplied;
_enableMSAOverrideOverMST = dpRegkeyDatabase.bMsaOverMstEnabled;
_enableFecCheckForDDS = dpRegkeyDatabase.bCheckFECForDynamicMuxDSCPanel;
_isMSTPCONCapsReadDisabled = dpRegkeyDatabase.bMSTPCONCapsReadDisabled;
}
NvU32 EvoMainLink::getRegkeyValue(const char *key)
@ -1510,6 +1512,10 @@ bool EvoMainLink::skipPowerdownEdpPanelWhenHeadDetach()
return _skipPowerdownEDPPanelWhenHeadDetach;
}
// Returns true when the DP_BUG_4388987_WAR regkey disabled reading PCON
// caps for MST devices (cached from dpRegkeyDatabase.bMSTPCONCapsReadDisabled
// in applyRegkeyOverrides()).
bool EvoMainLink::isMSTPCONCapsReadDisabled()
{
return _isMSTPCONCapsReadDisabled;
}
bool EvoMainLink::isActive()
{

View File

@ -36,25 +36,25 @@
// and then checked back in. You cannot make changes to these sections without
// corresponding changes to the buildmeister script
#ifndef NV_BUILD_BRANCH
#define NV_BUILD_BRANCH r538_10
#define NV_BUILD_BRANCH r538_27
#endif
#ifndef NV_PUBLIC_BRANCH
#define NV_PUBLIC_BRANCH r538_10
#define NV_PUBLIC_BRANCH r538_27
#endif
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r535/r538_10-414"
#define NV_BUILD_CHANGELIST_NUM (33694617)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r535/r538_27-450"
#define NV_BUILD_CHANGELIST_NUM (33916993)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "rel/gpu_drv/r535/r538_10-414"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33694617)
#define NV_BUILD_NAME "rel/gpu_drv/r535/r538_27-450"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33916993)
#else /* Windows builds */
#define NV_BUILD_BRANCH_VERSION "r538_10-3"
#define NV_BUILD_CHANGELIST_NUM (33691963)
#define NV_BUILD_BRANCH_VERSION "r538_27-5"
#define NV_BUILD_CHANGELIST_NUM (33810369)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "538.15"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33691963)
#define NV_BUILD_NAME "538.33"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (33810369)
#define NV_BUILD_BRANCH_BASE_VERSION R535
#endif
// End buildmeister python edited section

View File

@ -4,7 +4,7 @@
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
(defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
#define NV_VERSION_STRING "535.154.05"
#define NV_VERSION_STRING "535.161.07"
#else

View File

@ -3,7 +3,7 @@
#define NV_COMPANY_NAME_STRING_SHORT "NVIDIA"
#define NV_COMPANY_NAME_STRING_FULL "NVIDIA Corporation"
#define NV_COMPANY_NAME_STRING NV_COMPANY_NAME_STRING_FULL
#define NV_COPYRIGHT_YEAR "2023"
#define NV_COPYRIGHT_YEAR "2024"
#define NV_COPYRIGHT "(C) " NV_COPYRIGHT_YEAR " NVIDIA Corporation. All rights reserved." // Please do not use the non-ascii copyright symbol for (C).
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \

View File

@ -1,24 +1,25 @@
/*******************************************************************************
Copyright (c) 2014-2023 NVidia Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
//
// nvlink.h

View File

@ -1,24 +1,25 @@
/*******************************************************************************
Copyright (c) 2016-2023 NVidia Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVLINK_LIB_CTRL_H_
#define _NVLINK_LIB_CTRL_H_

View File

@ -1,21 +1,25 @@
/*******************************************************************************
Copyright (c) 2020 NVidia Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVLINK_LOCK_H_

View File

@ -1,24 +1,25 @@
/*******************************************************************************
Copyright (c) 2019-2022 NVidia Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvlink.h"
#include "nvlink_export.h"

View File

@ -1,24 +1,25 @@
/*******************************************************************************
Copyright (c) 2017-2023 NVidia Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*******************************************************************************/
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nvlink.h"
#include "nvVer.h"

View File

@ -41,6 +41,11 @@
#include "nvswitch/ls10/dev_minion_ip_addendum.h"
#include "ls10/minion_nvlink_defines_public_ls10.h"
#define NV_NVLINK_TLREQ_TIMEOUT_ACTIVE 10000
#define NV_NVLINK_TLREQ_TIMEOUT_SHUTDOWN 10
#define NV_NVLINK_TLREQ_TIMEOUT_RESET 4
#define NV_NVLINK_TLREQ_TIMEOUT_L2 5
static void
_nvswitch_configure_reserved_throughput_counters
(
@ -144,7 +149,7 @@ nvswitch_init_lpwr_regs_ls10
if (nvswitch_lib_get_bios_version(device, &biosVersion) != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, WARN, "%s Get VBIOS version failed.\n",
__FUNCTION__);
__FUNCTION__);
biosVersion = 0;
}
@ -1713,6 +1718,39 @@ nvswitch_are_link_clocks_on_ls10
return NV_TRUE;
}
static
NvlStatus
_nvswitch_tl_request_get_timeout_value_ls10
(
nvswitch_device *device,
NvU32 tlLinkState,
NvU32 *timeoutVal
)
{
switch (tlLinkState)
{
case NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_ACTIVE:
*timeoutVal = NV_NVLINK_TLREQ_TIMEOUT_ACTIVE;
break;
case NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_RESET:
*timeoutVal = NV_NVLINK_TLREQ_TIMEOUT_RESET;
break;
case NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_SHUTDOWN:
*timeoutVal = NV_NVLINK_TLREQ_TIMEOUT_SHUTDOWN;
break;
case NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_L2:
*timeoutVal = NV_NVLINK_TLREQ_TIMEOUT_L2;
break;
default:
NVSWITCH_PRINT(device, ERROR,
"%s: Invalid tlLinkState %d provided!\n",
__FUNCTION__, tlLinkState);
return NVL_BAD_ARGS;
}
return NVL_SUCCESS;
}
NvlStatus
nvswitch_request_tl_link_state_ls10
(
@ -1726,6 +1764,9 @@ nvswitch_request_tl_link_state_ls10
NvU32 linkStatus;
NvU32 lnkErrStatus;
NvU32 bit;
NvU32 timeoutVal;
NVSWITCH_TIMEOUT timeout;
NvBool keepPolling;
if (!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NVLIPT_LNK, link->linkNumber))
{
@ -1759,25 +1800,53 @@ nvswitch_request_tl_link_state_ls10
if (bSync)
{
// Wait for the TL link state register to complete
status = nvswitch_wait_for_tl_request_ready_lr10(link);
// setup timeouts for the TL request
status = _nvswitch_tl_request_get_timeout_value_ls10(device, tlLinkState, &timeoutVal);
if (status != NVL_SUCCESS)
{
return status;
return NVL_ERR_INVALID_STATE;
}
// Check for state requested
linkStatus = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber,
nvswitch_timeout_create(NVSWITCH_INTERVAL_1MSEC_IN_NS * timeoutVal, &timeout);
status = NVL_MORE_PROCESSING_REQUIRED;
do
{
keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE;
// Check for state requested
linkStatus = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber,
NVLIPT_LNK , _NVLIPT_LNK , _CTRL_LINK_STATE_STATUS);
if (DRF_VAL(_NVLIPT_LNK, _CTRL_LINK_STATE_STATUS, _CURRENTLINKSTATE, linkStatus) !=
tlLinkState)
{
NVSWITCH_PRINT(device, ERROR,
"%s: TL link state request to state 0x%x for link #%d did not complete!\n",
__FUNCTION__, tlLinkState, link->linkNumber);
return -NVL_ERR_GENERIC;
if (DRF_VAL(_NVLIPT_LNK, _CTRL_LINK_STATE_STATUS, _CURRENTLINKSTATE, linkStatus) ==
tlLinkState)
{
status = NVL_SUCCESS;
break;
}
nvswitch_os_sleep(1);
}
while(keepPolling);
// Do one final check if the polling loop didn't see the target linkState
if (status == NVL_MORE_PROCESSING_REQUIRED)
{
// Check for state requested
linkStatus = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber,
NVLIPT_LNK , _NVLIPT_LNK , _CTRL_LINK_STATE_STATUS);
if (DRF_VAL(_NVLIPT_LNK, _CTRL_LINK_STATE_STATUS, _CURRENTLINKSTATE, linkStatus) !=
tlLinkState)
{
NVSWITCH_PRINT(device, ERROR,
"%s: TL link state request to state 0x%x for link #%d did not complete!\n",
__FUNCTION__, tlLinkState, link->linkNumber);
return -NVL_ERR_GENERIC;
}
}
}
return status;

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -60,4 +60,80 @@ typedef struct NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS {
NV_DECLARE_ALIGNED(NvU64 dramCorrectedTotalCounts, 8);
NV_DECLARE_ALIGNED(NvU64 dramUncorrectedTotalCounts, 8);
} NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS;
#define NV2080_CTRL_CMD_ECC_GET_ECI_COUNTERS (0x20803401U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_ECC_INTERFACE_ID << 8) | NV2080_CTRL_ECC_GET_ECI_COUNTERS_PARAMS_MESSAGE_ID" */
/*
* NV2080_CTRL_ECC_GET_ECI_COUNTERS_PARAMS
*
* sramParityUncorrectedUnique [out]
* sramSecDedUncorrectedUnique [out]
* sramCorrectedTotal [out]
* dramUncorrectedTotal [out]
* dramCorrectedTotal [out]
* Aggregate error counts for SRAM and DRAM.
*
* lastClearedTimestamp [out]
* unix-epoch based timestamp. These fields indicate when the error counters
* were last cleared by the user.
*
* sramBucketL2 [out]
* sramBucketSM [out]
* sramBucketPcie [out]
* sramBucketFirmware [out]
* sramBucketOther [out]
* Aggregate unique uncorrctable error counts for SRAM buckets.
*
* sramErrorThresholdExceeded [out]
* Boolean flag which is set if SRAM error threshold was exceeded
*/
#define NV2080_CTRL_ECC_GET_ECI_COUNTERS_PARAMS_MESSAGE_ID (0x1U)
typedef struct NV2080_CTRL_ECC_GET_ECI_COUNTERS_PARAMS {
NV_DECLARE_ALIGNED(NvU64 sramParityUncorrectedUnique, 8);
NV_DECLARE_ALIGNED(NvU64 sramSecDedUncorrectedUnique, 8);
NV_DECLARE_ALIGNED(NvU64 sramCorrectedTotal, 8);
NV_DECLARE_ALIGNED(NvU64 dramUncorrectedTotal, 8);
NV_DECLARE_ALIGNED(NvU64 dramCorrectedTotal, 8);
NvU32 lastClearedTimestamp;
NV_DECLARE_ALIGNED(NvU64 sramBucketL2, 8);
NV_DECLARE_ALIGNED(NvU64 sramBucketSM, 8);
NV_DECLARE_ALIGNED(NvU64 sramBucketPcie, 8);
NV_DECLARE_ALIGNED(NvU64 sramBucketFirmware, 8);
NV_DECLARE_ALIGNED(NvU64 sramBucketOther, 8);
NvBool sramErrorThresholdExceeded;
} NV2080_CTRL_ECC_GET_ECI_COUNTERS_PARAMS;
/*
* NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS
*
* Reports count of volatile errors
*
* sramCorUni [out]:
* Unique correctable SRAM error count
* sramUncParityUni [out]:
* Unique uncorrectable SRAM parity error count
* sramUncSecDedUni [out]:
* Unique uncorrectable SRAM SEC-DED error count
* dramCorTot [out]:
* Total correctable DRAM error count
* dramUncTot [out]:
* total uncorrectable DRAM error count
*/
#define NV2080_CTRL_CMD_ECC_GET_VOLATILE_COUNTS (0x20803402U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_ECC_INTERFACE_ID << 8) | NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS_MESSAGE_ID" */
#define NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS_MESSAGE_ID (0x2U)
typedef struct NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS {
NV_DECLARE_ALIGNED(NvU64 sramCorUni, 8);
NV_DECLARE_ALIGNED(NvU64 sramUncParityUni, 8);
NV_DECLARE_ALIGNED(NvU64 sramUncSecDedUni, 8);
NV_DECLARE_ALIGNED(NvU64 dramCorTot, 8);
NV_DECLARE_ALIGNED(NvU64 dramUncTot, 8);
} NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS;
/* _ctrl2080ecc_h_ */

View File

@ -15,7 +15,6 @@ static inline void _get_chip_id_for_alias_pgpu(NvU32 *dev_id, NvU32 *subdev_id)
{ 0x20B5, 0x1642, 0x20B5, 0x1533 },
{ 0x20B8, 0x1581, 0x20B5, 0x1533 },
{ 0x20B7, 0x1804, 0x20B7, 0x1532 },
{ 0x20B7, 0x1852, 0x20B7, 0x1532 },
{ 0x20B9, 0x157F, 0x20B7, 0x1532 },
{ 0x20FD, 0x17F8, 0x20F5, 0x0 },
{ 0x2330, 0x16C0, 0x2330, 0x16C1 },

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -187,9 +187,12 @@ typedef struct
// Used during boot to ensure the heap is adequately sized
NvU8 gspFwHeapVfPartitionCount;
// Flags to help decide GSP-FW flow.
NvU8 flags;
// Pad structure to exactly 256 bytes. Can replace padding with additional
// fields without incrementing revision. Padding initialized to 0.
NvU8 padding[7];
NvU8 padding[6];
// BL to use for verification (i.e. Booter says OK to boot)
NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
@ -220,4 +223,8 @@ typedef struct
#define GSP_FW_HEAP_FREE_LIST_MAGIC 0x4845415046524545ULL
#define GSP_FW_FLAGS 8:0
#define GSP_FW_FLAGS_CLOCK_BOOST NVBIT(0)
#define GSP_FW_FLAGS_RECOVERY_MARGIN_PRESENT NVBIT(1)
#endif // GSP_FW_WPR_META_H_

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a

View File

@ -615,6 +615,14 @@ typedef enum
#define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv) \
(((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0)
/*
* For console setup by EFI GOP, the base address is BAR1.
* For console setup by VBIOS, the base address is BAR2 + 16MB.
*/
#define NV_IS_CONSOLE_MAPPED(nv, addr) \
(((addr) == (nv)->bars[NV_GPU_BAR_INDEX_FB].cpu_address) || \
((addr) == ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000)))
#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv) \
((nv)->iso_iommu_present)
@ -874,6 +882,8 @@ NvBool NV_API_CALL nv_match_gpu_os_info(nv_state_t *, void *);
NvU32 NV_API_CALL nv_get_os_type(void);
void NV_API_CALL nv_get_updated_emu_seg(NvU32 *start, NvU32 *end);
void NV_API_CALL nv_get_screen_info(nv_state_t *, NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64 *);
struct dma_buf;
typedef struct nv_dma_buf nv_dma_buf_t;
struct drm_gem_object;

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -162,10 +162,9 @@ NvBool NV_API_CALL os_is_vgx_hyper (void);
NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32);
NvBool NV_API_CALL os_is_grid_supported (void);
NvU32 NV_API_CALL os_get_grid_csp_support (void);
void NV_API_CALL os_get_screen_info (NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64, NvU64);
void NV_API_CALL os_bug_check (NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **, void**);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *);
NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *);
NV_STATUS NV_API_CALL os_get_euid (NvU32 *);

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -157,25 +157,11 @@ static void RmCreateOsDescriptor(NVOS32_PARAMETERS *pApi, API_SECURITY_INFO secI
}
else if (rmStatus == NV_ERR_INVALID_ADDRESS)
{
rmStatus = os_lookup_user_io_memory(pDescriptor, pageCount,
&pPteArray, &pPageArray);
rmStatus = os_lookup_user_io_memory(pDescriptor, pageCount, &pPteArray);
if (rmStatus == NV_OK)
{
if (pPageArray != NULL)
{
pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPageArray;
pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY;
}
else if (pPteArray != NULL)
{
pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPteArray;
pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY;
}
else
{
NV_ASSERT_FAILED("unknown memory import type");
rmStatus = NV_ERR_NOT_SUPPORTED;
}
pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPteArray;
pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY;
}
}
if (rmStatus != NV_OK)

View File

@ -5131,16 +5131,11 @@ NvBool rm_get_uefi_console_status(
NvU64 fbBaseAddress = 0;
NvBool bConsoleDevice = NV_FALSE;
// os_get_screen_info() will return dimensions and an address for
// any fbdev driver (e.g., efifb, vesafb, etc). To find if this is a
// UEFI console check the fbBaseAddress: if it was set up by the EFI GOP
// driver, it will point into BAR1 (FB); if it was set up by the VBIOS,
// it will point to BAR2 + 16MB.
os_get_screen_info(&fbBaseAddress, &fbWidth, &fbHeight, &fbDepth, &fbPitch,
nv->bars[NV_GPU_BAR_INDEX_FB].cpu_address,
nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000);
fbSize = fbHeight * fbPitch;
//
// nv_get_screen_info() will return dimensions and an address for
// any fbdev driver (e.g., efifb, vesafb, etc).
//
nv_get_screen_info(nv, &fbBaseAddress, &fbWidth, &fbHeight, &fbDepth, &fbPitch, &fbSize);
bConsoleDevice = (fbSize != 0);
@ -5157,16 +5152,11 @@ NvU64 rm_get_uefi_console_size(
fbSize = fbWidth = fbHeight = fbDepth = fbPitch = 0;
// os_get_screen_info() will return dimensions and an address for
// any fbdev driver (e.g., efifb, vesafb, etc). To find if this is a
// UEFI console check the fbBaseAddress: if it was set up by the EFI GOP
// driver, it will point into BAR1 (FB); if it was set up by the VBIOS,
// it will point to BAR2 + 16MB.
os_get_screen_info(pFbBaseAddress, &fbWidth, &fbHeight, &fbDepth, &fbPitch,
nv->bars[NV_GPU_BAR_INDEX_FB].cpu_address,
nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000);
fbSize = fbHeight * fbPitch;
//
// nv_get_screen_info() will return dimensions and an address for
// any fbdev driver (e.g., efifb, vesafb, etc).
//
nv_get_screen_info(nv, pFbBaseAddress, &fbWidth, &fbHeight, &fbDepth, &fbPitch, &fbSize);
return fbSize;
}

View File

@ -900,14 +900,12 @@ RmInitNvDevice(
return;
}
os_disable_console_access();
status->rmStatus = gpumgrStateInitGpu(pGpu);
if (status->rmStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "*** Cannot initialize the device\n");
RM_SET_ERROR(*status, RM_INIT_GPU_STATE_INIT_FAILED);
os_enable_console_access();
return;
}
nvp->flags |= NV_INIT_FLAG_GPU_STATE;
@ -937,13 +935,10 @@ RmInitNvDevice(
NV_PRINTF(LEVEL_ERROR,
"*** Cannot load state into the device\n");
RM_SET_ERROR(*status, RM_INIT_GPU_LOAD_FAILED);
os_enable_console_access();
return;
}
nvp->flags |= NV_INIT_FLAG_GPU_STATE_LOAD;
os_enable_console_access();
status->rmStatus = gpuPerformUniversalValidation_HAL(pGpu);
if (status->rmStatus != NV_OK)
{
@ -1542,6 +1537,7 @@ NvBool RmInitAdapter(
KernelDisplay *pKernelDisplay;
const void *gspFwHandle = NULL;
const void *gspFwLogHandle = NULL;
NvBool consoleDisabled = NV_FALSE;
GSP_FIRMWARE gspFw = {0};
PORT_UNREFERENCED_VARIABLE(gspFw);
@ -1650,6 +1646,16 @@ NvBool RmInitAdapter(
RmInitAcpiMethods(pOS, pSys, pGpu);
//
// For GPU driving console, disable console access here, to ensure no console
// writes through BAR1 can interfere with physical RM's setup of BAR1
//
if (rm_get_uefi_console_status(nv))
{
os_disable_console_access();
consoleDisabled = NV_TRUE;
}
//
// If GSP fw RM support is enabled then start the GSP microcode
// (including the task running the full instance of the RM) and
@ -1725,6 +1731,12 @@ NvBool RmInitAdapter(
goto shutdown;
}
if (consoleDisabled)
{
os_enable_console_access();
consoleDisabled = NV_FALSE;
}
// LOCK: acquire GPUs lock
status.rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE,
RM_LOCK_MODULES_INIT);
@ -1920,6 +1932,11 @@ NvBool RmInitAdapter(
shutdown:
nv->flags &= ~NV_FLAG_IN_RECOVERY;
if (consoleDisabled)
{
os_enable_console_access();
}
// call ShutdownAdapter to undo anything we've done above
RmShutdownAdapter(nv);

View File

@ -336,7 +336,7 @@ NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL
if ((memmgrGetReservedConsoleMemDesc(pGpu, pMemoryManager) != NULL) && bContinue)
{
NvU64 baseAddr;
NvU64 baseAddr, size;
// There should only be one.
NV_ASSERT(pParams->width == 0);
@ -344,11 +344,9 @@ NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL
pParams->subDeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
// Console is either mapped to BAR1 or BAR2 + 16 MB
os_get_screen_info(&baseAddr, &pParams->width,
nv_get_screen_info(nv, &baseAddr, &pParams->width,
&pParams->height, &pParams->depth,
&pParams->pitch,
nv->bars[NV_GPU_BAR_INDEX_FB].cpu_address,
nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000);
&pParams->pitch, &size);
if (baseAddr != 0)
{

View File

@ -744,28 +744,6 @@ static void __nvoc_init_funcTable_OBJGPU_1(OBJGPU *pThis) {
pThis->__gpuIsCtxBufAllocInPmaSupported__ = &gpuIsCtxBufAllocInPmaSupported_491d52;
}
// Hal function -- gpuCheckEccCounts
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000420UL) )) /* ChipHal: TU102 | GA100 | GH100 */
{
pThis->__gpuCheckEccCounts__ = &gpuCheckEccCounts_TU102;
}
// default
else
{
pThis->__gpuCheckEccCounts__ = &gpuCheckEccCounts_d44104;
}
// Hal function -- gpuClearEccCounts
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000420UL) )) /* ChipHal: TU102 | GA100 | GH100 */
{
pThis->__gpuClearEccCounts__ = &gpuClearEccCounts_TU102;
}
// default
else
{
pThis->__gpuClearEccCounts__ = &gpuClearEccCounts_ac1694;
}
// Hal function -- gpuWaitForGfwBootComplete
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f0ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 */
{

View File

@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2004-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -896,8 +896,6 @@ struct OBJGPU {
NvBool (*__gpuIsCCEnabledInHw__)(struct OBJGPU *);
NvBool (*__gpuIsDevModeEnabledInHw__)(struct OBJGPU *);
NvBool (*__gpuIsCtxBufAllocInPmaSupported__)(struct OBJGPU *);
void (*__gpuCheckEccCounts__)(struct OBJGPU *);
NV_STATUS (*__gpuClearEccCounts__)(struct OBJGPU *);
NV_STATUS (*__gpuWaitForGfwBootComplete__)(struct OBJGPU *);
NvBool PDB_PROP_GPU_HIGH_SPEED_BRIDGE_CONNECTED;
NvBool bVideoLinkDisabled;
@ -1148,6 +1146,7 @@ struct OBJGPU {
NvBool bStateUnloading;
NvBool bStateLoaded;
NvBool bFullyConstructed;
NvBool bRecoveryMarginPresent;
NvBool bBf3WarBug4040336Enabled;
NvBool bUnifiedMemorySpaceEnabled;
NvBool bSriovEnabled;
@ -1473,10 +1472,6 @@ NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU**, Dynamic*, NvU32,
#define gpuIsDevModeEnabledInHw_HAL(pGpu) gpuIsDevModeEnabledInHw_DISPATCH(pGpu)
#define gpuIsCtxBufAllocInPmaSupported(pGpu) gpuIsCtxBufAllocInPmaSupported_DISPATCH(pGpu)
#define gpuIsCtxBufAllocInPmaSupported_HAL(pGpu) gpuIsCtxBufAllocInPmaSupported_DISPATCH(pGpu)
#define gpuCheckEccCounts(pGpu) gpuCheckEccCounts_DISPATCH(pGpu)
#define gpuCheckEccCounts_HAL(pGpu) gpuCheckEccCounts_DISPATCH(pGpu)
#define gpuClearEccCounts(pGpu) gpuClearEccCounts_DISPATCH(pGpu)
#define gpuClearEccCounts_HAL(pGpu) gpuClearEccCounts_DISPATCH(pGpu)
#define gpuWaitForGfwBootComplete(pGpu) gpuWaitForGfwBootComplete_DISPATCH(pGpu)
#define gpuWaitForGfwBootComplete_HAL(pGpu) gpuWaitForGfwBootComplete_DISPATCH(pGpu)
static inline NV_STATUS gpuConstructPhysical_56cd7a(struct OBJGPU *pGpu) {
@ -2933,6 +2928,20 @@ static inline GPU_P2P_PEER_GPU_CAPS *gpuFindP2PPeerGpuCapsByGpuId(struct OBJGPU
#define gpuFindP2PPeerGpuCapsByGpuId_HAL(pGpu, peerGpuId) gpuFindP2PPeerGpuCapsByGpuId(pGpu, peerGpuId)
NvBool gpuCheckEccCounts_TU102(struct OBJGPU *pGpu);
#ifdef __nvoc_gpu_h_disabled
static inline NvBool gpuCheckEccCounts(struct OBJGPU *pGpu) {
NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!");
return NV_FALSE;
}
#else //__nvoc_gpu_h_disabled
#define gpuCheckEccCounts(pGpu) gpuCheckEccCounts_TU102(pGpu)
#endif //__nvoc_gpu_h_disabled
#define gpuCheckEccCounts_HAL(pGpu) gpuCheckEccCounts(pGpu)
NV_STATUS gpuConstructDeviceInfoTable_FWCLIENT(struct OBJGPU *pGpu);
static inline NV_STATUS gpuConstructDeviceInfoTable_56cd7a(struct OBJGPU *pGpu) {
@ -3211,26 +3220,6 @@ static inline NvBool gpuIsCtxBufAllocInPmaSupported_DISPATCH(struct OBJGPU *pGpu
return pGpu->__gpuIsCtxBufAllocInPmaSupported__(pGpu);
}
static inline void gpuCheckEccCounts_d44104(struct OBJGPU *pGpu) {
return;
}
void gpuCheckEccCounts_TU102(struct OBJGPU *pGpu);
static inline void gpuCheckEccCounts_DISPATCH(struct OBJGPU *pGpu) {
pGpu->__gpuCheckEccCounts__(pGpu);
}
static inline NV_STATUS gpuClearEccCounts_ac1694(struct OBJGPU *pGpu) {
return NV_OK;
}
NV_STATUS gpuClearEccCounts_TU102(struct OBJGPU *pGpu);
static inline NV_STATUS gpuClearEccCounts_DISPATCH(struct OBJGPU *pGpu) {
return pGpu->__gpuClearEccCounts__(pGpu);
}
NV_STATUS gpuWaitForGfwBootComplete_TU102(struct OBJGPU *pGpu);
static inline NV_STATUS gpuWaitForGfwBootComplete_5baef9(struct OBJGPU *pGpu) {

View File

@ -346,25 +346,26 @@ static inline void rcdbDestroyRingBuffer(struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE
#define rcdbDestroyRingBuffer(pRcdb, type) rcdbDestroyRingBuffer_IMPL(pRcdb, type)
#endif //__nvoc_journal_h_disabled
void rcdbAddRecToRingBuffer_IMPL(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type, NvU32 recordSize, NvU8 *pRecord);
RmRCCommonJournal_RECORD *rcdbAddRecToRingBuffer_IMPL(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type, NvU32 recordSize, NvU8 *pRecord);
#ifdef __nvoc_journal_h_disabled
static inline void rcdbAddRecToRingBuffer(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type, NvU32 recordSize, NvU8 *pRecord) {
static inline RmRCCommonJournal_RECORD *rcdbAddRecToRingBuffer(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type, NvU32 recordSize, NvU8 *pRecord) {
NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!");
return NULL;
}
#else //__nvoc_journal_h_disabled
#define rcdbAddRecToRingBuffer(pGpu, pRcdb, type, recordSize, pRecord) rcdbAddRecToRingBuffer_IMPL(pGpu, pRcdb, type, recordSize, pRecord)
#endif //__nvoc_journal_h_disabled
NvU32 rcdbGetOcaRecordSize_IMPL(struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type);
NvU32 rcdbGetOcaRecordSizeWithHeader_IMPL(struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type);
#ifdef __nvoc_journal_h_disabled
static inline NvU32 rcdbGetOcaRecordSize(struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type) {
static inline NvU32 rcdbGetOcaRecordSizeWithHeader(struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type) {
NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!");
return 0;
}
#else //__nvoc_journal_h_disabled
#define rcdbGetOcaRecordSize(pRcdb, type) rcdbGetOcaRecordSize_IMPL(pRcdb, type)
#define rcdbGetOcaRecordSizeWithHeader(pRcdb, type) rcdbGetOcaRecordSizeWithHeader_IMPL(pRcdb, type)
#endif //__nvoc_journal_h_disabled
NvU32 rcdbDumpJournal_IMPL(struct OBJRCDB *pRcdb, struct OBJGPU *pGpu, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState, const PRB_FIELD_DESC *pFieldDesc);
@ -433,17 +434,28 @@ static inline NV_STATUS rcdbGetRcDiagRecBoundaries(struct OBJRCDB *pRcdb, NvU16
#define rcdbGetRcDiagRecBoundaries(pRcdb, arg0, arg1, arg2, arg3) rcdbGetRcDiagRecBoundaries_IMPL(pRcdb, arg0, arg1, arg2, arg3)
#endif //__nvoc_journal_h_disabled
NV_STATUS rcdbAddRcDiagRec_IMPL(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, RmRcDiag_RECORD *arg0);
RmRCCommonJournal_RECORD *rcdbAddRcDiagRec_IMPL(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, RmRcDiag_RECORD *arg0);
#ifdef __nvoc_journal_h_disabled
static inline NV_STATUS rcdbAddRcDiagRec(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, RmRcDiag_RECORD *arg0) {
static inline RmRCCommonJournal_RECORD *rcdbAddRcDiagRec(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, RmRcDiag_RECORD *arg0) {
NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!");
return NV_ERR_NOT_SUPPORTED;
return NULL;
}
#else //__nvoc_journal_h_disabled
#define rcdbAddRcDiagRec(pGpu, pRcdb, arg0) rcdbAddRcDiagRec_IMPL(pGpu, pRcdb, arg0)
#endif //__nvoc_journal_h_disabled
RmRCCommonJournal_RECORD *rcdbAddRcDiagRecFromGsp_IMPL(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, RmRCCommonJournal_RECORD *arg0, RmRcDiag_RECORD *arg1);
#ifdef __nvoc_journal_h_disabled
static inline RmRCCommonJournal_RECORD *rcdbAddRcDiagRecFromGsp(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, RmRCCommonJournal_RECORD *arg0, RmRcDiag_RECORD *arg1) {
NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!");
return NULL;
}
#else //__nvoc_journal_h_disabled
#define rcdbAddRcDiagRecFromGsp(pGpu, pRcdb, arg0, arg1) rcdbAddRcDiagRecFromGsp_IMPL(pGpu, pRcdb, arg0, arg1)
#endif //__nvoc_journal_h_disabled
NV_STATUS rcdbGetRcDiagRec_IMPL(struct OBJRCDB *pRcdb, NvU16 arg0, RmRCCommonJournal_RECORD **arg1, NvU32 arg2, NvU32 arg3);
#ifdef __nvoc_journal_h_disabled

View File

@ -903,17 +903,6 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner *
pThis->__kbusGetEccCounts__ = &kbusGetEccCounts_4a4dee;
}
// Hal function -- kbusClearEccCounts
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__kbusClearEccCounts__ = &kbusClearEccCounts_GH100;
}
// default
else
{
pThis->__kbusClearEccCounts__ = &kbusClearEccCounts_b3696a;
}
pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelBus_engstateConstructEngine;
pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreInitLocked__ = &__nvoc_thunk_KernelBus_engstateStatePreInitLocked;

View File

@ -382,7 +382,6 @@ struct KernelBus {
void (*__kbusUnmapCoherentCpuMapping__)(OBJGPU *, struct KernelBus *, PMEMORY_DESCRIPTOR);
void (*__kbusTeardownCoherentCpuMapping__)(OBJGPU *, struct KernelBus *, NvBool);
NvU32 (*__kbusGetEccCounts__)(OBJGPU *, struct KernelBus *);
void (*__kbusClearEccCounts__)(OBJGPU *, struct KernelBus *);
NV_STATUS (*__kbusStatePostUnload__)(POBJGPU, struct KernelBus *, NvU32);
NV_STATUS (*__kbusStateInitUnlocked__)(POBJGPU, struct KernelBus *);
void (*__kbusInitMissing__)(POBJGPU, struct KernelBus *);
@ -611,8 +610,6 @@ NV_STATUS __nvoc_objCreate_KernelBus(KernelBus**, Dynamic*, NvU32);
#define kbusTeardownCoherentCpuMapping_HAL(pGpu, pKernelBus, arg0) kbusTeardownCoherentCpuMapping_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusGetEccCounts(pGpu, pKernelBus) kbusGetEccCounts_DISPATCH(pGpu, pKernelBus)
#define kbusGetEccCounts_HAL(pGpu, pKernelBus) kbusGetEccCounts_DISPATCH(pGpu, pKernelBus)
#define kbusClearEccCounts(pGpu, pKernelBus) kbusClearEccCounts_DISPATCH(pGpu, pKernelBus)
#define kbusClearEccCounts_HAL(pGpu, pKernelBus) kbusClearEccCounts_DISPATCH(pGpu, pKernelBus)
#define kbusStatePostUnload(pGpu, pEngstate, arg0) kbusStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kbusStateInitUnlocked(pGpu, pEngstate) kbusStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kbusInitMissing(pGpu, pEngstate) kbusInitMissing_DISPATCH(pGpu, pEngstate)
@ -2231,16 +2228,6 @@ static inline NvU32 kbusGetEccCounts_DISPATCH(OBJGPU *pGpu, struct KernelBus *pK
return pKernelBus->__kbusGetEccCounts__(pGpu, pKernelBus);
}
void kbusClearEccCounts_GH100(OBJGPU *pGpu, struct KernelBus *pKernelBus);
static inline void kbusClearEccCounts_b3696a(OBJGPU *pGpu, struct KernelBus *pKernelBus) {
return;
}
static inline void kbusClearEccCounts_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus) {
pKernelBus->__kbusClearEccCounts__(pGpu, pKernelBus);
}
static inline NV_STATUS kbusStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) {
return pEngstate->__kbusStatePostUnload__(pGpu, pEngstate, arg0);
}

View File

@ -504,17 +504,6 @@ static void __nvoc_init_funcTable_KernelGmmu_1(KernelGmmu *pThis, RmHalspecOwner
pThis->__kgmmuGetEccCounts__ = &kgmmuGetEccCounts_4a4dee;
}
// Hal function -- kgmmuClearEccCounts
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000420UL) )) /* ChipHal: TU102 | GA100 | GH100 */
{
pThis->__kgmmuClearEccCounts__ = &kgmmuClearEccCounts_TU102;
}
// default
else
{
pThis->__kgmmuClearEccCounts__ = &kgmmuClearEccCounts_b3696a;
}
pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelGmmu_engstateConstructEngine;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelGmmu_engstateStateInitLocked;

View File

@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -357,7 +357,6 @@ struct KernelGmmu {
NvU32 (*__kgmmuGetGraphicsEngineId__)(struct KernelGmmu *);
NvU32 (*__kgmmuReadShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
NvU32 (*__kgmmuGetEccCounts__)(OBJGPU *, struct KernelGmmu *);
void (*__kgmmuClearEccCounts__)(OBJGPU *, struct KernelGmmu *);
NV_STATUS (*__kgmmuStateLoad__)(POBJGPU, struct KernelGmmu *, NvU32);
NV_STATUS (*__kgmmuStateUnload__)(POBJGPU, struct KernelGmmu *, NvU32);
NV_STATUS (*__kgmmuServiceNotificationInterrupt__)(struct OBJGPU *, struct KernelGmmu *, IntrServiceServiceNotificationInterruptArguments *);
@ -436,7 +435,6 @@ struct KernelGmmu_PRIVATE {
NvU32 (*__kgmmuGetGraphicsEngineId__)(struct KernelGmmu *);
NvU32 (*__kgmmuReadShadowBufPutIndex__)(OBJGPU *, struct KernelGmmu *, FAULT_BUFFER_TYPE);
NvU32 (*__kgmmuGetEccCounts__)(OBJGPU *, struct KernelGmmu *);
void (*__kgmmuClearEccCounts__)(OBJGPU *, struct KernelGmmu *);
NV_STATUS (*__kgmmuStateLoad__)(POBJGPU, struct KernelGmmu *, NvU32);
NV_STATUS (*__kgmmuStateUnload__)(POBJGPU, struct KernelGmmu *, NvU32);
NV_STATUS (*__kgmmuServiceNotificationInterrupt__)(struct OBJGPU *, struct KernelGmmu *, IntrServiceServiceNotificationInterruptArguments *);
@ -567,8 +565,6 @@ NV_STATUS __nvoc_objCreate_KernelGmmu(KernelGmmu**, Dynamic*, NvU32);
#define kgmmuReadShadowBufPutIndex_HAL(pGpu, pKernelGmmu, type) kgmmuReadShadowBufPutIndex_DISPATCH(pGpu, pKernelGmmu, type)
#define kgmmuGetEccCounts(pGpu, pKernelGmmu) kgmmuGetEccCounts_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuGetEccCounts_HAL(pGpu, pKernelGmmu) kgmmuGetEccCounts_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuClearEccCounts(pGpu, pKernelGmmu) kgmmuClearEccCounts_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuClearEccCounts_HAL(pGpu, pKernelGmmu) kgmmuClearEccCounts_DISPATCH(pGpu, pKernelGmmu)
#define kgmmuStateLoad(pGpu, pEngstate, arg0) kgmmuStateLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kgmmuStateUnload(pGpu, pEngstate, arg0) kgmmuStateUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kgmmuServiceNotificationInterrupt(pGpu, pIntrService, pParams) kgmmuServiceNotificationInterrupt_DISPATCH(pGpu, pIntrService, pParams)
@ -1323,16 +1319,6 @@ static inline NvU32 kgmmuGetEccCounts_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *
return pKernelGmmu->__kgmmuGetEccCounts__(pGpu, pKernelGmmu);
}
void kgmmuClearEccCounts_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
static inline void kgmmuClearEccCounts_b3696a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
return;
}
static inline void kgmmuClearEccCounts_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
pKernelGmmu->__kgmmuClearEccCounts__(pGpu, pKernelGmmu);
}
static inline NV_STATUS kgmmuStateLoad_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) {
return pEngstate->__kgmmuStateLoad__(pGpu, pEngstate, arg0);
}

View File

@ -426,14 +426,14 @@ static void __nvoc_init_funcTable_KernelMemorySystem_1(KernelMemorySystem *pThis
}
// Hal function -- kmemsysGetMaxFbpas
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000020UL) )) /* ChipHal: TU102 */
{
pThis->__kmemsysGetMaxFbpas__ = &kmemsysGetMaxFbpas_TU102;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000400UL) )) /* ChipHal: GA100 | GH100 */
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000400UL) )) /* ChipHal: GA100 | GH100 */
{
pThis->__kmemsysGetMaxFbpas__ = &kmemsysGetMaxFbpas_GA100;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */
{
pThis->__kmemsysGetMaxFbpas__ = &kmemsysGetMaxFbpas_TU102;
}
// default
else
{
@ -445,14 +445,9 @@ static void __nvoc_init_funcTable_KernelMemorySystem_1(KernelMemorySystem *pThis
{
pThis->__kmemsysGetEccDedCountSize__ = &kmemsysGetEccDedCountSize_GH100;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000420UL) )) /* ChipHal: TU102 | GA100 */
{
pThis->__kmemsysGetEccDedCountSize__ = &kmemsysGetEccDedCountSize_TU102;
}
// default
else
{
pThis->__kmemsysGetEccDedCountSize__ = &kmemsysGetEccDedCountSize_4a4dee;
pThis->__kmemsysGetEccDedCountSize__ = &kmemsysGetEccDedCountSize_TU102;
}
// Hal function -- kmemsysGetEccDedCountRegAddr
@ -460,37 +455,10 @@ static void __nvoc_init_funcTable_KernelMemorySystem_1(KernelMemorySystem *pThis
{
pThis->__kmemsysGetEccDedCountRegAddr__ = &kmemsysGetEccDedCountRegAddr_GH100;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000420UL) )) /* ChipHal: TU102 | GA100 */
else
{
pThis->__kmemsysGetEccDedCountRegAddr__ = &kmemsysGetEccDedCountRegAddr_TU102;
}
// default
else
{
pThis->__kmemsysGetEccDedCountRegAddr__ = &kmemsysGetEccDedCountRegAddr_4a4dee;
}
// Hal function -- kmemsysGetEccCounts
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000420UL) )) /* ChipHal: TU102 | GA100 | GH100 */
{
pThis->__kmemsysGetEccCounts__ = &kmemsysGetEccCounts_TU102;
}
// default
else
{
pThis->__kmemsysGetEccCounts__ = &kmemsysGetEccCounts_b3696a;
}
// Hal function -- kmemsysClearEccCounts
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000420UL) )) /* ChipHal: TU102 | GA100 | GH100 */
{
pThis->__kmemsysClearEccCounts__ = &kmemsysClearEccCounts_TU102;
}
// default
else
{
pThis->__kmemsysClearEccCounts__ = &kmemsysClearEccCounts_b3696a;
}
pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelMemorySystem_engstateConstructEngine;

View File

@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -225,8 +225,6 @@ struct KernelMemorySystem {
NvU32 (*__kmemsysGetMaxFbpas__)(OBJGPU *, struct KernelMemorySystem *);
NvU32 (*__kmemsysGetEccDedCountSize__)(OBJGPU *, struct KernelMemorySystem *);
NvU32 (*__kmemsysGetEccDedCountRegAddr__)(OBJGPU *, struct KernelMemorySystem *, NvU32, NvU32);
void (*__kmemsysGetEccCounts__)(OBJGPU *, struct KernelMemorySystem *, NvU32 *, NvU32 *);
void (*__kmemsysClearEccCounts__)(OBJGPU *, struct KernelMemorySystem *);
NV_STATUS (*__kmemsysStateLoad__)(POBJGPU, struct KernelMemorySystem *, NvU32);
NV_STATUS (*__kmemsysStateUnload__)(POBJGPU, struct KernelMemorySystem *, NvU32);
NV_STATUS (*__kmemsysStatePostUnload__)(POBJGPU, struct KernelMemorySystem *, NvU32);
@ -336,10 +334,6 @@ NV_STATUS __nvoc_objCreate_KernelMemorySystem(KernelMemorySystem**, Dynamic*, Nv
#define kmemsysGetEccDedCountSize_HAL(pGpu, pKernelMemorySystem) kmemsysGetEccDedCountSize_DISPATCH(pGpu, pKernelMemorySystem)
#define kmemsysGetEccDedCountRegAddr(pGpu, pKernelMemorySystem, fbpa, subp) kmemsysGetEccDedCountRegAddr_DISPATCH(pGpu, pKernelMemorySystem, fbpa, subp)
#define kmemsysGetEccDedCountRegAddr_HAL(pGpu, pKernelMemorySystem, fbpa, subp) kmemsysGetEccDedCountRegAddr_DISPATCH(pGpu, pKernelMemorySystem, fbpa, subp)
#define kmemsysGetEccCounts(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysGetEccCounts_DISPATCH(pGpu, pKernelMemorySystem, arg0, arg1)
#define kmemsysGetEccCounts_HAL(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysGetEccCounts_DISPATCH(pGpu, pKernelMemorySystem, arg0, arg1)
#define kmemsysClearEccCounts(pGpu, pKernelMemorySystem) kmemsysClearEccCounts_DISPATCH(pGpu, pKernelMemorySystem)
#define kmemsysClearEccCounts_HAL(pGpu, pKernelMemorySystem) kmemsysClearEccCounts_DISPATCH(pGpu, pKernelMemorySystem)
#define kmemsysStateLoad(pGpu, pEngstate, arg0) kmemsysStateLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kmemsysStateUnload(pGpu, pEngstate, arg0) kmemsysStateUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kmemsysStatePostUnload(pGpu, pEngstate, arg0) kmemsysStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
@ -532,6 +526,19 @@ static inline NvBool kmemsysNeedInvalidateGpuCacheOnMap(OBJGPU *pGpu, struct Ker
#define kmemsysNeedInvalidateGpuCacheOnMap_HAL(pGpu, pKernelMemorySystem, bIsVolatile, aperture) kmemsysNeedInvalidateGpuCacheOnMap(pGpu, pKernelMemorySystem, bIsVolatile, aperture)
void kmemsysGetEccCounts_TU102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 *arg0, NvU32 *arg1);
#ifdef __nvoc_kern_mem_sys_h_disabled
static inline void kmemsysGetEccCounts(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 *arg0, NvU32 *arg1) {
NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!");
}
#else //__nvoc_kern_mem_sys_h_disabled
#define kmemsysGetEccCounts(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysGetEccCounts_TU102(pGpu, pKernelMemorySystem, arg0, arg1)
#endif //__nvoc_kern_mem_sys_h_disabled
#define kmemsysGetEccCounts_HAL(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysGetEccCounts(pGpu, pKernelMemorySystem, arg0, arg1)
NV_STATUS kmemsysConstructEngine_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, ENGDESCRIPTOR arg0);
static inline NV_STATUS kmemsysConstructEngine_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, ENGDESCRIPTOR arg0) {
@ -766,10 +773,6 @@ NvU32 kmemsysGetEccDedCountSize_TU102(OBJGPU *pGpu, struct KernelMemorySystem *p
NvU32 kmemsysGetEccDedCountSize_GH100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
static inline NvU32 kmemsysGetEccDedCountSize_4a4dee(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
return 0;
}
static inline NvU32 kmemsysGetEccDedCountSize_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
return pKernelMemorySystem->__kmemsysGetEccDedCountSize__(pGpu, pKernelMemorySystem);
}
@ -778,34 +781,10 @@ NvU32 kmemsysGetEccDedCountRegAddr_TU102(OBJGPU *pGpu, struct KernelMemorySystem
NvU32 kmemsysGetEccDedCountRegAddr_GH100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 fbpa, NvU32 subp);
static inline NvU32 kmemsysGetEccDedCountRegAddr_4a4dee(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 fbpa, NvU32 subp) {
return 0;
}
static inline NvU32 kmemsysGetEccDedCountRegAddr_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 fbpa, NvU32 subp) {
return pKernelMemorySystem->__kmemsysGetEccDedCountRegAddr__(pGpu, pKernelMemorySystem, fbpa, subp);
}
void kmemsysGetEccCounts_TU102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 *arg0, NvU32 *arg1);
static inline void kmemsysGetEccCounts_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 *arg0, NvU32 *arg1) {
return;
}
static inline void kmemsysGetEccCounts_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 *arg0, NvU32 *arg1) {
pKernelMemorySystem->__kmemsysGetEccCounts__(pGpu, pKernelMemorySystem, arg0, arg1);
}
void kmemsysClearEccCounts_TU102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem);
static inline void kmemsysClearEccCounts_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
return;
}
static inline void kmemsysClearEccCounts_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) {
pKernelMemorySystem->__kmemsysClearEccCounts__(pGpu, pKernelMemorySystem);
}
static inline NV_STATUS kmemsysStateLoad_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, NvU32 arg0) {
return pEngstate->__kmemsysStateLoad__(pGpu, pEngstate, arg0);
}
@ -968,9 +947,9 @@ static inline NV_STATUS kmemsysInitMIGGPUInstanceMemConfigForSwizzId(OBJGPU *arg
#undef PRIVATE_FIELD
#define IS_COHERENT_CPU_ATS_OFFSET(kmemsys, offset, length) \
(kmemsys && ((offset) >= kmemsys->coherentCpuFbBase) && \
(((NvU64)offset + size) <= kmemsys->coherentCpuFbEnd))
#define IS_COHERENT_CPU_ATS_OFFSET(kmemsys, offset, length) \
(kmemsys && ((offset) >= (kmemsys->coherentCpuFbBase + kmemsys->numaOnlineBase)) && \
(((NvU64)offset + size) <= (kmemsys->coherentCpuFbBase + kmemsys->numaOnlineBase + kmemsys->numaOnlineSize)))
#endif // KERN_MEM_SYS_H

View File

@ -494,17 +494,6 @@ static void __nvoc_init_funcTable_KernelBif_1(KernelBif *pThis, RmHalspecOwner *
pThis->__kbifGetEccCounts__ = &kbifGetEccCounts_4a4dee;
}
// Hal function -- kbifClearEccCounts
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__kbifClearEccCounts__ = &kbifClearEccCounts_GH100;
}
// default
else
{
pThis->__kbifClearEccCounts__ = &kbifClearEccCounts_56cd7a;
}
pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelBif_engstateConstructEngine;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelBif_engstateStateInitLocked;

View File

@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -117,7 +117,6 @@ struct KernelBif {
NV_STATUS (*__kbifPreOsGlobalErotGrantRequest__)(struct OBJGPU *, struct KernelBif *);
void (*__kbifCacheVFInfo__)(struct OBJGPU *, struct KernelBif *);
NvU32 (*__kbifGetEccCounts__)(struct OBJGPU *, struct KernelBif *);
NV_STATUS (*__kbifClearEccCounts__)(struct OBJGPU *, struct KernelBif *);
NV_STATUS (*__kbifStatePreLoad__)(POBJGPU, struct KernelBif *, NvU32);
NV_STATUS (*__kbifStatePostUnload__)(POBJGPU, struct KernelBif *, NvU32);
void (*__kbifStateDestroy__)(POBJGPU, struct KernelBif *);
@ -270,8 +269,6 @@ NV_STATUS __nvoc_objCreate_KernelBif(KernelBif**, Dynamic*, NvU32);
#define kbifCacheVFInfo_HAL(pGpu, pKernelBif) kbifCacheVFInfo_DISPATCH(pGpu, pKernelBif)
#define kbifGetEccCounts(pGpu, pKernelBif) kbifGetEccCounts_DISPATCH(pGpu, pKernelBif)
#define kbifGetEccCounts_HAL(pGpu, pKernelBif) kbifGetEccCounts_DISPATCH(pGpu, pKernelBif)
#define kbifClearEccCounts(pGpu, pKernelBif) kbifClearEccCounts_DISPATCH(pGpu, pKernelBif)
#define kbifClearEccCounts_HAL(pGpu, pKernelBif) kbifClearEccCounts_DISPATCH(pGpu, pKernelBif)
#define kbifStatePreLoad(pGpu, pEngstate, arg0) kbifStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kbifStatePostUnload(pGpu, pEngstate, arg0) kbifStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kbifStateDestroy(pGpu, pEngstate) kbifStateDestroy_DISPATCH(pGpu, pEngstate)
@ -633,16 +630,6 @@ static inline NvU32 kbifGetEccCounts_DISPATCH(struct OBJGPU *pGpu, struct Kernel
return pKernelBif->__kbifGetEccCounts__(pGpu, pKernelBif);
}
NV_STATUS kbifClearEccCounts_GH100(struct OBJGPU *pGpu, struct KernelBif *pKernelBif);
static inline NV_STATUS kbifClearEccCounts_56cd7a(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) {
return NV_OK;
}
static inline NV_STATUS kbifClearEccCounts_DISPATCH(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) {
return pKernelBif->__kbifClearEccCounts__(pGpu, pKernelBif);
}
static inline NV_STATUS kbifStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate, NvU32 arg0) {
return pEngstate->__kbifStatePreLoad__(pGpu, pEngstate, arg0);
}

View File

@ -577,7 +577,7 @@ static void __nvoc_init_funcTable_KernelGsp_1(KernelGsp *pThis, RmHalspecOwner *
}
// Hal function -- kgspGetFrtsSize
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000400UL) )) /* ChipHal: GA100 | GH100 */
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */
{
pThis->__kgspGetFrtsSize__ = &kgspGetFrtsSize_4a4dee;
}

View File

@ -365,6 +365,7 @@ struct KernelGsp {
NvU64 logElfDataSize;
PORT_MUTEX *pNvlogFlushMtx;
NvBool bLibosLogsPollingEnabled;
NvU8 bootAttempts;
NvBool bInInit;
NvBool bInLockdown;
NvBool bPollingForRpcResponse;
@ -1210,6 +1211,17 @@ static inline NvU64 kgspGetFwHeapSize(struct OBJGPU *pGpu, struct KernelGsp *pKe
#define kgspGetFwHeapSize(pGpu, pKernelGsp, posteriorFbSize) kgspGetFwHeapSize_IMPL(pGpu, pKernelGsp, posteriorFbSize)
#endif //__nvoc_kernel_gsp_h_disabled
NvU64 kgspGetWprEndMargin_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp);
#ifdef __nvoc_kernel_gsp_h_disabled
static inline NvU64 kgspGetWprEndMargin(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!");
return 0;
}
#else //__nvoc_kernel_gsp_h_disabled
#define kgspGetWprEndMargin(pGpu, pKernelGsp) kgspGetWprEndMargin_IMPL(pGpu, pKernelGsp)
#endif //__nvoc_kernel_gsp_h_disabled
void kgspSetupLibosInitArgs_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp);
#ifdef __nvoc_kernel_gsp_h_disabled

View File

@ -57,7 +57,6 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x1349, 0x36d1, 0x17aa, "NVIDIA GeForce 930A" },
{ 0x1349, 0x36d8, 0x17aa, "NVIDIA GeForce 930A" },
{ 0x134B, 0x0000, 0x0000, "NVIDIA GeForce 940MX" },
{ 0x134B, 0x0008, 0x1414, "NVIDIA GeForce GPU" },
{ 0x134D, 0x0000, 0x0000, "NVIDIA GeForce 940MX" },
{ 0x134E, 0x0000, 0x0000, "NVIDIA GeForce 930MX" },
{ 0x134F, 0x0000, 0x0000, "NVIDIA GeForce 920MX" },
@ -897,12 +896,15 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2322, 0x17a4, 0x10de, "NVIDIA H800 PCIe" },
{ 0x2324, 0x17a6, 0x10de, "NVIDIA H800" },
{ 0x2324, 0x17a8, 0x10de, "NVIDIA H800" },
{ 0x2329, 0x198b, 0x10de, "NVIDIA H20" },
{ 0x2329, 0x198c, 0x10de, "NVIDIA H20" },
{ 0x2330, 0x16c0, 0x10de, "NVIDIA H100 80GB HBM3" },
{ 0x2330, 0x16c1, 0x10de, "NVIDIA H100 80GB HBM3" },
{ 0x2331, 0x1626, 0x10de, "NVIDIA H100 PCIe" },
{ 0x2339, 0x17fc, 0x10de, "NVIDIA H100" },
{ 0x233A, 0x183a, 0x10de, "NVIDIA H800 NVL" },
{ 0x2342, 0x16eb, 0x10de, "NVIDIA GH200 120GB" },
{ 0x2342, 0x1805, 0x10de, "NVIDIA GH200 120GB" },
{ 0x2342, 0x1809, 0x10de, "NVIDIA GH200 480GB" },
{ 0x2414, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" },
{ 0x2420, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" },
@ -956,6 +958,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2571, 0x1611, 0x10de, "NVIDIA RTX A2000 12GB" },
{ 0x2571, 0x1611, 0x17aa, "NVIDIA RTX A2000 12GB" },
{ 0x2582, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050" },
{ 0x2584, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050" },
{ 0x25A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" },
{ 0x25A0, 0x8928, 0x103c, "NVIDIA GeForce RTX 3050Ti Laptop GPU" },
{ 0x25A0, 0x89f9, 0x103c, "NVIDIA GeForce RTX 3050Ti Laptop GPU" },
@ -988,7 +991,6 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x25FA, 0x0000, 0x0000, "NVIDIA RTX A2000 Embedded GPU" },
{ 0x25FB, 0x0000, 0x0000, "NVIDIA RTX A500 Embedded GPU" },
{ 0x2684, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090" },
{ 0x2685, 0x0000, 0x0000, "NVIDIA GeForce RTX 4090 D" },
{ 0x26B1, 0x16a1, 0x1028, "NVIDIA RTX 6000 Ada Generation" },
{ 0x26B1, 0x16a1, 0x103c, "NVIDIA RTX 6000 Ada Generation" },
{ 0x26B1, 0x16a1, 0x10de, "NVIDIA RTX 6000 Ada Generation" },
@ -997,6 +999,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B2, 0x17fa, 0x103c, "NVIDIA RTX 5000 Ada Generation" },
{ 0x26B2, 0x17fa, 0x10de, "NVIDIA RTX 5000 Ada Generation" },
{ 0x26B2, 0x17fa, 0x17aa, "NVIDIA RTX 5000 Ada Generation" },
{ 0x26B3, 0x1934, 0x103c, "NVIDIA RTX 5880 Ada Generation" },
{ 0x26B3, 0x1934, 0x10de, "NVIDIA RTX 5880 Ada Generation" },
{ 0x26B5, 0x169d, 0x10de, "NVIDIA L40" },
{ 0x26B5, 0x17da, 0x10de, "NVIDIA L40" },

View File

@ -298,6 +298,8 @@ typedef struct rpc_rc_triggered_v17_02
NvU32 exceptType;
NvU32 scope;
NvU16 partitionAttributionId;
NvU32 rcJournalBufferSize;
NvU8 rcJournalBuffer[];
} rpc_rc_triggered_v17_02;
typedef rpc_rc_triggered_v17_02 rpc_rc_triggered_v;
@ -1811,6 +1813,21 @@ static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_rc_triggered_v17_02[] = {
.name = "partitionAttributionId"
#endif
},
{
.vtype = vtype_NvU32,
.offset = NV_OFFSETOF(rpc_rc_triggered_v17_02, rcJournalBufferSize),
#if (defined(DEBUG) || defined(DEVELOP))
.name = "rcJournalBufferSize"
#endif
},
{
.vtype = vtype_NvU8_array,
.offset = NV_OFFSETOF(rpc_rc_triggered_v17_02, rcJournalBuffer),
.array_length = 0,
#if (defined(DEBUG) || defined(DEVELOP))
.name = "rcJournalBuffer"
#endif
},
{
.vtype = vt_end
}

View File

@ -7363,6 +7363,36 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
#endif
},
{ /* [475] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEccGetEciCounters_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x20803401u,
/*paramSize=*/ sizeof(NV2080_CTRL_ECC_GET_ECI_COUNTERS_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "subdeviceCtrlCmdEccGetEciCounters"
#endif
},
{ /* [476] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEccGetVolatileCounts_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x20803402u,
/*paramSize=*/ sizeof(NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "subdeviceCtrlCmdEccGetVolatileCounts"
#endif
},
{ /* [477] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7377,7 +7407,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaRange"
#endif
},
{ /* [476] */
{ /* [478] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x102204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7392,7 +7422,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaSetupInstanceMemBlock"
#endif
},
{ /* [477] */
{ /* [479] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100004u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7407,7 +7437,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaGetRange"
#endif
},
{ /* [478] */
{ /* [480] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7422,7 +7452,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaGetFabricMemStats"
#endif
},
{ /* [479] */
{ /* [481] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4211u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7437,7 +7467,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGspGetFeatures"
#endif
},
{ /* [480] */
{ /* [482] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7452,7 +7482,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGspGetRmHeapStats"
#endif
},
{ /* [481] */
{ /* [483] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7467,7 +7497,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGrmgrGetGrFsInfo"
#endif
},
{ /* [482] */
{ /* [484] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7482,7 +7512,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixGc6BlockerRefCnt"
#endif
},
{ /* [483] */
{ /* [485] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7497,7 +7527,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixAllowDisallowGcoff"
#endif
},
{ /* [484] */
{ /* [486] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7512,7 +7542,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixAudioDynamicPower"
#endif
},
{ /* [485] */
{ /* [487] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7527,7 +7557,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixVidmemPersistenceStatus"
#endif
},
{ /* [486] */
{ /* [488] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7542,7 +7572,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixUpdateTgpStatus"
#endif
},
{ /* [487] */
{ /* [489] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7557,7 +7587,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalBootloadGspVgpuPluginTask"
#endif
},
{ /* [488] */
{ /* [490] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7572,7 +7602,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalShutdownGspVgpuPluginTask"
#endif
},
{ /* [489] */
{ /* [491] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7587,7 +7617,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalPgpuAddVgpuType"
#endif
},
{ /* [490] */
{ /* [492] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7602,7 +7632,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalEnumerateVgpuPerPgpu"
#endif
},
{ /* [491] */
{ /* [493] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7617,7 +7647,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalClearGuestVmInfo"
#endif
},
{ /* [492] */
{ /* [494] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7632,7 +7662,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetVgpuFbUsage"
#endif
},
{ /* [493] */
{ /* [495] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7647,7 +7677,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalSetVgpuEncoderCapacity"
#endif
},
{ /* [494] */
{ /* [496] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7662,7 +7692,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalCleanupGspVgpuPluginResources"
#endif
},
{ /* [495] */
{ /* [497] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7677,7 +7707,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetPgpuFsEncoding"
#endif
},
{ /* [496] */
{ /* [498] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7692,7 +7722,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetPgpuMigrationSupport"
#endif
},
{ /* [497] */
{ /* [499] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7707,7 +7737,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalSetVgpuMgrConfig"
#endif
},
{ /* [498] */
{ /* [500] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7722,7 +7752,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetAvailableHshubMask"
#endif
},
{ /* [499] */
{ /* [501] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7737,7 +7767,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamples"
#endif
},
{ /* [500] */
{ /* [502] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7757,7 +7787,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
const struct NVOC_EXPORT_INFO __nvoc_export_info_Subdevice =
{
/*numEntries=*/ 501,
/*numEntries=*/ 503,
/*pExportEntries=*/ __nvoc_exported_method_def_Subdevice
};
@ -9208,6 +9238,14 @@ static void __nvoc_init_funcTable_Subdevice_2(Subdevice *pThis, RmHalspecOwner *
pThis->__subdeviceCtrlCmdEccGetClientExposedCounters__ = &subdeviceCtrlCmdEccGetClientExposedCounters_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
pThis->__subdeviceCtrlCmdEccGetEciCounters__ = &subdeviceCtrlCmdEccGetEciCounters_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
pThis->__subdeviceCtrlCmdEccGetVolatileCounts__ = &subdeviceCtrlCmdEccGetVolatileCounts_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
pThis->__subdeviceCtrlCmdGpuQueryEccConfiguration__ = &subdeviceCtrlCmdGpuQueryEccConfiguration_IMPL;
#endif
@ -9867,10 +9905,6 @@ static void __nvoc_init_funcTable_Subdevice_2(Subdevice *pThis, RmHalspecOwner *
pThis->__subdeviceGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_subdeviceGetMemoryMappingDescriptor;
pThis->__subdeviceUnregisterEvent__ = &__nvoc_thunk_Notifier_subdeviceUnregisterEvent;
pThis->__subdeviceControlSerialization_Prologue__ = &__nvoc_thunk_RmResource_subdeviceControlSerialization_Prologue;
pThis->__subdeviceCanCopy__ = &__nvoc_thunk_RsResource_subdeviceCanCopy;
}
static void __nvoc_init_funcTable_Subdevice_3(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) {
@ -9881,6 +9915,10 @@ static void __nvoc_init_funcTable_Subdevice_3(Subdevice *pThis, RmHalspecOwner *
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
pThis->__subdeviceControlSerialization_Prologue__ = &__nvoc_thunk_RmResource_subdeviceControlSerialization_Prologue;
pThis->__subdeviceCanCopy__ = &__nvoc_thunk_RsResource_subdeviceCanCopy;
pThis->__subdeviceIsDuplicate__ = &__nvoc_thunk_RsResource_subdeviceIsDuplicate;
pThis->__subdeviceControlSerialization_Epilogue__ = &__nvoc_thunk_RmResource_subdeviceControlSerialization_Epilogue;

View File

@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -466,6 +466,8 @@ struct Subdevice {
NV_STATUS (*__subdeviceCtrlCmdFlcnGetCtxBufferInfo__)(struct Subdevice *, NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdFlcnGetCtxBufferSize__)(struct Subdevice *, NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdEccGetClientExposedCounters__)(struct Subdevice *, NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdEccGetEciCounters__)(struct Subdevice *, NV2080_CTRL_ECC_GET_ECI_COUNTERS_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdEccGetVolatileCounts__)(struct Subdevice *, NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGpuQueryEccConfiguration__)(struct Subdevice *, NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGpuSetEccConfiguration__)(struct Subdevice *, NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdGpuResetEccErrorStatus__)(struct Subdevice *, NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS *);
@ -675,6 +677,7 @@ struct Subdevice {
NvBool bSchedPolicySet;
NvBool bGcoffDisallowed;
NvBool bUpdateTGP;
NvBool bVFRefCntIncRequested;
};
#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__
@ -1061,6 +1064,8 @@ NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_C
#define subdeviceCtrlCmdFlcnGetCtxBufferInfo(pSubdevice, pParams) subdeviceCtrlCmdFlcnGetCtxBufferInfo_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdFlcnGetCtxBufferSize(pSubdevice, pParams) subdeviceCtrlCmdFlcnGetCtxBufferSize_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdEccGetClientExposedCounters(pSubdevice, pParams) subdeviceCtrlCmdEccGetClientExposedCounters_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdEccGetEciCounters(pSubdevice, pParams) subdeviceCtrlCmdEccGetEciCounters_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdEccGetVolatileCounts(pSubdevice, pParams) subdeviceCtrlCmdEccGetVolatileCounts_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdGpuQueryEccConfiguration(pSubdevice, pConfig) subdeviceCtrlCmdGpuQueryEccConfiguration_DISPATCH(pSubdevice, pConfig)
#define subdeviceCtrlCmdGpuSetEccConfiguration(pSubdevice, pConfig) subdeviceCtrlCmdGpuSetEccConfiguration_DISPATCH(pSubdevice, pConfig)
#define subdeviceCtrlCmdGpuResetEccErrorStatus(pSubdevice, pParams) subdeviceCtrlCmdGpuResetEccErrorStatus_DISPATCH(pSubdevice, pParams)
@ -3349,6 +3354,18 @@ static inline NV_STATUS subdeviceCtrlCmdEccGetClientExposedCounters_DISPATCH(str
return pSubdevice->__subdeviceCtrlCmdEccGetClientExposedCounters__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdEccGetEciCounters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_GET_ECI_COUNTERS_PARAMS *pParams);
static inline NV_STATUS subdeviceCtrlCmdEccGetEciCounters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_GET_ECI_COUNTERS_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdEccGetEciCounters__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdEccGetVolatileCounts_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS *pParams);
static inline NV_STATUS subdeviceCtrlCmdEccGetVolatileCounts_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_GET_VOLATILE_COUNTS_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdEccGetVolatileCounts__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdGpuQueryEccConfiguration_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS *pConfig);
static inline NV_STATUS subdeviceCtrlCmdGpuQueryEccConfiguration_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS *pConfig) {
@ -4397,6 +4414,10 @@ static inline void subdeviceRestoreLockedClock(struct Subdevice *pSubdevice, str
return;
}
static inline void subdeviceRestoreVF(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext) {
return;
}
static inline void subdeviceReleaseNvlinkErrorInjectionMode(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext) {
return;
}

View File

@ -59,6 +59,7 @@ NvBool rmapiutilIsExternalClassIdInternalOnly(NvU32 externalClassId);
//
// Return the flags and access right associated with this RM control command
//
NV_STATUS rmapiutilGetControlInfo(NvU32 cmd, NvU32 *pFlags, NvU32 *pAccessRight);
NV_STATUS rmapiutilGetControlInfo(NvU32 cmd, NvU32 *pFlags,
NvU32 *pAccessRight, NvU32 *pParamsSize);
#endif /* RMAPI_UTILS_H */

View File

@ -910,7 +910,7 @@
// will fail in such a case.
//
// TYPE_DEFAULT let RM to choose a P2P type. The priority is:
// C2C > NVLINK > mailbox P2P > BAR1P2P
// C2C > NVLINK > mailbox P2P > BAR1P2P
//
// TYPE_C2C to use C2C P2P if it is supported
// TYPE_NVLINK to use NVLINK P2P, including INDIRECT_NVLINK_P2P if it is supported
@ -1887,7 +1887,7 @@
// and could benefit from the lesser reserved GPU memory. Other use cases may
// exhibit an even more pathological/stressful resource allocation pattern,
// which can be enabled (up to a limit) with this regkey.
//
//
// However, NVIDIA does not support setting this registry key, and will require
// that any bugs observed with it set be reproducible with the default setting
// as well.
@ -1912,7 +1912,7 @@
// WAR for BlueField3: Bug 4040336
// BF3's PCI MMIO bus address 0x800000000000 is too high for Ampere to address.
// Due to this, BF3's bus address is now moved to < 4GB. So, the CPU PA is no longer
// the same as the bus address and this regkey adjusts the CPU PA passed in to the
// the same as the bus address and this regkey adjusts the CPU PA passed in to the
// correct bus address.
//
#define NV_REG_STR_RM_DMA_ADJUST_PEER_MMIO_BF3 "RmDmaAdjustPeerMmioBF3"
@ -1929,4 +1929,33 @@
#define NV_REG_STR_RM_FORCE_DISABLE_IOMAP_WC_NO 0x00000000
#define NV_REG_STR_RM_FORCE_DISABLE_IOMAP_WC_DEFAULT NV_REG_STR_RM_FORCE_DISABLE_IOMAP_WC_NO
//
// TYPE DWORD
// This regkey will increase the margin after the end of WPR2 when booting GSP-RM.
//
// This margin can be used to help GSP firmware boot in the presence of ECC
// errors which might affect the default GSP firmware image location in the GPU
// framebuffer. If GPU firmware is able to successfully boot with this registry
// key enabled, it should scan the margin area to attempt to handle ECC errors in
// the region, so that the region can be safely used in a subsequent boot.
//
// NV_REG_RM_GSP_WPR_END_MARGIN_MB
// Possible values:
// 0 - (Default) use the default calculated GSP WPR size
// 1+ - size of the end margin in megabytes
//
// NV_REG_RM_GSP_WPR_END_MARGIN_APPLY
// Possible values:
// _ON_RETRY (0) - (Default) only increase the margin to the requested size
// when retrying GSP firmware boot after a failed boot attempt
// _ALWAYS (1) - increase the margin to the requested size for all GSP
// firmware boot attempts, including the first
//
#define NV_REG_STR_RM_GSP_WPR_END_MARGIN "RmGspWprEndMargin"
#define NV_REG_RM_GSP_WPR_END_MARGIN_MB 30:0
#define NV_REG_RM_GSP_WPR_END_MARGIN_APPLY 31:31
#define NV_REG_RM_GSP_WPR_END_MARGIN_APPLY_ON_RETRY 0x00000000
#define NV_REG_RM_GSP_WPR_END_MARGIN_APPLY_ALWAYS 0x00000001
#endif // NVRM_REGISTRY_H

View File

@ -1587,7 +1587,7 @@ NV_STATUS rpcRmApiControl_GSP
GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, RM_LOCK_MODULES_RPC, &gpuMaskRelease));
}
rmapiutilGetControlInfo(cmd, &ctrlFlags, &ctrlAccessRight);
rmapiutilGetControlInfo(cmd, &ctrlFlags, &ctrlAccessRight, NULL);
bCacheable = rmapiControlIsCacheable(ctrlFlags, ctrlAccessRight, NV_TRUE);
pCallContext = resservGetTlsCallContext();

View File

@ -88,7 +88,7 @@ static NvBool rcdProbeAllGpusPresent(NvU64 ip);
static volatile NvS32 probeGpuRecursion = 0;
#endif
#endif
static NvU32 _rcdbGetOcaRecordSizeWithHeader(Journal *pRcDB, RMCD_RECORD_TYPE type);
static NvU32 _rcdbGetOcaRecordSize(Journal *pRcDB, RMCD_RECORD_TYPE type);
static volatile NvS32 concurrentRingBufferAccess = 0;
static volatile NvS32 assertListRecursion = 0;
static void rcdbFindRingBufferForType(Journal *pRcDB, RMCD_RECORD_TYPE recType, RING_BUFFER_LOG **ppRingBuffer);
@ -678,7 +678,7 @@ rcdbGetRcDiagRecBoundaries_IMPL
for (i = 0; i < pRingBuffer->numEntries; ++i)
{
// get a pointer to the record from the buffer.
pCommon = (RmRCCommonJournal_RECORD *)(((NvU8 *)pRingBuffer->pBuffer) + (_rcdbGetOcaRecordSizeWithHeader(pRcDB, RmRcDiagReport) * ((logicalStartIdx + i) % pRingBuffer->maxEntries)));
pCommon = (RmRCCommonJournal_RECORD *)(((NvU8 *)pRingBuffer->pBuffer) + (rcdbGetOcaRecordSizeWithHeader(pRcDB, RmRcDiagReport) * ((logicalStartIdx + i) % pRingBuffer->maxEntries)));
pRecord = (RmRcDiag_RECORD*) &(pCommon[1]);
// check to see if the record qualifies
@ -745,7 +745,7 @@ rcdbGetRcDiagRecBoundaries_IMPL
return status;
}
NV_STATUS
RmRCCommonJournal_RECORD *
rcdbAddRcDiagRec_IMPL
(
OBJGPU *pGpu,
@ -753,6 +753,7 @@ rcdbAddRcDiagRec_IMPL
RmRcDiag_RECORD *pRmDiagWrapBuffRec
)
{
RmRCCommonJournal_RECORD *pCommon;
NvU32 usec;
// Create Records, then write it.
@ -767,11 +768,32 @@ rcdbAddRcDiagRec_IMPL
}
osGetCurrentTime(&(pRmDiagWrapBuffRec->timeStamp), &usec);
rcdbAddRecToRingBuffer(pGpu, pRcDB, RmRcDiagReport,
sizeof(RmRcDiag_RECORD), (NvU8 *) pRmDiagWrapBuffRec);
pCommon = rcdbAddRecToRingBuffer(pGpu, pRcDB, RmRcDiagReport,
sizeof(RmRcDiag_RECORD), (NvU8 *)pRmDiagWrapBuffRec);
pRcDB->RcErrRptRecordsDropped |= pRcDB->RcErrRptNextIdx >= MAX_RCDB_RCDIAG_WRAP_BUFF;
return NV_OK;
return pCommon;
}
/*!
 * Mirror an RC diagnostic record received from GSP-RM into the CPU-side
 * system journal ring buffer.
 *
 * @param[in] pGpu        GPU object the record belongs to
 * @param[in] pRcDB       CPU-side journal to insert into
 * @param[in] pCommonGsp  common journal header as captured on GSP
 * @param[in] pRmDiagGsp  RC diagnostic payload as captured on GSP
 *
 * @return  pointer to the CPU-side common journal record on success,
 *          NULL if the record could not be added (e.g. ring buffer busy).
 */
RmRCCommonJournal_RECORD *
rcdbAddRcDiagRecFromGsp_IMPL
(
    OBJGPU  *pGpu,
    Journal *pRcDB,
    RmRCCommonJournal_RECORD *pCommonGsp,
    RmRcDiag_RECORD          *pRmDiagGsp
)
{
    // Insert the GSP payload as a fresh CPU-side record first.
    RmRCCommonJournal_RECORD *pCommonCpu = rcdbAddRcDiagRec(pGpu, pRcDB, pRmDiagGsp);

    if (pCommonCpu == NULL)
        return NULL;

    // The CPU-side copy must describe the same GPU as the GSP original.
    NV_ASSERT(pCommonCpu->GPUTag == pCommonGsp->GPUTag);

    // Fold the state flags recorded on GSP into the CPU-side header.
    pCommonCpu->stateMask |= pCommonGsp->stateMask;

    return pCommonCpu;
}
NV_STATUS
@ -811,7 +833,7 @@ _rcdbInternalGetRcDiagRec
i %= pRingBuffer->maxEntries;
// get a pointer to the record from the buffer.
pCommon = (RmRCCommonJournal_RECORD *)(((NvU8 *)pRingBuffer->pBuffer) + (_rcdbGetOcaRecordSizeWithHeader(pRcDB, RmRcDiagReport) * i));
pCommon = (RmRCCommonJournal_RECORD *)(((NvU8 *)pRingBuffer->pBuffer) + (rcdbGetOcaRecordSizeWithHeader(pRcDB, RmRcDiagReport) * i));
pRecord = (RmRcDiag_RECORD*) &(pCommon[1]);
// verify we have the record that was requested.
@ -841,6 +863,7 @@ _rcdbInternalGetRcDiagRec
exit:
return status;
}
NV_STATUS
rcdbGetRcDiagRec_IMPL
(
@ -851,19 +874,23 @@ rcdbGetRcDiagRec_IMPL
NvU32 processId
)
{
NV_STATUS status = NV_ERR_INVALID_INDEX;
NV_STATUS status;
if (ppRmDiagWrapBuffRec == NULL)
{
return NV_ERR_INVALID_ARGUMENT;
}
// assume we will fail.
*ppRmDiagWrapBuffRec = NULL;
if (portAtomicIncrementS32(&concurrentRingBufferAccess) == 1)
{
status = _rcdbInternalGetRcDiagRec(pRcDB, reqIdx, ppRmDiagWrapBuffRec, owner, processId);
}
else
{
status = NV_ERR_BUSY_RETRY;
}
portAtomicDecrementS32(&concurrentRingBufferAccess);
return status;
}
@ -1857,7 +1884,7 @@ rcdbInsertRingBufferToList(
NvU32 recordSize;
NvU32 i;
recordSize = _rcdbGetOcaRecordSizeWithHeader(pRcDB, pRingBuffer->entryType);
recordSize = rcdbGetOcaRecordSizeWithHeader(pRcDB, pRingBuffer->entryType);
//
// Order does not matter here because the record will be inserted into the
@ -1889,7 +1916,7 @@ rcdbInsertRingBufferCollectionToList(
NvU32 recSize = pCurrentBuffer->bufferSize;
NV_ASSERT(pCurrentBuffer->maxEntries *
_rcdbGetOcaRecordSizeWithHeader(pRcDB, pCurrentBuffer->entryType) ==
rcdbGetOcaRecordSizeWithHeader(pRcDB, pCurrentBuffer->entryType) ==
pCurrentBuffer->bufferSize);
if (recSize > 0)
@ -2473,7 +2500,7 @@ rcdbCreateRingBuffer_IMPL
rcdbFindRingBufferForType(pRcDB, type, &pRingBuffer);
entrySize = _rcdbGetOcaRecordSizeWithHeader(pRcDB, type);
entrySize = rcdbGetOcaRecordSizeWithHeader(pRcDB, type);
if (entrySize == 0)
{
NV_ASSERT(entrySize != 0);
@ -2646,7 +2673,7 @@ rcdbDestroyRingBuffer_IMPL
** it is assumed the caller has successfully acquired the concurrentRingBufferAccess lock.
** failure to do so can result in concurrency issues.
*/
RmRCCommonJournal_RECORD*
RmRCCommonJournal_RECORD *
_rcdbAllocRecFromRingBuffer
(
OBJGPU *pGpu,
@ -2675,10 +2702,10 @@ _rcdbAllocRecFromRingBuffer
newItemIndex = (pRingBuffer->numEntries + pRingBuffer->headIndex) % pRingBuffer->maxEntries;
// prepend the rmJournalCommon record to record.
pCommon = (RmRCCommonJournal_RECORD*)(pRingBuffer->pBuffer + (_rcdbGetOcaRecordSizeWithHeader(pRcDB, type) * newItemIndex));
pCommon = (RmRCCommonJournal_RECORD*)(pRingBuffer->pBuffer + (rcdbGetOcaRecordSizeWithHeader(pRcDB, type) * newItemIndex));
pCommon->Header.cRecordGroup = RmGroup;
pCommon->Header.cRecordType = type;
pCommon->Header.wRecordSize = (NvU16)_rcdbGetOcaRecordSizeWithHeader(pRcDB, type);
pCommon->Header.wRecordSize = (NvU16)rcdbGetOcaRecordSizeWithHeader(pRcDB, type);
rcdbSetCommonJournalRecord(pGpu, pCommon);
// Increment the number of entries or advance the head index.
@ -2710,7 +2737,7 @@ _rcdbAllocRecFromRingBuffer
**
** notes:
*/
void
RmRCCommonJournal_RECORD *
rcdbAddRecToRingBuffer_IMPL
(
OBJGPU *pGpu,
@ -2720,10 +2747,9 @@ rcdbAddRecToRingBuffer_IMPL
NvU8 *pRecord
)
{
RmRCCommonJournal_RECORD
*pCommon;
RmRCCommonJournal_RECORD *pCommon = NULL;
NV_ASSERT(recordSize == rcdbGetOcaRecordSize(pRcDB, type));
NV_ASSERT(recordSize == _rcdbGetOcaRecordSize(pRcDB, type));
if (portAtomicIncrementS32(&concurrentRingBufferAccess) == 1)
{
@ -2735,10 +2761,11 @@ rcdbAddRecToRingBuffer_IMPL
}
}
portAtomicDecrementS32(&concurrentRingBufferAccess);
return pCommon;
}
// Non-hal function to return the sizes of records that are not chip dependent.
NvU32 rcdbGetOcaRecordSize_IMPL(Journal *pRcDB, RMCD_RECORD_TYPE type)
static NvU32 _rcdbGetOcaRecordSize(Journal *pRcDB, RMCD_RECORD_TYPE type)
{
switch(type)
{
@ -2752,11 +2779,12 @@ NvU32 rcdbGetOcaRecordSize_IMPL(Journal *pRcDB, RMCD_RECORD_TYPE type)
return 0;
}
}
static NvU32 _rcdbGetOcaRecordSizeWithHeader(Journal *pRcDB, RMCD_RECORD_TYPE type)
NvU32 rcdbGetOcaRecordSizeWithHeader_IMPL(Journal *pRcDB, RMCD_RECORD_TYPE type)
{
NvU32 recSz;
recSz = rcdbGetOcaRecordSize(pRcDB, type);
recSz = _rcdbGetOcaRecordSize(pRcDB, type);
if (0 < recSz)
{
recSz += sizeof(RmRCCommonJournal_RECORD);
@ -2771,7 +2799,6 @@ static NvU32 _rcdbGetOcaRecordSizeWithHeader(Journal *pRcDB, RMCD_RECORD_TYPE ty
return NV_ALIGN_UP(recSz, 8);
}
NV_STATUS
rcdbAddRmGpuDump
(
@ -3377,7 +3404,7 @@ _rcdbGetNocatJournalRecord
idx %= pRingBuffer->numEntries;
// get a pointer to the common record & the record from the buffer.
pCommon = (RmRCCommonJournal_RECORD*)(((NvU8*)pRingBuffer->pBuffer) + (_rcdbGetOcaRecordSizeWithHeader(pRcdb, RmNocatReport) * idx));
pCommon = (RmRCCommonJournal_RECORD*)(((NvU8*)pRingBuffer->pBuffer) + (rcdbGetOcaRecordSizeWithHeader(pRcdb, RmNocatReport) * idx));
// get a pointer to the data that follows the common header, that is the record data.
pNocatEntry = (RM_NOCAT_JOURNAL_ENTRY*)(((NvU8*)pCommon) + sizeof(RmRCCommonJournal_RECORD));

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES
* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -30,10 +30,13 @@
#include "gpu/mem_mgr/rm_page_size.h"
#include "nverror.h"
#include "jt.h"
#include "gpu/falcon/kernel_falcon.h"
#include "gpu/gsp/kernel_gsp.h"
#include "published/turing/tu102/dev_nv_xve.h"
#include "published/turing/tu102/dev_gc6_island.h"
#include "published/turing/tu102/dev_gc6_island_addendum.h"
#include "published/turing/tu102/dev_falcon_v4.h"
/*!
* @brief Returns SR-IOV capabilities
@ -272,7 +275,7 @@ gpuJtVersionSanityCheck_TU102_EXIT:
* registers/interrupt registers. This function is not floorsweeping-aware so
* PRI errors are ignored
*/
void
NvBool
gpuCheckEccCounts_TU102
(
OBJGPU *pGpu
@ -295,35 +298,10 @@ gpuCheckEccCounts_TU102
"An uncorrectable ECC error detected "
"(possible firmware handling failure) "
"DRAM:%d, LTC:%d, MMU:%d, PCIE:%d", dramCount, ltcCount, mmuCount, pcieCount);
}
}
/*
* @brief Function that clears ECC error count registers.
*/
NV_STATUS
gpuClearEccCounts_TU102
(
OBJGPU *pGpu
)
{
NV_STATUS status = NV_OK;
gpuClearFbhubPoisonIntrForBug2924523_HAL(pGpu);
kmemsysClearEccCounts_HAL(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu));
kgmmuClearEccCounts_HAL(pGpu, GPU_GET_KERNEL_GMMU(pGpu));
kbusClearEccCounts_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu));
status = kbifClearEccCounts_HAL(pGpu, GPU_GET_KERNEL_BIF(pGpu));
if (status != NV_OK)
{
return status;
return NV_TRUE;
}
return NV_OK;
return NV_FALSE;
}
//
@ -405,6 +383,8 @@ gpuWaitForGfwBootComplete_TU102
NvU32 gfwBootProgressVal = 0;
RMTIMEOUT timeout;
NV_STATUS status = NV_OK;
KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu);
KernelFalcon *pKernelFalcon = staticCast(pKernelGsp, KernelFalcon);
// Use the OS timer since the GPU timer is not ready yet
gpuSetTimeout(pGpu, gpuScaleTimeout(pGpu, timeoutUs), &timeout,
@ -413,6 +393,29 @@ gpuWaitForGfwBootComplete_TU102
while (status == NV_OK)
{
if (_gpuIsGfwBootCompleted_TU102(pGpu, NULL, &gfwBootProgressVal))
{
status = NV_OK;
break;
}
status = gpuCheckTimeout(pGpu, &timeout);
}
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "failed to wait for GFW_BOOT: (progress 0x%x)\n",
gfwBootProgressVal);
return status;
}
//
// GFW runs on GSP, so wait for GSP to halt as well
// OS timer need to be used here, hence not using wrapper kflcnWaitForHalt_HAL
//
while (status == NV_OK)
{
if (FLD_TEST_DRF(_PFALCON, _FALCON, _CPUCTL_HALTED, _TRUE,
kflcnRegRead_HAL(pGpu, pKernelFalcon, NV_PFALCON_FALCON_CPUCTL)))
{
return NV_OK;
}
@ -420,8 +423,8 @@ gpuWaitForGfwBootComplete_TU102
status = gpuCheckTimeout(pGpu, &timeout);
}
NV_PRINTF(LEVEL_ERROR, "failed to wait for GFW_BOOT: (progress 0x%x)\n",
gfwBootProgressVal);
NV_PRINTF(LEVEL_ERROR, "GSP failed to halt after GFW completion\n");
return status;
}

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -907,41 +907,3 @@ kbifGetEccCounts_GH100
return count;
}
/*!
 * Clear the PCIe (XTL-EP / XPL-EP) uncorrectable ECC error status and
 * counters on GH100.
 *
 * @param[in] pGpu        GPU object pointer
 * @param[in] pKernelBif  KernelBif object pointer (unused here; kept for
 *                        HAL signature consistency)
 *
 * @return NV_OK on success, or the gpuCheckTimeout() status if the XPL-EP
 *         counter reset does not report _DONE before the timeout expires.
 */
NV_STATUS
kbifClearEccCounts_GH100
(
    OBJGPU    *pGpu,
    KernelBif *pKernelBif
)
{
    NV_STATUS status = NV_OK;
    RMTIMEOUT timeout;
    NvU32 regVal;

    // Reset XTL-EP status registers (write-1-to-clear all bits)
    GPU_REG_WR32(pGpu, NV_XTL_BASE_ADDRESS + NV_XTL_EP_PRI_DED_ERROR_STATUS, ~0);
    GPU_REG_WR32(pGpu, NV_XTL_BASE_ADDRESS + NV_XTL_EP_PRI_RAM_ERROR_INTR_STATUS, ~0);

    // Reset XPL-EP error counters by setting the _PENDING reset fields
    regVal = DRF_DEF(_XPL, _DL_ERR_RESET, _RBUF_UNCORR_ERR_COUNT, _PENDING) |
             DRF_DEF(_XPL, _DL_ERR_RESET, _SEQ_LUT_UNCORR_ERR_COUNT, _PENDING);
    GPU_REG_WR32(pGpu, NV_XPL_BASE_ADDRESS + NV_XPL_DL_ERR_RESET, regVal);

    // Wait for the error counter reset to complete
    gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0);
    for (;;)
    {
        // Sample the timeout before reading the register so that one final
        // read is still honored even if the timeout has just expired.
        status = gpuCheckTimeout(pGpu, &timeout);
        regVal = GPU_REG_RD32(pGpu, NV_XPL_BASE_ADDRESS + NV_XPL_DL_ERR_RESET);

        if (FLD_TEST_DRF(_XPL, _DL_ERR_RESET, _RBUF_UNCORR_ERR_COUNT, _DONE, regVal) &&
            FLD_TEST_DRF(_XPL, _DL_ERR_RESET, _SEQ_LUT_UNCORR_ERR_COUNT, _DONE, regVal))
            break;

        if (status != NV_OK)
            return status;
    }

    return NV_OK;
}

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -2629,15 +2629,3 @@ kbusGetEccCounts_GH100
return count;
}
/*!
 * Clear the XAL-EP uncorrected ECC error counters on GH100.
 *
 * @param[in] pGpu        GPU object pointer
 * @param[in] pKernelBus  KernelBus object pointer (unused here; kept for
 *                        HAL signature consistency)
 */
void
kbusClearEccCounts_GH100
(
    OBJGPU    *pGpu,
    KernelBus *pKernelBus
)
{
    // Reset XAL-EP counts by writing the counter registers back to zero
    GPU_REG_WR32(pGpu, NV_XAL_EP_REORDER_ECC_UNCORRECTED_ERR_COUNT, 0);
    GPU_REG_WR32(pGpu, NV_XAL_EP_P2PREQ_ECC_UNCORRECTED_ERR_COUNT, 0);
}

View File

@ -1048,12 +1048,6 @@ kfspSendBootCommands_GH100
return NV_OK;
}
if (pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_BOOT_COMMAND_OK))
{
NV_PRINTF(LEVEL_ERROR, "Cannot send FSP boot commands multiple times.\n");
return NV_ERR_NOT_SUPPORTED;
}
// Confirm FSP secure boot partition is done
statusBoot = kfspWaitForSecureBoot_HAL(pGpu, pKernelFsp);
@ -1135,9 +1129,24 @@ kfspSendBootCommands_GH100
// Bug 200711957 has more info and tracks longer term improvements.
//
const NvU32 ESTIMATED_RESERVE_FB = 0x200000;
NvU64 frtsOffsetFromEnd = ESTIMATED_RESERVE_FB;
KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu);
//
// In the boot retry path, we may need to apply an extra margin the end of memory.
// This is done to avoid memory with an ECC error that caused the first boot
// attempt failure. This value will be 0 during normal boot.
//
// Align the margin size to 2MB, as there's potentially an undocumented alignment
// requirement (the previous value should already be 2MB-aligned) and the extra
// padding won't hurt.
//
if (pKernelGsp != NULL)
frtsOffsetFromEnd += NV_ALIGN_UP64(kgspGetWprEndMargin(pGpu, pKernelGsp), 0x200000U);
// Offset from end of FB to be used by FSP
pCotPayload->frtsVidmemOffset = ESTIMATED_RESERVE_FB;
pCotPayload->frtsVidmemOffset = frtsOffsetFromEnd;
pCotPayload->frtsVidmemSize = frtsSize;
}
@ -1157,7 +1166,7 @@ kfspSendBootCommands_GH100
}
status = kfspSendAndReadMessage(pGpu, pKernelFsp, (NvU8 *)pCotPayload,
sizeof(NVDM_PAYLOAD_COT), NVDM_TYPE_COT, NULL, 0);
sizeof(NVDM_PAYLOAD_COT), NVDM_TYPE_COT, NULL, 0);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Sent following content to FSP: \n");

View File

@ -297,8 +297,6 @@ kgspCalculateFbLayout_GH100
NV_ASSERT_OR_RETURN(pKernelGsp->gspRmBootUcodeSize != 0, NV_ERR_INVALID_STATE);
NV_ASSERT_OR_RETURN(pRiscvDesc != NULL, NV_ERR_INVALID_STATE);
portMemSet(pWprMeta, 0, sizeof *pWprMeta);
//
// We send this to FSP as the size to reserve above FRTS.
// The actual offset gets filled in by ACR ucode when it sets up WPR2.
@ -414,27 +412,32 @@ kgspSetupGspFmcArgs_GH100
}
/*!
* Determine if lockdown is released.
* Determine if PRIV lockdown is released or the FMC has encountered an error.
*/
static NvBool
_kgspIsLockdownReleased
_kgspLockdownReleasedOrFmcError
(
OBJGPU *pGpu,
void *pVoid
)
{
KernelGsp *pKernelGsp = reinterpretCast(pVoid, KernelGsp *);
NvU32 reg;
KernelFalcon *pKernelFalcon = staticCast(pKernelGsp, KernelFalcon);
NvU32 hwcfg2, mailbox0;
reg = kflcnRegRead_HAL(pGpu, staticCast(pKernelGsp, KernelFalcon), NV_PFALCON_FALCON_HWCFG2);
//
// If lockdown has not been released, check NV_PGSP_FALCON_MAILBOX0, where the GSP-FMC
// (namely ACR) logs error codes during boot. GSP-FMC reported errors are always fatal,
// so there's no reason to continue polling for lockdown release.
//
mailbox0 = kflcnRegRead_HAL(pGpu, pKernelFalcon, NV_PFALCON_FALCON_MAILBOX0);
hwcfg2 = kflcnRegRead_HAL(pGpu, pKernelFalcon, NV_PFALCON_FALCON_HWCFG2);
return FLD_TEST_DRF(_PFALCON, _FALCON_HWCFG2, _RISCV_BR_PRIV_LOCKDOWN,
_UNLOCK, reg);
return (FLD_TEST_DRF(_PFALCON, _FALCON_HWCFG2, _RISCV_BR_PRIV_LOCKDOWN, _UNLOCK, hwcfg2) ||
(mailbox0 != 0));
}
static void
_kgspBootstrapGspFmc_GH100
(
@ -525,6 +528,7 @@ kgspBootstrapRiscvOSEarly_GH100
KernelFalcon *pKernelFalcon = staticCast(pKernelGsp, KernelFalcon);
KernelFsp *pKernelFsp = GPU_GET_KERNEL_FSP(pGpu);
NV_STATUS status = NV_OK;
NvU32 mailbox0;
// Only for GSP client builds
if (!IS_GSP_CLIENT(pGpu))
@ -533,16 +537,8 @@ kgspBootstrapRiscvOSEarly_GH100
return NV_ERR_NOT_SUPPORTED;
}
// Clear ECC errors before attempting to load GSP
status = gpuClearEccCounts_HAL(pGpu);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Issue clearing ECC counts! Status:0x%x\n", status);
}
// Setup the descriptors that GSP-FMC needs to boot GSP-RM
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
kgspSetupGspFmcArgs_HAL(pGpu, pKernelGsp, pGspFw), exit);
NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, kgspSetupGspFmcArgs_HAL(pGpu, pKernelGsp, pGspFw));
kgspSetupLibosInitArgs(pGpu, pKernelGsp);
@ -557,22 +553,21 @@ kgspBootstrapRiscvOSEarly_GH100
if (status != NV_OK)
{
NV_ASSERT_OK_FAILED("NV_RM_RPC_GSP_SET_SYSTEM_INFO", status);
goto exit;
return status;
}
NV_RM_RPC_SET_REGISTRY(pGpu, status);
if (status != NV_OK)
{
NV_ASSERT_OK_FAILED("NV_RM_RPC_SET_REGISTRY", status);
goto exit;
return status;
}
if (pKernelFsp != NULL && !pKernelFsp->getProperty(pKernelFsp, PDB_PROP_KFSP_DISABLE_GSPFMC))
{
NV_PRINTF(LEVEL_NOTICE, "Starting to boot GSP via FSP.\n");
pKernelFsp->setProperty(pKernelFsp, PDB_PROP_KFSP_GSP_MODE_GSPRM, NV_TRUE);
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
kfspSendBootCommands_HAL(pGpu, pKernelFsp), exit);
NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, kfspSendBootCommands_HAL(pGpu, pKernelFsp));
}
else
{
@ -595,12 +590,12 @@ kgspBootstrapRiscvOSEarly_GH100
kfspDumpDebugState_HAL(pGpu, pKernelFsp);
}
goto exit;
return status;
}
}
// Wait for lockdown to be released.
status = gpuTimeoutCondWait(pGpu, _kgspIsLockdownReleased, pKernelGsp, NULL);
// Wait for lockdown to be released or the FMC to report an error
status = gpuTimeoutCondWait(pGpu, _kgspLockdownReleasedOrFmcError, pKernelGsp, NULL);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Timeout waiting for lockdown release. It's also "
@ -616,7 +611,13 @@ kgspBootstrapRiscvOSEarly_GH100
kflcnRegRead_HAL(pGpu, pKernelFalcon, NV_PFALCON_FALCON_MAILBOX0));
NV_PRINTF(LEVEL_ERROR, "NV_PGSP_FALCON_MAILBOX1 = 0x%x\n",
kflcnRegRead_HAL(pGpu, pKernelFalcon, NV_PFALCON_FALCON_MAILBOX1));
goto exit;
return status;
}
else if ((mailbox0 = kflcnRegRead_HAL(pGpu, pKernelFalcon, NV_PFALCON_FALCON_MAILBOX0)) != 0)
{
NV_PRINTF(LEVEL_ERROR, "GSP-FMC reported an error while attempting to boot GSP: 0x%x\n",
mailbox0);
return NV_ERR_NOT_READY;
}
// Start polling for libos logs now that lockdown is released
@ -634,30 +635,19 @@ kgspBootstrapRiscvOSEarly_GH100
else
{
NV_ASSERT_FAILED("Failed to boot GSP");
status = NV_ERR_NOT_READY;
goto exit;
return NV_ERR_NOT_READY;
}
NV_PRINTF(LEVEL_INFO, "Waiting for GSP fw RM to be ready...\n");
// Link the status queue.
NV_ASSERT_OK_OR_GOTO(status, GspStatusQueueInit(pGpu, &pKernelGsp->pRpc->pMessageQueueInfo),
exit);
NV_ASSERT_OK_OR_RETURN(GspStatusQueueInit(pGpu, &pKernelGsp->pRpc->pMessageQueueInfo));
NV_ASSERT_OK_OR_GOTO(status, kgspWaitForRmInitDone(pGpu, pKernelGsp),
exit);
NV_ASSERT_OK_OR_RETURN(kgspWaitForRmInitDone(pGpu, pKernelGsp));
NV_PRINTF(LEVEL_INFO, "GSP FW RM ready.\n");
exit:
// If GSP fails to boot, check if there's any DED error.
if (status != NV_OK)
{
gpuCheckEccCounts_HAL(pGpu);
}
NV_ASSERT(status == NV_OK);
return status;
return NV_OK;
}
void

View File

@ -140,6 +140,8 @@ kgspAllocBootArgs_TU102
pKernelGsp->pWprMeta = (GspFwWprMeta *)NvP64_VALUE(pVa);
pKernelGsp->pWprMetaMappingPriv = pPriv;
portMemSet(pKernelGsp->pWprMeta, 0, sizeof(*pKernelGsp->pWprMeta));
//
// Setup libos arguments memory
//
@ -166,6 +168,8 @@ kgspAllocBootArgs_TU102
pKernelGsp->pLibosInitArgumentsCached = (LibosMemoryRegionInitArgument *)NvP64_VALUE(pVa);
pKernelGsp->pLibosInitArgumentsMappingPriv = pPriv;
portMemSet(pKernelGsp->pLibosInitArgumentsCached, 0, LIBOS_INIT_ARGUMENTS_SIZE);
// Setup bootloader arguments memory.
NV_ASSERT(sizeof(GSP_ARGUMENTS_CACHED) <= 0x1000);
@ -190,6 +194,8 @@ kgspAllocBootArgs_TU102
pKernelGsp->pGspArgumentsCached = (GSP_ARGUMENTS_CACHED *)NvP64_VALUE(pVa);
pKernelGsp->pGspArgumentsMappingPriv = pPriv;
portMemSet(pKernelGsp->pGspArgumentsCached, 0, sizeof(*pKernelGsp->pGspArgumentsCached));
return nvStatus;
_kgspAllocBootArgs_exit_cleanup:
@ -492,8 +498,9 @@ kgspGetGspRmBootUcodeStorage_TU102
* | GSP FW (non-WPR) HEAP |
* ---------------------------- <- nonWprHeapOffset, gspFwRsvdStart
*
* gspFwHeapOffset** contains the RM/Libos Heap. First 16 Mb are for Libos heap
* rest is for GSP-RM
* gspFwHeapOffset** contains the entire WPR heap region, which can be subdivided
* for various GSP FW components.
*
* @param pGpu GPU object pointer
* @param pKernelGsp KernelGsp object pointer
* @param pGspFw Pointer to GSP-RM fw image.
@ -523,8 +530,6 @@ kgspCalculateFbLayout_TU102
NV_ASSERT_OR_RETURN(pKernelGsp->gspRmBootUcodeSize != 0, NV_ERR_INVALID_STATE);
NV_ASSERT_OR_RETURN(pRiscvDesc != NULL, NV_ERR_INVALID_STATE);
portMemSet(pWprMeta, 0, sizeof *pWprMeta);
NV_ASSERT_OK_OR_RETURN(kmemsysGetUsableFbSize_HAL(pGpu, pKernelMemorySystem, &pWprMeta->fbSize));
//
@ -558,8 +563,11 @@ kgspCalculateFbLayout_TU102
else
vbiosReservedOffset = pWprMeta->vgaWorkspaceOffset;
// End of WPR region (128KB aligned)
pWprMeta->gspFwWprEnd = NV_ALIGN_DOWN64(vbiosReservedOffset, 0x20000);
// Set the size of the GSP FW ahead of kgspGetWprEndMargin()
pWprMeta->sizeOfRadix3Elf = pGspFw->imageSize;
// End of WPR region (128KB aligned), shifted for any WPR end margin
pWprMeta->gspFwWprEnd = NV_ALIGN_DOWN64(vbiosReservedOffset - kgspGetWprEndMargin(pGpu, pKernelGsp), 0x20000);
pWprMeta->frtsSize = kgspGetFrtsSize(pGpu, pKernelGsp);
pWprMeta->frtsOffset = pWprMeta->gspFwWprEnd - pWprMeta->frtsSize;
@ -568,9 +576,6 @@ kgspCalculateFbLayout_TU102
pWprMeta->sizeOfBootloader = pKernelGsp->gspRmBootUcodeSize;
pWprMeta->bootBinOffset = NV_ALIGN_DOWN64(pWprMeta->frtsOffset - pWprMeta->sizeOfBootloader, 0x1000);
// Compute GSP firmware image size
pWprMeta->sizeOfRadix3Elf = pGspFw->imageSize;
//
// Compute the start of the ELF. Align to 64K to avoid issues with
// inherent alignment constraints.

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -28,6 +28,7 @@
#include "kernel/core/thread_state.h"
#include "kernel/core/locks.h"
#include "kernel/diagnostics/gpu_acct.h"
#include "kernel/diagnostics/journal.h"
#include "kernel/gpu/fifo/kernel_channel.h"
#include "kernel/gpu/intr/engine_idx.h"
#include "kernel/gpu/mem_mgr/heap.h"
@ -506,6 +507,64 @@ _kgspRpcRCTriggered
pKernelChannel != NULL,
NV_ERR_INVALID_CHANNEL);
// Add the RcDiag records we received from GSP-RM to our system wide journal
{
OBJSYS *pSys = SYS_GET_INSTANCE();
Journal *pRcDB = SYS_GET_RCDB(pSys);
RmClient *pClient;
NvU32 recordSize = rcdbGetOcaRecordSizeWithHeader(pRcDB, RmRcDiagReport);
NvU32 rcDiagRecStart = pRcDB->RcErrRptNextIdx;
NvU32 rcDiagRecEnd;
NvU32 processId = 0;
NvU32 owner = RCDB_RCDIAG_DEFAULT_OWNER;
pClient = dynamicCast(RES_GET_CLIENT(pKernelChannel), RmClient);
NV_ASSERT(pClient != NULL);
if (pClient != NULL)
processId = pClient->ProcID;
for (NvU32 i = 0; i < rpc_params->rcJournalBufferSize / recordSize; i++)
{
RmRCCommonJournal_RECORD *pCommonRecord =
(RmRCCommonJournal_RECORD *)((NvU8*)&rpc_params->rcJournalBuffer + i * recordSize);
RmRcDiag_RECORD *pRcDiagRecord =
(RmRcDiag_RECORD *)&pCommonRecord[1];
#if defined(DEBUG)
NV_PRINTF(LEVEL_INFO, "%d: GPUTag=0x%x CPUTag=0x%llx timestamp=0x%llx stateMask=0x%llx\n",
i, pCommonRecord->GPUTag, pCommonRecord->CPUTag, pCommonRecord->timeStamp,
pCommonRecord->stateMask);
NV_PRINTF(LEVEL_INFO, " idx=%d timeStamp=0x%x type=0x%x flags=0x%x count=%d owner=0x%x processId=0x%x\n",
pRcDiagRecord->idx, pRcDiagRecord->timeStamp, pRcDiagRecord->type, pRcDiagRecord->flags,
pRcDiagRecord->count, pRcDiagRecord->owner, processId);
for (NvU32 j = 0; j < pRcDiagRecord->count; j++)
{
NV_PRINTF(LEVEL_INFO, " %d: offset=0x08%x tag=0x08%x value=0x08%x attribute=0x08%x\n",
j, pRcDiagRecord->data[j].offset, pRcDiagRecord->data[j].tag,
pRcDiagRecord->data[j].value, pRcDiagRecord->data[j].attribute);
}
#endif
if (rcdbAddRcDiagRecFromGsp(pGpu, pRcDB, pCommonRecord, pRcDiagRecord) == NULL)
{
NV_PRINTF(LEVEL_WARNING, "Lost RC diagnostic record coming from GPU%d GSP: type=0x%x stateMask=0x%llx\n",
gpuGetInstance(pGpu), pRcDiagRecord->type, pCommonRecord->stateMask);
}
}
rcDiagRecEnd = pRcDB->RcErrRptNextIdx - 1;
// Update records to have the correct PID associated with the channel
if (rcDiagRecStart != rcDiagRecEnd)
{
rcdbUpdateRcDiagRecContext(pRcDB,
rcDiagRecStart,
rcDiagRecEnd,
processId,
owner);
}
}
// With CC enabled, CPU-RM needs to write error notifiers
if (gpuIsCCFeatureEnabled(pGpu))
{
@ -2489,6 +2548,51 @@ _kgspVbiosVersionToStr(NvU64 vbiosVersionCombined, char *pVbiosVersionStr, NvU32
(vbiosVersionCombined) & 0xff);
}
/*!
 * Allocate the Scrubber ucode image if the pre-scrubbed region at the top of
 * FB is not large enough to cover the memory GSP-RM will occupy.
 *
 * The needed size is the span from the start of the GSP reserved region to
 * the end of FB (per the current WPR metadata); if GFW has already scrubbed
 * at least that much from the top of FB, no scrubber image is required.
 *
 * @param[in] pGpu        GPU object pointer
 * @param[in] pKernelGsp  KernelGsp object pointer (WPR metadata must already
 *                        be populated — TODO confirm caller ordering)
 *
 * @return NV_OK if no scrubber is needed or allocation succeeded,
 *         otherwise the error from kgspAllocateScrubberUcodeImage().
 */
static NV_STATUS
_kgspPrepareScrubberImageIfNeeded(OBJGPU *pGpu, KernelGsp *pKernelGsp)
{
    // Prepare Scrubber ucode image if pre-scrubbed memory is insufficient
    NvU64 neededSize = pKernelGsp->pWprMeta->fbSize - pKernelGsp->pWprMeta->gspFwRsvdStart;
    NvU64 prescrubbedSize = kgspGetPrescrubbedTopFbSize(pGpu, pKernelGsp);
    NV_PRINTF(LEVEL_INFO, "pre-scrubbed memory: 0x%llx bytes, needed: 0x%llx bytes\n",
              prescrubbedSize, neededSize);

    if (neededSize > prescrubbedSize)
        NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
            kgspAllocateScrubberUcodeImage(pGpu, pKernelGsp, &pKernelGsp->pScrubberUcode));

    return NV_OK;
}
/*!
 * Perform one GSP-RM boot attempt: verify WPR2 is down, lay out FB, prepare
 * the scrubber if required, and bootstrap the RISC-V GSP.
 *
 * On a bootstrap failure, the ECC counters are consulted; if any are flagged,
 * the failure is reported as NV_ERR_ECC_ERROR so the caller can retry with a
 * shifted FB layout (see kgspGetWprEndMargin usage elsewhere in this change).
 * The attempt counter is incremented regardless of outcome.
 *
 * @param[in] pGpu        GPU object pointer
 * @param[in] pKernelGsp  KernelGsp object pointer
 * @param[in] pGspFw      GSP-RM firmware image descriptor
 *
 * @return NV_OK on successful boot, NV_ERR_INVALID_STATE if WPR2 is already
 *         up, NV_ERR_ECC_ERROR for an ECC-attributed boot failure, or the
 *         underlying error otherwise.
 */
static NV_STATUS
_kgspBootGspRm(OBJGPU *pGpu, KernelGsp *pKernelGsp, GSP_FIRMWARE *pGspFw)
{
    NV_STATUS status;

    // Fail early if WPR2 is up
    if (kgspIsWpr2Up_HAL(pGpu, pKernelGsp))
    {
        NV_PRINTF(LEVEL_ERROR, "unexpected WPR2 already up, cannot proceed with booting GSP\n");
        NV_PRINTF(LEVEL_ERROR, "(the GPU is likely in a bad state and may need to be reset)\n");
        return NV_ERR_INVALID_STATE;
    }

    // Calculate FB layout (requires knowing FB size which depends on GFW_BOOT)
    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, kgspCalculateFbLayout_HAL(pGpu, pKernelGsp, pGspFw));

    // If the new FB layout requires a scrubber ucode to scrub additional space, prepare it now
    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, _kgspPrepareScrubberImageIfNeeded(pGpu, pKernelGsp));

    // Proceed with GSP boot - if it fails, check for ECC errors
    status = kgspBootstrapRiscvOSEarly_HAL(pGpu, pKernelGsp, pGspFw);
    if ((status != NV_OK) && gpuCheckEccCounts_HAL(pGpu))
        status = NV_ERR_ECC_ERROR;

    // Count every attempt (success or failure) toward the retry limit.
    pKernelGsp->bootAttempts++;

    return status;
}
/*!
* Initialize GSP-RM
*
@ -2528,9 +2632,6 @@ kgspInitRm_IMPL
GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT, &gpusLockedMask),
done);
// Set the GPU time to the wall-clock time before loading GSP ucode.
tmrSetCurrentTime_HAL(pGpu, pTmr);
/*
* For GSP-RM boot, we must trigger FRTS (if it exists for the chip)
* before loading GSP-RM so that FRTS data and GSP-RM code/data/heap can coexist
@ -2573,11 +2674,7 @@ kgspInitRm_IMPL
}
else if (status == NV_ERR_NOT_SUPPORTED)
{
//
// Extracting VBIOS image from ROM is not supported.
// Sanity check we don't depend on it for FRTS, and proceed without FWSEC.
//
NV_ASSERT_OR_GOTO(kgspGetFrtsSize(pGpu, pKernelGsp) == 0, done);
status = NV_OK;
}
else
@ -2656,61 +2753,41 @@ kgspInitRm_IMPL
threadStateResetTimeout(pGpu);
NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, kgspWaitForGfwBootOk_HAL(pGpu, pKernelGsp), done);
// Fail early if WPR2 is up
if (kgspIsWpr2Up_HAL(pGpu, pKernelGsp))
{
NV_PRINTF(LEVEL_ERROR, "unexpected WPR2 already up, cannot proceed with booting gsp\n");
NV_PRINTF(LEVEL_ERROR, "(the GPU is likely in a bad state and may need to be reset)\n");
status = NV_ERR_INVALID_STATE;
goto done;
}
//
// Set the GPU time to the wall-clock time after GFW boot is complete
// (to avoid PLM collisions) but before loading GSP-RM ucode (which
// consumes the updated GPU time).
//
tmrSetCurrentTime_HAL(pGpu, pTmr);
//
// Bring up ucode with RM offload task.
// If an ECC error occurs which results in the failure of the bootstrap, try again.
// Subsequent attempts will shift the GSP region of FB in an attempt to avoid the
// unstable memory.
//
const NvU8 MAX_GSP_BOOT_ATTEMPTS = 4;
do
{
// Reset the thread state timeout after failed attempts to prevent premature timeouts.
if (status != NV_OK)
threadStateResetTimeout(pGpu);
//
// _kgspBootGspRm() will return NV_ERR_ECC_ERROR if any unhandled ECC errors are
// detected during a failed GSP boot attempt. Depending on where and when the
// error occurred, we may not be able to try again, in which case a different
// error code will be returned.
//
status = _kgspBootGspRm(pGpu, pKernelGsp, pGspFw);
} while ((status == NV_ERR_ECC_ERROR) && (pKernelGsp->bootAttempts < MAX_GSP_BOOT_ATTEMPTS));
// Calculate FB layout (requires knowing FB size which depends on GFW_BOOT)
status = kgspCalculateFbLayout_HAL(pGpu, pKernelGsp, pGspFw);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Error calculating FB layout\n");
goto done;
}
// Prepare Scrubber ucode image if pre-scrubbed memory is insufficient
if (pKernelGsp->pScrubberUcode == NULL)
{
NvU64 neededSize = pKernelGsp->pWprMeta->fbSize - pKernelGsp->pWprMeta->gspFwRsvdStart;
NvU64 prescrubbedSize = kgspGetPrescrubbedTopFbSize(pGpu, pKernelGsp);
if (neededSize > prescrubbedSize)
{
NV_PRINTF(LEVEL_INFO,
"allocating Scrubber ucode as pre-scrubbed memory (0x%llx bytes) is insufficient (0x%llx bytes needed)\n",
prescrubbedSize, neededSize);
status = kgspAllocateScrubberUcodeImage(pGpu, pKernelGsp,
&pKernelGsp->pScrubberUcode);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "failed to allocate Scrubber ucode: 0x%x\n", status);
goto done;
}
}
else
{
NV_PRINTF(LEVEL_INFO,
"skipping allocating Scrubber ucode as pre-scrubbed memory (0x%llx bytes) is sufficient (0x%llx bytes needed)\n",
prescrubbedSize, neededSize);
}
}
// bring up ucode with RM offload task
status = kgspBootstrapRiscvOSEarly_HAL(pGpu, pKernelGsp, pGspFw);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "cannot bootstrap riscv/gsp: 0x%x\n", status);
//
// Ignore return value - a crash report may have already been consumed,
// this is just here as a last attempt to report boot issues that might
// escaped prior checks.
// have escaped prior checks.
//
(void)kgspHealthCheck_HAL(pGpu, pKernelGsp);
goto done;
@ -4031,3 +4108,58 @@ kgspGetFwHeapSize_IMPL
return _kgspCalculateFwHeapSize(pGpu, pKernelGsp, maxScrubbedHeapSizeMB);
}
/*!
 * Compute the margin (in bytes) to reserve after the end of WPR2.
 *
 * The margin may be overridden via the NV_REG_STR_RM_GSP_WPR_END_MARGIN
 * registry key (value in MB). When no override is present, a default margin
 * is derived from the WPR layout and scaled by the number of prior boot
 * attempts, so that each retry shifts the GSP region further in an attempt
 * to avoid unstable memory.
 *
 * Side effect: sets or clears GSP_FW_FLAGS_RECOVERY_MARGIN_PRESENT in
 * pKernelGsp->pWprMeta->flags depending on whether the margin is applied.
 *
 * @param[in] pGpu        OBJGPU pointer
 * @param[in] pKernelGsp  KernelGsp object pointer
 *
 * @return the margin in bytes when the override requests it unconditionally
 *         or a prior boot attempt failed; 0 on the normal boot path.
 */
NvU64 kgspGetWprEndMargin_IMPL(OBJGPU *pGpu, KernelGsp *pKernelGsp)
{
    NvU64 wprEndMargin;
    NvU32 marginOverride = 0;
    GspFwWprMeta *pWprMeta = pKernelGsp->pWprMeta;

    // Registry override is encoded in MB; ignore read failure (stays 0).
    (void)osReadRegistryDword(pGpu, NV_REG_STR_RM_GSP_WPR_END_MARGIN, &marginOverride);
    wprEndMargin = ((NvU64)DRF_VAL(_REG, _RM_GSP_WPR_END_MARGIN, _MB, marginOverride)) << 20;

    if (wprEndMargin == 0)
    {
        // Calculate the default margin size based on the WPR size.
        // Note: the previous revision redeclared pWprMeta in this scope,
        // shadowing the function-scope pointer to the same object; the
        // shadowing declaration has been removed (-Wshadow clean).

        //
        // This needs to be called after pWprMeta->sizeOfRadix3Elf has been initialized,
        // in order to estimate the default WPR size.
        //
        NV_ASSERT(pWprMeta->sizeOfRadix3Elf > 0);

        //
        // If the bounds are encoded in GspFwWprMeta from a prior attempt, use them.
        // Otherwise, estimate the WPR size by the sizes of the elements in the layout
        //
        if (pWprMeta->gspFwWprEnd > pWprMeta->nonWprHeapOffset)
        {
            wprEndMargin = pWprMeta->gspFwWprEnd - pWprMeta->nonWprHeapOffset;
        }
        else
        {
            wprEndMargin += kgspGetFrtsSize_HAL(pGpu, pKernelGsp);
            wprEndMargin += pKernelGsp->gspRmBootUcodeSize;
            wprEndMargin += pWprMeta->sizeOfRadix3Elf;
            wprEndMargin += kgspGetFwHeapSize(pGpu, pKernelGsp, 0);
            wprEndMargin += kgspGetNonWprHeapSize(pGpu, pKernelGsp);
        }

        // Grow the margin proportionally to the number of failed attempts.
        if (pKernelGsp->bootAttempts > 0)
            wprEndMargin *= pKernelGsp->bootAttempts;
    }

    if (FLD_TEST_DRF(_REG, _RM_GSP_WPR_END_MARGIN, _APPLY, _ALWAYS, marginOverride) ||
        (pKernelGsp->bootAttempts > 0))
    {
        NV_PRINTF(LEVEL_WARNING, "Adding margin of 0x%llx bytes after the end of WPR2\n",
                  wprEndMargin);
        pWprMeta->flags |= GSP_FW_FLAGS_RECOVERY_MARGIN_PRESENT;
        return wprEndMargin;
    }

    // Normal boot path
    pWprMeta->flags &= ~GSP_FW_FLAGS_RECOVERY_MARGIN_PRESENT;
    return 0;
}

View File

@ -541,6 +541,7 @@ NV_STATUS GspMsgQueueSendCommand(MESSAGE_QUEUE_INFO *pMQI, OBJGPU *pGpu)
// Use sequence number as AAD.
portMemCopy((NvU8*)pCQE->aadBuffer, sizeof(pCQE->aadBuffer), (NvU8 *)&pCQE->seqNum, sizeof(pCQE->seqNum));
// We need to encrypt the full queue elements to obscure the data.
nvStatus = ccslEncrypt(pCC->pRpcCcslCtx,
(pCQE->elemCount * GSP_MSG_QUEUE_ELEMENT_SIZE_MIN) - GSP_MSG_QUEUE_ELEMENT_HDR_SIZE,
pSrc + GSP_MSG_QUEUE_ELEMENT_HDR_SIZE,
@ -555,9 +556,14 @@ NV_STATUS GspMsgQueueSendCommand(MESSAGE_QUEUE_INFO *pMQI, OBJGPU *pGpu)
// Do not re-try if decryption failed.
return nvStatus;
}
}
pCQE->checkSum = _checkSum32(pSrc, pCQE->elemCount * GSP_MSG_QUEUE_ELEMENT_SIZE_MIN);
// Now that encryption covers elements completely, include them in checksum.
pCQE->checkSum = _checkSum32(pSrc, pCQE->elemCount * GSP_MSG_QUEUE_ELEMENT_SIZE_MIN);
}
else
{
pCQE->checkSum = _checkSum32(pSrc, uElementSize);
}
for (i = 0; i < pCQE->elemCount; i++)
{
@ -657,6 +663,7 @@ NV_STATUS GspMsgQueueReceiveStatus(MESSAGE_QUEUE_INFO *pMQI, OBJGPU *pGpu)
NvU32 uElementSize = 0;
NvU32 seqMismatchDiff = NV_U32_MAX;
NV_STATUS nvStatus = NV_OK;
ConfidentialCompute *pCC = NULL;
for (nRetries = 0; nRetries < nMaxRetries; nRetries++)
{
@ -703,7 +710,18 @@ NV_STATUS GspMsgQueueReceiveStatus(MESSAGE_QUEUE_INFO *pMQI, OBJGPU *pGpu)
continue;
// Retry if checksum fails.
if (_checkSum32(pMQI->pCmdQueueElement, (nElements * GSP_MSG_QUEUE_ELEMENT_SIZE_MIN)) != 0)
pCC = GPU_GET_CONF_COMPUTE(pGpu);
if (pCC != NULL && pCC->getProperty(pCC, PDB_PROP_CONFCOMPUTE_ENCRYPT_READY))
{
// In Confidential Compute scenario, checksum includes complete element range.
if (_checkSum32(pMQI->pCmdQueueElement, (nElements * GSP_MSG_QUEUE_ELEMENT_SIZE_MIN)) != 0)
{
NV_PRINTF(LEVEL_ERROR, "Bad checksum.\n");
nvStatus = NV_ERR_INVALID_DATA;
continue;
}
} else
if (_checkSum32(pMQI->pCmdQueueElement, uElementSize) != 0)
{
NV_PRINTF(LEVEL_ERROR, "Bad checksum.\n");
nvStatus = NV_ERR_INVALID_DATA;
@ -756,7 +774,7 @@ NV_STATUS GspMsgQueueReceiveStatus(MESSAGE_QUEUE_INFO *pMQI, OBJGPU *pGpu)
}
}
ConfidentialCompute *pCC = GPU_GET_CONF_COMPUTE(pGpu);
pCC = GPU_GET_CONF_COMPUTE(pGpu);
if (pCC != NULL && pCC->getProperty(pCC, PDB_PROP_CONFCOMPUTE_ENCRYPT_READY))
{
nvStatus = ccslDecrypt(pCC->pRpcCcslCtx,

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -115,10 +115,11 @@ _kmemsysReadRegAndMaskPriError
NvU32 regVal;
regVal = osGpuReadReg032(pGpu, regAddr);
if ((regVal & GPU_READ_PRI_ERROR_MASK) == GPU_READ_PRI_ERROR_CODE)
{
if (regVal == GPU_REG_VALUE_INVALID)
return 0;
if ((regVal & GPU_READ_PRI_ERROR_MASK) == GPU_READ_PRI_ERROR_CODE)
return 0;
}
return regVal;
}
@ -160,25 +161,3 @@ kmemsysGetEccCounts_TU102
}
}
}
void
kmemsysClearEccCounts_TU102
(
OBJGPU *pGpu,
KernelMemorySystem *pKernelMemorySystem
)
{
NvU32 maxFbpas = kmemsysGetMaxFbpas_HAL(pGpu, pKernelMemorySystem);
NvU32 dedCountSize = kmemsysGetEccDedCountSize_HAL(pGpu, pKernelMemorySystem);
NvU32 fbpaDedCountRegAddr = 0;
for (NvU32 i = 0; i < maxFbpas; i++)
{
for (NvU32 j = 0; j < dedCountSize; j++)
{
fbpaDedCountRegAddr = kmemsysGetEccDedCountRegAddr_HAL(pGpu, pKernelMemorySystem, i, j);
osGpuWriteReg032(pGpu, fbpaDedCountRegAddr, 0);
osGpuWriteReg032(pGpu, NV_PLTCG_LTC0_LTS0_L2_CACHE_ECC_UNCORRECTED_ERR_COUNT + (i * NV_LTC_PRI_STRIDE) + (j * NV_LTS_PRI_STRIDE), 0);
}
}
}

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -472,15 +472,3 @@ kgmmuGetEccCounts_TU102
return mmuCount;
}
void
kgmmuClearEccCounts_TU102
(
OBJGPU *pGpu,
KernelGmmu *pKernelGmmu
)
{
GPU_REG_WR32(pGpu, NV_PFB_PRI_MMU_L2TLB_ECC_UNCORRECTED_ERR_COUNT, 0);
GPU_REG_WR32(pGpu, NV_PFB_PRI_MMU_HUBTLB_ECC_UNCORRECTED_ERR_COUNT, 0);
GPU_REG_WR32(pGpu, NV_PFB_PRI_MMU_FILLUNIT_ECC_UNCORRECTED_ERR_COUNT, 0);
}

View File

@ -174,7 +174,8 @@ knvlinkCoreGetRemoteDeviceInfo_IMPL
//
if (!conn_info.bConnected &&
(bNvswitchProxyPresent ||
GPU_IS_NVSWITCH_DETECTED(pGpu)))
(!pSys->getProperty(pSys, PDB_PROP_SYS_NVSWITCH_IS_PRESENT) &&
GPU_IS_NVSWITCH_DETECTED(pGpu))))
{
conn_info.bConnected = NV_TRUE;
conn_info.deviceType = NVLINK_DEVICE_TYPE_NVSWITCH;

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -195,6 +195,9 @@ subdeviceDestruct_IMPL
subdeviceRestoreLockedClock(pSubdevice, pCallContext);
// Decrement the reference count for VF if previously incremented.
subdeviceRestoreVF(pSubdevice, pCallContext);
// Restore GR tick frequency to default.
subdeviceRestoreGrTickFreq(pSubdevice, pCallContext);

View File

@ -31,6 +31,7 @@
#include "kernel/gpu/bif/kernel_bif.h"
#include "gpu/subdevice/subdevice.h"
#include "gpu/gpu.h"
#include "virtualization/hypervisor/hypervisor.h"
#include "vgpu/rpc.h"
#include "vgpu/vgpu_events.h"
#include "platform/chipset/chipset.h"
@ -348,6 +349,7 @@ _kp2pCapsGetStatusOverPcie
NvU8 gpuP2PWriteCapsStatus = NV0000_P2P_CAPS_STATUS_OK;
NvU32 lockedGpuMask = 0;
NV_STATUS status = NV_OK;
OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
// Check if any overrides are enabled.
if (_kp2pCapsCheckStatusOverridesForPcie(gpuMask, pP2PWriteCapStatus,
@ -365,6 +367,16 @@ _kp2pCapsGetStatusOverPcie
}
}
// Check for hypervisor oriented PCIe P2P overrides
if (pHypervisor &&
pHypervisor->bDetected &&
hypervisorPcieP2pDetection(pHypervisor, gpuMask))
{
*pP2PReadCapStatus = NV0000_P2P_CAPS_STATUS_OK;
*pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_OK;
goto done;
}
// PCI-E topology checks
gpuInstance = 0;
while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)

View File

@ -4315,7 +4315,6 @@ cliresCtrlCmdNvdGetRcerrRpt_IMPL
NvU32 gpuAttachCount = 0;
NvU32 gpuIdx = 0;
OBJGPU *pGpu = NULL;
NvU32 processId = osGetCurrentProcess();
NV_ASSERT_OK_OR_RETURN(gpumgrGetGpuAttachInfo(&gpuAttachCount, &gpuMask));
@ -4330,7 +4329,7 @@ cliresCtrlCmdNvdGetRcerrRpt_IMPL
pParams->flags = 0;
if (!RMCFG_FEATURE_PLATFORM_GSP)
{
pParams->processId = processId;
pParams->processId = osGetCurrentProcess();
}
if ((status = krcCliresCtrlNvdGetRcerrRptCheckPermissions_HAL(
@ -4341,75 +4340,66 @@ cliresCtrlCmdNvdGetRcerrRpt_IMPL
return status;
}
if (IS_GSP_CLIENT(pGpu))
{
NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS *pLocalParams =
portMemAllocNonPaged(sizeof *pLocalParams);
Journal *pRcDB = SYS_GET_RCDB(SYS_GET_INSTANCE());
RmRCCommonJournal_RECORD *pCommon;
NV_CHECK_OR_RETURN(LEVEL_INFO, pLocalParams != NULL, NV_ERR_NO_MEMORY);
//
// Pre-GSP, RcDiagRec from all GPUs were stored in kernel sysmem in a
// single RING_BUFFER_LOG.
//
// With GSP, each GPU its own separate RING_BUFFER_LOG. We need to
// search in all of them.
//
// However, we will always return only the first matching record in all
// cases (similar to pre-GSP behavior)
//
for (; pGpu != NULL ; pGpu = gpumgrGetNextGpu(gpuMask, &gpuIdx))
status = rcdbGetRcDiagRecBoundaries(pRcDB,
&pParams->startIdx,
&pParams->endIdx,
pParams->owner,
pParams->processId);
if (status != NV_OK)
{
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
portMemSet(pLocalParams, 0, sizeof(*pLocalParams));
pLocalParams->reqIdx = pParams->reqIdx;
pLocalParams->owner = pParams->owner;
pLocalParams->processId = pParams->processId;
return status;
}
status = pRmApi->Control(pRmApi,
RES_GET_CLIENT_HANDLE(pRmCliRes),
RES_GET_HANDLE(pRmCliRes),
NV0000_CTRL_CMD_NVD_GET_RCERR_RPT,
pLocalParams,
sizeof *pLocalParams);
if (status == NV_OK &&
(pLocalParams->flags &
NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_DATA_VALID))
pParams->flags |= NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_RANGE_VALID;
{
NV_STATUS localStatus = rcdbGetRcDiagRec(pRcDB,
pParams->reqIdx,
&pCommon,
pParams->owner,
pParams->processId);
switch (localStatus)
{
//
// Each RING_BUFFER_LOG can contain MAX_RCDB_RCDIAG_WRAP_BUFF
// RmRcDiag_RECORD. We will multiply indices returned to the
// client by this value so the GPU can be uniquely identified
// (in addition to GPUTag) from
// NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS.rptIdx
//
// Note that this will result in clients receiving a rptIdx value
// larger than MAX_RCDB_RCDIAG_WRAP_BUFF.
//
NvU16 indexOffset = gpuIdx * MAX_RCDB_RCDIAG_WRAP_BUFF;
*pParams = *pLocalParams;
pParams->startIdx += indexOffset;
pParams->endIdx += indexOffset;
pParams->rptIdx += indexOffset;
break;
}
if (status == NV_ERR_BUSY_RETRY)
{
//
// To avoid the case where we silently fail to find a record
// because we skipped over to the next Gpu on getting a
// BUSY_RETRY on one of the Gpus (which might have contained the
// record).
//
break;
case NV_OK:
break;
case NV_ERR_BUSY_RETRY:
return localStatus;
default:
return status;
}
}
portMemFree(pLocalParams);
pLocalParams = NULL;
if (pCommon != NULL)
{
NvU32 i = 0;
RmRcDiag_RECORD *pRecord = (RmRcDiag_RECORD *)&pCommon[1];
pParams->GPUTag = pCommon->GPUTag;
pParams->rptIdx = pRecord->idx;
pParams->rptTime = pRecord->timeStamp;
pParams->rptType = pRecord->type;
pParams->rptCount = pRecord->count;
pParams->flags |= pRecord->flags;
for (i = 0; i < pRecord->count; ++i)
{
pParams->report[i].tag = pRecord->data[i].tag;
pParams->report[i].value = pRecord->data[i].value;
pParams->report[i].attribute = pRecord->data[i].attribute;
}
for (; i < NV0000_CTRL_CMD_NVD_RCERR_RPT_MAX_ENTRIES; ++i)
{
pParams->report[i].tag =
NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_EMPTY;
pParams->report[i].value = 0;
pParams->report[i].attribute = 0;
}
pParams->flags |= NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_DATA_VALID;
}
}
return status;

View File

@ -275,6 +275,9 @@ serverControlApiCopyIn
rmStatus = embeddedParamCopyIn(pEmbeddedParamCopies, pRmCtrlParams);
if (rmStatus != NV_OK)
{
rmapiParamsRelease(pParamCopy);
pRmCtrlParams->pParams = NvP64_VALUE(pUserParams);
pCookie->bFreeParamCopy = NV_FALSE;
return rmStatus;
}
pCookie->bFreeEmbeddedCopy = NV_TRUE;
@ -401,6 +404,7 @@ _rmapiRmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd, NvP64 pUserParams
RS_LOCK_INFO lockInfo = {0};
NvU32 ctrlFlags = 0;
NvU32 ctrlAccessRight = 0;
NvU32 ctrlParamsSize = 0;
NV_STATUS getCtrlInfoStatus;
RMTRACE_RMAPI(_RMCTRL_ENTRY, cmd);
@ -488,13 +492,17 @@ _rmapiRmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd, NvP64 pUserParams
}
}
getCtrlInfoStatus = rmapiutilGetControlInfo(cmd, &ctrlFlags, &ctrlAccessRight, &ctrlParamsSize);
// error check parameters
if (((paramsSize != 0) && (pUserParams == (NvP64) 0)) ||
((paramsSize == 0) && (pUserParams != (NvP64) 0))
|| ((getCtrlInfoStatus == NV_OK) && (paramsSize != ctrlParamsSize))
)
{
NV_PRINTF(LEVEL_WARNING, "bad params: ptr " NvP64_fmt " size: 0x%x\n",
pUserParams, paramsSize);
NV_PRINTF(LEVEL_WARNING,
"bad params: cmd:0x%x ptr " NvP64_fmt " size: 0x%x expect size: 0x%x\n",
cmd, pUserParams, paramsSize, ctrlParamsSize);
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto done;
}
@ -521,8 +529,6 @@ _rmapiRmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd, NvP64 pUserParams
lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK;
}
getCtrlInfoStatus = rmapiutilGetControlInfo(cmd, &ctrlFlags, &ctrlAccessRight);
if (getCtrlInfoStatus == NV_OK)
{
//
@ -600,9 +606,20 @@ _rmapiRmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd, NvP64 pUserParams
{
rmCtrlParams.pCookie->apiCopyFlags |= RMCTRL_API_COPY_FLAGS_FORCE_SKIP_COPYOUT_ON_ERROR;
serverControlApiCopyIn(&g_resServ, &rmCtrlParams, rmCtrlParams.pCookie);
rmStatus = rmapiControlCacheGet(hClient, hObject, cmd, rmCtrlParams.pParams, paramsSize);
serverControlApiCopyOut(&g_resServ, &rmCtrlParams, rmCtrlParams.pCookie, rmStatus);
rmStatus = serverControlApiCopyIn(&g_resServ, &rmCtrlParams,
rmCtrlParams.pCookie);
if (rmStatus == NV_OK)
{
rmStatus = rmapiControlCacheGet(hClient, hObject, cmd,
rmCtrlParams.pParams,
paramsSize);
// rmStatus is passed in for error handling
rmStatus = serverControlApiCopyOut(&g_resServ,
&rmCtrlParams,
rmCtrlParams.pCookie,
rmStatus);
}
if (rmStatus == NV_OK)
{
@ -848,7 +865,7 @@ serverControlLookupLockFlags
NvU32 controlFlags = pRmCtrlExecuteCookie->ctrlFlags;
if (controlFlags == 0 && !RMCFG_FEATURE_PLATFORM_GSP && areAllGpusInOffloadMode)
{
NV_STATUS status = rmapiutilGetControlInfo(pRmCtrlParams->cmd, &controlFlags, NULL);
NV_STATUS status = rmapiutilGetControlInfo(pRmCtrlParams->cmd, &controlFlags, NULL, NULL);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_INFO,

View File

@ -147,7 +147,7 @@ NvBool rmapiCmdIsCacheable(NvU32 cmd, NvBool bAllowInternal)
NvU32 flags;
NvU32 accessRight;
if (rmapiutilGetControlInfo(cmd, &flags, &accessRight) != NV_OK)
if (rmapiutilGetControlInfo(cmd, &flags, &accessRight, NULL) != NV_OK)
return NV_FALSE;
return rmapiControlIsCacheable(flags, accessRight, bAllowInternal);
@ -947,14 +947,19 @@ NV_STATUS rmapiControlCacheGet
{
NV_STATUS status = NV_OK;
NvU32 flags = 0;
NvU32 ctrlParamsSize;
if (RmapiControlCache.mode == NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_MODE_VERIFY_ONLY)
return NV_ERR_OBJECT_NOT_FOUND;
status = rmapiutilGetControlInfo(cmd, &flags, NULL);
status = rmapiutilGetControlInfo(cmd, &flags, NULL, &ctrlParamsSize);
if (status != NV_OK)
goto done;
NV_CHECK_OR_ELSE(LEVEL_ERROR,
(params != NULL && paramsSize == ctrlParamsSize),
status = NV_ERR_INVALID_PARAMETER; goto done);
switch ((flags & RMCTRL_FLAGS_CACHEABLE_ANY))
{
case RMCTRL_FLAGS_CACHEABLE:
@ -985,11 +990,16 @@ NV_STATUS rmapiControlCacheSet
{
NV_STATUS status = NV_OK;
NvU32 flags = 0;
NvU32 ctrlParamsSize;
status = rmapiutilGetControlInfo(cmd, &flags, NULL);
status = rmapiutilGetControlInfo(cmd, &flags, NULL, &ctrlParamsSize);
if (status != NV_OK)
goto done;
NV_CHECK_OR_ELSE(LEVEL_ERROR,
(params != NULL && paramsSize == ctrlParamsSize),
status = NV_ERR_INVALID_PARAMETER; goto done);
switch ((flags & RMCTRL_FLAGS_CACHEABLE_ANY))
{
case RMCTRL_FLAGS_CACHEABLE:

View File

@ -155,7 +155,8 @@ rmapiutilGetControlInfo
(
NvU32 cmd,
NvU32 *pFlags,
NvU32 *pAccessRight
NvU32 *pAccessRight,
NvU32 *pParamsSize
)
{
RS_RESOURCE_DESC *pResourceDesc = RsResInfoByExternalClassId(DRF_VAL(XXXX, _CTRL_CMD, _CLASS, cmd));
@ -176,6 +177,9 @@ rmapiutilGetControlInfo
if (pAccessRight != NULL)
*pAccessRight = pMethodDef->accessRight;
if (pParamsSize != NULL)
*pParamsSize = pMethodDef->paramSize;
return NV_OK;
}
}

View File

@ -798,7 +798,7 @@ clientFreeResource_IMPL
pResource = pResourceRef->pResource;
pParentRef = pResourceRef->pParentRef;
if (!pParams->bInvalidateOnly && pResourceRef->bInvalidated)
if (pResourceRef->bInvalidated)
goto done;
portMemSet(&callContext, 0, sizeof(callContext));

View File

@ -1,4 +1,4 @@
NVIDIA_VERSION = 535.154.05
NVIDIA_VERSION = 535.161.07
# This file.
VERSION_MK_FILE := $(lastword $(MAKEFILE_LIST))