Bernhard Stoeckner 2024-07-09 15:47:48 +02:00
parent e45d91de02
commit caa2dd11a0
No known key found for this signature in database
GPG Key ID: 7D23DC2750FAC2E1
44 changed files with 35854 additions and 35675 deletions

View File

@@ -2,6 +2,8 @@
 ## Release 550 Entries
+### [550.100] 2024-07-09
 ### [550.90.07] 2024-06-04
 ### [550.78] 2024-04-25

View File

@@ -1,7 +1,7 @@
 # NVIDIA Linux Open GPU Kernel Module Source
 This is the source release of the NVIDIA Linux open GPU kernel modules,
-version 550.90.07.
+version 550.100.
 ## How to Build
@@ -17,7 +17,7 @@ as root:
 Note that the kernel modules built here must be used with GSP
 firmware and user-space NVIDIA GPU driver components from a corresponding
-550.90.07 driver release. This can be achieved by installing
+550.100 driver release. This can be achieved by installing
 the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
 option. E.g.,
@@ -188,7 +188,7 @@ encountered specific to them.
 For details on feature support and limitations, see the NVIDIA GPU driver
 end user README here:
-https://us.download.nvidia.com/XFree86/Linux-x86_64/550.90.07/README/kernel_open.html
+https://us.download.nvidia.com/XFree86/Linux-x86_64/550.100/README/kernel_open.html
 For vGPU support, please refer to the README.vgpu packaged in the vGPU Host
 Package for more details.

View File

@@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
 EXTRA_CFLAGS += -I$(src)
 EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
 EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
-EXTRA_CFLAGS += -DNV_VERSION_STRING=\"550.90.07\"
+EXTRA_CFLAGS += -DNV_VERSION_STRING=\"550.100\"
 ifneq ($(SYSSRCHOST1X),)
 EXTRA_CFLAGS += -I$(SYSSRCHOST1X)

View File

@@ -246,7 +246,7 @@ namespace DisplayPort
 virtual bool getOuiSupported() = 0;
 virtual AuxRetry::status setOuiSource(unsigned ouiId, const char * model, size_t modelNameLength, NvU8 chipRevision) = 0;
 virtual bool getOuiSource(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision) = 0;
-virtual bool getOuiSink(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision) = 0;
+virtual bool getOuiSink(unsigned &ouiId, unsigned char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision) = 0;
 };
 class HDCP

View File

@@ -688,7 +688,7 @@ namespace DisplayPort
 virtual bool getHDCPAbortCodesDP12(NvU32 &hdcpAbortCodesDP12) = 0;
-virtual bool getOuiSink(unsigned &ouiId, char * modelName,
+virtual bool getOuiSink(unsigned &ouiId, unsigned char * modelName,
 size_t modelNameBufferSize, NvU8 & chipRevision) = 0;
 virtual bool getIgnoreSourceOuiHandshake() = 0;

View File

@@ -102,9 +102,9 @@ namespace DisplayPort
 NvU8 cachedSourceChipRevision;
 bool bOuiCached;
 unsigned ouiId; // Sink ouiId
-char modelName[NV_DPCD_SOURCE_DEV_ID_STRING__SIZE + 1]; // Device Model-name
+unsigned char modelName[NV_DPCD_SOURCE_DEV_ID_STRING__SIZE + 1]; // Device Model-name
 bool bIgnoreSrcOuiHandshake; // Skip writing source OUI
 LinkPolicy linkPolicy;
@@ -624,7 +624,7 @@ namespace DisplayPort
 void freeTimeslice(GroupImpl * targetGroup);
 void flushTimeslotsToHardware();
 bool getHDCPAbortCodesDP12(NvU32 &hdcpAbortCodesDP12);
-bool getOuiSink(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision);
+bool getOuiSink(unsigned &ouiId, unsigned char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision);
 bool hdcpValidateKsv(const NvU8 *ksv, NvU32 Size);
 void cancelHdcpCallbacks();
 bool handleCPIRQ();

View File

@@ -865,7 +865,7 @@ struct DPCDHALImpl : DPCDHAL
 return true;
 }
-virtual bool getOuiSink(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision)
+virtual bool getOuiSink(unsigned &ouiId, unsigned char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision)
 {
 NvU8 ouiBuffer[16];
 int address = NV_DPCD_SINK_IEEE_OUI;
@@ -903,7 +903,7 @@ struct DPCDHALImpl : DPCDHAL
 // Next 6 bytes are Device Identification String, copy as much as we can (limited buffer case).
 unsigned int i;
 for (i = 0; i < modelNameBufferSize; i++)
-modelName[i] = ouiBuffer[3+i];
+modelName[i] = (unsigned char)ouiBuffer[3+i];
 chipRevision = ouiBuffer[9];

View File

@@ -3616,7 +3616,7 @@ bool ConnectorImpl::assessPCONLinkCapability(PCONLinkControl *pConControl)
 return true;
 }
-bool ConnectorImpl::getOuiSink(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision)
+bool ConnectorImpl::getOuiSink(unsigned &ouiId, unsigned char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision)
 {
 if (!previousPlugged || !hal->getOuiSupported())
 return false;

View File

@@ -66,21 +66,15 @@ void ConnectorImpl::applyOuiWARs()
 // Synaptics
 case 0x24CC90:
 if ((modelName[0] == 'S') && (modelName[1] == 'Y') && (modelName[2] == 'N') &&
-(modelName[3] == 'A') && (modelName[4] == 'S') &&
+(modelName[3] == 'A') && (((modelName[4] == 'S') &&
 ((modelName[5] == '1') || (modelName[5] == '2') ||
 (modelName[5] == '3') || (modelName[5] == '#') ||
-(modelName[5] == '\"')))
+(modelName[5] == '\"')))||((modelName[4] == 0x84) &&
+(modelName[5] == '0'))))
 {
 //
 // Extended latency from link-train end to FEC enable pattern
 // to avoid link lost or blank screen with Synaptics branch.
-// (Bug 2561206)
-//
-// Dock SKU ID:
-// Dell Salomon-WD19TB SYNAS1
-// HP Hook SYNAS3
-// HP Adira-A SYNAS#
-// Lenovo SYNAS" / SYNAS2
 //
 LT2FecLatencyMs = 57;
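The `char` to `unsigned char` change for the DPCD model-name buffer in the preceding files is what makes the new branch above workable: it compares `modelName[4]` against `0x84`, a byte value a plain `char` cannot match portably where `char` is signed. A minimal standalone sketch of the pitfall (illustration only, not driver code):

```c
/* Standalone illustration: why matching a byte value >= 0x80, such as the
 * 0x84 checked above, wants an unsigned char buffer. */
#include <stdio.h>

int main(void)
{
    char          s = (char)0x84;          /* often signed (e.g. x86): holds -124 */
    unsigned char u = (unsigned char)0x84; /* always holds 132 */

    printf("signed char   == 0x84 ? %d\n", s == 0x84); /* 0 where char is signed */
    printf("unsigned char == 0x84 ? %d\n", u == 0x84); /* always 1 */
    return 0;
}
```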

View File

@@ -36,25 +36,25 @@
 // and then checked back in. You cannot make changes to these sections without
 // corresponding changes to the buildmeister script
 #ifndef NV_BUILD_BRANCH
-#define NV_BUILD_BRANCH r552_52
+#define NV_BUILD_BRANCH r550_00
 #endif
 #ifndef NV_PUBLIC_BRANCH
-#define NV_PUBLIC_BRANCH r552_52
+#define NV_PUBLIC_BRANCH r550_00
 #endif
 #if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
-#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r550/r552_52-292"
-#define NV_BUILD_CHANGELIST_NUM (34362171)
+#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r550/r550_00-326"
+#define NV_BUILD_CHANGELIST_NUM (34471492)
 #define NV_BUILD_TYPE "Official"
-#define NV_BUILD_NAME "rel/gpu_drv/r550/r552_52-292"
-#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34362171)
+#define NV_BUILD_NAME "rel/gpu_drv/r550/r550_00-326"
+#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34471492)
 #else /* Windows builds */
-#define NV_BUILD_BRANCH_VERSION "r552_52-2"
-#define NV_BUILD_CHANGELIST_NUM (34331643)
-#define NV_BUILD_TYPE "Official"
-#define NV_BUILD_NAME "552.55"
-#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34331643)
+#define NV_BUILD_BRANCH_VERSION "r550_00-324"
+#define NV_BUILD_CHANGELIST_NUM (34468048)
+#define NV_BUILD_TYPE "Nightly"
+#define NV_BUILD_NAME "r550_00-240627"
+#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34454921)
 #define NV_BUILD_BRANCH_BASE_VERSION R550
 #endif
 // End buildmeister python edited section

View File

@@ -4,7 +4,7 @@
 #if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
 (defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
-#define NV_VERSION_STRING "550.90.07"
+#define NV_VERSION_STRING "550.100"
 #else

View File

@@ -516,7 +516,8 @@ NvlStatus nvlink_lib_get_remote_conn_info(nvlink_link *link, nvlink_conn_info *c
 */
 NvlStatus nvlink_lib_discover_and_get_remote_conn_info(nvlink_link *end,
 nvlink_conn_info *conn_info,
-NvU32 flags);
+NvU32 flags,
+NvBool bForceDiscovery);
 /************************************************************************************************/

View File

@@ -48,7 +48,8 @@ nvlink_core_discover_and_get_remote_end
 (
 nvlink_link *end,
 nvlink_link **remote_end,
-NvU32 flags
+NvU32 flags,
+NvBool bForceDiscovery
 )
 {
 nvlink_intranode_conn *conn = NULL;
@@ -68,7 +69,7 @@ nvlink_core_discover_and_get_remote_end
 return;
 }
-if (nvlinkLibCtx.bNewEndpoints)
+if (nvlinkLibCtx.bNewEndpoints || bForceDiscovery)
 {
 if (!_nvlink_core_all_links_initialized())
 {

View File

@@ -1481,7 +1481,7 @@ _nvlink_lib_ctrl_device_discover_peer_link
 (linkMode == NVLINK_LINKSTATE_SLEEP))
 {
 nvlink_link *remoteLink = NULL;
-nvlink_core_discover_and_get_remote_end(link, &remoteLink, 0);
+nvlink_core_discover_and_get_remote_end(link, &remoteLink, 0, NV_FALSE);
 if (remoteLink == NULL)
 {
 NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO,

View File

@@ -183,7 +183,8 @@ nvlink_lib_discover_and_get_remote_conn_info
 (
 nvlink_link *end,
 nvlink_conn_info *conn_info,
-NvU32 flags
+NvU32 flags,
+NvBool bForceDiscovery
 )
 {
 NvlStatus status = NVL_SUCCESS;
@@ -258,7 +259,8 @@ nvlink_lib_discover_and_get_remote_conn_info
 conn_info->bConnected = NV_FALSE;
 // Get the remote_end of the link
-nvlink_core_discover_and_get_remote_end(end, &remote_end, flags);
+nvlink_core_discover_and_get_remote_end(end, &remote_end, flags,
+bForceDiscovery);
 if (remote_end)
 {

View File

@@ -175,7 +175,8 @@ void nvlink_core_correlate_conn_by_token(nvlink_link *srcLink, NvU64 writeToken,
 */
 void nvlink_core_discover_and_get_remote_end(nvlink_link *end,
 nvlink_link **remote_end,
-NvU32 flags);
+NvU32 flags,
+NvBool bForceDiscovery);
 /************************************************************************************************/

View File

@@ -4075,7 +4075,9 @@ nvswitch_ctrl_get_nvlink_status_lr10
 }
 else
 {
-nvlink_lib_discover_and_get_remote_conn_info(link, &conn_info, NVLINK_STATE_CHANGE_SYNC);
+nvlink_lib_discover_and_get_remote_conn_info(link, &conn_info,
+NVLINK_STATE_CHANGE_SYNC,
+NV_FALSE);
 }
 // Set NVLINK per-link caps

View File

@@ -1,5 +1,5 @@
 /*
-* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+* SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -285,7 +285,8 @@ nvswitch_minion_send_command_ls10
 data = FLD_SET_DRF_NUM(_MINION, _NVLINK_DL_CMD, _FAULT, 1, 0x0);
 NVSWITCH_MINION_LINK_WR32_LS10(device, linkNumber, _MINION, _NVLINK_DL_CMD(localLinkNumber), data);
-return -NVL_ERR_INVALID_STATE;
+return (DRF_VAL(_NVLSTAT, _MN00, _LINK_INTR_SUBCODE, statData) == MINION_ALARM_BUSY) ?
+-NVL_ERR_STATE_IN_USE : -NVL_ERR_INVALID_STATE;
 }
 else
 {

View File

@@ -460,11 +460,20 @@ typedef struct NV208F_CTRL_FB_ECC_SET_WRITE_KILL_PARAMS {
 NV_DECLARE_ALIGNED(NvU64 address, 8);
 } NV208F_CTRL_FB_ECC_SET_WRITE_KILL_PARAMS;
+typedef struct NV208F_CTRL_FB_REMAPPING_RBC_ADDRESS_INFO {
+NvU32 bank;
+NvU32 stackId;
+NvU32 row;
+NvU32 partition;
+NvU32 sublocation;
+} NV208F_CTRL_FB_REMAPPING_RBC_ADDRESS_INFO;
+#define NV208F_CTRL_FB_REMAP_ROW_ADDRESS_TYPE_PHYSICAL 0x0
+#define NV208F_CTRL_FB_REMAP_ROW_ADDRESS_TYPE_RBC 0x1
 /*
 * NV208F_CTRL_FB_REMAPPING_ADDRESS_INFO
 *
-* physicalAddress
-* Physical address to be remapped
 * source
 * The reason for retirement. Valid values for this parameter are
 * from NV2080_CTRL_FB_REMAPPED_ROW_SOURCE_*
@@ -480,11 +489,23 @@ typedef struct NV208F_CTRL_FB_ECC_SET_WRITE_KILL_PARAMS {
 * Attempting to remap a reserved row
 * NV208F_CTRL_FB_REMAP_ROW_STATUS_INTERNAL_ERROR
 * Some other RM failure
+* addressType
+* Type of address passed. Valid values are:
+* NV208F_CTRL_FB_REMAP_ROW_ADDRESS_TYPE_PHYSICAL
+* The specified address is physical address.
+* NV208F_CTRL_FB_REMAP_ROW_ADDRESS_TYPE_RBC
+* The specified address is DRAM Row Bank Column address.
+* address
+* Union of physicalAddress and rbcAddress. Set the appropriate one based on the address type.
 */
 typedef struct NV208F_CTRL_FB_REMAPPING_ADDRESS_INFO {
-NV_DECLARE_ALIGNED(NvU64 physicalAddress, 8);
 NvU8 source;
 NvU32 status;
+NvU8 addressType;
+union {
+NV_DECLARE_ALIGNED(NvU64 physicalAddress, 8);
+NV208F_CTRL_FB_REMAPPING_RBC_ADDRESS_INFO rbcAddress;
+} address;
 } NV208F_CTRL_FB_REMAPPING_ADDRESS_INFO;
 /* valid values for status */
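The remap-row parameter now carries an explicit discriminator plus a union, so a caller fills exactly one member and tags it via `addressType`. A simplified, self-contained sketch of the intended usage — plain C stand-ins for the NV types, not the real header:

```c
/* Simplified mirror of the new control layout; field names follow the header
 * above, types are ordinary C stand-ins for illustration only. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t bank, stackId, row, partition, sublocation;
} RbcAddressInfo;

enum { ADDRESS_TYPE_PHYSICAL = 0x0, ADDRESS_TYPE_RBC = 0x1 };

typedef struct {
    uint8_t  source;
    uint32_t status;
    uint8_t  addressType;            /* selects which union member is valid */
    union {
        uint64_t       physicalAddress;
        RbcAddressInfo rbcAddress;
    } address;
} RemappingAddressInfo;

int main(void)
{
    RemappingAddressInfo info = {0};

    /* Caller choosing the physical-address form: */
    info.addressType             = ADDRESS_TYPE_PHYSICAL;
    info.address.physicalAddress = 0x1234000ull;

    /* ...or the row/bank/column form: */
    info.addressType        = ADDRESS_TYPE_RBC;
    info.address.rbcAddress = (RbcAddressInfo){ .bank = 1, .row = 42, .partition = 0 };

    printf("addressType = %u\n", info.addressType);
    return 0;
}
```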

View File

@@ -1,5 +1,5 @@
 /*
-* SPDX-FileCopyrightText: Copyright (c) 200-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+* SPDX-FileCopyrightText: Copyright (c) 2004-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -632,6 +632,7 @@ enum {
 , CS_INTEL_1B81
 , CS_INTEL_18DC
 , CS_INTEL_7A04
+, CS_INTEL_5795
 , CS_SIFIVE_FU740_C000
 , CS_PLDA_XPRESSRICH_AXI_REF
 , CS_AMPERE_AMPEREONE

View File

@@ -100,61 +100,61 @@ enum
 #define CC_KEYSPACE_TOTAL_SIZE (CC_KEYSPACE_GSP_SIZE + CC_KEYSPACE_SEC2_SIZE + (8 * CC_KEYSPACE_LCE_SIZE))
-#define CC_LKEYID_LCE0_H2D_USER_STR "Lce0_h2d_user"
+#define CC_LKEYID_LCE0_H2D_USER_STR "Lce00_h2d_user"
-#define CC_LKEYID_LCE0_D2H_USER_STR "Lce0_d2h_user"
+#define CC_LKEYID_LCE0_D2H_USER_STR "Lce00_d2h_user"
-#define CC_LKEYID_LCE0_H2D_KERN_STR "Lce0_h2d_kernel"
+#define CC_LKEYID_LCE0_H2D_KERN_STR "Lce00_h2d_kernel"
-#define CC_LKEYID_LCE0_D2H_KERN_STR "Lce0_d2h_kernel"
+#define CC_LKEYID_LCE0_D2H_KERN_STR "Lce00_d2h_kernel"
-#define CC_LKEYID_LCE0_H2D_P2P_STR "Lce0_h2d_p2p"
+#define CC_LKEYID_LCE0_H2D_P2P_STR "Lce00_h2d_p2p"
-#define CC_LKEYID_LCE0_D2H_P2P_STR "Lce0_d2h_p2p"
+#define CC_LKEYID_LCE0_D2H_P2P_STR "Lce00_d2h_p2p"
-#define CC_LKEYID_LCE1_H2D_USER_STR "Lce1_h2d_user"
+#define CC_LKEYID_LCE1_H2D_USER_STR "Lce01_h2d_user"
-#define CC_LKEYID_LCE1_D2H_USER_STR "Lce1_d2h_user"
+#define CC_LKEYID_LCE1_D2H_USER_STR "Lce01_d2h_user"
-#define CC_LKEYID_LCE1_H2D_KERN_STR "Lce1_h2d_kernel"
+#define CC_LKEYID_LCE1_H2D_KERN_STR "Lce01_h2d_kernel"
-#define CC_LKEYID_LCE1_D2H_KERN_STR "Lce1_d2h_kernel"
+#define CC_LKEYID_LCE1_D2H_KERN_STR "Lce01_d2h_kernel"
-#define CC_LKEYID_LCE1_H2D_P2P_STR "Lce1_h2d_p2p"
+#define CC_LKEYID_LCE1_H2D_P2P_STR "Lce01_h2d_p2p"
-#define CC_LKEYID_LCE1_D2H_P2P_STR "Lce1_d2h_p2p"
+#define CC_LKEYID_LCE1_D2H_P2P_STR "Lce01_d2h_p2p"
-#define CC_LKEYID_LCE2_H2D_USER_STR "Lce2_h2d_user"
+#define CC_LKEYID_LCE2_H2D_USER_STR "Lce02_h2d_user"
-#define CC_LKEYID_LCE2_D2H_USER_STR "Lce2_d2h_user"
+#define CC_LKEYID_LCE2_D2H_USER_STR "Lce02_d2h_user"
-#define CC_LKEYID_LCE2_H2D_KERN_STR "Lce2_h2d_kernel"
+#define CC_LKEYID_LCE2_H2D_KERN_STR "Lce02_h2d_kernel"
-#define CC_LKEYID_LCE2_D2H_KERN_STR "Lce2_d2h_kernel"
+#define CC_LKEYID_LCE2_D2H_KERN_STR "Lce02_d2h_kernel"
-#define CC_LKEYID_LCE2_H2D_P2P_STR "Lce2_h2d_p2p"
+#define CC_LKEYID_LCE2_H2D_P2P_STR "Lce02_h2d_p2p"
-#define CC_LKEYID_LCE2_D2H_P2P_STR "Lce2_d2h_p2p"
+#define CC_LKEYID_LCE2_D2H_P2P_STR "Lce02_d2h_p2p"
-#define CC_LKEYID_LCE3_H2D_USER_STR "Lce3_h2d_user"
+#define CC_LKEYID_LCE3_H2D_USER_STR "Lce03_h2d_user"
-#define CC_LKEYID_LCE3_D2H_USER_STR "Lce3_d2h_user"
+#define CC_LKEYID_LCE3_D2H_USER_STR "Lce03_d2h_user"
-#define CC_LKEYID_LCE3_H2D_KERN_STR "Lce3_h2d_kernel"
+#define CC_LKEYID_LCE3_H2D_KERN_STR "Lce03_h2d_kernel"
-#define CC_LKEYID_LCE3_D2H_KERN_STR "Lce3_d2h_kernel"
+#define CC_LKEYID_LCE3_D2H_KERN_STR "Lce03_d2h_kernel"
-#define CC_LKEYID_LCE3_H2D_P2P_STR "Lce3_h2d_p2p"
+#define CC_LKEYID_LCE3_H2D_P2P_STR "Lce03_h2d_p2p"
-#define CC_LKEYID_LCE3_D2H_P2P_STR "Lce3_d2h_p2p"
+#define CC_LKEYID_LCE3_D2H_P2P_STR "Lce03_d2h_p2p"
-#define CC_LKEYID_LCE4_H2D_USER_STR "Lce4_h2d_user"
+#define CC_LKEYID_LCE4_H2D_USER_STR "Lce04_h2d_user"
-#define CC_LKEYID_LCE4_D2H_USER_STR "Lce4_d2h_user"
+#define CC_LKEYID_LCE4_D2H_USER_STR "Lce04_d2h_user"
-#define CC_LKEYID_LCE4_H2D_KERN_STR "Lce4_h2d_kernel"
+#define CC_LKEYID_LCE4_H2D_KERN_STR "Lce04_h2d_kernel"
-#define CC_LKEYID_LCE4_D2H_KERN_STR "Lce4_d2h_kernel"
+#define CC_LKEYID_LCE4_D2H_KERN_STR "Lce04_d2h_kernel"
-#define CC_LKEYID_LCE4_H2D_P2P_STR "Lce4_h2d_p2p"
+#define CC_LKEYID_LCE4_H2D_P2P_STR "Lce04_h2d_p2p"
-#define CC_LKEYID_LCE4_D2H_P2P_STR "Lce4_d2h_p2p"
+#define CC_LKEYID_LCE4_D2H_P2P_STR "Lce04_d2h_p2p"
-#define CC_LKEYID_LCE5_H2D_USER_STR "Lce5_h2d_user"
+#define CC_LKEYID_LCE5_H2D_USER_STR "Lce05_h2d_user"
-#define CC_LKEYID_LCE5_D2H_USER_STR "Lce5_d2h_user"
+#define CC_LKEYID_LCE5_D2H_USER_STR "Lce05_d2h_user"
-#define CC_LKEYID_LCE5_H2D_KERN_STR "Lce5_h2d_kernel"
+#define CC_LKEYID_LCE5_H2D_KERN_STR "Lce05_h2d_kernel"
-#define CC_LKEYID_LCE5_D2H_KERN_STR "Lce5_d2h_kernel"
+#define CC_LKEYID_LCE5_D2H_KERN_STR "Lce05_d2h_kernel"
-#define CC_LKEYID_LCE5_H2D_P2P_STR "Lce5_h2d_p2p"
+#define CC_LKEYID_LCE5_H2D_P2P_STR "Lce05_h2d_p2p"
-#define CC_LKEYID_LCE5_D2H_P2P_STR "Lce5_d2h_p2p"
+#define CC_LKEYID_LCE5_D2H_P2P_STR "Lce05_d2h_p2p"
-#define CC_LKEYID_LCE6_H2D_USER_STR "Lce6_h2d_user"
+#define CC_LKEYID_LCE6_H2D_USER_STR "Lce06_h2d_user"
-#define CC_LKEYID_LCE6_D2H_USER_STR "Lce6_d2h_user"
+#define CC_LKEYID_LCE6_D2H_USER_STR "Lce06_d2h_user"
-#define CC_LKEYID_LCE6_H2D_KERN_STR "Lce6_h2d_kernel"
+#define CC_LKEYID_LCE6_H2D_KERN_STR "Lce06_h2d_kernel"
-#define CC_LKEYID_LCE6_D2H_KERN_STR "Lce6_d2h_kernel"
+#define CC_LKEYID_LCE6_D2H_KERN_STR "Lce06_d2h_kernel"
-#define CC_LKEYID_LCE6_H2D_P2P_STR "Lce6_h2d_p2p"
+#define CC_LKEYID_LCE6_H2D_P2P_STR "Lce06_h2d_p2p"
-#define CC_LKEYID_LCE6_D2H_P2P_STR "Lce6_d2h_p2p"
+#define CC_LKEYID_LCE6_D2H_P2P_STR "Lce06_d2h_p2p"
-#define CC_LKEYID_LCE7_H2D_USER_STR "Lce7_h2d_user"
+#define CC_LKEYID_LCE7_H2D_USER_STR "Lce07_h2d_user"
-#define CC_LKEYID_LCE7_D2H_USER_STR "Lce7_d2h_user"
+#define CC_LKEYID_LCE7_D2H_USER_STR "Lce07_d2h_user"
-#define CC_LKEYID_LCE7_H2D_KERN_STR "Lce7_h2d_kernel"
+#define CC_LKEYID_LCE7_H2D_KERN_STR "Lce07_h2d_kernel"
-#define CC_LKEYID_LCE7_D2H_KERN_STR "Lce7_d2h_kernel"
+#define CC_LKEYID_LCE7_D2H_KERN_STR "Lce07_d2h_kernel"
-#define CC_LKEYID_LCE7_H2D_P2P_STR "Lce7_h2d_p2p"
+#define CC_LKEYID_LCE7_H2D_P2P_STR "Lce07_h2d_p2p"
-#define CC_LKEYID_LCE7_D2H_P2P_STR "Lce7_d2h_p2p"
+#define CC_LKEYID_LCE7_D2H_P2P_STR "Lce07_d2h_p2p"
 // Generate a global key ID from a keyspace (a) and local key ID (b).
 #define CC_GKEYID_GEN(a, b) (NvU32)(((a) << 16) | (b))
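Only the human-readable key-label strings change in this hunk (the LCE index is now zero-padded); the packing macro underneath is untouched. For reference, a standalone sketch of what `CC_GKEYID_GEN` computes — the keyspace index lands in the upper 16 bits and the local key ID in the lower 16:

```c
/* Standalone illustration of the packing shown above; the macro is redefined
 * locally here, and the sample keyspace/key values are made up. */
#include <stdint.h>
#include <stdio.h>

#define CC_GKEYID_GEN(a, b) (uint32_t)(((a) << 16) | (b))

int main(void)
{
    uint32_t gkey = CC_GKEYID_GEN(0x3, 0x12); /* hypothetical keyspace 3, local key 0x12 */
    printf("global key id = 0x%08x (keyspace %u, local 0x%04x)\n",
           gkey, gkey >> 16, gkey & 0xFFFF);
    return 0;
}
```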

View File

@@ -192,6 +192,7 @@ CSINFO chipsetInfo[] =
 {PCI_VENDOR_ID_INTEL, 0x7A8A, CS_INTEL_1B81, "Intel-SapphireRapids", NULL},
 {PCI_VENDOR_ID_INTEL, 0x18DC, CS_INTEL_18DC, "Intel-IceLake", NULL},
 {PCI_VENDOR_ID_INTEL, 0x7A04, CS_INTEL_7A04, "Intel-RaptorLake", Intel_7A04_setupFunc},
+{PCI_VENDOR_ID_INTEL, 0x5795, CS_INTEL_5795, "Intel-GraniteRapids", NULL},
 {PCI_VENDOR_ID_NVIDIA, 0x0FAE, CS_NVIDIA_T210, "T210", Nvidia_T210_setupFunc},
 {PCI_VENDOR_ID_NVIDIA, 0x0FAF, CS_NVIDIA_T210, "T210", Nvidia_T210_setupFunc},

View File

@@ -1,5 +1,5 @@
 /*
-* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -76,7 +76,7 @@ typedef struct _NV_SPDM_DESC_HEADER
 #define NV_SPDM_MAX_TRANSCRIPT_BUFFER_SIZE (2 * NV_SPDM_MAX_SPDM_PAYLOAD_SIZE)
 // Limited by the transport size, do not increase without increasing transport buffer.
-#define NV_SPDM_MAX_RANDOM_MSG_BYTES (0x0)
+#define NV_SPDM_MAX_RANDOM_MSG_BYTES (0x80)
 #ifdef NVRM
 #include "gpu/mem_mgr/mem_desc.h"

View File

@@ -268,6 +268,17 @@ static void __nvoc_init_funcTable_ConfidentialCompute_1(ConfidentialCompute *pTh
 pThis->__confComputeKeyStoreRetrieveViaKeyId__ = &confComputeKeyStoreRetrieveViaKeyId_46f6a7;
 }
+// Hal function -- confComputeDeriveSecretsForCEKeySpace
+if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
+{
+pThis->__confComputeDeriveSecretsForCEKeySpace__ = &confComputeDeriveSecretsForCEKeySpace_GH100;
+}
+// default
+else
+{
+pThis->__confComputeDeriveSecretsForCEKeySpace__ = &confComputeDeriveSecretsForCEKeySpace_46f6a7;
+}
 // Hal function -- confComputeDeriveSecrets
 if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
 {
@@ -312,6 +323,39 @@
 pThis->__confComputeGetEngineIdFromKeySpace__ = &confComputeGetEngineIdFromKeySpace_78ac8b;
 }
+// Hal function -- confComputeGetKeySpaceFromKChannel
+if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
+{
+pThis->__confComputeGetKeySpaceFromKChannel__ = &confComputeGetKeySpaceFromKChannel_GH100;
+}
+// default
+else
+{
+pThis->__confComputeGetKeySpaceFromKChannel__ = &confComputeGetKeySpaceFromKChannel_46f6a7;
+}
+// Hal function -- confComputeGetLceKeyIdFromKChannel
+if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
+{
+pThis->__confComputeGetLceKeyIdFromKChannel__ = &confComputeGetLceKeyIdFromKChannel_GH100;
+}
+// default
+else
+{
+pThis->__confComputeGetLceKeyIdFromKChannel__ = &confComputeGetLceKeyIdFromKChannel_46f6a7;
+}
+// Hal function -- confComputeGetMaxCeKeySpaceIdx
+if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
+{
+pThis->__confComputeGetMaxCeKeySpaceIdx__ = &confComputeGetMaxCeKeySpaceIdx_6c58cf;
+}
+// default
+else
+{
+pThis->__confComputeGetMaxCeKeySpaceIdx__ = &confComputeGetMaxCeKeySpaceIdx_4a4dee;
+}
 // Hal function -- confComputeGlobalKeyIsKernelPriv
 if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
 {

View File

@@ -103,10 +103,14 @@ struct ConfidentialCompute {
 void (*__confComputeSetErrorState__)(struct OBJGPU *, struct ConfidentialCompute *);
 NV_STATUS (*__confComputeKeyStoreRetrieveViaChannel__)(struct ConfidentialCompute *, struct KernelChannel *, ROTATE_IV_TYPE, NvBool, CC_KMB *);
 NV_STATUS (*__confComputeKeyStoreRetrieveViaKeyId__)(struct ConfidentialCompute *, NvU32, ROTATE_IV_TYPE, NvBool, CC_KMB *);
+NV_STATUS (*__confComputeDeriveSecretsForCEKeySpace__)(struct ConfidentialCompute *, RM_ENGINE_TYPE, NvU32);
 NV_STATUS (*__confComputeDeriveSecrets__)(struct ConfidentialCompute *, NvU32);
 NV_STATUS (*__confComputeUpdateSecrets__)(struct ConfidentialCompute *, NvU32);
 NvBool (*__confComputeIsSpdmEnabled__)(struct OBJGPU *, struct ConfidentialCompute *);
 RM_ENGINE_TYPE (*__confComputeGetEngineIdFromKeySpace__)(struct ConfidentialCompute *, NvU32);
+NV_STATUS (*__confComputeGetKeySpaceFromKChannel__)(struct ConfidentialCompute *, struct KernelChannel *, NvU16 *);
+NV_STATUS (*__confComputeGetLceKeyIdFromKChannel__)(struct ConfidentialCompute *, struct KernelChannel *, ROTATE_IV_TYPE, NvU16 *);
+NvU32 (*__confComputeGetMaxCeKeySpaceIdx__)(struct ConfidentialCompute *);
 NvBool (*__confComputeGlobalKeyIsKernelPriv__)(struct ConfidentialCompute *, NvU32);
 NvBool (*__confComputeGlobalKeyIsUvmKey__)(struct ConfidentialCompute *, NvU32);
 NV_STATUS (*__confComputeGetKeyPairByChannel__)(struct OBJGPU *, struct ConfidentialCompute *, struct KernelChannel *, NvU32 *, NvU32 *);
@@ -240,6 +244,8 @@ NV_STATUS __nvoc_objCreate_ConfidentialCompute(ConfidentialCompute**, Dynamic*,
 #define confComputeKeyStoreRetrieveViaChannel_HAL(pConfCompute, pKernelChannel, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle) confComputeKeyStoreRetrieveViaChannel_DISPATCH(pConfCompute, pKernelChannel, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle)
 #define confComputeKeyStoreRetrieveViaKeyId(pConfCompute, globalKeyId, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle) confComputeKeyStoreRetrieveViaKeyId_DISPATCH(pConfCompute, globalKeyId, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle)
 #define confComputeKeyStoreRetrieveViaKeyId_HAL(pConfCompute, globalKeyId, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle) confComputeKeyStoreRetrieveViaKeyId_DISPATCH(pConfCompute, globalKeyId, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle)
+#define confComputeDeriveSecretsForCEKeySpace(pConfCompute, ceRmEngineTypeIdx, ccKeyspaceIndex) confComputeDeriveSecretsForCEKeySpace_DISPATCH(pConfCompute, ceRmEngineTypeIdx, ccKeyspaceIndex)
+#define confComputeDeriveSecretsForCEKeySpace_HAL(pConfCompute, ceRmEngineTypeIdx, ccKeyspaceIndex) confComputeDeriveSecretsForCEKeySpace_DISPATCH(pConfCompute, ceRmEngineTypeIdx, ccKeyspaceIndex)
 #define confComputeDeriveSecrets(pConfCompute, engine) confComputeDeriveSecrets_DISPATCH(pConfCompute, engine)
 #define confComputeDeriveSecrets_HAL(pConfCompute, engine) confComputeDeriveSecrets_DISPATCH(pConfCompute, engine)
 #define confComputeUpdateSecrets(pConfCompute, globalKeyId) confComputeUpdateSecrets_DISPATCH(pConfCompute, globalKeyId)
@@ -248,6 +254,12 @@
 #define confComputeIsSpdmEnabled_HAL(pGpu, pConfCompute) confComputeIsSpdmEnabled_DISPATCH(pGpu, pConfCompute)
 #define confComputeGetEngineIdFromKeySpace(pConfCompute, keySpace) confComputeGetEngineIdFromKeySpace_DISPATCH(pConfCompute, keySpace)
 #define confComputeGetEngineIdFromKeySpace_HAL(pConfCompute, keySpace) confComputeGetEngineIdFromKeySpace_DISPATCH(pConfCompute, keySpace)
+#define confComputeGetKeySpaceFromKChannel(pConfCompute, pKernelChannel, keyspace) confComputeGetKeySpaceFromKChannel_DISPATCH(pConfCompute, pKernelChannel, keyspace)
+#define confComputeGetKeySpaceFromKChannel_HAL(pConfCompute, pKernelChannel, keyspace) confComputeGetKeySpaceFromKChannel_DISPATCH(pConfCompute, pKernelChannel, keyspace)
+#define confComputeGetLceKeyIdFromKChannel(pConfCompute, pKernelChannel, rotateOperation, pKeyId) confComputeGetLceKeyIdFromKChannel_DISPATCH(pConfCompute, pKernelChannel, rotateOperation, pKeyId)
+#define confComputeGetLceKeyIdFromKChannel_HAL(pConfCompute, pKernelChannel, rotateOperation, pKeyId) confComputeGetLceKeyIdFromKChannel_DISPATCH(pConfCompute, pKernelChannel, rotateOperation, pKeyId)
+#define confComputeGetMaxCeKeySpaceIdx(pConfCompute) confComputeGetMaxCeKeySpaceIdx_DISPATCH(pConfCompute)
+#define confComputeGetMaxCeKeySpaceIdx_HAL(pConfCompute) confComputeGetMaxCeKeySpaceIdx_DISPATCH(pConfCompute)
 #define confComputeGlobalKeyIsKernelPriv(pConfCompute, keyId) confComputeGlobalKeyIsKernelPriv_DISPATCH(pConfCompute, keyId)
 #define confComputeGlobalKeyIsKernelPriv_HAL(pConfCompute, keyId) confComputeGlobalKeyIsKernelPriv_DISPATCH(pConfCompute, keyId)
 #define confComputeGlobalKeyIsUvmKey(pConfCompute, keyId) confComputeGlobalKeyIsUvmKey_DISPATCH(pConfCompute, keyId)
@@ -391,6 +403,16 @@ static inline NV_STATUS confComputeKeyStoreRetrieveViaKeyId_DISPATCH(struct Conf
 return pConfCompute->__confComputeKeyStoreRetrieveViaKeyId__(pConfCompute, globalKeyId, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle);
 }
+NV_STATUS confComputeDeriveSecretsForCEKeySpace_GH100(struct ConfidentialCompute *pConfCompute, RM_ENGINE_TYPE ceRmEngineTypeIdx, NvU32 ccKeyspaceIndex);
+static inline NV_STATUS confComputeDeriveSecretsForCEKeySpace_46f6a7(struct ConfidentialCompute *pConfCompute, RM_ENGINE_TYPE ceRmEngineTypeIdx, NvU32 ccKeyspaceIndex) {
+return NV_ERR_NOT_SUPPORTED;
+}
+static inline NV_STATUS confComputeDeriveSecretsForCEKeySpace_DISPATCH(struct ConfidentialCompute *pConfCompute, RM_ENGINE_TYPE ceRmEngineTypeIdx, NvU32 ccKeyspaceIndex) {
+return pConfCompute->__confComputeDeriveSecretsForCEKeySpace__(pConfCompute, ceRmEngineTypeIdx, ccKeyspaceIndex);
+}
 NV_STATUS confComputeDeriveSecrets_GH100(struct ConfidentialCompute *pConfCompute, NvU32 engine);
 static inline NV_STATUS confComputeDeriveSecrets_46f6a7(struct ConfidentialCompute *pConfCompute, NvU32 engine) {
@@ -433,6 +455,38 @@
 return pConfCompute->__confComputeGetEngineIdFromKeySpace__(pConfCompute, keySpace);
 }
+NV_STATUS confComputeGetKeySpaceFromKChannel_GH100(struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, NvU16 *keyspace);
+static inline NV_STATUS confComputeGetKeySpaceFromKChannel_46f6a7(struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, NvU16 *keyspace) {
+return NV_ERR_NOT_SUPPORTED;
+}
+static inline NV_STATUS confComputeGetKeySpaceFromKChannel_DISPATCH(struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, NvU16 *keyspace) {
+return pConfCompute->__confComputeGetKeySpaceFromKChannel__(pConfCompute, pKernelChannel, keyspace);
+}
+NV_STATUS confComputeGetLceKeyIdFromKChannel_GH100(struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation, NvU16 *pKeyId);
+static inline NV_STATUS confComputeGetLceKeyIdFromKChannel_46f6a7(struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation, NvU16 *pKeyId) {
+return NV_ERR_NOT_SUPPORTED;
+}
+static inline NV_STATUS confComputeGetLceKeyIdFromKChannel_DISPATCH(struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation, NvU16 *pKeyId) {
+return pConfCompute->__confComputeGetLceKeyIdFromKChannel__(pConfCompute, pKernelChannel, rotateOperation, pKeyId);
+}
+static inline NvU32 confComputeGetMaxCeKeySpaceIdx_6c58cf(struct ConfidentialCompute *pConfCompute) {
+return CC_KEYSPACE_LCE7;
+}
+static inline NvU32 confComputeGetMaxCeKeySpaceIdx_4a4dee(struct ConfidentialCompute *pConfCompute) {
+return 0;
+}
+static inline NvU32 confComputeGetMaxCeKeySpaceIdx_DISPATCH(struct ConfidentialCompute *pConfCompute) {
+return pConfCompute->__confComputeGetMaxCeKeySpaceIdx__(pConfCompute);
+}
 NvBool confComputeGlobalKeyIsKernelPriv_GH100(struct ConfidentialCompute *pConfCompute, NvU32 keyId);
 static inline NvBool confComputeGlobalKeyIsKernelPriv_491d52(struct ConfidentialCompute *pConfCompute, NvU32 keyId) {

View File

@@ -236,6 +236,17 @@ static void __nvoc_init_funcTable_KernelCE_1(KernelCE *pThis, RmHalspecOwner *pR
 pThis->__kceServiceNotificationInterrupt__ = &kceServiceNotificationInterrupt_IMPL;
+// Hal function -- kceIsSecureCe
+if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
+{
+pThis->__kceIsSecureCe__ = &kceIsSecureCe_GH100;
+}
+// default
+else
+{
+pThis->__kceIsSecureCe__ = &kceIsSecureCe_491d52;
+}
 // Hal function -- kceGetP2PCes
 if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
 {

View File

@@ -121,6 +121,7 @@ struct KernelCE {
 void (*__kceStateDestroy__)(OBJGPU *, struct KernelCE *);
 void (*__kceRegisterIntrService__)(OBJGPU *, struct KernelCE *, IntrServiceRecord *);
 NV_STATUS (*__kceServiceNotificationInterrupt__)(OBJGPU *, struct KernelCE *, IntrServiceServiceNotificationInterruptArguments *);
+NvBool (*__kceIsSecureCe__)(OBJGPU *, struct KernelCE *);
 NV_STATUS (*__kceGetP2PCes__)(struct KernelCE *, OBJGPU *, NvU32, NvU32 *);
 NV_STATUS (*__kceGetNvlinkAutoConfigCeValues__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *);
 NvBool (*__kceGetNvlinkMaxTopoForTable__)(OBJGPU *, struct KernelCE *, struct NVLINK_TOPOLOGY_PARAMS *, void *, NvU32, NvU32 *);
@@ -200,6 +201,8 @@ NV_STATUS __nvoc_objCreate_KernelCE(KernelCE**, Dynamic*, NvU32);
 #define kceStateDestroy(arg0, arg1) kceStateDestroy_DISPATCH(arg0, arg1)
 #define kceRegisterIntrService(arg0, arg1, arg2) kceRegisterIntrService_DISPATCH(arg0, arg1, arg2)
 #define kceServiceNotificationInterrupt(arg0, arg1, arg2) kceServiceNotificationInterrupt_DISPATCH(arg0, arg1, arg2)
+#define kceIsSecureCe(pGpu, pKCe) kceIsSecureCe_DISPATCH(pGpu, pKCe)
+#define kceIsSecureCe_HAL(pGpu, pKCe) kceIsSecureCe_DISPATCH(pGpu, pKCe)
 #define kceGetP2PCes(arg0, pGpu, gpuMask, nvlinkP2PCeMask) kceGetP2PCes_DISPATCH(arg0, pGpu, gpuMask, nvlinkP2PCeMask)
 #define kceGetP2PCes_HAL(arg0, pGpu, gpuMask, nvlinkP2PCeMask) kceGetP2PCes_DISPATCH(arg0, pGpu, gpuMask, nvlinkP2PCeMask)
 #define kceGetNvlinkAutoConfigCeValues(pGpu, pKCe, arg0, arg1, arg2) kceGetNvlinkAutoConfigCeValues_DISPATCH(pGpu, pKCe, arg0, arg1, arg2)
@@ -405,6 +408,16 @@ static inline NV_STATUS kceServiceNotificationInterrupt_DISPATCH(OBJGPU *arg0, s
 return arg1->__kceServiceNotificationInterrupt__(arg0, arg1, arg2);
 }
+NvBool kceIsSecureCe_GH100(OBJGPU *pGpu, struct KernelCE *pKCe);
+static inline NvBool kceIsSecureCe_491d52(OBJGPU *pGpu, struct KernelCE *pKCe) {
+return ((NvBool)(0 != 0));
+}
+static inline NvBool kceIsSecureCe_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe) {
+return pKCe->__kceIsSecureCe__(pGpu, pKCe);
+}
 NV_STATUS kceGetP2PCes_GV100(struct KernelCE *arg0, OBJGPU *pGpu, NvU32 gpuMask, NvU32 *nvlinkP2PCeMask);
 NV_STATUS kceGetP2PCes_GH100(struct KernelCE *arg0, OBJGPU *pGpu, NvU32 gpuMask, NvU32 *nvlinkP2PCeMask);
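These generated declarations follow the usual NVOC HAL pattern: at init time the per-object function pointer is bound either to the GH100 implementation or to a default stub (here one that returns false), and the `_DISPATCH` wrapper simply calls through the pointer. A self-contained sketch of that dispatch shape — the names below are illustrative, not the generated code:

```c
/* Minimal function-pointer dispatch, mirroring the HAL pattern above. */
#include <stdbool.h>
#include <stdio.h>

typedef struct KernelCEExample {
    bool (*isSecureCe)(struct KernelCEExample *);
} KernelCEExample;

static bool isSecureCe_GH100(struct KernelCEExample *k)   { (void)k; return true;  } /* chip-specific */
static bool isSecureCe_default(struct KernelCEExample *k) { (void)k; return false; } /* stub */

static bool isSecureCe_DISPATCH(KernelCEExample *k) { return k->isSecureCe(k); }

int main(void)
{
    KernelCEExample ce = { .isSecureCe = isSecureCe_GH100 }; /* bound at init time */
    printf("secure CE supported: %d\n", isSecureCe_DISPATCH(&ce));

    ce.isSecureCe = isSecureCe_default; /* other chips fall back to the stub */
    printf("secure CE supported: %d\n", isSecureCe_DISPATCH(&ce));
    return 0;
}
```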

View File

@@ -327,6 +327,7 @@ struct KernelNvlink {
 NvBool PRIVATE_FIELD(bEnableSafeModeAtLoad);
 NvBool PRIVATE_FIELD(bEnableAli);
 NvBool PRIVATE_FIELD(bFloorSwept);
+NvU32 PRIVATE_FIELD(numPortEvents);
 NvBool PRIVATE_FIELD(bLinkTrainingDebugSpew);
 NvBool PRIVATE_FIELD(bDisableL2Mode);
 NvU32 PRIVATE_FIELD(nvlinkLinkSpeed);
@@ -443,6 +444,7 @@ struct KernelNvlink_PRIVATE {
 NvBool bEnableSafeModeAtLoad;
 NvBool bEnableAli;
 NvBool bFloorSwept;
+NvU32 numPortEvents;
 NvBool bLinkTrainingDebugSpew;
 NvBool bDisableL2Mode;
 NvU32 nvlinkLinkSpeed;

View File

@@ -1,5 +1,5 @@
 /*
-* SPDX-FileCopyrightText: Copyright (c) 2015-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+* SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -230,6 +230,8 @@ typedef union {
 volatile NvU32 guestEccStatus; // guest ecc status
 volatile NvU64 sysmemBitMapTablePfn; // Root node's pfn value of dirty sysmem tracking table
 volatile NvU32 guestOsType; // Guest OS type
+volatile NvU32 requestedGspCaps; // requested GSP caps
+volatile VGPU_GSP_BUF_INFO debugBuf; // Debug buffer address
 } ;
 volatile NvU8 buf[VGPU_GSP_CTRL_BUF_SIZE_V1];
 } VGPU_GSP_CTRL_BUF_V1;
@@ -248,6 +250,8 @@ ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, getEventBuf ) == 0x048);
 ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, guestEccStatus ) == 0x04C);
 ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, sysmemBitMapTablePfn ) == 0x050);
 ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, guestOsType ) == 0x058);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, requestedGspCaps ) == 0x05C);
+ct_assert(NV_OFFSETOF(VGPU_GSP_CTRL_BUF_V1, debugBuf ) == 0x060);
 /****** Response buffer: written by GSP vGPU plugin and read by guest RM ******/
@@ -259,6 +263,7 @@ typedef union {
 volatile NvU32 putEventBuf; // PUT index in circular event buffer
 volatile NvU32 hostEccStatus; // host ecc status
 volatile NvU32 usmType; // Host USM Type
+volatile NvU32 enabledGspCaps; // Enabled GSP caps
 };
 volatile NvU8 buf[VGPU_GSP_RESPONSE_BUF_SIZE_V1];
 } VGPU_GSP_RESPONSE_BUF_V1;
@@ -271,6 +276,7 @@ ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, responseId ) == 0x000);
 ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, putEventBuf ) == 0x004);
 ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, hostEccStatus ) == 0x008);
 ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, usmType ) == 0x00C);
+ct_assert(NV_OFFSETOF(VGPU_GSP_RESPONSE_BUF_V1, enabledGspCaps ) == 0x010);
 /******************************************************************************/
 /* GSP Control buffer format - Version 1 - END */
@@ -320,5 +326,10 @@ typedef struct {
 NvU64 nodePfns[MAX_PFNS_PER_4K_PAGE - 1];
 } VGPU_GSP_SYSMEM_BITMAP_ROOT_NODE;
+#define NV_VGPU_DEBUG_BUFF_DRIVER_SIZE 0x7FF
+#define NV_VGPU_GSP_CAPS_DEBUG_BUFF_SUPPORTED 0:0
+#define NV_VGPU_GSP_CAPS_DEBUG_BUFF_SUPPORTED_TRUE 0x00000001
+#define NV_VGPU_GSP_CAPS_DEBUG_BUFF_SUPPORTED_FALSE 0x00000000
 ct_assert(sizeof(VGPU_GSP_SYSMEM_BITMAP_ROOT_NODE) == 0x1000);
 #endif // __vgpu_dev_nv_vgpu_h__

View File

@@ -153,6 +153,8 @@ struct _object_vgpu
 VGPU_GSP_CTRL_BUF_RM *gspCtrlBuf;
 VGPU_GSP_RESPONSE_BUF_RM *gspResponseBuf;
+VGPU_MEM_INFO debugBuff;
 NvBool bGspPlugin;
 NvBool bIsBar2Physical;
 // Start offset of FB to use in Physical BAR2 mode

View File

@@ -538,6 +538,84 @@ static void _teardownGspSharedMemory(OBJGPU *pGpu, OBJVGPU *pVGpu)
 _freeSharedMemory(pGpu, pVGpu);
 }
+static NvU64 vgpuGspMakeBufferAddress(VGPU_MEM_INFO *pMemInfo, NvU64 gpfn);
+static NV_STATUS _setupGspDebugBuff(OBJGPU *pGpu, OBJVGPU *pVGpu)
+{
+NV_STATUS status;
+if (!RMCFG_FEATURE_PLATFORM_WINDOWS)
+return NV_OK;
+status = _allocRpcMemDesc(pGpu,
+RM_PAGE_SIZE,
+NV_MEMORY_CONTIGUOUS,
+ADDR_SYSMEM,
+0,
+&pVGpu->debugBuff.pMemDesc,
+(void**)&pVGpu->debugBuff.pMemory,
+(void**)&pVGpu->debugBuff.pPriv);
+if (status != NV_OK)
+{
+NV_PRINTF(LEVEL_ERROR, "RPC: Debug memory setup failed: 0x%x\n", status);
+return status;
+}
+pVGpu->debugBuff.pfn = memdescGetPte(pVGpu->debugBuff.pMemDesc, AT_GPU, 0) >> RM_PAGE_SHIFT;
+pVGpu->gspCtrlBuf->v1.debugBuf.addr = vgpuGspMakeBufferAddress(&pVGpu->debugBuff, pVGpu->debugBuff.pfn);
+pVGpu->gspCtrlBuf->v1.requestedGspCaps = FLD_SET_DRF(_VGPU, _GSP_CAPS, _DEBUG_BUFF_SUPPORTED, _TRUE,
+pVGpu->gspCtrlBuf->v1.requestedGspCaps);
+return NV_OK;
+}
+static void _teardownGspDebugBuff(OBJGPU *pGpu, OBJVGPU *pVGpu)
+{
+VGPU_STATIC_INFO *pVSI = GPU_GET_STATIC_INFO(pGpu);
+if (!RMCFG_FEATURE_PLATFORM_WINDOWS)
+return;
+if (!pVGpu->debugBuff.pfn)
+return;
+NV_ASSERT_OR_RETURN_VOID(pVSI);
+pVSI->vgpuConfig.debugBufferSize = 0;
+pVSI->vgpuConfig.debugBuffer = NULL;
+pVGpu->debugBuff.pfn = 0;
+_freeRpcMemDesc(pGpu,
+&pVGpu->debugBuff.pMemDesc,
+(void**)&pVGpu->debugBuff.pMemory,
+(void**)&pVGpu->debugBuff.pPriv);
+}
+static NV_STATUS _tryEnableGspDebugBuff(OBJGPU *pGpu, OBJVGPU *pVGpu)
+{
+VGPU_STATIC_INFO *pVSI = GPU_GET_STATIC_INFO(pGpu);
+if (!RMCFG_FEATURE_PLATFORM_WINDOWS)
+return NV_OK;
+if (!FLD_TEST_DRF(_VGPU, _GSP_CAPS, _DEBUG_BUFF_SUPPORTED, _TRUE,
+pVGpu->gspResponseBuf->v1.enabledGspCaps)) {
+_teardownGspDebugBuff(pGpu, pVGpu);
+return NV_OK;
+}
+NV_ASSERT_OR_RETURN(pVSI, NV_ERR_GENERIC);
+NV_ASSERT_OR_RETURN(pVGpu->debugBuff.pMemory, NV_ERR_GENERIC);
+pVSI->vgpuConfig.debugBufferSize = NV_VGPU_DEBUG_BUFF_DRIVER_SIZE;
+pVSI->vgpuConfig.debugBuffer = NV_PTR_TO_NvP64(pVGpu->debugBuff.pMemory);
+return NV_OK;
+}
 static NV_STATUS _initSysmemPfnRing(OBJGPU *pGpu)
 {
 NV_STATUS status = NV_OK;
@@ -1247,6 +1325,9 @@ static NV_STATUS _vgpuGspSetupCommunicationWithPlugin(OBJGPU *pGpu, OBJVGPU *pVG
 rpcVgpuGspWriteScratchRegister_HAL(pRpc, pGpu, addrCtrlBuf);
 }
+NV_PRINTF(LEVEL_INFO, "RPC: Version 0x%x\n", pVGpu->gspCtrlBuf->v1.version);
+NV_PRINTF(LEVEL_INFO, "RPC: Requested GSP caps 0x%x\n", pVGpu->gspCtrlBuf->v1.requestedGspCaps);
+NV_PRINTF(LEVEL_INFO, "RPC: Enabled GSP caps 0x%x\n", pVGpu->gspResponseBuf->v1.enabledGspCaps);
 NV_PRINTF(LEVEL_INFO, "RPC: Control buf addr 0x%llx\n", addrCtrlBuf);
 NV_PRINTF(LEVEL_INFO, "RPC: Response buf addr 0x%llx\n", pVGpu->gspCtrlBuf->v1.responseBuf.addr);
 NV_PRINTF(LEVEL_INFO, "RPC: Message buf addr 0x%llx\n", pVGpu->gspCtrlBuf->v1.msgBuf.addr);
@@ -1255,6 +1336,7 @@ static NV_STATUS _vgpuGspSetupCommunicationWithPlugin(OBJGPU *pGpu, OBJVGPU *pVG
 NV_PRINTF(LEVEL_INFO, "RPC: Shared buf BAR2 offset 0x%llx\n", pVGpu->gspCtrlBuf->v1.sharedMem.bar2Offset);
 NV_PRINTF(LEVEL_INFO, "RPC: Event buf addr 0x%llx\n", pVGpu->gspCtrlBuf->v1.eventBuf.addr);
 NV_PRINTF(LEVEL_INFO, "RPC: Event buf BAR2 offset 0x%llx\n", pVGpu->gspCtrlBuf->v1.eventBuf.bar2Offset);
+NV_PRINTF(LEVEL_INFO, "RPC: Debug buf addr 0x%llx\n", pVGpu->gspCtrlBuf->v1.debugBuf.addr);
 return status;
 }
@@ -1283,6 +1365,8 @@ void vgpuGspTeardownBuffers(OBJGPU *pGpu)
 }
 }
+_teardownGspDebugBuff(pGpu, pVGpu);
 _teardownGspSharedMemory(pGpu, pVGpu);
 _teardownGspEventInfrastructure(pGpu, pVGpu);
@@ -1351,6 +1435,13 @@ NV_STATUS vgpuGspSetupBuffers(OBJGPU *pGpu)
 goto fail;
 }
+status = _setupGspDebugBuff(pGpu, pVGpu);
+if (status != NV_OK)
+{
+NV_PRINTF(LEVEL_ERROR, "RPC: Debug memory setup failed: 0x%x\n", status);
+goto fail;
+}
 // Update Guest OS Type, before establishing communication with GSP.
 vgpuUpdateGuestOsType(pGpu, pVGpu);
@@ -1374,6 +1465,13 @@
 // Update Guest ECC status based on Host ECC status, after establishing RPC with GSP.
 setGuestEccStatus(pGpu);
+status = _tryEnableGspDebugBuff(pGpu, pVGpu);
+if (status != NV_OK)
+{
+NV_PRINTF(LEVEL_ERROR, "RPC: Enable debug buffer failed: 0x%x\n", status);
+goto fail;
+}
 pVGpu->bGspBuffersInitialized = NV_TRUE;
 return NV_OK;

View File

@@ -628,7 +628,7 @@ rmGpuLockFree(NvU32 gpuInst)
 // Disable GPUs Interrupts thus blocking the ISR from
 // entering.
 //
-static void _gpuLocksAcquireDisableInterrupts(NvU32 gpuInst, NvBool bInIsr)
+static void _gpuLocksAcquireDisableInterrupts(NvU32 gpuInst, NvU32 flags)
 {
 OBJGPU *pGpu = gpumgrGetGpu(gpuInst);
@@ -653,6 +653,7 @@ static void _gpuLocksAcquireDisableInterrupts(NvU32 gpuInst, NvBool bInIsr)
 if (osLockShouldToggleInterrupts(pGpu))
 {
 Intr *pIntr = GPU_GET_INTR(pGpu);
+NvBool isIsr = !!(flags & GPU_LOCK_FLAGS_COND_ACQUIRE);
 NvBool bBcEnabled = gpumgrGetBcEnabledStatus(pGpu);
 // Always disable intrs for cond code
@@ -666,10 +667,10 @@ static void _gpuLocksAcquireDisableInterrupts(NvU32 gpuInst, NvBool bInIsr)
 tmrRmCallbackIntrDisable(pTmr, pGpu);
 }
-osDisableInterrupts(pGpu, bInIsr);
+osDisableInterrupts(pGpu, isIsr);
 if ((pIntr != NULL) && pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING) &&
-(bInIsr == NV_FALSE) )
+(isIsr == NV_FALSE) )
 {
 NvU64 oldIrql;
 NvU32 intrMaskFlags;
@@ -721,7 +722,7 @@ _rmGpuLocksAcquire(NvU32 gpuMask, NvU32 flags, NvU32 module, void *ra, NvU32 *pG
 NvU32 gpuMaskLocked = 0;
 GPULOCK *pAllocLock = &rmGpuLockInfo.gpuAllocLock;
 GPULOCK *pGpuLock;
-NvBool bHighIrql, bInIsr, bCondAcquireCheck;
+NvBool bHighIrql, bCondAcquireCheck;
 NvU32 maxLockableGpuInst;
 NvU64 threadId = portThreadGetCurrentThreadId();
 NvU64 priority = 0;
@@ -733,7 +734,6 @@
 NvU32 loopCount;
 bHighIrql = (portSyncExSafeToSleep() == NV_FALSE);
-bInIsr = portUtilIsInterruptContext();
 bCondAcquireCheck = ((flags & GPU_LOCK_FLAGS_COND_ACQUIRE) != 0);
 if (pGpuLockedMask)
@@ -1084,7 +1084,7 @@ per_gpu_lock_acquired:
 if (gpuInst != GPU_INST_ALLOC_LOCK)
 {
 // now disable interrupts
-_gpuLocksAcquireDisableInterrupts(gpuInst, bInIsr);
+_gpuLocksAcquireDisableInterrupts(gpuInst, flags);
 // mark this one as locked
 gpuMaskLocked |= NVBIT(gpuInst);
View File
@ -1245,3 +1245,18 @@ NV_STATUS kceGetP2PCes_GH100(KernelCE *pKCe, OBJGPU *pGpu, NvU32 gpuMask, NvU32
return NV_OK; return NV_OK;
} }
/*! Determine if CE support confidential compute secure copy */
NvBool kceIsSecureCe_GH100(OBJGPU *pGpu, KernelCE *pKCe)
{
NV_STATUS status;
NvU8 ceCaps[NV2080_CTRL_CE_CAPS_TBL_SIZE];
NV_ASSERT_OK_OR_ELSE(status,
kceGetDeviceCaps(pGpu, pKCe, RM_ENGINE_TYPE_COPY(pKCe->publicID), ceCaps),
return NV_FALSE);
return (NV2080_CTRL_CE_GET_CAP(ceCaps, NV2080_CTRL_CE_CAPS_CE_CC_SECURE) != 0) ?
NV_TRUE : NV_FALSE;
};
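For illustration only, here is a minimal standalone C sketch of the capability-bit test that kceIsSecureCe performs after fetching the CE caps table; the constant names and table layout are placeholders, not the driver's actual NV2080_CTRL_CE definitions:

    #include <stdio.h>
    #include <stdbool.h>

    /* Placeholder stand-ins for the caps table layout. */
    #define CAPS_TBL_SIZE        2
    #define CAP_CC_SECURE_BYTE   0
    #define CAP_CC_SECURE_BIT    (1u << 3)

    /* Model of the "query caps, test one bit" pattern used above. */
    static bool ce_is_secure(const unsigned char caps[CAPS_TBL_SIZE])
    {
        return (caps[CAP_CC_SECURE_BYTE] & CAP_CC_SECURE_BIT) != 0;
    }

    int main(void)
    {
        unsigned char caps[CAPS_TBL_SIZE] = { CAP_CC_SECURE_BIT, 0 };
        printf("secure-capable CE: %s\n", ce_is_secure(caps) ? "yes" : "no");
        return 0;
    }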
View File
@ -43,36 +43,15 @@ NV_STATUS kceStateLoad_GP100(OBJGPU *pGpu, KernelCE *pKCe, NvU32 flags)
} }
if (gpuIsCCFeatureEnabled(pGpu)) if (gpuIsCCFeatureEnabled(pGpu))
{ {
ConfidentialCompute *pCC = GPU_GET_CONF_COMPUTE(pGpu); if (kceIsSecureCe_HAL(pGpu, pKCe))
switch (pKCe->publicID)
{ {
case 2: ConfidentialCompute *pCC = GPU_GET_CONF_COMPUTE(pGpu);
NV_ASSERT_OK_OR_RETURN(confComputeDeriveSecrets(pCC, MC_ENGINE_IDX_CE2));
break; NvU32 mcEngineIdx = MC_ENGINE_IDX_CE(pKCe->publicID);
case 3:
NV_ASSERT_OK_OR_RETURN(confComputeDeriveSecrets(pCC, MC_ENGINE_IDX_CE3)); NV_ASSERT_OR_RETURN(mcEngineIdx <= MC_ENGINE_IDX_CE_MAX, NV_ERR_NOT_SUPPORTED);
break;
case 4: NV_ASSERT_OK_OR_RETURN(confComputeDeriveSecrets(pCC, mcEngineIdx));
NV_ASSERT_OK_OR_RETURN(confComputeDeriveSecrets(pCC, MC_ENGINE_IDX_CE4));
break;
case 5:
NV_ASSERT_OK_OR_RETURN(confComputeDeriveSecrets(pCC, MC_ENGINE_IDX_CE5));
break;
case 6:
NV_ASSERT_OK_OR_RETURN(confComputeDeriveSecrets(pCC, MC_ENGINE_IDX_CE6));
break;
case 7:
NV_ASSERT_OK_OR_RETURN(confComputeDeriveSecrets(pCC, MC_ENGINE_IDX_CE7));
break;
case 8:
NV_ASSERT_OK_OR_RETURN(confComputeDeriveSecrets(pCC, MC_ENGINE_IDX_CE8));
break;
case 9:
NV_ASSERT_OK_OR_RETURN(confComputeDeriveSecrets(pCC, MC_ENGINE_IDX_CE9));
break;
default:
break;
} }
} }
return NV_OK; return NV_OK;
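As a rough illustration of the refactor above, the per-LCE switch is replaced by computing the notification index from the CE's public ID and bounds-checking it. A standalone sketch, with placeholder values instead of the driver's real MC_ENGINE_IDX_* constants:

    #include <stdio.h>

    #define MC_ENGINE_IDX_CE_BASE 10                          /* assumed index of CE0  */
    #define MC_ENGINE_IDX_CE_MAX  (MC_ENGINE_IDX_CE_BASE + 9) /* assumed last CE index */

    /* Compute the engine index for a copy engine and reject out-of-range IDs. */
    static int mc_engine_idx_ce(unsigned publicId)
    {
        int idx = MC_ENGINE_IDX_CE_BASE + (int)publicId;
        return (idx <= MC_ENGINE_IDX_CE_MAX) ? idx : -1;      /* -1 means unsupported  */
    }

    int main(void)
    {
        printf("CE2  -> engine index %d\n", mc_engine_idx_ce(2));
        printf("CE42 -> engine index %d\n", mc_engine_idx_ce(42));
        return 0;
    }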
View File
@ -87,6 +87,71 @@ confComputeIsGpuCcCapable_GH100
return NV_FALSE; return NV_FALSE;
} }
/*!
* @brief Derives secrets for given CE key space.
*
* @param[in] ceRmEngineTypeIdx the RM engine type for LCE
* @param[in] ccKeyspaceLCEIndex the key space index
*
* @return NV_ERR_INVALID_ARGUMENT if engine is not correct.
* NV_OK otherwise.
*/
NV_STATUS
confComputeDeriveSecretsForCEKeySpace_GH100
(
ConfidentialCompute *pConfCompute,
RM_ENGINE_TYPE ceRmEngineTypeIdx,
NvU32 ccKeyspaceLCEIndex
)
{
OBJGPU *pGpu = ENG_GET_GPU(pConfCompute);
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS params = {0};
//
// ceRmEngineTypeIdx is not exactly used as a CE index.
// For example, ceRmEngineTypeIdx is 0 for the first secure CE which is
// actually the LCE 2.
// It is used as a key space index.
//
// TODO: refactor the code to use the exact engine type number, bug 4594450.
//
params.engineId = gpuGetNv2080EngineType(ceRmEngineTypeIdx);
NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi,
pGpu->hInternalClient,
pGpu->hInternalSubdevice,
NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS,
&params,
sizeof(params)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(ccKeyspaceLCEIndex, CC_LKEYID_LCE_H2D_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(ccKeyspaceLCEIndex, CC_LKEYID_LCE_D2H_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(ccKeyspaceLCEIndex, CC_LKEYID_LCE_H2D_KERN)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(ccKeyspaceLCEIndex, CC_LKEYID_LCE_D2H_KERN)));
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(ccKeyspaceLCEIndex, CC_LKEYID_LCE_H2D_USER),
(void*)&params.ivMaskSet[0].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(ccKeyspaceLCEIndex, CC_LKEYID_LCE_D2H_USER),
(void*)&params.ivMaskSet[1].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(ccKeyspaceLCEIndex, CC_LKEYID_LCE_H2D_KERN),
(void*)&params.ivMaskSet[2].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(ccKeyspaceLCEIndex, CC_LKEYID_LCE_D2H_KERN),
(void*)&params.ivMaskSet[3].ivMask);
return NV_OK;
}
NV_STATUS NV_STATUS
confComputeDeriveSecrets_GH100(ConfidentialCompute *pConfCompute, confComputeDeriveSecrets_GH100(ConfidentialCompute *pConfCompute,
NvU32 engine) NvU32 engine)
@ -154,310 +219,48 @@ confComputeDeriveSecrets_GH100(ConfidentialCompute *pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_SEC2, CC_LKEYID_CPU_SEC2_HMAC_SCRUBBER))); CC_GKEYID_GEN(CC_KEYSPACE_SEC2, CC_LKEYID_CPU_SEC2_HMAC_SCRUBBER)));
} }
break; break;
case MC_ENGINE_IDX_CE2: case MC_ENGINE_IDX_CE2:
{ confComputeDeriveSecretsForCEKeySpace_HAL(pConfCompute,
NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS params = {0}; RM_ENGINE_TYPE_COPY0, CC_KEYSPACE_LCE0);
params.engineId = NV2080_ENGINE_TYPE_COPY0;
NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi,
pGpu->hInternalClient,
pGpu->hInternalSubdevice,
NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS,
&params,
sizeof(params)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE0, CC_LKEYID_LCE_H2D_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE0, CC_LKEYID_LCE_D2H_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE0, CC_LKEYID_LCE_H2D_KERN)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE0, CC_LKEYID_LCE_D2H_KERN)));
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE0, CC_LKEYID_LCE_H2D_USER),
(void*)&params.ivMaskSet[0].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE0, CC_LKEYID_LCE_D2H_USER),
(void*)&params.ivMaskSet[1].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE0, CC_LKEYID_LCE_H2D_KERN),
(void*)&params.ivMaskSet[2].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE0, CC_LKEYID_LCE_D2H_KERN),
(void*)&params.ivMaskSet[3].ivMask);
break; break;
}
case MC_ENGINE_IDX_CE3: case MC_ENGINE_IDX_CE3:
{ confComputeDeriveSecretsForCEKeySpace_HAL(pConfCompute,
NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS params = {0}; RM_ENGINE_TYPE_COPY1, CC_KEYSPACE_LCE1);
params.engineId = NV2080_ENGINE_TYPE_COPY1;
NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi,
pGpu->hInternalClient,
pGpu->hInternalSubdevice,
NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS,
&params,
sizeof(params)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE1, CC_LKEYID_LCE_H2D_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE1, CC_LKEYID_LCE_D2H_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE1, CC_LKEYID_LCE_H2D_KERN)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE1, CC_LKEYID_LCE_D2H_KERN)));
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE1, CC_LKEYID_LCE_H2D_USER),
(void*)&params.ivMaskSet[0].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE1, CC_LKEYID_LCE_D2H_USER),
(void*)&params.ivMaskSet[1].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE1, CC_LKEYID_LCE_H2D_KERN),
(void*)&params.ivMaskSet[2].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE1, CC_LKEYID_LCE_D2H_KERN),
(void*)&params.ivMaskSet[3].ivMask);
break; break;
}
case MC_ENGINE_IDX_CE4: case MC_ENGINE_IDX_CE4:
{ confComputeDeriveSecretsForCEKeySpace_HAL(pConfCompute,
NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS params = {0}; RM_ENGINE_TYPE_COPY2, CC_KEYSPACE_LCE2);
params.engineId = NV2080_ENGINE_TYPE_COPY2;
NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi,
pGpu->hInternalClient,
pGpu->hInternalSubdevice,
NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS,
&params,
sizeof(params)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE2, CC_LKEYID_LCE_H2D_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE2, CC_LKEYID_LCE_D2H_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE2, CC_LKEYID_LCE_H2D_KERN)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE2, CC_LKEYID_LCE_D2H_KERN)));
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE2, CC_LKEYID_LCE_H2D_USER),
(void*)&params.ivMaskSet[0].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE2, CC_LKEYID_LCE_D2H_USER),
(void*)&params.ivMaskSet[1].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE2, CC_LKEYID_LCE_H2D_KERN),
(void*)&params.ivMaskSet[2].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE2, CC_LKEYID_LCE_D2H_KERN),
(void*)&params.ivMaskSet[3].ivMask);
break; break;
}
case MC_ENGINE_IDX_CE5: case MC_ENGINE_IDX_CE5:
{ confComputeDeriveSecretsForCEKeySpace_HAL(pConfCompute,
NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS params = {0}; RM_ENGINE_TYPE_COPY3, CC_KEYSPACE_LCE3);
params.engineId = NV2080_ENGINE_TYPE_COPY3;
NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi,
pGpu->hInternalClient,
pGpu->hInternalSubdevice,
NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS,
&params,
sizeof(params)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE3, CC_LKEYID_LCE_H2D_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE3, CC_LKEYID_LCE_D2H_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE3, CC_LKEYID_LCE_H2D_KERN)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE3, CC_LKEYID_LCE_D2H_KERN)));
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE3, CC_LKEYID_LCE_H2D_USER),
(void*)&params.ivMaskSet[0].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE3, CC_LKEYID_LCE_D2H_USER),
(void*)&params.ivMaskSet[1].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE3, CC_LKEYID_LCE_H2D_KERN),
(void*)&params.ivMaskSet[2].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE3, CC_LKEYID_LCE_D2H_KERN),
(void*)&params.ivMaskSet[3].ivMask);
break; break;
}
case MC_ENGINE_IDX_CE6: case MC_ENGINE_IDX_CE6:
{ confComputeDeriveSecretsForCEKeySpace_HAL(pConfCompute,
NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS params = {0}; RM_ENGINE_TYPE_COPY4, CC_KEYSPACE_LCE4);
params.engineId = NV2080_ENGINE_TYPE_COPY4;
NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi,
pGpu->hInternalClient,
pGpu->hInternalSubdevice,
NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS,
&params,
sizeof(params)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE4, CC_LKEYID_LCE_H2D_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE4, CC_LKEYID_LCE_D2H_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE4, CC_LKEYID_LCE_H2D_KERN)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE4, CC_LKEYID_LCE_D2H_KERN)));
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE4, CC_LKEYID_LCE_H2D_USER),
(void*)&params.ivMaskSet[0].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE4, CC_LKEYID_LCE_D2H_USER),
(void*)&params.ivMaskSet[1].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE4, CC_LKEYID_LCE_H2D_KERN),
(void*)&params.ivMaskSet[2].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE4, CC_LKEYID_LCE_D2H_KERN),
(void*)&params.ivMaskSet[3].ivMask);
break; break;
}
case MC_ENGINE_IDX_CE7: case MC_ENGINE_IDX_CE7:
{ confComputeDeriveSecretsForCEKeySpace_HAL(pConfCompute,
NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS params = {0}; RM_ENGINE_TYPE_COPY5, CC_KEYSPACE_LCE5);
params.engineId = NV2080_ENGINE_TYPE_COPY5;
NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi,
pGpu->hInternalClient,
pGpu->hInternalSubdevice,
NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS,
&params,
sizeof(params)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE5, CC_LKEYID_LCE_H2D_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE5, CC_LKEYID_LCE_D2H_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE5, CC_LKEYID_LCE_H2D_KERN)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE5, CC_LKEYID_LCE_D2H_KERN)));
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE5, CC_LKEYID_LCE_H2D_USER),
(void*)&params.ivMaskSet[0].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE5, CC_LKEYID_LCE_D2H_USER),
(void*)&params.ivMaskSet[1].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE5, CC_LKEYID_LCE_H2D_KERN),
(void*)&params.ivMaskSet[2].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE5, CC_LKEYID_LCE_D2H_KERN),
(void*)&params.ivMaskSet[3].ivMask);
break; break;
}
case MC_ENGINE_IDX_CE8: case MC_ENGINE_IDX_CE8:
{ confComputeDeriveSecretsForCEKeySpace_HAL(pConfCompute,
NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS params = {0}; RM_ENGINE_TYPE_COPY6, CC_KEYSPACE_LCE6);
params.engineId = NV2080_ENGINE_TYPE_COPY6;
NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi,
pGpu->hInternalClient,
pGpu->hInternalSubdevice,
NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS,
&params,
sizeof(params)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE6, CC_LKEYID_LCE_H2D_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE6, CC_LKEYID_LCE_D2H_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE6, CC_LKEYID_LCE_H2D_KERN)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE6, CC_LKEYID_LCE_D2H_KERN)));
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE6, CC_LKEYID_LCE_H2D_USER),
(void*)&params.ivMaskSet[0].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE6, CC_LKEYID_LCE_D2H_USER),
(void*)&params.ivMaskSet[1].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE6, CC_LKEYID_LCE_H2D_KERN),
(void*)&params.ivMaskSet[2].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE6, CC_LKEYID_LCE_D2H_KERN),
(void*)&params.ivMaskSet[3].ivMask);
break; break;
}
case MC_ENGINE_IDX_CE9: case MC_ENGINE_IDX_CE9:
{ confComputeDeriveSecretsForCEKeySpace_HAL(pConfCompute,
NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS params = {0}; RM_ENGINE_TYPE_COPY7, CC_KEYSPACE_LCE7);
params.engineId = NV2080_ENGINE_TYPE_COPY7;
NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi,
pGpu->hInternalClient,
pGpu->hInternalSubdevice,
NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS,
&params,
sizeof(params)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE7, CC_LKEYID_LCE_H2D_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE7, CC_LKEYID_LCE_D2H_USER)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE7, CC_LKEYID_LCE_H2D_KERN)));
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreDeriveKey_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE7, CC_LKEYID_LCE_D2H_KERN)));
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE7, CC_LKEYID_LCE_H2D_USER),
(void*)&params.ivMaskSet[0].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE7, CC_LKEYID_LCE_D2H_USER),
(void*)&params.ivMaskSet[1].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE7, CC_LKEYID_LCE_H2D_KERN),
(void*)&params.ivMaskSet[2].ivMask);
confComputeKeyStoreDepositIvMask_HAL(pConfCompute,
CC_GKEYID_GEN(CC_KEYSPACE_LCE7, CC_LKEYID_LCE_D2H_KERN),
(void*)&params.ivMaskSet[3].ivMask);
break; break;
}
default: default:
return NV_ERR_INVALID_ARGUMENT; return NV_ERR_INVALID_ARGUMENT;
} }
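The rewritten switch encodes a fixed offset of two between the interrupt engine index and the key space: MC_ENGINE_IDX_CE2 through CE9 map to CC_KEYSPACE_LCE0 through LCE7 and to RM_ENGINE_TYPE_COPY0 through COPY7 passed into the shared helper. A standalone sketch of that index arithmetic, using plain integers rather than the driver's enums:

    #include <stdio.h>

    #define FIRST_SECURE_CE 2   /* MC_ENGINE_IDX_CE2 is the first secure-capable LCE */
    #define NUM_SECURE_CE   8   /* CE2..CE9 cover key spaces LCE0..LCE7              */

    int main(void)
    {
        for (int ce = FIRST_SECURE_CE; ce < FIRST_SECURE_CE + NUM_SECURE_CE; ce++)
        {
            int n = ce - FIRST_SECURE_CE;
            printf("MC_ENGINE_IDX_CE%d -> CC_KEYSPACE_LCE%d, RM_ENGINE_TYPE_COPY%d\n",
                   ce, n, n);
        }
        return 0;
    }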
@ -488,38 +291,34 @@ confComputeGetEngineIdFromKeySpace_GH100
return RM_ENGINE_TYPE_SEC2; return RM_ENGINE_TYPE_SEC2;
} }
NvU32 lceId = 2; // TODO: Use NV_SSE_SCE_CC_CAPABLE_LCE_ID_START;
switch (keySpace) switch (keySpace)
{ {
case CC_KEYSPACE_LCE0: case CC_KEYSPACE_LCE0:
lceId += 0; return RM_ENGINE_TYPE_COPY2;
break;
case CC_KEYSPACE_LCE1: case CC_KEYSPACE_LCE1:
lceId += 1; return RM_ENGINE_TYPE_COPY3;
break;
case CC_KEYSPACE_LCE2: case CC_KEYSPACE_LCE2:
lceId += 2; return RM_ENGINE_TYPE_COPY4;
break;
case CC_KEYSPACE_LCE3: case CC_KEYSPACE_LCE3:
lceId += 3; return RM_ENGINE_TYPE_COPY5;
break;
case CC_KEYSPACE_LCE4: case CC_KEYSPACE_LCE4:
lceId += 4; return RM_ENGINE_TYPE_COPY6;
break;
case CC_KEYSPACE_LCE5: case CC_KEYSPACE_LCE5:
lceId += 5; return RM_ENGINE_TYPE_COPY7;
break;
case CC_KEYSPACE_LCE6: case CC_KEYSPACE_LCE6:
lceId += 6; return RM_ENGINE_TYPE_COPY8;
break;
case CC_KEYSPACE_LCE7: case CC_KEYSPACE_LCE7:
lceId += 7; return RM_ENGINE_TYPE_COPY9;
break;
default:
return RM_ENGINE_TYPE_NULL;
} }
return RM_ENGINE_TYPE_COPY(lceId); return RM_ENGINE_TYPE_NULL;
} }
/*! /*!
@ -627,7 +426,7 @@ NV_STATUS confComputeUpdateSecrets_GH100(ConfidentialCompute *pConfCompute,
// Only LCEs have a decrypt IV mask. // Only LCEs have a decrypt IV mask.
if ((CC_GKEYID_GET_KEYSPACE(d2hKey) >= CC_KEYSPACE_LCE0) && if ((CC_GKEYID_GET_KEYSPACE(d2hKey) >= CC_KEYSPACE_LCE0) &&
(CC_GKEYID_GET_KEYSPACE(d2hKey) <= CC_KEYSPACE_LCE7)) (CC_GKEYID_GET_KEYSPACE(d2hKey) <= confComputeGetMaxCeKeySpaceIdx(pConfCompute)))
{ {
confComputeKeyStoreDepositIvMask_HAL(pConfCompute, d2hKey, &params.updatedDecryptIVMask); confComputeKeyStoreDepositIvMask_HAL(pConfCompute, d2hKey, &params.updatedDecryptIVMask);
} }
@ -646,7 +445,7 @@ NV_STATUS confComputeUpdateSecrets_GH100(ConfidentialCompute *pConfCompute,
pKernelChannel->clientKmb.encryptBundle.iv[0] = 0x00000000; pKernelChannel->clientKmb.encryptBundle.iv[0] = 0x00000000;
if ((CC_GKEYID_GET_KEYSPACE(d2hKey) >= CC_KEYSPACE_LCE0) && if ((CC_GKEYID_GET_KEYSPACE(d2hKey) >= CC_KEYSPACE_LCE0) &&
(CC_GKEYID_GET_KEYSPACE(d2hKey) <= CC_KEYSPACE_LCE7)) (CC_GKEYID_GET_KEYSPACE(d2hKey) <= confComputeGetMaxCeKeySpaceIdx(pConfCompute)))
{ {
pKernelChannel->clientKmb.decryptBundle.iv[0] = 0x00000000; pKernelChannel->clientKmb.decryptBundle.iv[0] = 0x00000000;
} }
View File
@ -67,11 +67,8 @@ typedef cryptoBundle_t keySlot_t[CC_KEYSPACE_TOTAL_SIZE];
static NV_STATUS checkSlot(ConfidentialCompute *pConfCompute, NvU32 slotNumber); static NV_STATUS checkSlot(ConfidentialCompute *pConfCompute, NvU32 slotNumber);
static void incrementChannelCounter(ConfidentialCompute *pConfCompute, NvU32 slotNumber); static void incrementChannelCounter(ConfidentialCompute *pConfCompute, NvU32 slotNumber);
static NvU64 getChannelCounter(ConfidentialCompute *pConfCompute, NvU32 slotNumber); static NvU64 getChannelCounter(ConfidentialCompute *pConfCompute, NvU32 slotNumber);
static NV_STATUS getKeyIdLce(KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation,
NvU16 *keyId);
static NV_STATUS getKeyIdSec2(KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation, static NV_STATUS getKeyIdSec2(KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation,
NvU16 *keyId); NvU16 *keyId);
static NV_STATUS getKeyspaceLce(KernelChannel *pKernelChannel, NvU16 *keyspace);
NV_STATUS NV_STATUS
confComputeKeyStoreInit_GH100(ConfidentialCompute *pConfCompute) confComputeKeyStoreInit_GH100(ConfidentialCompute *pConfCompute)
@ -176,7 +173,7 @@ confComputeKeyStoreDeriveKey_GH100(ConfidentialCompute *pConfCompute, NvU32 glob
// LCEs will return an error / interrupt if the key is all 0s. // LCEs will return an error / interrupt if the key is all 0s.
if ((CC_GKEYID_GET_KEYSPACE(globalKeyId) >= CC_KEYSPACE_LCE0) && if ((CC_GKEYID_GET_KEYSPACE(globalKeyId) >= CC_KEYSPACE_LCE0) &&
(CC_GKEYID_GET_KEYSPACE(globalKeyId) <= CC_KEYSPACE_LCE7)) (CC_GKEYID_GET_KEYSPACE(globalKeyId) <= confComputeGetMaxCeKeySpaceIdx(pConfCompute)))
{ {
for (NvU32 index = 0; index < CC_AES_256_GCM_KEY_SIZE_DWORD; index++) for (NvU32 index = 0; index < CC_AES_256_GCM_KEY_SIZE_DWORD; index++)
{ {
@ -235,19 +232,21 @@ confComputeKeyStoreRetrieveViaChannel_GH100
if (RM_ENGINE_TYPE_IS_COPY(kchannelGetEngineType(pKernelChannel))) if (RM_ENGINE_TYPE_IS_COPY(kchannelGetEngineType(pKernelChannel)))
{ {
NvU16 keyspace; NvU16 keySpace;
if (getKeyspaceLce(pKernelChannel, &keyspace) != NV_OK) if (confComputeGetKeySpaceFromKChannel_HAL(pConfCompute, pKernelChannel,
&keySpace) != NV_OK)
{ {
return NV_ERR_INVALID_PARAMETER; return NV_ERR_INVALID_PARAMETER;
} }
if (getKeyIdLce(pKernelChannel, rotateOperation, &keyId) != NV_OK) if (confComputeGetLceKeyIdFromKChannel_HAL(pConfCompute, pKernelChannel,
rotateOperation, &keyId) != NV_OK)
{ {
return NV_ERR_INVALID_PARAMETER; return NV_ERR_INVALID_PARAMETER;
} }
globalKeyId = CC_GKEYID_GEN(keyspace, keyId); globalKeyId = CC_GKEYID_GEN(keySpace, keyId);
} }
else if (kchannelGetEngineType(pKernelChannel) == RM_ENGINE_TYPE_SEC2) else if (kchannelGetEngineType(pKernelChannel) == RM_ENGINE_TYPE_SEC2)
{ {
@ -433,7 +432,7 @@ confComputeKeyStoreUpdateKey_GH100(ConfidentialCompute *pConfCompute, NvU32 glob
// LCEs will return an error / interrupt if the key is all 0s. // LCEs will return an error / interrupt if the key is all 0s.
if ((CC_GKEYID_GET_KEYSPACE(globalKeyId) >= CC_KEYSPACE_LCE0) && if ((CC_GKEYID_GET_KEYSPACE(globalKeyId) >= CC_KEYSPACE_LCE0) &&
(CC_GKEYID_GET_KEYSPACE(globalKeyId) <= CC_KEYSPACE_LCE7)) (CC_GKEYID_GET_KEYSPACE(globalKeyId) <= confComputeGetMaxCeKeySpaceIdx(pConfCompute)))
{ {
for (NvU32 index = 0; index < CC_AES_256_GCM_KEY_SIZE_DWORD; index++) for (NvU32 index = 0; index < CC_AES_256_GCM_KEY_SIZE_DWORD; index++)
{ {
@ -480,9 +479,12 @@ confComputeGetKeyPairByChannel_GH100
} }
else else
{ {
NV_ASSERT_OK_OR_RETURN(getKeyspaceLce(pKernelChannel, &keySpace)); NV_ASSERT_OK_OR_RETURN(confComputeGetKeySpaceFromKChannel_HAL(pConfCompute,
NV_ASSERT_OK_OR_RETURN(getKeyIdLce(pKernelChannel, ROTATE_IV_ENCRYPT, &lh2dKeyId)); pKernelChannel, &keySpace) != NV_OK);
NV_ASSERT_OK_OR_RETURN(getKeyIdLce(pKernelChannel, ROTATE_IV_DECRYPT, &ld2hKeyId)); NV_ASSERT_OK_OR_RETURN(confComputeGetLceKeyIdFromKChannel_HAL(pConfCompute, pKernelChannel,
ROTATE_IV_ENCRYPT, &lh2dKeyId));
NV_ASSERT_OK_OR_RETURN(confComputeGetLceKeyIdFromKChannel_HAL(pConfCompute, pKernelChannel,
ROTATE_IV_DECRYPT, &ld2hKeyId));
} }
if (pH2DKey != NULL) if (pH2DKey != NULL)
@ -508,41 +510,49 @@ confComputeKeyStoreIsValidGlobalKeyId_GH100
return (globalKeyIdString != NULL); return (globalKeyIdString != NULL);
} }
// /*!
// Return the key ID for a given LCE channel and rotation operation. * Return the key ID for a given LCE channel and rotation operation.
// If rotateOperation is ROTATE_IV_ALL_VALID then it will return the least * If rotateOperation is ROTATE_IV_ALL_VALID then it will return the least
// key ID of the key pair; ie the one that corresponds to an even numbered slot. * key ID of the key pair; ie the one that corresponds to an even numbered slot.
// *
static NV_STATUS * @param[in] pConfCompute : conf comp pointer
getKeyIdLce * @param[in] pKernelChannel : KernelChannel pointer
* @param[in] rotateOperation : The type of rotation operation
* @param[out] pKeyId : pointer to keyId
*/
NV_STATUS
confComputeGetLceKeyIdFromKChannel_GH100
( (
KernelChannel *pKernelChannel, ConfidentialCompute *pConfCompute,
ROTATE_IV_TYPE rotateOperation, KernelChannel *pKernelChannel,
NvU16 *keyId ROTATE_IV_TYPE rotateOperation,
NvU16 *pKeyId
) )
{ {
if (kchannelCheckIsUserMode(pKernelChannel)) if (kchannelCheckIsUserMode(pKernelChannel))
{ {
if ((rotateOperation == ROTATE_IV_ENCRYPT) || (rotateOperation == ROTATE_IV_ALL_VALID)) if ((rotateOperation == ROTATE_IV_ENCRYPT) ||
(rotateOperation == ROTATE_IV_ALL_VALID))
{ {
*keyId = CC_LKEYID_LCE_H2D_USER; *pKeyId = CC_LKEYID_LCE_H2D_USER;
} }
else else
{ {
*keyId = CC_LKEYID_LCE_D2H_USER; *pKeyId = CC_LKEYID_LCE_D2H_USER;
} }
return NV_OK; return NV_OK;
} }
else if (kchannelCheckIsKernel(pKernelChannel)) else if (kchannelCheckIsKernel(pKernelChannel))
{ {
if ((rotateOperation == ROTATE_IV_ENCRYPT) || (rotateOperation == ROTATE_IV_ALL_VALID)) if ((rotateOperation == ROTATE_IV_ENCRYPT) ||
(rotateOperation == ROTATE_IV_ALL_VALID))
{ {
*keyId = CC_LKEYID_LCE_H2D_KERN; *pKeyId = CC_LKEYID_LCE_H2D_KERN;
} }
else else
{ {
*keyId = CC_LKEYID_LCE_D2H_KERN; *pKeyId = CC_LKEYID_LCE_D2H_KERN;
} }
return NV_OK; return NV_OK;
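The key selection above is a two-by-two choice: user-mode versus kernel-mode channel, and host-to-device (encrypt) versus device-to-host (decrypt). A standalone sketch of that mapping, with the key names written as strings for readability:

    #include <stdio.h>
    #include <stdbool.h>

    /* Model of the 2x2 selection done by confComputeGetLceKeyIdFromKChannel. */
    static const char *lce_key_id(bool userMode, bool encrypt)
    {
        if (userMode)
            return encrypt ? "CC_LKEYID_LCE_H2D_USER" : "CC_LKEYID_LCE_D2H_USER";
        return encrypt ? "CC_LKEYID_LCE_H2D_KERN" : "CC_LKEYID_LCE_D2H_KERN";
    }

    int main(void)
    {
        printf("user/encrypt   -> %s\n", lce_key_id(true,  true));
        printf("kernel/decrypt -> %s\n", lce_key_id(false, false));
        return 0;
    }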
@ -599,11 +609,20 @@ getKeyIdSec2
return NV_ERR_GENERIC; return NV_ERR_GENERIC;
} }
static NV_STATUS /*!
getKeyspaceLce * Returns a key space corresponding to a channel
*
* @param[in] pConfCompute : ConfidentialCompute pointer
* @param[in] pKernelChannel : KernelChannel pointer
* @param[out] keySpace : value of keyspace from cc_keystore.h
*
*/
NV_STATUS
confComputeGetKeySpaceFromKChannel_GH100
( (
KernelChannel *pKernelChannel, ConfidentialCompute *pConfCompute,
NvU16 *keyspace KernelChannel *pKernelChannel,
NvU16 *keyspace
) )
{ {
// The actual copy engine (2 through 9) is normalized to start at 0. // The actual copy engine (2 through 9) is normalized to start at 0.
View File
@ -31,6 +31,7 @@
#include "platform/platform.h" #include "platform/platform.h"
#include "platform/chipset/chipset.h" #include "platform/chipset/chipset.h"
#include "kernel/gpu/gr/kernel_graphics.h"
#include "gpu/mem_mgr/mem_mgr.h" #include "gpu/mem_mgr/mem_mgr.h"
#include "gpu/mem_mgr/fbsr.h" #include "gpu/mem_mgr/fbsr.h"
#include "gpu/gsp/gsp_init_args.h" #include "gpu/gsp/gsp_init_args.h"
@ -356,6 +357,13 @@ gpuResumeFromStandby_IMPL(OBJGPU *pGpu)
NV_PRINTF(LEVEL_NOTICE, "Ending resume from %s\n", NV_PRINTF(LEVEL_NOTICE, "Ending resume from %s\n",
IS_GPU_GC6_STATE_EXITING(pGpu) ? "GC6" : "APM Suspend"); IS_GPU_GC6_STATE_EXITING(pGpu) ? "GC6" : "APM Suspend");
} }
if (resumeStatus == NV_OK)
{
if (kgraphicsIsBug4208224WARNeeded_HAL(pGpu, GPU_GET_KERNEL_GRAPHICS(pGpu, 0)))
{
return kgraphicsInitializeBug4208224WAR_HAL(pGpu, GPU_GET_KERNEL_GRAPHICS(pGpu, 0));
}
}
return resumeStatus; return resumeStatus;
} }
@ -419,6 +427,13 @@ NV_STATUS gpuResumeFromHibernate_IMPL(OBJGPU *pGpu)
{ {
NV_PRINTF(LEVEL_NOTICE, "End resuming from APM Suspend\n"); NV_PRINTF(LEVEL_NOTICE, "End resuming from APM Suspend\n");
} }
if (resumeStatus == NV_OK)
{
if (kgraphicsIsBug4208224WARNeeded_HAL(pGpu, GPU_GET_KERNEL_GRAPHICS(pGpu, 0)))
{
return kgraphicsInitializeBug4208224WAR_HAL(pGpu, GPU_GET_KERNEL_GRAPHICS(pGpu, 0));
}
}
return resumeStatus; return resumeStatus;
} }
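Both resume paths now finish by re-applying the bug 4208224 workaround when the HAL reports it is needed, and the workaround's status becomes the overall resume status. A standalone sketch of that gating; the two helpers stand in for the HAL query and the WAR re-init and are not the driver's functions:

    #include <stdio.h>

    #define NV_OK 0

    static int warNeeded(void) { return 1; }     /* stands in for kgraphicsIsBug4208224WARNeeded */
    static int initWar(void)   { return NV_OK; } /* stands in for the WAR re-initialization      */

    static int finishResume(int resumeStatus)
    {
        if (resumeStatus == NV_OK && warNeeded())
            return initWar();
        return resumeStatus;
    }

    int main(void)
    {
        printf("resume status: %d\n", finishResume(NV_OK));
        return 0;
    }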
View File
@ -197,6 +197,11 @@ kgraphicsInitializeBug4208224WAR_TU102
RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
NV2080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS params = {0}; NV2080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS params = {0};
if (pKernelGraphics->bug4208224Info.bConstructed)
{
return NV_OK;
}
NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
kgraphicsCreateBug4208224Channel_HAL(pGpu, pKernelGraphics)); kgraphicsCreateBug4208224Channel_HAL(pGpu, pKernelGraphics));
View File
@ -512,7 +512,7 @@ _kgraphicsPostSchedulingEnableHandler
} }
NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, kgraphicsCreateGoldenImageChannel(pGpu, pKernelGraphics)); NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, kgraphicsCreateGoldenImageChannel(pGpu, pKernelGraphics));
if (kgraphicsIsBug4208224WARNeeded_HAL(pGpu, pKernelGraphics)) if (kgraphicsIsBug4208224WARNeeded_HAL(pGpu, pKernelGraphics) && !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH))
{ {
return kgraphicsInitializeBug4208224WAR_HAL(pGpu, pKernelGraphics); return kgraphicsInitializeBug4208224WAR_HAL(pGpu, pKernelGraphics);
} }
@ -1031,6 +1031,13 @@ kgraphicsLoadStaticInfo_VF
kgrmgrSetLegacyKgraphicsStaticInfo(pGpu, pKernelGraphicsManager, pKernelGraphics); kgrmgrSetLegacyKgraphicsStaticInfo(pGpu, pKernelGraphicsManager, pKernelGraphics);
} }
// FECS ctxsw logging is consumed when profiling support is available in guest
if (!pVSI->vgpuStaticProperties.bProfilingTracingEnabled)
{
kgraphicsSetCtxswLoggingSupported(pGpu, pKernelGraphics, NV_FALSE);
NV_PRINTF(LEVEL_NOTICE, "Profiling support not requested. Disabling ctxsw logging\n");
}
cleanup : cleanup :
if (status != NV_OK) if (status != NV_OK)

View File
NvU32 historyIndex; NvU32 historyIndex;
NvU32 historyEntry; NvU32 historyEntry;
// Complete the current entry (it should be active)
// TODO: assert that ts_end == 0 here when continuation record timestamps are fixed
NV_ASSERT_OR_RETURN_VOID(pHistory[current].ts_start != 0);
pHistory[current].ts_end = osGetTimestamp(); pHistory[current].ts_end = osGetTimestamp();
// //
// Complete any previous entries that aren't marked complete yet, using the same timestamp // Complete any previous entries that aren't marked complete yet, using the same timestamp
// (we may not have explicitly waited for them) // (we may not have explicitly waited for them)
// //
for (historyIndex = 0; historyIndex < RPC_HISTORY_DEPTH; historyIndex++) for (historyIndex = 1; historyIndex < RPC_HISTORY_DEPTH; historyIndex++)
{ {
historyEntry = (current + RPC_HISTORY_DEPTH - historyIndex) % RPC_HISTORY_DEPTH; historyEntry = (current + RPC_HISTORY_DEPTH - historyIndex) % RPC_HISTORY_DEPTH;
if (pHistory[historyEntry].ts_start != 0 && if (pHistory[historyEntry].ts_start != 0 &&
@ -1661,13 +1665,13 @@ _tsDiffToDuration
{ {
duration /= 1000; duration /= 1000;
*pDurationUnitsChar = 'm'; *pDurationUnitsChar = 'm';
}
// 9999ms then 10s // 9999ms then 10s
if (duration >= 10000) if (duration >= 10000)
{ {
duration /= 1000; duration /= 1000;
*pDurationUnitsChar = ' '; // so caller can always just append 's' *pDurationUnitsChar = ' '; // so caller can always just append 's'
}
} }
return duration; return duration;
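With the reshuffled braces, the millisecond-to-second promotion only runs after the value has actually been converted to milliseconds, so a small delta that happens to exceed 10000 microseconds is no longer misreported in seconds. A standalone sketch of the corrected scaling; the microsecond threshold is an assumption, since only the 10000ms check is visible in this hunk:

    #include <stdio.h>

    static unsigned long long ts_diff_to_duration(unsigned long long us, char *unit)
    {
        unsigned long long duration = us;

        *unit = 'u';
        if (duration >= 1000000)        /* assumed us -> ms threshold */
        {
            duration /= 1000;
            *unit = 'm';

            /* 9999ms then 10s */
            if (duration >= 10000)
            {
                duration /= 1000;
                *unit = ' ';            /* caller appends 's'         */
            }
        }
        return duration;
    }

    int main(void)
    {
        char unit;
        unsigned long long d;

        d = ts_diff_to_duration(20000ULL, &unit);    /* a 20ms delta          */
        printf("%llu%cs\n", d, unit);                /* "20000us", not "20s"  */

        d = ts_diff_to_duration(45000000ULL, &unit); /* a 45s delta           */
        printf("%llu%cs\n", d, unit);                /* "45 s"                */
        return 0;
    }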
@ -1830,7 +1834,7 @@ _kgspLogXid119
duration = _tsDiffToDuration(ts_end - pHistoryEntry->ts_start, &durationUnitsChar); duration = _tsDiffToDuration(ts_end - pHistoryEntry->ts_start, &durationUnitsChar);
NV_ERROR_LOG(pGpu, GSP_RPC_TIMEOUT, NV_ERROR_LOG(pGpu, GSP_RPC_TIMEOUT,
"Timeout after %llus of waiting for RPC response from GPU%d GSP! Expected function %d (%s) (0x%x 0x%x).", "Timeout after %llus of waiting for RPC response from GPU%d GSP! Expected function %d (%s) (0x%llx 0x%llx).",
(durationUnitsChar == 'm' ? duration / 1000 : duration), (durationUnitsChar == 'm' ? duration / 1000 : duration),
gpuGetInstance(pGpu), gpuGetInstance(pGpu),
expectedFunc, expectedFunc,
@ -1841,7 +1845,6 @@ _kgspLogXid119
if (pRpc->timeoutCount == 1) if (pRpc->timeoutCount == 1)
{ {
kgspLogRpcDebugInfo(pGpu, pRpc, GSP_RPC_TIMEOUT, NV_TRUE/*bPollingForRpcResponse*/); kgspLogRpcDebugInfo(pGpu, pRpc, GSP_RPC_TIMEOUT, NV_TRUE/*bPollingForRpcResponse*/);
osAssertFailed(); osAssertFailed();
NV_PRINTF(LEVEL_ERROR, NV_PRINTF(LEVEL_ERROR,
@ -1849,6 +1852,32 @@ _kgspLogXid119
} }
} }
static void
_kgspLogRpcSanityCheckFailure
(
OBJGPU *pGpu,
OBJRPC *pRpc,
NvU32 rpcStatus,
NvU32 expectedFunc
)
{
RpcHistoryEntry *pHistoryEntry = &pRpc->rpcHistory[pRpc->rpcHistoryCurrent];
NV_ASSERT(expectedFunc == pHistoryEntry->function);
NV_PRINTF(LEVEL_ERROR,
"GPU%d sanity check failed 0x%x waiting for RPC response from GSP. Expected function %d (%s) (0x%llx 0x%llx).\n",
gpuGetInstance(pGpu),
rpcStatus,
expectedFunc,
_getRpcName(expectedFunc),
pHistoryEntry->data[0],
pHistoryEntry->data[1]);
kgspLogRpcDebugInfo(pGpu, pRpc, GSP_RPC_TIMEOUT, NV_TRUE/*bPollingForRpcResponse*/);
osAssertFailed();
}
static void static void
_kgspRpcIncrementTimeoutCountAndRateLimitPrints _kgspRpcIncrementTimeoutCountAndRateLimitPrints
( (
@ -1986,7 +2015,16 @@ _kgspRpcRecvPoll
goto done; goto done;
} }
NV_CHECK_OK_OR_GOTO(rpcStatus, LEVEL_SILENT, _kgspRpcSanityCheck(pGpu, pKernelGsp, pRpc), done); rpcStatus = _kgspRpcSanityCheck(pGpu, pKernelGsp, pRpc);
if (rpcStatus != NV_OK)
{
if (!pRpc->bQuietPrints)
{
_kgspLogRpcSanityCheckFailure(pGpu, pRpc, rpcStatus, expectedFunc);
pRpc->bQuietPrints = NV_TRUE;
}
goto done;
}
if (timeoutStatus == NV_ERR_TIMEOUT) if (timeoutStatus == NV_ERR_TIMEOUT)
{ {
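The new sanity-check path above logs the detailed failure report only once and then sets bQuietPrints, so repeated polls after the first failure stay quiet. A standalone sketch of that log-once gating, with an illustrative struct in place of the driver's OBJRPC:

    #include <stdio.h>
    #include <stdbool.h>

    struct rpc_state { bool quiet_prints; };

    /* Emit the detailed report on the first failure only. */
    static void handle_sanity_failure(struct rpc_state *rpc, int status)
    {
        if (!rpc->quiet_prints)
        {
            printf("sanity check failed: 0x%x (detailed RPC history dump here)\n", status);
            rpc->quiet_prints = true;
        }
    }

    int main(void)
    {
        struct rpc_state rpc = { .quiet_prints = false };
        handle_sanity_failure(&rpc, 0x57);  /* logs   */
        handle_sanity_failure(&rpc, 0x57);  /* silent */
        return 0;
    }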
View File
@ -54,6 +54,7 @@ static NvBool _knvlinkUpdateSwitchLinkMasks(OBJGPU *, KernelNvlink *, NvU32);
static NvBool _knvlinkUpdateSwitchLinkMasksGpuDegraded(OBJGPU *, KernelNvlink *); static NvBool _knvlinkUpdateSwitchLinkMasksGpuDegraded(OBJGPU *, KernelNvlink *);
static void _knvlinkUpdatePeerConfigs(OBJGPU *, KernelNvlink *); static void _knvlinkUpdatePeerConfigs(OBJGPU *, KernelNvlink *);
static void _knvlinkPrintTopologySummary(OBJGPU *, KernelNvlink *); static void _knvlinkPrintTopologySummary(OBJGPU *, KernelNvlink *);
static NvU32 _knvlinkGetNumPortEvents(OBJGPU *pGpu, KernelNvlink *pKernelNvlink);
#endif #endif
@ -83,6 +84,7 @@ knvlinkCoreGetRemoteDeviceInfo_IMPL
NvBool bNvswitchProxyPresent = NV_FALSE; NvBool bNvswitchProxyPresent = NV_FALSE;
NvBool bUpdateConnStatus = NV_FALSE; NvBool bUpdateConnStatus = NV_FALSE;
NvBool bCheckDegradedMode = NV_FALSE; NvBool bCheckDegradedMode = NV_FALSE;
NvBool bForceDiscovery = NV_FALSE;
nvlink_conn_info conn_info = {0}; nvlink_conn_info conn_info = {0};
NvU32 linkId; NvU32 linkId;
NvU32 numActiveLinksPerIoctrl = 0; NvU32 numActiveLinksPerIoctrl = 0;
@ -152,6 +154,12 @@ knvlinkCoreGetRemoteDeviceInfo_IMPL
{ {
if (gpuFabricProbeIsSupported(pGpu)) if (gpuFabricProbeIsSupported(pGpu))
{ {
NvU32 numPortEvents = _knvlinkGetNumPortEvents(pGpu, pKernelNvlink);
if (pKernelNvlink->numPortEvents < numPortEvents)
{
bForceDiscovery = NV_TRUE;
}
// //
// If FM doesn't talk to NVLink driver using control calls // If FM doesn't talk to NVLink driver using control calls
// (i.e. uses NVLink inband comm instread) such as // (i.e. uses NVLink inband comm instread) such as
@ -159,7 +167,13 @@ knvlinkCoreGetRemoteDeviceInfo_IMPL
// discover remote information explicitly. // discover remote information explicitly.
// //
nvlink_lib_discover_and_get_remote_conn_info( nvlink_lib_discover_and_get_remote_conn_info(
pKernelNvlink->nvlinkLinks[linkId].core_link, &conn_info, flags); pKernelNvlink->nvlinkLinks[linkId].core_link, &conn_info,
flags, bForceDiscovery);
if (bForceDiscovery)
{
pKernelNvlink->numPortEvents = numPortEvents;
}
} }
else else
{ {
@ -205,7 +219,7 @@ knvlinkCoreGetRemoteDeviceInfo_IMPL
} }
nvlink_lib_discover_and_get_remote_conn_info( nvlink_lib_discover_and_get_remote_conn_info(
pKernelNvlink->nvlinkLinks[linkId].core_link, &conn_info, flags); pKernelNvlink->nvlinkLinks[linkId].core_link, &conn_info, flags, NV_FALSE);
} }
// RPC into GSP-RM to update the link connected status only if its required // RPC into GSP-RM to update the link connected status only if its required
@ -1355,7 +1369,7 @@ knvlinkFloorSweep_IMPL
FOR_EACH_INDEX_IN_MASK(32, linkId, pKernelNvlink->enabledLinks) FOR_EACH_INDEX_IN_MASK(32, linkId, pKernelNvlink->enabledLinks)
{ {
nvlink_lib_discover_and_get_remote_conn_info( nvlink_lib_discover_and_get_remote_conn_info(
pKernelNvlink->nvlinkLinks[linkId].core_link, &conn_info, 0); pKernelNvlink->nvlinkLinks[linkId].core_link, &conn_info, 0, NV_FALSE);
} }
FOR_EACH_INDEX_IN_MASK_END; FOR_EACH_INDEX_IN_MASK_END;
@ -2506,4 +2520,30 @@ _knvlinkPrintTopologySummary
#endif #endif
} }
static NvU32
_knvlinkGetNumPortEvents
(
OBJGPU *pGpu,
KernelNvlink *pKernelNvlink
)
{
NV_STATUS status;
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
NV2080_CTRL_NVLINK_GET_PORT_EVENTS_PARAMS params = {0};
status = pRmApi->Control(pRmApi,
pGpu->hInternalClient,
pGpu->hInternalSubdevice,
NV2080_CTRL_CMD_NVLINK_GET_PORT_EVENTS,
&params,
sizeof(NV2080_CTRL_NVLINK_GET_PORT_EVENTS_PARAMS));
if (status != NV_OK)
{
// If this call fails, force discovery in knvlinkCoreGetRemoteDeviceInfo
return 0;
}
return params.portEventCount;
}
#endif #endif
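The caller compares the cached port-event count against the freshly queried one and forces link re-discovery only when the counter has advanced, caching the new value afterwards. A standalone sketch of that caching pattern, simplified to update the cache inside the check rather than after the discovery call:

    #include <stdio.h>
    #include <stdbool.h>

    struct link_state { unsigned cached_port_events; };

    /* Re-discover only when the event counter has moved since the last query. */
    static bool need_rediscovery(struct link_state *s, unsigned current_events)
    {
        if (s->cached_port_events < current_events)
        {
            s->cached_port_events = current_events;
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct link_state s = { .cached_port_events = 0 };
        printf("first pass : %d\n", need_rediscovery(&s, 2)); /* 1: counter advanced */
        printf("second pass: %d\n", need_rediscovery(&s, 2)); /* 0: nothing new      */
        return 0;
    }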
View File
@ -1,4 +1,4 @@
NVIDIA_VERSION = 550.90.07 NVIDIA_VERSION = 550.100
# This file. # This file.
VERSION_MK_FILE := $(lastword $(MAKEFILE_LIST)) VERSION_MK_FILE := $(lastword $(MAKEFILE_LIST))