Bernhard Stoeckner 2024-04-17 17:23:37 +02:00
parent 3bf16b890c
commit ea4c27fad6
No known key found for this signature in database
GPG Key ID: 7D23DC2750FAC2E1
103 changed files with 60736 additions and 56138 deletions


@@ -2,6 +2,8 @@
 ## Release 550 Entries
+### [550.76] 2024-04-17
 ### [550.67] 2024-03-19
 ### [550.54.15] 2024-03-18


@@ -1,7 +1,7 @@
 # NVIDIA Linux Open GPU Kernel Module Source
 This is the source release of the NVIDIA Linux open GPU kernel modules,
-version 550.67.
+version 550.76.
 ## How to Build
@@ -17,7 +17,7 @@ as root:
 Note that the kernel modules built here must be used with GSP
 firmware and user-space NVIDIA GPU driver components from a corresponding
-550.67 driver release. This can be achieved by installing
+550.76 driver release. This can be achieved by installing
 the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
 option. E.g.,
@@ -188,7 +188,7 @@ encountered specific to them.
 For details on feature support and limitations, see the NVIDIA GPU driver
 end user README here:
-https://us.download.nvidia.com/XFree86/Linux-x86_64/550.67/README/kernel_open.html
+https://us.download.nvidia.com/XFree86/Linux-x86_64/550.76/README/kernel_open.html
 For vGPU support, please refer to the README.vgpu packaged in the vGPU Host
 Package for more details.
@@ -651,6 +651,7 @@ Subsystem Device ID.
 | NVIDIA T1000 8GB | 1FF0 17AA 1612 |
 | NVIDIA T400 4GB | 1FF2 1028 1613 |
 | NVIDIA T400 4GB | 1FF2 103C 1613 |
+| NVIDIA T400E | 1FF2 103C 18FF |
 | NVIDIA T400 4GB | 1FF2 103C 8A80 |
 | NVIDIA T400 4GB | 1FF2 10DE 1613 |
 | NVIDIA T400E | 1FF2 10DE 18FF |
@@ -829,6 +830,14 @@ Subsystem Device ID.
 | NVIDIA GeForce RTX 3050 4GB Laptop GPU | 25AB |
 | NVIDIA GeForce RTX 3050 6GB Laptop GPU | 25AC |
 | NVIDIA GeForce RTX 2050 | 25AD |
+| NVIDIA RTX A1000 | 25B0 1028 1878 |
+| NVIDIA RTX A1000 | 25B0 103C 1878 |
+| NVIDIA RTX A1000 | 25B0 10DE 1878 |
+| NVIDIA RTX A1000 | 25B0 17AA 1878 |
+| NVIDIA RTX A400 | 25B2 1028 1879 |
+| NVIDIA RTX A400 | 25B2 103C 1879 |
+| NVIDIA RTX A400 | 25B2 10DE 1879 |
+| NVIDIA RTX A400 | 25B2 17AA 1879 |
 | NVIDIA A16 | 25B6 10DE 14A9 |
 | NVIDIA A2 | 25B6 10DE 157E |
 | NVIDIA RTX A2000 Laptop GPU | 25B8 |
@@ -907,8 +916,11 @@ Subsystem Device ID.
 | NVIDIA GeForce RTX 4050 Laptop GPU | 28A1 |
 | NVIDIA RTX 2000 Ada Generation | 28B0 1028 1870 |
 | NVIDIA RTX 2000 Ada Generation | 28B0 103C 1870 |
+| NVIDIA RTX 2000E Ada Generation | 28B0 103C 1871 |
 | NVIDIA RTX 2000 Ada Generation | 28B0 10DE 1870 |
+| NVIDIA RTX 2000E Ada Generation | 28B0 10DE 1871 |
 | NVIDIA RTX 2000 Ada Generation | 28B0 17AA 1870 |
+| NVIDIA RTX 2000E Ada Generation | 28B0 17AA 1871 |
 | NVIDIA RTX 2000 Ada Generation Laptop GPU | 28B8 |
 | NVIDIA RTX 1000 Ada Generation Laptop GPU | 28B9 |
 | NVIDIA RTX 500 Ada Generation Laptop GPU | 28BA |


@@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
 EXTRA_CFLAGS += -I$(src)
 EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
 EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
-EXTRA_CFLAGS += -DNV_VERSION_STRING=\"550.67\"
+EXTRA_CFLAGS += -DNV_VERSION_STRING=\"550.76\"
 ifneq ($(SYSSRCHOST1X),)
 EXTRA_CFLAGS += -I$(SYSSRCHOST1X)


@@ -151,6 +151,7 @@ NV_STATUS_CODE(NV_ERR_RISCV_ERROR, 0x00000079, "Generic RISC
 NV_STATUS_CODE(NV_ERR_FABRIC_MANAGER_NOT_PRESENT, 0x0000007A, "Fabric Manager is not loaded")
 NV_STATUS_CODE(NV_ERR_ALREADY_SIGNALLED, 0x0000007B, "Semaphore Surface value already >= requested wait value")
 NV_STATUS_CODE(NV_ERR_QUEUE_TASK_SLOT_NOT_AVAILABLE, 0x0000007C, "PMU RPC error due to no queue slot available for this event")
+NV_STATUS_CODE(NV_ERR_KEY_ROTATION_IN_PROGRESS, 0x0000007D, "Operation not allowed as key rotation is in progress")
 // Warnings:
 NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch")


@@ -340,9 +340,9 @@ static NV_STATUS uvm_test_iommu_rc_for_gpu(uvm_gpu_t *gpu)
     if (!domain || !iommu_is_dma_domain(domain))
         return NV_OK;
-    // Only run if ATS is enabled. Otherwise the CE doesn't get response on
-    // writing to unmapped location.
-    if (!g_uvm_global.ats.enabled)
+    // Only run if ATS is enabled with 64kB base page.
+    // Otherwise the CE doesn't get response on writing to unmapped location.
+    if (!g_uvm_global.ats.enabled || PAGE_SIZE != UVM_PAGE_SIZE_64K)
         return NV_OK;
     status = uvm_mem_alloc_sysmem_and_map_cpu_kernel(data_size, NULL, &sysmem);


@@ -527,6 +527,15 @@ void uvm_va_space_destroy(uvm_va_space_t *va_space)
     nv_kthread_q_flush(&g_uvm_global.global_q);
     for_each_gpu_in_mask(gpu, retained_gpus) {
+        // Free the processor masks allocated in uvm_va_space_register_gpu().
+        // The mask is also freed in uvm_va_space_unregister_gpu() but that
+        // function won't be called in uvm_release() and uvm_release_deferred()
+        // path.
+        uvm_processor_mask_cache_free(va_space->peers_to_release[uvm_id_value(gpu->id)]);
+
+        // Set the pointer to NULL to avoid accidental re-use and double free.
+        va_space->peers_to_release[uvm_id_value(gpu->id)] = NULL;
+
         if (!gpu->parent->isr.replayable_faults.handling) {
             UVM_ASSERT(!gpu->parent->isr.non_replayable_faults.handling);
             continue;
@@ -543,14 +552,6 @@ void uvm_va_space_destroy(uvm_va_space_t *va_space)
         if (gpu->parent->access_counters_supported)
             uvm_parent_gpu_access_counters_disable(gpu->parent, va_space);
-        // Free the processor masks allocated in uvm_va_space_register_gpu().
-        // The mask is also freed in uvm_va_space_unregister_gpu() but that
-        // function won't be called in uvm_release() and uvm_release_deferred()
-        // path.
-        uvm_processor_mask_cache_free(va_space->peers_to_release[uvm_id_value(gpu->id)]);
-        // Set the pointer to NULL to avoid accidental re-use and double free.
-        va_space->peers_to_release[uvm_id_value(gpu->id)] = NULL;
     }
     // Check that all CPU/GPU affinity masks are empty


@@ -998,6 +998,22 @@ nvswitch_os_get_supported_register_events_params
     NvBool *bUserSuppliesOsData
 );
+/*
+ * @Brief : Is TNVL mode enabled.
+ *
+ * @Description : Returns if TNVL is enabled for the device
+ *
+ * @param[in] device a reference to the device
+ *
+ * @returns NV_TRUE, if TNVL is enabled
+ *          NV_FALSE, if TNVL is disabled
+ */
+NvBool
+nvswitch_lib_is_tnvl_enabled
+(
+    nvswitch_device *device
+);
 #ifdef __cplusplus
 }
 #endif


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a


@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -164,6 +164,7 @@ typedef struct
     NVSWITCH_DEVICE_FABRIC_STATE deviceState;
     NVSWITCH_DEVICE_BLACKLIST_REASON deviceReason;
     NvU32 physId;
+    NvBool bTnvlEnabled;
     /* See ctrl_dev_nvswitch.h for struct definition modification guidelines */
 } NVSWITCH_DEVICE_INSTANCE_INFO_V2;


@@ -159,7 +159,14 @@ static int lkca_aead_internal(struct crypto_aead *aead,
     }
     if (rc != 0) {
-        pr_info("Encryption FAILED\n");
+        if (enc) {
+            pr_info("aead.c: Encryption failed with error %i\n", rc);
+        } else {
+            pr_info("aead.c: Decryption failed with error %i\n", rc);
+            if (rc == -EBADMSG) {
+                pr_info("aead.c: Authentication tag mismatch!\n");
+            }
+        }
     }
     *data_out_size = data_in_size;
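
The split matters because the Linux kernel crypto API reports an AEAD authentication-tag failure on decryption as -EBADMSG, which usually means tampered or corrupted ciphertext rather than a driver bug. A minimal userspace analogue of the new logging, assuming only standard errno values (the function name below is hypothetical, not driver API):

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper mirroring the error reporting added above:
 * encryption failures are generic, while a decryption failure of
 * -EBADMSG specifically means the GCM authentication tag did not
 * match, i.e. the ciphertext or AAD was modified. */
static void log_aead_result(int rc, bool enc)
{
    if (rc == 0)
        return;
    if (enc) {
        printf("aead: encryption failed with error %d\n", rc);
    } else {
        printf("aead: decryption failed with error %d\n", rc);
        if (rc == -EBADMSG)
            printf("aead: authentication tag mismatch!\n");
    }
}

int main(void)
{
    log_aead_result(-EBADMSG, false); /* simulated tampered ciphertext */
    return 0;
}
```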


@@ -1000,6 +1000,8 @@ nvswitch_ctl_get_devices_v2(NVSWITCH_GET_DEVICES_V2_PARAMS *p)
                 &p->info[index].deviceState,
                 &p->info[index].deviceReason,
                 &p->info[index].driverState);
+            p->info[index].bTnvlEnabled = nvswitch_lib_is_tnvl_enabled(nvswitch_dev->lib_device);
             mutex_unlock(&nvswitch_dev->device_mutex);
         }
         index++;


@@ -1061,7 +1061,15 @@ NV_STATUS NV_API_CALL os_flush_user_cache(void)
 void NV_API_CALL os_flush_cpu_write_combine_buffer(void)
 {
-    wmb();
+#if defined(NVCPU_X86_64)
+    asm volatile("sfence" ::: "memory");
+#elif defined(NVCPU_PPC64LE)
+    __asm__ __volatile__ ("sync" : : : "memory");
+#elif defined(NVCPU_AARCH64)
+    asm volatile("dsb st" : : : "memory");
+#else
+    mb();
+#endif
 }
 // override initial debug level from registry
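
The hunk above replaces a generic write barrier with the cheapest per-architecture instruction that still orders all prior stores, including write-combined ones, ahead of later stores: sfence on x86-64, sync on POWER, and a store-only data synchronization barrier on AArch64. A standalone sketch of the same compile-time dispatch, assuming a GCC/Clang toolchain and using the standard compiler macros (__x86_64__ and friends) rather than the driver's NVCPU_* defines:

```c
#include <stdio.h>

/* Illustrative store-fence dispatch modeled on the change above.
 * The fallback is a full barrier, which is stronger (and slower)
 * than strictly required for flushing write-combining buffers. */
static inline void flush_write_combine_buffer(void)
{
#if defined(__x86_64__)
    asm volatile("sfence" ::: "memory");
#elif defined(__powerpc64__)
    asm volatile("sync" ::: "memory");
#elif defined(__aarch64__)
    asm volatile("dsb st" ::: "memory");
#else
    __sync_synchronize(); /* full-barrier fallback */
#endif
}

int main(void)
{
    flush_write_combine_buffer();
    puts("store barrier issued");
    return 0;
}
```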


@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -360,6 +360,7 @@
         // the stale messages from previous discovery.
         //
         bool bForceClearPendingMsg;
+        bool bSkipFakeDeviceDpcdAccess;
         Group *perHeadAttachedGroup[NV_MAX_HEADS];


@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -199,8 +199,9 @@ namespace DisplayPort
             TriState bAsyncSDPCapable;
             bool bMSAOverMSTCapable;
             bool bDscPassThroughColorFormatWar;
+            bool bSkipFakeDeviceDpcdAccess;
-            DeviceImpl(DPCDHAL * hal, ConnectorImpl * connector, DeviceImpl * parent);
+            DeviceImpl(DPCDHAL * hal, ConnectorImpl * connector, DeviceImpl * parent, bool bSkipFakeDeviceDpcdAccess);
             ~DeviceImpl();
             virtual bool isCableOk();


@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -83,6 +83,7 @@
 // Bug 4459839 : This regkey will enable DSC irrespective of LT status.
 //
 #define NV_DP_REGKEY_FORCE_DSC_ON_SINK          "DP_FORCE_DSC_ON_SINK"
+#define NV_DP_REGKEY_ENABLE_SKIP_DPCD_READS_WAR "DP_BUG_4478047_WAR"
 //
 // Data Base used to store all the regkey values.
@@ -119,6 +120,7 @@ struct DP_REGKEY_DATABASE
     bool bReassessMaxLink;
     bool bMSTPCONCapsReadDisabled;
     bool bForceDscOnSink;
+    bool bSkipFakeDeviceDpcdAccess;
 };
 #endif //INCLUDED_DP_REGKEYDATABASE_H


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -166,15 +166,16 @@ void ConnectorImpl::applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatab
this->bKeepLinkAliveMST = dpRegkeyDatabase.bOptLinkKeptAliveMst; this->bKeepLinkAliveMST = dpRegkeyDatabase.bOptLinkKeptAliveMst;
this->bKeepLinkAliveSST = dpRegkeyDatabase.bOptLinkKeptAliveSst; this->bKeepLinkAliveSST = dpRegkeyDatabase.bOptLinkKeptAliveSst;
} }
this->bReportDeviceLostBeforeNew = dpRegkeyDatabase.bReportDeviceLostBeforeNew; this->bReportDeviceLostBeforeNew = dpRegkeyDatabase.bReportDeviceLostBeforeNew;
this->maxLinkRateFromRegkey = dpRegkeyDatabase.applyMaxLinkRateOverrides; this->maxLinkRateFromRegkey = dpRegkeyDatabase.applyMaxLinkRateOverrides;
this->bEnableAudioBeyond48K = dpRegkeyDatabase.bAudioBeyond48kEnabled; this->bEnableAudioBeyond48K = dpRegkeyDatabase.bAudioBeyond48kEnabled;
this->bDisableSSC = dpRegkeyDatabase.bSscDisabled; this->bDisableSSC = dpRegkeyDatabase.bSscDisabled;
this->bEnableFastLT = dpRegkeyDatabase.bFastLinkTrainingEnabled; this->bEnableFastLT = dpRegkeyDatabase.bFastLinkTrainingEnabled;
this->bDscMstCapBug3143315 = dpRegkeyDatabase.bDscMstCapBug3143315; this->bDscMstCapBug3143315 = dpRegkeyDatabase.bDscMstCapBug3143315;
this->bPowerDownPhyBeforeD3 = dpRegkeyDatabase.bPowerDownPhyBeforeD3; this->bPowerDownPhyBeforeD3 = dpRegkeyDatabase.bPowerDownPhyBeforeD3;
this->bReassessMaxLink = dpRegkeyDatabase.bReassessMaxLink; this->bReassessMaxLink = dpRegkeyDatabase.bReassessMaxLink;
this->bForceDscOnSink = dpRegkeyDatabase.bForceDscOnSink; this->bForceDscOnSink = dpRegkeyDatabase.bForceDscOnSink;
this->bSkipFakeDeviceDpcdAccess = dpRegkeyDatabase.bSkipFakeDeviceDpcdAccess;
} }
void ConnectorImpl::setPolicyModesetOrderMitigation(bool enabled) void ConnectorImpl::setPolicyModesetOrderMitigation(bool enabled)
@ -478,7 +479,7 @@ create:
} }
else else
{ {
newDev = new DeviceImpl(hal, this, parent); newDev = new DeviceImpl(hal, this, parent, this->bSkipFakeDeviceDpcdAccess);
} }
if (parent) if (parent)
@ -4632,11 +4633,6 @@ bool ConnectorImpl::trainLinkOptimized(LinkConfiguration lConfig)
} }
} }
//
// There is no point in fallback here since we are link training
// to loweset link config that can support the mode.
//
lowestSelected.policy.setSkipFallBack(true);
bLinkTrainingSuccessful = train(lowestSelected, false); bLinkTrainingSuccessful = train(lowestSelected, false);
// //
// If LT failed, check if skipLT was marked. If so, clear the flag and // If LT failed, check if skipLT was marked. If so, clear the flag and
@ -7022,7 +7018,7 @@ void ConnectorImpl::createFakeMuxDevice(const NvU8 *buffer, NvU32 bufferSize)
return; return;
} }
DeviceImpl *newDev = new DeviceImpl(hal, this, NULL); DeviceImpl *newDev = new DeviceImpl(hal, this, NULL, this->bSkipFakeDeviceDpcdAccess);
if (!newDev) if (!newDev)
{ {
return; return;


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -75,7 +75,7 @@ DeviceImpl::~DeviceImpl()
} }
DeviceImpl::DeviceImpl(DPCDHAL * hal, ConnectorImpl * connector, DeviceImpl * parent) DeviceImpl::DeviceImpl(DPCDHAL * hal, ConnectorImpl * connector, DeviceImpl * parent, bool bSkipFakeDeviceDpcdAccess)
: parent(parent), : parent(parent),
hal(hal), hal(hal),
activeGroup(0), activeGroup(0),
@ -95,7 +95,8 @@ DeviceImpl::DeviceImpl(DPCDHAL * hal, ConnectorImpl * connector, DeviceImpl * pa
bIgnoreMsaCapCached(false), bIgnoreMsaCapCached(false),
bSdpExtCapable(Indeterminate), bSdpExtCapable(Indeterminate),
bAsyncSDPCapable(Indeterminate), bAsyncSDPCapable(Indeterminate),
bDscPassThroughColorFormatWar(false) bDscPassThroughColorFormatWar(false),
bSkipFakeDeviceDpcdAccess(bSkipFakeDeviceDpcdAccess)
{ {
bandwidth.enum_path.dataValid = false; bandwidth.enum_path.dataValid = false;
shadow.plugged = false; shadow.plugged = false;
@ -375,6 +376,12 @@ AuxBus::status DeviceImpl::getDpcdData(unsigned offset, NvU8 * buffer,
unsigned * sizeCompleted, unsigned * sizeCompleted,
unsigned * pNakReason) unsigned * pNakReason)
{ {
if (this->bSkipFakeDeviceDpcdAccess && isFakedMuxDevice())
{
DP_LOG(("Device is faked, returning nack\n"));
return AuxBus::nack;
}
if (!buffer || !sizeCompleted) if (!buffer || !sizeCompleted)
{ {
// default param may be NULL // default param may be NULL
@ -403,6 +410,12 @@ AuxBus::status DeviceImpl::setDpcdData(unsigned offset, NvU8 * buffer,
unsigned * sizeCompleted, unsigned * sizeCompleted,
unsigned * pNakReason) unsigned * pNakReason)
{ {
if (this->bSkipFakeDeviceDpcdAccess && isFakedMuxDevice())
{
DP_LOG(("Device is faked, returning nack\n"));
return AuxBus::nack;
}
if (!buffer || !sizeCompleted) if (!buffer || !sizeCompleted)
{ {
// default param may be NULL // default param may be NULL


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -96,6 +96,7 @@ const struct
{NV_DP_REGKEY_REASSESS_MAX_LINK, &dpRegkeyDatabase.bReassessMaxLink, DP_REG_VAL_BOOL}, {NV_DP_REGKEY_REASSESS_MAX_LINK, &dpRegkeyDatabase.bReassessMaxLink, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_MST_PCON_CAPS_READ_DISABLED, &dpRegkeyDatabase.bMSTPCONCapsReadDisabled, DP_REG_VAL_BOOL}, {NV_DP_REGKEY_MST_PCON_CAPS_READ_DISABLED, &dpRegkeyDatabase.bMSTPCONCapsReadDisabled, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_FORCE_DSC_ON_SINK, &dpRegkeyDatabase.bForceDscOnSink, DP_REG_VAL_BOOL}, {NV_DP_REGKEY_FORCE_DSC_ON_SINK, &dpRegkeyDatabase.bForceDscOnSink, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_ENABLE_SKIP_DPCD_READS_WAR, &dpRegkeyDatabase.bSkipFakeDeviceDpcdAccess, DP_REG_VAL_BOOL}
}; };
EvoMainLink::EvoMainLink(EvoInterface * provider, Timer * timer) : EvoMainLink::EvoMainLink(EvoInterface * provider, Timer * timer) :


@@ -43,18 +43,18 @@
 #endif
 #if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
-#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r550/r550_00-204"
-#define NV_BUILD_CHANGELIST_NUM (34025356)
+#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r550/r550_00-237"
+#define NV_BUILD_CHANGELIST_NUM (34145289)
 #define NV_BUILD_TYPE "Official"
-#define NV_BUILD_NAME "rel/gpu_drv/r550/r550_00-204"
-#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34025356)
+#define NV_BUILD_NAME "rel/gpu_drv/r550/r550_00-237"
+#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34145289)
 #else /* Windows builds */
-#define NV_BUILD_BRANCH_VERSION "r550_00-192"
-#define NV_BUILD_CHANGELIST_NUM (34025356)
+#define NV_BUILD_BRANCH_VERSION "r550_00-227"
+#define NV_BUILD_CHANGELIST_NUM (34145289)
 #define NV_BUILD_TYPE "Official"
-#define NV_BUILD_NAME "551.86"
-#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34025356)
+#define NV_BUILD_NAME "552.19"
+#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34145289)
 #define NV_BUILD_BRANCH_BASE_VERSION R550
 #endif
 // End buildmeister python edited section


@@ -4,7 +4,7 @@
 #if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
     (defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
-#define NV_VERSION_STRING "550.67"
+#define NV_VERSION_STRING "550.76"
 #else


@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES
+ * SPDX-FileCopyrightText: Copyright (c) 2003-2024 NVIDIA CORPORATION & AFFILIATES
  * SPDX-License-Identifier: MIT
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,12 +26,15 @@
 #define NV_PGC6_AON_FRTS_INPUT_WPR_SIZE_SECURE_SCRATCH_GROUP_03_0_WPR_SIZE_1MB_IN_4K 0x100
 #define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC NV_PGC6_AON_SECURE_SCRATCH_GROUP_20
 #define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_MODE_ENABLED 0:0
 #define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_MODE_ENABLED_TRUE 0x1
 #define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_MODE_ENABLED_FALSE 0x0
 #define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_DEV_ENABLED 1:1
 #define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_DEV_ENABLED_TRUE 0x1
 #define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_DEV_ENABLED_FALSE 0x0
+#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_MULTI_GPU_MODE 7:6
+#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_MULTI_GPU_MODE_NONE 0x0
+#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_MULTI_GPU_MODE_PROTECTED_PCIE 0x1
 #endif // __gh100_dev_gc6_island_addendum_h__


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2003-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -96,4 +96,5 @@
#define NV_NVLSAW_SECURE_SCRATCH_WARM_GROUP_1__SIZE_1 4 /* */ #define NV_NVLSAW_SECURE_SCRATCH_WARM_GROUP_1__SIZE_1 4 /* */
#define NV_NVLSAW_SECURE_SCRATCH_WARM_GROUP_1_VALUE 31:0 /* RWEVF */ #define NV_NVLSAW_SECURE_SCRATCH_WARM_GROUP_1_VALUE 31:0 /* RWEVF */
#define NV_NVLSAW_SECURE_SCRATCH_WARM_GROUP_1_VALUE_INIT 0x00000000 /* RWE-V */ #define NV_NVLSAW_SECURE_SCRATCH_WARM_GROUP_1_VALUE_INIT 0x00000000 /* RWE-V */
#define NV_NVLSAW_SECURE_SCRATCH_WARM_GROUP_3(i) (0x00000c50+(i)*0x4) /* RW-4A */
#endif // __ls10_dev_nvlsaw_ip_h__ #endif // __ls10_dev_nvlsaw_ip_h__


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2003-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -52,4 +52,9 @@
#define NV_NVLSAW_DRIVER_ATTACH_DETACH_FABRIC_MANAGER_ERROR 23:17 #define NV_NVLSAW_DRIVER_ATTACH_DETACH_FABRIC_MANAGER_ERROR 23:17
#define NV_NVLSAW_DRIVER_ATTACH_DETACH_EVENT_MESSAGE_COUNT 31:24 #define NV_NVLSAW_DRIVER_ATTACH_DETACH_EVENT_MESSAGE_COUNT 31:24
#define NV_NVLSAW_TNVL_MODE NV_NVLSAW_SECURE_SCRATCH_WARM_GROUP_3(0)
#define NV_NVLSAW_TNVL_MODE_STATUS 0:0
#define NV_NVLSAW_TNVL_MODE_STATUS_DISABLED 0x0
#define NV_NVLSAW_TNVL_MODE_STATUS_ENABLED 0x1
#endif //__ls10_dev_nvlsaw_ip_addendum_h__ #endif //__ls10_dev_nvlsaw_ip_addendum_h__
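
Definitions like NV_NVLSAW_TNVL_MODE_STATUS 0:0 encode a high:low bit range within the scratch register; driver code normally reads such fields through NVIDIA's DRF field macros. A minimal sketch of the same idea with the mask and shift written out by hand (the helpers below are hypothetical stand-ins, not the driver's API):

```c
#include <stdint.h>
#include <stdio.h>

/* A "hi:lo"-style field is just a bit range. FIELD_MASK(7, 6) yields
 * 0xC0; FIELD_MASK(0, 0) yields 0x1. */
#define FIELD_MASK(hi, lo)   ((uint32_t)((((uint64_t)1 << ((hi) - (lo) + 1)) - 1) << (lo)))
#define FIELD_GET(v, hi, lo) (((v) & FIELD_MASK(hi, lo)) >> (lo))

int main(void)
{
    uint32_t scratch = 0x00000001; /* pretend read of the TNVL scratch register */

    /* NV_NVLSAW_TNVL_MODE_STATUS occupies bits 0:0; 0x1 means enabled. */
    if (FIELD_GET(scratch, 0, 0) == 0x1)
        puts("TNVL mode: ENABLED");
    else
        puts("TNVL mode: DISABLED");
    return 0;
}
```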


@@ -115,6 +115,7 @@ typedef struct
 #define NVLINK_INBAND_FM_CAPS_BW_MODE_HALF     NVBIT64(3)
 #define NVLINK_INBAND_FM_CAPS_BW_MODE_3QUARTER NVBIT64(4)
 #define NVLINK_INBAND_FM_CAPS_MC_TEAM_SETUP_V2 NVBIT64(5)
+#define NVLINK_INBAND_FM_CAPS_EGM_ENABLED      NVBIT64(6)
 #define NVLINK_INBAND_FABRIC_HEALTH_MASK_DEGRADED_BW 1:0
 #define NVLINK_INBAND_FABRIC_HEALTH_MASK_DEGRADED_BW_NOT_SUPPORTED 0
@@ -135,7 +136,8 @@ typedef struct
     NvU32 linkMaskToBeReduced; /* bit mask of unused NVLink ports for P2P */
     NvU32 cliqueId;            /* Fabric Clique Id */
     NvU32 fabricHealthMask;    /* Mask containing bits indicating various fabric health parameters */
-    NvU8  reserved[20];        /* For future use. Must be initialized to zero */
+    NvU32 gpaAddressEGMHi;     /* GPA Address for EGM. Don't use if EGM support is not present in GFM */
+    NvU8  reserved[16];        /* For future use. Must be initialized to zero */
 } nvlink_inband_gpu_probe_rsp_t;
 typedef struct
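
Carving the new 32-bit field out of reserved[20] (leaving reserved[16]) keeps the wire layout size-stable, which matters for an in-band message format shared across driver versions. The Hi suffix suggests the field carries only the upper 32 bits of a 64-bit guest physical address; a hedged sketch of how a consumer might reassemble it, where the 4 GiB alignment is purely an assumption for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for nvlink_inband_gpu_probe_rsp_t, keeping only
 * the field this illustration needs. */
typedef struct {
    uint32_t gpaAddressEGMHi; /* upper 32 bits of the EGM base GPA (assumed) */
} probe_rsp_t;

int main(void)
{
    probe_rsp_t rsp = { .gpaAddressEGMHi = 0x00000080 };

    /* Assumption: the EGM base is 4 GiB-aligned, so the low 32 bits are
     * implicitly zero and the full address is the high half shifted up. */
    uint64_t egm_base_gpa = (uint64_t)rsp.gpaAddressEGMHi << 32;
    printf("EGM base GPA (assumed layout): 0x%016llx\n",
           (unsigned long long)egm_base_gpa);
    return 0;
}
```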


@@ -4456,9 +4456,93 @@ typedef struct
     NvU32 commandNvdmType;
     NvU32 responseNvdmType;
     NvU32 errorCode;
-    NvU8* pRspPayload;
 } NVSWITCH_FSPRPC_GET_CAPS_PARAMS;
+typedef enum nvswitch_device_tnvl_mode
+{
+    NVSWITCH_DEVICE_TNVL_MODE_DISABLED = 0,  // TNVL mode is disabled
+    NVSWITCH_DEVICE_TNVL_MODE_ENABLED,       // TNVL mode is enabled
+    NVSWITCH_DEVICE_TNVL_MODE_FAILURE,       // TNVL mode is enabled but in failure state
+    NVSWITCH_DEVICE_TNVL_MODE_LOCKED,        // TNVL mode is enabled and locked
+    NVSWITCH_DEVICE_TNVL_MODE_COUNT
+} NVSWITCH_DEVICE_TNVL_MODE;
+
+/*
+ * CTRL_NVSWITCH_SET_DEVICE_TNVL_LOCK
+ *
+ * Control to set Trusted NVLink(TNVL) lock
+ *
+ * FM sets the TNVL lock after Fabric State is CONFIGURED
+ *
+ * Parameters:
+ *    tnvlStatus [OUT]
+ *        TNVL mode status of the device
+ */
+typedef struct nvswitch_set_device_tnvl_lock_params
+{
+    NVSWITCH_DEVICE_TNVL_MODE tnvlStatus;
+} NVSWITCH_SET_DEVICE_TNVL_LOCK_PARAMS;
+
+/*
+ * CTRL_NVSWITCH_GET_ATTESTATION_CERTIFICATE_CHAIN
+ *
+ * Control to query NvSwitch session attestation certificate chain
+ *
+ * Parameters:
+ *
+ *    attestationCertChain: [OUT]
+ *        Attestation certificate chain for the NvSwitch queried
+ *
+ *    attestationCertChainSize: [OUT]
+ *        Actual size of attestation cert chain data
+ */
+#define NVSWITCH_ATTESTATION_CERT_CHAIN_MAX_SIZE 0x1400
+
+typedef struct nvswitch_get_attestation_certificate_chain_params
+{
+    NvU8  attestationCertChain[NVSWITCH_ATTESTATION_CERT_CHAIN_MAX_SIZE];
+    NvU32 attestationCertChainSize;
+} NVSWITCH_GET_ATTESTATION_CERTIFICATE_CHAIN_PARAMS;
+
+/*
+ * CTRL_NVSWITCH_GET_ATTESTATION_REPORT
+ *
+ * Control to query NvSwitch attestation report.
+ *
+ * Parameters:
+ *    nonce: [IN]
+ *        nonce
+ *    attestationReport: [OUT]
+ *        Attestation report of the NvSwitch queried
+ *    attestationReportSize: [OUT]
+ *        Actual size of the report
+ */
+#define NVSWITCH_NONCE_SIZE                  0x20
+#define NVSWITCH_ATTESTATION_REPORT_MAX_SIZE 0x2000
+
+typedef struct nvswitch_get_attestation_report_params
+{
+    NvU8  nonce[NVSWITCH_NONCE_SIZE];
+    NvU8  attestationReport[NVSWITCH_ATTESTATION_REPORT_MAX_SIZE];
+    NvU32 attestationReportSize;
+} NVSWITCH_GET_ATTESTATION_REPORT_PARAMS;
+
+/*
+ * CTRL_NVSWITCH_GET_TNVL_STATUS
+ *
+ * Control to query Trusted NVLink(TNVL) status
+ *
+ * Parameters :
+ *    status: [OUT]
+ *        TNVL mode status
+ */
+typedef struct nvswitch_get_tnvl_status_params
+{
+    NVSWITCH_DEVICE_TNVL_MODE status;
+} NVSWITCH_GET_TNVL_STATUS_PARAMS;
 #define REGISTER_RW_ENGINE_RAW  0x00
 #define REGISTER_RW_ENGINE_CLKS 0x10
@@ -4604,6 +4688,10 @@ typedef struct
 #define CTRL_NVSWITCH_GET_NVLINK_L1_THRESHOLD 0x66
 #define CTRL_NVSWITCH_SET_NVLINK_L1_THRESHOLD 0x67
 #define CTRL_NVSWITCH_FSPRPC_GET_CAPS         0x68
+#define CTRL_NVSWITCH_SET_DEVICE_TNVL_LOCK    0x69
+#define CTRL_NVSWITCH_GET_ATTESTATION_CERTIFICATE_CHAIN 0x6A
+#define CTRL_NVSWITCH_GET_ATTESTATION_REPORT  0x6B
+#define CTRL_NVSWITCH_GET_TNVL_STATUS         0x6C
 #ifdef __cplusplus
 }
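
Taken together, the new controls give a privileged client (typically the Fabric Manager) a small state machine: query CTRL_NVSWITCH_GET_TNVL_STATUS, fetch the attestation certificate chain and a nonce-bound report, then issue CTRL_NVSWITCH_SET_DEVICE_TNVL_LOCK once the fabric state is CONFIGURED. A minimal status-decoding sketch, with the enum re-declared locally so it compiles standalone (the real definitions live in ctrl_dev_nvswitch.h):

```c
#include <stdio.h>

/* Re-declared from the diff above for a self-contained example. */
typedef enum {
    NVSWITCH_DEVICE_TNVL_MODE_DISABLED = 0,
    NVSWITCH_DEVICE_TNVL_MODE_ENABLED,
    NVSWITCH_DEVICE_TNVL_MODE_FAILURE,
    NVSWITCH_DEVICE_TNVL_MODE_LOCKED,
    NVSWITCH_DEVICE_TNVL_MODE_COUNT
} NVSWITCH_DEVICE_TNVL_MODE;

static const char *tnvl_mode_str(NVSWITCH_DEVICE_TNVL_MODE m)
{
    switch (m) {
    case NVSWITCH_DEVICE_TNVL_MODE_DISABLED: return "disabled";
    case NVSWITCH_DEVICE_TNVL_MODE_ENABLED:  return "enabled";
    case NVSWITCH_DEVICE_TNVL_MODE_FAILURE:  return "enabled (failure state)";
    case NVSWITCH_DEVICE_TNVL_MODE_LOCKED:   return "enabled and locked";
    default:                                 return "unknown";
    }
}

int main(void)
{
    /* Pretend result of a CTRL_NVSWITCH_GET_TNVL_STATUS query. */
    NVSWITCH_DEVICE_TNVL_MODE status = NVSWITCH_DEVICE_TNVL_MODE_LOCKED;
    printf("TNVL status: %s\n", tnvl_mode_str(status));
    return 0;
}
```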


@@ -994,6 +994,22 @@ nvswitch_os_get_supported_register_events_params
     NvBool *bUserSuppliesOsData
 );
+/*
+ * @Brief : Is TNVL mode enabled.
+ *
+ * @Description : Returns if TNVL is enabled for the device
+ *
+ * @param[in] device a reference to the device
+ *
+ * @returns NV_TRUE, if TNVL is enabled
+ *          NV_FALSE, if TNVL is disabled
+ */
+NvBool
+nvswitch_lib_is_tnvl_enabled
+(
+    nvswitch_device *device
+);
 #ifdef __cplusplus
 }
 #endif


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a


@@ -25,9 +25,6 @@
 #include "haldef_nvswitch.h"
 #include "fsprpc_nvswitch.h"
-#include "fsp/nvdm_payload_cmd_response.h"
-#include "fsp/fsp_nvdm_format.h"
 /*!
  * @brief Check if FSP RM command queue is empty
  *


@@ -469,6 +469,9 @@ struct nvswitch_device
     // To be removed once newer vbios is on TOT.
     NvBool bIsNvlinkVbiosTableVersion2;
+
+    // Trusted NVLink Mode
+    NVSWITCH_DEVICE_TNVL_MODE tnvl_mode;
 };
 #define NVSWITCH_IS_DEVICE_VALID(device) \


@@ -24,6 +24,12 @@
 #ifndef _FSPRPC_NVSWITCH_H_
 #define _FSPRPC_NVSWITCH_H_
+#include "fsp/fsp_emem_channels.h"
+#include "fsp/nvdm_payload_cmd_response.h"
+#include "fsp/fsp_nvdm_format.h"
+#include "fsp/fsp_mctp_format.h"
+#include "fsp/fsp_tnvl_rpc.h"
+
 #define FSP_OK                        (0x00U)
 #define FSP_ERR_IFS_ERR_INVALID_STATE (0x9EU)
 #define FSP_ERR_IFR_FILE_NOT_FOUND    (0x9FU)


@@ -288,6 +288,13 @@
     _op(NvlStatus, nvswitch_fsp_error_code_to_nvlstatus_map, (nvswitch_device *device, NvU32 errorCode), _arch) \
     _op(NvlStatus, nvswitch_fsp_get_packet_info, (nvswitch_device *device, NvU8 *pBuffer, NvU32 size, NvU8 *pPacketState, NvU8 *pTag), _arch) \
     _op(NvlStatus, nvswitch_fsprpc_get_caps, (nvswitch_device *device, NVSWITCH_FSPRPC_GET_CAPS_PARAMS *params), _arch) \
+    _op(NvlStatus, nvswitch_detect_tnvl_mode, (nvswitch_device *device), _arch) \
+    _op(NvBool, nvswitch_is_tnvl_mode_enabled, (nvswitch_device *device), _arch) \
+    _op(NvBool, nvswitch_is_tnvl_mode_locked, (nvswitch_device *device), _arch) \
+    _op(NvlStatus, nvswitch_tnvl_get_attestation_certificate_chain, (nvswitch_device *device, NVSWITCH_GET_ATTESTATION_CERTIFICATE_CHAIN_PARAMS *params), _arch) \
+    _op(NvlStatus, nvswitch_tnvl_get_attestation_report, (nvswitch_device *device, NVSWITCH_GET_ATTESTATION_REPORT_PARAMS *params), _arch) \
+    _op(NvlStatus, nvswitch_tnvl_send_fsp_lock_config, (nvswitch_device *device), _arch) \
+    _op(NvlStatus, nvswitch_tnvl_get_status, (nvswitch_device *device, NVSWITCH_GET_TNVL_STATUS_PARAMS *params), _arch) \
     NVSWITCH_HAL_FUNCTION_LIST_FEATURE_0(_op, _arch) \
 #define NVSWITCH_HAL_FUNCTION_LIST_LS10(_op, _arch) \


@@ -707,5 +707,7 @@ NvlStatus nvswitch_fsp_config_ememc_lr10(nvswitch_device *device, NvU32 offset,
 NvlStatus nvswitch_fsp_write_to_emem_lr10(nvswitch_device *device, NvU8 *pBuffer, NvU32 size);
 NvlStatus nvswitch_fsp_read_from_emem_lr10(nvswitch_device *device, NvU8 *pBuffer, NvU32 size);
 NvlStatus nvswitch_fsp_error_code_to_nvlstatus_map_lr10(nvswitch_device *device, NvU32 errorCode);
+NvlStatus nvswitch_tnvl_get_attestation_certificate_chain_lr10(nvswitch_device *device, NVSWITCH_GET_ATTESTATION_CERTIFICATE_CHAIN_PARAMS *params);
+NvlStatus nvswitch_tnvl_get_attestation_report_lr10(nvswitch_device *device, NVSWITCH_GET_ATTESTATION_REPORT_PARAMS *params);
+NvlStatus nvswitch_tnvl_get_status_lr10(nvswitch_device *device, NVSWITCH_GET_TNVL_STATUS_PARAMS *params);
 #endif //_LR10_H_


@@ -1051,6 +1051,13 @@ NvlStatus nvswitch_fsp_write_to_emem_ls10(nvswitch_device *device, NvU8 *pBuffer
 NvlStatus nvswitch_fsp_read_from_emem_ls10(nvswitch_device *device, NvU8 *pBuffer, NvU32 size);
 NvlStatus nvswitch_fsp_error_code_to_nvlstatus_map_ls10(nvswitch_device *device, NvU32 errorCode);
 NvlStatus nvswitch_fsprpc_get_caps_ls10(nvswitch_device *device, NVSWITCH_FSPRPC_GET_CAPS_PARAMS *params);
+NvlStatus nvswitch_detect_tnvl_mode_ls10(nvswitch_device *device);
+NvBool nvswitch_is_tnvl_mode_enabled_ls10(nvswitch_device *device);
+NvBool nvswitch_is_tnvl_mode_locked_ls10(nvswitch_device *device);
+NvlStatus nvswitch_tnvl_get_attestation_certificate_chain_ls10(nvswitch_device *device, NVSWITCH_GET_ATTESTATION_CERTIFICATE_CHAIN_PARAMS *params);
+NvlStatus nvswitch_tnvl_get_attestation_report_ls10(nvswitch_device *device, NVSWITCH_GET_ATTESTATION_REPORT_PARAMS *params);
+NvlStatus nvswitch_tnvl_send_fsp_lock_config_ls10(nvswitch_device *device);
+NvlStatus nvswitch_tnvl_get_status_ls10(nvswitch_device *device, NVSWITCH_GET_TNVL_STATUS_PARAMS *params);
 NvlStatus nvswitch_ctrl_get_soe_heartbeat_ls10(nvswitch_device *device, NVSWITCH_GET_SOE_HEARTBEAT_PARAMS *p);
 NvlStatus nvswitch_cci_enable_iobist_ls10(nvswitch_device *device, NvU32 linkNumber, NvBool bEnable);


@@ -3720,6 +3720,9 @@ nvswitch_initialize_device_state_lr10
                              (NvU64)device->regkeys.link_enable_mask) &
                             ((~0ULL) >> (64 - NVSWITCH_LINK_COUNT(device))));
+    // Detect TNVL mode
+    nvswitch_detect_tnvl_mode(device);
+
     if (nvswitch_is_soe_supported(device))
     {
         retval = nvswitch_init_soe(device);
@@ -8111,6 +8114,76 @@ nvswitch_fsprpc_get_caps_lr10
     return -NVL_ERR_NOT_SUPPORTED;
 }
+NvlStatus
+nvswitch_detect_tnvl_mode_lr10
+(
+    nvswitch_device *device
+)
+{
+    return -NVL_ERR_NOT_SUPPORTED;
+}
+
+NvBool
+nvswitch_is_tnvl_mode_enabled_lr10
+(
+    nvswitch_device *device
+)
+{
+    return NV_FALSE;
+}
+
+NvBool
+nvswitch_is_tnvl_mode_locked_lr10
+(
+    nvswitch_device *device
+)
+{
+    return NV_FALSE;
+}
+
+NvlStatus
+nvswitch_tnvl_get_attestation_certificate_chain_lr10
+(
+    nvswitch_device *device,
+    NVSWITCH_GET_ATTESTATION_CERTIFICATE_CHAIN_PARAMS *params
+)
+{
+    // Not supported in LR10
+    return -NVL_ERR_NOT_SUPPORTED;
+}
+
+NvlStatus
+nvswitch_tnvl_get_attestation_report_lr10
+(
+    nvswitch_device *device,
+    NVSWITCH_GET_ATTESTATION_REPORT_PARAMS *params
+)
+{
+    // Not supported in LR10
+    return -NVL_ERR_NOT_SUPPORTED;
+}
+
+NvlStatus
+nvswitch_tnvl_send_fsp_lock_config_lr10
+(
+    nvswitch_device *device
+)
+{
+    // Not supported in LR10
+    return -NVL_ERR_NOT_SUPPORTED;
+}
+
+NvlStatus
+nvswitch_tnvl_get_status_lr10
+(
+    nvswitch_device *device,
+    NVSWITCH_GET_TNVL_STATUS_PARAMS *params
+)
+{
+    // Not supported in LR10
+    return -NVL_ERR_NOT_SUPPORTED;
+}
+
 //
 // This function auto creates the lr10 HAL connectivity from the NVSWITCH_INIT_HAL
 // macro in haldef_nvswitch.h
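
The LR10 stubs exist because the HAL macro list earlier in this commit adds TNVL entries for every architecture; older silicon must still link and fail cleanly at runtime. A toy version of that dispatch pattern, with hypothetical names and an illustrative error value (the real table is generated from the NVSWITCH_HAL_FUNCTION_LIST macros):

```c
#include <stdio.h>

#define NVL_SUCCESS           0
#define NVL_ERR_NOT_SUPPORTED 5 /* illustrative value, not the real code */

typedef struct chip_hal {
    /* Each architecture installs its own implementation; chips without
     * the feature install a stub that fails with -NVL_ERR_NOT_SUPPORTED. */
    int (*detect_tnvl_mode)(void);
} chip_hal;

static int detect_tnvl_mode_lr10(void) { return -NVL_ERR_NOT_SUPPORTED; }
static int detect_tnvl_mode_ls10(void) { return NVL_SUCCESS; }

int main(void)
{
    chip_hal lr10 = { .detect_tnvl_mode = detect_tnvl_mode_lr10 };
    chip_hal ls10 = { .detect_tnvl_mode = detect_tnvl_mode_ls10 };

    /* Callers go through the table and never branch on chip type. */
    printf("LR10 detect: %d (not supported)\n", lr10.detect_tnvl_mode());
    printf("LS10 detect: %d (success)\n", ls10.detect_tnvl_mode());
    return 0;
}
```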


@@ -28,12 +28,6 @@
 #include "fsprpc_nvswitch.h"
 #include "ls10/ls10.h"
-#include "fsp/fsp_emem_channels.h"
-#include "fsp/nvdm_payload_cmd_response.h"
-#include "fsp/fsp_nvdm_format.h"
-#include "fsp/fsp_mctp_format.h"
-#include "fsp/fsp_tnvl_rpc.h"
 #include "nvswitch/ls10/dev_fsp_pri.h"
 /*!
@@ -346,6 +340,7 @@ nvswitch_fsp_process_nvdm_msg_ls10
     switch (nvdmType)
     {
+        case NVDM_TYPE_TNVL:
         case NVDM_TYPE_FSP_RESPONSE:
             status = nvswitch_fsp_process_cmd_response(device, pBuffer, size);
             break;
@@ -606,7 +601,6 @@ nvswitch_fsprpc_get_caps_ls10
     params->responseNvdmType = responsePayload.nvdmType;
     params->commandNvdmType = responsePayload.cmdResponse.commandNvdmType;
     params->errorCode = responsePayload.cmdResponse.errorCode;
-    params->pRspPayload = responsePayload.rspPayload;
     return NVL_SUCCESS;
 }


@@ -2979,6 +2979,13 @@ nvswitch_is_soe_supported_ls10
         NVSWITCH_PRINT(device, WARN, "SOE can not be disabled via regkey.\n");
     }
+    if (nvswitch_is_tnvl_mode_locked(device))
+    {
+        NVSWITCH_PRINT(device, INFO,
+            "SOE is not supported when TNVL mode is locked\n");
+        return NV_FALSE;
+    }
+
     return NV_TRUE;
 }
@@ -3026,6 +3033,13 @@ nvswitch_is_inforom_supported_ls10
         return NV_FALSE;
     }
+    if (nvswitch_is_tnvl_mode_enabled(device))
+    {
+        NVSWITCH_PRINT(device, INFO,
+            "INFOROM is not supported when TNVL mode is enabled\n");
+        return NV_FALSE;
+    }
+
     if (!nvswitch_is_soe_supported(device))
     {
         NVSWITCH_PRINT(device, INFO,
@@ -3124,6 +3138,13 @@ nvswitch_is_smbpbi_supported_ls10
         return NV_FALSE;
     }
+    if (nvswitch_is_tnvl_mode_enabled(device))
+    {
+        NVSWITCH_PRINT(device, INFO,
+            "SMBPBI is not supported when TNVL mode is enabled\n");
+        return NV_FALSE;
+    }
+
     status = _nvswitch_get_bios_version(device, &version);
     if (status != NVL_SUCCESS)
     {

File diff suppressed because it is too large


@@ -991,6 +991,36 @@ _nvswitch_ctrl_fsprpc_get_caps
     return device->hal.nvswitch_fsprpc_get_caps(device, params);
 }
+static NvlStatus
+_nvswitch_ctrl_get_attestation_certificate_chain
+(
+    nvswitch_device *device,
+    NVSWITCH_GET_ATTESTATION_CERTIFICATE_CHAIN_PARAMS *params
+)
+{
+    return device->hal.nvswitch_tnvl_get_attestation_certificate_chain(device, params);
+}
+
+static NvlStatus
+_nvswitch_ctrl_get_attestation_report
+(
+    nvswitch_device *device,
+    NVSWITCH_GET_ATTESTATION_REPORT_PARAMS *params
+)
+{
+    return device->hal.nvswitch_tnvl_get_attestation_report(device, params);
+}
+
+static NvlStatus
+_nvswitch_ctrl_get_tnvl_status
+(
+    nvswitch_device *device,
+    NVSWITCH_GET_TNVL_STATUS_PARAMS *params
+)
+{
+    return device->hal.nvswitch_tnvl_get_status(device, params);
+}
+
 static NvlStatus
 _nvswitch_construct_soe
 (
@@ -2777,6 +2807,11 @@ nvswitch_lib_register_device
     device->device_fabric_state = NVSWITCH_DEVICE_FABRIC_STATE_STANDBY;
     device->device_blacklist_reason = NVSWITCH_DEVICE_BLACKLIST_REASON_NONE;
+    //
+    // Initialize TNVL Mode
+    //
+    device->tnvl_mode = NVSWITCH_DEVICE_TNVL_MODE_DISABLED;
+
     //
     // Initialize HAL connectivity as early as possible so that other lib
     // interfaces can work.
@@ -5888,6 +5923,101 @@ _nvswitch_ctrl_set_link_l1_threshold
     return NVL_SUCCESS;
 }
+NvlStatus
+nvswitch_detect_tnvl_mode
+(
+    nvswitch_device *device
+)
+{
+    return device->hal.nvswitch_detect_tnvl_mode(device);
+}
+
+NvBool
+nvswitch_is_tnvl_mode_enabled
+(
+    nvswitch_device *device
+)
+{
+    return device->hal.nvswitch_is_tnvl_mode_enabled(device);
+}
+
+NvBool
+nvswitch_is_tnvl_mode_locked
+(
+    nvswitch_device *device
+)
+{
+    return device->hal.nvswitch_is_tnvl_mode_locked(device);
+}
+
+NvBool NV_API_CALL
+nvswitch_lib_is_tnvl_enabled
+(
+    nvswitch_device *device
+)
+{
+    return nvswitch_is_tnvl_mode_enabled(device);
+}
+
+NvlStatus
+nvswitch_tnvl_send_fsp_lock_config
+(
+    nvswitch_device *device
+)
+{
+    return device->hal.nvswitch_tnvl_send_fsp_lock_config(device);
+}
+
+static NvlStatus
+_nvswitch_ctrl_set_device_tnvl_lock
+(
+    nvswitch_device *device,
+    NVSWITCH_SET_DEVICE_TNVL_LOCK_PARAMS *p
+)
+{
+    NvlStatus status = NVL_SUCCESS;
+
+    if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device))
+    {
+        return -NVL_BAD_ARGS;
+    }
+
+    if (!nvswitch_is_tnvl_mode_enabled(device))
+    {
+        NVSWITCH_PRINT(device, ERROR,
+            "%s: TNVL is not enabled\n",
+            __FUNCTION__);
+        return -NVL_ERR_NOT_SUPPORTED;
+    }
+
+    // Return failure if FM is not yet configured
+    if (device->device_fabric_state != NVSWITCH_DEVICE_FABRIC_STATE_CONFIGURED)
+    {
+        NVSWITCH_PRINT(device, ERROR,
+            "%s: FM is not configured yet\n",
+            __FUNCTION__);
+        return -NVL_ERR_INVALID_STATE;
+    }
+
+    //
+    // Disable non-fatal and legacy interrupts
+    // Disable commands to SOE
+    //
+
+    // Send lock-config command to FSP
+    status = nvswitch_tnvl_send_fsp_lock_config(device);
+    if (status == NVL_SUCCESS)
+    {
+        device->tnvl_mode = NVSWITCH_DEVICE_TNVL_MODE_LOCKED;
+    }
+    else
+    {
+        device->tnvl_mode = NVSWITCH_DEVICE_TNVL_MODE_FAILURE;
+    }
+
+    return status;
+}
+
 NvlStatus
 nvswitch_lib_ctrl
 (
@@ -6308,7 +6438,26 @@ nvswitch_lib_ctrl
         NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_FSPRPC_GET_CAPS,
                 _nvswitch_ctrl_fsprpc_get_caps,
                 NVSWITCH_FSPRPC_GET_CAPS_PARAMS);
+        NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(
+                CTRL_NVSWITCH_SET_DEVICE_TNVL_LOCK,
+                _nvswitch_ctrl_set_device_tnvl_lock,
+                NVSWITCH_SET_DEVICE_TNVL_LOCK_PARAMS,
+                osPrivate, flags);
+        NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(
+                CTRL_NVSWITCH_GET_ATTESTATION_CERTIFICATE_CHAIN,
+                _nvswitch_ctrl_get_attestation_certificate_chain,
+                NVSWITCH_GET_ATTESTATION_CERTIFICATE_CHAIN_PARAMS,
+                osPrivate, flags);
+        NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(
+                CTRL_NVSWITCH_GET_ATTESTATION_REPORT,
+                _nvswitch_ctrl_get_attestation_report,
+                NVSWITCH_GET_ATTESTATION_REPORT_PARAMS,
+                osPrivate, flags);
+        NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(
+                CTRL_NVSWITCH_GET_TNVL_STATUS,
+                _nvswitch_ctrl_get_tnvl_status,
+                NVSWITCH_GET_TNVL_STATUS_PARAMS,
+                osPrivate, flags);
         default:
             nvswitch_os_print(NVSWITCH_DBG_LEVEL_INFO, "unknown ioctl %x\n", cmd);
             retval = -NVL_BAD_ARGS;


@@ -94,7 +94,7 @@ typedef struct CC_CRYPTOBUNDLE_STATS {
     NV_DECLARE_ALIGNED(NvU64 numEncryptionsH2D, 8);
     NV_DECLARE_ALIGNED(NvU64 numEncryptionsD2H, 8);
     NV_DECLARE_ALIGNED(NvU64 bytesEncryptedH2D, 8);
-    NV_DECLARE_ALIGNED(NvU64 bytesDecryptedD2H, 8);
+    NV_DECLARE_ALIGNED(NvU64 bytesEncryptedD2H, 8);
 } CC_CRYPTOBUNDLE_STATS;
 typedef struct CC_CRYPTOBUNDLE_STATS *PCC_CRYPTOBUNDLE_STATS;


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -31,6 +31,7 @@
// //
#include "nvimpshared.h" #include "nvimpshared.h"
#include "cc_drv.h"
#include "ctrl/ctrl2080/ctrl2080base.h" #include "ctrl/ctrl2080/ctrl2080base.h"
#include "ctrl/ctrl2080/ctrl2080gpu.h" #include "ctrl/ctrl2080/ctrl2080gpu.h"
@ -862,6 +863,19 @@ typedef NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS NV2080
#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE (0x20800a43) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS_MESSAGE_ID" */ #define NV2080_CTRL_CMD_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE (0x20800a43) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MIGMGR_PROMOTE_GPU_INSTANCE_MEM_RANGE_PARAMS_MESSAGE_ID" */
#define NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS_MESSAGE_ID (0x45U)
typedef struct NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS {
NvBool bTeardown;
} NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS;
#define NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR (0x20800a46) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS_MESSAGE_ID" */
#define NV2080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS_MESSAGE_ID (0x46U)
typedef NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS NV2080_CTRL_INTERNAL_KGR_INIT_BUG4208224_WAR_PARAMS;
typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_PDB_PROPERTIES { typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_PDB_PROPERTIES {
NvBool bPerSubCtxheaderSupported; NvBool bPerSubCtxheaderSupported;
} NV2080_CTRL_INTERNAL_STATIC_GR_PDB_PROPERTIES; } NV2080_CTRL_INTERNAL_STATIC_GR_PDB_PROPERTIES;
@ -3620,11 +3634,15 @@ typedef struct NV2080_CTRL_CMD_INTERNAL_GET_GPU_FABRIC_PROBE_INFO_PARAMS {
* *
* bwMode[IN] * bwMode[IN]
* - Nvlink Bandwidth mode * - Nvlink Bandwidth mode
*
* bLocalEgmEnabled[IN]
* - EGM Enablement Status that needs to be set in GSP-RM
*/ */
#define NV2080_CTRL_CMD_INTERNAL_START_GPU_FABRIC_PROBE_INFO_PARAMS_MESSAGE_ID (0xF5U) #define NV2080_CTRL_CMD_INTERNAL_START_GPU_FABRIC_PROBE_INFO_PARAMS_MESSAGE_ID (0xF5U)
typedef struct NV2080_CTRL_CMD_INTERNAL_START_GPU_FABRIC_PROBE_INFO_PARAMS { typedef struct NV2080_CTRL_CMD_INTERNAL_START_GPU_FABRIC_PROBE_INFO_PARAMS {
NvU8 bwMode; NvU8 bwMode;
NvBool bLocalEgmEnabled;
} NV2080_CTRL_CMD_INTERNAL_START_GPU_FABRIC_PROBE_INFO_PARAMS; } NV2080_CTRL_CMD_INTERNAL_START_GPU_FABRIC_PROBE_INFO_PARAMS;
/*! /*!
@ -3757,6 +3775,50 @@ typedef struct NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS {
NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK ivMaskSet[NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_LCE_COUNT]; NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK ivMaskSet[NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_LCE_COUNT];
} NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS; } NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS;
/*!
* NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_ROTATE_KEYS
*
* This command handles key rotation for a given H2D key (and corresponding D2H key)
* by deriving new key on GSP and updating the key on relevant SEC2 or LCE.
* It also updates IVs for all channels using the key and conditionally re-enables them
* and notifies clients of key rotation status at the end.
*
* globalH2DKey : [IN]
* global h2d key to be rotated
* updatedEncryptIVMask: [OUT]
* Encrypt IV mask post IV key rotation for a given engine's kernel channel
* updatedDecryptIVMask: [OUT]
* Decrypt IV mask post IV key rotation for a given engine's kernel channel
*/
#define NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_ROTATE_KEYS (0x20800ae5) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS_MESSAGE_ID" */
#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS_MESSAGE_ID (0xE5U)
typedef struct NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS {
NvU32 globalH2DKey;
NvU32 updatedEncryptIVMask[CC_AES_256_GCM_IV_SIZE_DWORD];
NvU32 updatedDecryptIVMask[CC_AES_256_GCM_IV_SIZE_DWORD];
} NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS;
/*!
* NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION
*
* This command RCs all channels that use the given key and have not reported
* idle via NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION yet.
* RM needs to RC such channels before going ahead with key rotation.
*
* globalH2DKey : [IN]
* global h2d key whose channels will be RCed
*/
#define NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION (0x20800ae6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION_PARAMS_MESSAGE_ID" */
#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION_PARAMS_MESSAGE_ID (0xE6U)
typedef struct NV2080_CTRL_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION_PARAMS {
NvU32 exceptionType;
NvU32 globalH2DKey;
} NV2080_CTRL_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION_PARAMS;
/*! /*!
* NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_GPU_STATE * NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_GPU_STATE
* *
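
These controls pair with the NV_ERR_KEY_ROTATION_IN_PROGRESS status code added elsewhere in this commit: while keys are being rotated, affected operations are refused and clients are expected to back off and retry rather than treat the status as fatal. A hedged sketch of that client-side pattern, with a hypothetical submission stub standing in for the real RM control path:

```c
#include <stdio.h>

#define NV_OK                           0x00000000u
#define NV_ERR_KEY_ROTATION_IN_PROGRESS 0x0000007Du /* from nvstatuscodes.h above */

/* Hypothetical work-submission stub: succeeds once rotation finishes. */
static unsigned submit_work(int attempt)
{
    return (attempt < 2) ? NV_ERR_KEY_ROTATION_IN_PROGRESS : NV_OK;
}

int main(void)
{
    for (int attempt = 0; attempt < 5; attempt++) {
        unsigned status = submit_work(attempt);
        if (status == NV_OK) {
            printf("work accepted on attempt %d\n", attempt + 1);
            return 0;
        }
        if (status == NV_ERR_KEY_ROTATION_IN_PROGRESS) {
            printf("key rotation in progress, retrying...\n");
            continue; /* a real client would also sleep or wait on an event */
        }
        printf("unexpected status 0x%08x\n", status);
        return 1;
    }
    return 1;
}
```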


@@ -124,7 +124,8 @@
 #define UNRECOVERABLE_ECC_ERROR_ESCAPE    (140)
 #define ROBUST_CHANNEL_FAST_PATH_ERROR    (141)
 #define GPU_INIT_ERROR                    (143)
-#define ROBUST_CHANNEL_LAST_ERROR         (GPU_INIT_ERROR)
+#define ROBUST_CHANNEL_KEY_ROTATION_ERROR (144)
+#define ROBUST_CHANNEL_LAST_ERROR         (ROBUST_CHANNEL_KEY_ROTATION_ERROR)
 // Indexed CE reference


@@ -151,6 +151,7 @@ NV_STATUS_CODE(NV_ERR_RISCV_ERROR, 0x00000079, "Generic RISC
 NV_STATUS_CODE(NV_ERR_FABRIC_MANAGER_NOT_PRESENT, 0x0000007A, "Fabric Manager is not loaded")
 NV_STATUS_CODE(NV_ERR_ALREADY_SIGNALLED, 0x0000007B, "Semaphore Surface value already >= requested wait value")
 NV_STATUS_CODE(NV_ERR_QUEUE_TASK_SLOT_NOT_AVAILABLE, 0x0000007C, "PMU RPC error due to no queue slot available for this event")
+NV_STATUS_CODE(NV_ERR_KEY_ROTATION_IN_PROGRESS, 0x0000007D, "Operation not allowed as key rotation is in progress")
 // Warnings:
 NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch")


@ -682,6 +682,8 @@ ENTRY(0x2329, 0x2032, 0x10de, "NVIDIA H20-16C"),
ENTRY(0x2329, 0x2033, 0x10de, "NVIDIA H20-24C"), ENTRY(0x2329, 0x2033, 0x10de, "NVIDIA H20-24C"),
ENTRY(0x2329, 0x2034, 0x10de, "NVIDIA H20-48C"), ENTRY(0x2329, 0x2034, 0x10de, "NVIDIA H20-48C"),
ENTRY(0x2329, 0x2035, 0x10de, "NVIDIA H20-96C"), ENTRY(0x2329, 0x2035, 0x10de, "NVIDIA H20-96C"),
ENTRY(0x2329, 0x2047, 0x10de, "NVIDIA H20-8C"),
ENTRY(0x2329, 0x2048, 0x10de, "NVIDIA H20-32C"),
ENTRY(0x2330, 0x187A, 0x10de, "NVIDIA H100XM-1-10CME"), ENTRY(0x2330, 0x187A, 0x10de, "NVIDIA H100XM-1-10CME"),
ENTRY(0x2330, 0x187B, 0x10de, "NVIDIA H100XM-1-10C"), ENTRY(0x2330, 0x187B, 0x10de, "NVIDIA H100XM-1-10C"),
ENTRY(0x2330, 0x187C, 0x10de, "NVIDIA H100XM-1-20C"), ENTRY(0x2330, 0x187C, 0x10de, "NVIDIA H100XM-1-20C"),
@@ -856,45 +858,45 @@ ENTRY(0x26B2, 0x1835, 0x10de, "NVIDIA RTX5000-Ada-4C"),
ENTRY(0x26B2, 0x1836, 0x10de, "NVIDIA RTX5000-Ada-8C"),
ENTRY(0x26B2, 0x1837, 0x10de, "NVIDIA RTX5000-Ada-16C"),
ENTRY(0x26B2, 0x1838, 0x10de, "NVIDIA RTX5000-Ada-32C"),
-ENTRY(0x26B3, 0x1958, 0x10de, "NVIDIA RTX 5880-Ada-1B"),
+ENTRY(0x26B3, 0x1958, 0x10de, "NVIDIA RTX5880-Ada-1B"),
-ENTRY(0x26B3, 0x1959, 0x10de, "NVIDIA RTX 5880-Ada-2B"),
+ENTRY(0x26B3, 0x1959, 0x10de, "NVIDIA RTX5880-Ada-2B"),
-ENTRY(0x26B3, 0x195A, 0x10de, "NVIDIA RTX 5880-Ada-1Q"),
+ENTRY(0x26B3, 0x195A, 0x10de, "NVIDIA RTX5880-Ada-1Q"),
-ENTRY(0x26B3, 0x195B, 0x10de, "NVIDIA RTX 5880-Ada-2Q"),
+ENTRY(0x26B3, 0x195B, 0x10de, "NVIDIA RTX5880-Ada-2Q"),
-ENTRY(0x26B3, 0x195C, 0x10de, "NVIDIA RTX 5880-Ada-3Q"),
+ENTRY(0x26B3, 0x195C, 0x10de, "NVIDIA RTX5880-Ada-3Q"),
-ENTRY(0x26B3, 0x195D, 0x10de, "NVIDIA RTX 5880-Ada-4Q"),
+ENTRY(0x26B3, 0x195D, 0x10de, "NVIDIA RTX5880-Ada-4Q"),
-ENTRY(0x26B3, 0x195E, 0x10de, "NVIDIA RTX 5880-Ada-6Q"),
+ENTRY(0x26B3, 0x195E, 0x10de, "NVIDIA RTX5880-Ada-6Q"),
-ENTRY(0x26B3, 0x195F, 0x10de, "NVIDIA RTX 5880-Ada-8Q"),
+ENTRY(0x26B3, 0x195F, 0x10de, "NVIDIA RTX5880-Ada-8Q"),
-ENTRY(0x26B3, 0x1960, 0x10de, "NVIDIA RTX 5880-Ada-12Q"),
+ENTRY(0x26B3, 0x1960, 0x10de, "NVIDIA RTX5880-Ada-12Q"),
-ENTRY(0x26B3, 0x1961, 0x10de, "NVIDIA RTX 5880-Ada-16Q"),
+ENTRY(0x26B3, 0x1961, 0x10de, "NVIDIA RTX5880-Ada-16Q"),
-ENTRY(0x26B3, 0x1962, 0x10de, "NVIDIA RTX 5880-Ada-24Q"),
+ENTRY(0x26B3, 0x1962, 0x10de, "NVIDIA RTX5880-Ada-24Q"),
-ENTRY(0x26B3, 0x1963, 0x10de, "NVIDIA RTX 5880-Ada-48Q"),
+ENTRY(0x26B3, 0x1963, 0x10de, "NVIDIA RTX5880-Ada-48Q"),
-ENTRY(0x26B3, 0x1964, 0x10de, "NVIDIA RTX 5880-Ada-1A"),
+ENTRY(0x26B3, 0x1964, 0x10de, "NVIDIA RTX5880-Ada-1A"),
-ENTRY(0x26B3, 0x1965, 0x10de, "NVIDIA RTX 5880-Ada-2A"),
+ENTRY(0x26B3, 0x1965, 0x10de, "NVIDIA RTX5880-Ada-2A"),
-ENTRY(0x26B3, 0x1966, 0x10de, "NVIDIA RTX 5880-Ada-3A"),
+ENTRY(0x26B3, 0x1966, 0x10de, "NVIDIA RTX5880-Ada-3A"),
-ENTRY(0x26B3, 0x1967, 0x10de, "NVIDIA RTX 5880-Ada-4A"),
+ENTRY(0x26B3, 0x1967, 0x10de, "NVIDIA RTX5880-Ada-4A"),
-ENTRY(0x26B3, 0x1968, 0x10de, "NVIDIA RTX 5880-Ada-6A"),
+ENTRY(0x26B3, 0x1968, 0x10de, "NVIDIA RTX5880-Ada-6A"),
-ENTRY(0x26B3, 0x1969, 0x10de, "NVIDIA RTX 5880-Ada-8A"),
+ENTRY(0x26B3, 0x1969, 0x10de, "NVIDIA RTX5880-Ada-8A"),
-ENTRY(0x26B3, 0x196A, 0x10de, "NVIDIA RTX 5880-Ada-12A"),
+ENTRY(0x26B3, 0x196A, 0x10de, "NVIDIA RTX5880-Ada-12A"),
-ENTRY(0x26B3, 0x196B, 0x10de, "NVIDIA RTX 5880-Ada-16A"),
+ENTRY(0x26B3, 0x196B, 0x10de, "NVIDIA RTX5880-Ada-16A"),
-ENTRY(0x26B3, 0x196C, 0x10de, "NVIDIA RTX 5880-Ada-24A"),
+ENTRY(0x26B3, 0x196C, 0x10de, "NVIDIA RTX5880-Ada-24A"),
-ENTRY(0x26B3, 0x196D, 0x10de, "NVIDIA RTX 5880-Ada-48A"),
+ENTRY(0x26B3, 0x196D, 0x10de, "NVIDIA RTX5880-Ada-48A"),
-ENTRY(0x26B3, 0x196E, 0x10de, "NVIDIA RTX 5880-Ada-1"),
+ENTRY(0x26B3, 0x196E, 0x10de, "NVIDIA RTX5880-Ada-1"),
-ENTRY(0x26B3, 0x196F, 0x10de, "NVIDIA RTX 5880-Ada-2"),
+ENTRY(0x26B3, 0x196F, 0x10de, "NVIDIA RTX5880-Ada-2"),
-ENTRY(0x26B3, 0x1970, 0x10de, "NVIDIA RTX 5880-Ada-3"),
+ENTRY(0x26B3, 0x1970, 0x10de, "NVIDIA RTX5880-Ada-3"),
-ENTRY(0x26B3, 0x1971, 0x10de, "NVIDIA RTX 5880-Ada-4"),
+ENTRY(0x26B3, 0x1971, 0x10de, "NVIDIA RTX5880-Ada-4"),
-ENTRY(0x26B3, 0x1972, 0x10de, "NVIDIA RTX 5880-Ada-6"),
+ENTRY(0x26B3, 0x1972, 0x10de, "NVIDIA RTX5880-Ada-6"),
-ENTRY(0x26B3, 0x1973, 0x10de, "NVIDIA RTX 5880-Ada-8"),
+ENTRY(0x26B3, 0x1973, 0x10de, "NVIDIA RTX5880-Ada-8"),
-ENTRY(0x26B3, 0x1974, 0x10de, "NVIDIA RTX 5880-Ada-12"),
+ENTRY(0x26B3, 0x1974, 0x10de, "NVIDIA RTX5880-Ada-12"),
-ENTRY(0x26B3, 0x1975, 0x10de, "NVIDIA RTX 5880-Ada-16"),
+ENTRY(0x26B3, 0x1975, 0x10de, "NVIDIA RTX5880-Ada-16"),
-ENTRY(0x26B3, 0x1976, 0x10de, "NVIDIA RTX 5880-Ada-24"),
+ENTRY(0x26B3, 0x1976, 0x10de, "NVIDIA RTX5880-Ada-24"),
-ENTRY(0x26B3, 0x1977, 0x10de, "NVIDIA RTX 5880-Ada-48"),
+ENTRY(0x26B3, 0x1977, 0x10de, "NVIDIA RTX5880-Ada-48"),
-ENTRY(0x26B3, 0x1978, 0x10de, "NVIDIA RTX 5880-Ada-4C"),
+ENTRY(0x26B3, 0x1978, 0x10de, "NVIDIA RTX5880-Ada-4C"),
-ENTRY(0x26B3, 0x1979, 0x10de, "NVIDIA RTX 5880-Ada-6C"),
+ENTRY(0x26B3, 0x1979, 0x10de, "NVIDIA RTX5880-Ada-6C"),
-ENTRY(0x26B3, 0x197A, 0x10de, "NVIDIA RTX 5880-Ada-8C"),
+ENTRY(0x26B3, 0x197A, 0x10de, "NVIDIA RTX5880-Ada-8C"),
-ENTRY(0x26B3, 0x197B, 0x10de, "NVIDIA RTX 5880-Ada-12C"),
+ENTRY(0x26B3, 0x197B, 0x10de, "NVIDIA RTX5880-Ada-12C"),
-ENTRY(0x26B3, 0x197C, 0x10de, "NVIDIA RTX 5880-Ada-16C"),
+ENTRY(0x26B3, 0x197C, 0x10de, "NVIDIA RTX5880-Ada-16C"),
-ENTRY(0x26B3, 0x197D, 0x10de, "NVIDIA RTX 5880-Ada-24C"),
+ENTRY(0x26B3, 0x197D, 0x10de, "NVIDIA RTX5880-Ada-24C"),
-ENTRY(0x26B3, 0x197E, 0x10de, "NVIDIA RTX 5880-Ada-48C"),
+ENTRY(0x26B3, 0x197E, 0x10de, "NVIDIA RTX5880-Ada-48C"),
ENTRY(0x26B5, 0x176D, 0x10de, "NVIDIA L40-1B"),
ENTRY(0x26B5, 0x176E, 0x10de, "NVIDIA L40-2B"),
ENTRY(0x26B5, 0x176F, 0x10de, "NVIDIA L40-1Q"),
View File
@@ -21,6 +21,7 @@ static inline void _get_chip_id_for_alias_pgpu(NvU32 *dev_id, NvU32 *subdev_id)
{ 0x2329, 0x198C, 0x2329, 0x198B },
{ 0x2330, 0x16C0, 0x2330, 0x16C1 },
{ 0x2336, 0x16C2, 0x2330, 0x16C1 },
{ 0x26BA, 0x1990, 0x26BA, 0x1957 },
};
for (NvU32 i = 0; i < (sizeof(vgpu_aliases) / sizeof(struct vgpu_alias_details)); ++i) {
View File
@@ -448,9 +448,13 @@
// Cavium, Inc. CN99xx [ThunderX2] [177d:af00]
#define CAVIUM_X2_DEVID 0xAF00
-// Lenovo Tomcat Workstation
+// Lenovo Tomcat/Falcon/Hornet Workstations
#define LENOVO_TOMCAT_DEVID 0x1B81
#define LENOVO_TOMCAT_SSDEVID 0x104e
#define LENOVO_FALCON_DEVID 0x7A8A
#define LENOVO_FALCON_SSDEVID 0x1055
#define LENOVO_HORNET_DEVID 0x7A8A
#define LENOVO_HORNET_SSDEVID 0x1056
// NVIDIA C51
#define NVIDIA_C51_DEVICE_ID_MIN 0x2F0
View File
@@ -55,6 +55,8 @@ enum
CC_LKEYID_GSP_CPU_REPLAYABLE_FAULT,
CC_LKEYID_CPU_GSP_RESERVED2,
CC_LKEYID_GSP_CPU_NON_REPLAYABLE_FAULT,
CC_LKEYID_GSP_SEC2_LOCKED_RPC,
CC_LKEYID_SEC2_GSP_LOCKED_RPC,
CC_KEYSPACE_GSP_SIZE // This is always the last element.
};
// The fault buffers only support GPU-to-CPU encryption, so the CPU-to-GPU encryption slot
@@ -159,6 +161,10 @@ enum
// Get the local key ID from a global key ID.
#define CC_GKEYID_GET_LKEYID(a) (NvU16)((a) & 0xffff)
// Decrement/increment the local key ID portion of a global key ID.
#define CC_GKEYID_DEC_LKEYID(a) CC_GKEYID_GEN(CC_GKEYID_GET_KEYSPACE((a)), CC_GKEYID_GET_LKEYID((a)) - 1)
#define CC_GKEYID_INC_LKEYID(a) CC_GKEYID_GEN(CC_GKEYID_GET_KEYSPACE((a)), CC_GKEYID_GET_LKEYID((a)) + 1)
// Get the unique string from a global key ID.
#define CC_GKEYID_GET_STR(a) \
(CC_GKEYID_GET_KEYSPACE(a) == CC_KEYSPACE_GSP) ? \
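As a hedged illustration of the new increment/decrement helpers (not part of the diff itself): assuming CC_GKEYID_GEN packs a keyspace and a local key ID into one 32-bit global key ID, as the existing GET macros imply, adjacent keys in a keyspace can be derived like this:

    // Hypothetical usage sketch; only the macros and enum values shown in
    // this header are real, the variable names are illustrative.
    NvU32 gkey = CC_GKEYID_GEN(CC_KEYSPACE_GSP, CC_LKEYID_GSP_CPU_REPLAYABLE_FAULT);
    NvU32 next = CC_GKEYID_INC_LKEYID(gkey); // same keyspace, local key ID + 1
    NvU32 prev = CC_GKEYID_DEC_LKEYID(next); // back to the original key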
View File
@@ -1,5 +1,5 @@
/*
-* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+* SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -77,6 +77,9 @@
#define NV_CTRL_INTR_GPU_VECTOR_TO_SUBTREE(i) \
((NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(i)) / 2)
// First index of doorbell which is controlled by VF
#define NV_CTRL_INTR_GPU_DOORBELL_INDEX_VF_START 2048
// The max number of leaf registers we expect
#define NV_MAX_INTR_LEAVES 16
View File
@@ -25,6 +25,9 @@
#define _FSP_TNVL_RPC_H_
#define TNVL_CAPS_SUBMESSAGE_ID 0xFF
#define TNVL_GET_ATT_CERTS_SUBMESSAGE_ID 0x0
#define TNVL_GET_ATT_REPORT_SUBMESSAGE_ID 0x1
#define TNVL_LOCK_CONFIG_SUBMESSAGE_ID 0x2
#pragma pack(1)
@@ -46,6 +49,81 @@ typedef struct
NvU8 rspPayload[40];
} TNVL_RPC_CAPS_RSP_PAYLOAD;
/*!
* @brief TNVL payload to query attestation cert chain
*/
typedef struct
{
NvU8 subMessageId;
NvU8 rsvd;
NvU8 minorVersion;
NvU8 majorVersion;
} TNVL_GET_ATT_CERTS_CMD_PAYLOAD;
/*!
* @brief TNVL response payload for attestation cert chain
*/
typedef struct
{
NVDM_PAYLOAD_COMMAND_RESPONSE cmdResponse;
NvU8 subMessageId;
NvU8 rsvd0;
NvU8 minorVersion;
NvU8 majorVersion;
NvU16 certChainLength;
NvU16 rsvd1;
NvU8 certChain[NVSWITCH_ATTESTATION_CERT_CHAIN_MAX_SIZE];
} TNVL_GET_ATT_CERTS_RSP_PAYLOAD;
/*!
* @brief TNVL payload to query attestation report
*/
typedef struct
{
NvU8 subMessageId;
NvU8 rsvd;
NvU8 minorVersion;
NvU8 majorVersion;
NvU8 nonce[NVSWITCH_NONCE_SIZE];
} TNVL_GET_ATT_REPORT_CMD_PAYLOAD;
/*!
* @brief TNVL response payload for attestation report
*/
typedef struct
{
NVDM_PAYLOAD_COMMAND_RESPONSE cmdResponse;
NvU8 subMessageId;
NvU8 rsvd0;
NvU8 minorVersion;
NvU8 majorVersion;
NvU32 measurementSize;
NvU8 measurementBuffer[NVSWITCH_ATTESTATION_REPORT_MAX_SIZE];
} TNVL_GET_ATT_REPORT_RSP_PAYLOAD;
/*!
* @brief TNVL payload to send lock config
*/
typedef struct
{
NvU8 subMessageId;
NvU8 rsvd;
NvU8 minorVersion;
NvU8 majorVersion;
} TNVL_LOCK_CONFIG_CMD_PAYLOAD;
/*!
* @brief TNVL response payload for lock config
*/
typedef struct
{
NVDM_PAYLOAD_COMMAND_RESPONSE cmdResponse;
NvU8 subMessageId;
NvU8 rsvd0;
NvU8 minorVersion;
NvU8 majorVersion;
} TNVL_LOCK_CONFIG_RSP_PAYLOAD;
#pragma pack()
#endif // _FSP_TNVL_RPC_H_
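As a hedged sketch of how the new attestation-certificate request might be filled in before being sent to the FSP (the version values and the send path are assumptions, not part of this header):

    // Hypothetical sketch; only the struct and submessage ID come from the header.
    TNVL_GET_ATT_CERTS_CMD_PAYLOAD cmd = { 0 };
    cmd.subMessageId = TNVL_GET_ATT_CERTS_SUBMESSAGE_ID;
    cmd.majorVersion = 1; // assumed protocol version
    cmd.minorVersion = 0;
    // The response lands in a TNVL_GET_ATT_CERTS_RSP_PAYLOAD whose
    // certChainLength says how many bytes of certChain[] are valid.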
View File
@@ -249,7 +249,8 @@ CSINFO chipsetInfo[] =
{PCI_VENDOR_ID_MELLANOX, 0xA2D0, CS_MELLANOX_BLUEFIELD, "Mellanox BlueField", Mellanox_BlueField_setupFunc},
{PCI_VENDOR_ID_MELLANOX, 0xA2D4, CS_MELLANOX_BLUEFIELD2, "Mellanox BlueField 2", NULL},
{PCI_VENDOR_ID_MELLANOX, 0xA2D5, CS_MELLANOX_BLUEFIELD2, "Mellanox BlueField 2 Crypto disabled", NULL},
-{PCI_VENDOR_ID_MELLANOX, 0xA2DB, CS_MELLANOX_BLUEFIELD3, "Mellanox BlueField 3", Mellanox_BlueField3_setupFunc},
+{PCI_VENDOR_ID_MELLANOX, 0xA2DA, CS_MELLANOX_BLUEFIELD3, "Mellanox BlueField 3 Crypto enabled", Mellanox_BlueField3_setupFunc},
+{PCI_VENDOR_ID_MELLANOX, 0xA2DB, CS_MELLANOX_BLUEFIELD3, "Mellanox BlueField 3 Crypto disabled", Mellanox_BlueField3_setupFunc},
{PCI_VENDOR_ID_AMAZON, 0x0200, CS_AMAZON_GRAVITRON2, "Amazon Gravitron2", Amazon_Gravitron2_setupFunc},
{PCI_VENDOR_ID_FUJITSU, 0x1952, CS_FUJITSU_A64FX, "Fujitsu A64FX", Fujitsu_A64FX_setupFunc},
{PCI_VENDOR_ID_CADENCE, 0xDC01, CS_PHYTIUM_S2500, "Phytium S2500", NULL},
View File
@@ -170,6 +170,7 @@
--undefined=nvswitch_lib_get_valid_ports_mask
--undefined=nvswitch_lib_is_i2c_supported
--undefined=nvswitch_lib_i2c_transfer
--undefined=nvswitch_lib_is_tnvl_enabled
--undefined=rm_gpu_copy_mmu_faults
--undefined=rm_gpu_handle_mmu_faults
--undefined=rm_gpu_need_4k_page_isolation
View File
@@ -39,8 +39,8 @@
//
static BINDATA_CONST NvU8 kgspBinArchiveConcatenatedFMCDesc_GH100_ucode_desc_prod_data[] =
{
-0x63, 0x65, 0x20, 0x02, 0x70, 0x41, 0xf1, 0x32, 0x20, 0xde, 0x00, 0xc4, 0x37, 0x19, 0x19, 0x18,
+0x63, 0x65, 0x20, 0x02, 0x70, 0x41, 0xf1, 0x72, 0x20, 0xde, 0x08, 0xc4, 0x37, 0x19, 0x19, 0x18,
-0xf1, 0xe8, 0x03, 0x00, 0x7f, 0x51, 0xb9, 0x4d, 0x54, 0x00, 0x00, 0x00,
+0xf1, 0xe8, 0x03, 0x00, 0x1e, 0x4d, 0xae, 0xcc, 0x54, 0x00, 0x00, 0x00,
};
#endif // defined(BINDATA_INCLUDE_DATA)
View File
@@ -164,6 +164,8 @@ void __nvoc_init_dataField_ConfidentialCompute(ConfidentialCompute *pThis, RmHal
pThis->setProperty(pThis, PDB_PROP_CONFCOMPUTE_GPUS_READY_CHECK_ENABLED, ((NvBool)(0 == 0)));
pThis->setProperty(pThis, PDB_PROP_CONFCOMPUTE_SPDM_ENABLED, ((NvBool)(0 != 0)));
pThis->setProperty(pThis, PDB_PROP_CONFCOMPUTE_MULTI_GPU_PROTECTED_PCIE_MODE_ENABLED, ((NvBool)(0 != 0)));
pThis->setProperty(pThis, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED, ((NvBool)(0 != 0)));
pThis->setProperty(pThis, PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED, ((NvBool)(0 != 0)));
}
NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* );
@@ -276,6 +278,17 @@ static void __nvoc_init_funcTable_ConfidentialCompute_1(ConfidentialCompute *pTh
pThis->__confComputeDeriveSecrets__ = &confComputeDeriveSecrets_46f6a7;
}
// Hal function -- confComputeUpdateSecrets
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__confComputeUpdateSecrets__ = &confComputeUpdateSecrets_GH100;
}
// default
else
{
pThis->__confComputeUpdateSecrets__ = &confComputeUpdateSecrets_46f6a7;
}
// Hal function -- confComputeIsSpdmEnabled
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
@@ -287,6 +300,93 @@ static void __nvoc_init_funcTable_ConfidentialCompute_1(ConfidentialCompute *pTh
pThis->__confComputeIsSpdmEnabled__ = &confComputeIsSpdmEnabled_491d52;
}
// Hal function -- confComputeGetEngineIdFromKeySpace
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__confComputeGetEngineIdFromKeySpace__ = &confComputeGetEngineIdFromKeySpace_GH100;
}
// default
else
{
pThis->__confComputeGetEngineIdFromKeySpace__ = &confComputeGetEngineIdFromKeySpace_78ac8b;
}
// Hal function -- confComputeGlobalKeyIsKernelPriv
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__confComputeGlobalKeyIsKernelPriv__ = &confComputeGlobalKeyIsKernelPriv_GH100;
}
// default
else
{
pThis->__confComputeGlobalKeyIsKernelPriv__ = &confComputeGlobalKeyIsKernelPriv_491d52;
}
// Hal function -- confComputeGetKeyPairByChannel
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__confComputeGetKeyPairByChannel__ = &confComputeGetKeyPairByChannel_GH100;
}
// default
else
{
pThis->__confComputeGetKeyPairByChannel__ = &confComputeGetKeyPairByChannel_46f6a7;
}
// Hal function -- confComputeTriggerKeyRotation
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */
{
pThis->__confComputeTriggerKeyRotation__ = &confComputeTriggerKeyRotation_46f6a7;
}
else
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__confComputeTriggerKeyRotation__ = &confComputeTriggerKeyRotation_GH100;
}
// default
else
{
pThis->__confComputeTriggerKeyRotation__ = &confComputeTriggerKeyRotation_56cd7a;
}
}
// Hal function -- confComputeEnableKeyRotationCallback
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */
{
pThis->__confComputeEnableKeyRotationCallback__ = &confComputeEnableKeyRotationCallback_56cd7a;
}
else
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__confComputeEnableKeyRotationCallback__ = &confComputeEnableKeyRotationCallback_GH100;
}
// default
else
{
pThis->__confComputeEnableKeyRotationCallback__ = &confComputeEnableKeyRotationCallback_56cd7a;
}
}
// Hal function -- confComputeEnableKeyRotationSupport
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */
{
pThis->__confComputeEnableKeyRotationSupport__ = &confComputeEnableKeyRotationSupport_56cd7a;
}
else
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__confComputeEnableKeyRotationSupport__ = &confComputeEnableKeyRotationSupport_GH100;
}
// default
else
{
pThis->__confComputeEnableKeyRotationSupport__ = &confComputeEnableKeyRotationSupport_56cd7a;
}
}
// Hal function -- confComputeIsDebugModeEnabled
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
View File
@@ -42,6 +42,7 @@ extern "C" {
#include "cc_drv.h"
#include "conf_compute/cc_keystore.h"
#include "kernel/gpu/fifo/kernel_channel.h"
#include "kernel/gpu/fifo/kernel_fifo.h"
#include "kernel/gpu/intr/engine_idx.h"
#include "kernel/gpu/conf_compute/ccsl_context.h"
#include "objtmr.h"
@@ -52,6 +53,28 @@
* *
****************************************************************************/
//
// Temp threshold values until we move to using
// encryption statistics buffers.
//
#define KEY_ROTATION_UPPER_THRESHOLD 30
#define KEY_ROTATION_LOWER_THRESHOLD 20
// Per-key info regarding encryption ops
typedef struct
{
NvU64 totalBytesEncrypted;
NvU64 totalEncryptOps;
} KEY_ROTATION_STATS_INFO;
// Info needed by workitem to perform key rotation
typedef struct
{
NvU32 h2dKey;
NvU32 d2hKey;
KEY_ROTATION_STATUS status;
} KEY_ROTATION_WORKITEM_INFO;
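To make the intent of these temporary thresholds concrete, here is a hedged sketch of the kind of comparison they support (the helper function is an assumption, not the driver's actual policy; the header does not specify the units of the thresholds):

    // Hypothetical check: has a key's aggregate encryption work crossed the
    // upper threshold? Treats the threshold as an operation count purely for
    // illustration.
    static NvBool keyNeedsRotation(const KEY_ROTATION_STATS_INFO *pStats)
    {
        return pStats->totalEncryptOps >= KEY_ROTATION_UPPER_THRESHOLD;
    }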
// Private field names are wrapped in PRIVATE_FIELD, which does nothing for
// the matching C source file, but causes diagnostics to be issued if another
@@ -78,7 +101,14 @@ struct ConfidentialCompute {
NV_STATUS (*__confComputeKeyStoreRetrieveViaChannel__)(struct ConfidentialCompute *, struct KernelChannel *, ROTATE_IV_TYPE, NvBool, CC_KMB *);
NV_STATUS (*__confComputeKeyStoreRetrieveViaKeyId__)(struct ConfidentialCompute *, NvU32, ROTATE_IV_TYPE, NvBool, CC_KMB *);
NV_STATUS (*__confComputeDeriveSecrets__)(struct ConfidentialCompute *, NvU32);
NV_STATUS (*__confComputeUpdateSecrets__)(struct ConfidentialCompute *, NvU32);
NvBool (*__confComputeIsSpdmEnabled__)(struct OBJGPU *, struct ConfidentialCompute *);
RM_ENGINE_TYPE (*__confComputeGetEngineIdFromKeySpace__)(struct ConfidentialCompute *, NvU32);
NvBool (*__confComputeGlobalKeyIsKernelPriv__)(struct ConfidentialCompute *, NvU32);
NV_STATUS (*__confComputeGetKeyPairByChannel__)(struct OBJGPU *, struct ConfidentialCompute *, struct KernelChannel *, NvU32 *, NvU32 *);
NV_STATUS (*__confComputeTriggerKeyRotation__)(struct OBJGPU *, struct ConfidentialCompute *);
NV_STATUS (*__confComputeEnableKeyRotationCallback__)(struct OBJGPU *, struct ConfidentialCompute *, NvBool);
NV_STATUS (*__confComputeEnableKeyRotationSupport__)(struct OBJGPU *, struct ConfidentialCompute *);
NvBool (*__confComputeIsDebugModeEnabled__)(struct OBJGPU *, struct ConfidentialCompute *);
NvBool (*__confComputeIsGpuCcCapable__)(struct OBJGPU *, struct ConfidentialCompute *);
NV_STATUS (*__confComputeEstablishSpdmSessionAndKeys__)(struct OBJGPU *, struct ConfidentialCompute *);
@@ -106,6 +136,8 @@ struct ConfidentialCompute {
NvBool PDB_PROP_CONFCOMPUTE_GPUS_READY_CHECK_ENABLED;
NvBool PDB_PROP_CONFCOMPUTE_SPDM_ENABLED;
NvBool PDB_PROP_CONFCOMPUTE_MULTI_GPU_PROTECTED_PCIE_MODE_ENABLED;
NvBool PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED;
NvBool PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED;
NvU32 gspProxyRegkeys;
struct Spdm *pSpdm;
NV2080_CTRL_INTERNAL_CONF_COMPUTE_GET_STATIC_INFO_PARAMS ccStaticInfo;
@@ -113,11 +145,21 @@ struct ConfidentialCompute {
struct ccslContext_t *pDmaCcslCtx;
struct ccslContext_t *pNonReplayableFaultCcslCtx;
struct ccslContext_t *pReplayableFaultCcslCtx;
struct ccslContext_t *pGspSec2RpcCcslCtx;
NvU32 keyRotationCallbackCount;
NvU32 keyRotationChannelRefCount;
NvBool bAcceptClientRequest;
PTMR_EVENT pGspHeartbeatTimer;
NvU32 heartbeatPeriodSec;
NvU32 keyRotationEnableMask;
KEY_ROTATION_STATS_INFO lowerThreshold;
KEY_ROTATION_STATS_INFO upperThreshold;
NvU8 PRIVATE_FIELD(m_exportMasterKey)[32];
void *PRIVATE_FIELD(m_keySlot);
KEY_ROTATION_STATUS PRIVATE_FIELD(keyRotationState)[62];
KEY_ROTATION_STATS_INFO PRIVATE_FIELD(aggregateStats)[62];
KEY_ROTATION_STATS_INFO PRIVATE_FIELD(freedChannelAggregateStats)[62];
PTMR_EVENT PRIVATE_FIELD(ppKeyRotationTimer)[62];
};
#ifndef __NVOC_CLASS_ConfidentialCompute_TYPEDEF__
@@ -141,12 +183,16 @@ extern const struct NVOC_CLASS_DEF __nvoc_class_def_ConfidentialCompute;
((ConfidentialCompute*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(ConfidentialCompute)))
#endif //__nvoc_conf_compute_h_disabled
#define PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED_BASE_CAST
#define PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED_BASE_NAME PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED
#define PDB_PROP_CONFCOMPUTE_APM_FEATURE_ENABLED_BASE_CAST
#define PDB_PROP_CONFCOMPUTE_APM_FEATURE_ENABLED_BASE_NAME PDB_PROP_CONFCOMPUTE_APM_FEATURE_ENABLED
#define PDB_PROP_CONFCOMPUTE_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
#define PDB_PROP_CONFCOMPUTE_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
#define PDB_PROP_CONFCOMPUTE_ENABLE_EARLY_INIT_BASE_CAST
#define PDB_PROP_CONFCOMPUTE_ENABLE_EARLY_INIT_BASE_NAME PDB_PROP_CONFCOMPUTE_ENABLE_EARLY_INIT
#define PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED_BASE_CAST
#define PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED_BASE_NAME PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED
#define PDB_PROP_CONFCOMPUTE_GPUS_READY_CHECK_ENABLED_BASE_CAST
#define PDB_PROP_CONFCOMPUTE_GPUS_READY_CHECK_ENABLED_BASE_NAME PDB_PROP_CONFCOMPUTE_GPUS_READY_CHECK_ENABLED
#define PDB_PROP_CONFCOMPUTE_ENABLED_BASE_CAST
@@ -177,14 +223,28 @@ NV_STATUS __nvoc_objCreate_ConfidentialCompute(ConfidentialCompute**, Dynamic*,
#define confComputeStatePreUnload_HAL(pGpu, pConfCompute, flags) confComputeStatePreUnload_DISPATCH(pGpu, pConfCompute, flags)
#define confComputeSetErrorState(pGpu, pConfCompute) confComputeSetErrorState_DISPATCH(pGpu, pConfCompute)
#define confComputeSetErrorState_HAL(pGpu, pConfCompute) confComputeSetErrorState_DISPATCH(pGpu, pConfCompute)
-#define confComputeKeyStoreRetrieveViaChannel(pConfCompute, pKernelChannel, rotateOperation, includeSecrets, keyMaterialBundle) confComputeKeyStoreRetrieveViaChannel_DISPATCH(pConfCompute, pKernelChannel, rotateOperation, includeSecrets, keyMaterialBundle)
+#define confComputeKeyStoreRetrieveViaChannel(pConfCompute, pKernelChannel, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle) confComputeKeyStoreRetrieveViaChannel_DISPATCH(pConfCompute, pKernelChannel, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle)
-#define confComputeKeyStoreRetrieveViaChannel_HAL(pConfCompute, pKernelChannel, rotateOperation, includeSecrets, keyMaterialBundle) confComputeKeyStoreRetrieveViaChannel_DISPATCH(pConfCompute, pKernelChannel, rotateOperation, includeSecrets, keyMaterialBundle)
+#define confComputeKeyStoreRetrieveViaChannel_HAL(pConfCompute, pKernelChannel, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle) confComputeKeyStoreRetrieveViaChannel_DISPATCH(pConfCompute, pKernelChannel, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle)
-#define confComputeKeyStoreRetrieveViaKeyId(pConfCompute, globalKeyId, rotateOperation, includeSecrets, keyMaterialBundle) confComputeKeyStoreRetrieveViaKeyId_DISPATCH(pConfCompute, globalKeyId, rotateOperation, includeSecrets, keyMaterialBundle)
+#define confComputeKeyStoreRetrieveViaKeyId(pConfCompute, globalKeyId, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle) confComputeKeyStoreRetrieveViaKeyId_DISPATCH(pConfCompute, globalKeyId, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle)
-#define confComputeKeyStoreRetrieveViaKeyId_HAL(pConfCompute, globalKeyId, rotateOperation, includeSecrets, keyMaterialBundle) confComputeKeyStoreRetrieveViaKeyId_DISPATCH(pConfCompute, globalKeyId, rotateOperation, includeSecrets, keyMaterialBundle)
+#define confComputeKeyStoreRetrieveViaKeyId_HAL(pConfCompute, globalKeyId, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle) confComputeKeyStoreRetrieveViaKeyId_DISPATCH(pConfCompute, globalKeyId, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle)
#define confComputeDeriveSecrets(pConfCompute, engine) confComputeDeriveSecrets_DISPATCH(pConfCompute, engine)
#define confComputeDeriveSecrets_HAL(pConfCompute, engine) confComputeDeriveSecrets_DISPATCH(pConfCompute, engine)
#define confComputeUpdateSecrets(pConfCompute, globalKeyId) confComputeUpdateSecrets_DISPATCH(pConfCompute, globalKeyId)
#define confComputeUpdateSecrets_HAL(pConfCompute, globalKeyId) confComputeUpdateSecrets_DISPATCH(pConfCompute, globalKeyId)
#define confComputeIsSpdmEnabled(pGpu, pConfCompute) confComputeIsSpdmEnabled_DISPATCH(pGpu, pConfCompute)
#define confComputeIsSpdmEnabled_HAL(pGpu, pConfCompute) confComputeIsSpdmEnabled_DISPATCH(pGpu, pConfCompute)
#define confComputeGetEngineIdFromKeySpace(pConfCompute, keySpace) confComputeGetEngineIdFromKeySpace_DISPATCH(pConfCompute, keySpace)
#define confComputeGetEngineIdFromKeySpace_HAL(pConfCompute, keySpace) confComputeGetEngineIdFromKeySpace_DISPATCH(pConfCompute, keySpace)
#define confComputeGlobalKeyIsKernelPriv(pConfCompute, keyId) confComputeGlobalKeyIsKernelPriv_DISPATCH(pConfCompute, keyId)
#define confComputeGlobalKeyIsKernelPriv_HAL(pConfCompute, keyId) confComputeGlobalKeyIsKernelPriv_DISPATCH(pConfCompute, keyId)
#define confComputeGetKeyPairByChannel(pGpu, pConfCompute, pKernelChannel, pH2DKey, pD2HKey) confComputeGetKeyPairByChannel_DISPATCH(pGpu, pConfCompute, pKernelChannel, pH2DKey, pD2HKey)
#define confComputeGetKeyPairByChannel_HAL(pGpu, pConfCompute, pKernelChannel, pH2DKey, pD2HKey) confComputeGetKeyPairByChannel_DISPATCH(pGpu, pConfCompute, pKernelChannel, pH2DKey, pD2HKey)
#define confComputeTriggerKeyRotation(pGpu, pConfCompute) confComputeTriggerKeyRotation_DISPATCH(pGpu, pConfCompute)
#define confComputeTriggerKeyRotation_HAL(pGpu, pConfCompute) confComputeTriggerKeyRotation_DISPATCH(pGpu, pConfCompute)
#define confComputeEnableKeyRotationCallback(pGpu, pConfCompute, bEnable) confComputeEnableKeyRotationCallback_DISPATCH(pGpu, pConfCompute, bEnable)
#define confComputeEnableKeyRotationCallback_HAL(pGpu, pConfCompute, bEnable) confComputeEnableKeyRotationCallback_DISPATCH(pGpu, pConfCompute, bEnable)
#define confComputeEnableKeyRotationSupport(pGpu, pConfCompute) confComputeEnableKeyRotationSupport_DISPATCH(pGpu, pConfCompute)
#define confComputeEnableKeyRotationSupport_HAL(pGpu, pConfCompute) confComputeEnableKeyRotationSupport_DISPATCH(pGpu, pConfCompute)
#define confComputeIsDebugModeEnabled(pGpu, pConfCompute) confComputeIsDebugModeEnabled_DISPATCH(pGpu, pConfCompute)
#define confComputeIsDebugModeEnabled_HAL(pGpu, pConfCompute) confComputeIsDebugModeEnabled_DISPATCH(pGpu, pConfCompute)
#define confComputeIsGpuCcCapable(pGpu, pConfCompute) confComputeIsGpuCcCapable_DISPATCH(pGpu, pConfCompute)
@@ -290,24 +350,24 @@ static inline void confComputeSetErrorState_DISPATCH(struct OBJGPU *pGpu, struct
pConfCompute->__confComputeSetErrorState__(pGpu, pConfCompute);
}
-NV_STATUS confComputeKeyStoreRetrieveViaChannel_GH100(struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation, NvBool includeSecrets, CC_KMB *keyMaterialBundle);
+NV_STATUS confComputeKeyStoreRetrieveViaChannel_GH100(struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation, NvBool bIncludeIvOrNonce, CC_KMB *keyMaterialBundle);
-static inline NV_STATUS confComputeKeyStoreRetrieveViaChannel_46f6a7(struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation, NvBool includeSecrets, CC_KMB *keyMaterialBundle) {
+static inline NV_STATUS confComputeKeyStoreRetrieveViaChannel_46f6a7(struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation, NvBool bIncludeIvOrNonce, CC_KMB *keyMaterialBundle) {
return NV_ERR_NOT_SUPPORTED;
}
-static inline NV_STATUS confComputeKeyStoreRetrieveViaChannel_DISPATCH(struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation, NvBool includeSecrets, CC_KMB *keyMaterialBundle) {
+static inline NV_STATUS confComputeKeyStoreRetrieveViaChannel_DISPATCH(struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation, NvBool bIncludeIvOrNonce, CC_KMB *keyMaterialBundle) {
-return pConfCompute->__confComputeKeyStoreRetrieveViaChannel__(pConfCompute, pKernelChannel, rotateOperation, includeSecrets, keyMaterialBundle);
+return pConfCompute->__confComputeKeyStoreRetrieveViaChannel__(pConfCompute, pKernelChannel, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle);
}
-NV_STATUS confComputeKeyStoreRetrieveViaKeyId_GH100(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId, ROTATE_IV_TYPE rotateOperation, NvBool includeSecrets, CC_KMB *keyMaterialBundle);
+NV_STATUS confComputeKeyStoreRetrieveViaKeyId_GH100(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId, ROTATE_IV_TYPE rotateOperation, NvBool bIncludeIvOrNonce, CC_KMB *keyMaterialBundle);
-static inline NV_STATUS confComputeKeyStoreRetrieveViaKeyId_46f6a7(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId, ROTATE_IV_TYPE rotateOperation, NvBool includeSecrets, CC_KMB *keyMaterialBundle) {
+static inline NV_STATUS confComputeKeyStoreRetrieveViaKeyId_46f6a7(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId, ROTATE_IV_TYPE rotateOperation, NvBool bIncludeIvOrNonce, CC_KMB *keyMaterialBundle) {
return NV_ERR_NOT_SUPPORTED;
}
-static inline NV_STATUS confComputeKeyStoreRetrieveViaKeyId_DISPATCH(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId, ROTATE_IV_TYPE rotateOperation, NvBool includeSecrets, CC_KMB *keyMaterialBundle) {
+static inline NV_STATUS confComputeKeyStoreRetrieveViaKeyId_DISPATCH(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId, ROTATE_IV_TYPE rotateOperation, NvBool bIncludeIvOrNonce, CC_KMB *keyMaterialBundle) {
-return pConfCompute->__confComputeKeyStoreRetrieveViaKeyId__(pConfCompute, globalKeyId, rotateOperation, includeSecrets, keyMaterialBundle);
+return pConfCompute->__confComputeKeyStoreRetrieveViaKeyId__(pConfCompute, globalKeyId, rotateOperation, bIncludeIvOrNonce, keyMaterialBundle);
}
NV_STATUS confComputeDeriveSecrets_GH100(struct ConfidentialCompute *pConfCompute, NvU32 engine);
@@ -320,6 +380,16 @@ static inline NV_STATUS confComputeDeriveSecrets_DISPATCH(struct ConfidentialCom
return pConfCompute->__confComputeDeriveSecrets__(pConfCompute, engine);
}
NV_STATUS confComputeUpdateSecrets_GH100(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId);
static inline NV_STATUS confComputeUpdateSecrets_46f6a7(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId) {
return NV_ERR_NOT_SUPPORTED;
}
static inline NV_STATUS confComputeUpdateSecrets_DISPATCH(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId) {
return pConfCompute->__confComputeUpdateSecrets__(pConfCompute, globalKeyId);
}
static inline NvBool confComputeIsSpdmEnabled_cbe027(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute) {
return ((NvBool)(0 == 0));
}
@@ -332,6 +402,70 @@ static inline NvBool confComputeIsSpdmEnabled_DISPATCH(struct OBJGPU *pGpu, stru
return pConfCompute->__confComputeIsSpdmEnabled__(pGpu, pConfCompute);
}
RM_ENGINE_TYPE confComputeGetEngineIdFromKeySpace_GH100(struct ConfidentialCompute *pConfCompute, NvU32 keySpace);
static inline RM_ENGINE_TYPE confComputeGetEngineIdFromKeySpace_78ac8b(struct ConfidentialCompute *pConfCompute, NvU32 keySpace) {
return RM_ENGINE_TYPE_NULL;
}
static inline RM_ENGINE_TYPE confComputeGetEngineIdFromKeySpace_DISPATCH(struct ConfidentialCompute *pConfCompute, NvU32 keySpace) {
return pConfCompute->__confComputeGetEngineIdFromKeySpace__(pConfCompute, keySpace);
}
NvBool confComputeGlobalKeyIsKernelPriv_GH100(struct ConfidentialCompute *pConfCompute, NvU32 keyId);
static inline NvBool confComputeGlobalKeyIsKernelPriv_491d52(struct ConfidentialCompute *pConfCompute, NvU32 keyId) {
return ((NvBool)(0 != 0));
}
static inline NvBool confComputeGlobalKeyIsKernelPriv_DISPATCH(struct ConfidentialCompute *pConfCompute, NvU32 keyId) {
return pConfCompute->__confComputeGlobalKeyIsKernelPriv__(pConfCompute, keyId);
}
NV_STATUS confComputeGetKeyPairByChannel_GH100(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, NvU32 *pH2DKey, NvU32 *pD2HKey);
static inline NV_STATUS confComputeGetKeyPairByChannel_46f6a7(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, NvU32 *pH2DKey, NvU32 *pD2HKey) {
return NV_ERR_NOT_SUPPORTED;
}
static inline NV_STATUS confComputeGetKeyPairByChannel_DISPATCH(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, NvU32 *pH2DKey, NvU32 *pD2HKey) {
return pConfCompute->__confComputeGetKeyPairByChannel__(pGpu, pConfCompute, pKernelChannel, pH2DKey, pD2HKey);
}
NV_STATUS confComputeTriggerKeyRotation_GH100(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute);
static inline NV_STATUS confComputeTriggerKeyRotation_56cd7a(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute) {
return NV_OK;
}
static inline NV_STATUS confComputeTriggerKeyRotation_46f6a7(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute) {
return NV_ERR_NOT_SUPPORTED;
}
static inline NV_STATUS confComputeTriggerKeyRotation_DISPATCH(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute) {
return pConfCompute->__confComputeTriggerKeyRotation__(pGpu, pConfCompute);
}
NV_STATUS confComputeEnableKeyRotationCallback_GH100(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvBool bEnable);
static inline NV_STATUS confComputeEnableKeyRotationCallback_56cd7a(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvBool bEnable) {
return NV_OK;
}
static inline NV_STATUS confComputeEnableKeyRotationCallback_DISPATCH(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvBool bEnable) {
return pConfCompute->__confComputeEnableKeyRotationCallback__(pGpu, pConfCompute, bEnable);
}
NV_STATUS confComputeEnableKeyRotationSupport_GH100(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute);
static inline NV_STATUS confComputeEnableKeyRotationSupport_56cd7a(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute) {
return NV_OK;
}
static inline NV_STATUS confComputeEnableKeyRotationSupport_DISPATCH(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute) {
return pConfCompute->__confComputeEnableKeyRotationSupport__(pGpu, pConfCompute);
}
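As a hedged usage sketch for these new dispatch entry points (the call site is an assumption; the resolution behavior is exactly what the NVOC wiring earlier in this commit establishes):

    // Hypothetical caller. On GH100 the macro resolves through the
    // per-object pointer to confComputeTriggerKeyRotation_GH100; on VF it
    // resolves to a stub returning NV_ERR_NOT_SUPPORTED, and on other chips
    // to a stub returning NV_OK.
    NV_STATUS status = confComputeTriggerKeyRotation(pGpu, pConfCompute);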
NvBool confComputeIsDebugModeEnabled_GH100(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute);
static inline NvBool confComputeIsDebugModeEnabled_491d52(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute) {
@@ -479,6 +613,104 @@ static inline NvBool confComputeAcceptClientRequest(struct OBJGPU *pGpu, struct
#define confComputeAcceptClientRequest(pGpu, pConfCompute) confComputeAcceptClientRequest_IMPL(pGpu, pConfCompute)
#endif //__nvoc_conf_compute_h_disabled
NV_STATUS confComputeInitChannelIterForKey_IMPL(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 globalKey, CHANNEL_ITERATOR *pIter);
#ifdef __nvoc_conf_compute_h_disabled
static inline NV_STATUS confComputeInitChannelIterForKey(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 globalKey, CHANNEL_ITERATOR *pIter) {
NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_conf_compute_h_disabled
#define confComputeInitChannelIterForKey(pGpu, pConfCompute, globalKey, pIter) confComputeInitChannelIterForKey_IMPL(pGpu, pConfCompute, globalKey, pIter)
#endif //__nvoc_conf_compute_h_disabled
NV_STATUS confComputeGetNextChannelForKey_IMPL(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, CHANNEL_ITERATOR *pIt, NvU32 globalKey, struct KernelChannel **ppKernelChannel);
#ifdef __nvoc_conf_compute_h_disabled
static inline NV_STATUS confComputeGetNextChannelForKey(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, CHANNEL_ITERATOR *pIt, NvU32 globalKey, struct KernelChannel **ppKernelChannel) {
NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_conf_compute_h_disabled
#define confComputeGetNextChannelForKey(pGpu, pConfCompute, pIt, globalKey, ppKernelChannel) confComputeGetNextChannelForKey_IMPL(pGpu, pConfCompute, pIt, globalKey, ppKernelChannel)
#endif //__nvoc_conf_compute_h_disabled
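A hedged usage sketch for the channel-iterator pair above (the loop body and surrounding function are assumptions; the iterator API is as declared):

    // Hypothetical walk over every channel using a given global key.
    CHANNEL_ITERATOR it;
    struct KernelChannel *pKernelChannel = NULL;
    NV_ASSERT_OK_OR_RETURN(confComputeInitChannelIterForKey(pGpu, pConfCompute, globalKey, &it));
    while (confComputeGetNextChannelForKey(pGpu, pConfCompute, &it, globalKey, &pKernelChannel) == NV_OK)
    {
        // e.g., accumulate this channel's encryption stats before rotating
    }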
NV_STATUS confComputeGetKeySlotFromGlobalKeyId_IMPL(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId, NvU32 *pSlot);
#ifdef __nvoc_conf_compute_h_disabled
static inline NV_STATUS confComputeGetKeySlotFromGlobalKeyId(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId, NvU32 *pSlot) {
NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_conf_compute_h_disabled
#define confComputeGetKeySlotFromGlobalKeyId(pConfCompute, globalKeyId, pSlot) confComputeGetKeySlotFromGlobalKeyId_IMPL(pConfCompute, globalKeyId, pSlot)
#endif //__nvoc_conf_compute_h_disabled
NV_STATUS confComputeCheckAndScheduleKeyRotation_IMPL(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey);
#ifdef __nvoc_conf_compute_h_disabled
static inline NV_STATUS confComputeCheckAndScheduleKeyRotation(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey) {
NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_conf_compute_h_disabled
#define confComputeCheckAndScheduleKeyRotation(pGpu, pConfCompute, h2dKey, d2hKey) confComputeCheckAndScheduleKeyRotation_IMPL(pGpu, pConfCompute, h2dKey, d2hKey)
#endif //__nvoc_conf_compute_h_disabled
NV_STATUS confComputeScheduleKeyRotationWorkItem_IMPL(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey);
#ifdef __nvoc_conf_compute_h_disabled
static inline NV_STATUS confComputeScheduleKeyRotationWorkItem(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey) {
NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_conf_compute_h_disabled
#define confComputeScheduleKeyRotationWorkItem(pGpu, pConfCompute, h2dKey, d2hKey) confComputeScheduleKeyRotationWorkItem_IMPL(pGpu, pConfCompute, h2dKey, d2hKey)
#endif //__nvoc_conf_compute_h_disabled
NV_STATUS confComputeSetKeyRotationStatus_IMPL(struct ConfidentialCompute *pConfCompute, NvU32 globalKey, KEY_ROTATION_STATUS status);
#ifdef __nvoc_conf_compute_h_disabled
static inline NV_STATUS confComputeSetKeyRotationStatus(struct ConfidentialCompute *pConfCompute, NvU32 globalKey, KEY_ROTATION_STATUS status) {
NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_conf_compute_h_disabled
#define confComputeSetKeyRotationStatus(pConfCompute, globalKey, status) confComputeSetKeyRotationStatus_IMPL(pConfCompute, globalKey, status)
#endif //__nvoc_conf_compute_h_disabled
NV_STATUS confComputeGetKeyRotationStatus_IMPL(struct ConfidentialCompute *pConfCompute, NvU32 globalKey, KEY_ROTATION_STATUS *pStatus);
#ifdef __nvoc_conf_compute_h_disabled
static inline NV_STATUS confComputeGetKeyRotationStatus(struct ConfidentialCompute *pConfCompute, NvU32 globalKey, KEY_ROTATION_STATUS *pStatus) {
NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_conf_compute_h_disabled
#define confComputeGetKeyRotationStatus(pConfCompute, globalKey, pStatus) confComputeGetKeyRotationStatus_IMPL(pConfCompute, globalKey, pStatus)
#endif //__nvoc_conf_compute_h_disabled
void confComputeGetKeyPairByKey_IMPL(struct ConfidentialCompute *pConfCompute, NvU32 globalKey, NvU32 *pH2DKey, NvU32 *pD2HKey);
#ifdef __nvoc_conf_compute_h_disabled
static inline void confComputeGetKeyPairByKey(struct ConfidentialCompute *pConfCompute, NvU32 globalKey, NvU32 *pH2DKey, NvU32 *pD2HKey) {
NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
}
#else //__nvoc_conf_compute_h_disabled
#define confComputeGetKeyPairByKey(pConfCompute, globalKey, pH2DKey, pD2HKey) confComputeGetKeyPairByKey_IMPL(pConfCompute, globalKey, pH2DKey, pD2HKey)
#endif //__nvoc_conf_compute_h_disabled
NV_STATUS confComputeUpdateFreedChannelStats_IMPL(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel);
#ifdef __nvoc_conf_compute_h_disabled
static inline NV_STATUS confComputeUpdateFreedChannelStats(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel) {
NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_conf_compute_h_disabled
#define confComputeUpdateFreedChannelStats(pGpu, pConfCompute, pKernelChannel) confComputeUpdateFreedChannelStats_IMPL(pGpu, pConfCompute, pKernelChannel)
#endif //__nvoc_conf_compute_h_disabled
#undef PRIVATE_FIELD
#ifndef NVOC_CONF_COMPUTE_H_PRIVATE_ACCESS_ALLOWED
@@ -532,6 +764,14 @@ NV_STATUS NVOC_PRIVATE_FUNCTION(confComputeKeyStoreUpdateKey_HAL)(struct Confide
 */
NV_STATUS spdmCtrlSpdmPartition(struct OBJGPU *pGpu, NV2080_CTRL_INTERNAL_SPDM_PARTITION_PARAMS *pSpdmPartitionParams);
/*!
* @brief 1Hz callback function to perform key rotation
*
* @param[in] pGpu : OBJGPU Pointer
* @param[in] data : void Pointer
*/
void confComputeKeyRotationCallback(struct OBJGPU *pGpu, void *data);
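Since the header only declares the callback, here is a deliberately speculative sketch of its shape (the body is an assumption; the real implementation lives in the conf-compute sources and may differ entirely):

    // Hypothetical body: re-evaluate rotation state on each 1Hz tick, using
    // only routines declared in this header.
    void confComputeKeyRotationCallback(struct OBJGPU *pGpu, void *data)
    {
        struct ConfidentialCompute *pConfCompute = (struct ConfidentialCompute *)data; // assumed payload
        NvU32 globalKey = CC_GKEYID_GEN(CC_KEYSPACE_GSP, 0); // placeholder key
        NvU32 h2dKey = 0, d2hKey = 0;

        confComputeGetKeyPairByKey(pConfCompute, globalKey, &h2dKey, &d2hKey);
        confComputeCheckAndScheduleKeyRotation(pGpu, pConfCompute, h2dKey, d2hKey);
    }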
// spdmCtrlWriteSharedMemory is a common function called from both Physical-RM and Kernel-RM.
// However, the SPDM module is disabled on Physical-RM and hence is declared in conf_compute.h.
View File
@@ -1329,6 +1329,26 @@ static void __nvoc_init_funcTable_KernelChannel_1(KernelChannel *pThis, RmHalspe
pThis->__kchannelRetrieveKmb__ = &kchannelRetrieveKmb_KERNEL;
}
// Hal function -- kchannelSetKeyRotationNotifier
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */
{
pThis->__kchannelSetKeyRotationNotifier__ = &kchannelSetKeyRotationNotifier_56cd7a;
}
else
{
pThis->__kchannelSetKeyRotationNotifier__ = &kchannelSetKeyRotationNotifier_KERNEL;
}
// Hal function -- kchannelSetEncryptionStatsBuffer
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */
{
pThis->__kchannelSetEncryptionStatsBuffer__ = &kchannelSetEncryptionStatsBuffer_56cd7a;
}
else
{
pThis->__kchannelSetEncryptionStatsBuffer__ = &kchannelSetEncryptionStatsBuffer_KERNEL;
}
pThis->__nvoc_base_GpuResource.__gpuresMap__ = &__nvoc_thunk_KernelChannel_gpuresMap;
pThis->__nvoc_base_GpuResource.__gpuresUnmap__ = &__nvoc_thunk_KernelChannel_gpuresUnmap;
View File
@@ -272,6 +272,8 @@ struct KernelChannel {
NV_STATUS (*__kchannelCtrlGetMMUDebugMode__)(struct KernelChannel *, NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS *);
NV_STATUS (*__kchannelCtrlProgramVidmemPromote__)(struct KernelChannel *, NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS *);
NV_STATUS (*__kchannelRetrieveKmb__)(struct OBJGPU *, struct KernelChannel *, ROTATE_IV_TYPE, NvBool, CC_KMB *);
NV_STATUS (*__kchannelSetKeyRotationNotifier__)(struct OBJGPU *, struct KernelChannel *, NvBool);
NV_STATUS (*__kchannelSetEncryptionStatsBuffer__)(struct OBJGPU *, struct KernelChannel *, NvBool);
NvBool (*__kchannelShareCallback__)(struct KernelChannel *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__kchannelGetOrAllocNotifShare__)(struct KernelChannel *, NvHandle, NvHandle, struct NotifShare **);
NV_STATUS (*__kchannelMapTo__)(struct KernelChannel *, RS_RES_MAP_TO_PARAMS *);
@@ -339,6 +341,9 @@ struct KernelChannel {
RM_ENGINE_TYPE engineType;
CC_KMB clientKmb;
MEMORY_DESCRIPTOR *pEncStatsBufMemDesc;
CC_CRYPTOBUNDLE_STATS *pEncStatsBuf;
MEMORY_DESCRIPTOR *pKeyRotationNotifierMemDesc;
NvNotification *pKeyRotationNotifier;
NvBool bCCSecureChannel;
};
@@ -434,6 +439,10 @@ NV_STATUS __nvoc_objCreate_KernelChannel(KernelChannel**, Dynamic*, NvU32, CALL_
#define kchannelCtrlProgramVidmemPromote(pKernelChannel, pParams) kchannelCtrlProgramVidmemPromote_DISPATCH(pKernelChannel, pParams)
#define kchannelRetrieveKmb(pGpu, pKernelChannel, rotateOperation, includeSecrets, keyMaterialBundle) kchannelRetrieveKmb_DISPATCH(pGpu, pKernelChannel, rotateOperation, includeSecrets, keyMaterialBundle)
#define kchannelRetrieveKmb_HAL(pGpu, pKernelChannel, rotateOperation, includeSecrets, keyMaterialBundle) kchannelRetrieveKmb_DISPATCH(pGpu, pKernelChannel, rotateOperation, includeSecrets, keyMaterialBundle)
#define kchannelSetKeyRotationNotifier(pGpu, pKernelChannel, bSet) kchannelSetKeyRotationNotifier_DISPATCH(pGpu, pKernelChannel, bSet)
#define kchannelSetKeyRotationNotifier_HAL(pGpu, pKernelChannel, bSet) kchannelSetKeyRotationNotifier_DISPATCH(pGpu, pKernelChannel, bSet)
#define kchannelSetEncryptionStatsBuffer(pGpu, pKernelChannel, bSet) kchannelSetEncryptionStatsBuffer_DISPATCH(pGpu, pKernelChannel, bSet)
#define kchannelSetEncryptionStatsBuffer_HAL(pGpu, pKernelChannel, bSet) kchannelSetEncryptionStatsBuffer_DISPATCH(pGpu, pKernelChannel, bSet)
#define kchannelShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) kchannelShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define kchannelGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) kchannelGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
#define kchannelMapTo(pResource, pParams) kchannelMapTo_DISPATCH(pResource, pParams)
@@ -1170,6 +1179,26 @@ static inline NV_STATUS kchannelRetrieveKmb_DISPATCH(struct OBJGPU *pGpu, struct
return pKernelChannel->__kchannelRetrieveKmb__(pGpu, pKernelChannel, rotateOperation, includeSecrets, keyMaterialBundle);
}
NV_STATUS kchannelSetKeyRotationNotifier_KERNEL(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvBool bSet);
static inline NV_STATUS kchannelSetKeyRotationNotifier_56cd7a(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvBool bSet) {
return NV_OK;
}
static inline NV_STATUS kchannelSetKeyRotationNotifier_DISPATCH(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvBool bSet) {
return pKernelChannel->__kchannelSetKeyRotationNotifier__(pGpu, pKernelChannel, bSet);
}
NV_STATUS kchannelSetEncryptionStatsBuffer_KERNEL(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvBool bSet);
static inline NV_STATUS kchannelSetEncryptionStatsBuffer_56cd7a(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvBool bSet) {
return NV_OK;
}
static inline NV_STATUS kchannelSetEncryptionStatsBuffer_DISPATCH(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvBool bSet) {
return pKernelChannel->__kchannelSetEncryptionStatsBuffer__(pGpu, pKernelChannel, bSet);
}
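A hedged sketch of how a caller might wire up the new per-channel key-rotation plumbing (the setup sequence is an assumption; only the two dispatch macros come from this header):

    // Hypothetical setup for a CC-secure channel: map the key-rotation
    // notifier and the encryption-stats buffer; pass NV_FALSE to tear down.
    NV_ASSERT_OK_OR_RETURN(kchannelSetKeyRotationNotifier(pGpu, pKernelChannel, NV_TRUE));
    NV_ASSERT_OK_OR_RETURN(kchannelSetEncryptionStatsBuffer(pGpu, pKernelChannel, NV_TRUE));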
static inline NvBool kchannelShareCallback_DISPATCH(struct KernelChannel *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pGpuResource->__kchannelShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}
@@ -1336,14 +1365,25 @@ static inline NV_STATUS kchannelDeregisterChild(struct KernelChannel *pKernelCha
#define kchannelDeregisterChild(pKernelChannel, pObject) kchannelDeregisterChild_IMPL(pKernelChannel, pObject)
#endif //__nvoc_kernel_channel_h_disabled
-void kchannelNotifyGeneric_IMPL(struct KernelChannel *pKernelChannel, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize);
+void kchannelNotifyEvent_IMPL(struct KernelChannel *pKernelChannel, NvU32 notifyIndex, NvU32 info32, NvU16 info16, void *pNotifyParams, NvU32 notifyParamsSize);
#ifdef __nvoc_kernel_channel_h_disabled
-static inline void kchannelNotifyGeneric(struct KernelChannel *pKernelChannel, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize) {
+static inline void kchannelNotifyEvent(struct KernelChannel *pKernelChannel, NvU32 notifyIndex, NvU32 info32, NvU16 info16, void *pNotifyParams, NvU32 notifyParamsSize) {
NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!");
}
#else //__nvoc_kernel_channel_h_disabled
-#define kchannelNotifyGeneric(pKernelChannel, notifyIndex, pNotifyParams, notifyParamsSize) kchannelNotifyGeneric_IMPL(pKernelChannel, notifyIndex, pNotifyParams, notifyParamsSize)
+#define kchannelNotifyEvent(pKernelChannel, notifyIndex, info32, info16, pNotifyParams, notifyParamsSize) kchannelNotifyEvent_IMPL(pKernelChannel, notifyIndex, info32, info16, pNotifyParams, notifyParamsSize)
#endif //__nvoc_kernel_channel_h_disabled
NV_STATUS kchannelUpdateNotifierMem_IMPL(struct KernelChannel *pKernelChannel, NvU32 notifyIndex, NvU32 info32, NvU16 info16, NvU32 notifierStatus);
#ifdef __nvoc_kernel_channel_h_disabled
static inline NV_STATUS kchannelUpdateNotifierMem(struct KernelChannel *pKernelChannel, NvU32 notifyIndex, NvU32 info32, NvU16 info16, NvU32 notifierStatus) {
NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_channel_h_disabled
#define kchannelUpdateNotifierMem(pKernelChannel, notifyIndex, info32, info16, notifierStatus) kchannelUpdateNotifierMem_IMPL(pKernelChannel, notifyIndex, info32, info16, notifierStatus)
#endif //__nvoc_kernel_channel_h_disabled #endif //__nvoc_kernel_channel_h_disabled
NvBool kchannelCheckIsUserMode_IMPL(struct KernelChannel *pKernelChannel); NvBool kchannelCheckIsUserMode_IMPL(struct KernelChannel *pKernelChannel);
@@ -1523,6 +1563,18 @@ NV_STATUS NVOC_PRIVATE_FUNCTION(kchannelRetrieveKmb)(struct OBJGPU *pGpu, struct
#undef kchannelRetrieveKmb_HAL
NV_STATUS NVOC_PRIVATE_FUNCTION(kchannelRetrieveKmb_HAL)(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation, NvBool includeSecrets, CC_KMB *keyMaterialBundle);

+#undef kchannelSetKeyRotationNotifier
+NV_STATUS NVOC_PRIVATE_FUNCTION(kchannelSetKeyRotationNotifier)(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvBool bSet);
+
+#undef kchannelSetKeyRotationNotifier_HAL
+NV_STATUS NVOC_PRIVATE_FUNCTION(kchannelSetKeyRotationNotifier_HAL)(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvBool bSet);
+
+#undef kchannelSetEncryptionStatsBuffer
+NV_STATUS NVOC_PRIVATE_FUNCTION(kchannelSetEncryptionStatsBuffer)(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvBool bSet);
+
+#undef kchannelSetEncryptionStatsBuffer_HAL
+NV_STATUS NVOC_PRIVATE_FUNCTION(kchannelSetEncryptionStatsBuffer_HAL)(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvBool bSet);
+
#ifndef __nvoc_kernel_channel_h_disabled
#undef kchannelRotateSecureChannelIv
NV_STATUS NVOC_PRIVATE_FUNCTION(kchannelRotateSecureChannelIv)(struct KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation, NvU32 *encryptIv, NvU32 *decryptIv);


@@ -1847,14 +1847,14 @@ static inline NvU32 kfifoGetRunlistChannelGroupsInUse(struct OBJGPU *pGpu, struc
#define kfifoGetRunlistChannelGroupsInUse(pGpu, pKernelFifo, runlistId) kfifoGetRunlistChannelGroupsInUse_IMPL(pGpu, pKernelFifo, runlistId)
#endif //__nvoc_kernel_fifo_h_disabled

-void kfifoGetChannelIterator_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt);
+void kfifoGetChannelIterator_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt, NvU32 runlistId);
#ifdef __nvoc_kernel_fifo_h_disabled
-static inline void kfifoGetChannelIterator(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt) {
+static inline void kfifoGetChannelIterator(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt, NvU32 runlistId) {
    NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!");
}
#else //__nvoc_kernel_fifo_h_disabled
-#define kfifoGetChannelIterator(pGpu, pKernelFifo, pIt) kfifoGetChannelIterator_IMPL(pGpu, pKernelFifo, pIt)
+#define kfifoGetChannelIterator(pGpu, pKernelFifo, pIt, runlistId) kfifoGetChannelIterator_IMPL(pGpu, pKernelFifo, pIt, runlistId)
#endif //__nvoc_kernel_fifo_h_disabled

NV_STATUS kfifoGetNextKernelChannel_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt, struct KernelChannel **ppKernelChannel);
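The iterator now takes a runlistId, so channel walks can be scoped to a single runlist instead of the whole GPU. A minimal caller sketch under stated assumptions: the kfifoGetNextKernelChannel wrapper is assumed to follow the same #define pattern as above, and an "iterate everything" sentinel such as INVALID_RUNLIST_ID is assumed to exist for callers that do not want the filter.

    // Hypothetical caller: count the channels on one runlist.
    // Assumes the usual RM locking rules are already satisfied.
    static NvU32 countChannelsOnRunlist(OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId)
    {
        CHANNEL_ITERATOR it;
        struct KernelChannel *pKernelChannel = NULL;
        NvU32 count = 0;

        kfifoGetChannelIterator(pGpu, pKernelFifo, &it, runlistId);
        while (kfifoGetNextKernelChannel(pGpu, pKernelFifo, &it, &pKernelChannel) == NV_OK)
        {
            count++; // pKernelChannel points at the current channel here
        }
        return count;
    }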


@@ -257,6 +257,8 @@ void __nvoc_init_dataField_KernelNvlink(KernelNvlink *pThis, RmHalspecOwner *pRm
    }
    pThis->fabricBaseAddr = (+18446744073709551615ULL);
+    pThis->fabricEgmBaseAddr = (+18446744073709551615ULL);
}

NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* );
@@ -325,6 +327,28 @@ static void __nvoc_init_funcTable_KernelNvlink_1(KernelNvlink *pThis, RmHalspecO
        pThis->__knvlinkClearUniqueFabricBaseAddress__ = &knvlinkClearUniqueFabricBaseAddress_b3696a;
    }

+    // Hal function -- knvlinkSetUniqueFabricEgmBaseAddress
+    if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
+    {
+        pThis->__knvlinkSetUniqueFabricEgmBaseAddress__ = &knvlinkSetUniqueFabricEgmBaseAddress_GH100;
+    }
+    // default
+    else
+    {
+        pThis->__knvlinkSetUniqueFabricEgmBaseAddress__ = &knvlinkSetUniqueFabricEgmBaseAddress_46f6a7;
+    }
+
+    // Hal function -- knvlinkClearUniqueFabricEgmBaseAddress
+    if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
+    {
+        pThis->__knvlinkClearUniqueFabricEgmBaseAddress__ = &knvlinkClearUniqueFabricEgmBaseAddress_GH100;
+    }
+    // default
+    else
+    {
+        pThis->__knvlinkClearUniqueFabricEgmBaseAddress__ = &knvlinkClearUniqueFabricEgmBaseAddress_b3696a;
+    }
    // Hal function -- knvlinkHandleFaultUpInterrupt
    if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
    {
@@ -351,6 +375,17 @@ static void __nvoc_init_funcTable_KernelNvlink_1(KernelNvlink *pThis, RmHalspecO
        pThis->__knvlinkValidateFabricBaseAddress__ = &knvlinkValidateFabricBaseAddress_46f6a7;
    }

+    // Hal function -- knvlinkValidateFabricEgmBaseAddress
+    if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
+    {
+        pThis->__knvlinkValidateFabricEgmBaseAddress__ = &knvlinkValidateFabricEgmBaseAddress_GH100;
+    }
+    // default
+    else
+    {
+        pThis->__knvlinkValidateFabricEgmBaseAddress__ = &knvlinkValidateFabricEgmBaseAddress_46f6a7;
+    }
+
    // Hal function -- knvlinkGetConnectedLinksMask
    if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000380UL) )) /* ChipHal: TU106 | TU116 | TU117 | GH100 */
    {


@@ -235,8 +235,11 @@ struct KernelNvlink {
    NvBool (*__knvlinkIsPresent__)(struct OBJGPU *, struct KernelNvlink *);
    NV_STATUS (*__knvlinkSetUniqueFabricBaseAddress__)(struct OBJGPU *, struct KernelNvlink *, NvU64);
    void (*__knvlinkClearUniqueFabricBaseAddress__)(struct OBJGPU *, struct KernelNvlink *);
+    NV_STATUS (*__knvlinkSetUniqueFabricEgmBaseAddress__)(struct OBJGPU *, struct KernelNvlink *, NvU64);
+    void (*__knvlinkClearUniqueFabricEgmBaseAddress__)(struct OBJGPU *, struct KernelNvlink *);
    NV_STATUS (*__knvlinkHandleFaultUpInterrupt__)(struct OBJGPU *, struct KernelNvlink *, NvU32);
    NV_STATUS (*__knvlinkValidateFabricBaseAddress__)(struct OBJGPU *, struct KernelNvlink *, NvU64);
+    NV_STATUS (*__knvlinkValidateFabricEgmBaseAddress__)(struct OBJGPU *, struct KernelNvlink *, NvU64);
    NvU32 (*__knvlinkGetConnectedLinksMask__)(struct OBJGPU *, struct KernelNvlink *);
    NV_STATUS (*__knvlinkEnableLinksPostTopology__)(struct OBJGPU *, struct KernelNvlink *, NvU32);
    NV_STATUS (*__knvlinkOverrideConfig__)(struct OBJGPU *, struct KernelNvlink *, NvU32);
@@ -330,6 +333,7 @@ struct KernelNvlink {
    NvU32 PRIVATE_FIELD(errorRecoveries)[18];
    NvBool PRIVATE_FIELD(bNvswitchProxy);
    NvU64 PRIVATE_FIELD(fabricBaseAddr);
+    NvU64 PRIVATE_FIELD(fabricEgmBaseAddr);
};

struct KernelNvlink_PRIVATE {
@@ -347,8 +351,11 @@ struct KernelNvlink_PRIVATE {
    NvBool (*__knvlinkIsPresent__)(struct OBJGPU *, struct KernelNvlink *);
    NV_STATUS (*__knvlinkSetUniqueFabricBaseAddress__)(struct OBJGPU *, struct KernelNvlink *, NvU64);
    void (*__knvlinkClearUniqueFabricBaseAddress__)(struct OBJGPU *, struct KernelNvlink *);
+    NV_STATUS (*__knvlinkSetUniqueFabricEgmBaseAddress__)(struct OBJGPU *, struct KernelNvlink *, NvU64);
+    void (*__knvlinkClearUniqueFabricEgmBaseAddress__)(struct OBJGPU *, struct KernelNvlink *);
    NV_STATUS (*__knvlinkHandleFaultUpInterrupt__)(struct OBJGPU *, struct KernelNvlink *, NvU32);
    NV_STATUS (*__knvlinkValidateFabricBaseAddress__)(struct OBJGPU *, struct KernelNvlink *, NvU64);
+    NV_STATUS (*__knvlinkValidateFabricEgmBaseAddress__)(struct OBJGPU *, struct KernelNvlink *, NvU64);
    NvU32 (*__knvlinkGetConnectedLinksMask__)(struct OBJGPU *, struct KernelNvlink *);
    NV_STATUS (*__knvlinkEnableLinksPostTopology__)(struct OBJGPU *, struct KernelNvlink *, NvU32);
    NV_STATUS (*__knvlinkOverrideConfig__)(struct OBJGPU *, struct KernelNvlink *, NvU32);
@@ -442,6 +449,7 @@ struct KernelNvlink_PRIVATE {
    NvU32 errorRecoveries[18];
    NvBool bNvswitchProxy;
    NvU64 fabricBaseAddr;
+    NvU64 fabricEgmBaseAddr;
};

#ifndef __NVOC_CLASS_KernelNvlink_TYPEDEF__
@@ -515,10 +523,16 @@ NV_STATUS __nvoc_objCreate_KernelNvlink(KernelNvlink**, Dynamic*, NvU32);
#define knvlinkSetUniqueFabricBaseAddress_HAL(pGpu, pKernelNvlink, arg0) knvlinkSetUniqueFabricBaseAddress_DISPATCH(pGpu, pKernelNvlink, arg0)
#define knvlinkClearUniqueFabricBaseAddress(pGpu, pKernelNvlink) knvlinkClearUniqueFabricBaseAddress_DISPATCH(pGpu, pKernelNvlink)
#define knvlinkClearUniqueFabricBaseAddress_HAL(pGpu, pKernelNvlink) knvlinkClearUniqueFabricBaseAddress_DISPATCH(pGpu, pKernelNvlink)
+#define knvlinkSetUniqueFabricEgmBaseAddress(pGpu, pKernelNvlink, arg0) knvlinkSetUniqueFabricEgmBaseAddress_DISPATCH(pGpu, pKernelNvlink, arg0)
+#define knvlinkSetUniqueFabricEgmBaseAddress_HAL(pGpu, pKernelNvlink, arg0) knvlinkSetUniqueFabricEgmBaseAddress_DISPATCH(pGpu, pKernelNvlink, arg0)
+#define knvlinkClearUniqueFabricEgmBaseAddress(pGpu, pKernelNvlink) knvlinkClearUniqueFabricEgmBaseAddress_DISPATCH(pGpu, pKernelNvlink)
+#define knvlinkClearUniqueFabricEgmBaseAddress_HAL(pGpu, pKernelNvlink) knvlinkClearUniqueFabricEgmBaseAddress_DISPATCH(pGpu, pKernelNvlink)
#define knvlinkHandleFaultUpInterrupt(pGpu, pKernelNvlink, arg0) knvlinkHandleFaultUpInterrupt_DISPATCH(pGpu, pKernelNvlink, arg0)
#define knvlinkHandleFaultUpInterrupt_HAL(pGpu, pKernelNvlink, arg0) knvlinkHandleFaultUpInterrupt_DISPATCH(pGpu, pKernelNvlink, arg0)
#define knvlinkValidateFabricBaseAddress(pGpu, pKernelNvlink, arg0) knvlinkValidateFabricBaseAddress_DISPATCH(pGpu, pKernelNvlink, arg0)
#define knvlinkValidateFabricBaseAddress_HAL(pGpu, pKernelNvlink, arg0) knvlinkValidateFabricBaseAddress_DISPATCH(pGpu, pKernelNvlink, arg0)
+#define knvlinkValidateFabricEgmBaseAddress(pGpu, pKernelNvlink, arg0) knvlinkValidateFabricEgmBaseAddress_DISPATCH(pGpu, pKernelNvlink, arg0)
+#define knvlinkValidateFabricEgmBaseAddress_HAL(pGpu, pKernelNvlink, arg0) knvlinkValidateFabricEgmBaseAddress_DISPATCH(pGpu, pKernelNvlink, arg0)
#define knvlinkGetConnectedLinksMask(pGpu, pKernelNvlink) knvlinkGetConnectedLinksMask_DISPATCH(pGpu, pKernelNvlink)
#define knvlinkGetConnectedLinksMask_HAL(pGpu, pKernelNvlink) knvlinkGetConnectedLinksMask_DISPATCH(pGpu, pKernelNvlink)
#define knvlinkEnableLinksPostTopology(pGpu, pKernelNvlink, arg0) knvlinkEnableLinksPostTopology_DISPATCH(pGpu, pKernelNvlink, arg0)
@@ -1366,6 +1380,23 @@ static inline NvU64 knvlinkGetUniqueFabricBaseAddress(struct OBJGPU *pGpu, struc
#define knvlinkGetUniqueFabricBaseAddress_HAL(pGpu, pKernelNvlink) knvlinkGetUniqueFabricBaseAddress(pGpu, pKernelNvlink)

+static inline NvU64 knvlinkGetUniqueFabricEgmBaseAddress_4de472(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) {
+    struct KernelNvlink_PRIVATE *pKernelNvlink_PRIVATE = (struct KernelNvlink_PRIVATE *)pKernelNvlink;
+    return pKernelNvlink_PRIVATE->fabricEgmBaseAddr;
+}
+
+#ifdef __nvoc_kernel_nvlink_h_disabled
+static inline NvU64 knvlinkGetUniqueFabricEgmBaseAddress(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) {
+    NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!");
+    return 0;
+}
+#else //__nvoc_kernel_nvlink_h_disabled
+#define knvlinkGetUniqueFabricEgmBaseAddress(pGpu, pKernelNvlink) knvlinkGetUniqueFabricEgmBaseAddress_4de472(pGpu, pKernelNvlink)
+#endif //__nvoc_kernel_nvlink_h_disabled
+
+#define knvlinkGetUniqueFabricEgmBaseAddress_HAL(pGpu, pKernelNvlink) knvlinkGetUniqueFabricEgmBaseAddress(pGpu, pKernelNvlink)
+
NV_STATUS knvlinkStatePostLoadHal_GV100(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink);
@@ -1499,6 +1530,26 @@ static inline void knvlinkClearUniqueFabricBaseAddress_DISPATCH(struct OBJGPU *p
    pKernelNvlink->__knvlinkClearUniqueFabricBaseAddress__(pGpu, pKernelNvlink);
}

+NV_STATUS knvlinkSetUniqueFabricEgmBaseAddress_GH100(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0);
+
+static inline NV_STATUS knvlinkSetUniqueFabricEgmBaseAddress_46f6a7(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+static inline NV_STATUS knvlinkSetUniqueFabricEgmBaseAddress_DISPATCH(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0) {
+    return pKernelNvlink->__knvlinkSetUniqueFabricEgmBaseAddress__(pGpu, pKernelNvlink, arg0);
+}
+
+static inline void knvlinkClearUniqueFabricEgmBaseAddress_b3696a(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) {
+    return;
+}
+
+void knvlinkClearUniqueFabricEgmBaseAddress_GH100(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink);
+
+static inline void knvlinkClearUniqueFabricEgmBaseAddress_DISPATCH(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) {
+    pKernelNvlink->__knvlinkClearUniqueFabricEgmBaseAddress__(pGpu, pKernelNvlink);
+}
+
NV_STATUS knvlinkHandleFaultUpInterrupt_GH100(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0);

static inline NV_STATUS knvlinkHandleFaultUpInterrupt_46f6a7(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0) {
@@ -1521,6 +1572,16 @@ static inline NV_STATUS knvlinkValidateFabricBaseAddress_DISPATCH(struct OBJGPU
    return pKernelNvlink->__knvlinkValidateFabricBaseAddress__(pGpu, pKernelNvlink, arg0);
}

+NV_STATUS knvlinkValidateFabricEgmBaseAddress_GH100(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0);
+
+static inline NV_STATUS knvlinkValidateFabricEgmBaseAddress_46f6a7(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0) {
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+static inline NV_STATUS knvlinkValidateFabricEgmBaseAddress_DISPATCH(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0) {
+    return pKernelNvlink->__knvlinkValidateFabricEgmBaseAddress__(pGpu, pKernelNvlink, arg0);
+}
+
static inline NvU32 knvlinkGetConnectedLinksMask_15a734(struct OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) {
    return 0U;
}
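Callers never pick a chip-specific variant by hand; they go through the dispatch macro, which lands in the GH100 implementation on GH100 and in the _46f6a7 stub everywhere else. A hypothetical call site (egmBaseAddr is an assumed local):

    // Hypothetical caller; egmBaseAddr is whatever address the fabric probe returned.
    NV_STATUS status = knvlinkValidateFabricEgmBaseAddress(pGpu, pKernelNvlink, egmBaseAddr);
    if (status == NV_ERR_NOT_SUPPORTED)
    {
        // Non-GH100 chips take the default stub: EGM fabric addressing is absent.
    }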


@@ -424,6 +424,8 @@ kvgpumgrGetHostVgpuDeviceFromGfid(NvU32 gpuPciId, NvU32 gfid,
NV_STATUS
kvgpuMgrRestoreSmcExecPart(struct OBJGPU *pGpu,KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice,
                           KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance);

+NV_STATUS
+kvgpumgrSetVgpuType(struct OBJGPU *pGpu, KERNEL_PHYS_GPU_INFO *pPhysGpuInfo, NvU32 vgpuTypeId);
+
#endif // __kernel_vgpu_mgr_h__


@@ -359,7 +359,6 @@ typedef enum
    NV_FB_ALLOC_RM_INTERNAL_OWNER_UNNAMED_TAG_143       = 176U,
    NV_FB_ALLOC_RM_INTERNAL_OWNER_GSP_NOTIFY_OP_SURFACE = 177U,

-    //
    // Unused tags from here, for any new use-case it's required
    // to replace the below tags with known verbose strings
    //


@@ -797,6 +797,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
    { 0x1FF0, 0x1612, 0x17aa, "NVIDIA T1000 8GB" },
    { 0x1FF2, 0x1613, 0x1028, "NVIDIA T400 4GB" },
    { 0x1FF2, 0x1613, 0x103c, "NVIDIA T400 4GB" },
+   { 0x1FF2, 0x18ff, 0x103c, "NVIDIA T400E" },
    { 0x1FF2, 0x8a80, 0x103c, "NVIDIA T400 4GB" },
    { 0x1FF2, 0x1613, 0x10de, "NVIDIA T400 4GB" },
    { 0x1FF2, 0x18ff, 0x10de, "NVIDIA T400E" },
@@ -976,6 +977,14 @@ static const CHIPS_RELEASED sChipsReleased[] = {
    { 0x25AB, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 4GB Laptop GPU" },
    { 0x25AC, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 6GB Laptop GPU" },
    { 0x25AD, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" },
+   { 0x25B0, 0x1878, 0x1028, "NVIDIA RTX A1000" },
+   { 0x25B0, 0x1878, 0x103c, "NVIDIA RTX A1000" },
+   { 0x25B0, 0x1878, 0x10de, "NVIDIA RTX A1000" },
+   { 0x25B0, 0x1878, 0x17aa, "NVIDIA RTX A1000" },
+   { 0x25B2, 0x1879, 0x1028, "NVIDIA RTX A400" },
+   { 0x25B2, 0x1879, 0x103c, "NVIDIA RTX A400" },
+   { 0x25B2, 0x1879, 0x10de, "NVIDIA RTX A400" },
+   { 0x25B2, 0x1879, 0x17aa, "NVIDIA RTX A400" },
    { 0x25B6, 0x14a9, 0x10de, "NVIDIA A16" },
    { 0x25B6, 0x157e, 0x10de, "NVIDIA A2" },
    { 0x25B8, 0x0000, 0x0000, "NVIDIA RTX A2000 Laptop GPU" },
@@ -1054,8 +1063,11 @@ static const CHIPS_RELEASED sChipsReleased[] = {
    { 0x28A1, 0x0000, 0x0000, "NVIDIA GeForce RTX 4050 Laptop GPU" },
    { 0x28B0, 0x1870, 0x1028, "NVIDIA RTX 2000 Ada Generation" },
    { 0x28B0, 0x1870, 0x103c, "NVIDIA RTX 2000 Ada Generation" },
+   { 0x28B0, 0x1871, 0x103c, "NVIDIA RTX 2000E Ada Generation" },
    { 0x28B0, 0x1870, 0x10de, "NVIDIA RTX 2000 Ada Generation" },
+   { 0x28B0, 0x1871, 0x10de, "NVIDIA RTX 2000E Ada Generation" },
    { 0x28B0, 0x1870, 0x17aa, "NVIDIA RTX 2000 Ada Generation" },
+   { 0x28B0, 0x1871, 0x17aa, "NVIDIA RTX 2000E Ada Generation" },
    { 0x28B8, 0x0000, 0x0000, "NVIDIA RTX 2000 Ada Generation Laptop GPU" },
    { 0x28B9, 0x0000, 0x0000, "NVIDIA RTX 1000 Ada Generation Laptop GPU" },
    { 0x28BA, 0x0000, 0x0000, "NVIDIA RTX 500 Ada Generation Laptop GPU" },
@@ -1715,6 +1727,8 @@ static const CHIPS_RELEASED sChipsReleased[] = {
    { 0x2329, 0x2033, 0x10DE, "NVIDIA H20-24C" },
    { 0x2329, 0x2034, 0x10DE, "NVIDIA H20-48C" },
    { 0x2329, 0x2035, 0x10DE, "NVIDIA H20-96C" },
+   { 0x2329, 0x2047, 0x10DE, "NVIDIA H20-8C" },
+   { 0x2329, 0x2048, 0x10DE, "NVIDIA H20-32C" },
    { 0x2330, 0x187a, 0x10DE, "NVIDIA H100XM-1-10CME" },
    { 0x2330, 0x187b, 0x10DE, "NVIDIA H100XM-1-10C" },
    { 0x2330, 0x187c, 0x10DE, "NVIDIA H100XM-1-20C" },
@@ -1889,45 +1903,45 @@ static const CHIPS_RELEASED sChipsReleased[] = {
    { 0x26B2, 0x1836, 0x10DE, "NVIDIA RTX5000-Ada-8C" },
    { 0x26B2, 0x1837, 0x10DE, "NVIDIA RTX5000-Ada-16C" },
    { 0x26B2, 0x1838, 0x10DE, "NVIDIA RTX5000-Ada-32C" },
-   { 0x26B3, 0x1958, 0x10DE, "NVIDIA RTX 5880-Ada-1B" },
-   { 0x26B3, 0x1959, 0x10DE, "NVIDIA RTX 5880-Ada-2B" },
-   { 0x26B3, 0x195a, 0x10DE, "NVIDIA RTX 5880-Ada-1Q" },
-   { 0x26B3, 0x195b, 0x10DE, "NVIDIA RTX 5880-Ada-2Q" },
-   { 0x26B3, 0x195c, 0x10DE, "NVIDIA RTX 5880-Ada-3Q" },
-   { 0x26B3, 0x195d, 0x10DE, "NVIDIA RTX 5880-Ada-4Q" },
-   { 0x26B3, 0x195e, 0x10DE, "NVIDIA RTX 5880-Ada-6Q" },
-   { 0x26B3, 0x195f, 0x10DE, "NVIDIA RTX 5880-Ada-8Q" },
-   { 0x26B3, 0x1960, 0x10DE, "NVIDIA RTX 5880-Ada-12Q" },
-   { 0x26B3, 0x1961, 0x10DE, "NVIDIA RTX 5880-Ada-16Q" },
-   { 0x26B3, 0x1962, 0x10DE, "NVIDIA RTX 5880-Ada-24Q" },
-   { 0x26B3, 0x1963, 0x10DE, "NVIDIA RTX 5880-Ada-48Q" },
-   { 0x26B3, 0x1964, 0x10DE, "NVIDIA RTX 5880-Ada-1A" },
-   { 0x26B3, 0x1965, 0x10DE, "NVIDIA RTX 5880-Ada-2A" },
-   { 0x26B3, 0x1966, 0x10DE, "NVIDIA RTX 5880-Ada-3A" },
-   { 0x26B3, 0x1967, 0x10DE, "NVIDIA RTX 5880-Ada-4A" },
-   { 0x26B3, 0x1968, 0x10DE, "NVIDIA RTX 5880-Ada-6A" },
-   { 0x26B3, 0x1969, 0x10DE, "NVIDIA RTX 5880-Ada-8A" },
-   { 0x26B3, 0x196a, 0x10DE, "NVIDIA RTX 5880-Ada-12A" },
-   { 0x26B3, 0x196b, 0x10DE, "NVIDIA RTX 5880-Ada-16A" },
-   { 0x26B3, 0x196c, 0x10DE, "NVIDIA RTX 5880-Ada-24A" },
-   { 0x26B3, 0x196d, 0x10DE, "NVIDIA RTX 5880-Ada-48A" },
-   { 0x26B3, 0x196e, 0x10DE, "NVIDIA RTX 5880-Ada-1" },
-   { 0x26B3, 0x196f, 0x10DE, "NVIDIA RTX 5880-Ada-2" },
-   { 0x26B3, 0x1970, 0x10DE, "NVIDIA RTX 5880-Ada-3" },
-   { 0x26B3, 0x1971, 0x10DE, "NVIDIA RTX 5880-Ada-4" },
-   { 0x26B3, 0x1972, 0x10DE, "NVIDIA RTX 5880-Ada-6" },
-   { 0x26B3, 0x1973, 0x10DE, "NVIDIA RTX 5880-Ada-8" },
-   { 0x26B3, 0x1974, 0x10DE, "NVIDIA RTX 5880-Ada-12" },
-   { 0x26B3, 0x1975, 0x10DE, "NVIDIA RTX 5880-Ada-16" },
-   { 0x26B3, 0x1976, 0x10DE, "NVIDIA RTX 5880-Ada-24" },
-   { 0x26B3, 0x1977, 0x10DE, "NVIDIA RTX 5880-Ada-48" },
-   { 0x26B3, 0x1978, 0x10DE, "NVIDIA RTX 5880-Ada-4C" },
-   { 0x26B3, 0x1979, 0x10DE, "NVIDIA RTX 5880-Ada-6C" },
-   { 0x26B3, 0x197a, 0x10DE, "NVIDIA RTX 5880-Ada-8C" },
-   { 0x26B3, 0x197b, 0x10DE, "NVIDIA RTX 5880-Ada-12C" },
-   { 0x26B3, 0x197c, 0x10DE, "NVIDIA RTX 5880-Ada-16C" },
-   { 0x26B3, 0x197d, 0x10DE, "NVIDIA RTX 5880-Ada-24C" },
-   { 0x26B3, 0x197e, 0x10DE, "NVIDIA RTX 5880-Ada-48C" },
+   { 0x26B3, 0x1958, 0x10DE, "NVIDIA RTX5880-Ada-1B" },
+   { 0x26B3, 0x1959, 0x10DE, "NVIDIA RTX5880-Ada-2B" },
+   { 0x26B3, 0x195a, 0x10DE, "NVIDIA RTX5880-Ada-1Q" },
+   { 0x26B3, 0x195b, 0x10DE, "NVIDIA RTX5880-Ada-2Q" },
+   { 0x26B3, 0x195c, 0x10DE, "NVIDIA RTX5880-Ada-3Q" },
+   { 0x26B3, 0x195d, 0x10DE, "NVIDIA RTX5880-Ada-4Q" },
+   { 0x26B3, 0x195e, 0x10DE, "NVIDIA RTX5880-Ada-6Q" },
+   { 0x26B3, 0x195f, 0x10DE, "NVIDIA RTX5880-Ada-8Q" },
+   { 0x26B3, 0x1960, 0x10DE, "NVIDIA RTX5880-Ada-12Q" },
+   { 0x26B3, 0x1961, 0x10DE, "NVIDIA RTX5880-Ada-16Q" },
+   { 0x26B3, 0x1962, 0x10DE, "NVIDIA RTX5880-Ada-24Q" },
+   { 0x26B3, 0x1963, 0x10DE, "NVIDIA RTX5880-Ada-48Q" },
+   { 0x26B3, 0x1964, 0x10DE, "NVIDIA RTX5880-Ada-1A" },
+   { 0x26B3, 0x1965, 0x10DE, "NVIDIA RTX5880-Ada-2A" },
+   { 0x26B3, 0x1966, 0x10DE, "NVIDIA RTX5880-Ada-3A" },
+   { 0x26B3, 0x1967, 0x10DE, "NVIDIA RTX5880-Ada-4A" },
+   { 0x26B3, 0x1968, 0x10DE, "NVIDIA RTX5880-Ada-6A" },
+   { 0x26B3, 0x1969, 0x10DE, "NVIDIA RTX5880-Ada-8A" },
+   { 0x26B3, 0x196a, 0x10DE, "NVIDIA RTX5880-Ada-12A" },
+   { 0x26B3, 0x196b, 0x10DE, "NVIDIA RTX5880-Ada-16A" },
+   { 0x26B3, 0x196c, 0x10DE, "NVIDIA RTX5880-Ada-24A" },
+   { 0x26B3, 0x196d, 0x10DE, "NVIDIA RTX5880-Ada-48A" },
+   { 0x26B3, 0x196e, 0x10DE, "NVIDIA RTX5880-Ada-1" },
+   { 0x26B3, 0x196f, 0x10DE, "NVIDIA RTX5880-Ada-2" },
+   { 0x26B3, 0x1970, 0x10DE, "NVIDIA RTX5880-Ada-3" },
+   { 0x26B3, 0x1971, 0x10DE, "NVIDIA RTX5880-Ada-4" },
+   { 0x26B3, 0x1972, 0x10DE, "NVIDIA RTX5880-Ada-6" },
+   { 0x26B3, 0x1973, 0x10DE, "NVIDIA RTX5880-Ada-8" },
+   { 0x26B3, 0x1974, 0x10DE, "NVIDIA RTX5880-Ada-12" },
+   { 0x26B3, 0x1975, 0x10DE, "NVIDIA RTX5880-Ada-16" },
+   { 0x26B3, 0x1976, 0x10DE, "NVIDIA RTX5880-Ada-24" },
+   { 0x26B3, 0x1977, 0x10DE, "NVIDIA RTX5880-Ada-48" },
+   { 0x26B3, 0x1978, 0x10DE, "NVIDIA RTX5880-Ada-4C" },
+   { 0x26B3, 0x1979, 0x10DE, "NVIDIA RTX5880-Ada-6C" },
+   { 0x26B3, 0x197a, 0x10DE, "NVIDIA RTX5880-Ada-8C" },
+   { 0x26B3, 0x197b, 0x10DE, "NVIDIA RTX5880-Ada-12C" },
+   { 0x26B3, 0x197c, 0x10DE, "NVIDIA RTX5880-Ada-16C" },
+   { 0x26B3, 0x197d, 0x10DE, "NVIDIA RTX5880-Ada-24C" },
+   { 0x26B3, 0x197e, 0x10DE, "NVIDIA RTX5880-Ada-48C" },
    { 0x26B5, 0x176d, 0x10DE, "NVIDIA L40-1B" },
    { 0x26B5, 0x176e, 0x10DE, "NVIDIA L40-2B" },
    { 0x26B5, 0x176f, 0x10DE, "NVIDIA L40-1Q" },

(File diff suppressed because it is too large.)


@@ -638,6 +638,8 @@ struct Subdevice {
    NV_STATUS (*__subdeviceCtrlCmdInternalConfComputeGetStaticInfo__)(struct Subdevice *, NV2080_CTRL_INTERNAL_CONF_COMPUTE_GET_STATIC_INFO_PARAMS *);
    NV_STATUS (*__subdeviceCtrlCmdInternalConfComputeDeriveSwlKeys__)(struct Subdevice *, NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_SWL_KEYS_PARAMS *);
    NV_STATUS (*__subdeviceCtrlCmdInternalConfComputeDeriveLceKeys__)(struct Subdevice *, NV2080_CTRL_INTERNAL_CONF_COMPUTE_DERIVE_LCE_KEYS_PARAMS *);
+    NV_STATUS (*__subdeviceCtrlCmdInternalConfComputeRotateKeys__)(struct Subdevice *, NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS *);
+    NV_STATUS (*__subdeviceCtrlCmdInternalConfComputeRCChannelsForKeyRotation__)(struct Subdevice *, NV2080_CTRL_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION_PARAMS *);
    NV_STATUS (*__subdeviceCtrlCmdInternalConfComputeSetGpuState__)(struct Subdevice *, NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_GPU_STATE_PARAMS *);
    NV_STATUS (*__subdeviceCtrlCmdInternalInitUserSharedData__)(struct Subdevice *, NV2080_CTRL_INTERNAL_INIT_USER_SHARED_DATA_PARAMS *);
    NV_STATUS (*__subdeviceCtrlCmdInternalUserSharedDataSetDataPoll__)(struct Subdevice *, NV2080_CTRL_INTERNAL_USER_SHARED_DATA_SET_DATA_POLL_PARAMS *);
@@ -1291,6 +1293,8 @@ NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_C
#define subdeviceCtrlCmdInternalConfComputeGetStaticInfo(pSubdevice, pParams) subdeviceCtrlCmdInternalConfComputeGetStaticInfo_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdInternalConfComputeDeriveSwlKeys(pSubdevice, pParams) subdeviceCtrlCmdInternalConfComputeDeriveSwlKeys_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdInternalConfComputeDeriveLceKeys(pSubdevice, pParams) subdeviceCtrlCmdInternalConfComputeDeriveLceKeys_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdInternalConfComputeRotateKeys(pSubdevice, pParams) subdeviceCtrlCmdInternalConfComputeRotateKeys_DISPATCH(pSubdevice, pParams)
+#define subdeviceCtrlCmdInternalConfComputeRCChannelsForKeyRotation(pSubdevice, pParams) subdeviceCtrlCmdInternalConfComputeRCChannelsForKeyRotation_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdInternalConfComputeSetGpuState(pSubdevice, pParams) subdeviceCtrlCmdInternalConfComputeSetGpuState_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdInternalInitUserSharedData(pSubdevice, pParams) subdeviceCtrlCmdInternalInitUserSharedData_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdInternalUserSharedDataSetDataPoll(pSubdevice, pParams) subdeviceCtrlCmdInternalUserSharedDataSetDataPoll_DISPATCH(pSubdevice, pParams)
@@ -4565,6 +4569,18 @@ static inline NV_STATUS subdeviceCtrlCmdInternalConfComputeDeriveLceKeys_DISPATC
    return pSubdevice->__subdeviceCtrlCmdInternalConfComputeDeriveLceKeys__(pSubdevice, pParams);
}

+NV_STATUS subdeviceCtrlCmdInternalConfComputeRotateKeys_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS *pParams);
+
+static inline NV_STATUS subdeviceCtrlCmdInternalConfComputeRotateKeys_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS *pParams) {
+    return pSubdevice->__subdeviceCtrlCmdInternalConfComputeRotateKeys__(pSubdevice, pParams);
+}
+
+NV_STATUS subdeviceCtrlCmdInternalConfComputeRCChannelsForKeyRotation_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION_PARAMS *pParams);
+
+static inline NV_STATUS subdeviceCtrlCmdInternalConfComputeRCChannelsForKeyRotation_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION_PARAMS *pParams) {
+    return pSubdevice->__subdeviceCtrlCmdInternalConfComputeRCChannelsForKeyRotation__(pSubdevice, pParams);
+}
+
NV_STATUS subdeviceCtrlCmdInternalConfComputeSetGpuState_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_GPU_STATE_PARAMS *pParams);

static inline NV_STATUS subdeviceCtrlCmdInternalConfComputeSetGpuState_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_SET_GPU_STATE_PARAMS *pParams) {

@@ -298,6 +298,7 @@
#define RMCFG_FEATURE_FEATURE_GH180 1 // RMconfig to encapsulate GH180 features
#define RMCFG_FEATURE_MULTICAST_FABRIC 1 // Support for MULTICAST_FABRIC
#define RMCFG_FEATURE_NVLINK_ERROR_THRESHOLD 1 // Support for NVLINK_ERROR_THRESHOLD
+#define RMCFG_FEATURE_GSP_SEC2_ENC_CHNLMGMT_RC_WAR 1 // WAR required for RC handling. See comment #36 of bug 4406277
#define RMCFG_FEATURE_FABRIC_LINEAR_ADDRESSING 1 // Unicast fabric memory management


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@@ -52,6 +52,7 @@ NV_STATUS gpuFabricProbeGetGpaAddress(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64
NV_STATUS gpuFabricProbeGetGpaAddressRange(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *pGpaAddressRange);
NV_STATUS gpuFabricProbeGetFlaAddress(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *pFlaAddress);
NV_STATUS gpuFabricProbeGetFlaAddressRange(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *pFlaAddressRange);
+NV_STATUS gpuFabricProbeGetEgmGpaAddress(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *pEgmGpaAddress);
NV_STATUS gpuFabricProbeGetNumProbeReqs(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU64 *numProbes);
NV_STATUS gpuFabricProbeGetFabricCliqueId(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU32 *pFabricCliqueId);
NV_STATUS gpuFabricProbeGetFabricHealthStatus(GPU_FABRIC_PROBE_INFO_KERNEL *pInfo, NvU32 *pFabricHealthStatusMask);


@@ -1939,6 +1939,87 @@
#define NV_REG_STR_RM_CONF_COMPUTE_SPDM_POLICY_ENABLED_NO 0x00000000
#define NV_REG_STR_RM_CONF_COMPUTE_SPDM_POLICY_ENABLED_YES 0x00000001
//
// Enable/disable dummy key rotation in Confidential Compute.
// This is a temp reg key that will be removed once all RM clients
// support key rotation by default.
//
// 0 - Feature disabled
// 1 - Feature enabled
//
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION "RmConfComputeDummyKeyRotation"
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_ENABLED 0:0
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_ENABLED_NO 0x00000000
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_ENABLED_YES 0x00000001
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_SEC2_KEYS 1:1
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_SEC2_KEYS_NO 0x00000000
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_SEC2_KEYS_YES 0x00000001
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE2_KEYS 2:2
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE2_KEYS_NO 0x00000000
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE2_KEYS_YES 0x00000001
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE3_KEYS 3:3
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE3_KEYS_NO 0x00000000
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE3_KEYS_YES 0x00000001
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE4_KEYS 4:4
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE4_KEYS_NO 0x00000000
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE4_KEYS_YES 0x00000001
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE5_KEYS 5:5
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE5_KEYS_NO 0x00000000
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE5_KEYS_YES 0x00000001
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE6_KEYS 6:6
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE6_KEYS_NO 0x00000000
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE6_KEYS_YES 0x00000001
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE7_KEYS 7:7
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE7_KEYS_NO 0x00000000
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE7_KEYS_YES 0x00000001
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE8_KEYS 8:8
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE8_KEYS_NO 0x00000000
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE8_KEYS_YES 0x00000001
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE9_KEYS 9:9
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE9_KEYS_NO 0x00000000
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LCE9_KEYS_YES 0x00000001
// if all kernel keys should be considered for key rotation
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_KERNEL_KEYS 10:10
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_KERNEL_KEYS_NO 0x00000000
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_KERNEL_KEYS_YES 0x00000001
// if all user keys should be considered for key rotation
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_USER_KEYS 11:11
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_USER_KEYS_NO 0x00000000
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_USER_KEYS_YES 0x00000001
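Decoded, this regkey is a small bitfield: bit 0 is the global enable, bits 1 through 9 opt the SEC2 and LCE2-LCE9 key spaces in individually, and bits 10 and 11 widen the selection to all kernel or all user keys. A minimal sketch of reading it with plain shifts; the use of osReadRegistryDword and the local variable names are assumptions for illustration, not the driver's actual decode path:

    NvU32 data = 0;

    // Hypothetical decode; osReadRegistryDword is assumed to be the usual
    // per-GPU DWORD registry accessor.
    if (osReadRegistryDword(pGpu, NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION, &data) == NV_OK)
    {
        NvBool bEnabled    = (data >> 0)  & 0x1;  // _ENABLED,     bit 0
        NvBool bSec2Keys   = (data >> 1)  & 0x1;  // _SEC2_KEYS,   bit 1
        NvBool bLce2Keys   = (data >> 2)  & 0x1;  // _LCE2_KEYS,   bit 2
        // ... bits 3 through 9 select LCE3-LCE9 the same way ...
        NvBool bKernelKeys = (data >> 10) & 0x1;  // _KERNEL_KEYS, bit 10
        NvBool bUserKeys   = (data >> 11) & 0x1;  // _USER_KEYS,   bit 11
    }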
//
// Set period for "keep-alive" heartbeat message sent between SPDM Requester and Responder.
// This will send a keep-alive message to the GPU once every period. The GPU will set its timeout to 2 * period.
// If the GPU does not receive a message within 2 * period, it is a fatal error and the GPU will require a reset.
// Minimum period is 4 seconds, maximum period is 255 seconds. Setting the period to 0 disables the heartbeat.
//
// 0 - Disable feature (no heartbeat sending)
// x - Period value in seconds
//
#define NV_REG_STR_RM_CONF_COMPUTE_HEARTBEAT "RmConfComputeHeartbeatPeriod"
#define NV_REG_STR_RM_CONF_COMPUTE_HEARTBEAT_PERIOD_SECONDS 31:0
#define NV_REG_STR_RM_CONF_COMPUTE_HEARTBEAT_PERIOD_SECONDS_DISABLE 0x00000000
#define NV_REG_STR_RM_CONF_COMPUTE_HEARTBEAT_PERIOD_SECONDS_MIN 0x00000004
#define NV_REG_STR_RM_CONF_COMPUTE_HEARTBEAT_PERIOD_SECONDS_MAX 0x000000FF
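In other words, a configured period is either 0 (no heartbeat, no GPU-side timer) or a value that is only meaningful inside [4, 255] seconds, with the GPU arming a timeout of twice the period. A small validation sketch; whether RM clamps or rejects out-of-range values is an assumption here, as is the helper name:

    // Hypothetical validation of RmConfComputeHeartbeatPeriod.
    static NvU32 ccValidateHeartbeatPeriod(NvU32 requested)
    {
        if (requested == NV_REG_STR_RM_CONF_COMPUTE_HEARTBEAT_PERIOD_SECONDS_DISABLE)
            return 0; // heartbeat disabled, GPU arms no timeout
        if (requested < NV_REG_STR_RM_CONF_COMPUTE_HEARTBEAT_PERIOD_SECONDS_MIN)
            requested = NV_REG_STR_RM_CONF_COMPUTE_HEARTBEAT_PERIOD_SECONDS_MIN;
        if (requested > NV_REG_STR_RM_CONF_COMPUTE_HEARTBEAT_PERIOD_SECONDS_MAX)
            requested = NV_REG_STR_RM_CONF_COMPUTE_HEARTBEAT_PERIOD_SECONDS_MAX;
        return requested; // GPU-side timeout is 2 * requested seconds
    }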
//
// Set lower threshold for dummy key rotation.
// This is a temp reg key that will be removed once all RM clients
// support prod key rotation.
// Value is in seconds.
//
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LOWER_THRESHOLD "RmDummyKeyRotationLowerThreshold"
//
// Set upper threshold for dummy key rotation.
// This is a temp reg key that will be removed once all RM clients
// support prod key rotation.
// Value is in seconds.
//
#define NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_UPPER_THRESHOLD "RmDummyKeyRotationUpperThreshold"
// TYPE Dword
// Encoding boolean
// Regkey based solution to serialize VBlank Aggressive Handling in Top Half using spinlock
@@ -2278,4 +2359,12 @@
#define NV_REG_STR_RM_RELAXED_GSP_INIT_LOCKING_ENABLE 0x00000001
#define NV_REG_STR_RM_RELAXED_GSP_INIT_LOCKING_DEFAULT 0x00000002
//
// Type: Dword
// This regkey overrides the state of the GR scrubber channel and determines
// whether it should be created or not.
//
#define NV_REG_STR_RM_FORCE_GR_SCRUBBER_CHANNEL "RmForceGrScrubberChannel"
#define NV_REG_STR_RM_FORCE_GR_SCRUBBER_CHANNEL_DISABLE 0x00000000
#define NV_REG_STR_RM_FORCE_GR_SCRUBBER_CHANNEL_ENABLE 0x00000001
#endif // NVRM_REGISTRY_H


@@ -142,11 +142,6 @@ vgpuDestructObject
    OBJVGPU *pVGpu = GPU_GET_VGPU(pGpu);
    NV_STATUS rmStatus = NV_OK;

-    // Sysmem PFN Bitmap teardown invokes RPC for GSP enabled
-    // case. Hence this needs to happen before RPC teardown
-    if (pVGpu != NULL)
-        teardownSysmemPfnBitMap(pGpu, pVGpu);
-
    NV_RM_RPC_UNLOADING_GUEST_DRIVER(pGpu, rmStatus, NV_FALSE, NV_FALSE, 0);
    {
@@ -161,6 +156,9 @@ vgpuDestructObject
    vgpuGspTeardownBuffers(pGpu);

+    if (pVGpu != NULL)
+        teardownSysmemPfnBitMap(pGpu, pVGpu);
+
    portMemFree(pVGpu);
    NvVGPU_Table[gpuGetInstance(pGpu)] = NULL;
}


@@ -515,9 +515,6 @@ static NV_STATUS _setupGspSharedMemory(OBJGPU *pGpu, OBJVGPU *pVGpu)
    KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
    NvU32 memFlags = 0;

-    if (kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus))
-        memFlags = MEMDESC_FLAGS_CPU_ONLY;
-
    if (IsGH100orBetter(pGpu) && (!kbusIsBar2Initialized(pKernelBus)))
        addressSpace = ADDR_SYSMEM;
@@ -874,12 +871,8 @@ NV_STATUS vgpuReinitializeRpcInfraOnStateLoad(OBJGPU *pGpu)
static NV_STATUS _setupGspControlBuffer(OBJGPU *pGpu, OBJVGPU *pVGpu)
{
    NV_STATUS status;
-    KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
    NvU32 memFlags = 0;

-    if (kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus))
-        memFlags = MEMDESC_FLAGS_CPU_ONLY;
-
    status = _allocRpcMemDesc(pGpu,
                              RM_PAGE_SIZE,
                              NV_MEMORY_CONTIGUOUS,
@@ -918,12 +911,8 @@ static void _teardownGspControlBuffer(OBJGPU *pGpu, OBJVGPU *pVGpu)
static NV_STATUS _setupGspResponseBuffer(OBJGPU *pGpu, OBJVGPU *pVGpu)
{
    NV_STATUS status;
-    KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
    NvU32 memFlags = 0;

-    if (kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus))
-        memFlags = MEMDESC_FLAGS_CPU_ONLY;
-
    status = _allocRpcMemDesc(pGpu,
                              RM_PAGE_SIZE,
                              NV_MEMORY_CONTIGUOUS,
@@ -975,9 +964,6 @@ static NV_STATUS _setupGspMessageBuffer(OBJGPU *pGpu, OBJVGPU *pVGpu)
    KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
    NvU32 memFlags = 0;

-    if(kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus))
-        memFlags = MEMDESC_FLAGS_CPU_ONLY;
-
    if (IsGH100orBetter(pGpu) && (!kbusIsBar2Initialized(pKernelBus)))
        addressSpace = ADDR_SYSMEM;
@@ -1273,6 +1259,7 @@ static NV_STATUS _vgpuGspSetupCommunicationWithPlugin(OBJGPU *pGpu, OBJVGPU *pVG
void vgpuGspTeardownBuffers(OBJGPU *pGpu)
{
    OBJVGPU *pVGpu = GPU_GET_VGPU(pGpu);
+    NvU32 rmStatus = NV_OK;

    if (!pVGpu->bGspPlugin)
    {
@@ -1284,6 +1271,15 @@ void vgpuGspTeardownBuffers(OBJGPU *pGpu)
    // First teardown with GSP and then teardown the buffers
    _vgpuGspTeardownCommunicationWithPlugin(pGpu, pVGpu);

+    if (vgpuSysmemPfnInfo.bSysmemPfnInfoInitialized)
+    {
+        rmStatus = updateSharedBufferInfoInSysmemPfnBitMap(pGpu, pVGpu, NV_FALSE);
+        if (rmStatus != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "RPC: Sysmem PFN bitmap update failed for shared buffer sysmem pages: 0x%x\n", rmStatus);
+        }
+    }
+
    _teardownGspSharedMemory(pGpu, pVGpu);
    _teardownGspEventInfrastructure(pGpu, pVGpu);
@@ -1362,6 +1358,16 @@ NV_STATUS vgpuGspSetupBuffers(OBJGPU *pGpu)
        goto fail;
    }

+    if (vgpuSysmemPfnInfo.bSysmemPfnInfoInitialized)
+    {
+        status = updateSharedBufferInfoInSysmemPfnBitMap(pGpu, pVGpu, NV_TRUE);
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "RPC: Sysmem PFN bitmap update failed for shared buffer sysmem pages: 0x%x\n", status);
+            goto fail;
+        }
+    }
+
    // Update Guest ECC status based on Host ECC status, after establishing RPC with GSP.
    setGuestEccStatus(pGpu);
@@ -1492,11 +1498,14 @@ NV_STATUS initRpcInfrastructure_VGPU(OBJGPU *pGpu)
        goto fail;
    }

-    rmStatus = updateSharedBufferInfoInSysmemPfnBitMap(pGpu, pVGpu, NV_TRUE);
-    if (rmStatus != NV_OK)
+    if (vgpuSysmemPfnInfo.bSysmemPfnInfoInitialized)
    {
-        NV_PRINTF(LEVEL_ERROR, "RPC: Sysmem PFN bitmap update failed for shared buffer sysmem pages: 0x%x\n", rmStatus);
-        goto fail;
+        rmStatus = updateSharedBufferInfoInSysmemPfnBitMap(pGpu, pVGpu, NV_TRUE);
+        if (rmStatus != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "RPC: Sysmem PFN bitmap update failed for shared buffer sysmem pages: 0x%x\n", rmStatus);
+            goto fail;
+        }
    }

    pVGpu->bVncSupported = !!(*(NvU32 *)(pVGpu->shared_memory +
@@ -1543,12 +1552,6 @@ NV_STATUS freeRpcInfrastructure_VGPU(OBJGPU *pGpu)
        return NV_ERR_INVALID_STATE;
    }

-    rmStatus = updateSharedBufferInfoInSysmemPfnBitMap(pGpu, pVGpu, NV_FALSE);
-    if (rmStatus != NV_OK)
-    {
-        NV_PRINTF(LEVEL_ERROR, "RPC: Sysmem PFN bitmap update failed for shared buffer sysmem pages: 0x%x\n", rmStatus);
-    }
-
    if (pVGpu->bGspPlugin)
    {
        vgpuGspTeardownBuffers(pGpu);


@@ -96,9 +96,6 @@ NV_STATUS _setupGspEventInfrastructure(OBJGPU *pGpu, OBJVGPU *pVGpu)
    NvU32 memFlags = 0;
    KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);

-    if (kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus))
-        memFlags = MEMDESC_FLAGS_CPU_ONLY;
-
    if (IsGH100orBetter(pGpu) && (!kbusIsBar2Initialized(pKernelBus)))
        addressSpace = ADDR_SYSMEM;


@@ -628,7 +628,7 @@ rmGpuLockFree(NvU32 gpuInst)
// Disable GPUs Interrupts thus blocking the ISR from
// entering.
//
-static void _gpuLocksAcquireDisableInterrupts(NvU32 gpuInst, NvU32 flags)
+static void _gpuLocksAcquireDisableInterrupts(NvU32 gpuInst, NvBool bInIsr)
{
    OBJGPU *pGpu = gpumgrGetGpu(gpuInst);
@@ -653,7 +653,6 @@ static void _gpuLocksAcquireDisableInterrupts(NvU32 gpuInst, NvBool bInIsr)
    if (osLockShouldToggleInterrupts(pGpu))
    {
        Intr *pIntr = GPU_GET_INTR(pGpu);
-        NvBool isIsr = !!(flags & GPU_LOCK_FLAGS_COND_ACQUIRE);
        NvBool bBcEnabled = gpumgrGetBcEnabledStatus(pGpu);

        // Always disable intrs for cond code
@@ -667,10 +666,10 @@ static void _gpuLocksAcquireDisableInterrupts(NvU32 gpuInst, NvBool bInIsr)
            tmrRmCallbackIntrDisable(pTmr, pGpu);
        }

-        osDisableInterrupts(pGpu, isIsr);
+        osDisableInterrupts(pGpu, bInIsr);

        if ((pIntr != NULL) && pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING) &&
-            (isIsr == NV_FALSE) )
+            (bInIsr == NV_FALSE) )
        {
            NvU64 oldIrql;
            NvU32 intrMaskFlags;
@@ -722,7 +721,7 @@ _rmGpuLocksAcquire(NvU32 gpuMask, NvU32 flags, NvU32 module, void *ra, NvU32 *pG
    NvU32 gpuMaskLocked = 0;
    GPULOCK *pAllocLock = &rmGpuLockInfo.gpuAllocLock;
    GPULOCK *pGpuLock;
-    NvBool bHighIrql, bCondAcquireCheck;
+    NvBool bHighIrql, bInIsr, bCondAcquireCheck;
    NvU32 maxLockableGpuInst;
    NvU64 threadId = portThreadGetCurrentThreadId();
    NvU64 priority = 0;
@@ -734,6 +733,7 @@ _rmGpuLocksAcquire(NvU32 gpuMask, NvU32 flags, NvU32 module, void *ra, NvU32 *pG
    NvU32 loopCount;

    bHighIrql = (portSyncExSafeToSleep() == NV_FALSE);
+    bInIsr = portUtilIsInterruptContext();
    bCondAcquireCheck = ((flags & GPU_LOCK_FLAGS_COND_ACQUIRE) != 0);

    if (pGpuLockedMask)
@@ -1084,7 +1084,7 @@ per_gpu_lock_acquired:
    if (gpuInst != GPU_INST_ALLOC_LOCK)
    {
        // now disable interrupts
-        _gpuLocksAcquireDisableInterrupts(gpuInst, flags);
+        _gpuLocksAcquireDisableInterrupts(gpuInst, bInIsr);

        // mark this one as locked
        gpuMaskLocked |= NVBIT(gpuInst);


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a


@@ -2501,6 +2501,17 @@ kbusGetEgmPeerId_GH100
        return BUS_INVALID_PEER;
    }

+    //
+    // For Nvswitch connected systems, AAS (Alternate Address Space) is set by Nvswitch itself
+    // based on the EGM fabric address range and so there is no need for a separate peer id
+    // in the Nvswitch case.
+    //
+    if (GPU_IS_NVSWITCH_DETECTED(pLocalGpu))
+    {
+        LOWESTBITIDX_32(peerMask);
+        return peerMask;
+    }
+
    FOR_EACH_INDEX_IN_MASK(32, peerId, peerMask)
    {
        if (pLocalKernelBus->p2p.bEgmPeer[peerId])
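LOWESTBITIDX_32() reduces the mask variable in place to the index of its lowest set bit, which is why peerMask itself is returned afterwards. A standalone equivalent of the idiom (illustrative only; assumes a nonzero mask, which the surrounding code is expected to guarantee):

    // Reduce a nonzero 32-bit mask to the index of its lowest set bit,
    // e.g. 0b001100 -> 2.
    static NvU32 lowestBitIdx32(NvU32 mask)
    {
        NvU32 idx = 0;
        while ((mask & 0x1) == 0)
        {
            mask >>= 1;
            idx++;
        }
        return idx;
    }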


@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2009-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2009-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@@ -546,9 +546,17 @@ p2papiConstruct_IMPL
    pP2PApi->attributes  = DRF_NUM(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, p2pConnectionType);
    pP2PApi->attributes |= bSpaAccessOnly ? DRF_DEF(_P2PAPI, _ATTRIBUTES, _LINK_TYPE, _SPA) :
                                            DRF_DEF(_P2PAPI, _ATTRIBUTES, _LINK_TYPE, _GPA);
+    //
+    // For Nvswitch connected systems, AAS(Alternate Address Space) is set by Nvswitch itself
+    // based on the EGM fabric address range and so there is no need for a separate peer id
+    // in the Nvswitch case.
+    //
    bEgmPeer = (!bSpaAccessOnly &&
                memmgrIsLocalEgmEnabled(GPU_GET_MEMORY_MANAGER(pLocalGpu)) &&
-               memmgrIsLocalEgmEnabled(GPU_GET_MEMORY_MANAGER(pRemoteGpu)));
+               memmgrIsLocalEgmEnabled(GPU_GET_MEMORY_MANAGER(pRemoteGpu)) &&
+               !GPU_IS_NVSWITCH_DETECTED(pLocalGpu));

    if (bSpaAccessOnly &&
        memmgrIsLocalEgmEnabled(GPU_GET_MEMORY_MANAGER(pLocalGpu)) &&
        memmgrIsLocalEgmEnabled(GPU_GET_MEMORY_MANAGER(pRemoteGpu)))
@@ -738,7 +746,8 @@ p2papiDestruct_IMPL
                         pP2PApi->attributes), end);

    if (!FLD_TEST_DRF(_P2PAPI, _ATTRIBUTES, _LINK_TYPE, _SPA, pP2PApi->attributes) &&
        memmgrIsLocalEgmEnabled(GPU_GET_MEMORY_MANAGER(pLocalGpu)) &&
-        memmgrIsLocalEgmEnabled(GPU_GET_MEMORY_MANAGER(pRemoteGpu)))
+        memmgrIsLocalEgmEnabled(GPU_GET_MEMORY_MANAGER(pRemoteGpu)) &&
+        !GPU_IS_NVSWITCH_DETECTED(pLocalGpu))
    {
        status = kbusRemoveP2PMapping_HAL(pLocalGpu, pLocalKernelBus,
                                          pRemoteGpu, pRemoteKernelBus,

View File

@ -33,6 +33,7 @@
#include "published/hopper/gh100/dev_fuse.h" #include "published/hopper/gh100/dev_fuse.h"
#include "rmapi/rmapi.h" #include "rmapi/rmapi.h"
#include "conf_compute/cc_keystore.h" #include "conf_compute/cc_keystore.h"
//#include "hopper/gh100/dev_se_seb.h"
/*! /*!
* check if debug mode is enabled. * check if debug mode is enabled.
@ -70,8 +71,8 @@ confComputeIsGpuCcCapable_GH100
if (confComputeIsDebugModeEnabled_HAL(pGpu, pConfCompute)) if (confComputeIsDebugModeEnabled_HAL(pGpu, pConfCompute))
{ {
NV_PRINTF(LEVEL_ERROR, "Not checking if GPU is capable of accepting conf compute workloads\n"); NV_PRINTF(LEVEL_ERROR, "Cannot boot Confidential Compute as debug board is not supported!\n");
return NV_TRUE; return NV_FALSE;
} }
reg = GPU_REG_RD32(pGpu, NV_FUSE_SPARE_BIT_0); reg = GPU_REG_RD32(pGpu, NV_FUSE_SPARE_BIT_0);
@ -455,3 +456,155 @@ confComputeDeriveSecrets_GH100(ConfidentialCompute *pConfCompute,
return NV_OK; return NV_OK;
} }
/*!
* Returns RM engine Id corresponding to a key space
*
* @param[in] pConfCompute : ConfidentialCompute pointer
* @param[in] keySpace : value of keyspace from cc_keystore.h
*/
RM_ENGINE_TYPE
confComputeGetEngineIdFromKeySpace_GH100
(
ConfidentialCompute *pConfCompute,
NvU32 keySpace
)
{
if (keySpace == CC_KEYSPACE_GSP)
{
return RM_ENGINE_TYPE_NULL;
}
if (keySpace == CC_KEYSPACE_SEC2)
{
return RM_ENGINE_TYPE_SEC2;
}
NvU32 lceId = 2; // TODO: Use NV_SSE_SCE_CC_CAPABLE_LCE_ID_START;
switch (keySpace)
{
case CC_KEYSPACE_LCE0:
lceId += 0;
break;
case CC_KEYSPACE_LCE1:
lceId += 1;
break;
case CC_KEYSPACE_LCE2:
lceId += 2;
break;
case CC_KEYSPACE_LCE3:
lceId += 3;
break;
case CC_KEYSPACE_LCE4:
lceId += 4;
break;
case CC_KEYSPACE_LCE5:
lceId += 5;
break;
case CC_KEYSPACE_LCE6:
lceId += 6;
break;
case CC_KEYSPACE_LCE7:
lceId += 7;
break;
default:
return RM_ENGINE_TYPE_NULL;
}
return RM_ENGINE_TYPE_COPY(lceId);
}
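A minimal usage sketch (hedged; pGpu, pKernelFifo, and pConfCompute are assumed in scope inside an NV_STATUS-returning context) showing how this keyspace-to-engine mapping is consumed, mirroring confComputeInitChannelIterForKey_IMPL added later in this change:
RM_ENGINE_TYPE engineType = confComputeGetEngineIdFromKeySpace_HAL(pConfCompute, CC_KEYSPACE_LCE0);
NvU32 runlistId;
// GSP has no channel engine, so the NULL sentinel must be rejected first.
NV_ASSERT_OR_RETURN(engineType != RM_ENGINE_TYPE_NULL, NV_ERR_INVALID_ARGUMENT);
// Translate the RM engine type to the runlist that serves its channels.
NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate(pGpu, pKernelFifo, ENGINE_INFO_TYPE_RM_ENGINE_TYPE, engineType,
                                            ENGINE_INFO_TYPE_RUNLIST, &runlistId));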
/*!
* Checks if key is kernel key or user key
*
* @param[in] pConfCompute : ConfidentialCompute pointer
* @param[in] globalKeyId : global key ID
*/
NvBool
confComputeGlobalKeyIsKernelPriv_GH100
(
ConfidentialCompute *pConfCompute,
NvU32 globalKeyId
)
{
NvU32 keySpace = CC_GKEYID_GET_KEYSPACE(globalKeyId);
NvU32 localKeyId = CC_GKEYID_GET_LKEYID(globalKeyId);
if (keySpace == CC_KEYSPACE_GSP)
{
return NV_TRUE;
}
else if (keySpace == CC_KEYSPACE_SEC2)
{
switch (localKeyId)
{
case CC_LKEYID_CPU_SEC2_DATA_KERN:
case CC_LKEYID_CPU_SEC2_HMAC_KERN:
return NV_TRUE;
}
}
else
{
NV_ASSERT((keySpace >= CC_KEYSPACE_LCE0) && (keySpace < CC_KEYSPACE_SIZE));
switch (localKeyId)
{
case CC_LKEYID_LCE_H2D_KERN:
case CC_LKEYID_LCE_D2H_KERN:
return NV_TRUE;
}
}
return NV_FALSE;
}
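//
// Quick reference, derived from the branches above:
//   CC_KEYSPACE_GSP                        -> always kernel-priv
//   CC_KEYSPACE_SEC2 DATA/HMAC _KERN keys  -> kernel-priv
//   CC_KEYSPACE_LCE* H2D/D2H _KERN keys    -> kernel-priv
//   all other local key ids                -> user
//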
NV_STATUS confComputeUpdateSecrets_GH100(ConfidentialCompute *pConfCompute,
NvU32 globalKeyId)
{
OBJGPU *pGpu = ENG_GET_GPU(pConfCompute);
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
NvU32 h2dKey, d2hKey;
NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS params = {0};
// GSP keys are currently not supported.
NV_ASSERT(CC_GKEYID_GET_KEYSPACE(globalKeyId) != CC_KEYSPACE_GSP);
confComputeGetKeyPairByKey(pConfCompute, globalKeyId, &h2dKey, &d2hKey);
params.globalH2DKey = h2dKey;
NV_ASSERT_OK_OR_RETURN(pRmApi->Control(
pRmApi,
pGpu->hInternalClient,
pGpu->hInternalSubdevice,
NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_ROTATE_KEYS,
&params,
sizeof(NV2080_CTRL_INTERNAL_CONF_COMPUTE_ROTATE_KEYS_PARAMS)));
CHANNEL_ITERATOR iterator;
KernelChannel *pKernelChannel;
NV_ASSERT_OK_OR_RETURN(confComputeInitChannelIterForKey(pGpu, pConfCompute, globalKeyId, &iterator));
while (confComputeGetNextChannelForKey(pGpu, pConfCompute, &iterator, globalKeyId, &pKernelChannel) == NV_OK)
{
NV_ASSERT_OK_OR_RETURN(confComputeKeyStoreRetrieveViaChannel(
pConfCompute, pKernelChannel, ROTATE_IV_ALL_VALID, NV_FALSE, &pKernelChannel->clientKmb));
// After key rotation channel counter stays the same but message counter is cleared.
pKernelChannel->clientKmb.encryptBundle.iv[0] = 0x00000000;
if ((CC_GKEYID_GET_KEYSPACE(globalKeyId) >= CC_KEYSPACE_LCE0) &&
(CC_GKEYID_GET_KEYSPACE(globalKeyId) <= CC_KEYSPACE_LCE7))
{
pKernelChannel->clientKmb.decryptBundle.iv[0] = 0x00000000;
}
else
{
pKernelChannel->clientKmb.hmacBundle.nonce[0] = 0x00000000;
pKernelChannel->clientKmb.hmacBundle.nonce[1] = 0x00000000;
pKernelChannel->clientKmb.hmacBundle.nonce[2] = 0x00000000;
pKernelChannel->clientKmb.hmacBundle.nonce[3] = 0x00000000;
pKernelChannel->clientKmb.hmacBundle.nonce[4] = 0x00000000;
pKernelChannel->clientKmb.hmacBundle.nonce[5] = 0x00000000;
}
}
return NV_OK;
}

View File

@ -0,0 +1,546 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NVOC_CONF_COMPUTE_H_PRIVATE_ACCESS_ALLOWED
#include "kernel/gpu/conf_compute/conf_compute.h"
#include "class/cl2080.h"
#include "kernel/gpu/mem_mgr/mem_mgr.h"
#include "class/clc86fsw.h"
#include "ctrl/ctrl2080/ctrl2080internal.h"
#include "nvrm_registry.h"
static void initKeyRotationRegistryOverrides(OBJGPU *pGpu, ConfidentialCompute *pConfCompute);
static void getKeyPairForKeySpace(NvU32 keySpace, NvBool bKernel, NvU32 *pGlobalH2DKey, NvU32 *pGlobalD2HKey);
static NV_STATUS triggerKeyRotationByKeyPair(OBJGPU *pGpu, ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey);
static NV_STATUS calculateEncryptionStatsByKeyPair(OBJGPU *pGpu, ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey);
static NV_STATUS notifyKeyRotationByKeyPair(OBJGPU *pGpu, ConfidentialCompute *pConfCompute, NvU32 h2dKey);
static NvBool confComputeIsLowerThresholdCrossed(ConfidentialCompute *pConfCompute, KEY_ROTATION_STATS_INFO *pH2DInfo,
KEY_ROTATION_STATS_INFO *pD2HInfo);
static NvBool confComputeIsUpperThresholdCrossed(ConfidentialCompute *pConfCompute, KEY_ROTATION_STATS_INFO *pH2DInfo,
KEY_ROTATION_STATS_INFO *pD2HInfo);
static NV_STATUS keyRotationTimeoutCallback(OBJGPU *pGpu, OBJTMR *pTmr, TMR_EVENT *pTmrEvent);
/*!
* Conditionally enables key rotation support
*
* @param[in] pGpu : OBJGPU Pointer
* @param[in] pConfCompute : ConfidentialCompute pointer
*/
NV_STATUS
confComputeEnableKeyRotationSupport_GH100
(
OBJGPU *pGpu,
ConfidentialCompute *pConfCompute
)
{
if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_ENABLED) &&
pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_CC_FEATURE_ENABLED))
{
pConfCompute->setProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED, NV_TRUE);
//
// TODO: sbellock default values need to be defined and set separately
// for prod flow based on attacker advantage table.
//
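// Note: NV_U64_MAX on the byte thresholds below makes the byte-count checks
// in confComputeIsLowerThresholdCrossed()/confComputeIsUpperThresholdCrossed()
// unsatisfiable, so only the encrypt-op counts can trigger rotation by default.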
pConfCompute->lowerThreshold.totalBytesEncrypted = NV_U64_MAX;
pConfCompute->lowerThreshold.totalEncryptOps = 500;
pConfCompute->upperThreshold.totalBytesEncrypted = NV_U64_MAX;
pConfCompute->upperThreshold.totalEncryptOps = 1000;
initKeyRotationRegistryOverrides(pGpu, pConfCompute);
}
return NV_OK;
}
/*!
* Enables/disables key rotation by setting up the 1 Hz callback for key rotation
*
* @param[in] pGpu : OBJGPU Pointer
* @param[in] pConfCompute : ConfidentialCompute pointer
* @param[in] bEnable : If key rotation should be enabled
*/
NV_STATUS
confComputeEnableKeyRotationCallback_GH100
(
OBJGPU *pGpu,
ConfidentialCompute *pConfCompute,
NvBool bEnable
)
{
if (bEnable)
{
// Hook into the 1 Hz OS timer
osSchedule1HzCallback(pGpu,
confComputeKeyRotationCallback,
NULL /* pData */,
NV_OS_1HZ_REPEAT);
}
else
{
osRemove1HzCallback(pGpu,
confComputeKeyRotationCallback,
NULL /* pData */);
}
return NV_OK;
}
/*!
* Calculates encryption statistics and triggers key rotation if thresholds are crossed.
*
* @param[in] pGpu : OBJGPU Pointer
* @param[in] pConfCompute : ConfidentialCompute pointer
*/
NV_STATUS
confComputeTriggerKeyRotation_GH100
(
OBJGPU *pGpu,
ConfidentialCompute *pConfCompute
)
{
NV_STATUS tempStatus, status = NV_OK;
NvU32 globalD2HKey, globalH2DKey, keySpace;
if ((pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED)) &&
(pConfCompute->keyRotationChannelRefCount > 0))
{
pConfCompute->keyRotationCallbackCount++;
NV_PRINTF(LEVEL_ERROR, "DUMMY KR: COUNT = %d\n", pConfCompute->keyRotationCallbackCount);
}
for (keySpace = 0; keySpace < CC_KEYSPACE_SIZE; keySpace++)
{
if (keySpace == CC_KEYSPACE_GSP)
continue;
if ((pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED)) &&
!(pConfCompute->keyRotationEnableMask & NVBIT(keySpace)))
{
NV_PRINTF(LEVEL_INFO, "Skipping keyspace = %d since mask = 0x%x\n", keySpace, pConfCompute->keyRotationEnableMask);
continue;
}
// calculate kernel channels stats for keyspace
if ((!pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED)) ||
(FLD_TEST_DRF(_REG_STR, _RM_CONF_COMPUTE_DUMMY_KEY_ROTATION, _KERNEL_KEYS, _YES, pConfCompute->keyRotationEnableMask)))
{
getKeyPairForKeySpace(keySpace, NV_TRUE, &globalH2DKey, &globalD2HKey);
tempStatus = triggerKeyRotationByKeyPair(pGpu, pConfCompute, globalH2DKey, globalD2HKey);
if (tempStatus != NV_OK)
{
NV_ASSERT(tempStatus == NV_OK);
NV_PRINTF(LEVEL_ERROR, "Failed to calculate encryption statistics for H2D key 0x%x with status 0x%x\n", globalH2DKey, tempStatus);
status = tempStatus;
}
}
// calculate user channels stats for keyspace
if ((!pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED)) ||
(FLD_TEST_DRF(_REG_STR, _RM_CONF_COMPUTE_DUMMY_KEY_ROTATION, _USER_KEYS, _YES, pConfCompute->keyRotationEnableMask)))
{
getKeyPairForKeySpace(keySpace, NV_FALSE, &globalH2DKey, &globalD2HKey);
tempStatus = triggerKeyRotationByKeyPair(pGpu, pConfCompute, globalH2DKey, globalD2HKey);
if (tempStatus != NV_OK)
{
NV_ASSERT(tempStatus == NV_OK);
NV_PRINTF(LEVEL_ERROR, "Failed to calculate encryption statistics for H2D key 0x%x with status 0x%x\n", globalH2DKey, tempStatus);
status = tempStatus;
}
}
}
return status;
}
static NV_STATUS
triggerKeyRotationByKeyPair
(
OBJGPU *pGpu,
ConfidentialCompute *pConfCompute,
NvU32 h2dKey,
NvU32 d2hKey
)
{
KEY_ROTATION_STATUS state;
NV_ASSERT_OK_OR_RETURN(confComputeGetKeyRotationStatus(pConfCompute, h2dKey, &state));
CHANNEL_ITERATOR iter = {0};
KernelChannel *pKernelChannel = NULL;
NvU32 h2dIndex, d2hIndex;
// we won't need this once we have encryption statistics since unused keys will have stats = 0
if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED))
{
NV_ASSERT_OK_OR_RETURN(confComputeInitChannelIterForKey(pGpu, pConfCompute, h2dKey, &iter));
if (confComputeGetNextChannelForKey(pGpu, pConfCompute, &iter, h2dKey, &pKernelChannel) != NV_OK)
{
//
// If this is the last key and we haven't done KR yet even after crossing upper threshold then
// it means there are no channels alive and we need to manually reset our counter
//
if ((h2dKey == CC_GKEYID_GEN(CC_KEYSPACE_LCE7, CC_LKEYID_LCE_H2D_USER)) &&
(pConfCompute->keyRotationCallbackCount > pConfCompute->upperThreshold.totalEncryptOps))
{
pConfCompute->keyRotationCallbackCount = 1;
}
return NV_OK;
}
}
//
// If key rotation is already scheduled because we crossed upper threshold or hit timeout
// then we don't need to update encryption statistics as they will be zeroed out soon.
//
if ((state == KEY_ROTATION_STATUS_FAILED_THRESHOLD) ||
(state == KEY_ROTATION_STATUS_FAILED_TIMEOUT))
{
return NV_OK;
}
//
// CC session doesn't exist if key rotation failed
// TODO CONFCOMP-984: RC all channels and other cleanup (kpadwal is working on adding this call)
//
if (state == KEY_ROTATION_STATUS_FAILED_ROTATION)
return NV_ERR_INVALID_STATE;
NV_ASSERT_OK_OR_RETURN(calculateEncryptionStatsByKeyPair(pGpu, pConfCompute, h2dKey, d2hKey));
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, h2dKey, &h2dIndex));
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, d2hKey, &d2hIndex));
if (confComputeIsUpperThresholdCrossed(pConfCompute, &pConfCompute->aggregateStats[h2dIndex],
&pConfCompute->aggregateStats[d2hIndex]))
{
NV_PRINTF(LEVEL_ERROR, "Crossed UPPER threshold for key = 0x%x\n", h2dKey);
NV_ASSERT_OK_OR_RETURN(confComputeSetKeyRotationStatus(pConfCompute, h2dKey, KEY_ROTATION_STATUS_FAILED_THRESHOLD));
NV_ASSERT_OK_OR_RETURN(confComputeScheduleKeyRotationWorkItem(pGpu, pConfCompute, h2dKey, d2hKey));
}
else if (confComputeIsLowerThresholdCrossed(pConfCompute, &pConfCompute->aggregateStats[h2dIndex],
&pConfCompute->aggregateStats[d2hIndex]))
{
NV_PRINTF(LEVEL_INFO, "Crossed LOWER threshold for key = 0x%x\n", h2dKey);
if (state == KEY_ROTATION_STATUS_IDLE)
{
NV_ASSERT_OK_OR_RETURN(confComputeSetKeyRotationStatus(pConfCompute, h2dKey, KEY_ROTATION_STATUS_PENDING));
//
// Start the timeout timer once lower threshold is crossed.
//
// If timer is not already created then create it now. Else, just schedule a callback.
// make sure callback is canceled if we schedule the KR task (after crossing lower or upper threshold)
// make sure all these timer events are deleted as part of RM shutdown
//
OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
if (pConfCompute->ppKeyRotationTimer[h2dIndex] == NULL)
{
NvU32 *pH2DKey = portMemAllocNonPaged(sizeof(NvU32));
NV_ASSERT_OR_RETURN(pH2DKey != NULL, NV_ERR_NO_MEMORY);
*pH2DKey = h2dKey;
NV_ASSERT_OK_OR_RETURN(tmrEventCreate(pTmr, &pConfCompute->ppKeyRotationTimer[h2dIndex],
keyRotationTimeoutCallback, (void*)pH2DKey, TMR_FLAGS_NONE));
}
//
// Schedule first callback.
// TODO sbellock: don't use hardcoded 12.5 sec value
//
NV_ASSERT_OK_OR_RETURN(tmrEventScheduleRelSec(pTmr, pConfCompute->ppKeyRotationTimer[h2dIndex], 12.5));
//
// Notify clients of pending KR
// We can't schedule a workitem for this since it may get scheduled too late and
// we might have already crossed the upper threshold by then.
//
NV_ASSERT_OK_OR_RETURN(notifyKeyRotationByKeyPair(pGpu, pConfCompute, h2dKey));
}
}
return NV_OK;
}
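For orientation, a hedged sketch of the rotation state machine, assembled from this function, keyRotationTimeoutCallback below, and the key rotation workitem added elsewhere in this change:
//
//   IDLE             -> PENDING           lower threshold; clients notified,
//                                         timeout timer armed
//   IDLE/PENDING     -> FAILED_THRESHOLD  upper threshold; workitem scheduled
//   PENDING          -> FAILED_TIMEOUT    timeout fired; workitem scheduled
//   PENDING/FAILED_* -> IN_PROGRESS       workitem runs (FAILED_* paths RC
//                                         non-idle channels first)
//   IN_PROGRESS      -> IDLE              rotation succeeded, stats cleared
//   IN_PROGRESS      -> FAILED_ROTATION   rotation failed (TODO CONFCOMP-984)
//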
static NV_STATUS
keyRotationTimeoutCallback
(
OBJGPU *pGpu,
OBJTMR *pTmr,
TMR_EVENT *pEvent
)
{
ConfidentialCompute *pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
NvU32 h2dKey, d2hKey;
NvU32 key = *(NvU32*)pEvent->pUserData;
confComputeGetKeyPairByKey(pConfCompute, key, &h2dKey, &d2hKey);
NV_ASSERT_OK_OR_RETURN(confComputeSetKeyRotationStatus(pConfCompute, h2dKey, KEY_ROTATION_STATUS_FAILED_TIMEOUT));
return confComputeScheduleKeyRotationWorkItem(pGpu, pConfCompute, h2dKey, d2hKey);
}
static NV_STATUS
calculateEncryptionStatsByKeyPair
(
OBJGPU *pGpu,
ConfidentialCompute *pConfCompute,
NvU32 h2dKey,
NvU32 d2hKey
)
{
CHANNEL_ITERATOR iter = {0};
NvU64 totalH2Dbytes = 0;
NvU64 totalD2Hbytes = 0;
NvU64 totalEncryptOpsH2D = 0;
NvU64 totalEncryptOpsD2H = 0;
NvU32 h2dIndex, d2hIndex;
// Iterate through all channels using the key pair and compute totals
KernelChannel *pKernelChannel = NULL;
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, h2dKey, &h2dIndex));
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, d2hKey, &d2hIndex));
NV_ASSERT_OK_OR_RETURN(confComputeInitChannelIterForKey(pGpu, pConfCompute, h2dKey, &iter));
while(confComputeGetNextChannelForKey(pGpu, pConfCompute, &iter, h2dKey, &pKernelChannel) == NV_OK)
{
// TODO: Make this fatal
if (pKernelChannel->pEncStatsBufMemDesc == NULL)
continue;
CC_CRYPTOBUNDLE_STATS *pEncStats = pKernelChannel->pEncStatsBuf;
if (pEncStats == NULL)
{
NV_ASSERT(pEncStats != NULL);
NV_PRINTF(LEVEL_ERROR, "Failed to get stats for chid = 0x%x RM engineId = 0x%x\n",
kchannelGetDebugTag(pKernelChannel), kchannelGetEngineType(pKernelChannel));
return NV_ERR_INVALID_STATE;
}
totalH2Dbytes += pEncStats->bytesEncryptedH2D;
totalD2Hbytes += pEncStats->bytesEncryptedD2H;
totalEncryptOpsH2D += pEncStats->numEncryptionsH2D;
totalEncryptOpsD2H += pEncStats->numEncryptionsD2H;
NV_PRINTF(LEVEL_INFO, "Encryption stats for chid 0x%x with h2dKey 0x%x\n", kchannelGetDebugTag(pKernelChannel), h2dKey);
NV_PRINTF(LEVEL_INFO, "Total h2d bytes encrypted = 0x%llx\n", pEncStats->bytesEncryptedH2D);
NV_PRINTF(LEVEL_INFO, "Total d2h bytes encrypted = 0x%llx\n", pEncStats->bytesEncryptedD2H);
NV_PRINTF(LEVEL_INFO, "Total h2d encrypt ops = 0x%llx\n", pEncStats->numEncryptionsH2D);
NV_PRINTF(LEVEL_INFO, "Total d2h encrypt ops = 0x%llx\n", pEncStats->numEncryptionsD2H);
}
// Add stats for freed channels
totalH2Dbytes += pConfCompute->freedChannelAggregateStats[h2dIndex].totalBytesEncrypted;
totalEncryptOpsH2D += pConfCompute->freedChannelAggregateStats[h2dIndex].totalEncryptOps;
totalD2Hbytes += pConfCompute->freedChannelAggregateStats[d2hIndex].totalBytesEncrypted;
totalEncryptOpsD2H += pConfCompute->freedChannelAggregateStats[d2hIndex].totalEncryptOps;
pConfCompute->aggregateStats[h2dIndex].totalBytesEncrypted = totalH2Dbytes;
pConfCompute->aggregateStats[h2dIndex].totalEncryptOps = totalEncryptOpsH2D;
pConfCompute->aggregateStats[d2hIndex].totalBytesEncrypted = totalD2Hbytes;
pConfCompute->aggregateStats[d2hIndex].totalEncryptOps = totalEncryptOpsD2H;
if ((pConfCompute->aggregateStats[h2dIndex].totalBytesEncrypted > 0) ||
(pConfCompute->aggregateStats[d2hIndex].totalBytesEncrypted > 0))
{
NV_PRINTF(LEVEL_INFO, "Aggregate stats for h2dKey 0x%x and d2hKey 0x%x\n", h2dKey, d2hKey);
NV_PRINTF(LEVEL_INFO, "Total h2d bytes encrypted = 0x%llx\n", pConfCompute->aggregateStats[h2dIndex].totalBytesEncrypted);
NV_PRINTF(LEVEL_INFO, "Total d2h bytes encrypted = 0x%llx\n", pConfCompute->aggregateStats[h2dIndex].totalEncryptOps);
NV_PRINTF(LEVEL_INFO, "Total h2d encrypt ops = 0x%llx\n", pConfCompute->aggregateStats[d2hIndex].totalBytesEncrypted);
NV_PRINTF(LEVEL_INFO, "Total d2h encrypt ops = 0x%llx\n", pConfCompute->aggregateStats[d2hIndex].totalEncryptOps);
}
return NV_OK;
}
static NvBool
confComputeIsUpperThresholdCrossed
(
ConfidentialCompute *pConfCompute,
KEY_ROTATION_STATS_INFO *pH2DInfo,
KEY_ROTATION_STATS_INFO *pD2HInfo
)
{
if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED))
{
return (pConfCompute->keyRotationCallbackCount > pConfCompute->upperThreshold.totalEncryptOps);
}
else
{
if ((pH2DInfo->totalBytesEncrypted > pConfCompute->upperThreshold.totalBytesEncrypted) ||
(pH2DInfo->totalEncryptOps > pConfCompute->upperThreshold.totalEncryptOps))
{
return NV_TRUE;
}
else if ((pD2HInfo->totalBytesEncrypted > pConfCompute->upperThreshold.totalBytesEncrypted) ||
(pD2HInfo->totalEncryptOps > pConfCompute->upperThreshold.totalEncryptOps))
{
return NV_TRUE;
}
}
return NV_FALSE;
}
static NvBool
confComputeIsLowerThresholdCrossed
(
ConfidentialCompute *pConfCompute,
KEY_ROTATION_STATS_INFO *pH2DInfo,
KEY_ROTATION_STATS_INFO *pD2HInfo
)
{
if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED))
{
return (pConfCompute->keyRotationCallbackCount > pConfCompute->lowerThreshold.totalEncryptOps);
}
else
{
if ((pH2DInfo->totalBytesEncrypted > pConfCompute->lowerThreshold.totalBytesEncrypted) ||
(pH2DInfo->totalEncryptOps > pConfCompute->lowerThreshold.totalEncryptOps))
{
return NV_TRUE;
}
else if ((pD2HInfo->totalBytesEncrypted > pConfCompute->lowerThreshold.totalBytesEncrypted) ||
(pD2HInfo->totalEncryptOps > pConfCompute->lowerThreshold.totalEncryptOps))
{
return NV_TRUE;
}
}
return NV_FALSE;
}
static void
getKeyPairForKeySpace(NvU32 keySpace, NvBool bKernel, NvU32 *pGlobalH2DKey, NvU32 *pGlobalD2HKey)
{
NvU32 localH2DKey, localD2HKey;
if (keySpace == CC_KEYSPACE_SEC2)
{
if (bKernel)
{
localH2DKey = CC_LKEYID_CPU_SEC2_DATA_KERN;
localD2HKey = CC_LKEYID_CPU_SEC2_HMAC_KERN;
}
else
{
localH2DKey = CC_LKEYID_CPU_SEC2_DATA_USER;
localD2HKey = CC_LKEYID_CPU_SEC2_HMAC_USER;
}
}
else
{
if (bKernel)
{
localH2DKey = CC_LKEYID_LCE_H2D_KERN;
localD2HKey = CC_LKEYID_LCE_D2H_KERN;
}
else
{
localH2DKey = CC_LKEYID_LCE_H2D_USER;
localD2HKey = CC_LKEYID_LCE_D2H_USER;
}
}
*pGlobalH2DKey = CC_GKEYID_GEN(keySpace, localH2DKey);
*pGlobalD2HKey = CC_GKEYID_GEN(keySpace, localD2HKey);
}
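For example (hedged, but following the branches above directly), the kernel-priv SEC2 pair resolves to:
// *pGlobalH2DKey == CC_GKEYID_GEN(CC_KEYSPACE_SEC2, CC_LKEYID_CPU_SEC2_DATA_KERN)
// *pGlobalD2HKey == CC_GKEYID_GEN(CC_KEYSPACE_SEC2, CC_LKEYID_CPU_SEC2_HMAC_KERN)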
static NV_STATUS
notifyKeyRotationByKeyPair
(
OBJGPU *pGpu,
ConfidentialCompute *pConfCompute,
NvU32 h2dKey
)
{
KEY_ROTATION_STATUS status;
CHANNEL_ITERATOR iter = {0};
KernelChannel *pKernelChannel = NULL;
NvU32 notifyStatus = 0;
NV_ASSERT_OK_OR_RETURN(confComputeGetKeyRotationStatus(pConfCompute, h2dKey, &status));
//
// We expect this work item to be called soon after RM detects lower threshold is
// crossed and schedules this.
//
NV_ASSERT_OR_RETURN(status == KEY_ROTATION_STATUS_PENDING, NV_ERR_INVALID_STATE);
// notify all channels
NV_ASSERT_OK_OR_RETURN(confComputeInitChannelIterForKey(pGpu, pConfCompute, h2dKey, &iter));
while(confComputeGetNextChannelForKey(pGpu, pConfCompute, &iter, h2dKey, &pKernelChannel) == NV_OK)
{
// update notifier memory
notifyStatus =
FLD_SET_DRF(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _IN_PROGRESS, _TRUE, notifyStatus);
notifyStatus =
FLD_SET_DRF_NUM(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _VALUE, status, notifyStatus);
NV_ASSERT_OK_OR_RETURN(kchannelUpdateNotifierMem(pKernelChannel, NV_CHANNELGPFIFO_NOTIFICATION_TYPE_KEY_ROTATION_STATUS,
0, 0, notifyStatus));
NV_PRINTF(LEVEL_INFO, "chid 0x%x has pending key rotation, writing notifier with val 0x%x\n", kchannelGetDebugTag(pKernelChannel), (NvU32)notifyStatus);
// send events to clients if registered
kchannelNotifyEvent(pKernelChannel, NVC86F_NOTIFIERS_KEY_ROTATION, 0, status, NULL, 0);
}
return NV_OK;
}
static void
initKeyRotationRegistryOverrides
(
OBJGPU *pGpu,
ConfidentialCompute *pConfCompute
)
{
//
// Temp CONFCOMP-984: This will be removed once all RM clients support
// key rotation by default.
//
if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED))
{
NvU32 data;
if (osReadRegistryDword(pGpu, NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION, &data) == NV_OK)
{
if (FLD_TEST_DRF(_REG_STR, _RM_CONF_COMPUTE_DUMMY_KEY_ROTATION, _ENABLED, _YES, data))
{
NV_PRINTF(LEVEL_INFO, "Confidential Compute dummy key rotation enabled via regkey override.\n");
pConfCompute->setProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED, NV_TRUE);
}
else if (FLD_TEST_DRF(_REG_STR, _RM_CONF_COMPUTE_DUMMY_KEY_ROTATION, _ENABLED, _NO, data))
{
NV_PRINTF(LEVEL_INFO, "Confidential Compute dummy key rotation disabled via regkey override.\n");
pConfCompute->setProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED, NV_FALSE);
}
if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED))
{
pConfCompute->keyRotationEnableMask = data;
//
// Set lower and upper thresholds to default values
// this will go away once we stop supporting dummy KR
//
pConfCompute->lowerThreshold.totalBytesEncrypted = NV_U64_MAX;
pConfCompute->lowerThreshold.totalEncryptOps = KEY_ROTATION_LOWER_THRESHOLD;
pConfCompute->upperThreshold.totalBytesEncrypted = NV_U64_MAX;
pConfCompute->upperThreshold.totalEncryptOps = KEY_ROTATION_UPPER_THRESHOLD;
if (osReadRegistryDword(pGpu, NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_LOWER_THRESHOLD, &data) == NV_OK)
{
pConfCompute->lowerThreshold.totalEncryptOps = data;
}
if (osReadRegistryDword(pGpu, NV_REG_STR_RM_CONF_COMPUTE_DUMMY_KEY_ROTATION_UPPER_THRESHOLD, &data) == NV_OK)
{
pConfCompute->upperThreshold.totalEncryptOps = data;
}
}
}
}
}
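//
// Note: the same regkey DWORD doubles as keyRotationEnableMask, so the
// per-keyspace NVBIT(keySpace) bits and the _KERNEL_KEYS/_USER_KEYS fields
// tested in confComputeTriggerKeyRotation_GH100 are all packed into this
// one registry value.
//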

View File

@ -29,6 +29,8 @@
#include "kernel/gpu/spdm/libspdm_includes.h" #include "kernel/gpu/spdm/libspdm_includes.h"
#include "hal/library/cryptlib.h" #include "hal/library/cryptlib.h"
//#include "hopper/gh100/dev_se_seb.h"
// //
// The keystore holds keys, IV masks, and IVs for the LCE, SEC2, and GSP channels. It owns the channel // The keystore holds keys, IV masks, and IVs for the LCE, SEC2, and GSP channels. It owns the channel
// counter for each key and helps prevent IV reuse. The keystore is comprised of key slots. A key // counter for each key and helps prevent IV reuse. The keystore is comprised of key slots. A key
@ -70,8 +72,6 @@ static NV_STATUS getKeyIdLce(KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotat
static NV_STATUS getKeyIdSec2(KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation, static NV_STATUS getKeyIdSec2(KernelChannel *pKernelChannel, ROTATE_IV_TYPE rotateOperation,
NvU16 *keyId); NvU16 *keyId);
static NV_STATUS getKeyspaceLce(KernelChannel *pKernelChannel, NvU16 *keyspace); static NV_STATUS getKeyspaceLce(KernelChannel *pKernelChannel, NvU16 *keyspace);
static NvU32 getKeySlotFromGlobalKeyId (NvU32 globalKeyId);
static NvU32 getKeyspaceSize(NvU16 keyspace);
NV_STATUS NV_STATUS
confComputeKeyStoreInit_GH100(ConfidentialCompute *pConfCompute) confComputeKeyStoreInit_GH100(ConfidentialCompute *pConfCompute)
@ -140,13 +140,13 @@ void
NV_STATUS NV_STATUS
confComputeKeyStoreDeriveKey_GH100(ConfidentialCompute *pConfCompute, NvU32 globalKeyId) confComputeKeyStoreDeriveKey_GH100(ConfidentialCompute *pConfCompute, NvU32 globalKeyId)
{ {
const NvU32 slotIndex = getKeySlotFromGlobalKeyId(globalKeyId); NvU32 slotIndex;
cryptoBundle_t (*pKeyStore)[]; cryptoBundle_t (*pKeyStore)[];
uint8_t * pKey = NULL; uint8_t *pKey = NULL;
size_t keySize = 0; size_t keySize = 0;
pKeyStore = pConfCompute->m_keySlot; pKeyStore = pConfCompute->m_keySlot;
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, globalKeyId, &slotIndex));
NV_PRINTF(LEVEL_INFO, "Deriving key for global key ID %x.\n", globalKeyId); NV_PRINTF(LEVEL_INFO, "Deriving key for global key ID %x.\n", globalKeyId);
if ((globalKeyId == CC_GKEYID_GEN(CC_KEYSPACE_SEC2, CC_LKEYID_CPU_SEC2_HMAC_USER)) || if ((globalKeyId == CC_GKEYID_GEN(CC_KEYSPACE_SEC2, CC_LKEYID_CPU_SEC2_HMAC_USER)) ||
@ -197,11 +197,11 @@ confComputeKeyStoreDepositIvMask_GH100
void *ivMask void *ivMask
) )
{ {
NvU32 slotNumber = getKeySlotFromGlobalKeyId(globalKeyId); NvU32 slotNumber;
cryptoBundle_t (*pKeyStore)[]; cryptoBundle_t (*pKeyStore)[];
pKeyStore = pConfCompute->m_keySlot; pKeyStore = pConfCompute->m_keySlot;
NV_ASSERT_OR_RETURN_VOID(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, globalKeyId, &slotNumber) == NV_OK);
NV_PRINTF(LEVEL_INFO, "Depositing IV mask for global key ID %x.\n", globalKeyId); NV_PRINTF(LEVEL_INFO, "Depositing IV mask for global key ID %x.\n", globalKeyId);
portMemCopy((*pKeyStore)[slotNumber].cryptBundle.ivMask, portMemCopy((*pKeyStore)[slotNumber].cryptBundle.ivMask,
@ -223,7 +223,7 @@ confComputeKeyStoreRetrieveViaChannel_GH100
ConfidentialCompute *pConfCompute, ConfidentialCompute *pConfCompute,
KernelChannel *pKernelChannel, KernelChannel *pKernelChannel,
ROTATE_IV_TYPE rotateOperation, ROTATE_IV_TYPE rotateOperation,
NvBool includeSecrets, NvBool bIncludeIvOrNonce,
CC_KMB *keyMaterialBundle CC_KMB *keyMaterialBundle
) )
{ {
@ -261,7 +261,7 @@ confComputeKeyStoreRetrieveViaChannel_GH100
} }
return confComputeKeyStoreRetrieveViaKeyId_GH100(pConfCompute, globalKeyId, rotateOperation, return confComputeKeyStoreRetrieveViaKeyId_GH100(pConfCompute, globalKeyId, rotateOperation,
includeSecrets, keyMaterialBundle); bIncludeIvOrNonce, keyMaterialBundle);
} }
NV_STATUS NV_STATUS
@ -270,15 +270,16 @@ confComputeKeyStoreRetrieveViaKeyId_GH100
ConfidentialCompute *pConfCompute, ConfidentialCompute *pConfCompute,
NvU32 globalKeyId, NvU32 globalKeyId,
ROTATE_IV_TYPE rotateOperation, ROTATE_IV_TYPE rotateOperation,
NvBool includeSecrets, NvBool bIncludeIvOrNonce,
CC_KMB *keyMaterialBundle CC_KMB *keyMaterialBundle
) )
{ {
NvU32 slotNumber = getKeySlotFromGlobalKeyId(globalKeyId); NvU32 slotNumber;
cryptoBundle_t (*pKeyStore)[]; cryptoBundle_t (*pKeyStore)[];
pKeyStore = pConfCompute->m_keySlot; pKeyStore = pConfCompute->m_keySlot;
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, globalKeyId, &slotNumber));
NV_PRINTF(LEVEL_INFO, "Retrieving KMB from slot number = %d and type is %d.\n", NV_PRINTF(LEVEL_INFO, "Retrieving KMB from slot number = %d and type is %d.\n",
slotNumber, (*pKeyStore)[slotNumber].type); slotNumber, (*pKeyStore)[slotNumber].type);
@ -287,7 +288,8 @@ confComputeKeyStoreRetrieveViaKeyId_GH100
slotNumber--; slotNumber--;
} }
if ((rotateOperation == ROTATE_IV_ENCRYPT) || (rotateOperation == ROTATE_IV_ALL_VALID)) if (bIncludeIvOrNonce &&
((rotateOperation == ROTATE_IV_ENCRYPT) || (rotateOperation == ROTATE_IV_ALL_VALID)))
{ {
if (checkSlot(pConfCompute, slotNumber) != NV_OK) if (checkSlot(pConfCompute, slotNumber) != NV_OK)
{ {
@ -297,8 +299,9 @@ confComputeKeyStoreRetrieveViaKeyId_GH100
} }
} }
if ((rotateOperation == ROTATE_IV_DECRYPT) || (rotateOperation == ROTATE_IV_ALL_VALID) || if (bIncludeIvOrNonce &&
(rotateOperation == ROTATE_IV_HMAC)) ((rotateOperation == ROTATE_IV_DECRYPT) || (rotateOperation == ROTATE_IV_ALL_VALID) ||
(rotateOperation == ROTATE_IV_HMAC)))
{ {
if (checkSlot(pConfCompute, slotNumber + 1) != NV_OK) if (checkSlot(pConfCompute, slotNumber + 1) != NV_OK)
{ {
@ -310,48 +313,68 @@ confComputeKeyStoreRetrieveViaKeyId_GH100
if ((rotateOperation == ROTATE_IV_ENCRYPT) || (rotateOperation == ROTATE_IV_ALL_VALID)) if ((rotateOperation == ROTATE_IV_ENCRYPT) || (rotateOperation == ROTATE_IV_ALL_VALID))
{ {
incrementChannelCounter(pConfCompute, slotNumber); if (bIncludeIvOrNonce)
{
incrementChannelCounter(pConfCompute, slotNumber);
}
if (includeSecrets) if (bIncludeIvOrNonce)
{ {
keyMaterialBundle->encryptBundle = (*pKeyStore)[slotNumber].cryptBundle; keyMaterialBundle->encryptBundle = (*pKeyStore)[slotNumber].cryptBundle;
} }
else else
{ {
portMemCopy(keyMaterialBundle->encryptBundle.iv, sizeof(keyMaterialBundle->encryptBundle.iv), portMemCopy(keyMaterialBundle->encryptBundle.key,
(*pKeyStore)[slotNumber].cryptBundle.iv, CC_AES_256_GCM_IV_SIZE_BYTES); sizeof(keyMaterialBundle->encryptBundle.key),
(*pKeyStore)[slotNumber].cryptBundle.key,
sizeof((*pKeyStore)[slotNumber].cryptBundle.key));
portMemCopy(keyMaterialBundle->encryptBundle.ivMask,
sizeof(keyMaterialBundle->encryptBundle.ivMask),
(*pKeyStore)[slotNumber].cryptBundle.ivMask,
sizeof((*pKeyStore)[slotNumber].cryptBundle.ivMask));
} }
} }
if ((rotateOperation == ROTATE_IV_DECRYPT) || (rotateOperation == ROTATE_IV_ALL_VALID) || if ((rotateOperation == ROTATE_IV_DECRYPT) || (rotateOperation == ROTATE_IV_ALL_VALID) ||
(rotateOperation == ROTATE_IV_HMAC)) (rotateOperation == ROTATE_IV_HMAC))
{ {
incrementChannelCounter(pConfCompute, slotNumber + 1); if (bIncludeIvOrNonce)
{
incrementChannelCounter(pConfCompute, slotNumber + 1);
}
switch ((*pKeyStore)[slotNumber + 1].type) switch ((*pKeyStore)[slotNumber + 1].type)
{ {
case NO_CHAN_COUNTER: case NO_CHAN_COUNTER:
case CRYPT_COUNTER: case CRYPT_COUNTER:
if (includeSecrets) if (bIncludeIvOrNonce)
{ {
keyMaterialBundle->decryptBundle = (*pKeyStore)[slotNumber + 1].cryptBundle; keyMaterialBundle->decryptBundle = (*pKeyStore)[slotNumber + 1].cryptBundle;
} }
else else
{ {
portMemCopy(keyMaterialBundle->decryptBundle.iv, sizeof(keyMaterialBundle->decryptBundle.iv), portMemCopy(keyMaterialBundle->decryptBundle.key,
(*pKeyStore)[slotNumber + 1].cryptBundle.iv, CC_AES_256_GCM_IV_SIZE_BYTES); sizeof(keyMaterialBundle->decryptBundle.key),
(*pKeyStore)[slotNumber + 1].cryptBundle.key,
sizeof((*pKeyStore)[slotNumber + 1].cryptBundle.key));
portMemCopy(keyMaterialBundle->decryptBundle.ivMask,
sizeof(keyMaterialBundle->decryptBundle.ivMask),
(*pKeyStore)[slotNumber + 1].cryptBundle.ivMask,
sizeof((*pKeyStore)[slotNumber + 1].cryptBundle.ivMask));
} }
keyMaterialBundle->bIsWorkLaunch = NV_FALSE; keyMaterialBundle->bIsWorkLaunch = NV_FALSE;
break; break;
case HMAC_COUNTER: case HMAC_COUNTER:
if (includeSecrets) if (bIncludeIvOrNonce)
{ {
keyMaterialBundle->hmacBundle = (*pKeyStore)[slotNumber + 1].hmacBundle; keyMaterialBundle->hmacBundle = (*pKeyStore)[slotNumber + 1].hmacBundle;
} }
else else
{ {
portMemCopy(keyMaterialBundle->hmacBundle.nonce, sizeof(keyMaterialBundle->hmacBundle.nonce), portMemCopy(keyMaterialBundle->hmacBundle.key,
(*pKeyStore)[slotNumber + 1].hmacBundle.nonce, CC_HMAC_NONCE_SIZE_BYTES); sizeof(keyMaterialBundle->hmacBundle.key),
(*pKeyStore)[slotNumber + 1].hmacBundle.key,
sizeof((*pKeyStore)[slotNumber + 1].hmacBundle.key));
} }
keyMaterialBundle->bIsWorkLaunch = NV_TRUE; keyMaterialBundle->bIsWorkLaunch = NV_TRUE;
break; break;
@ -364,17 +387,30 @@ confComputeKeyStoreRetrieveViaKeyId_GH100
NV_STATUS NV_STATUS
confComputeKeyStoreUpdateKey_GH100(ConfidentialCompute *pConfCompute, NvU32 globalKeyId) confComputeKeyStoreUpdateKey_GH100(ConfidentialCompute *pConfCompute, NvU32 globalKeyId)
{ {
const NvU32 slotIndex = getKeySlotFromGlobalKeyId(globalKeyId); NvU32 slotIndex;
cryptoBundle_t (*pKeyStore)[]; cryptoBundle_t (*pKeyStore)[];
NvU8 tempMem[CC_AES_256_GCM_KEY_SIZE_BYTES]; NvU8 tempMem[CC_AES_256_GCM_KEY_SIZE_BYTES];
NvU8 *pKey;
NvU32 keySize;
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, globalKeyId, &slotIndex));
NV_PRINTF(LEVEL_INFO, "Updating key with global key ID %x.\n", globalKeyId);
pKeyStore = pConfCompute->m_keySlot; pKeyStore = pConfCompute->m_keySlot;
NV_PRINTF(LEVEL_INFO, "Updating key with global key ID %x.\n", globalKeyId); if ((globalKeyId == CC_GKEYID_GEN(CC_KEYSPACE_SEC2, CC_LKEYID_CPU_SEC2_HMAC_USER)) ||
(globalKeyId == CC_GKEYID_GEN(CC_KEYSPACE_SEC2, CC_LKEYID_CPU_SEC2_HMAC_KERN)))
{
pKey = (uint8_t *)(*pKeyStore)[slotIndex].hmacBundle.key;
keySize = sizeof((*pKeyStore)[slotIndex].hmacBundle.key);
}
else
{
pKey = (uint8_t *)(*pKeyStore)[slotIndex].cryptBundle.key;
keySize = sizeof((*pKeyStore)[slotIndex].cryptBundle.key);
}
if (!libspdm_sha256_hash_all((const void *)(*pKeyStore)[slotIndex].cryptBundle.key, if (!libspdm_sha256_hash_all((const void *)pKey, keySize, tempMem))
sizeof((*pKeyStore)[slotIndex].cryptBundle.key),
tempMem))
{ {
return NV_ERR_FATAL_ERROR; return NV_ERR_FATAL_ERROR;
} }
@ -383,14 +419,76 @@ confComputeKeyStoreUpdateKey_GH100(ConfidentialCompute *pConfCompute, NvU32 glob
sizeof(tempMem), sizeof(tempMem),
(const uint8_t *)(CC_GKEYID_GET_STR(globalKeyId)), (const uint8_t *)(CC_GKEYID_GET_STR(globalKeyId)),
(size_t)portStringLength(CC_GKEYID_GET_STR(globalKeyId)), (size_t)portStringLength(CC_GKEYID_GET_STR(globalKeyId)),
(uint8_t *)(*pKeyStore)[slotIndex].cryptBundle.key, pKey,
sizeof((*pKeyStore)[slotIndex].cryptBundle.key))) keySize));
{ {
return NV_ERR_FATAL_ERROR; return NV_ERR_FATAL_ERROR;
} }
portMemSet(tempMem, 0, (NvLength) sizeof(tempMem)); portMemSet(tempMem, 0, (NvLength) sizeof(tempMem));
// LCEs will return an error / interrupt if the key is all 0s.
if ((CC_GKEYID_GET_KEYSPACE(globalKeyId) >= CC_KEYSPACE_LCE0) &&
(CC_GKEYID_GET_KEYSPACE(globalKeyId) <= CC_KEYSPACE_LCE7))
{
for (NvU32 index = 0; index < CC_AES_256_GCM_KEY_SIZE_DWORD; index++)
{
if ((*pKeyStore)[slotIndex].cryptBundle.key[index] != 0)
{
return NV_OK;
}
}
return NV_ERR_FATAL_ERROR;
}
return NV_OK;
}
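A hedged summary of the ratchet above (the expand primitive's name falls outside this hunk; an HKDF-SHA256-style expand from libspdm is assumed):
//   digest = SHA-256(currentKey)                       // libspdm_sha256_hash_all
//   newKey = Expand(digest, CC_GKEYID_GET_STR(keyId))  // labeled by the key's name
//
// Both endpoints can thus derive the rotated key independently, and an
// all-zero result is rejected for LCE keys because the engines fault on it.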
/*!
* Get key pair from channel
*
* @param[in] pGpu : OBJGPU pointer
* @param[in] pConfCompute : conf comp pointer
* @param[in] pKernelChannel : KernelChannel pointer
* @param[out] pH2DKey : pointer to h2d key
* @param[out] pD2HKey : pointer to d2h key
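* Either output pointer may be NULL when the caller needs only one of the keys.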
*/
NV_STATUS
confComputeGetKeyPairByChannel_GH100
(
OBJGPU *pGpu,
ConfidentialCompute *pConfCompute,
KernelChannel *pKernelChannel,
NvU32 *pH2DKey,
NvU32 *pD2HKey
)
{
NvU16 keySpace = 0;
NvU16 lh2dKeyId = 0;
NvU16 ld2hKeyId = 0;
RM_ENGINE_TYPE engineType = kchannelGetEngineType(pKernelChannel);
if (engineType == RM_ENGINE_TYPE_SEC2)
{
keySpace = CC_KEYSPACE_SEC2;
NV_ASSERT_OK_OR_RETURN(getKeyIdSec2(pKernelChannel, ROTATE_IV_ENCRYPT, &lh2dKeyId));
NV_ASSERT_OK_OR_RETURN(getKeyIdSec2(pKernelChannel, ROTATE_IV_HMAC, &ld2hKeyId));
}
else
{
NV_ASSERT_OK_OR_RETURN(getKeyspaceLce(pKernelChannel, &keySpace));
NV_ASSERT_OK_OR_RETURN(getKeyIdLce(pKernelChannel, ROTATE_IV_ENCRYPT, &lh2dKeyId));
NV_ASSERT_OK_OR_RETURN(getKeyIdLce(pKernelChannel, ROTATE_IV_DECRYPT, &ld2hKeyId));
}
if (pH2DKey != NULL)
{
*pH2DKey = CC_GKEYID_GEN(keySpace, lh2dKeyId);
}
if (pD2HKey != NULL)
{
*pD2HKey = CC_GKEYID_GEN(keySpace, ld2hKeyId);
}
return NV_OK; return NV_OK;
} }
@ -521,30 +619,6 @@ getKeyspaceLce
return NV_OK; return NV_OK;
} }
static NvU32
getKeySlotFromGlobalKeyId
(
NvU32 globalKeyId
)
{
NvU16 keyspace = CC_GKEYID_GET_KEYSPACE(globalKeyId);
NvU32 keySlotIndex = 0;
for (NvU16 index = 0; index < CC_KEYSPACE_SIZE; index++)
{
if (index == keyspace)
{
break;
}
else
{
keySlotIndex += getKeyspaceSize(index);
}
}
return keySlotIndex + CC_GKEYID_GET_LKEYID(globalKeyId);
}
static NV_STATUS static NV_STATUS
checkSlot checkSlot
( (
@ -589,32 +663,6 @@ incrementChannelCounter
} }
} }
static NvU32
getKeyspaceSize
(
NvU16 keyspace
)
{
switch (keyspace)
{
case CC_KEYSPACE_GSP:
return CC_KEYSPACE_GSP_SIZE;
case CC_KEYSPACE_SEC2:
return CC_KEYSPACE_SEC2_SIZE;
case CC_KEYSPACE_LCE0:
case CC_KEYSPACE_LCE1:
case CC_KEYSPACE_LCE2:
case CC_KEYSPACE_LCE3:
case CC_KEYSPACE_LCE4:
case CC_KEYSPACE_LCE5:
case CC_KEYSPACE_LCE6:
case CC_KEYSPACE_LCE7:
return CC_KEYSPACE_LCE_SIZE;
default:
NV_ASSERT_OR_RETURN(NV_FALSE, 0);
}
}
static NvU64 static NvU64
getChannelCounter getChannelCounter
( (

View File

@ -39,15 +39,16 @@
#include "ctrl/ctrl2080/ctrl2080internal.h" #include "ctrl/ctrl2080/ctrl2080internal.h"
#include "ctrl/ctrl2080/ctrl2080spdm.h" #include "ctrl/ctrl2080/ctrl2080spdm.h"
#include "kernel/gpu/conf_compute/ccsl.h" #include "kernel/gpu/conf_compute/ccsl.h"
#include "kernel/gpu/fifo/kernel_fifo.h"
#include "kernel/gpu/fifo/kernel_channel.h"
#include "gpu/conf_compute/conf_compute_api.h" #include "gpu/conf_compute/conf_compute_api.h"
#include "class/clcb33.h" #include "class/clcb33.h"
#include "spdm/rmspdmvendordef.h"
/*! /*!
* Local object related functions * Local object related functions
*/ */
static NV_STATUS _confComputeInitRegistryOverrides(OBJGPU *, ConfidentialCompute*); static NV_STATUS _confComputeInitRegistryOverrides(OBJGPU *, ConfidentialCompute*);
static NvU32 _confComputeGetKeyspaceSize(NvU16 keyspace);
NV_STATUS NV_STATUS
confComputeConstructEngine_IMPL(OBJGPU *pGpu, confComputeConstructEngine_IMPL(OBJGPU *pGpu,
@ -70,6 +71,7 @@ confComputeConstructEngine_IMPL(OBJGPU *pGpu,
pConfCompute->pDmaCcslCtx = NULL; pConfCompute->pDmaCcslCtx = NULL;
pConfCompute->pReplayableFaultCcslCtx = NULL; pConfCompute->pReplayableFaultCcslCtx = NULL;
pConfCompute->pNonReplayableFaultCcslCtx = NULL; pConfCompute->pNonReplayableFaultCcslCtx = NULL;
pConfCompute->pGspSec2RpcCcslCtx = NULL;
if (gpuIsCCEnabledInHw_HAL(pGpu)) if (gpuIsCCEnabledInHw_HAL(pGpu))
{ {
@ -151,7 +153,18 @@ confComputeConstructEngine_IMPL(OBJGPU *pGpu,
DRF_DEF(GSP, _PROXY_REG, _CONF_COMPUTE_MULTI_GPU_MODE, _PROTECTED_PCIE); DRF_DEF(GSP, _PROXY_REG, _CONF_COMPUTE_MULTI_GPU_MODE, _PROTECTED_PCIE);
} }
} }
// init key rotation state
for (NvU32 i = 0; i < CC_KEYSPACE_TOTAL_SIZE; i++)
{
pConfCompute->keyRotationState[i] = KEY_ROTATION_STATUS_IDLE;
pConfCompute->ppKeyRotationTimer[i] = NULL;
}
portMemSet(pConfCompute->aggregateStats, 0, sizeof(pConfCompute->aggregateStats));
portMemSet(pConfCompute->freedChannelAggregateStats, 0, sizeof(pConfCompute->freedChannelAggregateStats));
pConfCompute->keyRotationCallbackCount = 0;
pConfCompute->keyRotationChannelRefCount = 0;
pConfCompute->keyRotationEnableMask = 0;
NV_ASSERT_OK_OR_RETURN(confComputeEnableKeyRotationSupport_HAL(pGpu, pConfCompute));
return NV_OK; return NV_OK;
} }
@ -258,7 +271,6 @@ _confComputeInitRegistryOverrides
} }
} }
} }
return NV_OK; return NV_OK;
} }
@ -392,6 +404,7 @@ _confComputeDeinitSpdmSessionAndKeys
pConfCompute->pDmaCcslCtx = NULL; pConfCompute->pDmaCcslCtx = NULL;
pConfCompute->pReplayableFaultCcslCtx = NULL; pConfCompute->pReplayableFaultCcslCtx = NULL;
pConfCompute->pNonReplayableFaultCcslCtx = NULL; pConfCompute->pNonReplayableFaultCcslCtx = NULL;
pConfCompute->pGspSec2RpcCcslCtx = NULL;
confComputeKeyStoreDeinit_HAL(pConfCompute); confComputeKeyStoreDeinit_HAL(pConfCompute);
} }
@ -455,6 +468,16 @@ confComputeStatePostLoad_IMPL
} }
} }
if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED))
{
status = confComputeEnableKeyRotationCallback_HAL(pGpu, pConfCompute, NV_TRUE);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "ConfCompute : Failed to enable key rotation callback!");
return status;
}
}
return status; return status;
} }
@ -474,17 +497,41 @@ confComputeStatePreUnload_KERNEL
) )
{ {
NV_STATUS status = NV_OK; NV_STATUS status = NV_OK;
NV_STATUS tempStatus = NV_OK;
if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED))
{
OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
for (NvU32 i = 0; i < CC_KEYSPACE_TOTAL_SIZE; i++)
{
if (pConfCompute->ppKeyRotationTimer[i] != NULL)
{
tmrEventCancel(pTmr, pConfCompute->ppKeyRotationTimer[i]);
portMemFree(pConfCompute->ppKeyRotationTimer[i]->pUserData);
tmrEventDestroy(pTmr, pConfCompute->ppKeyRotationTimer[i]);
}
}
tempStatus = confComputeEnableKeyRotationCallback_HAL(pGpu, pConfCompute, NV_FALSE);
if (tempStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Failed to disable key rotation 0x%x\n", tempStatus);
status = tempStatus;
}
}
if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_SPDM_ENABLED)) if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_SPDM_ENABLED))
{ {
if (IS_GSP_CLIENT(pGpu) && (pConfCompute->heartbeatPeriodSec != 0)) if (IS_GSP_CLIENT(pGpu) && (pConfCompute->heartbeatPeriodSec != 0))
{ {
status = spdmUnregisterFromHeartbeats(pGpu, pConfCompute->pSpdm); tempStatus = spdmUnregisterFromHeartbeats(pGpu, pConfCompute->pSpdm);
} }
else if (!IS_GSP_CLIENT(pGpu)) else if (!IS_GSP_CLIENT(pGpu))
{ {
NV_PRINTF(LEVEL_INFO, "Performing SPDM deinitialization in Pre Unload!\n"); NV_PRINTF(LEVEL_INFO, "Performing SPDM deinitialization in Pre Unload!\n");
status = _confComputeDeinitSpdmSessionAndKeys(pGpu, pConfCompute); tempStatus = _confComputeDeinitSpdmSessionAndKeys(pGpu, pConfCompute);
}
if (tempStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Failed to deinit spdm 0x%x\n", tempStatus);
status = tempStatus;
} }
} }
@ -580,6 +627,75 @@ confComputeSetErrorState_KERNEL
} }
} }
/*!
* Init channel iterator for a given global key
*
* @param[in] pGpu : OBJGPU Pointer
* @param[in] pConfCompute : ConfidentialCompute pointer
* @param[in] globalKey : Key used by channels
* @param[in/out] pIter : kernelchannel iterator
*/
NV_STATUS
confComputeInitChannelIterForKey_IMPL
(
OBJGPU *pGpu,
ConfidentialCompute *pConfCompute,
NvU32 globalKey,
CHANNEL_ITERATOR *pIter
)
{
KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
NvU32 keySpace = CC_GKEYID_GET_KEYSPACE(globalKey);
NvU32 engineId = confComputeGetEngineIdFromKeySpace_HAL(pConfCompute, keySpace);
NV_ASSERT_OR_RETURN(engineId != RM_ENGINE_TYPE_NULL, NV_ERR_INVALID_ARGUMENT);
NvU32 runlistId;
NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate(pGpu, pKernelFifo, ENGINE_INFO_TYPE_RM_ENGINE_TYPE, engineId,
ENGINE_INFO_TYPE_RUNLIST, &runlistId));
kfifoGetChannelIterator(pGpu, pKernelFifo, pIter, runlistId);
return NV_OK;
}
/*!
* Gets next channel for a given global key
*
* @param[in] pGpu : OBJGPU Pointer
* @param[in] pConfCompute : ConfidentialCompute pointer
* @param[in] pIt : channel iterator for a runlist
* @param[in] globalKey : Key used by channels
* @param[out] ppKernelChannel : kernelchannel
*/
NV_STATUS
confComputeGetNextChannelForKey_IMPL
(
OBJGPU *pGpu,
ConfidentialCompute *pConfCompute,
CHANNEL_ITERATOR *pIt,
NvU32 globalKey,
KernelChannel **ppKernelChannel
)
{
NV_ASSERT_OR_RETURN(ppKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT);
KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
NvBool bKernelPriv = confComputeGlobalKeyIsKernelPriv_HAL(pConfCompute, globalKey);
KernelChannel *pKernelChannel = NULL;
while(kfifoGetNextKernelChannel(pGpu, pKernelFifo, pIt, &pKernelChannel) == NV_OK)
{
if (kchannelGetRunlistId(pKernelChannel) != pIt->runlistId)
continue;
if (!pKernelChannel->bCCSecureChannel)
continue;
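// Kernel-priv keys serve kernel channels and user keys serve user channels;
// the XOR below is false exactly when the two privilege levels match.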
if (!(bKernelPriv ^ kchannelCheckIsKernel(pKernelChannel)))
{
*ppKernelChannel = pKernelChannel;
return NV_OK;
}
}
return NV_ERR_OBJECT_NOT_FOUND;
}
/*! /*!
* Deinitialize all keys required for the Confidential Compute session. * Deinitialize all keys required for the Confidential Compute session.
* *
@ -607,3 +723,69 @@ confComputeDestruct_KERNEL
return; return;
} }
/*!
* Get key slot from global key
*
* @param[in] pConfCompute : ConfidentialCompute pointer
* @param[in] globalKeyId : globalKeyId
* @param[out] pSlot : key slot
*/
NV_STATUS
confComputeGetKeySlotFromGlobalKeyId_IMPL
(
ConfidentialCompute *pConfCompute,
NvU32 globalKeyId,
NvU32 *pSlot
)
{
NvU32 slot;
NvU16 keyspace = CC_GKEYID_GET_KEYSPACE(globalKeyId);
NvU32 keySlotIndex = 0;
NV_ASSERT_OR_RETURN(pSlot != NULL, NV_ERR_INVALID_ARGUMENT);
for (NvU16 index = 0; index < CC_KEYSPACE_SIZE; index++)
{
if (index == keyspace)
{
break;
}
else
{
keySlotIndex += _confComputeGetKeyspaceSize(index);
}
}
slot = keySlotIndex + CC_GKEYID_GET_LKEYID(globalKeyId);
if (slot >= CC_KEYSPACE_TOTAL_SIZE)
return NV_ERR_INVALID_ARGUMENT;
*pSlot = slot;
return NV_OK;
}
static NvU32
_confComputeGetKeyspaceSize
(
NvU16 keyspace
)
{
switch (keyspace)
{
case CC_KEYSPACE_GSP:
return CC_KEYSPACE_GSP_SIZE;
case CC_KEYSPACE_SEC2:
return CC_KEYSPACE_SEC2_SIZE;
case CC_KEYSPACE_LCE0:
case CC_KEYSPACE_LCE1:
case CC_KEYSPACE_LCE2:
case CC_KEYSPACE_LCE3:
case CC_KEYSPACE_LCE4:
case CC_KEYSPACE_LCE5:
case CC_KEYSPACE_LCE6:
case CC_KEYSPACE_LCE7:
return CC_KEYSPACE_LCE_SIZE;
default:
NV_ASSERT_OR_RETURN(NV_FALSE, 0);
}
}
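Worked example (hedged; assumes the keyspace enum orders GSP, SEC2, LCE0..7 as the switch above suggests): a SEC2 key is preceded only by the GSP slots, so
//   slot(CC_GKEYID_GEN(CC_KEYSPACE_SEC2, lkeyid)) == CC_KEYSPACE_GSP_SIZE + lkeyid
//
// and the LCE0 slots begin at CC_KEYSPACE_GSP_SIZE + CC_KEYSPACE_SEC2_SIZE.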

View File

@ -0,0 +1,432 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NVOC_CONF_COMPUTE_H_PRIVATE_ACCESS_ALLOWED
#include "gpu/conf_compute/conf_compute.h"
#include "gpu/conf_compute/conf_compute_keystore.h"
#include "class/clc86fsw.h"
#include "ctrl/ctrl2080/ctrl2080internal.h"
#include "kernel/gpu/mem_mgr/mem_mgr.h"
static NV_STATUS performKeyRotationByKeyPair(OBJGPU *pGpu, ConfidentialCompute *pConfCompute,
NvU32 h2dKey, NvU32 d2hKey);
// Callback that will check stats and trigger key rotation
void
confComputeKeyRotationCallback
(
OBJGPU *pGpu,
void *data
)
{
NV_STATUS status;
status = confComputeTriggerKeyRotation_HAL(pGpu, GPU_GET_CONF_COMPUTE(pGpu));
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Key rotation callback failed with status 0x%x\n", status);
NV_ASSERT(status == NV_OK);
}
}
void
performKeyRotation_WORKITEM
(
NvU32 gpuInstance,
void *pArgs
)
{
OBJGPU *pGpu = gpumgrGetGpu(gpuInstance);
ConfidentialCompute *pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
KEY_ROTATION_WORKITEM_INFO *pWorkItemInfo = (KEY_ROTATION_WORKITEM_INFO *)pArgs;
NvU32 h2dKey = pWorkItemInfo->h2dKey;
NvU32 d2hKey = pWorkItemInfo->d2hKey;
KernelChannel *pKernelChannel = NULL;
NvU16 notifyStatus = 0x0;
CHANNEL_ITERATOR iter = {0};
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
NV2080_CTRL_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION_PARAMS params = {0};
NV_STATUS status = NV_OK;
if (pWorkItemInfo->status == KEY_ROTATION_STATUS_PENDING)
{
// This means all channels reported idle and we can go ahead with KR
status = performKeyRotationByKeyPair(pGpu, pConfCompute, h2dKey, d2hKey);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Failed to perform key rotation with status = 0x%x for h2dKey = 0x%x\n", status, h2dKey);
NV_ASSERT_OK(confComputeSetKeyRotationStatus(pConfCompute, h2dKey, KEY_ROTATION_STATUS_FAILED_ROTATION));
goto done;
}
}
else if ((pWorkItemInfo->status == KEY_ROTATION_STATUS_FAILED_THRESHOLD) ||
(pWorkItemInfo->status == KEY_ROTATION_STATUS_FAILED_TIMEOUT))
{
// This means we need to notify and RC non-idle channels and go ahead with KR
NV_ASSERT_OR_RETURN_VOID(confComputeInitChannelIterForKey(pGpu, pConfCompute, h2dKey, &iter) == NV_OK);
while(confComputeGetNextChannelForKey(pGpu, pConfCompute, &iter, h2dKey, &pKernelChannel) == NV_OK)
{
if (!kchannelIsDisabledForKeyRotation(pGpu, pKernelChannel))
{
// update notifier memory
notifyStatus =
FLD_SET_DRF(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _IN_PROGRESS, _FALSE, notifyStatus);
notifyStatus =
FLD_SET_DRF_NUM(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _VALUE, pWorkItemInfo->status, notifyStatus);
NV_ASSERT_OK(kchannelUpdateNotifierMem(pKernelChannel, NV_CHANNELGPFIFO_NOTIFICATION_TYPE_KEY_ROTATION_STATUS,
0, 0, notifyStatus));
NV_PRINTF(LEVEL_INFO, "chid 0x%x was NOT disabled for key rotation, writing notifier with val 0x%x\n", kchannelGetDebugTag(pKernelChannel), (NvU32)notifyStatus);
// send events to clients if registered
kchannelNotifyEvent(pKernelChannel, NVC86F_NOTIFIERS_KEY_ROTATION, 0, pWorkItemInfo->status, NULL, 0);
}
}
// RC all non-idle channels
params.globalH2DKey = h2dKey;
status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
NV2080_CTRL_CMD_INTERNAL_CONF_COMPUTE_RC_CHANNELS_FOR_KEY_ROTATION,
&params, sizeof(params));
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Control call to RC non-idle channels failed with status 0x%x, can't perform key rotation for h2dKey = 0x%x\n",
status, h2dKey);
NV_ASSERT_OK(confComputeSetKeyRotationStatus(pConfCompute, h2dKey, KEY_ROTATION_STATUS_FAILED_ROTATION));
goto done;
}
// perform key rotation
status = performKeyRotationByKeyPair(pGpu, pConfCompute, h2dKey, d2hKey);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Failed to perform key rotation with status = 0x%x for h2dKey = 0x%x\n", status, h2dKey);
NV_ASSERT_OK(confComputeSetKeyRotationStatus(pConfCompute, h2dKey, KEY_ROTATION_STATUS_FAILED_ROTATION));
goto done;
}
}
else
{
NV_PRINTF(LEVEL_ERROR, "Unexpected key rotation status 0x%x\n", pWorkItemInfo->status);
status = NV_ERR_INVALID_STATE;
}
done:
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "KR fialed with status 0x%x\n", status);
// TODO CONFCOMP-984: Implement failure sequence from kpadwal
}
}
static NV_STATUS
performKeyRotationByKeyPair
(
OBJGPU *pGpu,
ConfidentialCompute *pConfCompute,
NvU32 h2dKey,
NvU32 d2hKey
)
{
KernelChannel *pKernelChannel = NULL;
NvU16 notifyStatus = 0x0;
CHANNEL_ITERATOR iter = {0};
NvU32 h2dIndex, d2hIndex;
NV_ASSERT_OK_OR_RETURN(confComputeUpdateSecrets_HAL(pConfCompute, h2dKey));
// notify clients
NV_ASSERT_OK_OR_RETURN(confComputeInitChannelIterForKey(pGpu, pConfCompute, h2dKey, &iter));
while(confComputeGetNextChannelForKey(pGpu, pConfCompute, &iter, h2dKey, &pKernelChannel) == NV_OK)
{
if (kchannelIsDisabledForKeyRotation(pGpu, pKernelChannel))
{
// update notifier memory
notifyStatus =
FLD_SET_DRF(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _IN_PROGRESS, _FALSE, notifyStatus);
notifyStatus =
FLD_SET_DRF_NUM(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _VALUE, (NvU16)KEY_ROTATION_STATUS_IDLE, notifyStatus);
NV_ASSERT_OK(kchannelUpdateNotifierMem(pKernelChannel, NV_CHANNELGPFIFO_NOTIFICATION_TYPE_KEY_ROTATION_STATUS,
0, 0, notifyStatus));
// send events to clients if registered
kchannelNotifyEvent(pKernelChannel, NVC86F_NOTIFIERS_KEY_ROTATION, 0, (NvU16)KEY_ROTATION_STATUS_IDLE, NULL, 0);
NV_PRINTF(LEVEL_INFO, "chid 0x%x was disabled for key rotation, writing notifier with val 0x%x\n", kchannelGetDebugTag(pKernelChannel), (NvU32)notifyStatus);
// also reset channel sw state
kchannelDisableForKeyRotation(pGpu, pKernelChannel, NV_FALSE);
kchannelEnableAfterKeyRotation(pGpu, pKernelChannel, NV_FALSE);
}
// clear encrypt stats irrespective of whether this channel was reported idle or not.
if (pKernelChannel->pEncStatsBuf != NULL)
portMemSet(pKernelChannel->pEncStatsBuf, 0, sizeof(CC_CRYPTOBUNDLE_STATS));
}
// reset KR state
pConfCompute->keyRotationCallbackCount = 1;
// clear aggregate and freed channel stats
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, h2dKey, &h2dIndex));
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, d2hKey, &d2hIndex));
pConfCompute->aggregateStats[h2dIndex].totalBytesEncrypted = 0;
pConfCompute->aggregateStats[h2dIndex].totalEncryptOps = 0;
pConfCompute->aggregateStats[d2hIndex].totalBytesEncrypted = 0;
pConfCompute->aggregateStats[d2hIndex].totalEncryptOps = 0;
pConfCompute->freedChannelAggregateStats[h2dIndex].totalBytesEncrypted = 0;
pConfCompute->freedChannelAggregateStats[h2dIndex].totalEncryptOps = 0;
pConfCompute->freedChannelAggregateStats[d2hIndex].totalBytesEncrypted = 0;
pConfCompute->freedChannelAggregateStats[d2hIndex].totalEncryptOps = 0;
NV_ASSERT_OK_OR_RETURN(confComputeSetKeyRotationStatus(pConfCompute, h2dKey, KEY_ROTATION_STATUS_IDLE));
return NV_OK;
}
/*!
* Checks if all channels corresponding to key pair
* are disabled and schedules key rotation.
*
* @param[in] pGpu : OBJGPU pointer
* @param[in] pConfCompute : conf comp pointer
* @param[in] h2dKey : h2d key
* @param[in] d2hKey : d2h key
*/
NV_STATUS
confComputeCheckAndScheduleKeyRotation_IMPL
(
OBJGPU *pGpu,
ConfidentialCompute *pConfCompute,
NvU32 h2dKey,
NvU32 d2hKey
)
{
CHANNEL_ITERATOR iter = {0};
KernelChannel *pKernelChannel = NULL;
KEY_ROTATION_STATUS state;
NV_ASSERT_OK_OR_RETURN(confComputeGetKeyRotationStatus(pConfCompute, h2dKey, &state));
NV_ASSERT_OR_RETURN(state == KEY_ROTATION_STATUS_PENDING, NV_ERR_INVALID_STATE);
NvBool bIdle = NV_TRUE;
NV_ASSERT_OK_OR_RETURN(confComputeInitChannelIterForKey(pGpu, pConfCompute, h2dKey, &iter));
while(confComputeGetNextChannelForKey(pGpu, pConfCompute, &iter, h2dKey, &pKernelChannel) == NV_OK)
{
// check if all channels are idle
if (!kchannelIsDisabledForKeyRotation(pGpu, pKernelChannel))
{
NV_PRINTF(LEVEL_INFO, "chid 0x%x was NOT disabled for key rotation, can't start KR yet\n", kchannelGetDebugTag(pKernelChannel));
bIdle = NV_FALSE;
break;
}
}
// if all channels are idle, trigger key rotation
if (bIdle)
{
NV_PRINTF(LEVEL_INFO, "scheduling KR for h2d key = 0x%x\n", h2dKey);
NV_ASSERT_OK_OR_RETURN(confComputeScheduleKeyRotationWorkItem(pGpu, pConfCompute, h2dKey, d2hKey));
}
return NV_OK;
}
/*!
* schedules key rotation workitem
*
* @param[in] pGpu : OBJGPU pointer
* @param[in] pConfCompute : conf comp pointer
* @param[in] h2dKey : h2d key
* @param[in] d2hKey : d2h key
*/
NV_STATUS
confComputeScheduleKeyRotationWorkItem_IMPL
(
OBJGPU *pGpu,
ConfidentialCompute *pConfCompute,
NvU32 h2dKey,
NvU32 d2hKey
)
{
KEY_ROTATION_STATUS status;
NV_ASSERT_OK_OR_RETURN(confComputeGetKeyRotationStatus(pConfCompute, h2dKey, &status));
if (status == KEY_ROTATION_STATUS_IN_PROGRESS)
{
NV_PRINTF(LEVEL_INFO, "Key rotation is already scheduled for key 0x%x\n", h2dKey);
return NV_OK;
}
// pWorkItemInfo will be freed by RmExecuteWorkItem after the work item finishes execution
KEY_ROTATION_WORKITEM_INFO *pWorkItemInfo = portMemAllocNonPaged(sizeof(KEY_ROTATION_WORKITEM_INFO));
NV_ASSERT_OR_RETURN(pWorkItemInfo != NULL, NV_ERR_NO_MEMORY);
pWorkItemInfo->h2dKey = h2dKey;
pWorkItemInfo->d2hKey = d2hKey;
pWorkItemInfo->status = status;
NV_ASSERT_OK_OR_RETURN(confComputeSetKeyRotationStatus(pConfCompute, h2dKey, KEY_ROTATION_STATUS_IN_PROGRESS));
// cancel timeout event in case it was scheduled
OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
NvU32 h2dIndex;
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, h2dKey, &h2dIndex));
if (pConfCompute->ppKeyRotationTimer[h2dIndex] != NULL)
{
tmrEventCancel(pTmr, pConfCompute->ppKeyRotationTimer[h2dIndex]);
}
// Queue workitem to perform key rotation
NV_ASSERT_OK_OR_RETURN(osQueueWorkItemWithFlags(pGpu, performKeyRotation_WORKITEM, (void*)pWorkItemInfo,
(OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA |
OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW |
OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW)));
return NV_OK;
}
/*!
* Sets KEY_ROTATION_STATUS for key pair corresponding to given key
*
* @param[in] pConfCompute : conf comp pointer
* @param[in] globalKey : key for which to set the status
* @param[in] status : KEY_ROTATION_STATUS* value
*/
NV_STATUS confComputeSetKeyRotationStatus_IMPL
(
ConfidentialCompute *pConfCompute,
NvU32 globalKey,
KEY_ROTATION_STATUS status
)
{
NvU32 h2dKey, d2hKey;
confComputeGetKeyPairByKey(pConfCompute, globalKey, &h2dKey, &d2hKey);
NvU32 h2dIndex, d2hIndex;
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, h2dKey, &h2dIndex));
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, d2hKey, &d2hIndex));
pConfCompute->keyRotationState[h2dIndex] = status;
pConfCompute->keyRotationState[d2hIndex] = status;
return NV_OK;
}
/*!
* Gets KEY_ROTATION_STATUS for given key
*
* @param[in] pConfCompute : conf comp pointer
 * @param[in] globalKey : key for which to get the status
* @param[out] pStatus : KEY_ROTATION_STATUS* value
*/
NV_STATUS confComputeGetKeyRotationStatus_IMPL
(
ConfidentialCompute *pConfCompute,
NvU32 globalKey,
KEY_ROTATION_STATUS* pStatus
)
{
NvU32 h2dKey, d2hKey;
confComputeGetKeyPairByKey(pConfCompute, globalKey, &h2dKey, &d2hKey);
NvU32 h2dIndex, d2hIndex;
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, h2dKey, &h2dIndex));
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, d2hKey, &d2hIndex));
NV_ASSERT_OR_RETURN(pConfCompute->keyRotationState[h2dIndex] ==
pConfCompute->keyRotationState[d2hIndex], NV_ERR_INVALID_STATE);
*pStatus = pConfCompute->keyRotationState[h2dIndex];
return NV_OK;
}
/*!
* Get key pair from globalKey
 * This function can return an invalid key pair if the input
 * key is invalid. It is the caller's responsibility to check this.
*
* @param[in] pConfCompute : conf comp pointer
* @param[in] globalKey : globalKey
* @param[out] pH2DKey : pointer to h2d key
* @param[out] pD2HKey : pointer to d2h key
*/
void confComputeGetKeyPairByKey_IMPL
(
ConfidentialCompute *pConfCompute,
NvU32 globalKey,
NvU32 *pH2DKey,
NvU32 *pD2HKey
)
{
NvU32 h2dKey, d2hKey;
// h2dKey is always the lower (even-numbered) key of the key pair.
if ((CC_GKEYID_GET_LKEYID(globalKey) % 2) == 1)
{
h2dKey = CC_GKEYID_DEC_LKEYID(globalKey);
d2hKey = globalKey;
}
else
{
h2dKey = globalKey;
d2hKey = CC_GKEYID_INC_LKEYID(globalKey);
}
if (pH2DKey != NULL)
*pH2DKey = h2dKey;
if (pD2HKey != NULL)
*pD2HKey = d2hKey;
}
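/*
 * Editorial note: an illustrative sketch (not part of the driver source) of
 * the pairing invariant above. Local key IDs are allocated in adjacent
 * even/odd pairs, so looking up either member of a pair yields the same
 * (h2dKey, d2hKey) tuple, with the even-numbered local ID as the H2D key.
 */
static void
ccSketchKeyPairInvariant(ConfidentialCompute *pConfCompute, NvU32 globalKey)
{
    NvU32 h2dKey, d2hKey;
    NvU32 h2dKey2, d2hKey2;

    confComputeGetKeyPairByKey(pConfCompute, globalKey, &h2dKey, &d2hKey);

    // Re-resolving from the D2H member returns the identical pair.
    confComputeGetKeyPairByKey(pConfCompute, d2hKey, &h2dKey2, &d2hKey2);
    NV_ASSERT((h2dKey == h2dKey2) && (d2hKey == d2hKey2));

    // The H2D key always carries the even-numbered local key ID.
    NV_ASSERT((CC_GKEYID_GET_LKEYID(h2dKey) % 2) == 0);
}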
/*!
 * Accounts the encryption statistics of a channel being freed.
 *
 * The encryption statistics of freed channels are also counted
 * in the per-key aggregate statistics. This function accumulates
 * the stats for a channel being freed.
*
* @param[in] pGpu : OBJGPU pointer
* @param[in] pConfCompute : conf comp pointer
* @param[in] pKernelChannel : channel pointer
*/
NV_STATUS
confComputeUpdateFreedChannelStats_IMPL
(
OBJGPU *pGpu,
ConfidentialCompute *pConfCompute,
KernelChannel *pKernelChannel
)
{
// TODO CONFCOMP-984: Make this fatal
if ((pKernelChannel->pEncStatsBufMemDesc == NULL) ||
(pKernelChannel->pEncStatsBuf == NULL))
{
return NV_OK;
}
CC_CRYPTOBUNDLE_STATS *pEncStats = pKernelChannel->pEncStatsBuf;
NvU32 h2dKey, d2hKey, h2dIndex, d2hIndex;
NV_ASSERT_OK_OR_RETURN(confComputeGetKeyPairByChannel_HAL(pGpu, pConfCompute, pKernelChannel, &h2dKey, &d2hKey));
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, h2dKey, &h2dIndex));
NV_ASSERT_OK_OR_RETURN(confComputeGetKeySlotFromGlobalKeyId(pConfCompute, d2hKey, &d2hIndex));
pConfCompute->freedChannelAggregateStats[h2dIndex].totalBytesEncrypted += pEncStats->bytesEncryptedH2D;
pConfCompute->freedChannelAggregateStats[h2dIndex].totalEncryptOps += pEncStats->numEncryptionsH2D;
pConfCompute->freedChannelAggregateStats[d2hIndex].totalBytesEncrypted += pEncStats->bytesEncryptedD2H;
pConfCompute->freedChannelAggregateStats[d2hIndex].totalEncryptOps += pEncStats->numEncryptionsD2H;
return NV_OK;
}
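/*
 * Editorial note: an illustrative sketch, not part of the driver source. It
 * spells out the bookkeeping identity the function above maintains: once a
 * channel is freed its per-channel buffer is gone, so a per-key total is the
 * sum over live channels plus freedChannelAggregateStats. The helper
 * ccSketchSumLiveChannelBytes() is hypothetical, standing in for a walk over
 * the channels still holding the key at h2dIndex.
 */
static NvU64
ccSketchTotalBytesEncrypted(ConfidentialCompute *pConfCompute, NvU32 h2dIndex)
{
    return ccSketchSumLiveChannelBytes(pConfCompute, h2dIndex) +
           pConfCompute->freedChannelAggregateStats[h2dIndex].totalBytesEncrypted;
}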

@@ -362,6 +362,8 @@ deviceCtrlCmdGpuSetVgpuHeterogeneousMode_IMPL
return NV_ERR_IN_USE;
}
kvgpumgrSetVgpuType(pGpu, pPgpuInfo, NVA081_CTRL_VGPU_CONFIG_INVALID_TYPE);
pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE, pParams->bHeterogeneousMode);
if (pParams->bHeterogeneousMode)

@@ -637,7 +637,7 @@ kfifoConvertInstToKernelChannel_GM107
memdescDescribe(&instMemDesc, instAperture, pInst->address, NV_RAMIN_ALLOC_SIZE);
kfifoGetChannelIterator(pGpu, pKernelFifo, &chanIt, INVALID_RUNLIST_ID);
while (kfifoGetNextKernelChannel(pGpu, pKernelFifo, &chanIt, &pKernelChannel) == NV_OK)
{
NV_ASSERT_OR_ELSE(pKernelChannel != NULL, continue);

@@ -114,6 +114,8 @@ static void _kchannelUpdateFifoMapping(KernelChannel *pKernelChannel,
NvU32 flags,
NvHandle hSubdevice,
RsCpuMapping *pMapping);
static NvNotification*
_kchannelGetKeyRotationNotifier(KernelChannel *pKernelChannel);
/*!
 * @brief Construct a new KernelChannel, which also creates a Channel.
@@ -714,6 +716,22 @@ kchannelConstruct_IMPL
return NV_ERR_NOT_READY;
}
if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED))
{
KEY_ROTATION_STATUS state;
NvU32 h2dKey;
NV_ASSERT_OK_OR_GOTO(status,
confComputeGetKeyPairByChannel(pGpu, pConfCompute, pKernelChannel, &h2dKey, NULL),
cleanup);
NV_ASSERT_OK_OR_GOTO(status,
confComputeGetKeyRotationStatus(pConfCompute, h2dKey, &state),
cleanup);
if (state != KEY_ROTATION_STATUS_IDLE)
{
status = NV_ERR_KEY_ROTATION_IN_PROGRESS;
goto cleanup;
}
}
status = kchannelRetrieveKmb_HAL(pGpu, pKernelChannel, ROTATE_IV_ALL_VALID,
NV_TRUE, &pKernelChannel->clientKmb);
NV_ASSERT_OR_GOTO(status == NV_OK, cleanup);
@@ -957,6 +975,32 @@ kchannelConstruct_IMPL
// Cache the hVASpace for this channel in the KernelChannel object
pKernelChannel->hVASpace = pKernelChannel->pKernelCtxShareApi->hVASpace;
ConfidentialCompute *pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
if ((pConfCompute != NULL) &&
(pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_CC_FEATURE_ENABLED)) &&
(pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED)) &&
(pKernelChannel->bCCSecureChannel))
{
if (!FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_SKIP_SCRUBBER, _TRUE, pChannelGpfifoParams->flags) &&
pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED))
{
//
// If conf compute feature is enabled AND
// If key rotation is supported AND
// If key rotation callbacks are not enabled yet AND
// If this is a secure channel being created AND
// If it's not the scrubber channel, then increment the refcount
//
pConfCompute->keyRotationChannelRefCount++;
}
// Create persistent mapping to key rotation notifier
NV_ASSERT_OK_OR_GOTO(
status,
kchannelSetKeyRotationNotifier_HAL(pGpu, pKernelChannel, NV_TRUE),
cleanup);
}
cleanup:
if (bLockAcquired)
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
@@ -1056,9 +1100,35 @@ kchannelDestruct_IMPL
NV_STATUS status = NV_OK;
KernelChannelGroup *pKernelChannelGroup = NULL;
NV_ASSERT(pKernelChannel->pKernelChannelGroupApi != NULL);
pKernelChannelGroup = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup;
NV_ASSERT(pKernelChannelGroup != NULL);
resGetFreeParams(staticCast(pKernelChannel, RsResource), &pCallContext, &pParams);
hClient = pCallContext->pClient->hClient;
ConfidentialCompute *pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
if ((pConfCompute != NULL) &&
(pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_CC_FEATURE_ENABLED)) &&
(pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED)) &&
(pKernelChannel->bCCSecureChannel))
{
if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED))
{
if (pConfCompute->keyRotationChannelRefCount > 0)
{
pConfCompute->keyRotationChannelRefCount--;
}
if (pConfCompute->keyRotationChannelRefCount == 0)
{
pConfCompute->keyRotationCallbackCount = 0;
}
}
NV_ASSERT_OK(confComputeUpdateFreedChannelStats(pGpu, pConfCompute, pKernelChannel));
NV_ASSERT_OK(kchannelSetEncryptionStatsBuffer_HAL(pGpu, pKernelChannel, NV_FALSE));
NV_ASSERT_OK(kchannelSetKeyRotationNotifier_HAL(pGpu, pKernelChannel, NV_FALSE));
}
if (RMCFG_FEATURE_PLATFORM_GSP)
{
// Free memdescs created during construct on GSP path.
@@ -1665,17 +1735,19 @@ kchannelNotifyRc_IMPL
}
/**
 * @brief Sends event corresponding to index to notify clients
 *
 * @param[in] pKernelChannel
 * @param[in] notifyIndex
 * @param[in] info32
 * @param[in] info16
 * @param[in] pNotifyParams
 * @param[in] notifyParamsSize
 */
void kchannelNotifyEvent_IMPL
(
KernelChannel *pKernelChannel,
NvU32 notifyIndex,
NvU32 info32,
NvU16 info16,
void *pNotifyParams,
NvU32 notifyParamsSize
)
@@ -1714,8 +1786,15 @@ void kchannelNotifyGeneric_IMPL
pEventNotification = inotifyGetNotificationList(staticCast(pKernelChannel, INotifier));
if (pEventNotification != NULL)
{
NV_PRINTF(LEVEL_INFO, "Posting event on channel = 0x%x with info16 = 0x%x\n",
kchannelGetDebugTag(pKernelChannel), (NvU32)info16);
// ping any events on the list of type notifyIndex
osEventNotificationWithInfo(pGpu, pEventNotification, notifyIndex, info32, info16,
pNotifyParams, notifyParamsSize);
}
else
{
NV_PRINTF(LEVEL_INFO, "No event on channel = 0x%x\n", kchannelGetDebugTag(pKernelChannel));
}
// reset if single shot notify action // reset if single shot notify action
@@ -1725,6 +1804,103 @@ void kchannelNotifyGeneric_IMPL
return;
}
/**
* @brief Writes notifier memory at given index with given info
*
* @param[in] pKernelChannel
* @param[in] notifyIndex
* @param[in] info32
* @param[in] info16
 * @param[in] notifierStatus
*/
NV_STATUS kchannelUpdateNotifierMem_IMPL
(
KernelChannel *pKernelChannel,
NvU32 notifyIndex,
NvU32 info32,
NvU16 info16,
NvU32 notifierStatus
)
{
OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
MEMORY_DESCRIPTOR *pNotifierMemDesc = pKernelChannel->pErrContextMemDesc;
NV_ADDRESS_SPACE addressSpace;
OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
NvU64 time;
MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
TRANSFER_SURFACE surf = {0};
NvNotification *pNotifier = NULL;
NvBool bMemEndTransfer = NV_FALSE;
if (pNotifierMemDesc == NULL)
return NV_OK;
addressSpace = memdescGetAddressSpace(pNotifierMemDesc);
if (RMCFG_FEATURE_PLATFORM_GSP)
NV_ASSERT_OR_RETURN(addressSpace == ADDR_FBMEM, NV_ERR_INVALID_STATE);
//
// If clients did not allocate enough memory for the doorbell
// notifier, return NV_OK so as not to regress older clients
//
NV_CHECK_OR_RETURN(LEVEL_INFO, memdescGetSize(pNotifierMemDesc) >= (notifyIndex + 1) * sizeof(NvNotification), NV_OK);
//
// We rely on the persistent mapping for the key rotation notifier,
// since this may be called in the top half, and mappings are not allowed
// in contexts that can't sleep on KVM or similar HCC systems.
//
ConfidentialCompute *pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
if ((pConfCompute != NULL) &&
(pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED)) &&
(notifyIndex == NV_CHANNELGPFIFO_NOTIFICATION_TYPE_KEY_ROTATION_STATUS))
{
pNotifier = _kchannelGetKeyRotationNotifier(pKernelChannel);
NV_ASSERT_OR_RETURN(pNotifier != NULL, NV_ERR_INVALID_STATE);
bMemEndTransfer = NV_FALSE;
}
else
{
pNotifier = (NvNotification *)memdescGetKernelMapping(pNotifierMemDesc);
if (pNotifier == NULL)
{
surf.pMemDesc = pNotifierMemDesc;
surf.offset = notifyIndex * sizeof(NvNotification);
pNotifier =
(NvNotification *) memmgrMemBeginTransfer(pMemoryManager, &surf,
sizeof(NvNotification),
TRANSFER_FLAGS_SHADOW_ALLOC);
NV_ASSERT_OR_RETURN(pNotifier != NULL, NV_ERR_INVALID_STATE);
bMemEndTransfer = NV_TRUE;
}
else
{
//
// If a CPU pointer has been passed by the caller, ensure that the
// notifier is in sysmem or, if it is in vidmem, that BAR access to
// it is not blocked (for HCC)
//
NV_ASSERT_OR_RETURN(
memdescGetAddressSpace(pNotifierMemDesc) == ADDR_SYSMEM ||
!kbusIsBarAccessBlocked(pKernelBus), NV_ERR_INVALID_ARGUMENT);
pNotifier = &pNotifier[notifyIndex];
}
}
tmrGetCurrentTime(pTmr, &time);
notifyFillNvNotification(pGpu, pNotifier, info32, info16,
notifierStatus, NV_TRUE, time);
if (bMemEndTransfer)
{
memmgrMemEndTransfer(pMemoryManager, &surf, sizeof(NvNotification), 0);
}
return NV_OK;
}
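/*
 * Editorial note: an illustrative sketch, not part of the driver source,
 * of how a caller in a non-sleeping context can post a key rotation status
 * through the function above. It relies on the persistent notifier mapping
 * created via kchannelSetKeyRotationNotifier_HAL(); the zero info32/info16
 * values are placeholders for this example, not the driver's actual encoding.
 */
static NV_STATUS
ccSketchPostKeyRotationStatus(KernelChannel *pKernelChannel, KEY_ROTATION_STATUS status)
{
    return kchannelUpdateNotifierMem(pKernelChannel,
                                     NV_CHANNELGPFIFO_NOTIFICATION_TYPE_KEY_ROTATION_STATUS,
                                     0,  // info32: placeholder
                                     0,  // info16: placeholder
                                     (NvU32)status);
}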
/*!
 * @brief Stop channel and notify client
 *
@@ -2931,7 +3107,7 @@ kchannelCtrlCmdEventSetTrigger_IMPL
KernelChannel *pKernelChannel
)
{
kchannelNotifyEvent(pKernelChannel, NVA06F_NOTIFIERS_SW, 0, 0, NULL, 0);
return NV_OK;
}
@@ -3842,6 +4018,15 @@ kchannelUpdateWorkSubmitTokenNotifIndex_IMPL
NV_CHECK_OR_RETURN(LEVEL_INFO, index != NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR,
NV_ERR_INVALID_ARGUMENT);
// If key rotation is enabled then clobbering key rotation notifier is disallowed
ConfidentialCompute *pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
if ((pConfCompute != NULL) &&
(pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED)))
{
NV_CHECK_OR_RETURN(LEVEL_ERROR, index != NV_CHANNELGPFIFO_NOTIFICATION_TYPE_KEY_ROTATION_STATUS,
NV_ERR_INVALID_ARGUMENT);
}
// Check for integer overflows
if (((index + 1) < index) ||
!portSafeMulU64(index + 1, sizeof(NvNotification), &notificationBufferSize))
@@ -3922,75 +4107,15 @@ kchannelNotifyWorkSubmitToken_IMPL
NvU32 token
)
{
NvU16 notifyStatus = 0x0;
NvU32 index = pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN];
notifyStatus =
FLD_SET_DRF(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _IN_PROGRESS, _TRUE, notifyStatus);
notifyStatus =
FLD_SET_DRF_NUM(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _VALUE, 0xFFFF, notifyStatus);
return kchannelUpdateNotifierMem(pKernelChannel, index, token, 0, notifyStatus);
}
/**
@@ -4423,7 +4548,7 @@ NV_STATUS kchannelRetrieveKmb_KERNEL
OBJGPU *pGpu,
KernelChannel *pKernelChannel,
ROTATE_IV_TYPE rotateOperation,
NvBool bIncludeIvOrNonce,
CC_KMB *keyMaterialBundle
)
{
@@ -4432,7 +4557,7 @@ NV_STATUS kchannelRetrieveKmb_KERNEL
NV_ASSERT(pCC != NULL);
return (confComputeKeyStoreRetrieveViaChannel_HAL(pCC, pKernelChannel, rotateOperation,
bIncludeIvOrNonce, keyMaterialBundle));
}
/*!
@@ -4453,39 +4578,59 @@ kchannelCtrlCmdGetKmb_KERNEL
return NV_ERR_NOT_SUPPORTED;
}
OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel);
ConfidentialCompute *pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED))
{
KEY_ROTATION_STATUS state;
NvU32 h2dKey;
NV_ASSERT_OK_OR_RETURN(confComputeGetKeyPairByChannel(pGpu, pConfCompute, pKernelChannel, &h2dKey, NULL));
NV_ASSERT_OK_OR_RETURN(confComputeGetKeyRotationStatus(pConfCompute, h2dKey, &state));
if ((state != KEY_ROTATION_STATUS_IDLE) ||
(kchannelIsDisabledForKeyRotation(pGpu, pKernelChannel)))
{
return NV_ERR_KEY_ROTATION_IN_PROGRESS;
}
}
portMemCopy((void*)(&pGetKmbParams->kmb), sizeof(CC_KMB),
(const void*)(&pKernelChannel->clientKmb), sizeof(CC_KMB));
if (pConfCompute->getProperty(pConfCompute, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED))
{
//
// If this is the first time GET_KMB is called on a context
// then set up the encrypt stats buffer.
//
if (pKernelChannel->pEncStatsBufMemDesc == NULL)
{
RsClient *pRsClient = NULL;
RsResourceRef *pResourceRef = NULL;
Memory *pMemory = NULL;
MEMORY_DESCRIPTOR *pMemDesc = NULL;
NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelChannel);
NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient));
if (clientGetResourceRef(pRsClient, pGetKmbParams->hMemory, &pResourceRef) != NV_OK)
{
// TODO: Make this fatal once all clients move to using hMemory
return NV_OK;
}
pMemory = dynamicCast(pResourceRef->pResource, Memory);
pMemDesc = pMemory->pMemDesc;
NV_ASSERT_OR_RETURN(pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT);
pKernelChannel->pEncStatsBufMemDesc = pMemDesc;
NV_ASSERT_OK_OR_RETURN(kchannelSetEncryptionStatsBuffer_HAL(pGpu, pKernelChannel, NV_TRUE));
}
//
// Reset statistics every time GET_KMB is called
// TODO CONFCOMP-984: Make this fatal if this ptr is NULL
//
if (pKernelChannel->pEncStatsBuf != NULL)
portMemSet(pKernelChannel->pEncStatsBuf, 0, sizeof(CC_CRYPTOBUNDLE_STATS));
}
return NV_OK;
return NV_ERR_NOT_SUPPORTED;
}
@@ -4516,6 +4661,19 @@ kchannelCtrlRotateSecureChannelIv_KERNEL
return NV_ERR_NOT_SUPPORTED;
}
if (pCC->getProperty(pCC, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED))
{
KEY_ROTATION_STATUS state;
NvU32 h2dKey;
NV_ASSERT_OK_OR_RETURN(confComputeGetKeyPairByChannel(pGpu, pCC, pKernelChannel, &h2dKey, NULL));
NV_ASSERT_OK_OR_RETURN(confComputeGetKeyRotationStatus(pCC, h2dKey, &state));
if ((state != KEY_ROTATION_STATUS_IDLE) ||
(kchannelIsDisabledForKeyRotation(pGpu, pKernelChannel)))
{
return NV_ERR_KEY_ROTATION_IN_PROGRESS;
}
}
NV_PRINTF(LEVEL_INFO, "Rotating IV in CPU-RM.\n"); NV_PRINTF(LEVEL_INFO, "Rotating IV in CPU-RM.\n");
status = confComputeKeyStoreRetrieveViaChannel_HAL( status = confComputeKeyStoreRetrieveViaChannel_HAL(
@@ -4717,3 +4875,125 @@ void kchannelEnableAfterKeyRotation
~KERNEL_CHANNEL_SW_STATE_ENABLE_AFTER_KEY_ROTATION;
}
}
/*!
* Creates/destroys persistent mappings for key rotation notifier
*/
NV_STATUS
kchannelSetKeyRotationNotifier_KERNEL
(
OBJGPU *pGpu,
KernelChannel *pKernelChannel,
NvBool bSet
)
{
NV_STATUS status = NV_OK;
MEMORY_DESCRIPTOR *pNotifierMemDesc = pKernelChannel->pErrContextMemDesc;
MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
TRANSFER_SURFACE surf = {0};
NV_ASSERT_OR_RETURN(pNotifierMemDesc != NULL, NV_ERR_INVALID_STATE);
NV_ADDRESS_SPACE addressSpace = memdescGetAddressSpace(pNotifierMemDesc);
NvU32 notifyIndex = NV_CHANNELGPFIFO_NOTIFICATION_TYPE_KEY_ROTATION_STATUS;
if (bSet)
{
NV_ASSERT_OR_RETURN(memdescGetSize(pNotifierMemDesc) >= ((notifyIndex + 1) * sizeof(NvNotification)),
NV_ERR_INVALID_ARGUMENT);
NV_ASSERT_OR_RETURN(addressSpace == ADDR_SYSMEM, NV_ERR_NOT_SUPPORTED);
if (pKernelChannel->pKeyRotationNotifierMemDesc == NULL)
{
NV_ASSERT_OK_OR_RETURN(memdescCreateSubMem(&pKernelChannel->pKeyRotationNotifierMemDesc,
pNotifierMemDesc, pGpu, notifyIndex * sizeof(NvNotification),
sizeof(NvNotification)));
surf.pMemDesc = pKernelChannel->pKeyRotationNotifierMemDesc;
surf.offset = 0;
pKernelChannel->pKeyRotationNotifier =
(NvNotification *) memmgrMemBeginTransfer(pMemoryManager, &surf,
sizeof(NvNotification),
TRANSFER_FLAGS_SHADOW_ALLOC);
NV_ASSERT_OR_ELSE(pKernelChannel->pKeyRotationNotifier != NULL, status = NV_ERR_INVALID_STATE; goto done;);
portMemSet((void*)pKernelChannel->pKeyRotationNotifier, 0, sizeof(NvNotification));
}
}
else
{
if (pKernelChannel->pKeyRotationNotifierMemDesc != NULL)
{
if (pKernelChannel->pKeyRotationNotifier != NULL)
{
surf.pMemDesc = pKernelChannel->pKeyRotationNotifierMemDesc;
surf.offset = 0;
memmgrMemEndTransfer(pMemoryManager, &surf, sizeof(NvNotification), 0);
pKernelChannel->pKeyRotationNotifier = NULL;
}
memdescDestroy(pKernelChannel->pKeyRotationNotifierMemDesc);
pKernelChannel->pKeyRotationNotifierMemDesc = NULL;
}
}
done:
if (status != NV_OK)
{
if (pKernelChannel->pKeyRotationNotifierMemDesc != NULL)
{
memdescDestroy(pKernelChannel->pKeyRotationNotifierMemDesc);
pKernelChannel->pKeyRotationNotifierMemDesc = NULL;
}
}
return status;
}
/*!
* Creates/destroys persistent mappings for encryption stats buffer
*/
NV_STATUS
kchannelSetEncryptionStatsBuffer_KERNEL
(
OBJGPU *pGpu,
KernelChannel *pKernelChannel,
NvBool bSet
)
{
MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
TRANSFER_SURFACE surf = {0};
if (bSet)
{
NV_ASSERT_OR_RETURN(pKernelChannel->pEncStatsBuf == NULL, NV_ERR_INVALID_STATE);
//
// We rely on the persistent mapping for the encryption statistics buffer,
// since it will be used in the top half, and mappings are not allowed
// in contexts that can't sleep on KVM or similar HCC systems.
//
surf.pMemDesc = pKernelChannel->pEncStatsBufMemDesc;
surf.offset = 0;
pKernelChannel->pEncStatsBuf = (CC_CRYPTOBUNDLE_STATS*)memmgrMemBeginTransfer(pMemoryManager, &surf,
sizeof(CC_CRYPTOBUNDLE_STATS),
TRANSFER_FLAGS_SHADOW_ALLOC);
NV_ASSERT_OR_RETURN(pKernelChannel->pEncStatsBuf != NULL, NV_ERR_INVALID_STATE);
portMemSet(pKernelChannel->pEncStatsBuf, 0, sizeof(CC_CRYPTOBUNDLE_STATS));
}
else
{
//
// Free persistent mappings for encryption stats buffer
// TODO CONFCOMP-984: Make this fatal if this ptr is NULL
//
if (pKernelChannel->pEncStatsBufMemDesc != NULL)
{
surf.pMemDesc = pKernelChannel->pEncStatsBufMemDesc;
surf.offset = 0;
memmgrMemEndTransfer(pMemoryManager, &surf, sizeof(CC_CRYPTOBUNDLE_STATS), 0);
}
}
return NV_OK;
}
static NvNotification*
_kchannelGetKeyRotationNotifier(KernelChannel *pKernelChannel)
{
return pKernelChannel->pKeyRotationNotifier;
}

@@ -65,6 +65,10 @@ static void _kfifoChidMgrDestroyChannelGroupMgr(CHID_MGR *pChidMgr);
static NV_STATUS _kfifoChidMgrFreeIsolationId(CHID_MGR *pChidMgr, NvU32 ChID);
static NV_STATUS _kfifoChidMgrGetNextKernelChannel(OBJGPU *pGpu, KernelFifo *pKernelFifo,
CHID_MGR *pChidMgr, CHANNEL_ITERATOR *pIt,
KernelChannel **ppKernelChannel);
NvU32 kfifoGetNumEschedDrivenEngines_IMPL
(
@@ -1690,12 +1694,17 @@ kfifoFillMemInfo_IMPL
}
}
/*
* Initializes an iterator to iterate through all channels of a runlist
* If runlistId is INVALID_RUNLIST_ID then it iterates channels for all runlists
*/
void
kfifoGetChannelIterator_IMPL
(
OBJGPU *pGpu,
KernelFifo *pKernelFifo,
CHANNEL_ITERATOR *pIt,
NvU32 runlistId
)
{
portMemSet(pIt, 0, sizeof(*pIt));
@@ -1703,10 +1712,73 @@ kfifoGetChannelIterator_IMPL
pIt->pFifoDataBlock = NULL;
pIt->runlistId = 0;
pIt->numRunlists = 1;
// Do we want to iterate channels on all runlists?
if (runlistId == INVALID_RUNLIST_ID)
{
if (kfifoIsPerRunlistChramEnabled(pKernelFifo))
{
pIt->numRunlists = kfifoGetMaxNumRunlists_HAL(pGpu, pKernelFifo);
}
}
else
{
pIt->runlistId = runlistId;
}
}
// return next channel for a specific chidMgr
static NV_STATUS
_kfifoChidMgrGetNextKernelChannel
(
OBJGPU *pGpu,
KernelFifo *pKernelFifo,
CHID_MGR *pChidMgr,
CHANNEL_ITERATOR *pIt,
KernelChannel **ppKernelChannel
)
{
KernelChannel *pKernelChannel = NULL;
pIt->numChannels = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr);
if (pIt->pFifoDataBlock == NULL)
{
pIt->pFifoDataBlock = pChidMgr->pFifoDataHeap->eheapGetBlock(
pChidMgr->pFifoDataHeap,
pIt->physicalChannelID,
NV_TRUE);
}
while (pIt->physicalChannelID < pIt->numChannels)
{
if (pIt->pFifoDataBlock->owner == NVOS32_BLOCK_TYPE_FREE)
{
pIt->physicalChannelID = pIt->pFifoDataBlock->end + 1;
}
else
{
pIt->physicalChannelID++;
pKernelChannel = (KernelChannel *)pIt->pFifoDataBlock->pData;
//
// This iterator can be used during an interrupt, when a KernelChannel may
// be in the process of being destroyed. If a KernelChannel expects a pChannel
// but does not have one, it means it's being destroyed and we don't want to
// return it.
//
if (pKernelChannel != NULL && kchannelIsValid_HAL(pKernelChannel))
{
// Prepare iterator to check next block in pChidMgr->pFifoDataHeap
pIt->pFifoDataBlock = pIt->pFifoDataBlock->next;
*ppKernelChannel = pKernelChannel;
return NV_OK;
}
}
// Check next block in pChidMgr->pFifoDataHeap
pIt->pFifoDataBlock = pIt->pFifoDataBlock->next;
}
return NV_ERR_OBJECT_NOT_FOUND;
} }
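/*
 * Editorial note: an illustrative sketch, not part of the driver source,
 * showing the caller-side pattern for the new runlist-scoped iteration.
 * Passing a concrete runlistId restricts the walk to that runlist; passing
 * INVALID_RUNLIST_ID walks every runlist, as the existing call sites do.
 */
static NvU32
ccSketchCountRunlistChannels(OBJGPU *pGpu, KernelFifo *pKernelFifo, NvU32 runlistId)
{
    CHANNEL_ITERATOR it = {0};
    KernelChannel *pKernelChannel = NULL;
    NvU32 count = 0;

    kfifoGetChannelIterator(pGpu, pKernelFifo, &it, runlistId);
    while (kfifoGetNextKernelChannel(pGpu, pKernelFifo, &it, &pKernelChannel) == NV_OK)
    {
        // Only channels that passed kchannelIsValid_HAL() are returned.
        count++;
    }
    return count;
}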
/**
@@ -1732,13 +1804,18 @@ NV_STATUS kfifoGetNextKernelChannel_IMPL
KernelChannel **ppKernelChannel
)
{
if (ppKernelChannel == NULL)
return NV_ERR_INVALID_ARGUMENT;
*ppKernelChannel = NULL;
if (pIt->numRunlists == 1)
{
CHID_MGR *pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, pIt->runlistId);
NV_ASSERT_OR_RETURN(pChidMgr != NULL, NV_ERR_INVALID_ARGUMENT);
return _kfifoChidMgrGetNextKernelChannel(pGpu, pKernelFifo,
pChidMgr, pIt, ppKernelChannel);
}
while (pIt->runlistId < pIt->numRunlists)
{
CHID_MGR *pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, pIt->runlistId);
@@ -1749,50 +1826,18 @@ NV_STATUS kfifoGetNextKernelChannel_IMPL
continue;
}
if (_kfifoChidMgrGetNextKernelChannel(pGpu, pKernelFifo, pChidMgr,
pIt, ppKernelChannel) == NV_OK)
{
return NV_OK;
}
else
{
pIt->runlistId++;
// Reset iterator for next runlist
pIt->physicalChannelID = 0;
pIt->pFifoDataBlock = NULL;
}
}
return NV_ERR_OBJECT_NOT_FOUND; return NV_ERR_OBJECT_NOT_FOUND;
@@ -2349,7 +2394,7 @@ kfifoEngineListHasChannel_IMPL
NV_ASSERT_OR_RETURN((pEngines != NULL) && (engineCount > 0), NV_TRUE);
// Find any channels or contexts on passed engines
kfifoGetChannelIterator(pGpu, pKernelFifo, &it, INVALID_RUNLIST_ID);
while (kchannelGetNextKernelChannel(pGpu, &it, &pKernelChannel) == NV_OK)
{
NV_ASSERT_OR_ELSE(pKernelChannel != NULL, continue);

@@ -42,6 +42,8 @@
#include "ctrl/ctrl0080/ctrl0080fifo.h"
#include "kernel/gpu/conf_compute/conf_compute.h"
static NV_STATUS _kfifoGetCaps(OBJGPU *pGpu, NvU8 *pKfifoCaps);
/*!
@@ -783,6 +785,7 @@ subdeviceCtrlCmdFifoDisableChannelsForKeyRotation_IMPL
CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams;
NvU32 i;
KernelChannel *pKernelChannel = NULL;
NV_CHECK_OR_RETURN(LEVEL_INFO,
pDisableChannelParams->numChannels <= NV_ARRAY_ELEMENTS(pDisableChannelParams->hChannelList),
@@ -812,7 +815,6 @@ subdeviceCtrlCmdFifoDisableChannelsForKeyRotation_IMPL
for (i = 0; i < pDisableChannelParams->numChannels; i++)
{
RsClient *pClient = NULL;
tmpStatus = serverGetClientUnderLock(&g_resServ,
pDisableChannelParams->hClientList[i], &pClient);
if (tmpStatus != NV_OK)
@@ -833,5 +835,19 @@ subdeviceCtrlCmdFifoDisableChannelsForKeyRotation_IMPL
kchannelDisableForKeyRotation(pGpu, pKernelChannel, NV_TRUE);
kchannelEnableAfterKeyRotation(pGpu, pKernelChannel, pDisableChannelParams->bEnableAfterKeyRotation);
}
if ((IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) &&
(pKernelChannel != NULL))
{
NvU32 h2dKey, d2hKey;
ConfidentialCompute *pConfCompute = GPU_GET_CONF_COMPUTE(pGpu);
NV_ASSERT_OK_OR_RETURN(confComputeGetKeyPairByChannel_HAL(pGpu, pConfCompute, pKernelChannel, &h2dKey, &d2hKey));
KEY_ROTATION_STATUS state;
NV_ASSERT_OK_OR_RETURN(confComputeGetKeyRotationStatus(pConfCompute, h2dKey, &state));
if (state == KEY_ROTATION_STATUS_PENDING)
{
NV_ASSERT_OK_OR_RETURN(confComputeCheckAndScheduleKeyRotation(pGpu, pConfCompute, h2dKey, d2hKey));
}
}
return status;
}

@@ -231,7 +231,7 @@ kfifoStateDestroy_IMPL
// On LDDM, we don't free these during freechannel because it's possible
// we wouldn't be able to reallocate them (we want to keep them preallocated
// from boot time). But we need to free before shutdown, so do that here.
kfifoGetChannelIterator(pGpu, pKernelFifo, &chanIt, INVALID_RUNLIST_ID);
while ((kfifoGetNextKernelChannel(pGpu, pKernelFifo, &chanIt, &pKernelChannel) == NV_OK))
{
RM_ENGINE_TYPE rmEngineType;

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -245,6 +245,28 @@ gpuFabricProbeGetFlaAddressRange
return status;
}
/*
* This function is used to get the peer GPU EGM address from FM to RM.
* FM passes only the upper 32 bits of the address.
*/
NV_STATUS
gpuFabricProbeGetEgmGpaAddress
(
GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel,
NvU64 *pEgmGpaAddress
)
{
NV_STATUS status;
status = _gpuFabricProbeFullSanityCheck(pGpuFabricProbeInfoKernel);
NV_CHECK_OR_RETURN(LEVEL_SILENT, status == NV_OK, status);
*pEgmGpaAddress = (NvU64)pGpuFabricProbeInfoKernel->probeResponseMsg.probeRsp.gpaAddressEGMHi << 32ULL;
return status;
}
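/*
 * Editorial note: an illustrative sketch, not part of the driver source.
 * Since the probe response carries only the upper 32 bits of the EGM GPA,
 * the reconstruction above is a plain shift: e.g. a gpaAddressEGMHi of 0x8
 * yields an EGM base of (NvU64)0x8 << 32 = 0x800000000 (32 GB).
 */
static inline NvU64
ccSketchEgmBaseFromHi(NvU32 gpaAddressEGMHi)
{
    // The low 32 bits of the EGM base are implicitly zero.
    return (NvU64)gpaAddressEGMHi << 32ULL;
}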
NV_STATUS
gpuFabricProbeGetNumProbeReqs
(
@@ -385,6 +407,7 @@ _gpuFabricProbeSetupGpaRange
{
NvU64 gpaAddress;
NvU64 gpaAddressSize;
NvU64 egmGpaAddress;
NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR,
gpuFabricProbeGetGpaAddress(pGpuFabricProbeInfoKernel,
@@ -397,6 +420,14 @@ _gpuFabricProbeSetupGpaRange
NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR,
knvlinkSetUniqueFabricBaseAddress_HAL(pGpu, pKernelNvlink,
gpaAddress) == NV_OK);
NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR,
gpuFabricProbeGetEgmGpaAddress(pGpuFabricProbeInfoKernel,
&egmGpaAddress) == NV_OK);
NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR,
knvlinkSetUniqueFabricEgmBaseAddress_HAL(pGpu, pKernelNvlink,
egmGpaAddress) == NV_OK);
}
}
@@ -640,6 +671,7 @@ gpuFabricProbeStart
GPU_FABRIC_PROBE_INFO_KERNEL *pGpuFabricProbeInfoKernel;
RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
NV2080_CTRL_CMD_INTERNAL_START_GPU_FABRIC_PROBE_INFO_PARAMS params = { 0 };
MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
LOCK_ASSERT_AND_RETURN(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu)));
@@ -660,7 +692,7 @@ gpuFabricProbeStart
pGpuFabricProbeInfoKernel->pGpu = pGpu;
pGpuFabricProbeInfoKernel->bwMode = gpumgrGetGpuNvlinkBwMode();
params.bwMode = pGpuFabricProbeInfoKernel->bwMode;
params.bLocalEgmEnabled = pMemoryManager->bLocalEgmEnabled;
if (IS_VIRTUAL(pGpu))
{
@@ -769,7 +801,10 @@ _gpuFabricProbeInvalidate
portAtomicSetU32(&pGpuFabricProbeInfoKernel->probeRespRcvd, 0);
if (pKernelNvlink != NULL)
{
knvlinkClearUniqueFabricBaseAddress_HAL(pGpu, pKernelNvlink);
knvlinkClearUniqueFabricEgmBaseAddress_HAL(pGpu, pKernelNvlink);
}
if (pFabricVAS != NULL)
fabricvaspaceClearUCRange(pFabricVAS);

@@ -24,11 +24,36 @@
#define NVOC_KERNEL_GRAPHICS_H_PRIVATE_ACCESS_ALLOWED
#include "gpu_mgr/gpu_mgr.h"
#include "kernel/gpu/mem_mgr/mem_mgr.h"
#include "kernel/gpu/gr/kernel_graphics_manager.h"
#include "kernel/gpu/gr/kernel_graphics.h"
#include "kernel/gpu/device/device.h"
#include "kernel/gpu/subdevice/subdevice.h"
#include "kernel/rmapi/rmapi_utils.h"
#include "kernel/core/locks.h"
#include "kernel/gpu/mem_sys/kern_mem_sys.h"
#include "kernel/mem_mgr/gpu_vaspace.h"
#include "kernel/gpu/mem_mgr/mem_mgr.h"
#include "virtualization/hypervisor/hypervisor.h"
#include "kernel/gpu/mem_mgr/heap.h"
#include "gpu/mem_mgr/virt_mem_allocator.h"
#include "gpu/mmu/kern_gmmu.h"
#include "platform/sli/sli.h"
#include "rmapi/rs_utils.h"
#include "rmapi/client.h"
#include "nvrm_registry.h"
#include "gpu/mem_mgr/heap.h"
#include "ctrl/ctrl0080/ctrl0080fifo.h" #include "ctrl/ctrl0080/ctrl0080fifo.h"
#include "class/cla06f.h"
#include "class/cl90f1.h" // FERMI_VASPACE_A
#include "class/cl003e.h" // NV01_MEMORY_SYSTEM
#include "class/cl50a0.h" // NV50_MEMORY_VIRTUAL
#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER
#include "class/clc36f.h" // VOLTA_CHANNEL_GPFIFO_A
#include "class/clc46f.h" // TURING_CHANNEL_GPFIFO_A
/*!
 * @brief Allocate common local/global buffers that are required by the graphics context
 *

@@ -97,6 +97,12 @@ static NV_STATUS _kgraphicsMapGlobalCtxBuffer(OBJGPU *pGpu, KernelGraphics *pKer
KernelGraphicsContext *, GR_GLOBALCTX_BUFFER, NvBool bIsReadOnly);
static NV_STATUS _kgraphicsPostSchedulingEnableHandler(OBJGPU *, void *);
static void
_kgraphicsInitRegistryOverrides(OBJGPU *pGpu, KernelGraphics *pKernelGraphics)
{
return;
}
NV_STATUS
kgraphicsConstructEngine_IMPL
(
@@ -216,6 +222,7 @@ kgraphicsConstructEngine_IMPL
NV_ASSERT_OK_OR_RETURN(fecsCtxswLoggingInit(pGpu, pKernelGraphics, &pKernelGraphics->pFecsTraceInfo));
_kgraphicsInitRegistryOverrides(pGpu, pKernelGraphics);
return NV_OK;
}
@@ -355,6 +362,7 @@ kgraphicsStatePreUnload_IMPL
NvU32 flags
)
{
fecsBufferUnmap(pGpu, pKernelGraphics); fecsBufferUnmap(pGpu, pKernelGraphics);
// Release global buffers used as part of the gr context, when not in S/R

@@ -148,7 +148,10 @@
//     no trace output
#define _MMUXLATEVADDR_FLAG_XLATE_ONLY _MMUXLATEVADDR_FLAG_VALIDATE_TERSELY
static NV_STATUS _dmaGetFabricAddress(OBJGPU *pGpu, NvU32 aperture, NvU32 kind,
NvU64 *fabricAddr);
static NV_STATUS _dmaGetFabricEgmAddress(OBJGPU *pGpu, NvU32 aperture, NvU32 kind,
NvU64 *fabricEgmAddr);
static NV_STATUS
_dmaApplyWarForBug2720120
@@ -1060,7 +1063,18 @@ dmaAllocMapping_GM107
}
else
{
// Get EGM fabric address for Remote EGM
if (memdescIsEgm(pLocals->pTempMemDesc))
{
status = _dmaGetFabricEgmAddress(pLocals->pSrcGpu, pLocals->aperture,
pLocals->kind, &pLocals->fabricAddr);
}
else
{
status = _dmaGetFabricAddress(pLocals->pSrcGpu, pLocals->aperture,
pLocals->kind, &pLocals->fabricAddr);
}
if (status != NV_OK)
{
DBG_BREAKPOINT();
@@ -1666,7 +1680,7 @@ static NV_STATUS _dmaGetFabricAddress
//
// Fabric address should be available for NVSwitch connected GPUs,
// otherwise it is a NOP.
//
*fabricAddr = knvlinkGetUniqueFabricBaseAddress(pGpu, pKernelNvlink);
if (*fabricAddr == NVLINK_INVALID_FABRIC_ADDR)
{
return NV_OK;
@@ -1682,6 +1696,49 @@ static NV_STATUS _dmaGetFabricAddress
return NV_OK;
}
static NV_STATUS _dmaGetFabricEgmAddress
(
OBJGPU *pGpu,
NvU32 aperture,
NvU32 kind,
NvU64 *fabricEgmAddr
)
{
MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu);
*fabricEgmAddr = NVLINK_INVALID_FABRIC_ADDR;
if (pKernelNvlink == NULL)
{
return NV_OK;
}
if (aperture != NV_MMU_PTE_APERTURE_PEER_MEMORY)
{
return NV_OK;
}
//
// Fabric address should be available for NVSwitch connected GPUs,
// otherwise it is a NOP.
//
*fabricEgmAddr = knvlinkGetUniqueFabricEgmBaseAddress(pGpu, pKernelNvlink);
if (*fabricEgmAddr == NVLINK_INVALID_FABRIC_ADDR)
{
return NV_OK;
}
if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, kind))
{
NV_PRINTF(LEVEL_ERROR,
"Nvswitch systems don't support compression.\n");
return NV_ERR_NOT_SUPPORTED;
}
return NV_OK;
}
// VMM-TODO: PL(N) mmuPageLevelUpdate - but major splits
NV_STATUS
dmaUpdateVASpace_GF100

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -111,6 +111,39 @@ knvlinkValidateFabricBaseAddress_GH100
return NV_OK;
}
/*!
* @brief Validates fabric EGM base address.
*
* @param[in] pGpu OBJGPU pointer
* @param[in] pKernelNvlink KernelNvlink pointer
* @param[in] fabricEgmBaseAddr Address to be validated
*
* @returns On success, NV_OK.
* On failure, returns NV_ERR_XXX.
*/
NV_STATUS
knvlinkValidateFabricEgmBaseAddress_GH100
(
OBJGPU *pGpu,
KernelNvlink *pKernelNvlink,
NvU64 fabricEgmBaseAddr
)
{
//
// Hopper SKUs will be paired with NVSwitches supporting 2K
// mapslots that can cover 512GB each. Make sure that the EGM fabric base
// address being used is valid to cover the whole frame buffer.
//
// Check if fabric EGM address is aligned to mapslot size.
if (fabricEgmBaseAddr & (NVBIT64(39) - 1))
{
return NV_ERR_INVALID_ARGUMENT;
}
return NV_OK;
}
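/*
 * Editorial note: an illustrative sketch, not part of the driver source.
 * NVBIT64(39) is 2^39 bytes = 512 GB, so the check above passes exactly when
 * the low 39 bits of the address are zero, i.e. the base is a multiple of the
 * 512 GB mapslot size: 0x8000000000 (512 GB) passes, 0x8000100000 fails.
 */
static inline NvBool
ccSketchIsMapslotAligned(NvU64 fabricEgmBaseAddr)
{
    // Same predicate as knvlinkValidateFabricEgmBaseAddress_GH100 above.
    return (fabricEgmBaseAddr & (NVBIT64(39) - 1)) == 0;
}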
/*!
 * @brief Do post setup on nvlink peers
 *
@@ -646,6 +679,82 @@ knvlinkClearUniqueFabricBaseAddress_GH100
pKernelNvlink->fabricBaseAddr = NVLINK_INVALID_FABRIC_ADDR;
}
/*!
* @brief Set unique EGM fabric base address for NVSwitch enabled systems.
*
* @param[in] pGpu OBJGPU pointer
* @param[in] pKernelNvlink KernelNvlink pointer
* @param[in] fabricEgmBaseAddr EGM Fabric Address to set
*
* @returns On success, sets unique EGM fabric address and returns NV_OK.
* On failure, returns NV_ERR_XXX.
*/
NV_STATUS
knvlinkSetUniqueFabricEgmBaseAddress_GH100
(
OBJGPU *pGpu,
KernelNvlink *pKernelNvlink,
NvU64 fabricEgmBaseAddr
)
{
NV_STATUS status = NV_OK;
status = knvlinkValidateFabricEgmBaseAddress_HAL(pGpu, pKernelNvlink,
fabricEgmBaseAddr);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "EGM Fabric base addr validation failed for GPU %x\n",
pGpu->gpuInstance);
return status;
}
if (IsSLIEnabled(pGpu))
{
NV_PRINTF(LEVEL_ERROR,
"Operation is unsupported on SLI enabled GPU %x\n",
pGpu->gpuInstance);
return NV_ERR_NOT_SUPPORTED;
}
if (pKernelNvlink->fabricEgmBaseAddr == fabricEgmBaseAddr)
{
NV_PRINTF(LEVEL_INFO,
"The same EGM fabric base addr is being re-assigned to GPU %x\n",
pGpu->gpuInstance);
return NV_OK;
}
if (pKernelNvlink->fabricEgmBaseAddr != NVLINK_INVALID_FABRIC_ADDR)
{
NV_PRINTF(LEVEL_ERROR, "EGM Fabric base addr is already assigned to GPU %x\n",
pGpu->gpuInstance);
return NV_ERR_STATE_IN_USE;
}
pKernelNvlink->fabricEgmBaseAddr = fabricEgmBaseAddr;
NV_PRINTF(LEVEL_INFO, "EGM Fabric base addr %llx is assigned to GPU %x\n",
pKernelNvlink->fabricEgmBaseAddr, pGpu->gpuInstance);
return NV_OK;
}
/*!
* @brief Clear unique EGM fabric base address for NVSwitch enabled systems.
*
* @param[in] pGpu OBJGPU pointer
* @param[in] pKernelNvlink KernelNvlink pointer
*/
void
knvlinkClearUniqueFabricEgmBaseAddress_GH100
(
OBJGPU *pGpu,
KernelNvlink *pKernelNvlink
)
{
pKernelNvlink->fabricEgmBaseAddr = NVLINK_INVALID_FABRIC_ADDR;
}
/*!
 * @brief Check if system has enough active NVLinks and
 *        enough NVLink bridges

@@ -269,6 +269,68 @@ _knvlinkCheckFabricCliqueId
return NV_TRUE;
}
/*!
* @brief Checks whether EGM addresses are valid for P2P
* when GPU is connected to NVSwitch
*
* @param[in] pGpu OBJGPU pointer for local GPU
* @param[in] pKernelNvlink KernelNvlink pointer
* @param[in] pPeerGpu OBJGPU pointer for remote GPU
*
* @return NV_TRUE if EGM addresses are valid
*/
static NvBool
_knvlinkCheckNvswitchEgmAddressSanity
(
OBJGPU *pGpu,
KernelNvlink *pKernelNvlink,
OBJGPU *pPeerGpu
)
{
NvU64 egmRangeStart = knvlinkGetUniqueFabricEgmBaseAddress(pGpu, pKernelNvlink);
if (knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink))
{
if (gpuIsSriovEnabled(pGpu))
{
// currently vgpu + switch doesn't support GPA addressing.
return NV_TRUE;
}
if (gpuFabricProbeIsSupported(pGpu) && gpuFabricProbeIsSupported(pPeerGpu))
{
if (!_knvlinkCheckFabricCliqueId(pGpu, pPeerGpu))
{
return NV_FALSE;
}
}
// Sanity checks for EGM address
if (egmRangeStart == NVLINK_INVALID_FABRIC_ADDR)
{
NV_PRINTF(LEVEL_ERROR, "GPU %d doesn't have a EGM fabric address\n",
gpuGetInstance(pGpu));
return NV_FALSE;
}
}
else
{
// Sanity check for EGM address
if (egmRangeStart != NVLINK_INVALID_FABRIC_ADDR)
{
NV_PRINTF(LEVEL_ERROR,
"non-NVSwitch GPU %d has a valid EGM fabric address\n",
gpuGetInstance(pGpu));
return NV_FALSE;
}
}
return NV_TRUE;
}
/*!
 * @brief Checks whether the necessary config setup is done to
 *        support P2P over NVSwitch
@@ -288,10 +350,10 @@ knvlinkCheckNvswitchP2pConfig_IMPL
)
{
MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
NvU64 hbmRangeStart = knvlinkGetUniqueFabricBaseAddress(pGpu, pKernelNvlink);
NvU64 hbmRangeEnd = hbmRangeStart + (pMemoryManager->Ram.fbTotalMemSizeMb << 20);
NvU64 hbmPeerRangeStart = knvlinkGetUniqueFabricBaseAddress(pPeerGpu,
GPU_GET_KERNEL_NVLINK(pPeerGpu));
if (knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink))
{
@@ -309,8 +371,8 @@ knvlinkCheckNvswitchP2pConfig_IMPL
}
}
// Sanity checks for HBM addresses
if (hbmRangeStart == NVLINK_INVALID_FABRIC_ADDR)
{
NV_PRINTF(LEVEL_ERROR, "GPU %d doesn't have a fabric address\n",
gpuGetInstance(pGpu));
@@ -319,7 +381,7 @@ knvlinkCheckNvswitchP2pConfig_IMPL
}
if ((pGpu != pPeerGpu) &&
((hbmPeerRangeStart >= hbmRangeStart) && (hbmPeerRangeStart < hbmRangeEnd)))
{
NV_PRINTF(LEVEL_ERROR,
"GPU %d doesn't have a unique fabric address\n",
@@ -330,8 +392,8 @@ knvlinkCheckNvswitchP2pConfig_IMPL
}
else
{
// Sanity check for HBM address
if (hbmRangeStart != NVLINK_INVALID_FABRIC_ADDR)
{
NV_PRINTF(LEVEL_ERROR,
"non-NVSwitch GPU %d has a valid fabric address\n",
@@ -341,6 +403,11 @@ knvlinkCheckNvswitchP2pConfig_IMPL
}
}
if (memmgrIsLocalEgmEnabled(pMemoryManager))
{
return _knvlinkCheckNvswitchEgmAddressSanity(pGpu, pKernelNvlink, pPeerGpu);
}
return NV_TRUE;
}

@@ -369,10 +369,10 @@ krcErrorInvokeCallback_IMPL
&classInfo);
// notify the Fifo channel based event listeners
kchannelNotifyEvent(pKernelChannel,
classInfo.rcNotifierIndex,
0, 0, &params,
sizeof(params));
}
// update RC diagnostic records with process id and owner

@@ -3103,7 +3103,10 @@ subdeviceCtrlCmdGetGpuFabricProbeInfo_IMPL
status = gpuFabricProbeGetfmCaps(pGpu->pGpuFabricProbeInfoKernel, &fmCaps);
NV_ASSERT_OK_OR_RETURN(status);
if (!gpuIsCCFeatureEnabled(pGpu) || !gpuIsCCMultiGpuProtectedPcieModeEnabled(pGpu))
{
pParams->fabricCaps = _convertGpuFabricProbeInfoCaps(fmCaps);
}
status = gpuFabricProbeGetFabricCliqueId(pGpu->pGpuFabricProbeInfoKernel,
&pParams->fabricCliqueId);

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -215,10 +215,10 @@ _memoryFabricAttachMem
return NV_ERR_NOT_SUPPORTED;
}
if (gpuIsCCFeatureEnabled(pGpu) && !gpuIsCCMultiGpuProtectedPcieModeEnabled(pGpu))
{
NV_PRINTF(LEVEL_ERROR,
"Unsupported when Confidential Computing is enabled in SPT\n");
return NV_ERR_NOT_SUPPORTED;
}

@@ -127,7 +127,7 @@ _vidmemPmaAllocate
MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
PMA *pPma = &pHeap->pmaObject;
NvU64 size = 0;
NvU32 pageCount;
NvU32 pmaInfoSize;
NvU64 pageSize;
NV_STATUS status;

@@ -631,7 +631,6 @@ NV_STATUS embeddedParamCopyIn(RMAPI_PARAM_COPY *paramCopies, RmCtrlParams *pRmCt
((NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS*)pParams)->busPeerIds,
numEntries, sizeof(NvU32));
paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN;
break;
}
case NV0080_CTRL_CMD_FB_GET_CAPS:
@@ -1070,7 +1069,6 @@ NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *paramCopies, RmCtrlParams *pRmC
case NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS:
{
CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS);
status = rmapiParamsRelease(&paramCopies[0]);
((NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS*)pParams)->busPeerIds = paramCopies[0].pUserParams;
break;

@@ -1,5 +1,5 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -3293,7 +3293,14 @@ nvGpuOpsBuildExternalAllocPtes
}
else
{
if (memdescIsEgm(pMemDesc))
{
fabricBaseAddress = knvlinkGetUniqueFabricEgmBaseAddress(pMemDesc->pGpu, pKernelNvlink);
}
else
{
fabricBaseAddress = knvlinkGetUniqueFabricBaseAddress(pMemDesc->pGpu, pKernelNvlink);
}
}
}
}
@@ -3318,8 +3325,8 @@ nvGpuOpsBuildExternalAllocPtes
//
memdescGetPhysAddrsForGpu(pMemDesc, pMappingGpu, AT_GPU, offset, mappingPageSize,
pteCount, physicalAddresses);
kgmmuEncodePhysAddrs(pKernelGmmu, aperture, physicalAddresses, fabricBaseAddress, pteCount);
//
// Get information whether given physical address needs PLCable kind
@@ -9966,8 +9973,7 @@ void nvGpuOpsPagingChannelsUnmap(struct gpuAddressSpace *srcVaSpace,
return;
}
status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_NONE, hClient, NULL, &acquiredLocks);
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR,
