550.90.07

This commit is contained in:
Bernhard Stoeckner 2024-06-04 13:48:03 +02:00
parent 083cd9cf17
commit e45d91de02
No known key found for this signature in database
GPG Key ID: 7D23DC2750FAC2E1
180 changed files with 43467 additions and 38127 deletions

View File

@ -2,6 +2,8 @@
## Release 550 Entries ## Release 550 Entries
### [550.90.07] 2024-06-04
### [550.78] 2024-04-25 ### [550.78] 2024-04-25
### [550.76] 2024-04-17 ### [550.76] 2024-04-17

View File

@ -1,7 +1,7 @@
# NVIDIA Linux Open GPU Kernel Module Source # NVIDIA Linux Open GPU Kernel Module Source
This is the source release of the NVIDIA Linux open GPU kernel modules, This is the source release of the NVIDIA Linux open GPU kernel modules,
version 550.78. version 550.90.07.
## How to Build ## How to Build
@ -17,7 +17,7 @@ as root:
Note that the kernel modules built here must be used with GSP Note that the kernel modules built here must be used with GSP
firmware and user-space NVIDIA GPU driver components from a corresponding firmware and user-space NVIDIA GPU driver components from a corresponding
550.78 driver release. This can be achieved by installing 550.90.07 driver release. This can be achieved by installing
the NVIDIA GPU driver from the .run file using the `--no-kernel-modules` the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
option. E.g., option. E.g.,
@ -188,7 +188,7 @@ encountered specific to them.
For details on feature support and limitations, see the NVIDIA GPU driver For details on feature support and limitations, see the NVIDIA GPU driver
end user README here: end user README here:
https://us.download.nvidia.com/XFree86/Linux-x86_64/550.78/README/kernel_open.html https://us.download.nvidia.com/XFree86/Linux-x86_64/550.90.07/README/kernel_open.html
For vGPU support, please refer to the README.vgpu packaged in the vGPU Host For vGPU support, please refer to the README.vgpu packaged in the vGPU Host
Package for more details. Package for more details.
@ -757,6 +757,8 @@ Subsystem Device ID.
| NVIDIA H100 80GB HBM3 | 2330 10DE 16C0 | | NVIDIA H100 80GB HBM3 | 2330 10DE 16C0 |
| NVIDIA H100 80GB HBM3 | 2330 10DE 16C1 | | NVIDIA H100 80GB HBM3 | 2330 10DE 16C1 |
| NVIDIA H100 PCIe | 2331 10DE 1626 | | NVIDIA H100 PCIe | 2331 10DE 1626 |
| NVIDIA H200 | 2335 10DE 18BE |
| NVIDIA H200 | 2335 10DE 18BF |
| NVIDIA H100 | 2339 10DE 17FC | | NVIDIA H100 | 2339 10DE 17FC |
| NVIDIA H800 NVL | 233A 10DE 183A | | NVIDIA H800 NVL | 233A 10DE 183A |
| NVIDIA GH200 120GB | 2342 10DE 16EB | | NVIDIA GH200 120GB | 2342 10DE 16EB |
@ -873,6 +875,7 @@ Subsystem Device ID.
| NVIDIA L40S | 26B9 10DE 1851 | | NVIDIA L40S | 26B9 10DE 1851 |
| NVIDIA L40S | 26B9 10DE 18CF | | NVIDIA L40S | 26B9 10DE 18CF |
| NVIDIA L20 | 26BA 10DE 1957 | | NVIDIA L20 | 26BA 10DE 1957 |
| NVIDIA L20 | 26BA 10DE 1990 |
| NVIDIA GeForce RTX 4080 SUPER | 2702 | | NVIDIA GeForce RTX 4080 SUPER | 2702 |
| NVIDIA GeForce RTX 4080 | 2704 | | NVIDIA GeForce RTX 4080 | 2704 |
| NVIDIA GeForce RTX 4070 Ti SUPER | 2705 | | NVIDIA GeForce RTX 4070 Ti SUPER | 2705 |

View File

@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
EXTRA_CFLAGS += -I$(src) EXTRA_CFLAGS += -I$(src)
EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"550.78\" EXTRA_CFLAGS += -DNV_VERSION_STRING=\"550.90.07\"
ifneq ($(SYSSRCHOST1X),) ifneq ($(SYSSRCHOST1X),)
EXTRA_CFLAGS += -I$(SYSSRCHOST1X) EXTRA_CFLAGS += -I$(SYSSRCHOST1X)

View File

@ -37,11 +37,9 @@ typedef enum _HYPERVISOR_TYPE
OS_HYPERVISOR_UNKNOWN OS_HYPERVISOR_UNKNOWN
} HYPERVISOR_TYPE; } HYPERVISOR_TYPE;
#define CMD_VGPU_VFIO_WAKE_WAIT_QUEUE 0 #define CMD_VFIO_WAKE_REMOVE_GPU 1
#define CMD_VGPU_VFIO_INJECT_INTERRUPT 1 #define CMD_VGPU_VFIO_PRESENT 2
#define CMD_VGPU_VFIO_REGISTER_MDEV 2 #define CMD_VFIO_PCI_CORE_PRESENT 3
#define CMD_VGPU_VFIO_PRESENT 3
#define CMD_VFIO_PCI_CORE_PRESENT 4
#define MAX_VF_COUNT_PER_GPU 64 #define MAX_VF_COUNT_PER_GPU 64
@ -54,17 +52,11 @@ typedef enum _VGPU_TYPE_INFO
typedef struct typedef struct
{ {
void *vgpuVfioRef;
void *waitQueue;
void *nv; void *nv;
NvU32 *vgpuTypeIds;
NvU8 **vgpuNames;
NvU32 numVgpuTypes;
NvU32 domain; NvU32 domain;
NvU8 bus; NvU32 bus;
NvU8 slot; NvU32 device;
NvU8 function; NvU32 return_status;
NvBool is_virtfn;
} vgpu_vfio_info; } vgpu_vfio_info;
typedef struct typedef struct

View File

@ -1614,6 +1614,10 @@ typedef struct nv_linux_state_s {
nv_kthread_q_t open_q; nv_kthread_q_t open_q;
NvBool is_accepting_opens; NvBool is_accepting_opens;
struct semaphore open_q_lock; struct semaphore open_q_lock;
#if defined(NV_VGPU_KVM_BUILD)
wait_queue_head_t wait;
NvS32 return_status;
#endif
} nv_linux_state_t; } nv_linux_state_t;
extern nv_linux_state_t *nv_linux_devices; extern nv_linux_state_t *nv_linux_devices;

View File

@ -1041,13 +1041,12 @@ NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, c
NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16); NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool); NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8); NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU32, void *, NvBool *); NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *,
NvU64 *, NvU64 *, NvU32 *, NvBool *, NvU8 *);
NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU64 *); NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU64 *);
NV_STATUS NV_API_CALL nv_vgpu_start(nvidia_stack_t *, const NvU8 *, void *, NvS32 *, NvU8 *, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 **, NvU64 **, NvU32 *);
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *); NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *);
NV_STATUS NV_API_CALL nv_vgpu_update_request(nvidia_stack_t *, const NvU8 *, NvU32, NvU64 *, NvU64 *, const char *);
NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *); NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *);
NV_STATUS NV_API_CALL nv_gpu_unbind_event(nvidia_stack_t *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_get_usermap_access_params(nv_state_t*, nv_usermap_access_params_t*); NV_STATUS NV_API_CALL nv_get_usermap_access_params(nv_state_t*, nv_usermap_access_params_t*);
nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*); nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*);

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -1505,23 +1505,35 @@ NV_STATUS nvUvmInterfaceCslInitContext(UvmCslContext *uvmCslContext,
void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext); void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext);
/******************************************************************************* /*******************************************************************************
nvUvmInterfaceCslUpdateContext nvUvmInterfaceCslRotateKey
Updates a context after a key rotation event and can only be called once per Disables channels and rotates keys.
key rotation event. Following a key rotation event, and before
nvUvmInterfaceCslUpdateContext is called, data encrypted by the GPU with the
previous key can be decrypted with nvUvmInterfaceCslDecrypt.
Locking: This function acquires an API lock. This function disables channels and rotates associated keys. The channels
Memory : This function does not dynamically allocate memory. associated with the given CSL contexts must be idled before this function is
called. To trigger key rotation all allocated channels for a given key must
be present in the list. If the function returns successfully then the CSL
contexts have been updated with the new key.
Locking: This function attempts to acquire the GPU lock. In case of failure
to acquire the return code is NV_ERR_STATE_IN_USE. The caller must
guarantee that no CSL function, including this one, is invoked
concurrently with the CSL contexts in contextList.
Memory : This function dynamically allocates memory.
Arguments: Arguments:
uvmCslContext[IN] - The CSL context associated with a channel. contextList[IN/OUT] - An array of pointers to CSL contexts.
contextListCount[IN] - Number of CSL contexts in contextList. Its value
must be greater than 0.
Error codes: Error codes:
NV_ERR_INVALID_ARGUMENT - The CSL context is not associated with a channel. NV_ERR_INVALID_ARGUMENT - contextList is NULL or contextListCount is 0.
NV_ERR_STATE_IN_USE - Unable to acquire lock / resource. Caller
can retry at a later time.
NV_ERR_GENERIC - A failure other than _STATE_IN_USE occurred
when attempting to acquire a lock.
*/ */
NV_STATUS nvUvmInterfaceCslUpdateContext(UvmCslContext *uvmCslContext); NV_STATUS nvUvmInterfaceCslRotateKey(UvmCslContext *contextList[],
NvU32 contextListCount);
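
A minimal caller sketch, based only on the prototype and error codes documented in this hunk; the helper name, retry policy, and surrounding idling logic are assumptions, not taken from the driver sources.

/*
 * Hypothetical sketch: retry nvUvmInterfaceCslRotateKey() while RM reports
 * NV_ERR_STATE_IN_USE (GPU lock not acquired). Assumes the channels behind
 * every context in the list have already been idled by the caller, and that
 * NV_STATUS, NvU32 and UvmCslContext come from the usual nv/uvm headers.
 */
static NV_STATUS rotate_keys_with_retry(UvmCslContext *contextList[],
                                        NvU32 contextListCount,
                                        unsigned max_attempts)
{
    NV_STATUS status = NV_ERR_STATE_IN_USE;
    unsigned attempt;

    for (attempt = 0; attempt < max_attempts; attempt++) {
        status = nvUvmInterfaceCslRotateKey(contextList, contextListCount);

        /* Any result other than "lock busy" is final; otherwise try again. */
        if (status != NV_ERR_STATE_IN_USE)
            break;
    }

    return status;
}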
/******************************************************************************* /*******************************************************************************
nvUvmInterfaceCslRotateIv nvUvmInterfaceCslRotateIv
@ -1529,17 +1541,13 @@ NV_STATUS nvUvmInterfaceCslUpdateContext(UvmCslContext *uvmCslContext);
Rotates the IV for a given channel and operation. Rotates the IV for a given channel and operation.
This function will rotate the IV on both the CPU and the GPU. This function will rotate the IV on both the CPU and the GPU.
Outstanding messages that have been encrypted by the GPU should first be For a given operation the channel must be idle before calling this function.
decrypted before calling this function with operation equal to This function can be called regardless of the value of the IV's message counter.
UVM_CSL_OPERATION_DECRYPT. Similarly, outstanding messages that have been
encrypted by the CPU should first be decrypted before calling this function
with operation equal to UVM_CSL_OPERATION_ENCRYPT. For a given operation
the channel must be idle before calling this function. This function can be
called regardless of the value of the IV's message counter.
Locking: This function attempts to acquire the GPU lock. Locking: This function attempts to acquire the GPU lock. In case of failure to
In case of failure to acquire the return code acquire the return code is NV_ERR_STATE_IN_USE. The caller must guarantee
is NV_ERR_STATE_IN_USE. that no CSL function, including this one, is invoked concurrently with
the same CSL context.
Memory : This function does not dynamically allocate memory. Memory : This function does not dynamically allocate memory.
Arguments: Arguments:
@ -1573,8 +1581,8 @@ NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
However, it is optional. If it is NULL, the next IV in line will be used. However, it is optional. If it is NULL, the next IV in line will be used.
Locking: This function does not acquire an API or GPU lock. Locking: This function does not acquire an API or GPU lock.
If called concurrently in different threads with the same UvmCslContext The caller must guarantee that no CSL function, including this one,
the caller must guarantee exclusion. is invoked concurrently with the same CSL context.
Memory : This function does not dynamically allocate memory. Memory : This function does not dynamically allocate memory.
Arguments: Arguments:
@ -1610,9 +1618,14 @@ NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
maximized when the input and output buffers are 16-byte aligned. This is maximized when the input and output buffers are 16-byte aligned. This is
natural alignment for AES block. natural alignment for AES block.
During a key rotation event the previous key is stored in the CSL context.
This allows data encrypted by the GPU to be decrypted with the previous key.
The keyRotationId parameter identifies which key is used. The first key rotation
ID has a value of 0 that increments by one for each key rotation event.
Locking: This function does not acquire an API or GPU lock. Locking: This function does not acquire an API or GPU lock.
If called concurrently in different threads with the same UvmCslContext The caller must guarantee that no CSL function, including this one,
the caller must guarantee exclusion. is invoked concurrently with the same CSL context.
Memory : This function does not dynamically allocate memory. Memory : This function does not dynamically allocate memory.
Arguments: Arguments:
@ -1622,6 +1635,8 @@ NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
decryptIv[IN] - IV used to decrypt the ciphertext. Its value can either be given by decryptIv[IN] - IV used to decrypt the ciphertext. Its value can either be given by
nvUvmInterfaceCslIncrementIv, or, if NULL, the CSL context's nvUvmInterfaceCslIncrementIv, or, if NULL, the CSL context's
internal counter is used. internal counter is used.
keyRotationId[IN] - Specifies the key that is used for decryption.
A value of NV_U32_MAX specifies the current key.
inputBuffer[IN] - Address of ciphertext input buffer. inputBuffer[IN] - Address of ciphertext input buffer.
outputBuffer[OUT] - Address of plaintext output buffer. outputBuffer[OUT] - Address of plaintext output buffer.
addAuthData[IN] - Address of the plaintext additional authenticated data used to addAuthData[IN] - Address of the plaintext additional authenticated data used to
@ -1642,6 +1657,7 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
NvU32 bufferSize, NvU32 bufferSize,
NvU8 const *inputBuffer, NvU8 const *inputBuffer,
UvmCslIv const *decryptIv, UvmCslIv const *decryptIv,
NvU32 keyRotationId,
NvU8 *outputBuffer, NvU8 *outputBuffer,
NvU8 const *addAuthData, NvU8 const *addAuthData,
NvU32 addAuthDataSize, NvU32 addAuthDataSize,
@ -1656,8 +1672,8 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
undefined behavior. undefined behavior.
Locking: This function does not acquire an API or GPU lock. Locking: This function does not acquire an API or GPU lock.
If called concurrently in different threads with the same UvmCslContext The caller must guarantee that no CSL function, including this one,
the caller must guarantee exclusion. is invoked concurrently with the same CSL context.
Memory : This function does not dynamically allocate memory. Memory : This function does not dynamically allocate memory.
Arguments: Arguments:
@ -1685,8 +1701,8 @@ NV_STATUS nvUvmInterfaceCslSign(UvmCslContext *uvmCslContext,
Locking: This function does not acquire an API or GPU lock. Locking: This function does not acquire an API or GPU lock.
Memory : This function does not dynamically allocate memory. Memory : This function does not dynamically allocate memory.
If called concurrently in different threads with the same UvmCslContext The caller must guarantee that no CSL function, including this one,
the caller must guarantee exclusion. is invoked concurrently with the same CSL context.
Arguments: Arguments:
uvmCslContext[IN/OUT] - The CSL context. uvmCslContext[IN/OUT] - The CSL context.
@ -1711,8 +1727,8 @@ NV_STATUS nvUvmInterfaceCslQueryMessagePool(UvmCslContext *uvmCslContext,
the returned IV can be used in nvUvmInterfaceCslDecrypt. the returned IV can be used in nvUvmInterfaceCslDecrypt.
Locking: This function does not acquire an API or GPU lock. Locking: This function does not acquire an API or GPU lock.
If called concurrently in different threads with the same UvmCslContext The caller must guarantee that no CSL function, including this one,
the caller must guarantee exclusion. is invoked concurrently with the same CSL context.
Memory : This function does not dynamically allocate memory. Memory : This function does not dynamically allocate memory.
Arguments: Arguments:
@ -1734,28 +1750,41 @@ NV_STATUS nvUvmInterfaceCslIncrementIv(UvmCslContext *uvmCslContext,
UvmCslIv *iv); UvmCslIv *iv);
/******************************************************************************* /*******************************************************************************
nvUvmInterfaceCslLogExternalEncryption nvUvmInterfaceCslLogEncryption
Checks and logs information about non-CSL encryptions, such as those that Checks and logs information about encryptions associated with the given
originate from the GPU. CSL context.
This function does not modify elements of the UvmCslContext. For contexts associated with channels, this function does not modify elements of
the UvmCslContext, and must be called for every CPU/GPU encryption.
For the context associated with fault buffers, bufferSize can encompass multiple
encryption invocations, and the UvmCslContext will be updated following a key
rotation event.
In either case the IV remains unmodified after this function is called.
Locking: This function does not acquire an API or GPU lock. Locking: This function does not acquire an API or GPU lock.
Memory : This function does not dynamically allocate memory. Memory : This function does not dynamically allocate memory.
If called concurrently in different threads with the same UvmCslContext The caller must guarantee that no CSL function, including this one,
the caller must guarantee exclusion. is invoked concurrently with the same CSL context.
Arguments: Arguments:
uvmCslContext[IN/OUT] - The CSL context. uvmCslContext[IN/OUT] - The CSL context.
bufferSize[OUT] - The size of the buffer encrypted by the operation[IN] - If the CSL context is associated with a fault
buffer, this argument is ignored. If it is
associated with a channel, it must be either
- UVM_CSL_OPERATION_ENCRYPT
- UVM_CSL_OPERATION_DECRYPT
bufferSize[IN] - The size of the buffer(s) encrypted by the
external entity in units of bytes. external entity in units of bytes.
Error codes: Error codes:
NV_ERR_INSUFFICIENT_RESOURCES - The device encryption would cause a counter NV_ERR_INSUFFICIENT_RESOURCES - The encryption would cause a counter
to overflow. to overflow.
*/ */
NV_STATUS nvUvmInterfaceCslLogExternalEncryption(UvmCslContext *uvmCslContext, NV_STATUS nvUvmInterfaceCslLogEncryption(UvmCslContext *uvmCslContext,
UvmCslOperation operation,
NvU32 bufferSize); NvU32 bufferSize);
#endif // _NV_UVM_INTERFACE_H_ #endif // _NV_UVM_INTERFACE_H_
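
A hedged usage sketch of the renamed logging entry point, relying only on the prototype and arguments shown above; the wrapper name and call site are assumptions, not driver code.

/*
 * Hypothetical wrapper: account for a CPU-side encryption on a channel's CSL
 * context, as nvUvmInterfaceCslLogEncryption requires for every CPU/GPU
 * encryption on channel contexts.
 */
static NV_STATUS log_cpu_encryption(UvmCslContext *uvmCslContext, NvU32 bufferSize)
{
    /*
     * NV_ERR_INSUFFICIENT_RESOURCES here means a message counter would
     * overflow, i.e. the channel's keys need rotating before more data can
     * be encrypted.
     */
    return nvUvmInterfaceCslLogEncryption(uvmCslContext,
                                          UVM_CSL_OPERATION_ENCRYPT,
                                          bufferSize);
}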

View File

@ -267,6 +267,7 @@ typedef struct UvmGpuChannelInfo_tag
// The errorNotifier is filled out when the channel hits an RC error. // The errorNotifier is filled out when the channel hits an RC error.
NvNotification *errorNotifier; NvNotification *errorNotifier;
NvNotification *keyRotationNotifier;
NvU32 hwRunlistId; NvU32 hwRunlistId;
NvU32 hwChannelId; NvU32 hwChannelId;
@ -604,6 +605,8 @@ typedef struct UvmGpuConfComputeCaps_tag
{ {
// Out: GPU's confidential compute mode // Out: GPU's confidential compute mode
UvmGpuConfComputeMode mode; UvmGpuConfComputeMode mode;
// Is key rotation enabled for UVM keys
NvBool bKeyRotationEnabled;
} UvmGpuConfComputeCaps; } UvmGpuConfComputeCaps;
#define UVM_GPU_NAME_LENGTH 0x40 #define UVM_GPU_NAME_LENGTH 0x40
@ -1086,4 +1089,21 @@ typedef enum UvmCslOperation
UVM_CSL_OPERATION_DECRYPT UVM_CSL_OPERATION_DECRYPT
} UvmCslOperation; } UvmCslOperation;
typedef enum UVM_KEY_ROTATION_STATUS {
// Key rotation complete/not in progress
UVM_KEY_ROTATION_STATUS_IDLE = 0,
// RM is waiting for clients to report their channels are idle for key rotation
UVM_KEY_ROTATION_STATUS_PENDING = 1,
// Key rotation is in progress
UVM_KEY_ROTATION_STATUS_IN_PROGRESS = 2,
// Key rotation timeout failure, RM will RC non-idle channels.
// UVM should never see this status value.
UVM_KEY_ROTATION_STATUS_FAILED_TIMEOUT = 3,
// Key rotation failed because upper threshold was crossed, RM will RC non-idle channels
UVM_KEY_ROTATION_STATUS_FAILED_THRESHOLD = 4,
// Internal RM failure while rotating keys for a certain channel, RM will RC the channel.
UVM_KEY_ROTATION_STATUS_FAILED_ROTATION = 5,
UVM_KEY_ROTATION_STATUS_MAX_COUNT = 6,
} UVM_KEY_ROTATION_STATUS;
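
For logging or debugging, the new status values could be mapped to strings with a small helper such as the sketch below; the function name is hypothetical and not part of the header.

/*
 * Debug-only sketch: printable names for the UVM_KEY_ROTATION_STATUS values
 * defined above.
 */
static const char *key_rotation_status_string(UVM_KEY_ROTATION_STATUS status)
{
    switch (status) {
        case UVM_KEY_ROTATION_STATUS_IDLE:             return "idle";
        case UVM_KEY_ROTATION_STATUS_PENDING:          return "pending";
        case UVM_KEY_ROTATION_STATUS_IN_PROGRESS:      return "in progress";
        case UVM_KEY_ROTATION_STATUS_FAILED_TIMEOUT:   return "failed (timeout)";
        case UVM_KEY_ROTATION_STATUS_FAILED_THRESHOLD: return "failed (threshold)";
        case UVM_KEY_ROTATION_STATUS_FAILED_ROTATION:  return "failed (rotation)";
        default:                                       return "unknown";
    }
}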
#endif // _NV_UVM_TYPES_H_ #endif // _NV_UVM_TYPES_H_

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -103,14 +103,14 @@ NV_STATUS NV_API_CALL rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *, n
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_init(nvidia_stack_t *, struct ccslContext_t **, nvgpuChannelHandle_t); NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_init(nvidia_stack_t *, struct ccslContext_t **, nvgpuChannelHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_clear(nvidia_stack_t *, struct ccslContext_t *); NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_clear(nvidia_stack_t *, struct ccslContext_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_update(nvidia_stack_t *, struct ccslContext_t *); NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_key(nvidia_stack_t *, UvmCslContext *[], NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8); NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *, NvU8 *); NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt_with_iv(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8*, NvU8 *, NvU8 *); NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt_with_iv(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8*, NvU8 *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 const *, NvU8 *, NvU8 const *, NvU32, NvU8 const *); NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 const *, NvU32, NvU8 *, NvU8 const *, NvU32, NvU8 const *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_sign(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *); NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_sign(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_query_message_pool(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64 *); NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_query_message_pool(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_increment_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64, NvU8 *); NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_increment_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_log_device_encryption(nvidia_stack_t *, struct ccslContext_t *, NvU32); NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_log_encryption(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU32);
#endif #endif

View File

@ -1416,6 +1416,42 @@ compile_test() {
compile_check_conftest "$CODE" "NV_VFIO_REGISTER_EMULATED_IOMMU_DEV_PRESENT" "" "functions" compile_check_conftest "$CODE" "NV_VFIO_REGISTER_EMULATED_IOMMU_DEV_PRESENT" "" "functions"
;; ;;
bus_type_has_iommu_ops)
#
# Determine if 'bus_type' structure has an 'iommu_ops' field.
#
# This field was removed by commit 17de3f5fdd35 (iommu: Retire bus ops)
# in v6.8
#
CODE="
#include <linux/device.h>
int conftest_bus_type_has_iommu_ops(void) {
return offsetof(struct bus_type, iommu_ops);
}"
compile_check_conftest "$CODE" "NV_BUS_TYPE_HAS_IOMMU_OPS" "" "types"
;;
eventfd_signal_has_counter_arg)
#
# Determine if eventfd_signal() function has an additional 'counter' argument.
#
# This argument was removed by commit 3652117f8548 (eventfd: simplify
# eventfd_signal()) in v6.8
#
CODE="
#include <linux/eventfd.h>
void conftest_eventfd_signal_has_counter_arg(void) {
struct eventfd_ctx *ctx;
eventfd_signal(ctx, 1);
}"
compile_check_conftest "$CODE" "NV_EVENTFD_SIGNAL_HAS_COUNTER_ARG" "" "types"
;;
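
The two new conftest checks above only emit feature defines (NV_BUS_TYPE_HAS_IOMMU_OPS, NV_EVENTFD_SIGNAL_HAS_COUNTER_ARG). As a hedged illustration of how such a define is typically consumed, the sketch below wraps the v6.8 eventfd_signal() signature change; the wrapper name is hypothetical and not taken from the modules.

/*
 * Illustrative only: build-compatible wrapper around the eventfd_signal()
 * change detected by the check above (commit 3652117f8548, v6.8).
 */
#include <linux/eventfd.h>

static inline void nv_eventfd_signal(struct eventfd_ctx *ctx)
{
#if defined(NV_EVENTFD_SIGNAL_HAS_COUNTER_ARG)
    eventfd_signal(ctx, 1);   /* older kernels: takes an increment value */
#else
    eventfd_signal(ctx);      /* v6.8+: the counter argument was removed */
#endif
}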
drm_available) drm_available)
# Determine if the DRM subsystem is usable # Determine if the DRM subsystem is usable
CODE=" CODE="
@ -5216,25 +5252,23 @@ compile_test() {
compile_check_conftest "$CODE" "NV_PCI_CLASS_MULTIMEDIA_HD_AUDIO_PRESENT" "" "generic" compile_check_conftest "$CODE" "NV_PCI_CLASS_MULTIMEDIA_HD_AUDIO_PRESENT" "" "generic"
;; ;;
unsafe_follow_pfn) follow_pfn)
# #
# Determine if unsafe_follow_pfn() is present. # Determine if follow_pfn() is present.
# #
# unsafe_follow_pfn() was added by commit 69bacee7f9ad # follow_pfn() was added by commit 3b6748e2dd69
# ("mm: Add unsafe_follow_pfn") in v5.13-rc1. # ("mm: introduce follow_pfn()") in v2.6.31-rc1, and removed
# # by commit 233eb0bf3b94 ("mm: remove follow_pfn")
# Note: this commit never made it to the linux kernel, so # from linux-next.
# unsafe_follow_pfn() never existed.
# #
CODE=" CODE="
#include <linux/mm.h> #include <linux/mm.h>
void conftest_unsafe_follow_pfn(void) { void conftest_follow_pfn(void) {
unsafe_follow_pfn(); follow_pfn();
}" }"
compile_check_conftest "$CODE" "NV_UNSAFE_FOLLOW_PFN_PRESENT" "" "functions" compile_check_conftest "$CODE" "NV_FOLLOW_PFN_PRESENT" "" "functions"
;; ;;
drm_plane_atomic_check_has_atomic_state_arg) drm_plane_atomic_check_has_atomic_state_arg)
# #
# Determine if drm_plane_helper_funcs::atomic_check takes 'state' # Determine if drm_plane_helper_funcs::atomic_check takes 'state'

View File

@ -201,7 +201,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
// Ran out of attempts - return thread even if its stack may not be // Ran out of attempts - return thread even if its stack may not be
// allocated on the preferred node // allocated on the preferred node
if ((i == (attempts - 1))) if (i == (attempts - 1))
break; break;
// Get the NUMA node where the first page of the stack is resident. If // Get the NUMA node where the first page of the stack is resident. If

View File

@ -201,7 +201,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
// Ran out of attempts - return thread even if its stack may not be // Ran out of attempts - return thread even if its stack may not be
// allocated on the preferred node // allocated on the preferred node
if ((i == (attempts - 1))) if (i == (attempts - 1))
break; break;
// Get the NUMA node where the first page of the stack is resident. If // Get the NUMA node where the first page of the stack is resident. If

View File

@ -201,7 +201,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
// Ran out of attempts - return thread even if its stack may not be // Ran out of attempts - return thread even if its stack may not be
// allocated on the preferred node // allocated on the preferred node
if ((i == (attempts - 1))) if (i == (attempts - 1))
break; break;
// Get the NUMA node where the first page of the stack is resident. If // Get the NUMA node where the first page of the stack is resident. If

View File

@ -1448,7 +1448,9 @@ NV_STATUS UvmAllocSemaphorePool(void *base,
// //
// preferredCpuMemoryNode: (INPUT) // preferredCpuMemoryNode: (INPUT)
// Preferred CPU NUMA memory node used if the destination processor is // Preferred CPU NUMA memory node used if the destination processor is
// the CPU. // the CPU. -1 indicates no preference, in which case the pages used
// can be on any of the available CPU NUMA nodes. If NUMA is disabled
// only 0 and -1 are allowed.
// //
// Error codes: // Error codes:
// NV_ERR_INVALID_ADDRESS: // NV_ERR_INVALID_ADDRESS:
@ -1462,6 +1464,11 @@ NV_STATUS UvmAllocSemaphorePool(void *base,
// The VA range exceeds the largest virtual address supported by the // The VA range exceeds the largest virtual address supported by the
// destination processor. // destination processor.
// //
// NV_ERR_INVALID_ARGUMENT:
// preferredCpuMemoryNode is not a valid CPU NUMA node or it corresponds
// to a NUMA node ID for a registered GPU. If NUMA is disabled, it
// indicates that preferredCpuMemoryNode was not either 0 or -1.
//
// NV_ERR_INVALID_DEVICE: // NV_ERR_INVALID_DEVICE:
// destinationUuid does not represent a valid processor such as a CPU or // destinationUuid does not represent a valid processor such as a CPU or
// a GPU with a GPU VA space registered for it. Or destinationUuid is a // a GPU with a GPU VA space registered for it. Or destinationUuid is a
@ -1528,8 +1535,9 @@ NV_STATUS UvmMigrate(void *base,
// //
// preferredCpuMemoryNode: (INPUT) // preferredCpuMemoryNode: (INPUT)
// Preferred CPU NUMA memory node used if the destination processor is // Preferred CPU NUMA memory node used if the destination processor is
// the CPU. This argument is ignored if the given virtual address range // the CPU. -1 indicates no preference, in which case the pages used
// corresponds to managed memory. // can be on any of the available CPU NUMA nodes. If NUMA is disabled
// only 0 and -1 are allowed.
// //
// semaphoreAddress: (INPUT) // semaphoreAddress: (INPUT)
// Base address of the semaphore. // Base address of the semaphore.
@ -1586,8 +1594,8 @@ NV_STATUS UvmMigrateAsync(void *base,
// //
// Migrates the backing of all virtual address ranges associated with the given // Migrates the backing of all virtual address ranges associated with the given
// range group to the specified destination processor. The behavior of this API // range group to the specified destination processor. The behavior of this API
// is equivalent to calling UvmMigrate on each VA range associated with this // is equivalent to calling UvmMigrate with preferredCpuMemoryNode = -1 on each
// range group. // VA range associated with this range group.
// //
// Any errors encountered during migration are returned immediately. No attempt // Any errors encountered during migration are returned immediately. No attempt
// is made to migrate the remaining unmigrated ranges and the ranges that are // is made to migrate the remaining unmigrated ranges and the ranges that are
@ -2169,7 +2177,8 @@ NV_STATUS UvmMapDynamicParallelismRegion(void *base,
// //
// If any page in the VA range has a preferred location, then the migration and // If any page in the VA range has a preferred location, then the migration and
// mapping policies associated with this API take precedence over those related // mapping policies associated with this API take precedence over those related
// to the preferred location. // to the preferred location. If the preferred location is a specific CPU NUMA
// node, that NUMA node will be used for a CPU-resident copy of the page.
// //
// If any pages in this VA range have any processors present in their // If any pages in this VA range have any processors present in their
// accessed-by list, the migration and mapping policies associated with this // accessed-by list, the migration and mapping policies associated with this
@ -2300,7 +2309,7 @@ NV_STATUS UvmDisableReadDuplication(void *base,
// UvmPreventMigrationRangeGroups has not been called on the range group that // UvmPreventMigrationRangeGroups has not been called on the range group that
// those pages are associated with, then the migration and mapping policies // those pages are associated with, then the migration and mapping policies
// associated with UvmEnableReadDuplication override the policies outlined // associated with UvmEnableReadDuplication override the policies outlined
// above. Note that enabling read duplication on on any pages in this VA range // above. Note that enabling read duplication on any pages in this VA range
// does not clear the state set by this API for those pages. It merely overrides // does not clear the state set by this API for those pages. It merely overrides
// the policies associated with this state until read duplication is disabled // the policies associated with this state until read duplication is disabled
// for those pages. // for those pages.
@ -2333,7 +2342,8 @@ NV_STATUS UvmDisableReadDuplication(void *base,
// preferredCpuMemoryNode: (INPUT) // preferredCpuMemoryNode: (INPUT)
// Preferred CPU NUMA memory node used if preferredLocationUuid is the // Preferred CPU NUMA memory node used if preferredLocationUuid is the
// UUID of the CPU. -1 is a special value which indicates all CPU nodes // UUID of the CPU. -1 is a special value which indicates all CPU nodes
// allowed by the global and thread memory policies. // allowed by the global and thread memory policies. If NUMA is disabled
// only 0 and -1 are allowed.
// //
// Errors: // Errors:
// NV_ERR_INVALID_ADDRESS: // NV_ERR_INVALID_ADDRESS:
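
The preferredCpuMemoryNode rules spelled out above (-1 for no preference; only 0 and -1 when NUMA is disabled) can be condensed into a small helper. The following is a sketch with an assumed name and assumed NvS32/NvBool types; it is not part of the UVM API.

// Hypothetical helper: compute a preferredCpuMemoryNode value that obeys the
// documented rules. Callers supply whether NUMA is enabled and the node they
// would ideally like to use.
static NvS32 pick_preferred_cpu_node(NvBool numa_enabled, NvS32 desired_node)
{
    // With NUMA disabled only 0 and -1 are accepted; -1 means no preference.
    if (!numa_enabled)
        return -1;

    // Any negative request collapses to -1 (no preference among CPU nodes).
    return (desired_node >= 0) ? desired_node : -1;
}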

View File

@ -855,6 +855,7 @@ static NV_STATUS cpu_decrypt_in_order(uvm_channel_t *channel,
uvm_mem_t *dst_mem, uvm_mem_t *dst_mem,
uvm_mem_t *src_mem, uvm_mem_t *src_mem,
const UvmCslIv *decrypt_iv, const UvmCslIv *decrypt_iv,
NvU32 key_version,
uvm_mem_t *auth_tag_mem, uvm_mem_t *auth_tag_mem,
size_t size, size_t size,
NvU32 copy_size) NvU32 copy_size)
@ -869,6 +870,7 @@ static NV_STATUS cpu_decrypt_in_order(uvm_channel_t *channel,
dst_plain + i * copy_size, dst_plain + i * copy_size,
src_cipher + i * copy_size, src_cipher + i * copy_size,
decrypt_iv + i, decrypt_iv + i,
key_version,
copy_size, copy_size,
auth_tag_buffer + i * UVM_CONF_COMPUTING_AUTH_TAG_SIZE)); auth_tag_buffer + i * UVM_CONF_COMPUTING_AUTH_TAG_SIZE));
} }
@ -879,6 +881,7 @@ static NV_STATUS cpu_decrypt_out_of_order(uvm_channel_t *channel,
uvm_mem_t *dst_mem, uvm_mem_t *dst_mem,
uvm_mem_t *src_mem, uvm_mem_t *src_mem,
const UvmCslIv *decrypt_iv, const UvmCslIv *decrypt_iv,
NvU32 key_version,
uvm_mem_t *auth_tag_mem, uvm_mem_t *auth_tag_mem,
size_t size, size_t size,
NvU32 copy_size) NvU32 copy_size)
@ -896,6 +899,7 @@ static NV_STATUS cpu_decrypt_out_of_order(uvm_channel_t *channel,
dst_plain + i * copy_size, dst_plain + i * copy_size,
src_cipher + i * copy_size, src_cipher + i * copy_size,
decrypt_iv + i, decrypt_iv + i,
key_version,
copy_size, copy_size,
auth_tag_buffer + i * UVM_CONF_COMPUTING_AUTH_TAG_SIZE)); auth_tag_buffer + i * UVM_CONF_COMPUTING_AUTH_TAG_SIZE));
} }
@ -959,7 +963,7 @@ static void gpu_encrypt(uvm_push_t *push,
i * UVM_CONF_COMPUTING_AUTH_TAG_SIZE, i * UVM_CONF_COMPUTING_AUTH_TAG_SIZE,
dst_cipher); dst_cipher);
uvm_conf_computing_log_gpu_encryption(push->channel, decrypt_iv); uvm_conf_computing_log_gpu_encryption(push->channel, copy_size, decrypt_iv);
if (i > 0) if (i > 0)
uvm_push_set_flag(push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); uvm_push_set_flag(push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED);
@ -1020,6 +1024,7 @@ static NV_STATUS test_cpu_to_gpu_roundtrip(uvm_gpu_t *gpu,
size_t auth_tag_buffer_size = (size / copy_size) * UVM_CONF_COMPUTING_AUTH_TAG_SIZE; size_t auth_tag_buffer_size = (size / copy_size) * UVM_CONF_COMPUTING_AUTH_TAG_SIZE;
UvmCslIv *decrypt_iv = NULL; UvmCslIv *decrypt_iv = NULL;
UvmCslIv *encrypt_iv = NULL; UvmCslIv *encrypt_iv = NULL;
NvU32 key_version;
uvm_tracker_t tracker; uvm_tracker_t tracker;
size_t src_plain_size; size_t src_plain_size;
@ -1089,6 +1094,11 @@ static NV_STATUS test_cpu_to_gpu_roundtrip(uvm_gpu_t *gpu,
gpu_encrypt(&push, dst_cipher, dst_plain_gpu, auth_tag_mem, decrypt_iv, size, copy_size); gpu_encrypt(&push, dst_cipher, dst_plain_gpu, auth_tag_mem, decrypt_iv, size, copy_size);
// There shouldn't be any key rotation between the end of the push and the
// CPU decryption(s), but forcing the decryption to use the saved key version
// makes the test more robust against future test changes.
key_version = uvm_channel_pool_key_version(push.channel->pool);
TEST_NV_CHECK_GOTO(uvm_push_end_and_wait(&push), out); TEST_NV_CHECK_GOTO(uvm_push_end_and_wait(&push), out);
TEST_CHECK_GOTO(!mem_match(src_plain, src_cipher, size), out); TEST_CHECK_GOTO(!mem_match(src_plain, src_cipher, size), out);
@ -1101,6 +1111,7 @@ static NV_STATUS test_cpu_to_gpu_roundtrip(uvm_gpu_t *gpu,
dst_plain, dst_plain,
dst_cipher, dst_cipher,
decrypt_iv, decrypt_iv,
key_version,
auth_tag_mem, auth_tag_mem,
size, size,
copy_size), copy_size),
@ -1111,6 +1122,7 @@ static NV_STATUS test_cpu_to_gpu_roundtrip(uvm_gpu_t *gpu,
dst_plain, dst_plain,
dst_cipher, dst_cipher,
decrypt_iv, decrypt_iv,
key_version,
auth_tag_mem, auth_tag_mem,
size, size,
copy_size), copy_size),

File diff suppressed because it is too large.

View File

@ -228,21 +228,65 @@ typedef struct
// variant is required when the thread holding the pool lock must sleep // variant is required when the thread holding the pool lock must sleep
// (ex: acquire another mutex) deeper in the call stack, either in UVM or // (ex: acquire another mutex) deeper in the call stack, either in UVM or
// RM. // RM.
union { union
{
uvm_spinlock_t spinlock; uvm_spinlock_t spinlock;
uvm_mutex_t mutex; uvm_mutex_t mutex;
}; };
struct
{
// Secure operations require that uvm_push_begin order matches // Secure operations require that uvm_push_begin order matches
// uvm_push_end order, because the engine's state is used in its internal // uvm_push_end order, because the engine's state is used in its
// operation and each push may modify this state. push_locks is protected by // internal operation and each push may modify this state.
// the channel pool lock. // push_locks is protected by the channel pool lock.
DECLARE_BITMAP(push_locks, UVM_CHANNEL_MAX_NUM_CHANNELS_PER_POOL); DECLARE_BITMAP(push_locks, UVM_CHANNEL_MAX_NUM_CHANNELS_PER_POOL);
// Counting semaphore for available and unlocked channels, it must be // Counting semaphore for available and unlocked channels, it must be
// acquired before submitting work to a channel when the Confidential // acquired before submitting work to a channel when the Confidential
// Computing feature is enabled. // Computing feature is enabled.
uvm_semaphore_t push_sem; uvm_semaphore_t push_sem;
// Per channel buffers in unprotected sysmem.
uvm_rm_mem_t *pool_sysmem;
// Per channel buffers in protected vidmem.
uvm_rm_mem_t *pool_vidmem;
struct
{
// Current encryption key version, incremented upon key rotation.
// While there are separate keys for encryption and decryption, the
// two keys are rotated at once, so the versioning applies to both.
NvU32 version;
// Lock used to ensure mutual exclusion during key rotation.
uvm_mutex_t mutex;
// CSL contexts passed to RM for key rotation. This is usually an
// array containing the CSL contexts associated with the channels in
// the pool. In the case of the WLC pool, the array also includes
// CSL contexts associated with LCIC channels.
UvmCslContext **csl_contexts;
// Number of elements in the CSL context array.
unsigned num_csl_contexts;
// Number of bytes encrypted, or decrypted, on the engine associated
// with the pool since the last key rotation. Only used during
// testing, to force key rotations after a certain encryption size,
// see UVM_CONF_COMPUTING_KEY_ROTATION_LOWER_THRESHOLD.
//
// Encryptions on a LCIC pool are accounted for in the paired WLC
// pool.
//
// TODO: Bug 4612912: these accounting variables can be removed once
// RM exposes an API to set the key rotation lower threshold.
atomic64_t encrypted;
atomic64_t decrypted;
} key_rotation;
} conf_computing;
} uvm_channel_pool_t; } uvm_channel_pool_t;
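
As a sketch of how the testing-only accounting fields above might be consumed (helper name and call site are assumptions, not taken from uvm_channel.c):

/*
 * Hypothetical accounting helper: bump the per-pool byte counter after an
 * encryption completes, so tests can force rotation once the lower threshold
 * (UVM_CONF_COMPUTING_KEY_ROTATION_LOWER_THRESHOLD) is exceeded.
 */
static void account_encrypted_bytes(uvm_channel_pool_t *pool, NvU64 num_bytes)
{
    atomic64_add(num_bytes, &pool->conf_computing.key_rotation.encrypted);
}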
struct uvm_channel_struct struct uvm_channel_struct
@ -322,43 +366,14 @@ struct uvm_channel_struct
// work launches to match the order of push end-s that triggered them. // work launches to match the order of push end-s that triggered them.
volatile NvU32 gpu_put; volatile NvU32 gpu_put;
// Static pushbuffer for channels with static schedule (WLC/LCIC) // Protected sysmem location makes WLC independent from the pushbuffer
uvm_rm_mem_t *static_pb_protected_vidmem; // allocator. Unprotected sysmem and protected vidmem counterparts
// are allocated from the channel pool (sysmem, vidmem).
// Static pushbuffer staging buffer for WLC
uvm_rm_mem_t *static_pb_unprotected_sysmem;
void *static_pb_unprotected_sysmem_cpu;
void *static_pb_unprotected_sysmem_auth_tag_cpu;
// The above static locations are required by the WLC (and LCIC)
// schedule. Protected sysmem location completes WLC's independence
// from the pushbuffer allocator.
void *static_pb_protected_sysmem; void *static_pb_protected_sysmem;
// Static tracking semaphore notifier values
// Because of LCIC's fixed schedule, the secure semaphore release
// mechanism uses two additional static locations for incrementing the
// notifier values. See:
// . channel_semaphore_secure_release()
// . setup_lcic_schedule()
// . internal_channel_submit_work_wlc()
uvm_rm_mem_t *static_notifier_unprotected_sysmem;
NvU32 *static_notifier_entry_unprotected_sysmem_cpu;
NvU32 *static_notifier_exit_unprotected_sysmem_cpu;
uvm_gpu_address_t static_notifier_entry_unprotected_sysmem_gpu_va;
uvm_gpu_address_t static_notifier_exit_unprotected_sysmem_gpu_va;
// Explicit location for push launch tag used by WLC.
// Encryption auth tags have to be located in unprotected sysmem.
void *launch_auth_tag_cpu;
NvU64 launch_auth_tag_gpu_va;
// Used to decrypt the push back to protected sysmem. // Used to decrypt the push back to protected sysmem.
// This happens when profilers register callbacks for migration data. // This happens when profilers register callbacks for migration data.
uvm_push_crypto_bundle_t *push_crypto_bundles; uvm_push_crypto_bundle_t *push_crypto_bundles;
// Accompanying authentication tags for the crypto bundles
uvm_rm_mem_t *push_crypto_bundle_auth_tags;
} conf_computing; } conf_computing;
// RM channel information // RM channel information
@ -418,7 +433,7 @@ struct uvm_channel_manager_struct
unsigned num_channel_pools; unsigned num_channel_pools;
// Mask containing the indexes of the usable Copy Engines. Each usable CE // Mask containing the indexes of the usable Copy Engines. Each usable CE
// has at least one pool associated with it. // has at least one pool of type UVM_CHANNEL_POOL_TYPE_CE associated with it
DECLARE_BITMAP(ce_mask, UVM_COPY_ENGINE_COUNT_MAX); DECLARE_BITMAP(ce_mask, UVM_COPY_ENGINE_COUNT_MAX);
struct struct
@ -451,6 +466,16 @@ struct uvm_channel_manager_struct
UVM_BUFFER_LOCATION gpput_loc; UVM_BUFFER_LOCATION gpput_loc;
UVM_BUFFER_LOCATION pushbuffer_loc; UVM_BUFFER_LOCATION pushbuffer_loc;
} conf; } conf;
struct
{
// Flag indicating that the WLC/LCIC mechanism is ready/setup; should
// only be false during (de)initialization.
bool wlc_ready;
// True indicates that key rotation is enabled (UVM-wise).
bool key_rotation_enabled;
} conf_computing;
}; };
// Create a channel manager for the GPU // Create a channel manager for the GPU
@ -501,6 +526,14 @@ uvm_channel_t *uvm_channel_lcic_get_paired_wlc(uvm_channel_t *lcic_channel);
uvm_channel_t *uvm_channel_wlc_get_paired_lcic(uvm_channel_t *wlc_channel); uvm_channel_t *uvm_channel_wlc_get_paired_lcic(uvm_channel_t *wlc_channel);
NvU64 uvm_channel_get_static_pb_protected_vidmem_gpu_va(uvm_channel_t *channel);
NvU64 uvm_channel_get_static_pb_unprotected_sysmem_gpu_va(uvm_channel_t *channel);
char* uvm_channel_get_static_pb_unprotected_sysmem_cpu(uvm_channel_t *channel);
char *uvm_channel_get_push_crypto_bundle_auth_tags_cpu_va(uvm_channel_t *channel, unsigned tag_index);
static bool uvm_channel_pool_is_proxy(uvm_channel_pool_t *pool) static bool uvm_channel_pool_is_proxy(uvm_channel_pool_t *pool)
{ {
UVM_ASSERT(uvm_pool_type_is_valid(pool->pool_type)); UVM_ASSERT(uvm_pool_type_is_valid(pool->pool_type));
@ -532,6 +565,17 @@ static uvm_channel_type_t uvm_channel_proxy_channel_type(void)
return UVM_CHANNEL_TYPE_MEMOPS; return UVM_CHANNEL_TYPE_MEMOPS;
} }
// Force key rotation in the engine associated with the given channel pool.
// Rotation may still not happen if RM cannot acquire the necessary locks (in
// which case the function returns NV_ERR_STATE_IN_USE).
//
// This function should be only invoked in pools in which key rotation is
// enabled.
NV_STATUS uvm_channel_pool_rotate_key(uvm_channel_pool_t *pool);
// Retrieve the current encryption key version associated with the channel pool.
NvU32 uvm_channel_pool_key_version(uvm_channel_pool_t *pool);
// Privileged channels support all the Host and engine methods, while // Privileged channels support all the Host and engine methods, while
// non-privileged channels don't support privileged methods. // non-privileged channels don't support privileged methods.
// //
@ -579,12 +623,9 @@ NvU32 uvm_channel_manager_update_progress(uvm_channel_manager_t *channel_manager
// beginning. // beginning.
NV_STATUS uvm_channel_manager_wait(uvm_channel_manager_t *manager); NV_STATUS uvm_channel_manager_wait(uvm_channel_manager_t *manager);
// Check if WLC/LCIC mechanism is ready/setup
// Should only return false during initialization
static bool uvm_channel_manager_is_wlc_ready(uvm_channel_manager_t *manager) static bool uvm_channel_manager_is_wlc_ready(uvm_channel_manager_t *manager)
{ {
return (manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_WLC] != NULL) && return manager->conf_computing.wlc_ready;
(manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_LCIC] != NULL);
} }
// Get the GPU VA of semaphore_channel's tracking semaphore within the VA space // Get the GPU VA of semaphore_channel's tracking semaphore within the VA space
// associated with access_channel. // associated with access_channel.

View File

@ -796,11 +796,8 @@ done:
NV_STATUS test_conf_computing_channel_selection(uvm_va_space_t *va_space) NV_STATUS test_conf_computing_channel_selection(uvm_va_space_t *va_space)
{ {
NV_STATUS status = NV_OK; NV_STATUS status = NV_OK;
uvm_channel_pool_t *pool; uvm_push_t *pushes = NULL;
uvm_push_t *pushes; uvm_gpu_t *gpu = NULL;
uvm_gpu_t *gpu;
NvU32 i;
NvU32 num_pushes;
if (!g_uvm_global.conf_computing_enabled) if (!g_uvm_global.conf_computing_enabled)
return NV_OK; return NV_OK;
@ -810,9 +807,19 @@ NV_STATUS test_conf_computing_channel_selection(uvm_va_space_t *va_space)
for_each_va_space_gpu(gpu, va_space) { for_each_va_space_gpu(gpu, va_space) {
uvm_channel_type_t channel_type; uvm_channel_type_t channel_type;
// Key rotation is disabled because this test relies on nested pushes,
// which is illegal. If any push other than the first one triggers key
// rotation, the test won't complete. This is because key rotation
// depends on waiting for ongoing pushes to end, which doesn't happen
// if those pushes are ended after the current one begins.
uvm_conf_computing_disable_key_rotation(gpu);
for (channel_type = 0; channel_type < UVM_CHANNEL_TYPE_COUNT; channel_type++) { for (channel_type = 0; channel_type < UVM_CHANNEL_TYPE_COUNT; channel_type++) {
pool = gpu->channel_manager->pool_to_use.default_for_type[channel_type]; NvU32 i;
TEST_CHECK_RET(pool != NULL); NvU32 num_pushes;
uvm_channel_pool_t *pool = gpu->channel_manager->pool_to_use.default_for_type[channel_type];
TEST_CHECK_GOTO(pool != NULL, error);
// Skip LCIC channels as those can't accept any pushes // Skip LCIC channels as those can't accept any pushes
if (uvm_channel_pool_is_lcic(pool)) if (uvm_channel_pool_is_lcic(pool))
@ -824,7 +831,7 @@ NV_STATUS test_conf_computing_channel_selection(uvm_va_space_t *va_space)
num_pushes = min(pool->num_channels, (NvU32)UVM_PUSH_MAX_CONCURRENT_PUSHES); num_pushes = min(pool->num_channels, (NvU32)UVM_PUSH_MAX_CONCURRENT_PUSHES);
pushes = uvm_kvmalloc_zero(sizeof(*pushes) * num_pushes); pushes = uvm_kvmalloc_zero(sizeof(*pushes) * num_pushes);
TEST_CHECK_RET(pushes != NULL); TEST_CHECK_GOTO(pushes != NULL, error);
for (i = 0; i < num_pushes; i++) { for (i = 0; i < num_pushes; i++) {
uvm_push_t *push = &pushes[i]; uvm_push_t *push = &pushes[i];
@ -841,12 +848,18 @@ NV_STATUS test_conf_computing_channel_selection(uvm_va_space_t *va_space)
uvm_kvfree(pushes); uvm_kvfree(pushes);
} }
uvm_conf_computing_enable_key_rotation(gpu);
} }
uvm_thread_context_lock_enable_tracking(); uvm_thread_context_lock_enable_tracking();
return status; return status;
error: error:
if (gpu != NULL)
uvm_conf_computing_enable_key_rotation(gpu);
uvm_thread_context_lock_enable_tracking(); uvm_thread_context_lock_enable_tracking();
uvm_kvfree(pushes); uvm_kvfree(pushes);
@ -948,6 +961,318 @@ release:
return NV_OK; return NV_OK;
} }
static NV_STATUS force_key_rotations(uvm_channel_pool_t *pool, unsigned num_rotations)
{
unsigned num_tries;
unsigned max_num_tries = 20;
unsigned num_rotations_completed = 0;
if (num_rotations == 0)
return NV_OK;
// The number of accepted rotations is kept low, so failed rotation
// invocations due to RM not acquiring the necessary locks (which imply a
// sleep in the test) do not balloon the test execution time.
UVM_ASSERT(num_rotations <= 10);
for (num_tries = 0; (num_tries < max_num_tries) && (num_rotations_completed < num_rotations); num_tries++) {
// Force key rotation, irrespective of encryption usage.
NV_STATUS status = uvm_channel_pool_rotate_key(pool);
// Key rotation may not be able to complete due to RM failing to acquire
// the necessary locks. Detect the situation, sleep for a bit, and then
// try again
//
// The maximum time spent sleeping in a single rotation call is
// (max_num_tries * max_sleep_us)
if (status == NV_ERR_STATE_IN_USE) {
NvU32 min_sleep_us = 1000;
NvU32 max_sleep_us = 10000;
usleep_range(min_sleep_us, max_sleep_us);
continue;
}
TEST_NV_CHECK_RET(status);
num_rotations_completed++;
}
// If not a single key rotation occurred, the dependent tests still pass,
// but there is not much value to them. Instead, return an error so the
// maximum number of tries, or the maximum sleep time, are adjusted to
// ensure that at least one rotation completes.
if (num_rotations_completed > 0)
return NV_OK;
else
return NV_ERR_STATE_IN_USE;
}
static NV_STATUS force_key_rotation(uvm_channel_pool_t *pool)
{
return force_key_rotations(pool, 1);
}
// Test key rotation in all pools. This is useful because key rotation may not
// happen otherwise on certain engines during UVM test execution. For example,
// if the MEMOPS channel type is mapped to a CE not shared with any other
// channel type, then the only encryption taking place in the engine is due to
// semaphore releases (4 bytes each). This small encryption size makes it
// unlikely to exceed even small rotation thresholds.
static NV_STATUS test_channel_key_rotation_basic(uvm_gpu_t *gpu)
{
uvm_channel_pool_t *pool;
uvm_for_each_pool(pool, gpu->channel_manager) {
if (!uvm_conf_computing_is_key_rotation_enabled_in_pool(pool))
continue;
TEST_NV_CHECK_RET(force_key_rotation(pool));
}
return NV_OK;
}
// Interleave GPU encryptions and decryptions, and their CPU counterparts, with
// key rotations.
static NV_STATUS test_channel_key_rotation_interleave(uvm_gpu_t *gpu)
{
int i;
uvm_channel_pool_t *gpu_to_cpu_pool;
uvm_channel_pool_t *cpu_to_gpu_pool;
NV_STATUS status = NV_OK;
size_t size = UVM_CONF_COMPUTING_DMA_BUFFER_SIZE;
void *initial_plain_cpu = NULL;
void *final_plain_cpu = NULL;
uvm_mem_t *plain_gpu = NULL;
uvm_gpu_address_t plain_gpu_address;
cpu_to_gpu_pool = gpu->channel_manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_CPU_TO_GPU];
TEST_CHECK_RET(uvm_conf_computing_is_key_rotation_enabled_in_pool(cpu_to_gpu_pool));
gpu_to_cpu_pool = gpu->channel_manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_GPU_TO_CPU];
TEST_CHECK_RET(uvm_conf_computing_is_key_rotation_enabled_in_pool(gpu_to_cpu_pool));
initial_plain_cpu = uvm_kvmalloc_zero(size);
if (initial_plain_cpu == NULL) {
status = NV_ERR_NO_MEMORY;
goto out;
}
final_plain_cpu = uvm_kvmalloc_zero(size);
if (final_plain_cpu == NULL) {
status = NV_ERR_NO_MEMORY;
goto out;
}
TEST_NV_CHECK_GOTO(uvm_mem_alloc_vidmem(size, gpu, &plain_gpu), out);
TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(plain_gpu, gpu), out);
plain_gpu_address = uvm_mem_gpu_address_virtual_kernel(plain_gpu, gpu);
memset(initial_plain_cpu, 1, size);
for (i = 0; i < 5; i++) {
TEST_NV_CHECK_GOTO(force_key_rotation(gpu_to_cpu_pool), out);
TEST_NV_CHECK_GOTO(force_key_rotation(cpu_to_gpu_pool), out);
TEST_NV_CHECK_GOTO(uvm_conf_computing_util_memcopy_cpu_to_gpu(gpu,
plain_gpu_address,
initial_plain_cpu,
size,
NULL,
"CPU > GPU"),
out);
TEST_NV_CHECK_GOTO(force_key_rotation(gpu_to_cpu_pool), out);
TEST_NV_CHECK_GOTO(force_key_rotation(cpu_to_gpu_pool), out);
TEST_NV_CHECK_GOTO(uvm_conf_computing_util_memcopy_gpu_to_cpu(gpu,
final_plain_cpu,
plain_gpu_address,
size,
NULL,
"GPU > CPU"),
out);
TEST_CHECK_GOTO(!memcmp(initial_plain_cpu, final_plain_cpu, size), out);
memset(final_plain_cpu, 0, size);
}
out:
uvm_mem_free(plain_gpu);
uvm_kvfree(final_plain_cpu);
uvm_kvfree(initial_plain_cpu);
return status;
}
static NV_STATUS memset_vidmem(uvm_mem_t *mem, NvU8 val)
{
uvm_push_t push;
uvm_gpu_address_t gpu_address;
uvm_gpu_t *gpu = mem->backing_gpu;
UVM_ASSERT(uvm_mem_is_vidmem(mem));
TEST_NV_CHECK_RET(uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_INTERNAL, &push, "zero vidmem"));
gpu_address = uvm_mem_gpu_address_virtual_kernel(mem, gpu);
gpu->parent->ce_hal->memset_1(&push, gpu_address, val, mem->size);
TEST_NV_CHECK_RET(uvm_push_end_and_wait(&push));
return NV_OK;
}
// Custom version of uvm_conf_computing_util_memcopy_gpu_to_cpu that allows
// testing to insert key rotations in between the push end, and the CPU
// decryption
static NV_STATUS encrypted_memcopy_gpu_to_cpu(uvm_gpu_t *gpu,
void *dst_plain,
uvm_gpu_address_t src_gpu_address,
size_t size,
unsigned num_rotations_to_insert)
{
NV_STATUS status;
uvm_push_t push;
uvm_conf_computing_dma_buffer_t *dma_buffer;
uvm_gpu_address_t dst_gpu_address, auth_tag_gpu_address;
void *src_cipher, *auth_tag;
uvm_channel_t *channel;
UVM_ASSERT(g_uvm_global.conf_computing_enabled);
UVM_ASSERT(size <= UVM_CONF_COMPUTING_DMA_BUFFER_SIZE);
status = uvm_conf_computing_dma_buffer_alloc(&gpu->conf_computing.dma_buffer_pool, &dma_buffer, NULL);
if (status != NV_OK)
return status;
status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_TO_CPU, &push, "Small GPU > CPU encryption");
if (status != NV_OK)
goto out;
channel = push.channel;
uvm_conf_computing_log_gpu_encryption(channel, size, dma_buffer->decrypt_iv);
dma_buffer->key_version[0] = uvm_channel_pool_key_version(channel->pool);
dst_gpu_address = uvm_mem_gpu_address_virtual_kernel(dma_buffer->alloc, gpu);
auth_tag_gpu_address = uvm_mem_gpu_address_virtual_kernel(dma_buffer->auth_tag, gpu);
gpu->parent->ce_hal->encrypt(&push, dst_gpu_address, src_gpu_address, size, auth_tag_gpu_address);
status = uvm_push_end_and_wait(&push);
if (status != NV_OK)
goto out;
TEST_NV_CHECK_GOTO(force_key_rotations(channel->pool, num_rotations_to_insert), out);
// If num_rotations_to_insert is not zero, the current encryption key will
// be different from the one used during CE encryption.
src_cipher = uvm_mem_get_cpu_addr_kernel(dma_buffer->alloc);
auth_tag = uvm_mem_get_cpu_addr_kernel(dma_buffer->auth_tag);
status = uvm_conf_computing_cpu_decrypt(channel,
dst_plain,
src_cipher,
dma_buffer->decrypt_iv,
dma_buffer->key_version[0],
size,
auth_tag);
out:
uvm_conf_computing_dma_buffer_free(&gpu->conf_computing.dma_buffer_pool, dma_buffer, NULL);
return status;
}
static NV_STATUS test_channel_key_rotation_cpu_decryption(uvm_gpu_t *gpu,
unsigned num_repetitions,
unsigned num_rotations_to_insert)
{
unsigned i;
uvm_channel_pool_t *gpu_to_cpu_pool;
NV_STATUS status = NV_OK;
size_t size = UVM_CONF_COMPUTING_DMA_BUFFER_SIZE;
NvU8 *plain_cpu = NULL;
uvm_mem_t *plain_gpu = NULL;
uvm_gpu_address_t plain_gpu_address;
if (!uvm_conf_computing_is_key_rotation_enabled(gpu))
return NV_OK;
gpu_to_cpu_pool = gpu->channel_manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_GPU_TO_CPU];
TEST_CHECK_RET(uvm_conf_computing_is_key_rotation_enabled_in_pool(gpu_to_cpu_pool));
plain_cpu = (NvU8 *) uvm_kvmalloc_zero(size);
if (plain_cpu == NULL) {
status = NV_ERR_NO_MEMORY;
goto out;
}
TEST_NV_CHECK_GOTO(uvm_mem_alloc_vidmem(size, gpu, &plain_gpu), out);
TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(plain_gpu, gpu), out);
TEST_NV_CHECK_GOTO(memset_vidmem(plain_gpu, 1), out);
plain_gpu_address = uvm_mem_gpu_address_virtual_kernel(plain_gpu, gpu);
for (i = 0; i < num_repetitions; i++) {
unsigned j;
TEST_NV_CHECK_GOTO(encrypted_memcopy_gpu_to_cpu(gpu,
plain_cpu,
plain_gpu_address,
size,
num_rotations_to_insert),
out);
for (j = 0; j < size; j++)
TEST_CHECK_GOTO(plain_cpu[j] == 1, out);
memset(plain_cpu, 0, size);
}
out:
uvm_mem_free(plain_gpu);
uvm_kvfree(plain_cpu);
return status;
}
// Test that CPU decryptions can use old keys, i.e., previous versions of the
// keys that are no longer current due to key rotation. Given that SEC2 does
// not expose encryption capabilities, the "decrypt-after-rotation" problem is
// exclusive to CE encryptions.
static NV_STATUS test_channel_key_rotation_decrypt_after_key_rotation(uvm_gpu_t *gpu)
{
// Instruct encrypted_memcopy_gpu_to_cpu to insert several key rotations
// between the GPU encryption and the associated CPU decryption.
unsigned num_rotations_to_insert = 8;
TEST_NV_CHECK_RET(test_channel_key_rotation_cpu_decryption(gpu, 1, num_rotations_to_insert));
return NV_OK;
}
static NV_STATUS test_channel_key_rotation(uvm_va_space_t *va_space)
{
uvm_gpu_t *gpu;
if (!g_uvm_global.conf_computing_enabled)
return NV_OK;
for_each_va_space_gpu(gpu, va_space) {
if (!uvm_conf_computing_is_key_rotation_enabled(gpu))
break;
TEST_NV_CHECK_RET(test_channel_key_rotation_basic(gpu));
TEST_NV_CHECK_RET(test_channel_key_rotation_interleave(gpu));
TEST_NV_CHECK_RET(test_channel_key_rotation_decrypt_after_key_rotation(gpu));
}
return NV_OK;
}
NV_STATUS test_write_ctrl_gpfifo_noop(uvm_va_space_t *va_space) NV_STATUS test_write_ctrl_gpfifo_noop(uvm_va_space_t *va_space)
{ {
uvm_gpu_t *gpu; uvm_gpu_t *gpu;
@ -1203,6 +1528,10 @@ NV_STATUS uvm_test_channel_sanity(UVM_TEST_CHANNEL_SANITY_PARAMS *params, struct
if (status != NV_OK) if (status != NV_OK)
goto done; goto done;
status = test_channel_key_rotation(va_space);
if (status != NV_OK)
goto done;
// The following tests have side effects, they reset the GPU's // The following tests have side effects, they reset the GPU's
// channel_manager. // channel_manager.
status = test_channel_pushbuffer_extension_base(va_space); status = test_channel_pushbuffer_extension_base(va_space);
@ -1338,6 +1667,126 @@ done:
return status; return status;
} }
static NV_STATUS channel_stress_key_rotation_cpu_encryption(uvm_gpu_t *gpu, UVM_TEST_CHANNEL_STRESS_PARAMS *params)
{
int i;
uvm_channel_pool_t *cpu_to_gpu_pool;
NV_STATUS status = NV_OK;
size_t size = UVM_CONF_COMPUTING_DMA_BUFFER_SIZE;
void *initial_plain_cpu = NULL;
uvm_mem_t *plain_gpu = NULL;
uvm_gpu_address_t plain_gpu_address;
UVM_ASSERT(params->key_rotation_operation == UVM_TEST_CHANNEL_STRESS_KEY_ROTATION_OPERATION_CPU_TO_GPU);
cpu_to_gpu_pool = gpu->channel_manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_CPU_TO_GPU];
TEST_CHECK_RET(uvm_conf_computing_is_key_rotation_enabled_in_pool(cpu_to_gpu_pool));
initial_plain_cpu = uvm_kvmalloc_zero(size);
if (initial_plain_cpu == NULL) {
status = NV_ERR_NO_MEMORY;
goto out;
}
TEST_NV_CHECK_GOTO(uvm_mem_alloc_vidmem(size, gpu, &plain_gpu), out);
TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(plain_gpu, gpu), out);
plain_gpu_address = uvm_mem_gpu_address_virtual_kernel(plain_gpu, gpu);
memset(initial_plain_cpu, 1, size);
for (i = 0; i < params->iterations; i++) {
TEST_NV_CHECK_GOTO(uvm_conf_computing_util_memcopy_cpu_to_gpu(gpu,
plain_gpu_address,
initial_plain_cpu,
size,
NULL,
"CPU > GPU"),
out);
}
out:
uvm_mem_free(plain_gpu);
uvm_kvfree(initial_plain_cpu);
return status;
}
static NV_STATUS channel_stress_key_rotation_cpu_decryption(uvm_gpu_t *gpu, UVM_TEST_CHANNEL_STRESS_PARAMS *params)
{
unsigned num_rotations_to_insert = 0;
UVM_ASSERT(params->key_rotation_operation == UVM_TEST_CHANNEL_STRESS_KEY_ROTATION_OPERATION_GPU_TO_CPU);
return test_channel_key_rotation_cpu_decryption(gpu, params->iterations, num_rotations_to_insert);
}
static NV_STATUS channel_stress_key_rotation_rotate(uvm_gpu_t *gpu, UVM_TEST_CHANNEL_STRESS_PARAMS *params)
{
NvU32 i;
UVM_ASSERT(params->key_rotation_operation == UVM_TEST_CHANNEL_STRESS_KEY_ROTATION_OPERATION_ROTATE);
for (i = 0; i < params->iterations; ++i) {
NV_STATUS status;
uvm_channel_pool_t *pool;
uvm_channel_type_t type;
if ((i % 3) == 0)
type = UVM_CHANNEL_TYPE_CPU_TO_GPU;
else if ((i % 3) == 1)
type = UVM_CHANNEL_TYPE_GPU_TO_CPU;
else
type = UVM_CHANNEL_TYPE_WLC;
pool = gpu->channel_manager->pool_to_use.default_for_type[type];
if (!uvm_conf_computing_is_key_rotation_enabled_in_pool(pool))
return NV_ERR_INVALID_STATE;
status = force_key_rotation(pool);
if (status != NV_OK)
return status;
}
return NV_OK;
}
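A stand-alone sketch of the round-robin engine selection used by the rotation stress above; the enum values and names are illustrative placeholders for the UVM channel types:

#include <stdio.h>

typedef enum { CHANNEL_TYPE_CPU_TO_GPU, CHANNEL_TYPE_GPU_TO_CPU, CHANNEL_TYPE_WLC } channel_type_t;

static const char *channel_type_name[] = { "CPU_TO_GPU", "GPU_TO_CPU", "WLC" };

int main(void)
{
    /* Iteration i rotates the key of the CPU->GPU, GPU->CPU or WLC pool in turn. */
    for (unsigned i = 0; i < 6; i++) {
        channel_type_t type = (channel_type_t)(i % 3);
        printf("iteration %u: rotate key of %s pool\n", i, channel_type_name[type]);
    }
    return 0;
}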
// The objective of this test is documented in the user-level function
static NV_STATUS uvm_test_channel_stress_key_rotation(uvm_va_space_t *va_space, UVM_TEST_CHANNEL_STRESS_PARAMS *params)
{
uvm_test_rng_t rng;
uvm_gpu_t *gpu;
NV_STATUS status = NV_OK;
if (!g_uvm_global.conf_computing_enabled)
return NV_OK;
uvm_test_rng_init(&rng, params->seed);
uvm_va_space_down_read(va_space);
// Key rotation is either enabled in all GPUs, or disabled in all of them.
// Pick a random one.
gpu = random_va_space_gpu(&rng, va_space);
if (!uvm_conf_computing_is_key_rotation_enabled(gpu))
goto out;
if (params->key_rotation_operation == UVM_TEST_CHANNEL_STRESS_KEY_ROTATION_OPERATION_CPU_TO_GPU)
status = channel_stress_key_rotation_cpu_encryption(gpu, params);
else if (params->key_rotation_operation == UVM_TEST_CHANNEL_STRESS_KEY_ROTATION_OPERATION_GPU_TO_CPU)
status = channel_stress_key_rotation_cpu_decryption(gpu, params);
else if (params->key_rotation_operation == UVM_TEST_CHANNEL_STRESS_KEY_ROTATION_OPERATION_ROTATE)
status = channel_stress_key_rotation_rotate(gpu, params);
else
status = NV_ERR_INVALID_PARAMETER;
out:
uvm_va_space_up_read(va_space);
return status;
}
NV_STATUS uvm_test_channel_stress(UVM_TEST_CHANNEL_STRESS_PARAMS *params, struct file *filp) NV_STATUS uvm_test_channel_stress(UVM_TEST_CHANNEL_STRESS_PARAMS *params, struct file *filp)
{ {
uvm_va_space_t *va_space = uvm_va_space_get(filp); uvm_va_space_t *va_space = uvm_va_space_get(filp);
@ -1349,6 +1798,8 @@ NV_STATUS uvm_test_channel_stress(UVM_TEST_CHANNEL_STRESS_PARAMS *params, struct
return uvm_test_channel_stress_update_channels(va_space, params); return uvm_test_channel_stress_update_channels(va_space, params);
case UVM_TEST_CHANNEL_STRESS_MODE_NOOP_PUSH: case UVM_TEST_CHANNEL_STRESS_MODE_NOOP_PUSH:
return uvm_test_channel_noop_push(va_space, params); return uvm_test_channel_noop_push(va_space, params);
case UVM_TEST_CHANNEL_STRESS_MODE_KEY_ROTATION:
return uvm_test_channel_stress_key_rotation(va_space, params);
default: default:
return NV_ERR_INVALID_PARAMETER; return NV_ERR_INVALID_PARAMETER;
} }
@ -33,6 +33,15 @@
#include "nv_uvm_interface.h" #include "nv_uvm_interface.h"
#include "uvm_va_block.h" #include "uvm_va_block.h"
// Amount of encrypted data on a given engine that triggers key rotation. This
// is a UVM internal threshold, different from that of RM, and used only during
// testing.
//
// Key rotation is triggered when the total encryption size, or the total
// decryption size (whichever comes first) reaches this lower threshold on the
// engine.
#define UVM_CONF_COMPUTING_KEY_ROTATION_LOWER_THRESHOLD (UVM_SIZE_1MB * 8)
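The threshold above drives a simple byte-accounting trigger: each CE encryption or decryption adds its size to a per-pool counter, and rotation becomes pending once a counter exceeds 8 MB. A stand-alone sketch of that accounting with a single counter and illustrative names, not driver code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define KEY_ROTATION_LOWER_THRESHOLD (8ull * 1024 * 1024)

static atomic_ullong encrypted_bytes;   /* stand-in for the per-pool counter */

static void log_encryption(size_t size)
{
    atomic_fetch_add(&encrypted_bytes, size);
}

static bool rotation_pending(void)
{
    return atomic_load(&encrypted_bytes) > KEY_ROTATION_LOWER_THRESHOLD;
}

int main(void)
{
    for (int push = 1; push <= 200; push++) {
        log_encryption(64 * 1024);              /* one 64 KB encryption */
        if (rotation_pending()) {
            printf("rotation pending after %d pushes\n", push);
            atomic_store(&encrypted_bytes, 0);  /* new key: reset accounting */
            break;
        }
    }
    return 0;
}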
// The maximum number of secure operations per push is: // The maximum number of secure operations per push is:
// UVM_MAX_PUSH_SIZE / min(CE encryption size, CE decryption size) // UVM_MAX_PUSH_SIZE / min(CE encryption size, CE decryption size)
// + 1 (tracking semaphore) = 128 * 1024 / 56 + 1 = 2342 // + 1 (tracking semaphore) = 128 * 1024 / 56 + 1 = 2342
@ -352,6 +361,19 @@ error:
return status; return status;
} }
// The production key rotation defaults are such that key rotations rarely
// happen. During UVM testing, more frequent rotations are triggered by relying
// on internal encryption usage accounting. When key rotations are triggered by
// UVM, the driver does not rely on channel key rotation notifiers.
//
// TODO: Bug 4612912: UVM should be able to programmatically set the rotation
// lower threshold. This function, and all the metadata associated with it
// (per-pool encryption accounting, for example) can be removed at that point.
static bool key_rotation_is_notifier_driven(void)
{
return !uvm_enable_builtin_tests;
}
NV_STATUS uvm_conf_computing_gpu_init(uvm_gpu_t *gpu) NV_STATUS uvm_conf_computing_gpu_init(uvm_gpu_t *gpu)
{ {
NV_STATUS status; NV_STATUS status;
@ -394,17 +416,35 @@ void uvm_conf_computing_gpu_deinit(uvm_gpu_t *gpu)
conf_computing_dma_buffer_pool_deinit(&gpu->conf_computing.dma_buffer_pool); conf_computing_dma_buffer_pool_deinit(&gpu->conf_computing.dma_buffer_pool);
} }
void uvm_conf_computing_log_gpu_encryption(uvm_channel_t *channel, UvmCslIv *iv) void uvm_conf_computing_log_gpu_encryption(uvm_channel_t *channel, size_t size, UvmCslIv *iv)
{ {
NV_STATUS status; NV_STATUS status;
uvm_channel_pool_t *pool;
if (uvm_channel_is_lcic(channel))
pool = uvm_channel_lcic_get_paired_wlc(channel)->pool;
else
pool = channel->pool;
uvm_mutex_lock(&channel->csl.ctx_lock); uvm_mutex_lock(&channel->csl.ctx_lock);
if (uvm_conf_computing_is_key_rotation_enabled_in_pool(pool)) {
status = nvUvmInterfaceCslLogEncryption(&channel->csl.ctx, UVM_CSL_OPERATION_DECRYPT, size);
// Informing RM of an encryption/decryption should not fail
UVM_ASSERT(status == NV_OK);
if (!key_rotation_is_notifier_driven())
atomic64_add(size, &pool->conf_computing.key_rotation.encrypted);
}
status = nvUvmInterfaceCslIncrementIv(&channel->csl.ctx, UVM_CSL_OPERATION_DECRYPT, 1, iv); status = nvUvmInterfaceCslIncrementIv(&channel->csl.ctx, UVM_CSL_OPERATION_DECRYPT, 1, iv);
uvm_mutex_unlock(&channel->csl.ctx_lock);
// IV rotation is done preemptively as needed, so the above // IV rotation is done preemptively as needed, so the above
// call cannot return failure. // call cannot return failure.
UVM_ASSERT(status == NV_OK); UVM_ASSERT(status == NV_OK);
uvm_mutex_unlock(&channel->csl.ctx_lock);
} }
void uvm_conf_computing_acquire_encryption_iv(uvm_channel_t *channel, UvmCslIv *iv) void uvm_conf_computing_acquire_encryption_iv(uvm_channel_t *channel, UvmCslIv *iv)
@ -428,27 +468,46 @@ void uvm_conf_computing_cpu_encrypt(uvm_channel_t *channel,
void *auth_tag_buffer) void *auth_tag_buffer)
{ {
NV_STATUS status; NV_STATUS status;
uvm_channel_pool_t *pool;
UVM_ASSERT(size); UVM_ASSERT(size);
if (uvm_channel_is_lcic(channel))
pool = uvm_channel_lcic_get_paired_wlc(channel)->pool;
else
pool = channel->pool;
uvm_mutex_lock(&channel->csl.ctx_lock); uvm_mutex_lock(&channel->csl.ctx_lock);
status = nvUvmInterfaceCslEncrypt(&channel->csl.ctx, status = nvUvmInterfaceCslEncrypt(&channel->csl.ctx,
size, size,
(NvU8 const *) src_plain, (NvU8 const *) src_plain,
encrypt_iv, encrypt_iv,
(NvU8 *) dst_cipher, (NvU8 *) dst_cipher,
(NvU8 *) auth_tag_buffer); (NvU8 *) auth_tag_buffer);
uvm_mutex_unlock(&channel->csl.ctx_lock);
// IV rotation is done preemptively as needed, so the above // IV rotation is done preemptively as needed, so the above
// call cannot return failure. // call cannot return failure.
UVM_ASSERT(status == NV_OK); UVM_ASSERT(status == NV_OK);
if (uvm_conf_computing_is_key_rotation_enabled_in_pool(pool)) {
status = nvUvmInterfaceCslLogEncryption(&channel->csl.ctx, UVM_CSL_OPERATION_ENCRYPT, size);
// Informing RM of an encryption/decryption should not fail
UVM_ASSERT(status == NV_OK);
if (!key_rotation_is_notifier_driven())
atomic64_add(size, &pool->conf_computing.key_rotation.decrypted);
}
uvm_mutex_unlock(&channel->csl.ctx_lock);
} }
NV_STATUS uvm_conf_computing_cpu_decrypt(uvm_channel_t *channel, NV_STATUS uvm_conf_computing_cpu_decrypt(uvm_channel_t *channel,
void *dst_plain, void *dst_plain,
const void *src_cipher, const void *src_cipher,
const UvmCslIv *src_iv, const UvmCslIv *src_iv,
NvU32 key_version,
size_t size, size_t size,
const void *auth_tag_buffer) const void *auth_tag_buffer)
{ {
@ -469,10 +528,19 @@ NV_STATUS uvm_conf_computing_cpu_decrypt(uvm_channel_t *channel,
size, size,
(const NvU8 *) src_cipher, (const NvU8 *) src_cipher,
src_iv, src_iv,
key_version,
(NvU8 *) dst_plain, (NvU8 *) dst_plain,
NULL, NULL,
0, 0,
(const NvU8 *) auth_tag_buffer); (const NvU8 *) auth_tag_buffer);
if (status != NV_OK) {
UVM_ERR_PRINT("nvUvmInterfaceCslDecrypt() failed: %s, channel %s, GPU %s\n",
nvstatusToString(status),
channel->name,
uvm_gpu_name(uvm_channel_get_gpu(channel)));
}
uvm_mutex_unlock(&channel->csl.ctx_lock); uvm_mutex_unlock(&channel->csl.ctx_lock);
return status; return status;
@ -485,6 +553,8 @@ NV_STATUS uvm_conf_computing_fault_decrypt(uvm_parent_gpu_t *parent_gpu,
NvU8 valid) NvU8 valid)
{ {
NV_STATUS status; NV_STATUS status;
NvU32 fault_entry_size = parent_gpu->fault_buffer_hal->entry_size(parent_gpu);
UvmCslContext *csl_context = &parent_gpu->fault_buffer_info.rm_info.replayable.cslCtx;
// There is no dedicated lock for the CSL context associated with replayable // There is no dedicated lock for the CSL context associated with replayable
// faults. The mutual exclusion required by the RM CSL API is enforced by // faults. The mutual exclusion required by the RM CSL API is enforced by
@ -494,36 +564,48 @@ NV_STATUS uvm_conf_computing_fault_decrypt(uvm_parent_gpu_t *parent_gpu,
UVM_ASSERT(g_uvm_global.conf_computing_enabled); UVM_ASSERT(g_uvm_global.conf_computing_enabled);
status = nvUvmInterfaceCslDecrypt(&parent_gpu->fault_buffer_info.rm_info.replayable.cslCtx, status = nvUvmInterfaceCslLogEncryption(csl_context, UVM_CSL_OPERATION_DECRYPT, fault_entry_size);
parent_gpu->fault_buffer_hal->entry_size(parent_gpu),
// Informing RM of an encryption/decryption should not fail
UVM_ASSERT(status == NV_OK);
status = nvUvmInterfaceCslDecrypt(csl_context,
fault_entry_size,
(const NvU8 *) src_cipher, (const NvU8 *) src_cipher,
NULL, NULL,
NV_U32_MAX,
(NvU8 *) dst_plain, (NvU8 *) dst_plain,
&valid, &valid,
sizeof(valid), sizeof(valid),
(const NvU8 *) auth_tag_buffer); (const NvU8 *) auth_tag_buffer);
if (status != NV_OK) if (status != NV_OK) {
UVM_ERR_PRINT("nvUvmInterfaceCslDecrypt() failed: %s, GPU %s\n", UVM_ERR_PRINT("nvUvmInterfaceCslDecrypt() failed: %s, GPU %s\n",
nvstatusToString(status), nvstatusToString(status),
uvm_parent_gpu_name(parent_gpu)); uvm_parent_gpu_name(parent_gpu));
}
return status; return status;
} }
void uvm_conf_computing_fault_increment_decrypt_iv(uvm_parent_gpu_t *parent_gpu, NvU64 increment) void uvm_conf_computing_fault_increment_decrypt_iv(uvm_parent_gpu_t *parent_gpu)
{ {
NV_STATUS status; NV_STATUS status;
NvU32 fault_entry_size = parent_gpu->fault_buffer_hal->entry_size(parent_gpu);
UvmCslContext *csl_context = &parent_gpu->fault_buffer_info.rm_info.replayable.cslCtx;
// See comment in uvm_conf_computing_fault_decrypt // See comment in uvm_conf_computing_fault_decrypt
UVM_ASSERT(uvm_sem_is_locked(&parent_gpu->isr.replayable_faults.service_lock)); UVM_ASSERT(uvm_sem_is_locked(&parent_gpu->isr.replayable_faults.service_lock));
UVM_ASSERT(g_uvm_global.conf_computing_enabled); UVM_ASSERT(g_uvm_global.conf_computing_enabled);
status = nvUvmInterfaceCslIncrementIv(&parent_gpu->fault_buffer_info.rm_info.replayable.cslCtx, status = nvUvmInterfaceCslLogEncryption(csl_context, UVM_CSL_OPERATION_DECRYPT, fault_entry_size);
UVM_CSL_OPERATION_DECRYPT,
increment, // Informing RM of an encryption/decryption should not fail
NULL); UVM_ASSERT(status == NV_OK);
status = nvUvmInterfaceCslIncrementIv(csl_context, UVM_CSL_OPERATION_DECRYPT, 1, NULL);
UVM_ASSERT(status == NV_OK); UVM_ASSERT(status == NV_OK);
} }
@ -625,3 +707,231 @@ NV_STATUS uvm_conf_computing_maybe_rotate_channel_ivs_retry_busy(uvm_channel_t *
{ {
return uvm_conf_computing_rotate_channel_ivs_below_limit(channel, uvm_conf_computing_channel_iv_rotation_limit, true); return uvm_conf_computing_rotate_channel_ivs_below_limit(channel, uvm_conf_computing_channel_iv_rotation_limit, true);
} }
void uvm_conf_computing_enable_key_rotation(uvm_gpu_t *gpu)
{
if (!g_uvm_global.conf_computing_enabled)
return;
// Key rotation cannot be enabled on UVM if it is disabled on RM
if (!gpu->parent->rm_info.gpuConfComputeCaps.bKeyRotationEnabled)
return;
gpu->channel_manager->conf_computing.key_rotation_enabled = true;
}
void uvm_conf_computing_disable_key_rotation(uvm_gpu_t *gpu)
{
if (!g_uvm_global.conf_computing_enabled)
return;
gpu->channel_manager->conf_computing.key_rotation_enabled = false;
}
bool uvm_conf_computing_is_key_rotation_enabled(uvm_gpu_t *gpu)
{
return gpu->channel_manager->conf_computing.key_rotation_enabled;
}
bool uvm_conf_computing_is_key_rotation_enabled_in_pool(uvm_channel_pool_t *pool)
{
if (!uvm_conf_computing_is_key_rotation_enabled(pool->manager->gpu))
return false;
// TODO: Bug 4586447: key rotation must be disabled in the SEC2 engine,
// because currently the encryption key is shared between UVM and RM, but
// UVM is not able to idle SEC2 channels owned by RM.
if (uvm_channel_pool_is_sec2(pool))
return false;
// Key rotation happens as part of channel reservation, and LCIC channels
// are never reserved directly. Rotation of keys in LCIC channels happens
// as the result of key rotation in WLC channels.
//
// Return false even though nothing fundamentally prohibits direct key
// rotation on LCIC pools.
if (uvm_channel_pool_is_lcic(pool))
return false;
return true;
}
static bool conf_computing_is_key_rotation_pending_use_stats(uvm_channel_pool_t *pool)
{
NvU64 decrypted, encrypted;
UVM_ASSERT(!key_rotation_is_notifier_driven());
decrypted = atomic64_read(&pool->conf_computing.key_rotation.decrypted);
if (decrypted > UVM_CONF_COMPUTING_KEY_ROTATION_LOWER_THRESHOLD)
return true;
encrypted = atomic64_read(&pool->conf_computing.key_rotation.encrypted);
if (encrypted > UVM_CONF_COMPUTING_KEY_ROTATION_LOWER_THRESHOLD)
return true;
return false;
}
static bool conf_computing_is_key_rotation_pending_use_notifier(uvm_channel_pool_t *pool)
{
// If key rotation is pending for the pool's engine, then the key rotation
// notifier in any of the engine channels can be used by UVM to detect the
// situation. Note that RM doesn't update all the notifiers in a single
// atomic operation, so it is possible that the channel read by UVM (the
// first one in the pool) indicates that a key rotation is pending, but
// another channel in the pool (temporarily) indicates the opposite, or vice
// versa.
uvm_channel_t *first_channel = pool->channels;
UVM_ASSERT(key_rotation_is_notifier_driven());
UVM_ASSERT(first_channel != NULL);
return first_channel->channel_info.keyRotationNotifier->status == UVM_KEY_ROTATION_STATUS_PENDING;
}
bool uvm_conf_computing_is_key_rotation_pending_in_pool(uvm_channel_pool_t *pool)
{
if (!uvm_conf_computing_is_key_rotation_enabled_in_pool(pool))
return false;
if (key_rotation_is_notifier_driven())
return conf_computing_is_key_rotation_pending_use_notifier(pool);
else
return conf_computing_is_key_rotation_pending_use_stats(pool);
}
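A stand-alone sketch of the two detection modes dispatched above: in production the notifier written by RM is consulted, while with built-in tests enabled the driver's own byte accounting is compared against the lower threshold. Field names and values are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ROTATION_STATUS_IDLE    0u
#define ROTATION_STATUS_PENDING 1u
#define LOWER_THRESHOLD (8ull * 1024 * 1024)

struct pool_model {
    bool notifier_driven;
    uint32_t notifier_status;   /* written by the "other side" (RM in the driver) */
    uint64_t encrypted_bytes;   /* driver-side accounting */
};

static bool rotation_pending(const struct pool_model *pool)
{
    if (pool->notifier_driven)
        return pool->notifier_status == ROTATION_STATUS_PENDING;

    return pool->encrypted_bytes > LOWER_THRESHOLD;
}

int main(void)
{
    struct pool_model prod = { .notifier_driven = true,  .notifier_status = ROTATION_STATUS_PENDING };
    struct pool_model test = { .notifier_driven = false, .encrypted_bytes = 9ull * 1024 * 1024 };

    printf("production pool pending: %d\n", rotation_pending(&prod));
    printf("test pool pending: %d\n", rotation_pending(&test));
    return 0;
}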
NV_STATUS uvm_conf_computing_rotate_pool_key(uvm_channel_pool_t *pool)
{
NV_STATUS status;
UVM_ASSERT(uvm_conf_computing_is_key_rotation_enabled_in_pool(pool));
UVM_ASSERT(pool->conf_computing.key_rotation.csl_contexts != NULL);
UVM_ASSERT(pool->conf_computing.key_rotation.num_csl_contexts > 0);
// NV_ERR_STATE_IN_USE indicates that RM was not able to acquire the
// required locks at this time. This status is not interpreted as an error,
// but as a sign for UVM to try again later. This is the same "protocol"
// used in IV rotation.
status = nvUvmInterfaceCslRotateKey(pool->conf_computing.key_rotation.csl_contexts,
pool->conf_computing.key_rotation.num_csl_contexts);
if (status == NV_OK) {
pool->conf_computing.key_rotation.version++;
if (!key_rotation_is_notifier_driven()) {
atomic64_set(&pool->conf_computing.key_rotation.decrypted, 0);
atomic64_set(&pool->conf_computing.key_rotation.encrypted, 0);
}
}
else if (status != NV_ERR_STATE_IN_USE) {
UVM_DBG_PRINT("nvUvmInterfaceCslRotateKey() failed in engine %u: %s\n",
pool->engine_index,
nvstatusToString(status));
}
return status;
}
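A stand-alone sketch of the retry convention described above, where NV_ERR_STATE_IN_USE means "RM could not take its locks right now, try again later" rather than failure; the status values and the mock rotation function are illustrative:

#include <stdio.h>

typedef enum { NV_OK, NV_ERR_STATE_IN_USE, NV_ERR_GENERIC } nv_status_t;

static int attempts_left = 3;

static nv_status_t try_rotate(void)
{
    /* Pretend the required locks only become available on the final attempt. */
    return (--attempts_left == 0) ? NV_OK : NV_ERR_STATE_IN_USE;
}

int main(void)
{
    nv_status_t status;

    do {
        status = try_rotate();
        if (status == NV_ERR_STATE_IN_USE)
            printf("pool busy, retrying later\n");  /* not treated as an error */
    } while (status == NV_ERR_STATE_IN_USE);

    printf("%s\n", status == NV_OK ? "rotated" : "failed");
    return 0;
}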
__attribute__ ((format(printf, 6, 7)))
NV_STATUS uvm_conf_computing_util_memcopy_cpu_to_gpu(uvm_gpu_t *gpu,
uvm_gpu_address_t dst_gpu_address,
void *src_plain,
size_t size,
uvm_tracker_t *tracker,
const char *format,
...)
{
NV_STATUS status;
uvm_push_t push;
uvm_conf_computing_dma_buffer_t *dma_buffer;
uvm_gpu_address_t src_gpu_address, auth_tag_gpu_address;
void *dst_cipher, *auth_tag;
va_list args;
UVM_ASSERT(g_uvm_global.conf_computing_enabled);
UVM_ASSERT(size <= UVM_CONF_COMPUTING_DMA_BUFFER_SIZE);
status = uvm_conf_computing_dma_buffer_alloc(&gpu->conf_computing.dma_buffer_pool, &dma_buffer, NULL);
if (status != NV_OK)
return status;
va_start(args, format);
status = uvm_push_begin_acquire(gpu->channel_manager, UVM_CHANNEL_TYPE_CPU_TO_GPU, tracker, &push, format, args);
va_end(args);
if (status != NV_OK)
goto out;
dst_cipher = uvm_mem_get_cpu_addr_kernel(dma_buffer->alloc);
auth_tag = uvm_mem_get_cpu_addr_kernel(dma_buffer->auth_tag);
uvm_conf_computing_cpu_encrypt(push.channel, dst_cipher, src_plain, NULL, size, auth_tag);
src_gpu_address = uvm_mem_gpu_address_virtual_kernel(dma_buffer->alloc, gpu);
auth_tag_gpu_address = uvm_mem_gpu_address_virtual_kernel(dma_buffer->auth_tag, gpu);
gpu->parent->ce_hal->decrypt(&push, dst_gpu_address, src_gpu_address, size, auth_tag_gpu_address);
status = uvm_push_end_and_wait(&push);
out:
uvm_conf_computing_dma_buffer_free(&gpu->conf_computing.dma_buffer_pool, dma_buffer, NULL);
return status;
}
__attribute__ ((format(printf, 6, 7)))
NV_STATUS uvm_conf_computing_util_memcopy_gpu_to_cpu(uvm_gpu_t *gpu,
void *dst_plain,
uvm_gpu_address_t src_gpu_address,
size_t size,
uvm_tracker_t *tracker,
const char *format,
...)
{
NV_STATUS status;
uvm_push_t push;
uvm_conf_computing_dma_buffer_t *dma_buffer;
uvm_gpu_address_t dst_gpu_address, auth_tag_gpu_address;
void *src_cipher, *auth_tag;
va_list args;
UVM_ASSERT(g_uvm_global.conf_computing_enabled);
UVM_ASSERT(size <= UVM_CONF_COMPUTING_DMA_BUFFER_SIZE);
status = uvm_conf_computing_dma_buffer_alloc(&gpu->conf_computing.dma_buffer_pool, &dma_buffer, NULL);
if (status != NV_OK)
return status;
va_start(args, format);
status = uvm_push_begin_acquire(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_TO_CPU, tracker, &push, format, args);
va_end(args);
if (status != NV_OK)
goto out;
uvm_conf_computing_log_gpu_encryption(push.channel, size, dma_buffer->decrypt_iv);
dma_buffer->key_version[0] = uvm_channel_pool_key_version(push.channel->pool);
dst_gpu_address = uvm_mem_gpu_address_virtual_kernel(dma_buffer->alloc, gpu);
auth_tag_gpu_address = uvm_mem_gpu_address_virtual_kernel(dma_buffer->auth_tag, gpu);
gpu->parent->ce_hal->encrypt(&push, dst_gpu_address, src_gpu_address, size, auth_tag_gpu_address);
status = uvm_push_end_and_wait(&push);
if (status != NV_OK)
goto out;
src_cipher = uvm_mem_get_cpu_addr_kernel(dma_buffer->alloc);
auth_tag = uvm_mem_get_cpu_addr_kernel(dma_buffer->auth_tag);
status = uvm_conf_computing_cpu_decrypt(push.channel,
dst_plain,
src_cipher,
dma_buffer->decrypt_iv,
dma_buffer->key_version[0],
size,
auth_tag);
out:
uvm_conf_computing_dma_buffer_free(&gpu->conf_computing.dma_buffer_pool, dma_buffer, NULL);
return status;
}
@ -87,9 +87,9 @@ typedef struct
// a free buffer. // a free buffer.
uvm_tracker_t tracker; uvm_tracker_t tracker;
// When the DMA buffer is used as the destination of a GPU encryption, SEC2 // When the DMA buffer is used as the destination of a GPU encryption, the
// writes the authentication tag here. Later when the buffer is decrypted // engine (CE or SEC2) writes the authentication tag here. When the buffer
// on the CPU the authentication tag is used again (read) for CSL to verify // is decrypted on the CPU the authentication tag is used by CSL to verify
// the authenticity. The allocation is big enough for one authentication // the authenticity. The allocation is big enough for one authentication
// tag per PAGE_SIZE page in the alloc buffer. // tag per PAGE_SIZE page in the alloc buffer.
uvm_mem_t *auth_tag; uvm_mem_t *auth_tag;
@ -98,7 +98,12 @@ typedef struct
// to the authentication tag. The allocation is big enough for one IV per // to the authentication tag. The allocation is big enough for one IV per
// PAGE_SIZE page in the alloc buffer. The granularity between the decrypt // PAGE_SIZE page in the alloc buffer. The granularity between the decrypt
// IV and authentication tag must match. // IV and authentication tag must match.
UvmCslIv decrypt_iv[(UVM_CONF_COMPUTING_DMA_BUFFER_SIZE / PAGE_SIZE)]; UvmCslIv decrypt_iv[UVM_CONF_COMPUTING_DMA_BUFFER_SIZE / PAGE_SIZE];
// When the DMA buffer is used as the destination of a GPU encryption, the
// key version used during GPU encryption of each PAGE_SIZE page can be
// saved here, so CPU decryption uses the correct decryption key.
NvU32 key_version[UVM_CONF_COMPUTING_DMA_BUFFER_SIZE / PAGE_SIZE];
// Bitmap of the encrypted pages in the backing allocation // Bitmap of the encrypted pages in the backing allocation
uvm_page_mask_t encrypted_page_mask; uvm_page_mask_t encrypted_page_mask;
@ -147,7 +152,7 @@ NV_STATUS uvm_conf_computing_gpu_init(uvm_gpu_t *gpu);
void uvm_conf_computing_gpu_deinit(uvm_gpu_t *gpu); void uvm_conf_computing_gpu_deinit(uvm_gpu_t *gpu);
// Logs encryption information from the GPU and returns the IV. // Logs encryption information from the GPU and returns the IV.
void uvm_conf_computing_log_gpu_encryption(uvm_channel_t *channel, UvmCslIv *iv); void uvm_conf_computing_log_gpu_encryption(uvm_channel_t *channel, size_t size, UvmCslIv *iv);
// Acquires next CPU encryption IV and returns it. // Acquires next CPU encryption IV and returns it.
void uvm_conf_computing_acquire_encryption_iv(uvm_channel_t *channel, UvmCslIv *iv); void uvm_conf_computing_acquire_encryption_iv(uvm_channel_t *channel, UvmCslIv *iv);
@ -167,10 +172,14 @@ void uvm_conf_computing_cpu_encrypt(uvm_channel_t *channel,
// CPU side decryption helper. Decrypts data from src_cipher and writes the // CPU side decryption helper. Decrypts data from src_cipher and writes the
// plain text in dst_plain. src_cipher and dst_plain can't overlap. IV obtained // plain text in dst_plain. src_cipher and dst_plain can't overlap. IV obtained
// from uvm_conf_computing_log_gpu_encryption() needs to be passed to src_iv. // from uvm_conf_computing_log_gpu_encryption() needs to be passed to src_iv.
//
// The caller must indicate which key to use for decryption by passing the
// appropriate key version number.
NV_STATUS uvm_conf_computing_cpu_decrypt(uvm_channel_t *channel, NV_STATUS uvm_conf_computing_cpu_decrypt(uvm_channel_t *channel,
void *dst_plain, void *dst_plain,
const void *src_cipher, const void *src_cipher,
const UvmCslIv *src_iv, const UvmCslIv *src_iv,
NvU32 key_version,
size_t size, size_t size,
const void *auth_tag_buffer); const void *auth_tag_buffer);
@ -191,12 +200,12 @@ NV_STATUS uvm_conf_computing_fault_decrypt(uvm_parent_gpu_t *parent_gpu,
NvU8 valid); NvU8 valid);
// Increment the CPU-side decrypt IV of the CSL context associated with // Increment the CPU-side decrypt IV of the CSL context associated with
// replayable faults. The function is a no-op if the given increment is zero. // replayable faults.
// //
// The IV associated with a fault CSL context is a 64-bit counter. // The IV associated with a fault CSL context is a 64-bit counter.
// //
// Locking: this function must be invoked while holding the replayable ISR lock. // Locking: this function must be invoked while holding the replayable ISR lock.
void uvm_conf_computing_fault_increment_decrypt_iv(uvm_parent_gpu_t *parent_gpu, NvU64 increment); void uvm_conf_computing_fault_increment_decrypt_iv(uvm_parent_gpu_t *parent_gpu);
// Query the number of remaining messages before IV needs to be rotated. // Query the number of remaining messages before IV needs to be rotated.
void uvm_conf_computing_query_message_pools(uvm_channel_t *channel, void uvm_conf_computing_query_message_pools(uvm_channel_t *channel,
@ -214,4 +223,71 @@ NV_STATUS uvm_conf_computing_maybe_rotate_channel_ivs_retry_busy(uvm_channel_t *
// Check if there are fewer than 'limit' messages available in either direction // Check if there are fewer than 'limit' messages available in either direction
// and rotate if not. // and rotate if not.
NV_STATUS uvm_conf_computing_rotate_channel_ivs_below_limit(uvm_channel_t *channel, NvU64 limit, bool retry_if_busy); NV_STATUS uvm_conf_computing_rotate_channel_ivs_below_limit(uvm_channel_t *channel, NvU64 limit, bool retry_if_busy);
// Rotate the engine key associated with the given channel pool.
NV_STATUS uvm_conf_computing_rotate_pool_key(uvm_channel_pool_t *pool);
// Returns true if key rotation is allowed in the channel pool.
bool uvm_conf_computing_is_key_rotation_enabled_in_pool(uvm_channel_pool_t *pool);
// Returns true if key rotation is pending in the channel pool.
bool uvm_conf_computing_is_key_rotation_pending_in_pool(uvm_channel_pool_t *pool);
// Enable/disable key rotation in the passed GPU. Note that UVM enablement is
// dependent on RM enablement: key rotation may still be disabled upon calling
// this function, if it is disabled in RM. On the other hand, key rotation can
// be disabled in UVM, even if it is enabled in RM.
//
// Enablement/Disablement affects only kernel key rotation in keys owned by UVM.
// It doesn't affect user key rotation (CUDA, Video...), nor does it affect RM
// kernel key rotation.
void uvm_conf_computing_enable_key_rotation(uvm_gpu_t *gpu);
void uvm_conf_computing_disable_key_rotation(uvm_gpu_t *gpu);
// Returns true if key rotation is enabled on UVM in the given GPU. Key rotation
// can be enabled on the GPU but disabled on some of the GPU's engines (LCEs or SEC2),
// see uvm_conf_computing_is_key_rotation_enabled_in_pool.
bool uvm_conf_computing_is_key_rotation_enabled(uvm_gpu_t *gpu);
// Launch a synchronous, encrypted copy between CPU and GPU.
//
// The maximum copy size allowed is UVM_CONF_COMPUTING_DMA_BUFFER_SIZE.
//
// The source CPU buffer pointed by src_plain contains the unencrypted (plain
// text) contents; the function internally performs a CPU-side encryption step
// before launching the GPU-side CE decryption. The source buffer can be in
// protected or unprotected sysmem, while the destination buffer must be in
// protected vidmem.
//
// The input tracker, if not NULL, is internally acquired by the push
// responsible for the encrypted copy.
__attribute__ ((format(printf, 6, 7)))
NV_STATUS uvm_conf_computing_util_memcopy_cpu_to_gpu(uvm_gpu_t *gpu,
uvm_gpu_address_t dst_gpu_address,
void *src_plain,
size_t size,
uvm_tracker_t *tracker,
const char *format,
...);
// Launch a synchronous, encrypted copy between GPU and CPU.
//
// The maximum copy size allowed is UVM_CONF_COMPUTING_DMA_BUFFER_SIZE.
//
// The source GPU buffer pointed by src_gpu_address contains the plain text
// contents; the function internally performs a GPU-side CE encryption into an
// unprotected staging buffer, followed by a CPU-side decryption into the
// dst_plain sysmem buffer.
//
// The input tracker, if not NULL, is internally acquired by the push
// responsible for the encrypted copy.
__attribute__ ((format(printf, 6, 7)))
NV_STATUS uvm_conf_computing_util_memcopy_gpu_to_cpu(uvm_gpu_t *gpu,
void *dst_plain,
uvm_gpu_address_t src_gpu_address,
size_t size,
uvm_tracker_t *tracker,
const char *format,
...);
#endif // __UVM_CONF_COMPUTING_H__ #endif // __UVM_CONF_COMPUTING_H__
@ -591,7 +591,7 @@ static void fault_buffer_skip_replayable_entry(uvm_parent_gpu_t *parent_gpu, NvU
// replayable faults still requires manual adjustment so it is kept in sync // replayable faults still requires manual adjustment so it is kept in sync
// with the encryption IV on the GSP-RM's side. // with the encryption IV on the GSP-RM's side.
if (g_uvm_global.conf_computing_enabled) if (g_uvm_global.conf_computing_enabled)
uvm_conf_computing_fault_increment_decrypt_iv(parent_gpu, 1); uvm_conf_computing_fault_increment_decrypt_iv(parent_gpu);
parent_gpu->fault_buffer_hal->entry_clear_valid(parent_gpu, index); parent_gpu->fault_buffer_hal->entry_clear_valid(parent_gpu, index);
} }
@ -60,6 +60,17 @@ struct uvm_gpu_semaphore_pool_page_struct
// Allocation backing the page // Allocation backing the page
uvm_rm_mem_t *memory; uvm_rm_mem_t *memory;
struct {
// Unprotected sysmem storing encrypted value of semaphores
uvm_rm_mem_t *encrypted_payload_memory;
// Unprotected sysmem storing encryption auth tags
uvm_rm_mem_t *auth_tag_memory;
// Unprotected sysmem storing plain text notifier values
uvm_rm_mem_t *notifier_memory;
} conf_computing;
// Pool the page is part of // Pool the page is part of
uvm_gpu_semaphore_pool_t *pool; uvm_gpu_semaphore_pool_t *pool;
@ -80,26 +91,6 @@ static bool gpu_semaphore_is_secure(uvm_gpu_semaphore_t *semaphore)
return gpu_semaphore_pool_is_secure(semaphore->page->pool); return gpu_semaphore_pool_is_secure(semaphore->page->pool);
} }
static NvU32 get_index(uvm_gpu_semaphore_t *semaphore)
{
NvU32 offset;
NvU32 index;
if (gpu_semaphore_is_secure(semaphore))
return semaphore->conf_computing.index;
UVM_ASSERT(semaphore->payload != NULL);
UVM_ASSERT(semaphore->page != NULL);
offset = (char*)semaphore->payload - (char*)uvm_rm_mem_get_cpu_va(semaphore->page->memory);
UVM_ASSERT(offset % UVM_SEMAPHORE_SIZE == 0);
index = offset / UVM_SEMAPHORE_SIZE;
UVM_ASSERT(index < UVM_SEMAPHORE_COUNT_PER_PAGE);
return index;
}
// Use canary values on debug builds to catch semaphore use-after-free. We can // Use canary values on debug builds to catch semaphore use-after-free. We can
// catch release-after-free by simply setting the payload to a known value at // catch release-after-free by simply setting the payload to a known value at
// free then checking it on alloc or pool free, but catching acquire-after-free // free then checking it on alloc or pool free, but catching acquire-after-free
@ -150,34 +141,83 @@ static bool gpu_can_access_semaphore_pool(uvm_gpu_t *gpu, uvm_rm_mem_t *rm_mem)
return ((uvm_rm_mem_get_gpu_uvm_va(rm_mem, gpu) + rm_mem->size - 1) < gpu->parent->max_host_va); return ((uvm_rm_mem_get_gpu_uvm_va(rm_mem, gpu) + rm_mem->size - 1) < gpu->parent->max_host_va);
} }
// Secure semaphore pools are allocated in the CPR of vidmem and only mapped to static void pool_page_free_buffers(uvm_gpu_semaphore_pool_page_t *page)
// the owning GPU as no other processor have access to it. {
static NV_STATUS pool_alloc_secure_page(uvm_gpu_semaphore_pool_t *pool, uvm_rm_mem_free(page->memory);
uvm_gpu_semaphore_pool_page_t *pool_page, page->memory = NULL;
uvm_rm_mem_type_t memory_type)
if (gpu_semaphore_pool_is_secure(page->pool)) {
uvm_rm_mem_free(page->conf_computing.encrypted_payload_memory);
uvm_rm_mem_free(page->conf_computing.auth_tag_memory);
uvm_rm_mem_free(page->conf_computing.notifier_memory);
page->conf_computing.encrypted_payload_memory = NULL;
page->conf_computing.auth_tag_memory = NULL;
page->conf_computing.notifier_memory = NULL;
}
else {
UVM_ASSERT(!page->conf_computing.encrypted_payload_memory);
UVM_ASSERT(!page->conf_computing.auth_tag_memory);
UVM_ASSERT(!page->conf_computing.notifier_memory);
}
}
static NV_STATUS pool_page_alloc_buffers(uvm_gpu_semaphore_pool_page_t *page)
{ {
NV_STATUS status; NV_STATUS status;
uvm_gpu_semaphore_pool_t *pool = page->pool;
uvm_rm_mem_type_t memory_type = (pool->aperture == UVM_APERTURE_SYS) ? UVM_RM_MEM_TYPE_SYS : UVM_RM_MEM_TYPE_GPU;
size_t align = 0;
bool map_all = true;
align = gpu_semaphore_pool_is_secure(pool) ? UVM_CONF_COMPUTING_BUF_ALIGNMENT : 0;
map_all = gpu_semaphore_pool_is_secure(pool) ? false : true;
UVM_ASSERT(gpu_semaphore_pool_is_secure(pool)); if (map_all)
status = uvm_rm_mem_alloc(pool->gpu, status = uvm_rm_mem_alloc_and_map_all(pool->gpu, memory_type, UVM_SEMAPHORE_PAGE_SIZE, align, &page->memory);
memory_type, else
UVM_SEMAPHORE_PAGE_SIZE, status = uvm_rm_mem_alloc(pool->gpu, memory_type, UVM_SEMAPHORE_PAGE_SIZE, align, &page->memory);
UVM_CONF_COMPUTING_BUF_ALIGNMENT,
&pool_page->memory);
if (status != NV_OK) if (status != NV_OK)
return status; goto error;
if (!gpu_semaphore_pool_is_secure(pool))
return NV_OK;
status = uvm_rm_mem_alloc_and_map_cpu(pool->gpu,
UVM_RM_MEM_TYPE_SYS,
UVM_SEMAPHORE_PAGE_SIZE,
UVM_CONF_COMPUTING_BUF_ALIGNMENT,
&page->conf_computing.encrypted_payload_memory);
if (status != NV_OK)
goto error;
BUILD_BUG_ON(UVM_CONF_COMPUTING_AUTH_TAG_SIZE % UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT);
status = uvm_rm_mem_alloc_and_map_cpu(pool->gpu,
UVM_RM_MEM_TYPE_SYS,
UVM_SEMAPHORE_COUNT_PER_PAGE * UVM_CONF_COMPUTING_AUTH_TAG_SIZE,
UVM_CONF_COMPUTING_AUTH_TAG_ALIGNMENT,
&page->conf_computing.auth_tag_memory);
if (status != NV_OK)
goto error;
status = uvm_rm_mem_alloc_and_map_cpu(pool->gpu,
UVM_RM_MEM_TYPE_SYS,
UVM_SEMAPHORE_COUNT_PER_PAGE * sizeof(NvU32),
0,
&page->conf_computing.notifier_memory);
if (status != NV_OK)
goto error;
return NV_OK; return NV_OK;
error:
pool_page_free_buffers(page);
return status;
} }
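A stand-alone sketch of the alloc-all-or-unwind pattern used above for the three per-page unprotected sysmem buffers (encrypted payloads, auth tags, notifiers): any allocation failure frees whatever was already allocated so the page is left clean. Sizes and types are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct page_buffers {
    void *encrypted_payloads;
    void *auth_tags;
    void *notifiers;
};

static void buffers_free(struct page_buffers *p)
{
    free(p->encrypted_payloads);
    free(p->auth_tags);
    free(p->notifiers);
    p->encrypted_payloads = p->auth_tags = p->notifiers = NULL;
}

static int buffers_alloc(struct page_buffers *p, size_t semaphores, size_t tag_size)
{
    p->encrypted_payloads = calloc(semaphores, sizeof(unsigned));
    if (!p->encrypted_payloads)
        goto error;

    p->auth_tags = calloc(semaphores, tag_size);
    if (!p->auth_tags)
        goto error;

    p->notifiers = calloc(semaphores, sizeof(unsigned));
    if (!p->notifiers)
        goto error;

    return 0;

error:
    buffers_free(p);    /* unwind partial allocations */
    return -1;
}

int main(void)
{
    struct page_buffers page = {0};

    printf("%s\n", buffers_alloc(&page, 64, 32) == 0 ? "allocated" : "failed");
    buffers_free(&page);
    return 0;
}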
static NV_STATUS pool_alloc_page(uvm_gpu_semaphore_pool_t *pool) static NV_STATUS pool_alloc_page(uvm_gpu_semaphore_pool_t *pool)
{ {
NV_STATUS status; NV_STATUS status;
uvm_gpu_semaphore_pool_page_t *pool_page; uvm_gpu_semaphore_pool_page_t *pool_page;
NvU32 *payloads;
size_t i;
uvm_rm_mem_type_t memory_type = (pool->aperture == UVM_APERTURE_SYS) ? UVM_RM_MEM_TYPE_SYS : UVM_RM_MEM_TYPE_GPU;
uvm_assert_mutex_locked(&pool->mutex); uvm_assert_mutex_locked(&pool->mutex);
@ -188,24 +228,9 @@ static NV_STATUS pool_alloc_page(uvm_gpu_semaphore_pool_t *pool)
pool_page->pool = pool; pool_page->pool = pool;
// Whenever the Confidential Computing feature is enabled, engines can status = pool_page_alloc_buffers(pool_page);
// access semaphores only in the CPR of vidmem. Mapping to other GPUs is
// also disabled.
if (gpu_semaphore_pool_is_secure(pool)) {
status = pool_alloc_secure_page(pool, pool_page, memory_type);
if (status != NV_OK) if (status != NV_OK)
goto error; goto error;
}
else {
status = uvm_rm_mem_alloc_and_map_all(pool->gpu,
memory_type,
UVM_SEMAPHORE_PAGE_SIZE,
0,
&pool_page->memory);
if (status != NV_OK)
goto error;
}
// Verify the GPU can access the semaphore pool. // Verify the GPU can access the semaphore pool.
UVM_ASSERT(gpu_can_access_semaphore_pool(pool->gpu, pool_page->memory)); UVM_ASSERT(gpu_can_access_semaphore_pool(pool->gpu, pool_page->memory));
@ -217,7 +242,9 @@ static NV_STATUS pool_alloc_page(uvm_gpu_semaphore_pool_t *pool)
pool->free_semaphores_count += UVM_SEMAPHORE_COUNT_PER_PAGE; pool->free_semaphores_count += UVM_SEMAPHORE_COUNT_PER_PAGE;
if (semaphore_uses_canary(pool)) { if (semaphore_uses_canary(pool)) {
payloads = uvm_rm_mem_get_cpu_va(pool_page->memory); size_t i;
NvU32 *payloads = uvm_rm_mem_get_cpu_va(pool_page->memory);
for (i = 0; i < UVM_SEMAPHORE_COUNT_PER_PAGE; i++) for (i = 0; i < UVM_SEMAPHORE_COUNT_PER_PAGE; i++)
payloads[i] = make_canary(0); payloads[i] = make_canary(0);
} }
@ -253,7 +280,7 @@ static void pool_free_page(uvm_gpu_semaphore_pool_page_t *page)
pool->free_semaphores_count -= UVM_SEMAPHORE_COUNT_PER_PAGE; pool->free_semaphores_count -= UVM_SEMAPHORE_COUNT_PER_PAGE;
list_del(&page->all_pages_node); list_del(&page->all_pages_node);
uvm_rm_mem_free(page->memory); pool_page_free_buffers(page);
uvm_kvfree(page); uvm_kvfree(page);
} }
@ -273,19 +300,22 @@ NV_STATUS uvm_gpu_semaphore_alloc(uvm_gpu_semaphore_pool_t *pool, uvm_gpu_semaph
goto done; goto done;
list_for_each_entry(page, &pool->pages, all_pages_node) { list_for_each_entry(page, &pool->pages, all_pages_node) {
NvU32 semaphore_index = find_first_bit(page->free_semaphores, UVM_SEMAPHORE_COUNT_PER_PAGE); const NvU32 semaphore_index = find_first_bit(page->free_semaphores, UVM_SEMAPHORE_COUNT_PER_PAGE);
UVM_ASSERT(semaphore_index <= UVM_SEMAPHORE_COUNT_PER_PAGE);
if (semaphore_index == UVM_SEMAPHORE_COUNT_PER_PAGE) if (semaphore_index == UVM_SEMAPHORE_COUNT_PER_PAGE)
continue; continue;
if (gpu_semaphore_pool_is_secure(pool)) {
semaphore->conf_computing.index = semaphore_index;
}
else {
semaphore->payload = (NvU32*)((char*)uvm_rm_mem_get_cpu_va(page->memory) +
semaphore_index * UVM_SEMAPHORE_SIZE);
}
semaphore->page = page; semaphore->page = page;
semaphore->index = semaphore_index;
if (gpu_semaphore_pool_is_secure(pool)) {
// Reset the notifier to prevent a false attack detection when
// checking for an updated value
*uvm_gpu_semaphore_get_notifier_cpu_va(semaphore) = semaphore->conf_computing.last_observed_notifier;
}
if (semaphore_uses_canary(pool)) if (semaphore_uses_canary(pool))
UVM_ASSERT(is_canary(uvm_gpu_semaphore_get_payload(semaphore))); UVM_ASSERT(is_canary(uvm_gpu_semaphore_get_payload(semaphore)));
@ -311,7 +341,6 @@ void uvm_gpu_semaphore_free(uvm_gpu_semaphore_t *semaphore)
{ {
uvm_gpu_semaphore_pool_page_t *page; uvm_gpu_semaphore_pool_page_t *page;
uvm_gpu_semaphore_pool_t *pool; uvm_gpu_semaphore_pool_t *pool;
NvU32 index;
UVM_ASSERT(semaphore); UVM_ASSERT(semaphore);
@ -323,7 +352,6 @@ void uvm_gpu_semaphore_free(uvm_gpu_semaphore_t *semaphore)
return; return;
pool = page->pool; pool = page->pool;
index = get_index(semaphore);
// Write a known value lower than the current payload in an attempt to catch // Write a known value lower than the current payload in an attempt to catch
// release-after-free and acquire-after-free. // release-after-free and acquire-after-free.
@ -333,10 +361,9 @@ void uvm_gpu_semaphore_free(uvm_gpu_semaphore_t *semaphore)
uvm_mutex_lock(&pool->mutex); uvm_mutex_lock(&pool->mutex);
semaphore->page = NULL; semaphore->page = NULL;
semaphore->payload = NULL;
++pool->free_semaphores_count; ++pool->free_semaphores_count;
__set_bit(index, page->free_semaphores); __set_bit(semaphore->index, page->free_semaphores);
uvm_mutex_unlock(&pool->mutex); uvm_mutex_unlock(&pool->mutex);
} }
@ -449,18 +476,72 @@ NvU64 uvm_gpu_semaphore_get_gpu_proxy_va(uvm_gpu_semaphore_t *semaphore, uvm_gpu
NvU64 uvm_gpu_semaphore_get_gpu_va(uvm_gpu_semaphore_t *semaphore, uvm_gpu_t *gpu, bool is_proxy_va_space) NvU64 uvm_gpu_semaphore_get_gpu_va(uvm_gpu_semaphore_t *semaphore, uvm_gpu_t *gpu, bool is_proxy_va_space)
{ {
NvU32 index = get_index(semaphore);
NvU64 base_va = uvm_rm_mem_get_gpu_va(semaphore->page->memory, gpu, is_proxy_va_space).address; NvU64 base_va = uvm_rm_mem_get_gpu_va(semaphore->page->memory, gpu, is_proxy_va_space).address;
return base_va + UVM_SEMAPHORE_SIZE * index; return base_va + semaphore->index * UVM_SEMAPHORE_SIZE;
}
NvU32 *uvm_gpu_semaphore_get_cpu_va(uvm_gpu_semaphore_t *semaphore)
{
char *base_va;
if (gpu_semaphore_is_secure(semaphore))
return &semaphore->conf_computing.cached_payload;
base_va = uvm_rm_mem_get_cpu_va(semaphore->page->memory);
return (NvU32*)(base_va + semaphore->index * UVM_SEMAPHORE_SIZE);
}
NvU32 *uvm_gpu_semaphore_get_encrypted_payload_cpu_va(uvm_gpu_semaphore_t *semaphore)
{
char *encrypted_base_va = uvm_rm_mem_get_cpu_va(semaphore->page->conf_computing.encrypted_payload_memory);
return (NvU32*)(encrypted_base_va + semaphore->index * UVM_SEMAPHORE_SIZE);
}
uvm_gpu_address_t uvm_gpu_semaphore_get_encrypted_payload_gpu_va(uvm_gpu_semaphore_t *semaphore)
{
NvU64 encrypted_base_va = uvm_rm_mem_get_gpu_uvm_va(semaphore->page->conf_computing.encrypted_payload_memory,
semaphore->page->pool->gpu);
return uvm_gpu_address_virtual_unprotected(encrypted_base_va + semaphore->index * UVM_SEMAPHORE_SIZE);
}
uvm_gpu_semaphore_notifier_t *uvm_gpu_semaphore_get_notifier_cpu_va(uvm_gpu_semaphore_t *semaphore)
{
uvm_gpu_semaphore_notifier_t *notifier_base_va =
uvm_rm_mem_get_cpu_va(semaphore->page->conf_computing.notifier_memory);
return notifier_base_va + semaphore->index;
}
uvm_gpu_address_t uvm_gpu_semaphore_get_notifier_gpu_va(uvm_gpu_semaphore_t *semaphore)
{
NvU64 notifier_base_va = uvm_rm_mem_get_gpu_uvm_va(semaphore->page->conf_computing.notifier_memory,
semaphore->page->pool->gpu);
return uvm_gpu_address_virtual_unprotected(notifier_base_va +
semaphore->index * sizeof(uvm_gpu_semaphore_notifier_t));
}
void *uvm_gpu_semaphore_get_auth_tag_cpu_va(uvm_gpu_semaphore_t *semaphore)
{
char *auth_tag_base_va = uvm_rm_mem_get_cpu_va(semaphore->page->conf_computing.auth_tag_memory);
return (void*)(auth_tag_base_va + semaphore->index * UVM_CONF_COMPUTING_AUTH_TAG_SIZE);
}
uvm_gpu_address_t uvm_gpu_semaphore_get_auth_tag_gpu_va(uvm_gpu_semaphore_t *semaphore)
{
NvU64 auth_tag_base_va = uvm_rm_mem_get_gpu_uvm_va(semaphore->page->conf_computing.auth_tag_memory,
semaphore->page->pool->gpu);
return uvm_gpu_address_virtual_unprotected(auth_tag_base_va + semaphore->index * UVM_CONF_COMPUTING_AUTH_TAG_SIZE);
} }
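A stand-alone sketch of the index-based addressing introduced above: instead of caching a payload pointer, each per-semaphore CPU or GPU address is derived from the page base plus index times a fixed stride, so only the small index needs to be stored. The stride value below is illustrative:

#include <stdint.h>
#include <stdio.h>

#define SEMAPHORE_STRIDE 64u    /* illustrative per-semaphore stride */

static uint32_t *payload_cpu_va(uint8_t *page_base, unsigned index)
{
    return (uint32_t *)(page_base + (size_t)index * SEMAPHORE_STRIDE);
}

static uint64_t payload_gpu_va(uint64_t page_gpu_base, unsigned index)
{
    return page_gpu_base + (uint64_t)index * SEMAPHORE_STRIDE;
}

int main(void)
{
    static uint8_t page[64 * SEMAPHORE_STRIDE];

    *payload_cpu_va(page, 3) = 42;
    printf("cpu payload[3]=%u, gpu va offset=0x%llx\n",
           (unsigned)*payload_cpu_va(page, 3),
           (unsigned long long)(payload_gpu_va(0x1000, 3) - 0x1000));
    return 0;
}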
NvU32 uvm_gpu_semaphore_get_payload(uvm_gpu_semaphore_t *semaphore) NvU32 uvm_gpu_semaphore_get_payload(uvm_gpu_semaphore_t *semaphore)
{ {
if (gpu_semaphore_is_secure(semaphore)) return UVM_GPU_READ_ONCE(*uvm_gpu_semaphore_get_cpu_va(semaphore));
return UVM_GPU_READ_ONCE(semaphore->conf_computing.cached_payload);
return UVM_GPU_READ_ONCE(*semaphore->payload);
} }
void uvm_gpu_semaphore_set_payload(uvm_gpu_semaphore_t *semaphore, NvU32 payload) void uvm_gpu_semaphore_set_payload(uvm_gpu_semaphore_t *semaphore, NvU32 payload)
@ -477,10 +558,7 @@ void uvm_gpu_semaphore_set_payload(uvm_gpu_semaphore_t *semaphore, NvU32 payload
// the GPU correctly even on non-SMP). // the GPU correctly even on non-SMP).
mb(); mb();
if (gpu_semaphore_is_secure(semaphore)) UVM_GPU_WRITE_ONCE(*uvm_gpu_semaphore_get_cpu_va(semaphore), payload);
UVM_GPU_WRITE_ONCE(semaphore->conf_computing.cached_payload, payload);
else
UVM_GPU_WRITE_ONCE(*semaphore->payload, payload);
} }
// This function is intended to catch channels which have been left dangling in // This function is intended to catch channels which have been left dangling in
@ -546,22 +624,11 @@ void uvm_gpu_tracking_semaphore_free(uvm_gpu_tracking_semaphore_t *tracking_sem)
uvm_gpu_semaphore_free(&tracking_sem->semaphore); uvm_gpu_semaphore_free(&tracking_sem->semaphore);
} }
static bool should_skip_secure_semaphore_update(NvU32 last_observed_notifier, NvU32 gpu_notifier) static void gpu_semaphore_encrypted_payload_update(uvm_channel_t *channel, uvm_gpu_semaphore_t *semaphore)
{ {
// No new value, or the GPU is currently writing the new encrypted material
// and no change in value would still result in corrupted data.
return (last_observed_notifier == gpu_notifier) || (gpu_notifier % 2);
}
static void uvm_gpu_semaphore_encrypted_payload_update(uvm_channel_t *channel, uvm_gpu_semaphore_t *semaphore)
{
UvmCslIv local_iv;
NvU32 local_payload; NvU32 local_payload;
NvU32 new_sem_value; uvm_gpu_semaphore_notifier_t gpu_notifier;
NvU32 gpu_notifier; uvm_gpu_semaphore_notifier_t new_gpu_notifier = 0;
NvU32 last_observed_notifier;
NvU32 new_gpu_notifier = 0;
NvU32 iv_index = 0;
// A channel can have multiple entries pending and the tracking semaphore // A channel can have multiple entries pending and the tracking semaphore
// update of each entry can race with this function. Since the semaphore // update of each entry can race with this function. Since the semaphore
@ -570,64 +637,72 @@ static void uvm_gpu_semaphore_encrypted_payload_update(uvm_channel_t *channel, u
unsigned tries_left = channel->num_gpfifo_entries; unsigned tries_left = channel->num_gpfifo_entries;
NV_STATUS status = NV_OK; NV_STATUS status = NV_OK;
NvU8 local_auth_tag[UVM_CONF_COMPUTING_AUTH_TAG_SIZE]; NvU8 local_auth_tag[UVM_CONF_COMPUTING_AUTH_TAG_SIZE];
UvmCslIv *ivs_cpu_addr = semaphore->conf_computing.ivs; uvm_gpu_semaphore_notifier_t *semaphore_notifier_cpu_addr = uvm_gpu_semaphore_get_notifier_cpu_va(semaphore);
void *auth_tag_cpu_addr = uvm_rm_mem_get_cpu_va(semaphore->conf_computing.auth_tag);
NvU32 *gpu_notifier_cpu_addr = (NvU32 *)uvm_rm_mem_get_cpu_va(semaphore->conf_computing.notifier);
NvU32 *payload_cpu_addr = (NvU32 *)uvm_rm_mem_get_cpu_va(semaphore->conf_computing.encrypted_payload);
UVM_ASSERT(g_uvm_global.conf_computing_enabled); UVM_ASSERT(g_uvm_global.conf_computing_enabled);
UVM_ASSERT(uvm_channel_is_ce(channel)); UVM_ASSERT(uvm_channel_is_ce(channel));
last_observed_notifier = semaphore->conf_computing.last_observed_notifier;
gpu_notifier = UVM_READ_ONCE(*gpu_notifier_cpu_addr);
UVM_ASSERT(last_observed_notifier <= gpu_notifier);
if (should_skip_secure_semaphore_update(last_observed_notifier, gpu_notifier))
return;
do { do {
gpu_notifier = UVM_READ_ONCE(*gpu_notifier_cpu_addr); gpu_notifier = UVM_READ_ONCE(*semaphore_notifier_cpu_addr);
UVM_ASSERT(gpu_notifier >= semaphore->conf_computing.last_observed_notifier);
// Odd notifier value means there's an update in progress. // Odd notifier value means there's an update in progress.
if (gpu_notifier % 2) if (gpu_notifier % 2)
continue; continue;
// There's no change since last time
if (gpu_notifier == semaphore->conf_computing.last_observed_notifier)
return;
// Make sure no memory accesses happen before we read the notifier // Make sure no memory accesses happen before we read the notifier
smp_mb__after_atomic(); smp_mb__after_atomic();
iv_index = (gpu_notifier / 2) % channel->num_gpfifo_entries; memcpy(local_auth_tag, uvm_gpu_semaphore_get_auth_tag_cpu_va(semaphore), sizeof(local_auth_tag));
memcpy(local_auth_tag, auth_tag_cpu_addr, sizeof(local_auth_tag)); local_payload = UVM_READ_ONCE(*uvm_gpu_semaphore_get_encrypted_payload_cpu_va(semaphore));
local_payload = UVM_READ_ONCE(*payload_cpu_addr);
memcpy(&local_iv, &ivs_cpu_addr[iv_index], sizeof(local_iv));
// Make sure the second read of notifier happens after // Make sure the second read of notifier happens after
// all memory accesses. // all memory accesses.
smp_mb__before_atomic(); smp_mb__before_atomic();
new_gpu_notifier = UVM_READ_ONCE(*gpu_notifier_cpu_addr); new_gpu_notifier = UVM_READ_ONCE(*semaphore_notifier_cpu_addr);
tries_left--; tries_left--;
} while ((tries_left > 0) && ((gpu_notifier != new_gpu_notifier) || (gpu_notifier % 2))); } while ((tries_left > 0) && ((gpu_notifier != new_gpu_notifier) || (gpu_notifier % 2)));
if (!tries_left) { if (!tries_left) {
status = NV_ERR_INVALID_STATE; status = NV_ERR_INVALID_STATE;
goto error;
} }
else {
NvU32 key_version;
const NvU32 iv_index = (gpu_notifier / 2) % channel->num_gpfifo_entries;
NvU32 new_semaphore_value;
UVM_ASSERT(gpu_notifier == new_gpu_notifier);
UVM_ASSERT(gpu_notifier % 2 == 0);
// CPU decryption is guaranteed to use the same key version as the
// associated GPU encryption, because if there was any key rotation in
// between, then key rotation waited for all channels to complete before
// proceeding. The wait implies that the semaphore value matches the
// last one encrypted on the GPU, so this CPU decryption should happen
// before the key is rotated.
key_version = uvm_channel_pool_key_version(channel->pool);
if (gpu_notifier == new_gpu_notifier) {
status = uvm_conf_computing_cpu_decrypt(channel, status = uvm_conf_computing_cpu_decrypt(channel,
&new_sem_value, &new_semaphore_value,
&local_payload, &local_payload,
&local_iv, &semaphore->conf_computing.ivs[iv_index],
sizeof(new_sem_value), key_version,
sizeof(new_semaphore_value),
&local_auth_tag); &local_auth_tag);
if (status != NV_OK) if (status != NV_OK)
goto error; goto error;
uvm_gpu_semaphore_set_payload(semaphore, new_sem_value); uvm_gpu_semaphore_set_payload(semaphore, new_semaphore_value);
UVM_WRITE_ONCE(semaphore->conf_computing.last_observed_notifier, new_gpu_notifier); UVM_WRITE_ONCE(semaphore->conf_computing.last_observed_notifier, new_gpu_notifier);
}
return; return;
}
error: error:
// Decryption failure is a fatal error, as is running out of tries.
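A stand-alone model of the odd/even notifier handshake implemented above: the producer bumps the notifier to an odd value before rewriting the encrypted payload and to an even value afterwards, and the consumer only trusts a snapshot taken while the notifier was even and unchanged. Single-threaded toy; the memory barriers and the decryption step of the real code are omitted:

#include <stdint.h>
#include <stdio.h>

static uint32_t notifier;   /* even: stable, odd: update in progress */
static uint32_t payload;

static void producer_update(uint32_t value)
{
    notifier++;             /* becomes odd: readers must retry */
    payload = value;
    notifier++;             /* becomes even again: snapshot is valid */
}

static int consumer_read(uint32_t *out, unsigned tries)
{
    while (tries--) {
        uint32_t before = notifier;

        if (before % 2)
            continue;       /* writer in progress */

        uint32_t snapshot = payload;

        if (notifier == before) {
            *out = snapshot;
            return 0;
        }
    }

    return -1;              /* ran out of tries */
}

int main(void)
{
    uint32_t value = 0;

    producer_update(42);
    printf("%s %u\n", consumer_read(&value, 4) == 0 ? "read" : "stale", value);
    return 0;
}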
@ -650,11 +725,11 @@ static NvU64 update_completed_value_locked(uvm_gpu_tracking_semaphore_t *trackin
else else
uvm_assert_spinlock_locked(&tracking_semaphore->s_lock); uvm_assert_spinlock_locked(&tracking_semaphore->s_lock);
if (tracking_semaphore->semaphore.conf_computing.encrypted_payload) { if (gpu_semaphore_is_secure(&tracking_semaphore->semaphore)) {
// TODO: Bug 4008734: [UVM][HCC] Extend secure tracking semaphore // TODO: Bug 4008734: [UVM][HCC] Extend secure tracking semaphore
// mechanism to all semaphore // mechanism to all semaphore
uvm_channel_t *channel = container_of(tracking_semaphore, uvm_channel_t, tracking_sem); uvm_channel_t *channel = container_of(tracking_semaphore, uvm_channel_t, tracking_sem);
uvm_gpu_semaphore_encrypted_payload_update(channel, &tracking_semaphore->semaphore); gpu_semaphore_encrypted_payload_update(channel, &tracking_semaphore->semaphore);
} }
new_sem_value = uvm_gpu_semaphore_get_payload(&tracking_semaphore->semaphore); new_sem_value = uvm_gpu_semaphore_get_payload(&tracking_semaphore->semaphore);
@ -690,7 +765,7 @@ static NvU64 update_completed_value_locked(uvm_gpu_tracking_semaphore_t *trackin
UVM_ASSERT_MSG_RELEASE(new_value - old_value <= UVM_GPU_SEMAPHORE_MAX_JUMP, UVM_ASSERT_MSG_RELEASE(new_value - old_value <= UVM_GPU_SEMAPHORE_MAX_JUMP,
"GPU %s unexpected semaphore (CPU VA 0x%llx) jump from 0x%llx to 0x%llx\n", "GPU %s unexpected semaphore (CPU VA 0x%llx) jump from 0x%llx to 0x%llx\n",
uvm_gpu_name(tracking_semaphore->semaphore.page->pool->gpu), uvm_gpu_name(tracking_semaphore->semaphore.page->pool->gpu),
(NvU64)(uintptr_t)tracking_semaphore->semaphore.payload, (NvU64)(uintptr_t)uvm_gpu_semaphore_get_cpu_va(&tracking_semaphore->semaphore),
old_value, new_value); old_value, new_value);
// Use an atomic write even though the lock is held so that the value can // Use an atomic write even though the lock is held so that the value can
@ -29,6 +29,8 @@
#include "uvm_rm_mem.h" #include "uvm_rm_mem.h"
#include "uvm_linux.h" #include "uvm_linux.h"
typedef NvU32 uvm_gpu_semaphore_notifier_t;
// A GPU semaphore is a memory location accessible by the GPUs and the CPU // A GPU semaphore is a memory location accessible by the GPUs and the CPU
// that's used for synchronization among them. // that's used for synchronization among them.
// The GPU has primitives to acquire (wait for) and release (set) 4-byte memory // The GPU has primitives to acquire (wait for) and release (set) 4-byte memory
@ -45,17 +47,15 @@ struct uvm_gpu_semaphore_struct
// The semaphore pool page the semaphore came from // The semaphore pool page the semaphore came from
uvm_gpu_semaphore_pool_page_t *page; uvm_gpu_semaphore_pool_page_t *page;
// Pointer to the memory location // Index of the semaphore in semaphore page
NvU32 *payload;
struct {
NvU16 index; NvU16 index;
NvU32 cached_payload;
uvm_rm_mem_t *encrypted_payload; struct {
uvm_rm_mem_t *notifier;
uvm_rm_mem_t *auth_tag;
UvmCslIv *ivs; UvmCslIv *ivs;
NvU32 last_pushed_notifier; NvU32 cached_payload;
NvU32 last_observed_notifier;
uvm_gpu_semaphore_notifier_t last_pushed_notifier;
uvm_gpu_semaphore_notifier_t last_observed_notifier;
} conf_computing; } conf_computing;
}; };
@ -151,6 +151,17 @@ NvU64 uvm_gpu_semaphore_get_gpu_proxy_va(uvm_gpu_semaphore_t *semaphore, uvm_gpu
NvU64 uvm_gpu_semaphore_get_gpu_va(uvm_gpu_semaphore_t *semaphore, uvm_gpu_t *gpu, bool is_proxy_va_space); NvU64 uvm_gpu_semaphore_get_gpu_va(uvm_gpu_semaphore_t *semaphore, uvm_gpu_t *gpu, bool is_proxy_va_space);
NvU32 *uvm_gpu_semaphore_get_cpu_va(uvm_gpu_semaphore_t *semaphore);
NvU32 *uvm_gpu_semaphore_get_encrypted_payload_cpu_va(uvm_gpu_semaphore_t *semaphore);
uvm_gpu_address_t uvm_gpu_semaphore_get_encrypted_payload_gpu_va(uvm_gpu_semaphore_t *semaphore);
uvm_gpu_semaphore_notifier_t *uvm_gpu_semaphore_get_notifier_cpu_va(uvm_gpu_semaphore_t *semaphore);
uvm_gpu_address_t uvm_gpu_semaphore_get_notifier_gpu_va(uvm_gpu_semaphore_t *semaphore);
void *uvm_gpu_semaphore_get_auth_tag_cpu_va(uvm_gpu_semaphore_t *semaphore);
uvm_gpu_address_t uvm_gpu_semaphore_get_auth_tag_gpu_va(uvm_gpu_semaphore_t *semaphore);
// Read the 32-bit payload of the semaphore // Read the 32-bit payload of the semaphore
// Notably doesn't provide any memory ordering guarantees and needs to be used with // Notably doesn't provide any memory ordering guarantees and needs to be used with
// care. For an example of what needs to be considered see // care. For an example of what needs to be considered see
@ -284,8 +284,10 @@ static void hmm_va_block_unregister_gpu(uvm_va_block_t *va_block,
// Reset preferred location and accessed-by of policy nodes if needed. // Reset preferred location and accessed-by of policy nodes if needed.
uvm_for_each_va_policy_node_in(node, va_block, va_block->start, va_block->end) { uvm_for_each_va_policy_node_in(node, va_block, va_block->start, va_block->end) {
if (uvm_id_equal(node->policy.preferred_location, gpu->id)) if (uvm_va_policy_preferred_location_equal(&node->policy, gpu->id, NUMA_NO_NODE)) {
node->policy.preferred_location = UVM_ID_INVALID; node->policy.preferred_location = UVM_ID_INVALID;
node->policy.preferred_nid = NUMA_NO_NODE;
}
uvm_processor_mask_clear(&node->policy.accessed_by, gpu->id); uvm_processor_mask_clear(&node->policy.accessed_by, gpu->id);
} }


@ -27,7 +27,7 @@
const char *uvm_lock_order_to_string(uvm_lock_order_t lock_order) const char *uvm_lock_order_to_string(uvm_lock_order_t lock_order)
{ {
BUILD_BUG_ON(UVM_LOCK_ORDER_COUNT != 34); BUILD_BUG_ON(UVM_LOCK_ORDER_COUNT != 36);
switch (lock_order) { switch (lock_order) {
UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_INVALID); UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_INVALID);
@ -48,7 +48,9 @@ const char *uvm_lock_order_to_string(uvm_lock_order_t lock_order)
UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_CONF_COMPUTING_DMA_BUFFER_POOL); UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_CONF_COMPUTING_DMA_BUFFER_POOL);
UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_CHUNK_MAPPING); UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_CHUNK_MAPPING);
UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_PAGE_TREE); UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_PAGE_TREE);
UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_KEY_ROTATION);
UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_CSL_PUSH); UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_CSL_PUSH);
UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_KEY_ROTATION_WLC);
UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_CSL_WLC_PUSH); UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_CSL_WLC_PUSH);
UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_CSL_SEC2_PUSH); UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_CSL_SEC2_PUSH);
UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_PUSH); UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_PUSH);


@ -322,6 +322,15 @@
// Operations not allowed while holding this lock // Operations not allowed while holding this lock
// - GPU memory allocation which can evict // - GPU memory allocation which can evict
// //
// - Channel pool key rotation lock
// Order: UVM_LOCK_ORDER_KEY_ROTATION
// Condition: Confidential Computing is enabled
// Mutex per channel pool
//
// The lock ensures mutual exclusion during key rotation affecting all the
// channels in the associated pool. Key rotation in WLC pools is handled
// using a separate lock order, see UVM_LOCK_ORDER_KEY_ROTATION_WLC below.
//
// - CE channel CSL channel pool semaphore // - CE channel CSL channel pool semaphore
// Order: UVM_LOCK_ORDER_CSL_PUSH // Order: UVM_LOCK_ORDER_CSL_PUSH
// Condition: The Confidential Computing feature is enabled // Condition: The Confidential Computing feature is enabled
@ -338,6 +347,15 @@
// Operations allowed while holding this lock // Operations allowed while holding this lock
// - Pushing work to CE channels (except for WLC channels) // - Pushing work to CE channels (except for WLC channels)
// //
// - WLC channel pool key rotation lock
// Order: UVM_LOCK_ORDER_KEY_ROTATION_WLC
// Condition: Confidential Computing is enabled
// Mutex of WLC channel pool
//
// The lock has the same purpose as the regular channel pool key rotation
// lock. Using a different order lock for WLC channels allows key rotation
// on those channels during indirect work submission.
//
// - WLC CSL channel pool semaphore // - WLC CSL channel pool semaphore
// Order: UVM_LOCK_ORDER_CSL_WLC_PUSH // Order: UVM_LOCK_ORDER_CSL_WLC_PUSH
// Condition: The Confidential Computing feature is enabled // Condition: The Confidential Computing feature is enabled
@ -484,7 +502,9 @@ typedef enum
UVM_LOCK_ORDER_CONF_COMPUTING_DMA_BUFFER_POOL, UVM_LOCK_ORDER_CONF_COMPUTING_DMA_BUFFER_POOL,
UVM_LOCK_ORDER_CHUNK_MAPPING, UVM_LOCK_ORDER_CHUNK_MAPPING,
UVM_LOCK_ORDER_PAGE_TREE, UVM_LOCK_ORDER_PAGE_TREE,
UVM_LOCK_ORDER_KEY_ROTATION,
UVM_LOCK_ORDER_CSL_PUSH, UVM_LOCK_ORDER_CSL_PUSH,
UVM_LOCK_ORDER_KEY_ROTATION_WLC,
UVM_LOCK_ORDER_CSL_WLC_PUSH, UVM_LOCK_ORDER_CSL_WLC_PUSH,
UVM_LOCK_ORDER_CSL_SEC2_PUSH, UVM_LOCK_ORDER_CSL_SEC2_PUSH,
UVM_LOCK_ORDER_PUSH, UVM_LOCK_ORDER_PUSH,
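A hedged sketch of the ordering documented above, not code from this commit: the per-pool key rotation mutex (UVM_LOCK_ORDER_KEY_ROTATION) is acquired before any lock of CSL push order, so no push can start on the pool's channels while keys rotate. The field name below is an assumption for illustration.

static void example_rotate_pool_key(uvm_channel_pool_t *pool)
{
    // UVM_LOCK_ORDER_KEY_ROTATION: taken before UVM_LOCK_ORDER_CSL_PUSH.
    uvm_mutex_lock(&pool->conf_computing.key_rotation.mutex); // assumed field name

    // Key rotation work goes here. Channels in this pool cannot begin new
    // pushes because the CSL push lock is ordered after this mutex.

    uvm_mutex_unlock(&pool->conf_computing.key_rotation.mutex);
}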


@ -39,6 +39,7 @@
#include "uvm_pte_batch.h" #include "uvm_pte_batch.h"
#include "uvm_tlb_batch.h" #include "uvm_tlb_batch.h"
#include "nv_uvm_interface.h" #include "nv_uvm_interface.h"
#include "nv_uvm_types.h"
#include "uvm_pushbuffer.h" #include "uvm_pushbuffer.h"
@ -101,11 +102,11 @@ static NV_STATUS uvm_pte_buffer_init(uvm_va_range_t *va_range,
pte_buffer->va_range = va_range; pte_buffer->va_range = va_range;
pte_buffer->gpu = gpu; pte_buffer->gpu = gpu;
pte_buffer->mapping_info.cachingType = map_rm_params->caching_type; pte_buffer->mapping_info.cachingType = (UvmRmGpuCachingType) map_rm_params->caching_type;
pte_buffer->mapping_info.mappingType = map_rm_params->mapping_type; pte_buffer->mapping_info.mappingType = (UvmRmGpuMappingType) map_rm_params->mapping_type;
pte_buffer->mapping_info.formatType = map_rm_params->format_type; pte_buffer->mapping_info.formatType = (UvmRmGpuFormatType) map_rm_params->format_type;
pte_buffer->mapping_info.elementBits = map_rm_params->element_bits; pte_buffer->mapping_info.elementBits = (UvmRmGpuFormatElementBits) map_rm_params->element_bits;
pte_buffer->mapping_info.compressionType = map_rm_params->compression_type; pte_buffer->mapping_info.compressionType = (UvmRmGpuCompressionType) map_rm_params->compression_type;
if (va_range->type == UVM_VA_RANGE_TYPE_EXTERNAL) if (va_range->type == UVM_VA_RANGE_TYPE_EXTERNAL)
pte_buffer->mapping_info.mappingPageSize = page_size; pte_buffer->mapping_info.mappingPageSize = page_size;

View File

@ -589,7 +589,7 @@ static NV_STATUS uvm_migrate_ranges(uvm_va_space_t *va_space,
skipped_migrate = true; skipped_migrate = true;
} }
else if (uvm_processor_mask_test(&va_range->uvm_lite_gpus, dest_id) && else if (uvm_processor_mask_test(&va_range->uvm_lite_gpus, dest_id) &&
!uvm_id_equal(dest_id, policy->preferred_location)) { !uvm_va_policy_preferred_location_equal(policy, dest_id, NUMA_NO_NODE)) {
// Don't migrate to a non-faultable GPU that is in UVM-Lite mode, // Don't migrate to a non-faultable GPU that is in UVM-Lite mode,
// unless it's the preferred location // unless it's the preferred location
status = NV_ERR_INVALID_DEVICE; status = NV_ERR_INVALID_DEVICE;


@ -126,7 +126,7 @@ NV_STATUS uvm_pmm_sysmem_mappings_add_gpu_mapping(uvm_pmm_sysmem_mappings_t *sys
NvU64 remove_key; NvU64 remove_key;
for (remove_key = base_key; remove_key < key; ++remove_key) for (remove_key = base_key; remove_key < key; ++remove_key)
(void *)radix_tree_delete(&sysmem_mappings->reverse_map_tree, remove_key); (void)radix_tree_delete(&sysmem_mappings->reverse_map_tree, remove_key);
kmem_cache_free(g_reverse_page_map_cache, new_reverse_map); kmem_cache_free(g_reverse_page_map_cache, new_reverse_map);
status = errno_to_nv_status(ret); status = errno_to_nv_status(ret);


@ -671,6 +671,9 @@ static NV_STATUS va_block_set_read_duplication_locked(uvm_va_block_t *va_block,
uvm_assert_mutex_locked(&va_block->lock); uvm_assert_mutex_locked(&va_block->lock);
// Force CPU page residency to be on the preferred NUMA node.
va_block_context->make_resident.dest_nid = uvm_va_range_get_policy(va_block->va_range)->preferred_nid;
for_each_id_in_mask(src_id, &va_block->resident) { for_each_id_in_mask(src_id, &va_block->resident) {
NV_STATUS status; NV_STATUS status;
uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, src_id, NUMA_NO_NODE); uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, src_id, NUMA_NO_NODE);


@ -100,16 +100,8 @@ void uvm_parent_gpus_from_processor_mask(uvm_parent_processor_mask_t *parent_mas
bool uvm_numa_id_eq(int nid0, int nid1) bool uvm_numa_id_eq(int nid0, int nid1)
{ {
UVM_ASSERT(nid0 == -1 || nid0 < MAX_NUMNODES); UVM_ASSERT(nid0 >= NUMA_NO_NODE && nid0 < MAX_NUMNODES);
UVM_ASSERT(nid1 == -1 || nid1 < MAX_NUMNODES); UVM_ASSERT(nid1 >= NUMA_NO_NODE && nid1 < MAX_NUMNODES);
if ((nid0 == NUMA_NO_NODE || nid1 == NUMA_NO_NODE) && nodes_weight(node_possible_map) == 1) {
if (nid0 == NUMA_NO_NODE)
nid0 = first_node(node_possible_map);
if (nid1 == NUMA_NO_NODE)
nid1 = first_node(node_possible_map);
}
return nid0 == nid1; return nid0 == nid1;
} }
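A small illustration (not from this commit) of the tightened semantics: NUMA_NO_NODE now compares equal only to NUMA_NO_NODE, even on systems with a single possible node.

static void example_numa_id_eq_semantics(void)
{
    UVM_ASSERT(uvm_numa_id_eq(NUMA_NO_NODE, NUMA_NO_NODE));
    UVM_ASSERT(uvm_numa_id_eq(0, 0));

    // Previously true on single-node systems, now always false.
    UVM_ASSERT(!uvm_numa_id_eq(NUMA_NO_NODE, 0));
}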


@ -65,9 +65,12 @@ typedef enum
} uvm_push_flag_t; } uvm_push_flag_t;
struct uvm_push_crypto_bundle_struct { struct uvm_push_crypto_bundle_struct {
// Initialization vector used to decrypt the push // Initialization vector used to decrypt the push on the CPU
UvmCslIv iv; UvmCslIv iv;
// Key version used to decrypt the push on the CPU
NvU32 key_version;
// Size of the pushbuffer that is encrypted/decrypted // Size of the pushbuffer that is encrypted/decrypted
NvU32 push_size; NvU32 push_size;
}; };
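A hedged sketch (not from this commit) of how the bundle above might be populated when a push is encrypted, based on the calls visible elsewhere in this diff; the function name and the source of push_size are illustrative.

static void example_fill_push_crypto_bundle(uvm_push_t *push,
                                            struct uvm_push_crypto_bundle_struct *bundle,
                                            NvU32 push_size)
{
    // Log the GPU-side encryption and capture the IV needed to decrypt the
    // push on the CPU (three-argument form used in this release).
    uvm_conf_computing_log_gpu_encryption(push->channel, push_size, &bundle->iv);

    // Record the key version in effect now, so a later CPU decryption still
    // succeeds if the channel keys rotate in the meantime.
    bundle->key_version = uvm_channel_pool_key_version(push->channel->pool);
    bundle->push_size = push_size;
}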


@ -451,7 +451,6 @@ static uvm_pushbuffer_chunk_t *gpfifo_to_chunk(uvm_pushbuffer_t *pushbuffer, uvm
static void decrypt_push(uvm_channel_t *channel, uvm_gpfifo_entry_t *gpfifo) static void decrypt_push(uvm_channel_t *channel, uvm_gpfifo_entry_t *gpfifo)
{ {
NV_STATUS status; NV_STATUS status;
NvU32 auth_tag_offset;
void *auth_tag_cpu_va; void *auth_tag_cpu_va;
void *push_protected_cpu_va; void *push_protected_cpu_va;
void *push_unprotected_cpu_va; void *push_unprotected_cpu_va;
@ -470,16 +469,15 @@ static void decrypt_push(uvm_channel_t *channel, uvm_gpfifo_entry_t *gpfifo)
UVM_ASSERT(!uvm_channel_is_wlc(channel)); UVM_ASSERT(!uvm_channel_is_wlc(channel));
UVM_ASSERT(!uvm_channel_is_lcic(channel)); UVM_ASSERT(!uvm_channel_is_lcic(channel));
push_protected_cpu_va = (char *)get_base_cpu_va(pushbuffer) + pushbuffer_offset; push_protected_cpu_va = get_base_cpu_va(pushbuffer) + pushbuffer_offset;
push_unprotected_cpu_va = (char *)uvm_rm_mem_get_cpu_va(pushbuffer->memory_unprotected_sysmem) + pushbuffer_offset; push_unprotected_cpu_va = (char *)uvm_rm_mem_get_cpu_va(pushbuffer->memory_unprotected_sysmem) + pushbuffer_offset;
auth_tag_offset = push_info_index * UVM_CONF_COMPUTING_AUTH_TAG_SIZE; auth_tag_cpu_va = uvm_channel_get_push_crypto_bundle_auth_tags_cpu_va(channel, push_info_index);
auth_tag_cpu_va = (char *)uvm_rm_mem_get_cpu_va(channel->conf_computing.push_crypto_bundle_auth_tags) +
auth_tag_offset;
status = uvm_conf_computing_cpu_decrypt(channel, status = uvm_conf_computing_cpu_decrypt(channel,
push_protected_cpu_va, push_protected_cpu_va,
push_unprotected_cpu_va, push_unprotected_cpu_va,
&crypto_bundle->iv, &crypto_bundle->iv,
crypto_bundle->key_version,
crypto_bundle->push_size, crypto_bundle->push_size,
auth_tag_cpu_va); auth_tag_cpu_va);
@ -558,7 +556,7 @@ NvU64 uvm_pushbuffer_get_gpu_va_for_push(uvm_pushbuffer_t *pushbuffer, uvm_push_
if (uvm_channel_is_wlc(push->channel) || uvm_channel_is_lcic(push->channel)) { if (uvm_channel_is_wlc(push->channel) || uvm_channel_is_lcic(push->channel)) {
// We need to use the same static locations for PB as the fixed // We need to use the same static locations for PB as the fixed
// schedule because that's what the channels are initialized to use. // schedule because that's what the channels are initialized to use.
return uvm_rm_mem_get_gpu_uvm_va(push->channel->conf_computing.static_pb_protected_vidmem, gpu); return uvm_channel_get_static_pb_protected_vidmem_gpu_va(push->channel);
} }
else if (uvm_channel_is_sec2(push->channel)) { else if (uvm_channel_is_sec2(push->channel)) {
// SEC2 PBs are in unprotected sysmem // SEC2 PBs are in unprotected sysmem
@ -575,7 +573,7 @@ void *uvm_pushbuffer_get_unprotected_cpu_va_for_push(uvm_pushbuffer_t *pushbuffe
if (uvm_channel_is_wlc(push->channel)) { if (uvm_channel_is_wlc(push->channel)) {
// Reuse existing WLC static pb for initialization // Reuse existing WLC static pb for initialization
UVM_ASSERT(!uvm_channel_manager_is_wlc_ready(push->channel->pool->manager)); UVM_ASSERT(!uvm_channel_manager_is_wlc_ready(push->channel->pool->manager));
return push->channel->conf_computing.static_pb_unprotected_sysmem_cpu; return uvm_channel_get_static_pb_unprotected_sysmem_cpu(push->channel);
} }
pushbuffer_base = uvm_rm_mem_get_cpu_va(pushbuffer->memory_unprotected_sysmem); pushbuffer_base = uvm_rm_mem_get_cpu_va(pushbuffer->memory_unprotected_sysmem);
@ -590,8 +588,8 @@ NvU64 uvm_pushbuffer_get_unprotected_gpu_va_for_push(uvm_pushbuffer_t *pushbuffe
if (uvm_channel_is_wlc(push->channel)) { if (uvm_channel_is_wlc(push->channel)) {
// Reuse existing WLC static pb for initialization // Reuse existing WLC static pb for initialization
UVM_ASSERT(!uvm_channel_manager_is_wlc_ready(push->channel->pool->manager)); UVM_ASSERT(!uvm_channel_manager_is_wlc_ready(push->channel->pool->manager));
return uvm_rm_mem_get_gpu_uvm_va(push->channel->conf_computing.static_pb_unprotected_sysmem,
uvm_push_get_gpu(push)); return uvm_channel_get_static_pb_unprotected_sysmem_gpu_va(push->channel);
} }
pushbuffer_base = uvm_rm_mem_get_gpu_uvm_va(pushbuffer->memory_unprotected_sysmem, uvm_push_get_gpu(push)); pushbuffer_base = uvm_rm_mem_get_gpu_uvm_va(pushbuffer->memory_unprotected_sysmem, uvm_push_get_gpu(push));


@ -322,6 +322,7 @@ static NV_STATUS cpu_decrypt(uvm_channel_t *channel,
uvm_mem_t *dst_mem, uvm_mem_t *dst_mem,
uvm_mem_t *src_mem, uvm_mem_t *src_mem,
UvmCslIv *decrypt_iv, UvmCslIv *decrypt_iv,
NvU32 key_version,
uvm_mem_t *auth_tag_mem, uvm_mem_t *auth_tag_mem,
size_t size, size_t size,
size_t copy_size) size_t copy_size)
@ -338,6 +339,7 @@ static NV_STATUS cpu_decrypt(uvm_channel_t *channel,
dst_plain, dst_plain,
src_cipher, src_cipher,
&decrypt_iv[i], &decrypt_iv[i],
key_version,
copy_size, copy_size,
auth_tag_buffer)); auth_tag_buffer));
@ -368,7 +370,7 @@ static void gpu_encrypt(uvm_push_t *push,
uvm_gpu_address_t auth_tag_address = uvm_mem_gpu_address_virtual_kernel(auth_tag_mem, gpu); uvm_gpu_address_t auth_tag_address = uvm_mem_gpu_address_virtual_kernel(auth_tag_mem, gpu);
for (i = 0; i < num_iterations; i++) { for (i = 0; i < num_iterations; i++) {
uvm_conf_computing_log_gpu_encryption(push->channel, decrypt_iv); uvm_conf_computing_log_gpu_encryption(push->channel, copy_size, decrypt_iv);
if (i > 0) if (i > 0)
uvm_push_set_flag(push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); uvm_push_set_flag(push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED);
@ -427,6 +429,7 @@ static NV_STATUS test_cpu_to_gpu_roundtrip(uvm_gpu_t *gpu, size_t copy_size, siz
size_t auth_tag_buffer_size = (size / copy_size) * UVM_CONF_COMPUTING_AUTH_TAG_SIZE; size_t auth_tag_buffer_size = (size / copy_size) * UVM_CONF_COMPUTING_AUTH_TAG_SIZE;
uvm_push_t push; uvm_push_t push;
UvmCslIv *decrypt_iv; UvmCslIv *decrypt_iv;
NvU32 key_version;
decrypt_iv = uvm_kvmalloc_zero((size / copy_size) * sizeof(UvmCslIv)); decrypt_iv = uvm_kvmalloc_zero((size / copy_size) * sizeof(UvmCslIv));
if (!decrypt_iv) if (!decrypt_iv)
@ -456,6 +459,11 @@ static NV_STATUS test_cpu_to_gpu_roundtrip(uvm_gpu_t *gpu, size_t copy_size, siz
gpu_encrypt(&push, dst_cipher, dst_plain, decrypt_iv, auth_tag_mem, size, copy_size); gpu_encrypt(&push, dst_cipher, dst_plain, decrypt_iv, auth_tag_mem, size, copy_size);
// There shouldn't be any key rotation between the end of the push and the
// CPU decryption(s), but it is more robust against test changes to force
// decryption to use the saved key.
key_version = uvm_channel_pool_key_version(push.channel->pool);
TEST_NV_CHECK_GOTO(uvm_push_end_and_wait(&push), out); TEST_NV_CHECK_GOTO(uvm_push_end_and_wait(&push), out);
TEST_CHECK_GOTO(!mem_match(src_plain, src_cipher), out); TEST_CHECK_GOTO(!mem_match(src_plain, src_cipher), out);
@ -465,6 +473,7 @@ static NV_STATUS test_cpu_to_gpu_roundtrip(uvm_gpu_t *gpu, size_t copy_size, siz
dst_plain_cpu, dst_plain_cpu,
dst_cipher, dst_cipher,
decrypt_iv, decrypt_iv,
key_version,
auth_tag_mem, auth_tag_mem,
size, size,
copy_size), copy_size),


@ -124,24 +124,23 @@ static NV_STATUS uvm_test_verify_bh_affinity(uvm_intr_handler_t *isr, int node)
static NV_STATUS uvm_test_numa_check_affinity(UVM_TEST_NUMA_CHECK_AFFINITY_PARAMS *params, struct file *filp) static NV_STATUS uvm_test_numa_check_affinity(UVM_TEST_NUMA_CHECK_AFFINITY_PARAMS *params, struct file *filp)
{ {
uvm_gpu_t *gpu; uvm_gpu_t *gpu;
NV_STATUS status; NV_STATUS status = NV_OK;
uvm_rm_user_object_t user_rm_va_space = {
.rm_control_fd = -1,
.user_client = params->client,
.user_object = params->smc_part_ref
};
if (!UVM_THREAD_AFFINITY_SUPPORTED()) if (!UVM_THREAD_AFFINITY_SUPPORTED())
return NV_ERR_NOT_SUPPORTED; return NV_ERR_NOT_SUPPORTED;
status = uvm_gpu_retain_by_uuid(&params->gpu_uuid, &user_rm_va_space, &gpu); uvm_mutex_lock(&g_uvm_global.global_lock);
if (status != NV_OK)
return status; gpu = uvm_gpu_get_by_uuid(&params->gpu_uuid);
if (!gpu) {
status = NV_ERR_INVALID_DEVICE;
goto unlock;
}
// If the GPU is not attached to a NUMA node, there is nothing to do. // If the GPU is not attached to a NUMA node, there is nothing to do.
if (gpu->parent->closest_cpu_numa_node == NUMA_NO_NODE) { if (gpu->parent->closest_cpu_numa_node == NUMA_NO_NODE) {
status = NV_ERR_NOT_SUPPORTED; status = NV_ERR_NOT_SUPPORTED;
goto release; goto unlock;
} }
if (gpu->parent->replayable_faults_supported) { if (gpu->parent->replayable_faults_supported) {
@ -150,7 +149,7 @@ static NV_STATUS uvm_test_numa_check_affinity(UVM_TEST_NUMA_CHECK_AFFINITY_PARAM
gpu->parent->closest_cpu_numa_node); gpu->parent->closest_cpu_numa_node);
uvm_parent_gpu_replayable_faults_isr_unlock(gpu->parent); uvm_parent_gpu_replayable_faults_isr_unlock(gpu->parent);
if (status != NV_OK) if (status != NV_OK)
goto release; goto unlock;
if (gpu->parent->non_replayable_faults_supported) { if (gpu->parent->non_replayable_faults_supported) {
uvm_parent_gpu_non_replayable_faults_isr_lock(gpu->parent); uvm_parent_gpu_non_replayable_faults_isr_lock(gpu->parent);
@ -158,7 +157,7 @@ static NV_STATUS uvm_test_numa_check_affinity(UVM_TEST_NUMA_CHECK_AFFINITY_PARAM
gpu->parent->closest_cpu_numa_node); gpu->parent->closest_cpu_numa_node);
uvm_parent_gpu_non_replayable_faults_isr_unlock(gpu->parent); uvm_parent_gpu_non_replayable_faults_isr_unlock(gpu->parent);
if (status != NV_OK) if (status != NV_OK)
goto release; goto unlock;
} }
if (gpu->parent->access_counters_supported) { if (gpu->parent->access_counters_supported) {
@ -168,8 +167,9 @@ static NV_STATUS uvm_test_numa_check_affinity(UVM_TEST_NUMA_CHECK_AFFINITY_PARAM
uvm_parent_gpu_access_counters_isr_unlock(gpu->parent); uvm_parent_gpu_access_counters_isr_unlock(gpu->parent);
} }
} }
release:
uvm_gpu_release(gpu); unlock:
uvm_mutex_unlock(&g_uvm_global.global_lock);
return status; return status;
} }


@ -347,20 +347,30 @@ typedef enum
UVM_TEST_CHANNEL_STRESS_MODE_NOOP_PUSH = 0, UVM_TEST_CHANNEL_STRESS_MODE_NOOP_PUSH = 0,
UVM_TEST_CHANNEL_STRESS_MODE_UPDATE_CHANNELS, UVM_TEST_CHANNEL_STRESS_MODE_UPDATE_CHANNELS,
UVM_TEST_CHANNEL_STRESS_MODE_STREAM, UVM_TEST_CHANNEL_STRESS_MODE_STREAM,
UVM_TEST_CHANNEL_STRESS_MODE_KEY_ROTATION,
} UVM_TEST_CHANNEL_STRESS_MODE; } UVM_TEST_CHANNEL_STRESS_MODE;
typedef enum
{
UVM_TEST_CHANNEL_STRESS_KEY_ROTATION_OPERATION_CPU_TO_GPU,
UVM_TEST_CHANNEL_STRESS_KEY_ROTATION_OPERATION_GPU_TO_CPU,
UVM_TEST_CHANNEL_STRESS_KEY_ROTATION_OPERATION_ROTATE,
} UVM_TEST_CHANNEL_STRESS_KEY_ROTATION_OPERATION;
#define UVM_TEST_CHANNEL_STRESS UVM_TEST_IOCTL_BASE(15) #define UVM_TEST_CHANNEL_STRESS UVM_TEST_IOCTL_BASE(15)
typedef struct typedef struct
{ {
NvU32 mode; // In NvU32 mode; // In, one of UVM_TEST_CHANNEL_STRESS_MODE
// Number of iterations: // Number of iterations:
// mode == NOOP_PUSH: number of noop pushes // mode == NOOP_PUSH: number of noop pushes
// mode == UPDATE_CHANNELS: number of updates // mode == UPDATE_CHANNELS: number of updates
// mode == STREAM: number of iterations per stream // mode == STREAM: number of iterations per stream
// mode == ROTATION: number of operations
NvU32 iterations; NvU32 iterations;
NvU32 num_streams; // In, used only for mode == UVM_TEST_CHANNEL_STRESS_MODE_STREAM NvU32 num_streams; // In, used only if mode == STREAM
NvU32 key_rotation_operation; // In, used only if mode == ROTATION
NvU32 seed; // In NvU32 seed; // In
NvU32 verbose; // In NvU32 verbose; // In
NV_STATUS rmStatus; // Out NV_STATUS rmStatus; // Out
@ -1210,8 +1220,6 @@ typedef struct
typedef struct typedef struct
{ {
NvProcessorUuid gpu_uuid; // In NvProcessorUuid gpu_uuid; // In
NvHandle client; // In
NvHandle smc_part_ref; // In
NV_STATUS rmStatus; // Out NV_STATUS rmStatus; // Out
} UVM_TEST_NUMA_CHECK_AFFINITY_PARAMS; } UVM_TEST_NUMA_CHECK_AFFINITY_PARAMS;
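A hedged example of driving the new key rotation stress mode through the UVM_TEST_CHANNEL_STRESS ioctl parameters above; the params struct name is an assumption, and only the fields and enum values shown in the diff are taken from it.

static void example_fill_key_rotation_stress_params(UVM_TEST_CHANNEL_STRESS_PARAMS *params) // assumed typedef name
{
    params->mode                   = UVM_TEST_CHANNEL_STRESS_MODE_KEY_ROTATION;
    params->key_rotation_operation = UVM_TEST_CHANNEL_STRESS_KEY_ROTATION_OPERATION_ROTATE;
    params->iterations             = 100; // number of rotate operations to issue
    params->num_streams            = 0;   // only consulted by the STREAM mode
    params->seed                   = 0;
    params->verbose                = 0;
}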


@ -725,8 +725,9 @@ bool uvm_va_block_cpu_is_region_resident_on(uvm_va_block_t *va_block, int nid, u
} }
// Return the preferred NUMA node ID for the block's policy. // Return the preferred NUMA node ID for the block's policy.
// If the preferred node ID is NUMA_NO_NODE, the current NUMA node ID // If the preferred node ID is NUMA_NO_NODE, the nearest NUMA node ID
// is returned. // with memory is returned. In most cases, this should be the current
// NUMA node.
static int uvm_va_block_context_get_node(uvm_va_block_context_t *va_block_context) static int uvm_va_block_context_get_node(uvm_va_block_context_t *va_block_context)
{ {
if (va_block_context->make_resident.dest_nid != NUMA_NO_NODE) if (va_block_context->make_resident.dest_nid != NUMA_NO_NODE)
@ -2070,6 +2071,7 @@ static NV_STATUS block_populate_pages_cpu(uvm_va_block_t *block,
uvm_page_mask_t *allocated_mask; uvm_page_mask_t *allocated_mask;
uvm_cpu_chunk_alloc_flags_t alloc_flags = UVM_CPU_CHUNK_ALLOC_FLAGS_NONE; uvm_cpu_chunk_alloc_flags_t alloc_flags = UVM_CPU_CHUNK_ALLOC_FLAGS_NONE;
uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); uvm_va_space_t *va_space = uvm_va_block_get_va_space(block);
const uvm_va_policy_t *policy = uvm_va_policy_get_region(block, populate_region);
uvm_page_index_t page_index; uvm_page_index_t page_index;
uvm_gpu_id_t id; uvm_gpu_id_t id;
int preferred_nid = block_context->make_resident.dest_nid; int preferred_nid = block_context->make_resident.dest_nid;
@ -2077,6 +2079,10 @@ static NV_STATUS block_populate_pages_cpu(uvm_va_block_t *block,
if (block_test && block_test->cpu_chunk_allocation_target_id != NUMA_NO_NODE) if (block_test && block_test->cpu_chunk_allocation_target_id != NUMA_NO_NODE)
preferred_nid = block_test->cpu_chunk_allocation_target_id; preferred_nid = block_test->cpu_chunk_allocation_target_id;
// If the VA range has a preferred NUMA node, use it.
if (preferred_nid == NUMA_NO_NODE)
preferred_nid = policy->preferred_nid;
// TODO: Bug 4158598: Using NUMA_NO_NODE for staging allocations is sub-optimal. // TODO: Bug 4158598: Using NUMA_NO_NODE for staging allocations is sub-optimal.
if (preferred_nid != NUMA_NO_NODE) { if (preferred_nid != NUMA_NO_NODE) {
uvm_va_block_cpu_node_state_t *node_state = block_node_state_get(block, preferred_nid); uvm_va_block_cpu_node_state_t *node_state = block_node_state_get(block, preferred_nid);
@ -2127,13 +2133,12 @@ static NV_STATUS block_populate_pages_cpu(uvm_va_block_t *block,
uvm_page_mask_t *node_pages_mask = &block_context->make_resident.node_pages_mask; uvm_page_mask_t *node_pages_mask = &block_context->make_resident.node_pages_mask;
uvm_chunk_sizes_mask_t allocation_sizes; uvm_chunk_sizes_mask_t allocation_sizes;
if (uvm_page_mask_test(allocated_mask, page_index)) { if (uvm_page_mask_test(allocated_mask, page_index) ||
uvm_va_block_cpu_is_page_resident_on(block, preferred_nid, page_index)) {
page_index = uvm_va_block_next_unset_page_in_mask(populate_region, allocated_mask, page_index) - 1; page_index = uvm_va_block_next_unset_page_in_mask(populate_region, allocated_mask, page_index) - 1;
continue; continue;
} }
UVM_ASSERT(!uvm_va_block_cpu_is_page_resident_on(block, preferred_nid, page_index));
allocation_sizes = block_calculate_largest_alloc_size(block, allocation_sizes = block_calculate_largest_alloc_size(block,
page_index, page_index,
allocated_mask, allocated_mask,
@ -3843,6 +3848,7 @@ static void conf_computing_block_copy_push_gpu_to_cpu(uvm_va_block_t *block,
uvm_gpu_address_t staging_buffer = uvm_mem_gpu_address_virtual_kernel(dma_buffer->alloc, gpu); uvm_gpu_address_t staging_buffer = uvm_mem_gpu_address_virtual_kernel(dma_buffer->alloc, gpu);
uvm_gpu_address_t auth_tag_buffer = uvm_mem_gpu_address_virtual_kernel(dma_buffer->auth_tag, gpu); uvm_gpu_address_t auth_tag_buffer = uvm_mem_gpu_address_virtual_kernel(dma_buffer->auth_tag, gpu);
uvm_gpu_address_t src_address = block_copy_get_address(block, &copy_state->src, page_index, gpu); uvm_gpu_address_t src_address = block_copy_get_address(block, &copy_state->src, page_index, gpu);
NvU32 key_version = uvm_channel_pool_key_version(push->channel->pool);
UVM_ASSERT(UVM_ID_IS_GPU(copy_state->src.id)); UVM_ASSERT(UVM_ID_IS_GPU(copy_state->src.id));
UVM_ASSERT(UVM_ID_IS_CPU(copy_state->dst.id)); UVM_ASSERT(UVM_ID_IS_CPU(copy_state->dst.id));
@ -3860,7 +3866,8 @@ static void conf_computing_block_copy_push_gpu_to_cpu(uvm_va_block_t *block,
// crypto-operations and it only guarantees PAGE_SIZE contiguity, all // crypto-operations and it only guarantees PAGE_SIZE contiguity, all
// encryptions and decryptions must happen on a PAGE_SIZE basis. // encryptions and decryptions must happen on a PAGE_SIZE basis.
for_each_va_block_page_in_region(page_index, region) { for_each_va_block_page_in_region(page_index, region) {
uvm_conf_computing_log_gpu_encryption(push->channel, &dma_buffer->decrypt_iv[page_index]); uvm_conf_computing_log_gpu_encryption(push->channel, PAGE_SIZE, &dma_buffer->decrypt_iv[page_index]);
dma_buffer->key_version[page_index] = key_version;
// All but the first encryption can be pipelined. The first encryption // All but the first encryption can be pipelined. The first encryption
// uses the caller's pipelining settings. // uses the caller's pipelining settings.
@ -3919,7 +3926,8 @@ static NV_STATUS conf_computing_copy_pages_finish(uvm_va_block_t *block,
status = uvm_conf_computing_cpu_decrypt(push->channel, status = uvm_conf_computing_cpu_decrypt(push->channel,
cpu_page_address, cpu_page_address,
staging_buffer, staging_buffer,
&dma_buffer->decrypt_iv[page_index], dma_buffer->decrypt_iv + page_index,
dma_buffer->key_version[page_index],
PAGE_SIZE, PAGE_SIZE,
auth_tag_buffer); auth_tag_buffer);
kunmap(dst_page); kunmap(dst_page);
@ -4037,7 +4045,7 @@ static NV_STATUS block_copy_pages(uvm_va_block_t *va_block,
UVM_ASSERT(dst_chunk); UVM_ASSERT(dst_chunk);
UVM_ASSERT(uvm_cpu_chunk_get_size(src_chunk) >= uvm_va_block_region_size(region)); UVM_ASSERT(uvm_cpu_chunk_get_size(src_chunk) >= uvm_va_block_region_size(region));
UVM_ASSERT(uvm_cpu_chunk_get_size(src_chunk) <= uvm_cpu_chunk_get_size(dst_chunk)); UVM_ASSERT(uvm_va_block_region_size(region) <= uvm_cpu_chunk_get_size(dst_chunk));
// CPU-to-CPU copies using memcpy() don't have any inherent ordering with // CPU-to-CPU copies using memcpy() don't have any inherent ordering with
// copies using GPU CEs. So, we have to make sure that all previously // copies using GPU CEs. So, we have to make sure that all previously
@ -5132,7 +5140,7 @@ NV_STATUS uvm_va_block_make_resident_read_duplicate(uvm_va_block_t *va_block,
uvm_page_mask_t *dst_resident_mask; uvm_page_mask_t *dst_resident_mask;
uvm_page_mask_t *migrated_pages; uvm_page_mask_t *migrated_pages;
uvm_page_mask_t *staged_pages; uvm_page_mask_t *staged_pages;
uvm_page_mask_t *first_touch_mask; uvm_page_mask_t *scratch_residency_mask;
// TODO: Bug 3660922: need to implement HMM read duplication support. // TODO: Bug 3660922: need to implement HMM read duplication support.
UVM_ASSERT(!uvm_va_block_is_hmm(va_block)); UVM_ASSERT(!uvm_va_block_is_hmm(va_block));
@ -5151,6 +5159,10 @@ NV_STATUS uvm_va_block_make_resident_read_duplicate(uvm_va_block_t *va_block,
uvm_assert_mutex_locked(&va_block->lock); uvm_assert_mutex_locked(&va_block->lock);
UVM_ASSERT(!uvm_va_block_is_dead(va_block)); UVM_ASSERT(!uvm_va_block_is_dead(va_block));
scratch_residency_mask = kmem_cache_alloc(g_uvm_page_mask_cache, NV_UVM_GFP_FLAGS);
if (!scratch_residency_mask)
return NV_ERR_NO_MEMORY;
// For pages that are entering read-duplication we need to unmap remote // For pages that are entering read-duplication we need to unmap remote
// mappings and revoke RW and higher access permissions. // mappings and revoke RW and higher access permissions.
// //
@ -5177,12 +5189,12 @@ NV_STATUS uvm_va_block_make_resident_read_duplicate(uvm_va_block_t *va_block,
status = block_prep_read_duplicate_mapping(va_block, va_block_context, src_id, region, preprocess_page_mask); status = block_prep_read_duplicate_mapping(va_block, va_block_context, src_id, region, preprocess_page_mask);
if (status != NV_OK) if (status != NV_OK)
return status; goto out;
} }
status = block_populate_pages(va_block, va_block_retry, va_block_context, dest_id, region, page_mask); status = block_populate_pages(va_block, va_block_retry, va_block_context, dest_id, region, page_mask);
if (status != NV_OK) if (status != NV_OK)
return status; goto out;
status = block_copy_resident_pages(va_block, status = block_copy_resident_pages(va_block,
va_block_context, va_block_context,
@ -5192,22 +5204,17 @@ NV_STATUS uvm_va_block_make_resident_read_duplicate(uvm_va_block_t *va_block,
prefetch_page_mask, prefetch_page_mask,
UVM_VA_BLOCK_TRANSFER_MODE_COPY); UVM_VA_BLOCK_TRANSFER_MODE_COPY);
if (status != NV_OK) if (status != NV_OK)
return status; goto out;
// Pages that weren't resident anywhere else were populated at the // Pages that weren't resident anywhere else were populated at the
// destination directly. Mark them as resident now, since there were no // destination directly. Mark them as resident now, since there were no
// errors from block_copy_resident_pages() above. // errors from block_copy_resident_pages() above.
// Note that va_block_context->scratch_page_mask is passed to
// block_copy_set_first_touch_residency() which is generally unsafe but in
// this case, block_copy_set_first_touch_residency() copies page_mask
// before scratch_page_mask could be clobbered.
migrated_pages = &va_block_context->make_resident.pages_migrated; migrated_pages = &va_block_context->make_resident.pages_migrated;
first_touch_mask = &va_block_context->scratch_page_mask; uvm_page_mask_init_from_region(scratch_residency_mask, region, page_mask);
uvm_page_mask_init_from_region(first_touch_mask, region, page_mask); uvm_page_mask_andnot(scratch_residency_mask, scratch_residency_mask, migrated_pages);
uvm_page_mask_andnot(first_touch_mask, first_touch_mask, migrated_pages);
if (!uvm_page_mask_empty(first_touch_mask)) if (!uvm_page_mask_empty(scratch_residency_mask))
block_copy_set_first_touch_residency(va_block, va_block_context, dest_id, region, first_touch_mask); block_copy_set_first_touch_residency(va_block, va_block_context, dest_id, region, scratch_residency_mask);
staged_pages = &va_block_context->make_resident.pages_staged; staged_pages = &va_block_context->make_resident.pages_staged;
if (!UVM_ID_IS_CPU(dest_id) && !uvm_page_mask_empty(staged_pages)) { if (!UVM_ID_IS_CPU(dest_id) && !uvm_page_mask_empty(staged_pages)) {
@ -5219,6 +5226,18 @@ NV_STATUS uvm_va_block_make_resident_read_duplicate(uvm_va_block_t *va_block,
if (!uvm_page_mask_empty(migrated_pages)) { if (!uvm_page_mask_empty(migrated_pages)) {
if (UVM_ID_IS_CPU(dest_id)) { if (UVM_ID_IS_CPU(dest_id)) {
// Check if the CPU is already in the resident set of processors.
// We need to do this since we can't have multiple NUMA nodes with
// resident pages.
// If any of the migrate pages were already resident on the CPU, the
// residency has to be switched to the destination NUMA node.
if (uvm_processor_mask_test(&va_block->resident, UVM_ID_CPU) &&
uvm_page_mask_and(scratch_residency_mask,
uvm_va_block_resident_mask_get(va_block, UVM_ID_CPU, NUMA_NO_NODE),
migrated_pages)) {
uvm_va_block_cpu_clear_resident_all_chunks(va_block, va_block_context, scratch_residency_mask);
}
uvm_va_block_cpu_set_resident_all_chunks(va_block, va_block_context, migrated_pages); uvm_va_block_cpu_set_resident_all_chunks(va_block, va_block_context, migrated_pages);
} }
else { else {
@ -5247,7 +5266,9 @@ NV_STATUS uvm_va_block_make_resident_read_duplicate(uvm_va_block_t *va_block,
// Check state of all chunks after residency change. // Check state of all chunks after residency change.
// TODO: Bug 4207783: Check both CPU and GPU chunks. // TODO: Bug 4207783: Check both CPU and GPU chunks.
UVM_ASSERT(block_check_cpu_chunks(va_block)); UVM_ASSERT(block_check_cpu_chunks(va_block));
return NV_OK; out:
kmem_cache_free(g_uvm_page_mask_cache, scratch_residency_mask);
return status;
} }
// Looks up the current CPU mapping state of page from the // Looks up the current CPU mapping state of page from the
@ -5532,13 +5553,15 @@ static bool block_check_mappings_page(uvm_va_block_t *block,
*block->read_duplicated_pages.bitmap); *block->read_duplicated_pages.bitmap);
// Test read_duplicated_pages mask // Test read_duplicated_pages mask
UVM_ASSERT_MSG((uvm_processor_mask_get_count(resident_processors) <= 1 && UVM_ASSERT_MSG((!uvm_page_mask_test(&block->read_duplicated_pages, page_index) &&
!uvm_page_mask_test(&block->read_duplicated_pages, page_index)) || uvm_processor_mask_get_count(resident_processors) <= 1) ||
(uvm_processor_mask_get_count(resident_processors) > 1 && (uvm_page_mask_test(&block->read_duplicated_pages, page_index) &&
uvm_page_mask_test(&block->read_duplicated_pages, page_index)), uvm_processor_mask_get_count(resident_processors) >= 1),
"Resident: 0x%lx - Mappings R: 0x%lx W: 0x%lx A: 0x%lx - SWA: 0x%lx - RD: 0x%lx\n", "Resident: 0x%lx - Mappings R: 0x%lx W: 0x%lx A: 0x%lx - SWA: 0x%lx - RD: 0x%lx\n",
*resident_processors->bitmap, *resident_processors->bitmap,
*read_mappings->bitmap, *write_mappings->bitmap, *atomic_mappings->bitmap, *read_mappings->bitmap,
*write_mappings->bitmap,
*atomic_mappings->bitmap,
*va_space->system_wide_atomics_enabled_processors.bitmap, *va_space->system_wide_atomics_enabled_processors.bitmap,
*block->read_duplicated_pages.bitmap); *block->read_duplicated_pages.bitmap);
@ -6022,7 +6045,7 @@ static bool block_has_remote_mapping_gpu(uvm_va_block_t *block,
if (uvm_page_mask_empty(mapped_pages)) if (uvm_page_mask_empty(mapped_pages))
return false; return false;
return !uvm_id_equal(uvm_va_range_get_policy(block->va_range)->preferred_location, gpu_id); return !uvm_va_policy_preferred_location_equal(uvm_va_range_get_policy(block->va_range), gpu_id, NUMA_NO_NODE);
} }
// Remote pages are pages which are mapped but not resident locally // Remote pages are pages which are mapped but not resident locally
@ -8365,6 +8388,7 @@ static NV_STATUS block_map_gpu_to(uvm_va_block_t *va_block,
uvm_va_block_context_t *block_context, uvm_va_block_context_t *block_context,
uvm_gpu_t *gpu, uvm_gpu_t *gpu,
uvm_processor_id_t resident_id, uvm_processor_id_t resident_id,
int resident_nid,
uvm_page_mask_t *map_page_mask, uvm_page_mask_t *map_page_mask,
uvm_prot_t new_prot, uvm_prot_t new_prot,
uvm_tracker_t *out_tracker) uvm_tracker_t *out_tracker)
@ -8374,7 +8398,7 @@ static NV_STATUS block_map_gpu_to(uvm_va_block_t *va_block,
uvm_push_t push; uvm_push_t push;
NV_STATUS status; NV_STATUS status;
uvm_page_mask_t *pages_to_map = &block_context->mapping.page_mask; uvm_page_mask_t *pages_to_map = &block_context->mapping.page_mask;
const uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, resident_id, NUMA_NO_NODE); const uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, resident_id, resident_nid);
uvm_pte_bits_gpu_t pte_bit; uvm_pte_bits_gpu_t pte_bit;
uvm_pte_bits_gpu_t prot_pte_bit = get_gpu_pte_bit_index(new_prot); uvm_pte_bits_gpu_t prot_pte_bit = get_gpu_pte_bit_index(new_prot);
uvm_va_block_new_pte_state_t *new_pte_state = &block_context->mapping.new_pte_state; uvm_va_block_new_pte_state_t *new_pte_state = &block_context->mapping.new_pte_state;
@ -8383,8 +8407,10 @@ static NV_STATUS block_map_gpu_to(uvm_va_block_t *va_block,
UVM_ASSERT(map_page_mask); UVM_ASSERT(map_page_mask);
UVM_ASSERT(uvm_processor_mask_test(&va_space->accessible_from[uvm_id_value(resident_id)], gpu->id)); UVM_ASSERT(uvm_processor_mask_test(&va_space->accessible_from[uvm_id_value(resident_id)], gpu->id));
if (uvm_processor_mask_test(block_get_uvm_lite_gpus(va_block), gpu->id)) if (uvm_processor_mask_test(block_get_uvm_lite_gpus(va_block), gpu->id)) {
UVM_ASSERT(uvm_id_equal(resident_id, uvm_va_range_get_policy(va_block->va_range)->preferred_location)); uvm_va_policy_t *policy = uvm_va_range_get_policy(va_block->va_range);
UVM_ASSERT(uvm_va_policy_preferred_location_equal(policy, resident_id, policy->preferred_nid));
}
UVM_ASSERT(!uvm_page_mask_and(&block_context->scratch_page_mask, UVM_ASSERT(!uvm_page_mask_and(&block_context->scratch_page_mask,
map_page_mask, map_page_mask,
@ -8486,18 +8512,27 @@ static NV_STATUS block_map_gpu_to(uvm_va_block_t *va_block,
return uvm_tracker_add_push_safe(out_tracker, &push); return uvm_tracker_add_push_safe(out_tracker, &push);
} }
// allowed_nid_mask is only valid if the CPU is set in allowed_mask.
static void map_get_allowed_destinations(uvm_va_block_t *block, static void map_get_allowed_destinations(uvm_va_block_t *block,
uvm_va_block_context_t *va_block_context, uvm_va_block_context_t *va_block_context,
const uvm_va_policy_t *policy, const uvm_va_policy_t *policy,
uvm_processor_id_t id, uvm_processor_id_t id,
uvm_processor_mask_t *allowed_mask) uvm_processor_mask_t *allowed_mask,
nodemask_t *allowed_nid_mask)
{ {
uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); uvm_va_space_t *va_space = uvm_va_block_get_va_space(block);
*allowed_nid_mask = node_possible_map;
if (uvm_processor_mask_test(block_get_uvm_lite_gpus(block), id)) { if (uvm_processor_mask_test(block_get_uvm_lite_gpus(block), id)) {
// UVM-Lite can only map resident pages on the preferred location // UVM-Lite can only map resident pages on the preferred location
uvm_processor_mask_zero(allowed_mask); uvm_processor_mask_zero(allowed_mask);
uvm_processor_mask_set(allowed_mask, policy->preferred_location); uvm_processor_mask_set(allowed_mask, policy->preferred_location);
if (UVM_ID_IS_CPU(policy->preferred_location) &&
!uvm_va_policy_preferred_location_equal(policy, UVM_ID_CPU, NUMA_NO_NODE)) {
nodes_clear(*allowed_nid_mask);
node_set(policy->preferred_nid, *allowed_nid_mask);
}
} }
else if ((uvm_va_policy_is_read_duplicate(policy, va_space) || else if ((uvm_va_policy_is_read_duplicate(policy, va_space) ||
(uvm_id_equal(policy->preferred_location, id) && (uvm_id_equal(policy->preferred_location, id) &&
@ -8540,6 +8575,7 @@ NV_STATUS uvm_va_block_map(uvm_va_block_t *va_block,
uvm_page_mask_t *running_page_mask = &va_block_context->mapping.map_running_page_mask; uvm_page_mask_t *running_page_mask = &va_block_context->mapping.map_running_page_mask;
NV_STATUS status = NV_OK; NV_STATUS status = NV_OK;
const uvm_va_policy_t *policy = uvm_va_policy_get_region(va_block, region); const uvm_va_policy_t *policy = uvm_va_policy_get_region(va_block, region);
nodemask_t *allowed_nid_destinations;
va_block_context->mapping.cause = cause; va_block_context->mapping.cause = cause;
@ -8589,10 +8625,20 @@ NV_STATUS uvm_va_block_map(uvm_va_block_t *va_block,
if (!allowed_destinations) if (!allowed_destinations)
return NV_ERR_NO_MEMORY; return NV_ERR_NO_MEMORY;
allowed_nid_destinations = uvm_kvmalloc(sizeof(*allowed_nid_destinations));
if (!allowed_nid_destinations) {
uvm_processor_mask_cache_free(allowed_destinations);
return NV_ERR_NO_MEMORY;
}
// Map per resident location so we can more easily detect physically- // Map per resident location so we can more easily detect physically-
// contiguous mappings. // contiguous mappings.
map_get_allowed_destinations(va_block, va_block_context, policy, id, allowed_destinations); map_get_allowed_destinations(va_block,
va_block_context,
policy,
id,
allowed_destinations,
allowed_nid_destinations);
for_each_closest_id(resident_id, allowed_destinations, id, va_space) { for_each_closest_id(resident_id, allowed_destinations, id, va_space) {
if (UVM_ID_IS_CPU(id)) { if (UVM_ID_IS_CPU(id)) {
status = block_map_cpu_to(va_block, status = block_map_cpu_to(va_block,
@ -8603,11 +8649,30 @@ NV_STATUS uvm_va_block_map(uvm_va_block_t *va_block,
new_prot, new_prot,
out_tracker); out_tracker);
} }
else if (UVM_ID_IS_CPU(resident_id)) {
int nid;
// map_get_allowed_destinations() will set the mask of CPU NUMA
// nodes that should be mapped.
for_each_node_mask(nid, *allowed_nid_destinations) {
status = block_map_gpu_to(va_block,
va_block_context,
gpu,
resident_id,
nid,
running_page_mask,
new_prot,
out_tracker);
if (status != NV_OK)
break;
}
}
else { else {
status = block_map_gpu_to(va_block, status = block_map_gpu_to(va_block,
va_block_context, va_block_context,
gpu, gpu,
resident_id, resident_id,
NUMA_NO_NODE,
running_page_mask, running_page_mask,
new_prot, new_prot,
out_tracker); out_tracker);
@ -8622,6 +8687,7 @@ NV_STATUS uvm_va_block_map(uvm_va_block_t *va_block,
} }
uvm_processor_mask_cache_free(allowed_destinations); uvm_processor_mask_cache_free(allowed_destinations);
uvm_kvfree(allowed_nid_destinations);
return status; return status;
} }
@ -11175,8 +11241,8 @@ NV_STATUS uvm_va_block_add_mappings_after_migration(uvm_va_block_t *va_block,
// so uvm_va_block_map will be a no-op. // so uvm_va_block_map will be a no-op.
uvm_processor_mask_and(map_uvm_lite_gpus, map_other_processors, block_get_uvm_lite_gpus(va_block)); uvm_processor_mask_and(map_uvm_lite_gpus, map_other_processors, block_get_uvm_lite_gpus(va_block));
if (!uvm_processor_mask_empty(map_uvm_lite_gpus) && if (!uvm_processor_mask_empty(map_uvm_lite_gpus) &&
uvm_id_equal(new_residency, preferred_location)) { uvm_va_policy_preferred_location_equal(policy, new_residency, va_block_context->make_resident.dest_nid)) {
for_each_id_in_mask(map_processor_id, map_uvm_lite_gpus) { for_each_id_in_mask (map_processor_id, map_uvm_lite_gpus) {
status = uvm_va_block_map(va_block, status = uvm_va_block_map(va_block,
va_block_context, va_block_context,
map_processor_id, map_processor_id,
@ -11637,6 +11703,10 @@ static int block_select_node_residency(uvm_va_block_t *va_block,
// For GPU faults, the bottom half is pinned to CPUs closest to their GPU. // For GPU faults, the bottom half is pinned to CPUs closest to their GPU.
// Therefore, in both cases, we can use numa_mem_id() to get the NUMA node // Therefore, in both cases, we can use numa_mem_id() to get the NUMA node
// ID of the faulting processor. // ID of the faulting processor.
// Note that numa_mem_id() returns the nearest node with memory. In most
// cases, this will be the current NUMA node. However, in the case that the
// current node does not have any memory, we probably want the nearest node
// with memory, anyway.
int current_nid = numa_mem_id(); int current_nid = numa_mem_id();
bool may_read_duplicate = can_read_duplicate(va_block, page_index, policy, thrashing_hint); bool may_read_duplicate = can_read_duplicate(va_block, page_index, policy, thrashing_hint);
@ -11660,7 +11730,12 @@ static int block_select_node_residency(uvm_va_block_t *va_block,
// If read duplication is enabled and the page is also resident on the CPU, // If read duplication is enabled and the page is also resident on the CPU,
// keep its current NUMA node residency. // keep its current NUMA node residency.
if (may_read_duplicate && uvm_va_block_cpu_is_page_resident_on(va_block, NUMA_NO_NODE, page_index)) if (may_read_duplicate && uvm_va_block_cpu_is_page_resident_on(va_block, NUMA_NO_NODE, page_index))
return block_get_page_node_residency(va_block, page_index); return NUMA_NO_NODE;
// The new_residency processor is the CPU and the preferred location is not
// the CPU. If the page is resident on the CPU, keep its current residency.
if (uvm_va_block_cpu_is_page_resident_on(va_block, NUMA_NO_NODE, page_index))
return NUMA_NO_NODE;
return current_nid; return current_nid;
} }
@ -12564,125 +12639,6 @@ NV_STATUS uvm_va_block_find_create(uvm_va_space_t *va_space,
return uvm_hmm_va_block_find_create(va_space, addr, hmm_vma, out_block); return uvm_hmm_va_block_find_create(va_space, addr, hmm_vma, out_block);
} }
// Launch a synchronous, encrypted copy between GPU and CPU.
//
// The copy entails a GPU-side encryption (relying on the Copy Engine), and a
// CPU-side decryption step, such that the destination CPU buffer pointed by
// dst_plain will contain the unencrypted (plain text) contents. The destination
// buffer can be in protected or unprotected sysmem, while the source buffer
// must be in protected vidmem.
//
// The maximum copy size allowed is UVM_CONF_COMPUTING_DMA_BUFFER_SIZE.
//
// The input tracker, if not NULL, is internally acquired by the push
// responsible for the encrypted copy.
__attribute__ ((format(printf, 6, 7)))
static NV_STATUS encrypted_memcopy_gpu_to_cpu(uvm_gpu_t *gpu,
void *dst_plain,
uvm_gpu_address_t src_gpu_address,
size_t size,
uvm_tracker_t *tracker,
const char *format,
...)
{
NV_STATUS status;
UvmCslIv decrypt_iv;
uvm_push_t push;
uvm_conf_computing_dma_buffer_t *dma_buffer;
uvm_gpu_address_t dst_gpu_address, auth_tag_gpu_address;
void *src_cipher, *auth_tag;
va_list args;
UVM_ASSERT(g_uvm_global.conf_computing_enabled);
UVM_ASSERT(size <= UVM_CONF_COMPUTING_DMA_BUFFER_SIZE);
status = uvm_conf_computing_dma_buffer_alloc(&gpu->conf_computing.dma_buffer_pool, &dma_buffer, NULL);
if (status != NV_OK)
return status;
va_start(args, format);
status = uvm_push_begin_acquire(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_TO_CPU, tracker, &push, format, args);
va_end(args);
if (status != NV_OK)
goto out;
uvm_conf_computing_log_gpu_encryption(push.channel, &decrypt_iv);
dst_gpu_address = uvm_mem_gpu_address_virtual_kernel(dma_buffer->alloc, gpu);
auth_tag_gpu_address = uvm_mem_gpu_address_virtual_kernel(dma_buffer->auth_tag, gpu);
gpu->parent->ce_hal->encrypt(&push, dst_gpu_address, src_gpu_address, size, auth_tag_gpu_address);
status = uvm_push_end_and_wait(&push);
if (status != NV_OK)
goto out;
src_cipher = uvm_mem_get_cpu_addr_kernel(dma_buffer->alloc);
auth_tag = uvm_mem_get_cpu_addr_kernel(dma_buffer->auth_tag);
status = uvm_conf_computing_cpu_decrypt(push.channel, dst_plain, src_cipher, &decrypt_iv, size, auth_tag);
out:
uvm_conf_computing_dma_buffer_free(&gpu->conf_computing.dma_buffer_pool, dma_buffer, NULL);
return status;
}
// Launch a synchronous, encrypted copy between CPU and GPU.
//
// The source CPU buffer pointed by src_plain contains the unencrypted (plain
// text) contents; the function internally performs a CPU-side encryption step
// before launching the GPU-side CE decryption. The source buffer can be in
// protected or unprotected sysmem, while the destination buffer must be in
// protected vidmem.
//
// The maximum copy size allowed is UVM_CONF_COMPUTING_DMA_BUFFER_SIZE.
//
// The input tracker, if not NULL, is internally acquired by the push
// responsible for the encrypted copy.
__attribute__ ((format(printf, 6, 7)))
static NV_STATUS encrypted_memcopy_cpu_to_gpu(uvm_gpu_t *gpu,
uvm_gpu_address_t dst_gpu_address,
void *src_plain,
size_t size,
uvm_tracker_t *tracker,
const char *format,
...)
{
NV_STATUS status;
uvm_push_t push;
uvm_conf_computing_dma_buffer_t *dma_buffer;
uvm_gpu_address_t src_gpu_address, auth_tag_gpu_address;
void *dst_cipher, *auth_tag;
va_list args;
UVM_ASSERT(g_uvm_global.conf_computing_enabled);
UVM_ASSERT(size <= UVM_CONF_COMPUTING_DMA_BUFFER_SIZE);
status = uvm_conf_computing_dma_buffer_alloc(&gpu->conf_computing.dma_buffer_pool, &dma_buffer, NULL);
if (status != NV_OK)
return status;
va_start(args, format);
status = uvm_push_begin_acquire(gpu->channel_manager, UVM_CHANNEL_TYPE_CPU_TO_GPU, tracker, &push, format, args);
va_end(args);
if (status != NV_OK)
goto out;
dst_cipher = uvm_mem_get_cpu_addr_kernel(dma_buffer->alloc);
auth_tag = uvm_mem_get_cpu_addr_kernel(dma_buffer->auth_tag);
uvm_conf_computing_cpu_encrypt(push.channel, dst_cipher, src_plain, NULL, size, auth_tag);
src_gpu_address = uvm_mem_gpu_address_virtual_kernel(dma_buffer->alloc, gpu);
auth_tag_gpu_address = uvm_mem_gpu_address_virtual_kernel(dma_buffer->auth_tag, gpu);
gpu->parent->ce_hal->decrypt(&push, dst_gpu_address, src_gpu_address, size, auth_tag_gpu_address);
status = uvm_push_end_and_wait(&push);
out:
uvm_conf_computing_dma_buffer_free(&gpu->conf_computing.dma_buffer_pool, dma_buffer, NULL);
return status;
}
static NV_STATUS va_block_write_cpu_to_gpu(uvm_va_block_t *va_block, static NV_STATUS va_block_write_cpu_to_gpu(uvm_va_block_t *va_block,
uvm_gpu_t *gpu, uvm_gpu_t *gpu,
uvm_gpu_address_t dst_gpu_address, uvm_gpu_address_t dst_gpu_address,
@ -12695,7 +12651,7 @@ static NV_STATUS va_block_write_cpu_to_gpu(uvm_va_block_t *va_block,
uvm_gpu_address_t src_gpu_address; uvm_gpu_address_t src_gpu_address;
if (g_uvm_global.conf_computing_enabled) { if (g_uvm_global.conf_computing_enabled) {
return encrypted_memcopy_cpu_to_gpu(gpu, return uvm_conf_computing_util_memcopy_cpu_to_gpu(gpu,
dst_gpu_address, dst_gpu_address,
uvm_mem_get_cpu_addr_kernel(src_mem), uvm_mem_get_cpu_addr_kernel(src_mem),
size, size,
@ -12799,7 +12755,7 @@ static NV_STATUS va_block_read_gpu_to_cpu(uvm_va_block_t *va_block,
uvm_gpu_address_t dst_gpu_address; uvm_gpu_address_t dst_gpu_address;
if (g_uvm_global.conf_computing_enabled) { if (g_uvm_global.conf_computing_enabled) {
return encrypted_memcopy_gpu_to_cpu(gpu, return uvm_conf_computing_util_memcopy_gpu_to_cpu(gpu,
uvm_mem_get_cpu_addr_kernel(dst_mem), uvm_mem_get_cpu_addr_kernel(dst_mem),
src_gpu_address, src_gpu_address,
size, size,


@ -105,6 +105,12 @@ bool uvm_va_policy_preferred_location_equal(const uvm_va_policy_t *policy, uvm_p
{ {
bool equal = uvm_id_equal(policy->preferred_location, proc); bool equal = uvm_id_equal(policy->preferred_location, proc);
if (!UVM_ID_IS_CPU(policy->preferred_location))
UVM_ASSERT(policy->preferred_nid == NUMA_NO_NODE);
if (!UVM_ID_IS_CPU(proc))
UVM_ASSERT(cpu_numa_id == NUMA_NO_NODE);
if (equal && UVM_ID_IS_CPU(policy->preferred_location)) if (equal && UVM_ID_IS_CPU(policy->preferred_location))
equal = uvm_numa_id_eq(policy->preferred_nid, cpu_numa_id); equal = uvm_numa_id_eq(policy->preferred_nid, cpu_numa_id);
@ -656,7 +662,7 @@ const uvm_va_policy_t *uvm_va_policy_set_preferred_location(uvm_va_block_t *va_b
// and that the policy is changing. // and that the policy is changing.
UVM_ASSERT(node->node.start >= start); UVM_ASSERT(node->node.start >= start);
UVM_ASSERT(node->node.end <= end); UVM_ASSERT(node->node.end <= end);
UVM_ASSERT(!uvm_id_equal(node->policy.preferred_location, processor_id)); UVM_ASSERT(!uvm_va_policy_preferred_location_equal(&node->policy, processor_id, cpu_node_id));
} }
node->policy.preferred_location = processor_id; node->policy.preferred_location = processor_id;
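A hedged usage sketch (not from this commit) of the NUMA-aware comparison above: the third argument is a CPU NUMA node ID, and NUMA_NO_NODE matches only a policy whose preferred_nid is itself NUMA_NO_NODE now that uvm_numa_id_eq() no longer special-cases single-node systems.

static bool example_policy_prefers_cpu_node(const uvm_va_policy_t *policy, int nid)
{
    return uvm_va_policy_preferred_location_equal(policy, UVM_ID_CPU, nid);
}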


@ -868,9 +868,9 @@ static void uvm_va_range_disable_peer_managed(uvm_va_range_t *va_range, uvm_gpu_
// preferred location. If peer mappings are being disabled to the // preferred location. If peer mappings are being disabled to the
// preferred location, then unmap the other GPU. // preferred location, then unmap the other GPU.
// Nothing to do otherwise. // Nothing to do otherwise.
if (uvm_id_equal(uvm_va_range_get_policy(va_range)->preferred_location, gpu0->id)) if (uvm_va_policy_preferred_location_equal(uvm_va_range_get_policy(va_range), gpu0->id, NUMA_NO_NODE))
uvm_lite_gpu_to_unmap = gpu1; uvm_lite_gpu_to_unmap = gpu1;
else if (uvm_id_equal(uvm_va_range_get_policy(va_range)->preferred_location, gpu1->id)) else if (uvm_va_policy_preferred_location_equal(uvm_va_range_get_policy(va_range), gpu1->id, NUMA_NO_NODE))
uvm_lite_gpu_to_unmap = gpu0; uvm_lite_gpu_to_unmap = gpu0;
else else
return; return;
@ -951,7 +951,7 @@ static void va_range_unregister_gpu_managed(uvm_va_range_t *va_range, uvm_gpu_t
// Reset preferred location and accessed-by of VA ranges if needed // Reset preferred location and accessed-by of VA ranges if needed
// Note: ignoring the return code of uvm_va_range_set_preferred_location since this // Note: ignoring the return code of uvm_va_range_set_preferred_location since this
// will only return on error when setting a preferred location, not on a reset // will only return on error when setting a preferred location, not on a reset
if (uvm_id_equal(uvm_va_range_get_policy(va_range)->preferred_location, gpu->id)) if (uvm_va_policy_preferred_location_equal(uvm_va_range_get_policy(va_range), gpu->id, NUMA_NO_NODE))
(void)uvm_va_range_set_preferred_location(va_range, UVM_ID_INVALID, NUMA_NO_NODE, mm, NULL); (void)uvm_va_range_set_preferred_location(va_range, UVM_ID_INVALID, NUMA_NO_NODE, mm, NULL);
uvm_va_range_unset_accessed_by(va_range, gpu->id, NULL); uvm_va_range_unset_accessed_by(va_range, gpu->id, NULL);
@ -1683,7 +1683,7 @@ void uvm_va_range_unset_accessed_by(uvm_va_range_t *va_range,
// If a UVM-Lite GPU is being removed from the accessed_by mask, it will // If a UVM-Lite GPU is being removed from the accessed_by mask, it will
// also stop being a UVM-Lite GPU unless it's also the preferred location. // also stop being a UVM-Lite GPU unless it's also the preferred location.
if (uvm_processor_mask_test(&va_range->uvm_lite_gpus, processor_id) && if (uvm_processor_mask_test(&va_range->uvm_lite_gpus, processor_id) &&
!uvm_id_equal(uvm_va_range_get_policy(va_range)->preferred_location, processor_id)) { !uvm_va_policy_preferred_location_equal(uvm_va_range_get_policy(va_range), processor_id, NUMA_NO_NODE)) {
range_unmap(va_range, processor_id, out_tracker); range_unmap(va_range, processor_id, out_tracker);
} }


@ -0,0 +1,42 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Comments, prototypes and checks taken from DMTF: Copyright 2021-2022 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
*/
#include "os-interface.h"
#include "internal_crypt_lib.h"
#include "library/cryptlib.h"
bool libspdm_check_crypto_backend(void)
{
#ifdef USE_LKCA
nv_printf(NV_DBG_INFO, "libspdm_check_crypto_backend: LKCA wrappers found.\n");
nv_printf(NV_DBG_INFO, "libspdm_check_crypto_backend: LKCA calls may still fail if modules have not been loaded!\n");
return true;
#else
nv_printf(NV_DBG_ERRORS, "libspdm_check_crypto_backend: Error - libspdm expects LKCA but found stubs!\n");
return false;
#endif
}
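A hedged sketch of a caller, not part of this change: probing for the LKCA backend before attempting SPDM setup; the surrounding function and the NV_ERR_NOT_SUPPORTED choice are illustrative assumptions.

static NV_STATUS example_spdm_crypto_preflight(void)
{
    // Bail out early when libspdm was built against the crypto stubs.
    if (!libspdm_check_crypto_backend())
        return NV_ERR_NOT_SUPPORTED;

    return NV_OK;
}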


@ -201,7 +201,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
// Ran out of attempts - return thread even if its stack may not be // Ran out of attempts - return thread even if its stack may not be
// allocated on the preferred node // allocated on the preferred node
if ((i == (attempts - 1))) if (i == (attempts - 1))
break; break;
// Get the NUMA node where the first page of the stack is resident. If // Get the NUMA node where the first page of the stack is resident. If


@ -37,6 +37,10 @@
#include <linux/kernfs.h> #include <linux/kernfs.h>
#endif #endif
#if !defined(NV_BUS_TYPE_HAS_IOMMU_OPS)
#include <linux/iommu.h>
#endif
static void static void
nv_check_and_exclude_gpu( nv_check_and_exclude_gpu(
nvidia_stack_t *sp, nvidia_stack_t *sp,
@ -530,20 +534,13 @@ nv_pci_probe
if (pci_dev->is_virtfn) if (pci_dev->is_virtfn)
{ {
#if defined(NV_VGPU_KVM_BUILD) #if defined(NV_VGPU_KVM_BUILD)
nvl = pci_get_drvdata(pci_dev->physfn);
if (!nvl)
{
nv_printf(NV_DBG_ERRORS, "NVRM: Aborting probe for VF %04x:%02x:%02x.%x "
"since PF is not bound to nvidia driver.\n",
NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev),
NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
goto failed;
}
#if defined(NV_BUS_TYPE_HAS_IOMMU_OPS)
if (pci_dev->dev.bus->iommu_ops == NULL) if (pci_dev->dev.bus->iommu_ops == NULL)
{ #else
nv = NV_STATE_PTR(nvl); if ((pci_dev->dev.iommu != NULL) && (pci_dev->dev.iommu->iommu_dev != NULL) &&
if (rm_is_iommu_needed_for_sriov(sp, nv)) (pci_dev->dev.iommu->iommu_dev->ops == NULL))
#endif
{ {
nv_printf(NV_DBG_ERRORS, "NVRM: Aborting probe for VF %04x:%02x:%02x.%x " nv_printf(NV_DBG_ERRORS, "NVRM: Aborting probe for VF %04x:%02x:%02x.%x "
"since IOMMU is not present on the system.\n", "since IOMMU is not present on the system.\n",
@ -551,13 +548,6 @@ nv_pci_probe
NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn));
goto failed; goto failed;
} }
}
if (nvidia_vgpu_vfio_probe(pci_dev) != NV_OK)
{
nv_printf(NV_DBG_ERRORS, "NVRM: Failed to register device to vGPU VFIO module");
goto failed;
}
nv_kmem_cache_free_stack(sp); nv_kmem_cache_free_stack(sp);
return 0; return 0;

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -45,6 +45,11 @@ typedef struct gpuObject *gpuObjectHandle;
typedef struct gpuRetainedChannel_struct gpuRetainedChannel; typedef struct gpuRetainedChannel_struct gpuRetainedChannel;
NV_STATUS calculatePCIELinkRateMBps(NvU32 lanes,
NvU32 pciLinkMaxSpeed,
NvU32 *pcieLinkRate);
NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session); NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session);
NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session); NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session);
@ -286,11 +291,11 @@ NV_STATUS nvGpuOpsTogglePrefetchFaults(gpuFaultInfo *pFaultInfo,
NvBool bEnable); NvBool bEnable);
// Interface used for CCSL // Interface used for CCSL
NV_STATUS nvGpuOpsCcslContextInit(struct ccslContext_t **ctx, NV_STATUS nvGpuOpsCcslContextInit(struct ccslContext_t **ctx,
gpuChannelHandle channel); gpuChannelHandle channel);
NV_STATUS nvGpuOpsCcslContextClear(struct ccslContext_t *ctx); NV_STATUS nvGpuOpsCcslContextClear(struct ccslContext_t *ctx);
NV_STATUS nvGpuOpsCcslContextUpdate(struct ccslContext_t *ctx); NV_STATUS nvGpuOpsCcslRotateKey(UvmCslContext *contextList[],
NvU32 contextListCount);
NV_STATUS nvGpuOpsCcslRotateIv(struct ccslContext_t *ctx, NV_STATUS nvGpuOpsCcslRotateIv(struct ccslContext_t *ctx,
NvU8 direction); NvU8 direction);
NV_STATUS nvGpuOpsCcslEncrypt(struct ccslContext_t *ctx, NV_STATUS nvGpuOpsCcslEncrypt(struct ccslContext_t *ctx,
@ -308,6 +313,7 @@ NV_STATUS nvGpuOpsCcslDecrypt(struct ccslContext_t *ctx,
NvU32 bufferSize, NvU32 bufferSize,
NvU8 const *inputBuffer, NvU8 const *inputBuffer,
NvU8 const *decryptIv, NvU8 const *decryptIv,
NvU32 keyRotationId,
NvU8 *outputBuffer, NvU8 *outputBuffer,
NvU8 const *addAuthData, NvU8 const *addAuthData,
NvU32 addAuthDataSize, NvU32 addAuthDataSize,
@ -323,7 +329,8 @@ NV_STATUS nvGpuOpsIncrementIv(struct ccslContext_t *ctx,
NvU8 direction, NvU8 direction,
NvU64 increment, NvU64 increment,
NvU8 *iv); NvU8 *iv);
NV_STATUS nvGpuOpsLogDeviceEncryption(struct ccslContext_t *ctx, NV_STATUS nvGpuOpsLogEncryption(struct ccslContext_t *ctx,
NvU8 direction,
NvU32 bufferSize); NvU32 bufferSize);
#endif /* _NV_GPU_OPS_H_*/ #endif /* _NV_GPU_OPS_H_*/
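
For the calculatePCIELinkRateMBps() prototype added above, a minimal hedged sketch of a caller follows; the wrapper name is hypothetical, and the expected encodings of the lane count and pciLinkMaxSpeed arguments are defined elsewhere in RM, so treat them as opaque placeholders here.

/* Illustration only -- hypothetical wrapper around the new prototype. */
static NvU32 hypothetical_link_rate_or_zero(NvU32 lanes, NvU32 pciLinkMaxSpeed)
{
    NvU32 linkRateMBps = 0;

    /* On success, linkRateMBps holds the computed PCIe link rate in MB/s. */
    if (calculatePCIELinkRateMBps(lanes, pciLinkMaxSpeed, &linkRateMBps) != NV_OK)
    {
        return 0;
    }
    return linkRateMBps;
}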

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -1516,16 +1516,23 @@ void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext)
} }
EXPORT_SYMBOL(nvUvmInterfaceDeinitCslContext); EXPORT_SYMBOL(nvUvmInterfaceDeinitCslContext);
NV_STATUS nvUvmInterfaceCslUpdateContext(UvmCslContext *uvmCslContext) NV_STATUS nvUvmInterfaceCslRotateKey(UvmCslContext *contextList[],
NvU32 contextListCount)
{ {
NV_STATUS status; NV_STATUS status;
nvidia_stack_t *sp = uvmCslContext->nvidia_stack; nvidia_stack_t *sp;
status = rm_gpu_ops_ccsl_context_update(sp, uvmCslContext->ctx); if ((contextList == NULL) || (contextListCount == 0) || (contextList[0] == NULL))
{
return NV_ERR_INVALID_ARGUMENT;
}
sp = contextList[0]->nvidia_stack;
status = rm_gpu_ops_ccsl_rotate_key(sp, contextList, contextListCount);
return status; return status;
} }
EXPORT_SYMBOL(nvUvmInterfaceCslUpdateContext); EXPORT_SYMBOL(nvUvmInterfaceCslRotateKey);
NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext, NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
UvmCslOperation operation) UvmCslOperation operation)
@ -1562,6 +1569,7 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
NvU32 bufferSize, NvU32 bufferSize,
NvU8 const *inputBuffer, NvU8 const *inputBuffer,
UvmCslIv const *decryptIv, UvmCslIv const *decryptIv,
NvU32 keyRotationId,
NvU8 *outputBuffer, NvU8 *outputBuffer,
NvU8 const *addAuthData, NvU8 const *addAuthData,
NvU32 addAuthDataSize, NvU32 addAuthDataSize,
@ -1575,6 +1583,7 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
bufferSize, bufferSize,
inputBuffer, inputBuffer,
(NvU8 *)decryptIv, (NvU8 *)decryptIv,
keyRotationId,
outputBuffer, outputBuffer,
addAuthData, addAuthData,
addAuthDataSize, addAuthDataSize,
@ -1625,17 +1634,18 @@ NV_STATUS nvUvmInterfaceCslIncrementIv(UvmCslContext *uvmCslContext,
} }
EXPORT_SYMBOL(nvUvmInterfaceCslIncrementIv); EXPORT_SYMBOL(nvUvmInterfaceCslIncrementIv);
NV_STATUS nvUvmInterfaceCslLogExternalEncryption(UvmCslContext *uvmCslContext, NV_STATUS nvUvmInterfaceCslLogEncryption(UvmCslContext *uvmCslContext,
UvmCslOperation operation,
NvU32 bufferSize) NvU32 bufferSize)
{ {
NV_STATUS status; NV_STATUS status;
nvidia_stack_t *sp = uvmCslContext->nvidia_stack; nvidia_stack_t *sp = uvmCslContext->nvidia_stack;
status = rm_gpu_ops_ccsl_log_device_encryption(sp, uvmCslContext->ctx, bufferSize); status = rm_gpu_ops_ccsl_log_encryption(sp, uvmCslContext->ctx, operation, bufferSize);
return status; return status;
} }
EXPORT_SYMBOL(nvUvmInterfaceCslLogExternalEncryption); EXPORT_SYMBOL(nvUvmInterfaceCslLogEncryption);
#else // NV_UVM_ENABLE #else // NV_UVM_ENABLE
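
A minimal sketch of how a UVM-side caller might drive the renamed rotate-key entry point, matching the signature above; the helper is hypothetical and assumes both contexts were already set up through the usual CSL context initialization path.

/* Illustration only -- hypothetical caller of nvUvmInterfaceCslRotateKey(). */
static NV_STATUS hypothetical_rotate_two_contexts(UvmCslContext *ctxA, UvmCslContext *ctxB)
{
    UvmCslContext *list[2];

    list[0] = ctxA;
    list[1] = ctxB;

    /* A NULL or empty list is rejected with NV_ERR_INVALID_ARGUMENT, as shown in
     * the implementation above; all listed contexts are rotated in one call. */
    return nvUvmInterfaceCslRotateKey(list, 2);
}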

View File

@ -41,6 +41,7 @@ NVIDIA_SOURCES += nvidia/libspdm_rsa.c
NVIDIA_SOURCES += nvidia/libspdm_aead_aes_gcm.c NVIDIA_SOURCES += nvidia/libspdm_aead_aes_gcm.c
NVIDIA_SOURCES += nvidia/libspdm_sha.c NVIDIA_SOURCES += nvidia/libspdm_sha.c
NVIDIA_SOURCES += nvidia/libspdm_hmac_sha.c NVIDIA_SOURCES += nvidia/libspdm_hmac_sha.c
NVIDIA_SOURCES += nvidia/libspdm_internal_crypt_lib.c
NVIDIA_SOURCES += nvidia/libspdm_hkdf_sha.c NVIDIA_SOURCES += nvidia/libspdm_hkdf_sha.c
NVIDIA_SOURCES += nvidia/libspdm_ec.c NVIDIA_SOURCES += nvidia/libspdm_ec.c
NVIDIA_SOURCES += nvidia/libspdm_x509.c NVIDIA_SOURCES += nvidia/libspdm_x509.c

View File

@ -161,7 +161,7 @@ NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_atomic_ops_to_root
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vga_tryget NV_CONFTEST_FUNCTION_COMPILE_TESTS += vga_tryget
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_platform_has NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_platform_has
NV_CONFTEST_FUNCTION_COMPILE_TESTS += seq_read_iter NV_CONFTEST_FUNCTION_COMPILE_TESTS += seq_read_iter
NV_CONFTEST_FUNCTION_COMPILE_TESTS += unsafe_follow_pfn NV_CONFTEST_FUNCTION_COMPILE_TESTS += follow_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += add_memory_driver_managed NV_CONFTEST_FUNCTION_COMPILE_TESTS += add_memory_driver_managed
@ -228,6 +228,7 @@ NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_alloc_me
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_free_gscco_mem NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tsec_comms_free_gscco_mem
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_memory_block_size_bytes NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_memory_block_size_bytes
NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto NV_CONFTEST_SYMBOL_COMPILE_TESTS += crypto
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_follow_pte
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops
@ -251,6 +252,7 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += pci_driver_has_driver_managed_dma
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += memory_failure_has_trapno_arg NV_CONFTEST_TYPE_COMPILE_TESTS += memory_failure_has_trapno_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += foll_longterm_present NV_CONFTEST_TYPE_COMPILE_TESTS += foll_longterm_present
NV_CONFTEST_TYPE_COMPILE_TESTS += bus_type_has_iommu_ops
NV_CONFTEST_GENERIC_COMPILE_TESTS += dom0_kernel_present NV_CONFTEST_GENERIC_COMPILE_TESTS += dom0_kernel_present
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_vgpu_kvm_build NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_vgpu_kvm_build

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -38,4 +38,4 @@ bool libspdm_aead_aes_gcm_decrypt_prealloc(void *context,
const uint8_t *data_in, size_t data_in_size, const uint8_t *data_in, size_t data_in_size,
const uint8_t *tag, size_t tag_size, const uint8_t *tag, size_t tag_size,
uint8_t *data_out, size_t *data_out_size); uint8_t *data_out, size_t *data_out_size);
bool libspdm_check_crypto_backend(void);

View File

@ -36,10 +36,28 @@ static inline int nv_follow_pfn(struct vm_area_struct *vma,
unsigned long address, unsigned long address,
unsigned long *pfn) unsigned long *pfn)
{ {
#if defined(NV_UNSAFE_FOLLOW_PFN_PRESENT) #if defined(NV_FOLLOW_PFN_PRESENT)
return unsafe_follow_pfn(vma, address, pfn);
#else
return follow_pfn(vma, address, pfn); return follow_pfn(vma, address, pfn);
#else
#if NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte
int status = 0;
spinlock_t *ptl;
pte_t *ptep;
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
return status;
status = follow_pte(vma, address, &ptep, &ptl);
if (status)
return status;
*pfn = pte_pfn(ptep_get(ptep));
// The lock is acquired inside follow_pte()
pte_unmap_unlock(ptep, ptl);
return 0;
#else // NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte
return -1;
#endif // NV_IS_EXPORT_SYMBOL_PRESENT_follow_pte
#endif #endif
} }
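
A short hedged sketch of a caller for the reworked nv_follow_pfn() path; the helper is hypothetical, assumes the same kernel headers as the function above, and assumes the locking that follow_pfn()/follow_pte() normally expect (the mm's mmap lock held across the lookup).

/* Illustration only -- hypothetical caller; vma must map 'address'. */
static int hypothetical_lookup_pfn(struct vm_area_struct *vma, unsigned long address)
{
    unsigned long pfn = 0;

    /* Non-zero means the PFN could not be resolved (no mapping, or follow_pte()
     * is not exported on this kernel and the fallback returned -1). */
    if (nv_follow_pfn(vma, address, &pfn) != 0)
    {
        return -1;
    }

    /* pfn now holds the page frame number backing 'address'. */
    return 0;
}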

View File

@ -313,6 +313,7 @@ namespace DisplayPort
bool bDisableSSC; bool bDisableSSC;
bool bEnableFastLT; bool bEnableFastLT;
NvU32 maxLinkRateFromRegkey; NvU32 maxLinkRateFromRegkey;
bool bFlushTimeslotWhenDirty;
// //
// Latency(ms) to apply between link-train and FEC enable for bug // Latency(ms) to apply between link-train and FEC enable for bug

View File

@ -74,14 +74,14 @@
// //
#define NV_DP_DSC_MST_CAP_BUG_3143315 "DP_DSC_MST_CAP_BUG_3143315" #define NV_DP_DSC_MST_CAP_BUG_3143315 "DP_DSC_MST_CAP_BUG_3143315"
//
// Bug 4388987 : This regkey will disable reading PCON caps for MST. // Bug 4388987 : This regkey will disable reading PCON caps for MST.
//
#define NV_DP_REGKEY_MST_PCON_CAPS_READ_DISABLED "DP_BUG_4388987_WAR" #define NV_DP_REGKEY_MST_PCON_CAPS_READ_DISABLED "DP_BUG_4388987_WAR"
// // Bug 4426624: Flush timeslot change to HW when dirty bit is set.
#define NV_DP_REGKEY_FLUSH_TIMESLOT_INFO_WHEN_DIRTY "DP_BUG_4426624_WAR"
// Bug 4459839 : This regkey will enable DSC irrespective of LT status. // Bug 4459839 : This regkey will enable DSC irrespective of LT status.
//
#define NV_DP_REGKEY_FORCE_DSC_ON_SINK "DP_FORCE_DSC_ON_SINK" #define NV_DP_REGKEY_FORCE_DSC_ON_SINK "DP_FORCE_DSC_ON_SINK"
#define NV_DP_REGKEY_ENABLE_SKIP_DPCD_READS_WAR "DP_BUG_4478047_WAR" #define NV_DP_REGKEY_ENABLE_SKIP_DPCD_READS_WAR "DP_BUG_4478047_WAR"
@ -121,6 +121,7 @@ struct DP_REGKEY_DATABASE
bool bMSTPCONCapsReadDisabled; bool bMSTPCONCapsReadDisabled;
bool bForceDscOnSink; bool bForceDscOnSink;
bool bSkipFakeDeviceDpcdAccess; bool bSkipFakeDeviceDpcdAccess;
bool bFlushTimeslotWhenDirty;
}; };
#endif //INCLUDED_DP_REGKEYDATABASE_H #endif //INCLUDED_DP_REGKEYDATABASE_H

View File

@ -176,6 +176,7 @@ void ConnectorImpl::applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatab
this->bReassessMaxLink = dpRegkeyDatabase.bReassessMaxLink; this->bReassessMaxLink = dpRegkeyDatabase.bReassessMaxLink;
this->bForceDscOnSink = dpRegkeyDatabase.bForceDscOnSink; this->bForceDscOnSink = dpRegkeyDatabase.bForceDscOnSink;
this->bSkipFakeDeviceDpcdAccess = dpRegkeyDatabase.bSkipFakeDeviceDpcdAccess; this->bSkipFakeDeviceDpcdAccess = dpRegkeyDatabase.bSkipFakeDeviceDpcdAccess;
this->bFlushTimeslotWhenDirty = dpRegkeyDatabase.bFlushTimeslotWhenDirty;
} }
void ConnectorImpl::setPolicyModesetOrderMitigation(bool enabled) void ConnectorImpl::setPolicyModesetOrderMitigation(bool enabled)
@ -5611,7 +5612,8 @@ void ConnectorImpl::beforeDeleteStream(GroupImpl * group, bool forFlushMode)
} }
} }
if (linkUseMultistream() && group && group->isHeadAttached() && group->timeslot.count) if (linkUseMultistream() && group && group->isHeadAttached() &&
(group->timeslot.count || (this->bFlushTimeslotWhenDirty && group->timeslot.hardwareDirty)))
{ {
// Detach all the panels from payload // Detach all the panels from payload
for (Device * d = group->enumDevices(0); d; d = group->enumDevices(d)) for (Device * d = group->enumDevices(0); d; d = group->enumDevices(d))

View File

@ -96,7 +96,8 @@ const struct
{NV_DP_REGKEY_REASSESS_MAX_LINK, &dpRegkeyDatabase.bReassessMaxLink, DP_REG_VAL_BOOL}, {NV_DP_REGKEY_REASSESS_MAX_LINK, &dpRegkeyDatabase.bReassessMaxLink, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_MST_PCON_CAPS_READ_DISABLED, &dpRegkeyDatabase.bMSTPCONCapsReadDisabled, DP_REG_VAL_BOOL}, {NV_DP_REGKEY_MST_PCON_CAPS_READ_DISABLED, &dpRegkeyDatabase.bMSTPCONCapsReadDisabled, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_FORCE_DSC_ON_SINK, &dpRegkeyDatabase.bForceDscOnSink, DP_REG_VAL_BOOL}, {NV_DP_REGKEY_FORCE_DSC_ON_SINK, &dpRegkeyDatabase.bForceDscOnSink, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_ENABLE_SKIP_DPCD_READS_WAR, &dpRegkeyDatabase.bSkipFakeDeviceDpcdAccess, DP_REG_VAL_BOOL} {NV_DP_REGKEY_ENABLE_SKIP_DPCD_READS_WAR, &dpRegkeyDatabase.bSkipFakeDeviceDpcdAccess, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_FLUSH_TIMESLOT_INFO_WHEN_DIRTY, &dpRegkeyDatabase.bFlushTimeslotWhenDirty, DP_REG_VAL_BOOL}
}; };
EvoMainLink::EvoMainLink(EvoInterface * provider, Timer * timer) : EvoMainLink::EvoMainLink(EvoInterface * provider, Timer * timer) :

View File

@ -36,25 +36,25 @@
// and then checked back in. You cannot make changes to these sections without // and then checked back in. You cannot make changes to these sections without
// corresponding changes to the buildmeister script // corresponding changes to the buildmeister script
#ifndef NV_BUILD_BRANCH #ifndef NV_BUILD_BRANCH
#define NV_BUILD_BRANCH r550_00 #define NV_BUILD_BRANCH r552_52
#endif #endif
#ifndef NV_PUBLIC_BRANCH #ifndef NV_PUBLIC_BRANCH
#define NV_PUBLIC_BRANCH r550_00 #define NV_PUBLIC_BRANCH r552_52
#endif #endif
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) #if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r550/r550_00-242" #define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r550/r552_52-292"
#define NV_BUILD_CHANGELIST_NUM (34157620) #define NV_BUILD_CHANGELIST_NUM (34362171)
#define NV_BUILD_TYPE "Official" #define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "rel/gpu_drv/r550/r550_00-242" #define NV_BUILD_NAME "rel/gpu_drv/r550/r552_52-292"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34157620) #define NV_LAST_OFFICIAL_CHANGELIST_NUM (34362171)
#else /* Windows builds */ #else /* Windows builds */
#define NV_BUILD_BRANCH_VERSION "r550_00-233" #define NV_BUILD_BRANCH_VERSION "r552_52-2"
#define NV_BUILD_CHANGELIST_NUM (34158633) #define NV_BUILD_CHANGELIST_NUM (34331643)
#define NV_BUILD_TYPE "Official" #define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "552.25" #define NV_BUILD_NAME "552.55"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (34158633) #define NV_LAST_OFFICIAL_CHANGELIST_NUM (34331643)
#define NV_BUILD_BRANCH_BASE_VERSION R550 #define NV_BUILD_BRANCH_BASE_VERSION R550
#endif #endif
// End buildmeister python edited section // End buildmeister python edited section

View File

@ -4,7 +4,7 @@
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \ #if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
(defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1) (defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
#define NV_VERSION_STRING "550.78" #define NV_VERSION_STRING "550.90.07"
#else #else

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2003-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -24,4 +24,64 @@
#ifndef __ga100_dev_runlist_h__ #ifndef __ga100_dev_runlist_h__
#define __ga100_dev_runlist_h__ #define __ga100_dev_runlist_h__
#define NV_CHRAM_CHANNEL(i) (0x000+(i)*4) /* RW-4A */ #define NV_CHRAM_CHANNEL(i) (0x000+(i)*4) /* RW-4A */
#define NV_CHRAM_CHANNEL__SIZE_1 2048 /* */
#define NV_CHRAM_CHANNEL_WRITE_CONTROL 0:0 /* -WIVF */
#define NV_CHRAM_CHANNEL_WRITE_CONTROL_ONES_SET_BITS 0x00000000 /* -WI-V */
#define NV_CHRAM_CHANNEL_WRITE_CONTROL_ONES_CLEAR_BITS 0x00000001 /* -W--V */
#define NV_CHRAM_CHANNEL_ENABLE 1:1 /* RWIVF */
#define NV_CHRAM_CHANNEL_ENABLE_NOT_IN_USE 0x00000000 /* RWI-V */
#define NV_CHRAM_CHANNEL_ENABLE_IN_USE 0x00000001 /* RW--V */
#define NV_CHRAM_CHANNEL_NEXT 2:2 /* RWIVF */
#define NV_CHRAM_CHANNEL_NEXT_FALSE 0x00000000 /* RWI-V */
#define NV_CHRAM_CHANNEL_NEXT_TRUE 0x00000001 /* RW--V */
#define NV_CHRAM_CHANNEL_BUSY 3:3 /* R-IVF */
#define NV_CHRAM_CHANNEL_BUSY_FALSE 0x00000000 /* R-I-V */
#define NV_CHRAM_CHANNEL_BUSY_TRUE 0x00000001 /* R---V */
#define NV_CHRAM_CHANNEL_PBDMA_FAULTED 4:4 /* RWIVF */
#define NV_CHRAM_CHANNEL_PBDMA_FAULTED_FALSE 0x00000000 /* RWI-V */
#define NV_CHRAM_CHANNEL_PBDMA_FAULTED_TRUE 0x00000001 /* RW--V */
#define NV_CHRAM_CHANNEL_ENG_FAULTED 5:5 /* RWIVF */
#define NV_CHRAM_CHANNEL_ENG_FAULTED_FALSE 0x00000000 /* RWI-V */
#define NV_CHRAM_CHANNEL_ENG_FAULTED_TRUE 0x00000001 /* RW--V */
#define NV_CHRAM_CHANNEL_ON_PBDMA 6:6 /* R-IVF */
#define NV_CHRAM_CHANNEL_ON_PBDMA_FALSE 0x00000000 /* R-I-V */
#define NV_CHRAM_CHANNEL_ON_PBDMA_TRUE 0x00000001 /* R---V */
#define NV_CHRAM_CHANNEL_ON_ENG 7:7 /* R-IVF */
#define NV_CHRAM_CHANNEL_ON_ENG_FALSE 0x00000000 /* R-I-V */
#define NV_CHRAM_CHANNEL_ON_ENG_TRUE 0x00000001 /* R---V */
#define NV_CHRAM_CHANNEL_PENDING 8:8 /* RWIVF */
#define NV_CHRAM_CHANNEL_PENDING_FALSE 0x00000000 /* RWI-V */
#define NV_CHRAM_CHANNEL_PENDING_TRUE 0x00000001 /* RW--V */
#define NV_CHRAM_CHANNEL_CTX_RELOAD 9:9 /* RWIVF */
#define NV_CHRAM_CHANNEL_CTX_RELOAD_FALSE 0x00000000 /* RWI-V */
#define NV_CHRAM_CHANNEL_CTX_RELOAD_TRUE 0x00000001 /* RW--V */
#define NV_CHRAM_CHANNEL_PBDMA_BUSY 10:10 /* R-IVF */
#define NV_CHRAM_CHANNEL_PBDMA_BUSY_FALSE 0x00000000 /* R-I-V */
#define NV_CHRAM_CHANNEL_PBDMA_BUSY_TRUE 0x00000001 /* R---V */
#define NV_CHRAM_CHANNEL_ENG_BUSY 11:11 /* R-IVF */
#define NV_CHRAM_CHANNEL_ENG_BUSY_FALSE 0x00000000 /* R-I-V */
#define NV_CHRAM_CHANNEL_ENG_BUSY_TRUE 0x00000001 /* R---V */
#define NV_CHRAM_CHANNEL_ACQUIRE_FAIL 12:12 /* RWIVF */
#define NV_CHRAM_CHANNEL_ACQUIRE_FAIL_FALSE 0x00000000 /* RWI-V */
#define NV_CHRAM_CHANNEL_ACQUIRE_FAIL_TRUE 0x00000001 /* RW--V */
#define NV_CHRAM_CHANNEL_UPDATE 31:0 /* */
#define NV_CHRAM_CHANNEL_UPDATE_ENABLE_CHANNEL 0x00000002 /* */
#define NV_CHRAM_CHANNEL_UPDATE_DISABLE_CHANNEL 0x00000003 /* */
#define NV_CHRAM_CHANNEL_UPDATE_FORCE_CTX_RELOAD 0x00000200 /* */
#define NV_CHRAM_CHANNEL_UPDATE_RESET_PBDMA_FAULTED 0x00000011 /* */
#define NV_CHRAM_CHANNEL_UPDATE_RESET_ENG_FAULTED 0x00000021 /* */
#define NV_CHRAM_CHANNEL_UPDATE_CLEAR_CHANNEL 0xFFFFFFFF /* */
#define NV_RUNLIST_PREEMPT 0x098 /* RW-4R */
#define NV_RUNLIST_PREEMPT_ID 11:0 /* */
#define NV_RUNLIST_PREEMPT_ID_HW 10:0 /* RWIUF */
#define NV_RUNLIST_PREEMPT_ID_HW_NULL 0x00000000 /* RWI-V */
#define NV_RUNLIST_PREEMPT_TSG_PREEMPT_PENDING 20:20 /* R-IVF */
#define NV_RUNLIST_PREEMPT_TSG_PREEMPT_PENDING_FALSE 0x00000000 /* R-I-V */
#define NV_RUNLIST_PREEMPT_TSG_PREEMPT_PENDING_TRUE 0x00000001 /* R---V */
#define NV_RUNLIST_PREEMPT_RUNLIST_PREEMPT_PENDING 21:21 /* R-IVF */
#define NV_RUNLIST_PREEMPT_RUNLIST_PREEMPT_PENDING_FALSE 0x00000000 /* R-I-V */
#define NV_RUNLIST_PREEMPT_RUNLIST_PREEMPT_PENDING_TRUE 0x00000001 /* R---V */
#define NV_RUNLIST_PREEMPT_TYPE 25:24 /* RWIVF */
#define NV_RUNLIST_PREEMPT_TYPE_RUNLIST 0x00000000 /* RWI-V */
#define NV_RUNLIST_PREEMPT_TYPE_TSG 0x00000001 /* RW--V */
#endif // __ga100_dev_runlist_h__ #endif // __ga100_dev_runlist_h__
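
To make the encoding of the UPDATE values above concrete: NV_CHRAM_CHANNEL_UPDATE_ENABLE_CHANNEL (0x2) sets the ENABLE field with WRITE_CONTROL = ONES_SET_BITS, while DISABLE_CHANNEL (0x3) uses ONES_CLEAR_BITS to clear it. The sketch below is illustrative only; the register-write primitive depends on the caller's environment, so it is passed in as a callback rather than assumed.

/* Illustration only -- hypothetical helper built on the channel RAM defines above. */
static void hypothetical_enable_channel(void (*wr32)(NvU32 offset, NvU32 value), NvU32 chId)
{
    /* 0x2 = ENABLE bit set with WRITE_CONTROL = ONES_SET_BITS, so only ENABLE is modified. */
    wr32(NV_CHRAM_CHANNEL(chId), NV_CHRAM_CHANNEL_UPDATE_ENABLE_CHANNEL);
}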

View File

@ -33,7 +33,7 @@
#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_DEV_ENABLED 1:1 #define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_DEV_ENABLED 1:1
#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_DEV_ENABLED_TRUE 0x1 #define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_DEV_ENABLED_TRUE 0x1
#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_DEV_ENABLED_FALSE 0x0 #define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_DEV_ENABLED_FALSE 0x0
#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_MULTI_GPU_MODE 7:6 #define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_MULTI_GPU_MODE 6:5
#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_MULTI_GPU_MODE_NONE 0x0 #define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_MULTI_GPU_MODE_NONE 0x0
#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_MULTI_GPU_MODE_PROTECTED_PCIE 0x1 #define NV_PGC6_AON_SECURE_SCRATCH_GROUP_20_CC_MULTI_GPU_MODE_PROTECTED_PCIE 0x1

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -98,7 +98,7 @@ _nvswitch_fsp_poll_for_queue_empty
// //
if (!bCmdqEmpty && !bMsgqEmpty) if (!bCmdqEmpty && !bMsgqEmpty)
{ {
nvswitch_fsp_read_message(device, NULL, 0); nvswitch_fsp_read_message(device, NULL, 0, &timeout);
NVSWITCH_PRINT(device, ERROR, "Received error message from FSP while waiting for CMDQ to be empty.\n"); NVSWITCH_PRINT(device, ERROR, "Received error message from FSP while waiting for CMDQ to be empty.\n");
return -NVL_ERR_GENERIC; return -NVL_ERR_GENERIC;
} }
@ -125,23 +125,22 @@ _nvswitch_fsp_poll_for_queue_empty
* @brief Poll for response from FSP via RM message queue * @brief Poll for response from FSP via RM message queue
* *
* @param[in] device nvswitch_device pointer * @param[in] device nvswitch_device pointer
* @param[in] pTimeout RPC timeout
* *
* @return NVL_SUCCESS, or NV_ERR_TIMEOUT * @return NVL_SUCCESS, or NV_ERR_TIMEOUT
*/ */
static NvlStatus static NvlStatus
_nvswitch_fsp_poll_for_response _nvswitch_fsp_poll_for_response
( (
nvswitch_device *device nvswitch_device *device,
NVSWITCH_TIMEOUT *pTimeout
) )
{ {
NvBool bKeepPolling; NvBool bKeepPolling;
NVSWITCH_TIMEOUT timeout;
nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout);
do do
{ {
bKeepPolling = nvswitch_timeout_check(&timeout) ? NV_FALSE : NV_TRUE; bKeepPolling = nvswitch_timeout_check(pTimeout) ? NV_FALSE : NV_TRUE;
// //
// Poll for message queue to wait for FSP's reply // Poll for message queue to wait for FSP's reply
@ -178,6 +177,8 @@ _nvswitch_fsp_poll_for_response
* @param[in] device nvswitch_device pointer * @param[in] device nvswitch_device pointer
* @param[in/out] pPayloadBuffer Buffer in which to return message payload * @param[in/out] pPayloadBuffer Buffer in which to return message payload
* @param[in] payloadBufferSize Payload buffer size * @param[in] payloadBufferSize Payload buffer size
* @param[in] pTimeout RPC timeout
*
* *
* @return NVL_SUCCESS, NV_ERR_INVALID_DATA, NV_ERR_INSUFFICIENT_RESOURCES, or errors * @return NVL_SUCCESS, NV_ERR_INVALID_DATA, NV_ERR_INSUFFICIENT_RESOURCES, or errors
* from functions called within * from functions called within
@ -187,7 +188,8 @@ nvswitch_fsp_read_message
( (
nvswitch_device *device, nvswitch_device *device,
NvU8 *pPayloadBuffer, NvU8 *pPayloadBuffer,
NvU32 payloadBufferSize NvU32 payloadBufferSize,
NVSWITCH_TIMEOUT *pTimeout
) )
{ {
NvU8 *pPacketBuffer; NvU8 *pPacketBuffer;
@ -206,7 +208,7 @@ nvswitch_fsp_read_message
if (pPacketBuffer == NULL) if (pPacketBuffer == NULL)
{ {
NVSWITCH_PRINT(device, ERROR, NVSWITCH_PRINT(device, ERROR,
"Failed to allocate memory for GLT!!\n"); "%s: Failed to allocate memory!!\n", __FUNCTION__);
return -NVL_NO_MEM; return -NVL_NO_MEM;
} }
@ -219,9 +221,10 @@ nvswitch_fsp_read_message
NvU8 tag; NvU8 tag;
// Wait for next packet // Wait for next packet
status = _nvswitch_fsp_poll_for_response(device); status = _nvswitch_fsp_poll_for_response(device, pTimeout);
if (status != NVL_SUCCESS) if (status != NVL_SUCCESS)
{ {
NVSWITCH_PRINT(device, ERROR, "%s: Timed out waiting for response from FSP!\n", __FUNCTION__);
goto done; goto done;
} }
@ -353,6 +356,7 @@ nvswitch_fsp_send_packet
* @param[in] nvdmType NVDM type of message being sent * @param[in] nvdmType NVDM type of message being sent
* @param[in] pResponsePayload Buffer in which to return response payload * @param[in] pResponsePayload Buffer in which to return response payload
* @param[in] responseBufferSize Response payload buffer size * @param[in] responseBufferSize Response payload buffer size
* @param[in] pTimeout RPC timeout
* *
* @return NVL_SUCCESS, or NV_ERR_* * @return NVL_SUCCESS, or NV_ERR_*
*/ */
@ -364,7 +368,8 @@ nvswitch_fsp_send_and_read_message
NvU32 size, NvU32 size,
NvU32 nvdmType, NvU32 nvdmType,
NvU8 *pResponsePayload, NvU8 *pResponsePayload,
NvU32 responseBufferSize NvU32 responseBufferSize,
NVSWITCH_TIMEOUT *pTimeout
) )
{ {
NvU32 dataSent, dataRemaining; NvU32 dataSent, dataRemaining;
@ -443,12 +448,13 @@ nvswitch_fsp_send_and_read_message
} }
} }
status = _nvswitch_fsp_poll_for_response(device); status = _nvswitch_fsp_poll_for_response(device, pTimeout);
if (status != NVL_SUCCESS) if (status != NVL_SUCCESS)
{ {
NVSWITCH_PRINT(device, ERROR, "%s: Timed out waiting for response from FSP!\n", __FUNCTION__);
goto failed; goto failed;
} }
status = nvswitch_fsp_read_message(device, pResponsePayload, responseBufferSize); status = nvswitch_fsp_read_message(device, pResponsePayload, responseBufferSize, pTimeout);
failed: failed:
nvswitch_os_free(pBuffer); nvswitch_os_free(pBuffer);

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -91,8 +91,8 @@ typedef enum mctp_packet_state
MCTP_PACKET_STATE_SINGLE_PACKET MCTP_PACKET_STATE_SINGLE_PACKET
} MCTP_PACKET_STATE, *PMCTP_PACKET_STATE; } MCTP_PACKET_STATE, *PMCTP_PACKET_STATE;
NvlStatus nvswitch_fsp_read_message(nvswitch_device *device, NvU8 *pPayloadBuffer, NvU32 payloadBufferSize); NvlStatus nvswitch_fsp_read_message(nvswitch_device *device, NvU8 *pPayloadBuffer, NvU32 payloadBufferSize, struct NVSWITCH_TIMEOUT *pTimeout);
NvlStatus nvswitch_fsp_send_packet(nvswitch_device *device, NvU8 *pPacket, NvU32 packetSize); NvlStatus nvswitch_fsp_send_packet(nvswitch_device *device, NvU8 *pPacket, NvU32 packetSize);
NvlStatus nvswitch_fsp_send_and_read_message(nvswitch_device *device, NvU8 *pPayload, NvU32 size, NvU32 nvdmType, NvU8 *pResponsePayload, NvU32 responseBufferSize); NvlStatus nvswitch_fsp_send_and_read_message(nvswitch_device *device, NvU8 *pPayload, NvU32 size, NvU32 nvdmType, NvU8 *pResponsePayload, NvU32 responseBufferSize, struct NVSWITCH_TIMEOUT *pTimeout);
#endif //_FSPRPC_NVSWITCH_H_ #endif //_FSPRPC_NVSWITCH_H_

View File

@ -515,7 +515,7 @@ typedef struct
NV_NPORT_PORTSTAT_LS10(_block, _reg, _idx, ), _data); \ NV_NPORT_PORTSTAT_LS10(_block, _reg, _idx, ), _data); \
} }
#define NVSWITCH_DEFERRED_LINK_STATE_CHECK_INTERVAL_NS ((device->bModeContinuousALI ? 12 : 30) *\ #define NVSWITCH_DEFERRED_LINK_STATE_CHECK_INTERVAL_NS ((device->bModeContinuousALI ? 15 : 30) *\
NVSWITCH_INTERVAL_1SEC_IN_NS) NVSWITCH_INTERVAL_1SEC_IN_NS)
#define NVSWITCH_DEFERRED_FAULT_UP_CHECK_INTERVAL_NS (12 * NVSWITCH_INTERVAL_1MSEC_IN_NS) #define NVSWITCH_DEFERRED_FAULT_UP_CHECK_INTERVAL_NS (12 * NVSWITCH_INTERVAL_1MSEC_IN_NS)

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -585,13 +585,16 @@ nvswitch_fsprpc_get_caps_ls10
TNVL_RPC_CAPS_PAYLOAD payload; TNVL_RPC_CAPS_PAYLOAD payload;
TNVL_RPC_CAPS_RSP_PAYLOAD responsePayload; TNVL_RPC_CAPS_RSP_PAYLOAD responsePayload;
NvlStatus status; NvlStatus status;
NVSWITCH_TIMEOUT timeout;
payload.subMessageId = TNVL_CAPS_SUBMESSAGE_ID; payload.subMessageId = TNVL_CAPS_SUBMESSAGE_ID;
nvswitch_os_memset(&responsePayload, 0, sizeof(TNVL_RPC_CAPS_RSP_PAYLOAD)); nvswitch_os_memset(&responsePayload, 0, sizeof(TNVL_RPC_CAPS_RSP_PAYLOAD));
nvswitch_timeout_create(5 * NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout);
status = nvswitch_fsp_send_and_read_message(device, status = nvswitch_fsp_send_and_read_message(device,
(NvU8*) &payload, sizeof(TNVL_RPC_CAPS_PAYLOAD), NVDM_TYPE_CAPS_QUERY, (NvU8*) &payload, sizeof(TNVL_RPC_CAPS_PAYLOAD), NVDM_TYPE_CAPS_QUERY,
(NvU8*) &responsePayload, sizeof(TNVL_RPC_CAPS_RSP_PAYLOAD)); (NvU8*) &responsePayload, sizeof(TNVL_RPC_CAPS_RSP_PAYLOAD), &timeout);
if (status != NVL_SUCCESS) if (status != NVL_SUCCESS)
{ {
NVSWITCH_PRINT(device, ERROR, "RPC failed for FSP caps query\n"); NVSWITCH_PRINT(device, ERROR, "RPC failed for FSP caps query\n");

View File

@ -6728,6 +6728,9 @@ _nvswitch_service_nvlipt_lnk_status_ls10
// //
_nvswitch_clear_deferred_link_errors_ls10(device, link_id); _nvswitch_clear_deferred_link_errors_ls10(device, link_id);
chip_device->deferredLinkErrors[link_id].state.lastLinkUpTime = nvswitch_os_get_platform_time(); chip_device->deferredLinkErrors[link_id].state.lastLinkUpTime = nvswitch_os_get_platform_time();
// Reset NV_NPORT_SCRATCH_WARM_PORT_RESET_REQUIRED to 0x0
NVSWITCH_LINK_WR32(device, link_id, NPORT, _NPORT, _SCRATCH_WARM, 0);
} }
else if (mode == NVLINK_LINKSTATE_FAULT) else if (mode == NVLINK_LINKSTATE_FAULT)
{ {

View File

@ -1664,8 +1664,8 @@ _nvswitch_reset_and_drain_links_ls10
continue; continue;
} }
// Initialize select scratch registers to 0x0 // Reset NV_NPORT_SCRATCH_WARM_PORT_RESET_REQUIRED to 0x0
device->hal.nvswitch_init_scratch(device); NVSWITCH_LINK_WR32(device, link, NPORT, _NPORT, _SCRATCH_WARM, 0);
// //
// Step 9.0: Launch ALI training to re-initialize and train the links // Step 9.0: Launch ALI training to re-initialize and train the links

View File

@ -639,6 +639,7 @@ _nvswitch_tnvl_get_cert_chain_from_fsp_ls10
NvlStatus status; NvlStatus status;
TNVL_GET_ATT_CERTS_CMD_PAYLOAD *pCmdPayload = nvswitch_os_malloc(sizeof(TNVL_GET_ATT_CERTS_CMD_PAYLOAD)); TNVL_GET_ATT_CERTS_CMD_PAYLOAD *pCmdPayload = nvswitch_os_malloc(sizeof(TNVL_GET_ATT_CERTS_CMD_PAYLOAD));
TNVL_GET_ATT_CERTS_RSP_PAYLOAD *pRspPayload = nvswitch_os_malloc(sizeof(TNVL_GET_ATT_CERTS_RSP_PAYLOAD)); TNVL_GET_ATT_CERTS_RSP_PAYLOAD *pRspPayload = nvswitch_os_malloc(sizeof(TNVL_GET_ATT_CERTS_RSP_PAYLOAD));
NVSWITCH_TIMEOUT timeout;
if (pCmdPayload == NULL || pRspPayload == NULL) if (pCmdPayload == NULL || pRspPayload == NULL)
{ {
@ -653,9 +654,11 @@ _nvswitch_tnvl_get_cert_chain_from_fsp_ls10
pCmdPayload->minorVersion = 0; pCmdPayload->minorVersion = 0;
pCmdPayload->majorVersion = 1; pCmdPayload->majorVersion = 1;
nvswitch_timeout_create(5 * NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout);
status = nvswitch_fsp_send_and_read_message(device, status = nvswitch_fsp_send_and_read_message(device,
(NvU8*) pCmdPayload, sizeof(TNVL_GET_ATT_CERTS_CMD_PAYLOAD), NVDM_TYPE_TNVL, (NvU8*) pCmdPayload, sizeof(TNVL_GET_ATT_CERTS_CMD_PAYLOAD), NVDM_TYPE_TNVL,
(NvU8*) pRspPayload, sizeof(TNVL_GET_ATT_CERTS_RSP_PAYLOAD)); (NvU8*) pRspPayload, sizeof(TNVL_GET_ATT_CERTS_RSP_PAYLOAD), &timeout);
if (status != NVL_SUCCESS) if (status != NVL_SUCCESS)
{ {
NVSWITCH_PRINT(device, ERROR, NVSWITCH_PRINT(device, ERROR,
@ -762,6 +765,10 @@ nvswitch_tnvl_get_attestation_certificate_chain_ls10
goto ErrorExit; goto ErrorExit;
} }
certChainLength = certChainLength -
NVSWITCH_IK_HASH_LENGTH -
NVSWITCH_ATT_CERT_SIZE_FIELD_LENGTH -
NVSWITCH_ATT_RSVD1_FIELD_LENGTH;
// //
// pCertChainBufferEnd represents last valid byte for cert buffer. // pCertChainBufferEnd represents last valid byte for cert buffer.
// //
@ -865,6 +872,7 @@ nvswitch_tnvl_get_attestation_report_ls10
NvlStatus status; NvlStatus status;
TNVL_GET_ATT_REPORT_CMD_PAYLOAD *pCmdPayload; TNVL_GET_ATT_REPORT_CMD_PAYLOAD *pCmdPayload;
TNVL_GET_ATT_REPORT_RSP_PAYLOAD *pRspPayload; TNVL_GET_ATT_REPORT_RSP_PAYLOAD *pRspPayload;
NVSWITCH_TIMEOUT timeout;
if (!nvswitch_is_tnvl_mode_enabled(device)) if (!nvswitch_is_tnvl_mode_enabled(device))
{ {
@ -892,9 +900,11 @@ nvswitch_tnvl_get_attestation_report_ls10
pCmdPayload->majorVersion = 1; pCmdPayload->majorVersion = 1;
nvswitch_os_memcpy(pCmdPayload->nonce, params->nonce, NVSWITCH_NONCE_SIZE); nvswitch_os_memcpy(pCmdPayload->nonce, params->nonce, NVSWITCH_NONCE_SIZE);
nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout);
status = nvswitch_fsp_send_and_read_message(device, status = nvswitch_fsp_send_and_read_message(device,
(NvU8*) pCmdPayload, sizeof(TNVL_GET_ATT_REPORT_CMD_PAYLOAD), NVDM_TYPE_TNVL, (NvU8*) pCmdPayload, sizeof(TNVL_GET_ATT_REPORT_CMD_PAYLOAD), NVDM_TYPE_TNVL,
(NvU8*) pRspPayload, sizeof(TNVL_GET_ATT_REPORT_RSP_PAYLOAD)); (NvU8*) pRspPayload, sizeof(TNVL_GET_ATT_REPORT_RSP_PAYLOAD), &timeout);
if (status != NVL_SUCCESS) if (status != NVL_SUCCESS)
{ {
NVSWITCH_PRINT(device, ERROR, NVSWITCH_PRINT(device, ERROR,
@ -970,6 +980,7 @@ nvswitch_tnvl_send_fsp_lock_config_ls10
NvlStatus status; NvlStatus status;
TNVL_LOCK_CONFIG_CMD_PAYLOAD *pCmdPayload; TNVL_LOCK_CONFIG_CMD_PAYLOAD *pCmdPayload;
TNVL_LOCK_CONFIG_RSP_PAYLOAD *pRspPayload; TNVL_LOCK_CONFIG_RSP_PAYLOAD *pRspPayload;
NVSWITCH_TIMEOUT timeout;
if (!nvswitch_is_tnvl_mode_enabled(device)) if (!nvswitch_is_tnvl_mode_enabled(device))
{ {
@ -995,9 +1006,11 @@ nvswitch_tnvl_send_fsp_lock_config_ls10
pCmdPayload->minorVersion = 0; pCmdPayload->minorVersion = 0;
pCmdPayload->majorVersion = 1; pCmdPayload->majorVersion = 1;
nvswitch_timeout_create(5 * NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout);
status = nvswitch_fsp_send_and_read_message(device, status = nvswitch_fsp_send_and_read_message(device,
(NvU8*) pCmdPayload, sizeof(TNVL_LOCK_CONFIG_CMD_PAYLOAD), NVDM_TYPE_TNVL, (NvU8*) pCmdPayload, sizeof(TNVL_LOCK_CONFIG_CMD_PAYLOAD), NVDM_TYPE_TNVL,
(NvU8*) pRspPayload, sizeof(TNVL_LOCK_CONFIG_RSP_PAYLOAD)); (NvU8*) pRspPayload, sizeof(TNVL_LOCK_CONFIG_RSP_PAYLOAD), &timeout);
if (status != NVL_SUCCESS) if (status != NVL_SUCCESS)
{ {
NVSWITCH_PRINT(device, ERROR, NVSWITCH_PRINT(device, ERROR,

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -62,8 +62,9 @@ typedef enum KEY_ROTATION_STATUS {
KEY_ROTATION_STATUS_IN_PROGRESS = 2, // Key rotation is in progress KEY_ROTATION_STATUS_IN_PROGRESS = 2, // Key rotation is in progress
KEY_ROTATION_STATUS_FAILED_TIMEOUT = 3, // Key rotation timeout failure, RM will RC non-idle channels KEY_ROTATION_STATUS_FAILED_TIMEOUT = 3, // Key rotation timeout failure, RM will RC non-idle channels
KEY_ROTATION_STATUS_FAILED_THRESHOLD = 4, // Key rotation failed because upper threshold was crossed, RM will RC non-idle channels KEY_ROTATION_STATUS_FAILED_THRESHOLD = 4, // Key rotation failed because upper threshold was crossed, RM will RC non-idle channels
KEY_ROTATION_STATUS_FAILED_ROTATION = 5, // Internal RM failure while rotating keys for a certain channel, RM will RC the channel. KEY_ROTATION_STATUS_FAILED_ROTATION = 5, // Internal RM failure while rotating keys for a certain channel, RM will RC the channel
KEY_ROTATION_STATUS_MAX_COUNT = 6, KEY_ROTATION_STATUS_PENDING_TIMER_SUSPENDED = 6, // Key rotation timer suspended waiting for kernel key rotation to complete
KEY_ROTATION_STATUS_MAX_COUNT = 7,
} KEY_ROTATION_STATUS; } KEY_ROTATION_STATUS;
typedef struct CC_AES_CRYPTOBUNDLE { typedef struct CC_AES_CRYPTOBUNDLE {

View File

@ -31,7 +31,7 @@ extern "C" {
/*event values*/ /*event values*/
#define NV0000_NOTIFIERS_DISPLAY_CHANGE (0) #define NV0000_NOTIFIERS_DISPLAY_CHANGE (0)
#define NV0000_NOTIFIERS_EVENT_NONE_PENDING (1) #define NV0000_NOTIFIERS_EVENT_NONE_PENDING (1)
#define NV0000_NOTIFIERS_VM_START (2) #define NV0000_NOTIFIERS_GPU_UNBIND_EVENT (2)
#define NV0000_NOTIFIERS_GPU_BIND_EVENT (3) #define NV0000_NOTIFIERS_GPU_BIND_EVENT (3)
#define NV0000_NOTIFIERS_NVTELEMETRY_REPORT_EVENT (4) #define NV0000_NOTIFIERS_NVTELEMETRY_REPORT_EVENT (4)
#define NV0000_NOTIFIERS_MAXCOUNT (5) #define NV0000_NOTIFIERS_MAXCOUNT (5)

View File

@ -77,6 +77,7 @@ typedef struct NVA084_ALLOC_PARAMETERS {
NvHandle hPluginClient; NvHandle hPluginClient;
NvU32 numGuestFbHandles; NvU32 numGuestFbHandles;
NvHandle guestFbHandleList[NVA084_MAX_VMMU_SEGMENTS]; NvHandle guestFbHandleList[NVA084_MAX_VMMU_SEGMENTS];
NvU8 vgpuDevName[VM_UUID_SIZE];
NvHandle hPluginHeapMemory; NvHandle hPluginHeapMemory;
NvHandle hMigRmHeapMemory; NvHandle hMigRmHeapMemory;
NV_DECLARE_ALIGNED(NvU64 ctrlBuffOffset, 8); NV_DECLARE_ALIGNED(NvU64 ctrlBuffOffset, 8);

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -36,24 +36,27 @@
#include "ctrl/ctrla081.h" #include "ctrl/ctrla081.h"
#include "class/cl0000.h" #include "class/cl0000.h"
#include "nv_vgpu_types.h" #include "nv_vgpu_types.h"
/* /*
* NV0000_CTRL_CMD_VGPU_GET_START_DATA * NV0000_CTRL_CMD_VGPU_CREATE_DEVICE
* *
* This command gets data associated with NV0000_NOTIFIERS_VGPU_MGR_START to * This command informs RM to create a vGPU device on KVM.
* start VGPU process.
* *
* mdevUuid * vgpuName [IN]
* This parameter gives mdev device UUID for which nvidia-vgpu-mgr should * This parameter provides the MDEV UUID or VF BDF depending on whether MDEV
* init process. * or vfio-pci-core framework is used.
* *
* qemuPid * gpuPciId [IN]
* This parameter specifies the QEMU process ID of the VM.
*
* gpuPciId
* This parameter provides gpuId of GPU on which vgpu device is created. * This parameter provides gpuId of GPU on which vgpu device is created.
* *
* configParams * gpuPciBdf
* This parameter specifies the configuration parameters for vGPU * This parameter specifies the BDF of the VF. (Same as PF for non-sriov)
*
* vgpuTypeId [IN]
* This parameter specifies the vGPU type ID for the device to be created.
*
* vgpuId [OUT]
* This parameter returns the vgpu id allocated by RM for the device
* *
* Possible status values returned are: * Possible status values returned are:
* NV_OK * NV_OK
@ -62,17 +65,114 @@
* NV_ERR_INVALID_CLIENT * NV_ERR_INVALID_CLIENT
* *
*/ */
#define NV0000_CTRL_CMD_VGPU_GET_START_DATA (0xc01) /* finn: Evaluated from "(FINN_NV01_ROOT_VGPU_INTERFACE_ID << 8) | NV0000_CTRL_VGPU_GET_START_DATA_PARAMS_MESSAGE_ID" */
#define NV0000_CTRL_VGPU_GET_START_DATA_PARAMS_MESSAGE_ID (0x1U) #define NV0000_CTRL_CMD_VGPU_CREATE_DEVICE (0xc02) /* finn: Evaluated from "(FINN_NV01_ROOT_VGPU_INTERFACE_ID << 8) | NV0000_CTRL_VGPU_CREATE_DEVICE_PARAMS_MESSAGE_ID" */
typedef struct NV0000_CTRL_VGPU_GET_START_DATA_PARAMS { #define NV0000_CTRL_VGPU_CREATE_DEVICE_PARAMS_MESSAGE_ID (0x2U)
NvU8 mdevUuid[VM_UUID_SIZE];
NvU8 configParams[1024]; typedef struct NV0000_CTRL_VGPU_CREATE_DEVICE_PARAMS {
NvU32 qemuPid; NvU8 vgpuName[VM_UUID_SIZE];
NvU32 gpuPciId; NvU32 gpuPciId;
NvU16 vgpuId;
NvU32 gpuPciBdf; NvU32 gpuPciBdf;
} NV0000_CTRL_VGPU_GET_START_DATA_PARAMS; NvU32 vgpuTypeId;
NvU16 vgpuId;
} NV0000_CTRL_VGPU_CREATE_DEVICE_PARAMS;
/*
* NV0000_CTRL_CMD_VGPU_GET_INSTANCES
*
* This command queries RM for available instances for a particular vGPU type ID
* on KVM.
*
* gpuPciId [IN]
* This parameter specifies gpuId of GPU on which vGPU instances are being
* queried.
*
* gpuPciBdf [IN]
* This parameter specifies the BDF of the VF. (Same as PF for non-sriov)
*
* numVgpuTypes [IN]
* This parameter specifies the count of vgpuTypeIds supplied and the
* count of availableInstances values to be returned.
*
* vgpuTypeIds [IN]
* This parameter specifies a total of numVgpuTypes vGPU type IDs for which
* the available instances are to be queried.
*
* availableInstances [OUT]
* This parameter returns a total of numVgpuTypes available instances for
* the respective vGPU type IDs supplied in vgpuTypeIds input parameter.
*
* Possible status values returned are:
* NV_OK
* NV_ERR_INVALID_EVENT
* NV_ERR_OBJECT_NOT_FOUND
* NV_ERR_INVALID_CLIENT
* NV_ERR_INVALID_STATE
*
*/
#define NV0000_CTRL_CMD_VGPU_GET_INSTANCES (0xc03) /* finn: Evaluated from "(FINN_NV01_ROOT_VGPU_INTERFACE_ID << 8) | NV0000_CTRL_VGPU_GET_INSTANCES_PARAMS_MESSAGE_ID" */
#define NV0000_CTRL_VGPU_GET_INSTANCES_PARAMS_MESSAGE_ID (0x3U)
typedef struct NV0000_CTRL_VGPU_GET_INSTANCES_PARAMS {
NvU32 gpuPciId;
NvU32 gpuPciBdf;
NvU32 numVgpuTypes;
NvU32 vgpuTypeIds[NVA081_MAX_VGPU_TYPES_PER_PGPU];
NvU32 availableInstances[NVA081_MAX_VGPU_TYPES_PER_PGPU];
} NV0000_CTRL_VGPU_GET_INSTANCES_PARAMS;
/*
* NV0000_CTRL_CMD_VGPU_DELETE_DEVICE
*
* This command informs RM to delete a vGPU device on KVM.
*
* vgpuName [IN]
* This parameter provides the MDEV UUID or VF BDF depending on whether MDEV
* or vfio-pci-core framework is used.
*
* vgpuId [IN]
* This parameter provides the vgpu id allocated by RM for the device to be
* deleted.
*
* Possible status values returned are:
* NV_OK
* NV_ERR_INVALID_EVENT
* NV_ERR_OBJECT_NOT_FOUND
* NV_ERR_INVALID_CLIENT
*
*/
#define NV0000_CTRL_CMD_VGPU_DELETE_DEVICE (0xc04) /* finn: Evaluated from "(FINN_NV01_ROOT_VGPU_INTERFACE_ID << 8) | NV0000_CTRL_VGPU_DELETE_DEVICE_PARAMS_MESSAGE_ID" */
#define NV0000_CTRL_VGPU_DELETE_DEVICE_PARAMS_MESSAGE_ID (0x4U)
typedef struct NV0000_CTRL_VGPU_DELETE_DEVICE_PARAMS {
NvU8 vgpuName[VM_UUID_SIZE];
NvU16 vgpuId;
} NV0000_CTRL_VGPU_DELETE_DEVICE_PARAMS;
/*
* NV0000_CTRL_CMD_VGPU_VFIO_UNREGISTER_STATUS
*
 * This command informs RM of the status of the vgpu-vfio unregister operation for a GPU.

*
* returnStatus [IN]
 * This parameter provides the status of the vgpu-vfio unregister operation.
*
* gpuPciId [IN]
* This parameter provides the gpu id of the GPU
*/
#define NV0000_CTRL_CMD_VGPU_VFIO_UNREGISTER_STATUS (0xc05) /* finn: Evaluated from "(FINN_NV01_ROOT_VGPU_INTERFACE_ID << 8) | NV0000_CTRL_VGPU_VFIO_UNREGISTER_STATUS_PARAMS_MESSAGE_ID" */
#define NV0000_CTRL_VGPU_VFIO_UNREGISTER_STATUS_PARAMS_MESSAGE_ID (0x5U)
typedef struct NV0000_CTRL_VGPU_VFIO_UNREGISTER_STATUS_PARAMS {
NvU32 returnStatus;
NvU32 gpuId;
} NV0000_CTRL_VGPU_VFIO_UNREGISTER_STATUS_PARAMS;
/* _ctrl0000vgpu_h_ */ /* _ctrl0000vgpu_h_ */
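
A hedged sketch of how the IN fields of the new CREATE_DEVICE parameter structure fit together; the helper name is hypothetical, the vgpuName copy is left to the caller, and how the control itself is issued to RM is outside this header.

/* Illustration only -- hypothetical use of NV0000_CTRL_VGPU_CREATE_DEVICE_PARAMS. */
static void hypothetical_fill_create_device_params(NV0000_CTRL_VGPU_CREATE_DEVICE_PARAMS *params,
                                                   NvU32 gpuPciId, NvU32 gpuPciBdf, NvU32 vgpuTypeId)
{
    /* vgpuName (left for the caller to fill) carries the MDEV UUID or VF BDF,
     * depending on whether the MDEV or vfio-pci-core framework is used. */
    params->gpuPciId   = gpuPciId;
    params->gpuPciBdf  = gpuPciBdf;
    params->vgpuTypeId = vgpuTypeId;

    /* vgpuId is an OUT field: RM fills in the allocated vGPU id when the
     * NV0000_CTRL_CMD_VGPU_CREATE_DEVICE control completes successfully. */
    params->vgpuId = 0;
}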

View File

@ -905,6 +905,34 @@ typedef struct NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_PARAMS {
NvBool bEnableAfterKeyRotation; NvBool bEnableAfterKeyRotation;
} NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_PARAMS; } NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_PARAMS;
/*
* NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_V2
*
* This command does the same thing as @ref NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION.
* The difference is that it doesn't take a list of clients and instead all channels belong
* to the client on which this control call is made.
*
* numChannels
* The number of valid entries in hChannelList array.
* hChannelList
* An array of NvHandle listing the channel handles
* to be stopped.
* bEnableAfterKeyRotation
* This determines if channel is enabled by RM after it completes key rotation.
* Possible status values returned are:
* NV_OK
* NVOS_INVALID_STATE
*/
#define NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_V2 (0x2080111b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_V2_PARAMS_MESSAGE_ID" */
#define NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_V2_PARAMS_MESSAGE_ID (0x1BU)
typedef struct NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_V2_PARAMS {
NvU32 numChannels;
NvHandle hChannelList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_MAX_ENTRIES];
NvBool bEnableAfterKeyRotation;
} NV2080_CTRL_FIFO_DISABLE_CHANNELS_FOR_KEY_ROTATION_V2_PARAMS;
/* /*

View File

@ -3741,7 +3741,8 @@ typedef struct NV2080_CTRL_INTERNAL_CONF_COMPUTE_GET_STATIC_INFO_PARAMS {
#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SIZE 3U #define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SIZE 3U
#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SWL_KERNEL 0U #define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SWL_KERNEL 0U
#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SWL_USER 1U #define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SWL_USER 1U
#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SWL_COUNT 2U #define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SWL_SCRUBBER 2U
#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_SWL_COUNT 3U
#define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_LCE_COUNT 6U #define NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK_LCE_COUNT 6U
typedef struct NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK { typedef struct NV2080_CTRL_INTERNAL_CONF_COMPUTE_IVMASK {

View File

@ -34,6 +34,8 @@
/*************************** SPDM COMMANDS ************************************/ /*************************** SPDM COMMANDS ************************************/
#include "cc_drv.h"
/*! /*!
* @brief SPDM Command Types * @brief SPDM Command Types
* *
@ -43,6 +45,7 @@
#define RM_GSP_SPDM_CMD_ID_CC_CTRL (0x3) #define RM_GSP_SPDM_CMD_ID_CC_CTRL (0x3)
#define RM_GSP_SPDM_CMD_ID_CC_INIT_RM_DATA (0x4) #define RM_GSP_SPDM_CMD_ID_CC_INIT_RM_DATA (0x4)
#define RM_GSP_SPDM_CMD_ID_CC_HEARTBEAT_CTRL (0x5) #define RM_GSP_SPDM_CMD_ID_CC_HEARTBEAT_CTRL (0x5)
#define RM_GSP_SPDM_CMD_ID_FIPS_SELFTEST (0x6)
#define RM_GSP_SPDM_CMD_ID_INVALID_COMMAND (0xFF) #define RM_GSP_SPDM_CMD_ID_INVALID_COMMAND (0xFF)
@ -114,6 +117,25 @@ typedef struct RM_GSP_SPDM_CMD_CC_HEARTBEAT_CTRL {
typedef struct RM_GSP_SPDM_CMD_CC_HEARTBEAT_CTRL *PRM_GSP_SPDM_CMD_CC_HEARTBEAT_CTRL; typedef struct RM_GSP_SPDM_CMD_CC_HEARTBEAT_CTRL *PRM_GSP_SPDM_CMD_CC_HEARTBEAT_CTRL;
/*!
* HCC FIPS Self-test.
*/
#define CE_FIPS_SELF_TEST_DATA_SIZE 16
#define CE_FIPS_SELF_TEST_AUTH_TAG_SIZE 16
#define CE_FIPS_SELF_TEST_IV_SIZE 12
typedef struct RM_GSP_SPDM_CMD_FIPS_SELFTEST {
NvU8 cmdType;
NvU8 isEnc;
CC_KMB kmb;
NvU8 text[CE_FIPS_SELF_TEST_DATA_SIZE];
NvU8 authTag[CE_FIPS_SELF_TEST_AUTH_TAG_SIZE];
} RM_GSP_SPDM_CMD_FIPS_SELFTEST;
typedef struct RM_GSP_SPDM_CMD_FIPS_SELFTEST *PRM_GSP_SPDM_CMD_FIPS_SELFTEST;
/*! /*!
* NOTE : Do not include structure members that have alignment requirement >= 8 to avoid alignment directives * NOTE : Do not include structure members that have alignment requirement >= 8 to avoid alignment directives
* getting added in FINN generated structures / unions as RM_GSP_SPDM_CMD / RM_GSP_SPDM_MSG are pragma packed in * getting added in FINN generated structures / unions as RM_GSP_SPDM_CMD / RM_GSP_SPDM_MSG are pragma packed in
@ -132,6 +154,9 @@ typedef union RM_GSP_SPDM_CMD {
RM_GSP_SPDM_CMD_CC_INIT_RM_DATA rmDataInitCmd; RM_GSP_SPDM_CMD_CC_INIT_RM_DATA rmDataInitCmd;
RM_GSP_SPDM_CMD_CC_HEARTBEAT_CTRL ccHeartbeatCtrl; RM_GSP_SPDM_CMD_CC_HEARTBEAT_CTRL ccHeartbeatCtrl;
RM_GSP_SPDM_CMD_FIPS_SELFTEST ccFipsTest;
} RM_GSP_SPDM_CMD; } RM_GSP_SPDM_CMD;
typedef union RM_GSP_SPDM_CMD *PRM_GSP_SPDM_CMD; typedef union RM_GSP_SPDM_CMD *PRM_GSP_SPDM_CMD;
@ -149,6 +174,7 @@ typedef union RM_GSP_SPDM_CMD *PRM_GSP_SPDM_CMD;
#define RM_GSP_SPDM_MSG_ID_CC_CTRL (0x3) #define RM_GSP_SPDM_MSG_ID_CC_CTRL (0x3)
#define RM_GSP_SPDM_MSG_ID_CC_INIT_RM_DATA (0x4) #define RM_GSP_SPDM_MSG_ID_CC_INIT_RM_DATA (0x4)
#define RM_GSP_SPDM_MSG_ID_CC_HEARTBEAT_CTRL (0x5) #define RM_GSP_SPDM_MSG_ID_CC_HEARTBEAT_CTRL (0x5)
#define RM_GSP_SPDM_MSG_ID_FIPS_SELFTEST (0x6)

View File

@ -52,6 +52,10 @@
#define NVA081_PGPU_METADATA_STRING_SIZE 256 #define NVA081_PGPU_METADATA_STRING_SIZE 256
#define NVA081_EXTRA_PARAMETERS_SIZE 1024 #define NVA081_EXTRA_PARAMETERS_SIZE 1024
#define NVA081_PLACEMENT_ID_INVALID 0xFFFFU #define NVA081_PLACEMENT_ID_INVALID 0xFFFFU
#define NVA081_CONFIG_PARAMS_MAX_LENGTH 1024
#define NVA081_MAX_BAR_REGION_COUNT 4
#define NVA081_MAX_SPARSE_REGION_COUNT 5
/* /*
* NVA081_CTRL_CMD_VGPU_CONFIG_SET_INFO * NVA081_CTRL_CMD_VGPU_CONFIG_SET_INFO
@ -434,42 +438,6 @@ typedef struct NVA081_CTRL_VGPU_CONFIG_EVENT_SET_NOTIFICATION_PARAMS {
#define NVA081_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE (0x00000001) #define NVA081_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE (0x00000001)
#define NVA081_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002) #define NVA081_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002)
/*
* NVA081_CTRL_CMD_VGPU_CONFIG_NOTIFY_START
*
* This command notifies the nvidia-vgpu-vfio module with start status.
* It notifies whether start has been successful or not.
*
* mdevUuid
* This parameter specifies the uuid of the mdev device for which start has
* been called.
* vmUuid
* The UUID of VM for which vGPU has been created.
* vmName
* The name of VM for which vGPU has been created.
* returnStatus
* This parameter species whether the vGPU plugin is initialized or not.
* it specifies the error code in case plugin initialization has failed
*
* Possible status values returned are:
* NV_OK
* NV_ERR_OBJECT_NOT_FOUND
*/
#define NVA081_CTRL_CMD_VGPU_CONFIG_NOTIFY_START (0xa0810107) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_NOTIFY_START_PARAMS_MESSAGE_ID" */
/*
* NVA081_CTRL_VGPU_CONFIG_NOTIFY_START_PARAMS
* This structure represents information of plugin init status.
*/
#define NVA081_CTRL_VGPU_CONFIG_NOTIFY_START_PARAMS_MESSAGE_ID (0x7U)
typedef struct NVA081_CTRL_VGPU_CONFIG_NOTIFY_START_PARAMS {
NvU8 mdevUuid[VM_UUID_SIZE];
NvU8 vmUuid[VM_UUID_SIZE];
NvU8 vmName[NVA081_VM_NAME_SIZE];
NvU32 returnStatus;
} NVA081_CTRL_VGPU_CONFIG_NOTIFY_START_PARAMS;
/* /*
* NVA081_CTRL_CMD_VGPU_CONFIG_UPDATE_PGPU_INFO * NVA081_CTRL_CMD_VGPU_CONFIG_UPDATE_PGPU_INFO
* *
@ -908,4 +876,102 @@ typedef struct NVA081_CTRL_VGPU_GET_CAPABILITY_PARAMS {
NvBool state; NvBool state;
} NVA081_CTRL_VGPU_GET_CAPABILITY_PARAMS; } NVA081_CTRL_VGPU_GET_CAPABILITY_PARAMS;
/*
* NVA081_CTRL_CMD_VGPU_SET_VM_NAME
*
* This command is to set VM name for KVM.
*
* vgpuName [IN]
* This param provides the vGPU device name to RM.
*
* vmName [IN]
* This param provides the VM name of the vGPU device attached.
*
* Possible status values returned are:
* NV_OK
* NV_ERR_OBJECT_NOT_FOUND
* NV_ERR_INVALID_ARGUMENT
*/
#define NVA081_CTRL_CMD_VGPU_SET_VM_NAME (0xa0810120) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_SET_VM_NAME_PARAMS_MESSAGE_ID" */
#define NVA081_CTRL_VGPU_SET_VM_NAME_PARAMS_MESSAGE_ID (0x20U)
typedef struct NVA081_CTRL_VGPU_SET_VM_NAME_PARAMS {
NvU8 vgpuName[VM_UUID_SIZE];
NvU8 vmName[NVA081_VM_NAME_SIZE];
} NVA081_CTRL_VGPU_SET_VM_NAME_PARAMS;
/*
* NVA081_CTRL_CMD_VGPU_GET_BAR_INFO
*
* This command is to get the bar info for a vGPU.
*
* gpuPciId [IN]
* This param specifies the PCI device ID of VF on which VM is running
*
* vgpuName [IN]
* This param provides the vGPU device name to RM.
*
* configParams [IN]
* This param provides the vGPU config params to RM
*
* barSizes [OUT]
* This param provides the BAR size for each region index of the device
*
* sparseOffsets [OUT]
* This param provides the offset of each sparse mmap region in BAR0
*
* sparseSizes [OUT]
* This param provides the size of each sparse mmap region in BAR0
*
* sparseCount [OUT]
* This param provides the number of sparse mmap regions in BAR0
*
* isBar064bit [OUT]
 * This param indicates whether BAR0 of the vGPU device is 64-bit
*
* Possible status values returned are:
* NV_OK
* NV_ERR_OBJECT_NOT_FOUND
* NV_ERR_INVALID_ARGUMENT
*/
#define NVA081_CTRL_CMD_VGPU_GET_BAR_INFO (0xa0810121) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_GET_BAR_INFO_PARAMS_MESSAGE_ID" */
#define NVA081_CTRL_VGPU_GET_BAR_INFO_PARAMS_MESSAGE_ID (0x21U)
typedef struct NVA081_CTRL_VGPU_GET_BAR_INFO_PARAMS {
NvU32 gpuPciId;
NvU8 vgpuName[VM_UUID_SIZE];
NvU8 configParams[NVA081_CONFIG_PARAMS_MAX_LENGTH];
NV_DECLARE_ALIGNED(NvU64 barSizes[NVA081_MAX_BAR_REGION_COUNT], 8);
NV_DECLARE_ALIGNED(NvU64 sparseOffsets[NVA081_MAX_SPARSE_REGION_COUNT], 8);
NV_DECLARE_ALIGNED(NvU64 sparseSizes[NVA081_MAX_SPARSE_REGION_COUNT], 8);
NvU32 sparseCount;
NvBool isBar064bit;
} NVA081_CTRL_VGPU_GET_BAR_INFO_PARAMS;
/*
* NVA081_CTRL_CMD_VGPU_CONFIG_GET_MIGRATION_BANDWIDTH
*
* This command is to get the migration bandwidth of the physical GPU.
*
* migrationBandwidth [OUT]
* This param specifies the migration bandwidth of GPU
*
* Possible status values returned are:
* NV_OK
* NV_ERR_INVALID_REQUEST
* NV_ERR_INVALID_STATE
* NV_ERR_INVALID_ARGUMENT
*/
#define NVA081_CTRL_CMD_VGPU_CONFIG_GET_MIGRATION_BANDWIDTH (0xa0810122) /* finn: Evaluated from "(FINN_NVA081_VGPU_CONFIG_VGPU_CONFIG_INTERFACE_ID << 8) | NVA081_CTRL_VGPU_CONFIG_GET_MIGRATION_BANDWIDTH_PARAMS_MESSAGE_ID" */
#define NVA081_CTRL_VGPU_CONFIG_GET_MIGRATION_BANDWIDTH_PARAMS_MESSAGE_ID (0x22U)
typedef struct NVA081_CTRL_VGPU_CONFIG_GET_MIGRATION_BANDWIDTH_PARAMS {
NvU32 migrationBandwidth;
} NVA081_CTRL_VGPU_CONFIG_GET_MIGRATION_BANDWIDTH_PARAMS;
/* _ctrlA081vgpuconfig_h_ */ /* _ctrlA081vgpuconfig_h_ */
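
A small hedged sketch of consuming the OUT arrays of the new GET_BAR_INFO control; it assumes params was already filled in by a successful control call, and only shows how sparseCount bounds the fixed-size arrays.

/* Illustration only -- hypothetical consumer of the GET_BAR_INFO OUT fields. */
static NvU64 hypothetical_total_sparse_bytes(const NVA081_CTRL_VGPU_GET_BAR_INFO_PARAMS *params)
{
    NvU64 total = 0;
    NvU32 i;

    /* sparseCount says how many array entries are valid; entry i describes one
     * sparse mmap region inside BAR0 at params->sparseOffsets[i] with length
     * params->sparseSizes[i]. */
    for (i = 0; i < params->sparseCount && i < NVA081_MAX_SPARSE_REGION_COUNT; i++)
    {
        total += params->sparseSizes[i];
    }
    return total;
}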

View File

@ -245,7 +245,7 @@ typedef struct NVC56F_CTRL_ROTATE_SECURE_CHANNEL_IV_PARAMS {
*/ */
#define SECURITY_POLICY_ATTACKER_ADVANTAGE_DEFAULT (60) #define SECURITY_POLICY_ATTACKER_ADVANTAGE_DEFAULT (60)
#define SET_SECURITY_POLICY_ATTACKER_ADVANTAGE_MIN (50) #define SET_SECURITY_POLICY_ATTACKER_ADVANTAGE_MIN (50)
#define SET_SECURITY_POLICY_ATTACKER_ADVANTAGE_MAX (75) #define SET_SECURITY_POLICY_ATTACKER_ADVANTAGE_MAX (65)
#define NV_CONF_COMPUTE_CTRL_SET_SECURITY_POLICY (0xc56f010d) /* finn: Evaluated from "(FINN_AMPERE_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NV_CONF_COMPUTE_CTRL_SET_SECURITY_POLICY_PARAMS_MESSAGE_ID" */ #define NV_CONF_COMPUTE_CTRL_SET_SECURITY_POLICY (0xc56f010d) /* finn: Evaluated from "(FINN_AMPERE_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NV_CONF_COMPUTE_CTRL_SET_SECURITY_POLICY_PARAMS_MESSAGE_ID" */
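
A minimal sketch of the range check a caller might perform before issuing NV_CONF_COMPUTE_CTRL_SET_SECURITY_POLICY, reflecting the new upper bound of 65; the helper name is hypothetical.

/* Illustration only -- validity check against the bounds defined above. */
static NvBool hypothetical_attacker_advantage_is_valid(NvU32 attackerAdvantage)
{
    return (attackerAdvantage >= SET_SECURITY_POLICY_ATTACKER_ADVANTAGE_MIN) &&
           (attackerAdvantage <= SET_SECURITY_POLICY_ATTACKER_ADVANTAGE_MAX);
}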

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -387,5 +387,36 @@ typedef struct NV_CONF_COMPUTE_CTRL_CMD_GPU_GET_NUM_SECURE_CHANNELS_PARAMS {
NvU32 maxCeChannels; NvU32 maxCeChannels;
} NV_CONF_COMPUTE_CTRL_CMD_GPU_GET_NUM_SECURE_CHANNELS_PARAMS; } NV_CONF_COMPUTE_CTRL_CMD_GPU_GET_NUM_SECURE_CHANNELS_PARAMS;
/*
* NV_CONF_COMPUTE_CTRL_CMD_GPU_GET_KEY_ROTATION_STATE
 * This control call returns whether key rotation is enabled.
*
* hSubDevice: [IN]
* subdevice handle for the GPU queried
* keyRotationState: [OUT]
* NV_CONF_COMPUTE_CTRL_CMD_GPU_KEY_ROTATION_* value
*
* Possible return values:
* NV_OK
* NV_ERR_NOT_SUPPORTED
* NV_ERR_INVALID_ARGUMENT
* NV_ERR_INVALID_OBJECT_HANDLE
* NV_ERR_INVALID_CLIENT
* NV_ERR_OBJECT_NOT_FOUND
*/
#define NV_CONF_COMPUTE_CTRL_CMD_GPU_GET_KEY_ROTATION_STATE (0xcb33010c) /* finn: Evaluated from "(FINN_NV_CONFIDENTIAL_COMPUTE_CONF_COMPUTE_INTERFACE_ID << 8) | 0xC" */
#define NV_CONF_COMPUTE_CTRL_CMD_GPU_KEY_ROTATION_DISABLED 0 // key rotation is disabled
#define NV_CONF_COMPUTE_CTRL_CMD_GPU_KEY_ROTATION_KERN_ENABLED 1 // key rotation enabled for kernel keys
#define NV_CONF_COMPUTE_CTRL_CMD_GPU_KEY_ROTATION_USER_ENABLED 2 // key rotation enabled for user keys
#define NV_CONF_COMPUTE_CTRL_CMD_GPU_KEY_ROTATION_BOTH_ENABLED 3 // key rotation enabled for both keys
#define NV_CONF_COMPUTE_CTRL_CMD_GPU_GET_KEY_ROTATION_STATE_PARAMS_MESSAGE_ID (0xCU)
typedef struct NV_CONF_COMPUTE_CTRL_CMD_GPU_GET_KEY_ROTATION_STATE_PARAMS {
NvHandle hSubDevice;
NvU32 keyRotationState;
} NV_CONF_COMPUTE_CTRL_CMD_GPU_GET_KEY_ROTATION_STATE_PARAMS;
/* _ctrlcb33_h_ */ /* _ctrlcb33_h_ */
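A small hedged usage sketch for the new control: once the query succeeds, the returned state can be folded into a simple predicate. The helper name and the params pointer are illustrative only; the constants are the ones defined above.

// Illustrative helper: true when user keys participate in key rotation.
static NvBool userKeysRotationEnabled(const NV_CONF_COMPUTE_CTRL_CMD_GPU_GET_KEY_ROTATION_STATE_PARAMS *p)
{
    return (p->keyRotationState == NV_CONF_COMPUTE_CTRL_CMD_GPU_KEY_ROTATION_USER_ENABLED) ||
           (p->keyRotationState == NV_CONF_COMPUTE_CTRL_CMD_GPU_KEY_ROTATION_BOTH_ENABLED);
}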

View File

@ -37,11 +37,9 @@ typedef enum _HYPERVISOR_TYPE
OS_HYPERVISOR_UNKNOWN OS_HYPERVISOR_UNKNOWN
} HYPERVISOR_TYPE; } HYPERVISOR_TYPE;
#define CMD_VGPU_VFIO_WAKE_WAIT_QUEUE 0 #define CMD_VFIO_WAKE_REMOVE_GPU 1
#define CMD_VGPU_VFIO_INJECT_INTERRUPT 1 #define CMD_VGPU_VFIO_PRESENT 2
#define CMD_VGPU_VFIO_REGISTER_MDEV 2 #define CMD_VFIO_PCI_CORE_PRESENT 3
#define CMD_VGPU_VFIO_PRESENT 3
#define CMD_VFIO_PCI_CORE_PRESENT 4
#define MAX_VF_COUNT_PER_GPU 64 #define MAX_VF_COUNT_PER_GPU 64
@ -54,17 +52,11 @@ typedef enum _VGPU_TYPE_INFO
typedef struct typedef struct
{ {
void *vgpuVfioRef;
void *waitQueue;
void *nv; void *nv;
NvU32 *vgpuTypeIds;
NvU8 **vgpuNames;
NvU32 numVgpuTypes;
NvU32 domain; NvU32 domain;
NvU8 bus; NvU32 bus;
NvU8 slot; NvU32 device;
NvU8 function; NvU32 return_status;
NvBool is_virtfn;
} vgpu_vfio_info; } vgpu_vfio_info;
typedef struct typedef struct

View File

@ -193,6 +193,13 @@ NV_CRASHCAT_CAUSE_TYPE crashcatReportV1SourceCauseType(NvCrashCatReport_V1 *pRep
pReport->sourceCause); pReport->sourceCause);
} }
static NV_INLINE
NV_CRASHCAT_CONTAINMENT crashcatReportV1SourceCauseContainment(NvCrashCatReport_V1 *pReport)
{
return (NV_CRASHCAT_CONTAINMENT)DRF_VAL64(_CRASHCAT, _REPORT_V1_SOURCE_CAUSE, _CONTAINMENT,
pReport->sourceCause);
}
// //
// CrashCat RISC-V 64-bit CSR State V1 Bitfield Accessors // CrashCat RISC-V 64-bit CSR State V1 Bitfield Accessors
// //

View File

@ -226,6 +226,16 @@ typedef enum {
NV_CRASHCAT_RISCV_MODE_LAST = 0x3, NV_CRASHCAT_RISCV_MODE_LAST = 0x3,
} NV_CRASHCAT_RISCV_MODE; } NV_CRASHCAT_RISCV_MODE;
typedef enum {
NV_CRASHCAT_CONTAINMENT_UNSPECIFIED = 0x0,
NV_CRASHCAT_CONTAINMENT_RISCV_MODE_M = NV_CRASHCAT_RISCV_MODE_M,
NV_CRASHCAT_CONTAINMENT_RISCV_MODE_S = NV_CRASHCAT_RISCV_MODE_S,
NV_CRASHCAT_CONTAINMENT_RISCV_MODE_U = NV_CRASHCAT_RISCV_MODE_U,
NV_CRASHCAT_CONTAINMENT_RISCV_HART = 0x4,
NV_CRASHCAT_CONTAINMENT_UNCONTAINED = 0xF,
NV_CRASHCAT_CONTAINMENT_LAST = 0xF
} NV_CRASHCAT_CONTAINMENT;
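Taken together with the crashcatReportV1SourceCauseContainment() accessor added above, a consumer can branch on how far a crash was contained. A hedged sketch follows; pReport and the handling comments are illustrative, not taken from the source.

// Illustrative classification of the reported containment scope.
NV_CRASHCAT_CONTAINMENT containment = crashcatReportV1SourceCauseContainment(pReport);

switch (containment)
{
    case NV_CRASHCAT_CONTAINMENT_RISCV_HART:
        // Contained to the reporting hart as a whole.
        break;
    case NV_CRASHCAT_CONTAINMENT_UNCONTAINED:
        // No containment guarantee; treat the whole unit as compromised.
        break;
    default:
        // Mode-level (M/S/U) or unspecified containment.
        break;
}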
// //
// CrashCat Partition // CrashCat Partition
// Represents a NVRISC-V microcode partition index // Represents a NVRISC-V microcode partition index
@ -589,7 +599,22 @@ typedef struct NvCrashCatReport_V1 {
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE_EXCEPTION NV_CRASHCAT_CAUSE_TYPE_EXCEPTION #define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE_EXCEPTION NV_CRASHCAT_CAUSE_TYPE_EXCEPTION
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE_TIMEOUT NV_CRASHCAT_CAUSE_TYPE_TIMEOUT #define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE_TIMEOUT NV_CRASHCAT_CAUSE_TYPE_TIMEOUT
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE_PANIC NV_CRASHCAT_CAUSE_TYPE_PANIC #define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_TYPE_PANIC NV_CRASHCAT_CAUSE_TYPE_PANIC
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_RESERVED 31:4
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_CONTAINMENT 7:4
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_CONTAINMENT_UNSPECIFIED \
NV_CRASHCAT_CONTAINMENT_UNSPECIFIED
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_CONTAINMENT_RISCV_MODE_M \
NV_CRASHCAT_CONTAINMENT_RISCV_MODE_M
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_CONTAINMENT_RISCV_MODE_S \
NV_CRASHCAT_CONTAINMENT_RISCV_MODE_S
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_CONTAINMENT_RISCV_MODE_U \
NV_CRASHCAT_CONTAINMENT_RISCV_MODE_U
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_CONTAINMENT_RISCV_HART \
NV_CRASHCAT_CONTAINMENT_RISCV_HART
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_CONTAINMENT_UNCONTAINED \
NV_CRASHCAT_CONTAINMENT_UNCONTAINED
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_RESERVED 31:8
#define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_IMPL_DEF 63:32 #define NV_CRASHCAT_REPORT_V1_SOURCE_CAUSE_IMPL_DEF 63:32
// //

View File

@ -0,0 +1,34 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef CC_KEYROTATION_H
#define CC_KEYROTATION_H
//
// Default threshold value derived from SECURITY_POLICY_ATTACKER_ADVANTAGE_DEFAULT
// Minimum threshold is based on the minimum value accepted by confComputeSetKeyRotation.
//
#define KEY_ROTATION_MINIMUM_INTERNAL_THRESHOLD (134217727u)
#define KEY_ROTATION_DEFAULT_INTERNAL_THRESHOLD (24296003999ull)
#endif // CC_KEYROTATION_H
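A hedged sketch of how these constants are typically consumed when a caller-supplied threshold is applied; requestedThreshold is an illustrative variable, not part of this header.

// Illustrative selection of the effective rotation threshold.
NvU64 threshold;

if (requestedThreshold == 0)
    threshold = KEY_ROTATION_DEFAULT_INTERNAL_THRESHOLD;      // nothing requested: use the default
else if (requestedThreshold < KEY_ROTATION_MINIMUM_INTERNAL_THRESHOLD)
    threshold = KEY_ROTATION_MINIMUM_INTERNAL_THRESHOLD;      // clamp to the supported floor
else
    threshold = requestedThreshold;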

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -55,8 +55,8 @@ enum
CC_LKEYID_GSP_CPU_REPLAYABLE_FAULT, CC_LKEYID_GSP_CPU_REPLAYABLE_FAULT,
CC_LKEYID_CPU_GSP_RESERVED2, CC_LKEYID_CPU_GSP_RESERVED2,
CC_LKEYID_GSP_CPU_NON_REPLAYABLE_FAULT, CC_LKEYID_GSP_CPU_NON_REPLAYABLE_FAULT,
CC_LKEYID_GSP_SEC2_LOCKED_RPC,
CC_LKEYID_SEC2_GSP_LOCKED_RPC, CC_LKEYID_SEC2_GSP_LOCKED_RPC,
CC_LKEYID_GSP_SEC2_LOCKED_RPC,
CC_KEYSPACE_GSP_SIZE // This is always the last element. CC_KEYSPACE_GSP_SIZE // This is always the last element.
}; };
// The fault buffers only support GPU-to-CPU encryption, so the CPU-to-GPU encryption slot // The fault buffers only support GPU-to-CPU encryption, so the CPU-to-GPU encryption slot
@ -75,6 +75,8 @@ enum
CC_LKEYID_CPU_SEC2_HMAC_USER, CC_LKEYID_CPU_SEC2_HMAC_USER,
CC_LKEYID_CPU_SEC2_DATA_KERN, CC_LKEYID_CPU_SEC2_DATA_KERN,
CC_LKEYID_CPU_SEC2_HMAC_KERN, CC_LKEYID_CPU_SEC2_HMAC_KERN,
CC_LKEYID_CPU_SEC2_DATA_SCRUBBER,
CC_LKEYID_CPU_SEC2_HMAC_SCRUBBER,
CC_KEYSPACE_SEC2_SIZE // This is always the last element. CC_KEYSPACE_SEC2_SIZE // This is always the last element.
}; };
@ -82,6 +84,8 @@ enum
#define CC_LKEYID_CPU_SEC2_HMAC_USER_STR "cpu_sec2_hmac_user" #define CC_LKEYID_CPU_SEC2_HMAC_USER_STR "cpu_sec2_hmac_user"
#define CC_LKEYID_CPU_SEC2_DATA_KERN_STR "cpu_sec2_data_kernel" #define CC_LKEYID_CPU_SEC2_DATA_KERN_STR "cpu_sec2_data_kernel"
#define CC_LKEYID_CPU_SEC2_HMAC_KERN_STR "cpu_sec2_hmac_kernel" #define CC_LKEYID_CPU_SEC2_HMAC_KERN_STR "cpu_sec2_hmac_kernel"
#define CC_LKEYID_CPU_SEC2_DATA_SCRUBBER_STR "cpu_sec2_data_scrubber"
#define CC_LKEYID_CPU_SEC2_HMAC_SCRUBBER_STR "cpu_sec2_hmac_scrubber"
enum enum
{ {
@ -188,7 +192,11 @@ enum
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_SEC2_DATA_KERN) ? \ (CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_SEC2_DATA_KERN) ? \
CC_LKEYID_CPU_SEC2_DATA_KERN_STR : \ CC_LKEYID_CPU_SEC2_DATA_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_SEC2_HMAC_KERN) ? \ (CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_SEC2_HMAC_KERN) ? \
CC_LKEYID_CPU_SEC2_HMAC_KERN_STR : NULL : \ CC_LKEYID_CPU_SEC2_HMAC_KERN_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_SEC2_DATA_SCRUBBER) ? \
CC_LKEYID_CPU_SEC2_DATA_SCRUBBER_STR : \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_CPU_SEC2_HMAC_SCRUBBER) ? \
CC_LKEYID_CPU_SEC2_HMAC_SCRUBBER_STR : NULL : \
(CC_GKEYID_GET_KEYSPACE(a) == CC_KEYSPACE_LCE0) ? \ (CC_GKEYID_GET_KEYSPACE(a) == CC_KEYSPACE_LCE0) ? \
(CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_USER) ? \ (CC_GKEYID_GET_LKEYID(a) == CC_LKEYID_LCE_H2D_USER) ? \
CC_LKEYID_LCE0_H2D_USER_STR : \ CC_LKEYID_LCE0_H2D_USER_STR : \

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -29,6 +29,10 @@
#define TNVL_GET_ATT_REPORT_SUBMESSAGE_ID 0x1 #define TNVL_GET_ATT_REPORT_SUBMESSAGE_ID 0x1
#define TNVL_LOCK_CONFIG_SUBMESSAGE_ID 0x2 #define TNVL_LOCK_CONFIG_SUBMESSAGE_ID 0x2
#define NVSWITCH_IK_HASH_LENGTH (48)
#define NVSWITCH_ATT_CERT_SIZE_FIELD_LENGTH (2)
#define NVSWITCH_ATT_RSVD1_FIELD_LENGTH (2)
#pragma pack(1) #pragma pack(1)
/*! /*!
@ -65,6 +69,7 @@ typedef struct
*/ */
typedef struct typedef struct
{ {
NvU8 nvdmType;
NVDM_PAYLOAD_COMMAND_RESPONSE cmdResponse; NVDM_PAYLOAD_COMMAND_RESPONSE cmdResponse;
NvU8 subMessageId; NvU8 subMessageId;
NvU8 rsvd0; NvU8 rsvd0;
@ -72,6 +77,7 @@ typedef struct
NvU8 majorVersion; NvU8 majorVersion;
NvU16 certChainLength; NvU16 certChainLength;
NvU16 rsvd1; NvU16 rsvd1;
NvU8 devIkHash[NVSWITCH_IK_HASH_LENGTH];
NvU8 certChain[NVSWITCH_ATTESTATION_CERT_CHAIN_MAX_SIZE]; NvU8 certChain[NVSWITCH_ATTESTATION_CERT_CHAIN_MAX_SIZE];
} TNVL_GET_ATT_CERTS_RSP_PAYLOAD; } TNVL_GET_ATT_CERTS_RSP_PAYLOAD;
@ -92,6 +98,7 @@ typedef struct
*/ */
typedef struct typedef struct
{ {
NvU8 nvdmType;
NVDM_PAYLOAD_COMMAND_RESPONSE cmdResponse; NVDM_PAYLOAD_COMMAND_RESPONSE cmdResponse;
NvU8 subMessageId; NvU8 subMessageId;
NvU8 rsvd0; NvU8 rsvd0;
@ -117,6 +124,7 @@ typedef struct
*/ */
typedef struct typedef struct
{ {
NvU8 nvdmType;
NVDM_PAYLOAD_COMMAND_RESPONSE cmdResponse; NVDM_PAYLOAD_COMMAND_RESPONSE cmdResponse;
NvU8 subMessageId; NvU8 subMessageId;
NvU8 rsvd0; NvU8 rsvd0;

View File

@ -344,6 +344,7 @@
#define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE 15:8 #define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE 15:8
#define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE_CORRECTABLE_ERROR 0 #define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE_CORRECTABLE_ERROR 0
#define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE_UNCORRECTABLE_ERROR 1 #define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE_UNCORRECTABLE_ERROR 1
#define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE_ECC_STATE_FLAGS 2
#define NV_MSGBOX_CMD_ARG1_ENERGY_COUNTER_GPU 0x00000000 #define NV_MSGBOX_CMD_ARG1_ENERGY_COUNTER_GPU 0x00000000
#define NV_MSGBOX_CMD_ARG1_ENERGY_COUNTER_MODULE 0x00000003 #define NV_MSGBOX_CMD_ARG1_ENERGY_COUNTER_MODULE 0x00000003
@ -968,6 +969,10 @@
#define NV_MSGBOX_DATA_CAP_5_MEMORY_CAPACITY_UTILIZATION_NOT_AVAILABLE 0x00000000 #define NV_MSGBOX_DATA_CAP_5_MEMORY_CAPACITY_UTILIZATION_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_5_MEMORY_CAPACITY_UTILIZATION_AVAILABLE 0x00000001 #define NV_MSGBOX_DATA_CAP_5_MEMORY_CAPACITY_UTILIZATION_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_5_SRAM_ERROR_THRESHOLD_EXCEEDED 9:9
#define NV_MSGBOX_DATA_CAP_5_SRAM_ERROR_THRESHOLD_EXCEEDED_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_5_SRAM_ERROR_THRESHOLD_EXCEEDED_AVAILABLE 0x00000001
/* ECC counters */ /* ECC counters */
#define NV_MSGBOX_DATA_ECC_CNT_16BIT_DBE 31:16 #define NV_MSGBOX_DATA_ECC_CNT_16BIT_DBE 31:16
#define NV_MSGBOX_DATA_ECC_CNT_16BIT_SBE 16:0 #define NV_MSGBOX_DATA_ECC_CNT_16BIT_SBE 16:0
@ -1002,6 +1007,13 @@
#define NV_MSGBOX_DATA_ECC_V5_METADATA_LOCATION_ID 26:22 #define NV_MSGBOX_DATA_ECC_V5_METADATA_LOCATION_ID 26:22
#define NV_MSGBOX_DATA_ECC_V5_METADATA_SUBLOCATION_ID 31:27 #define NV_MSGBOX_DATA_ECC_V5_METADATA_SUBLOCATION_ID 31:27
/* ECC state flags */
#define NV_MSGBOX_DATA_ECC_V6_STATE_FLAGS 31:0
#define NV_MSGBOX_DATA_ECC_V6_STATE_FLAGS_SRAM_ERROR_THRESHOLD_EXCEEDED 0:0
#define NV_MSGBOX_DATA_ECC_V6_STATE_FLAGS_SRAM_ERROR_THRESHOLD_EXCEEDED_FALSE 0
#define NV_MSGBOX_DATA_ECC_V6_STATE_FLAGS_SRAM_ERROR_THRESHOLD_EXCEEDED_TRUE 1
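The new state-flags word follows the same x:y bitfield convention as the rest of this file, so the stock DRF helpers can test the flag. A hedged sketch, where data is assumed to hold the returned DATA word:

// Illustrative check of the SRAM error threshold flag in the ECC v6 state flags.
if (FLD_TEST_DRF(_MSGBOX, _DATA_ECC_V6_STATE_FLAGS, _SRAM_ERROR_THRESHOLD_EXCEEDED, _TRUE, data))
{
    // The GPU reports that its SRAM error threshold has been exceeded.
}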
/* NV_MSGBOX_CMD_OPCODE_SCRATCH_COPY src offset argument */ /* NV_MSGBOX_CMD_OPCODE_SCRATCH_COPY src offset argument */
#define NV_MSGBOX_DATA_COPY_SRC_OFFSET 7:0 #define NV_MSGBOX_DATA_COPY_SRC_OFFSET 7:0

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -76,7 +76,7 @@ typedef struct _NV_SPDM_DESC_HEADER
#define NV_SPDM_MAX_TRANSCRIPT_BUFFER_SIZE (2 * NV_SPDM_MAX_SPDM_PAYLOAD_SIZE) #define NV_SPDM_MAX_TRANSCRIPT_BUFFER_SIZE (2 * NV_SPDM_MAX_SPDM_PAYLOAD_SIZE)
// Limited by the transport size, do not increase without increasing transport buffer. // Limited by the transport size, do not increase without increasing transport buffer.
#define NV_SPDM_MAX_RANDOM_MSG_BYTES (0x80) #define NV_SPDM_MAX_RANDOM_MSG_BYTES (0x0)
#ifdef NVRM #ifdef NVRM
#include "gpu/mem_mgr/mem_desc.h" #include "gpu/mem_mgr/mem_desc.h"

View File

@ -1041,13 +1041,12 @@ NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, c
NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16); NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool); NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8); NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU32, void *, NvBool *); NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *,
NvU64 *, NvU64 *, NvU32 *, NvBool *, NvU8 *);
NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU64 *); NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU64 *);
NV_STATUS NV_API_CALL nv_vgpu_start(nvidia_stack_t *, const NvU8 *, void *, NvS32 *, NvU8 *, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 **, NvU64 **, NvU32 *);
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *); NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *);
NV_STATUS NV_API_CALL nv_vgpu_update_request(nvidia_stack_t *, const NvU8 *, NvU32, NvU64 *, NvU64 *, const char *);
NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *); NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *);
NV_STATUS NV_API_CALL nv_gpu_unbind_event(nvidia_stack_t *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_get_usermap_access_params(nv_state_t*, nv_usermap_access_params_t*); NV_STATUS NV_API_CALL nv_get_usermap_access_params(nv_state_t*, nv_usermap_access_params_t*);
nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*); nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*);

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2014-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -55,6 +55,7 @@
static NV_STATUS nv_parse_config_params(const char *, const char *, const char, NvU32 *); static NV_STATUS nv_parse_config_params(const char *, const char *, const char, NvU32 *);
void hypervisorSetHypervVgpuSupported_IMPL(OBJHYPERVISOR *pHypervisor) void hypervisorSetHypervVgpuSupported_IMPL(OBJHYPERVISOR *pHypervisor)
{ {
pHypervisor->bIsHypervVgpuSupported = NV_TRUE; pHypervisor->bIsHypervVgpuSupported = NV_TRUE;
@ -73,7 +74,7 @@ NV_STATUS hypervisorInjectInterrupt_IMPL
{ {
NV_STATUS status = NV_ERR_NOT_SUPPORTED; NV_STATUS status = NV_ERR_NOT_SUPPORTED;
if (pVgpuNsIntr->pVgpuVfioRef) if (osIsVgpuVfioPresent() == NV_TRUE)
return NV_ERR_NOT_SUPPORTED; return NV_ERR_NOT_SUPPORTED;
else else
{ {
@ -95,135 +96,6 @@ HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void)
return hypervisorGetHypervisorType(pHypervisor); return hypervisorGetHypervisorType(pHypervisor);
} }
static NV_STATUS get_available_instances(
NvU32 *avail_instances,
nv_state_t *pNv,
VGPU_TYPE *vgpuTypeInfo,
NvU32 pgpuIndex,
NvU8 devfn
)
{
NV_STATUS rmStatus = NV_OK;
OBJGPU *pGpu = NULL;
OBJSYS *pSys = SYS_GET_INSTANCE();
KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
*avail_instances = 0;
pGpu = NV_GET_NV_PRIV_PGPU(pNv);
if (pGpu == NULL)
{
NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid \n", __FUNCTION__);
rmStatus = NV_ERR_INVALID_STATE;
goto exit;
}
/* TODO: Needs to have a proper fix this for DriverVM config */
if (gpuIsSriovEnabled(pGpu) &&
!(pHypervisor->getProperty(pHypervisor, PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED)))
{
NvU8 fnId = devfn - pGpu->sriovState.firstVFOffset;
if (fnId > 63)
{
NV_ASSERT(0);
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto exit;
}
if (IS_MIG_ENABLED(pGpu))
{
if (IS_MIG_IN_USE(pGpu)) {
NvU64 swizzIdInUseMask = 0;
NvU32 partitionFlag = PARTITIONID_INVALID;
KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
NvU32 id;
swizzIdInUseMask = kmigmgrGetSwizzIdInUseMask(pGpu, pKernelMIGManager);
if (!vgpuTypeInfo->gpuInstanceSize)
{
// Query for a non MIG vgpuType
NV_PRINTF(LEVEL_INFO, "%s Query for a non MIG vGPU type \n",
__FUNCTION__);
rmStatus = NV_OK;
goto exit;
}
rmStatus = kvgpumgrGetPartitionFlag(vgpuTypeInfo->vgpuTypeId,
&partitionFlag);
if (rmStatus != NV_OK)
{
// Query for a non MIG vgpuType
NV_PRINTF(LEVEL_ERROR, "%s failed to get partition flags.\n",
__FUNCTION__);
goto exit;
}
// Determine valid swizzids not assigned to any vGPU device.
FOR_EACH_INDEX_IN_MASK(64, id, swizzIdInUseMask)
{
KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance;
NvU64 mask = 0;
rmStatus = kmigmgrGetGPUInstanceInfo(pGpu, pKernelMIGManager,
id, &pKernelMIGGpuInstance);
if (rmStatus != NV_OK)
{
// Didn't find requested GPU instance
NV_PRINTF(LEVEL_ERROR,
"No valid GPU instance with SwizzId - %d found\n", id);
goto exit;
}
mask = NVBIT64(id);
if (pKernelMIGGpuInstance->partitionFlag == partitionFlag)
{
// Validate that same ID is not already set and VF is available
if (!(mask & pKernelVgpuMgr->pgpuInfo[pgpuIndex].assignedSwizzIdMask) &&
!(pKernelVgpuMgr->pgpuInfo[pgpuIndex].createdVfMask & NVBIT64(fnId)))
{
*avail_instances = 1;
break;
}
}
}
FOR_EACH_INDEX_IN_MASK_END;
}
}
else
{
if (pKernelVgpuMgr->pgpuInfo[pgpuIndex].numCreatedVgpu < vgpuTypeInfo->maxInstance)
{
if (vgpuTypeInfo->gpuInstanceSize)
{
// Query for a MIG vgpuType
NV_PRINTF(LEVEL_INFO, "%s Query for a MIG vGPU type \n",
__FUNCTION__);
rmStatus = NV_OK;
goto exit;
}
if (!(pKernelVgpuMgr->pgpuInfo[pgpuIndex].createdVfMask & NVBIT64(fnId)))
{
if (kvgpumgrCheckVgpuTypeCreatable(pGpu, &pKernelVgpuMgr->pgpuInfo[pgpuIndex], vgpuTypeInfo) == NV_OK)
*avail_instances = 1;
}
}
}
}
else
{
if (kvgpumgrCheckVgpuTypeCreatable(pGpu, &pKernelVgpuMgr->pgpuInfo[pgpuIndex], vgpuTypeInfo) == NV_OK)
*avail_instances = vgpuTypeInfo->maxInstance - pKernelVgpuMgr->pgpuInfo[pgpuIndex].numCreatedVgpu;
}
exit:
return rmStatus;
}
#define MAX_STR_LEN 256 #define MAX_STR_LEN 256
NV_STATUS NV_API_CALL nv_vgpu_get_type_info( NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
nvidia_stack_t *sp, nvidia_stack_t *sp,
@ -240,6 +112,7 @@ NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
NV_STATUS rmStatus = NV_OK; NV_STATUS rmStatus = NV_OK;
VGPU_TYPE *vgpuTypeInfo; VGPU_TYPE *vgpuTypeInfo;
NvU32 pgpuIndex, i, avail_instances = 0; NvU32 pgpuIndex, i, avail_instances = 0;
OBJGPU *pGpu = NULL;
void *fp; void *fp;
NV_ENTER_RM_RUNTIME(sp,fp); NV_ENTER_RM_RUNTIME(sp,fp);
@ -262,22 +135,17 @@ NV_STATUS NV_API_CALL nv_vgpu_get_type_info(
switch (type_info) switch (type_info)
{ {
case VGPU_TYPE_NAME:
os_snprintf(buffer, VGPU_STRING_BUFFER_SIZE, "%s\n",
vgpuTypeInfo->vgpuName);
break;
case VGPU_TYPE_DESCRIPTION:
os_snprintf(buffer, MAX_STR_LEN,
"num_heads=%d, frl_config=%d, "
"framebuffer=%lluM, max_resolution=%dx%d, max_instance=%d\n",
vgpuTypeInfo->numHeads, vgpuTypeInfo->frlConfig,
vgpuTypeInfo->profileSize >> 20,
vgpuTypeInfo->maxResolutionX,
vgpuTypeInfo->maxResolutionY,
vgpuTypeInfo->maxInstance);
break;
case VGPU_TYPE_INSTANCES: case VGPU_TYPE_INSTANCES:
rmStatus = get_available_instances(&avail_instances, pNv, pGpu = NV_GET_NV_PRIV_PGPU(pNv);
if (pGpu == NULL)
{
NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid \n",
__FUNCTION__);
rmStatus = NV_ERR_INVALID_STATE;
goto exit;
}
rmStatus = kvgpumgrGetAvailableInstances(&avail_instances, pGpu,
vgpuTypeInfo, vgpuTypeInfo,
pgpuIndex, devfn); pgpuIndex, devfn);
if (rmStatus != NV_OK) if (rmStatus != NV_OK)
@ -315,6 +183,7 @@ NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(
{ {
THREAD_STATE_NODE threadState; THREAD_STATE_NODE threadState;
OBJSYS *pSys = SYS_GET_INSTANCE(); OBJSYS *pSys = SYS_GET_INSTANCE();
OBJGPU *pGpu = NULL;
KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys); KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
NV_STATUS rmStatus = NV_OK; NV_STATUS rmStatus = NV_OK;
NvU32 pgpuIndex, i, avail_instances = 0; NvU32 pgpuIndex, i, avail_instances = 0;
@ -355,7 +224,15 @@ NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(
continue; continue;
} }
rmStatus = get_available_instances(&avail_instances, pNv, pGpu = NV_GET_NV_PRIV_PGPU(pNv);
if (pGpu == NULL)
{
NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid \n",
__FUNCTION__);
goto exit;
}
rmStatus = kvgpumgrGetAvailableInstances(&avail_instances, pGpu,
vgpuTypeInfo, pgpuIndex, vgpuTypeInfo, pgpuIndex,
devfn); devfn);
if (rmStatus != NV_OK) if (rmStatus != NV_OK)
@ -374,6 +251,7 @@ NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(
} }
} }
exit:
// UNLOCK: release API lock // UNLOCK: release API lock
rmapiLockRelease(); rmapiLockRelease();
} }
@ -517,79 +395,19 @@ exit:
return rmStatus; return rmStatus;
} }
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info( static NV_STATUS
nvidia_stack_t *sp, _nv_vgpu_get_bar_size(OBJGPU *pGpu, KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice,
nv_state_t *pNv, NvU32 regionIndex, NvU64 *size, NvU8 *configParams)
const NvU8 *pMdevUuid,
NvU64 *size,
NvU32 regionIndex,
void *pVgpuVfioRef,
NvBool *isBar64bit
)
{ {
REQUEST_VGPU_INFO_NODE *pRequestVgpu = NULL;
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus = NV_OK, status;
OBJGPU *pGpu = NULL;
KernelBus *pKernelBus;
KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
void *fp = NULL;
NvU32 value = 0;
OBJSYS *pSys = SYS_GET_INSTANCE(); OBJSYS *pSys = SYS_GET_INSTANCE();
KernelVgpuMgr * pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys); KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
NV_STATUS status;
NV_ENTER_RM_RUNTIME(sp,fp); KernelBus *pKernelBus;
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); NvU32 value = 0;
/*
* This function can be used to query both BAR 64bit state and/or BAR size
* If neither is queried, return with error.
*/
if ((size == NULL) && (isBar64bit == NULL))
{
rmStatus = NV_ERR_INVALID_ARGUMENT;
goto exit;
}
// LOCK: acquire API lock
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT, rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR), exit);
pGpu = NV_GET_NV_PRIV_PGPU(pNv);
if (pGpu == NULL)
{
NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid \n", __FUNCTION__);
rmStatus = NV_ERR_INVALID_STATE;
goto release_lock;
}
/* Get input BAR index 64bit state */
if (isBar64bit != NULL)
{
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
is_bar_64bit(pGpu, regionIndex, isBar64bit), release_lock);
/* Query is only for BAR index 64bit state*/
if (size == NULL)
goto release_lock;
}
pKernelBus = GPU_GET_KERNEL_BUS(pGpu); pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
*size = kbusGetPciBarSize(pKernelBus, regionIndex); *size = kbusGetPciBarSize(pKernelBus, regionIndex);
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
kvgpumgrGetHostVgpuDeviceFromMdevUuid(pNv->gpu_id,
pMdevUuid,
&pKernelHostVgpuDevice), release_lock);
pRequestVgpu = pKernelHostVgpuDevice->pRequestVgpuInfoNode;
if (pRequestVgpu == NULL)
{
rmStatus = NV_ERR_INVALID_POINTER;
goto release_lock;
}
pKernelHostVgpuDevice->pVgpuVfioRef = pVgpuVfioRef;
if (regionIndex == NV_VFIO_PCI_BAR1_REGION_INDEX) if (regionIndex == NV_VFIO_PCI_BAR1_REGION_INDEX)
{ {
VGPU_TYPE *vgpuTypeInfo; VGPU_TYPE *vgpuTypeInfo;
@ -597,13 +415,11 @@ NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(
NvBool bOverrideBar1Size = NV_FALSE; NvBool bOverrideBar1Size = NV_FALSE;
// Read BAR1 length from vgpuTypeInfo // Read BAR1 length from vgpuTypeInfo
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT, NV_ASSERT_OK_OR_RETURN(kvgpumgrGetVgpuTypeInfo(pKernelHostVgpuDevice->vgpuType,
kvgpumgrGetVgpuTypeInfo(pKernelHostVgpuDevice->vgpuType, &vgpuTypeInfo), release_lock); &vgpuTypeInfo));
*size = vgpuTypeInfo->bar1Length << 20; *size = vgpuTypeInfo->bar1Length << 20;
NV_ASSERT_OK_OR_RETURN(kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pGpu->gpuId, &pgpuIndex));
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pNv->gpu_id, &pgpuIndex), release_lock);
/* /*
* check for 'override_bar1_size' param in vgpuExtraParams list first, * check for 'override_bar1_size' param in vgpuExtraParams list first,
@ -611,11 +427,13 @@ NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(
*/ */
status = nv_parse_config_params((const char*)vgpuTypeInfo->vgpuExtraParams, status = nv_parse_config_params((const char*)vgpuTypeInfo->vgpuExtraParams,
"override_bar1_size", ';', &value); "override_bar1_size", ';', &value);
if (status == NV_OK && value)
if (status == NV_OK && value) { {
bOverrideBar1Size = NV_TRUE; bOverrideBar1Size = NV_TRUE;
} else if (status == NV_ERR_OBJECT_NOT_FOUND) { }
status = nv_parse_config_params(pRequestVgpu->configParams, else if (status == NV_ERR_OBJECT_NOT_FOUND)
{
status = nv_parse_config_params((const char *)configParams,
"override_bar1_size", ',', &value); "override_bar1_size", ',', &value);
if (status == NV_OK && value) if (status == NV_OK && value)
bOverrideBar1Size = NV_TRUE; bOverrideBar1Size = NV_TRUE;
@ -630,11 +448,12 @@ NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(
} }
} }
if (bOverrideBar1Size) { if (bOverrideBar1Size)
{
NvU64 bar1SizeInBytes, guestBar1; NvU64 bar1SizeInBytes, guestBar1;
NvU64 gpuBar1LowerLimit = 256 * 1024 * 1024; // bar1 lower limit for override_bar1_length parameter NvU64 gpuBar1LowerLimit = 256 * 1024 * 1024; // bar1 lower limit for override_bar1_length parameter
bar1SizeInBytes = kbusGetPciBarSize(pKernelBus, NV_VFIO_PCI_BAR1_REGION_INDEX); bar1SizeInBytes = kbusGetPciBarSize(pKernelBus, NV_VFIO_PCI_BAR1_REGION_INDEX);
if (pKernelVgpuMgr->pgpuInfo[pgpuIndex].sriovEnabled) if (pKernelVgpuMgr->pgpuInfo[pgpuIndex].sriovEnabled)
{ {
*size = pGpu->sriovState.vfBarSize[1]; *size = pGpu->sriovState.vfBarSize[1];
@ -649,7 +468,7 @@ NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(
else if (regionIndex == NV_VFIO_PCI_BAR2_REGION_INDEX || else if (regionIndex == NV_VFIO_PCI_BAR2_REGION_INDEX ||
regionIndex == NV_VFIO_PCI_BAR3_REGION_INDEX) regionIndex == NV_VFIO_PCI_BAR3_REGION_INDEX)
{ {
status = nv_parse_config_params(pRequestVgpu->configParams, status = nv_parse_config_params((const char *)configParams,
"address64", ',', &value); "address64", ',', &value);
if ((status != NV_OK) || ((status == NV_OK) && (value != 0))) if ((status != NV_OK) || ((status == NV_OK) && (value != 0)))
@ -661,6 +480,48 @@ NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(
} }
} }
return NV_OK;
}
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info
(
nvidia_stack_t *sp,
nv_state_t *pNv,
const NvU8 *pMdevUuid,
NvU64 *barSizes,
NvU64 *sparseOffsets,
NvU64 *sparseSizes,
NvU32 *sparseCount,
NvBool *isBar064bit,
NvU8 *configParams
)
{
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus = NV_OK;
OBJGPU *pGpu = NULL;
void *fp = NULL;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR), exit);
pGpu = NV_GET_NV_PRIV_PGPU(pNv);
if (pGpu == NULL)
{
NV_PRINTF(LEVEL_ERROR, "%s GPU handle is not valid \n", __FUNCTION__);
rmStatus = NV_ERR_INVALID_STATE;
goto release_lock;
}
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
nv_vgpu_rm_get_bar_info(pGpu, pMdevUuid, barSizes,
sparseOffsets, sparseSizes,
sparseCount, isBar064bit,
configParams),
release_lock);
release_lock: release_lock:
// UNLOCK: release API lock // UNLOCK: release API lock
rmapiLockRelease(); rmapiLockRelease();
@ -737,48 +598,6 @@ exit:
return rmStatus; return rmStatus;
} }
NV_STATUS osVgpuVfioWake(
void *waitQueue
)
{
vgpu_vfio_info vgpu_info;
vgpu_info.waitQueue = waitQueue;
return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VGPU_VFIO_WAKE_WAIT_QUEUE);
}
NV_STATUS NV_API_CALL nv_vgpu_start(
nvidia_stack_t *sp,
const NvU8 *pMdevUuid,
void *waitQueue,
NvS32 *returnStatus,
NvU8 *vmName,
NvU32 qemuPid
)
{
THREAD_STATE_NODE threadState;
NV_STATUS rmStatus = NV_OK;
void *fp = NULL;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
{
rmStatus = kvgpumgrStart(pMdevUuid, waitQueue, returnStatus,
vmName, qemuPid);
// UNLOCK: release API lock
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
}
static NV_STATUS nv_parse_config_params( static NV_STATUS nv_parse_config_params(
const char *config_params, const char *config_params,
const char *key, const char *key,
@ -815,216 +634,159 @@ static NV_STATUS nv_parse_config_params(
return rmStatus; return rmStatus;
} }
NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap( static NV_STATUS _nv_vgpu_get_sparse_mmap(
nvidia_stack_t *sp , OBJGPU *pGpu,
nv_state_t *pNv, KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice,
const NvU8 *pMdevUuid, NvU64 *offsets,
NvU64 **offsets, NvU64 *sizes,
NvU64 **sizes, NvU32 *numAreas,
NvU32 *numAreas NvU8 *configParams
) )
{ {
THREAD_STATE_NODE threadState; NV_STATUS rmStatus = NV_OK, status;
NV_STATUS rmStatus = NV_ERR_INVALID_STATE, status; OBJTMR *pTmr = GPU_GET_TIMER(pGpu);
OBJGPU *pGpu = NULL; KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
OBJTMR *pTmr = NULL; KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu);
KernelFifo *pKernelFifo = NULL; NvU32 value = 0;
void *fp = NULL;
REQUEST_VGPU_INFO_NODE *pRequestVgpu = NULL;
KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
NvU32 bar0TmrMapSize = 0, bar0FifoMapSize = 0, value = 0;
NvU64 bar0TmrMapOffset = 0, bar0FifoMapOffset = 0;
NvU64 *vfRegionSizes = NULL;
NvU64 *vfRegionOffsets = NULL;
KernelBif *pKernelBif = NULL;
NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
// LOCK: acquire API lock
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
{
pGpu = NV_GET_NV_PRIV_PGPU(pNv);
if (pGpu == NULL)
{
rmStatus = NV_ERR_INVALID_STATE;
goto cleanup;
}
pTmr = GPU_GET_TIMER(pGpu);
pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
pKernelBif = GPU_GET_KERNEL_BIF(pGpu);
*numAreas = 0; *numAreas = 0;
rmStatus = kvgpumgrGetHostVgpuDeviceFromMdevUuid(pNv->gpu_id, pMdevUuid,
&pKernelHostVgpuDevice);
if (rmStatus == NV_OK)
{
if (pKernelHostVgpuDevice->gfid != 0) if (pKernelHostVgpuDevice->gfid != 0)
{ {
rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice, os_page_size, rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice,
numAreas, NULL, NULL); os_page_size, numAreas, NULL, NULL);
if (rmStatus == NV_OK) if (rmStatus == NV_OK)
{ {
rmStatus = os_alloc_mem((void **)&vfRegionOffsets, sizeof(NvU64) * (*numAreas)); if (*numAreas > NVA081_MAX_SPARSE_REGION_COUNT)
if (rmStatus != NV_OK)
goto cleanup;
rmStatus = os_alloc_mem((void **)&vfRegionSizes, sizeof (NvU64) * (*numAreas));
if (rmStatus != NV_OK)
{ {
os_free_mem(vfRegionOffsets); NV_PRINTF(LEVEL_ERROR, "Not enough space for sparse mmap region info\n");
goto cleanup; return NV_ERR_INSUFFICIENT_RESOURCES;
} }
if (vfRegionOffsets && vfRegionSizes)
{
rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice, os_page_size, rmStatus = kbifGetVFSparseMmapRegions_HAL(pGpu, pKernelBif, pKernelHostVgpuDevice, os_page_size,
numAreas, vfRegionOffsets, vfRegionSizes); numAreas, offsets, sizes);
if (rmStatus == NV_OK) if (rmStatus != NV_OK)
{ return rmStatus;
*offsets = vfRegionOffsets;
*sizes = vfRegionSizes;
}
else
{
os_free_mem(vfRegionOffsets);
os_free_mem(vfRegionSizes);
} }
} }
else else
{ {
if (vfRegionOffsets != NULL) status = nv_parse_config_params((const char *)configParams,
os_free_mem(vfRegionOffsets); "direct_gpu_timer_access", ',', &value);
if (vfRegionSizes != NULL)
os_free_mem(vfRegionSizes);
rmStatus = NV_ERR_INSUFFICIENT_RESOURCES;
}
}
}
else
{
pRequestVgpu = pKernelHostVgpuDevice->pRequestVgpuInfoNode;
if (pRequestVgpu == NULL)
{
rmStatus = NV_ERR_INVALID_POINTER;
goto cleanup;
}
status = nv_parse_config_params(pRequestVgpu->configParams, "direct_gpu_timer_access", ',', &value);
if ((status == NV_OK) && (value != 0)) if ((status == NV_OK) && (value != 0))
{ {
rmStatus = tmrGetTimerBar0MapInfo_HAL(pGpu, pTmr, NvU64 offset = 0;
&bar0TmrMapOffset, NvU32 size = 0;
&bar0TmrMapSize);
rmStatus = tmrGetTimerBar0MapInfo_HAL(pGpu, pTmr, &offset, &size);
if (rmStatus == NV_OK) if (rmStatus == NV_OK)
{
offsets[*numAreas] = offset;
sizes[*numAreas] = size;
(*numAreas)++; (*numAreas)++;
else }
NV_PRINTF(LEVEL_ERROR,
"%s Failed to get NV_PTIMER region \n",
__FUNCTION__);
} }
value = 0; value = 0;
{ {
status = kfifoGetUsermodeMapInfo_HAL(pGpu, pKernelFifo, NvU64 offset = 0;
&bar0FifoMapOffset, NvU32 size = 0;
&bar0FifoMapSize);
status = kfifoGetUsermodeMapInfo_HAL(pGpu, pKernelFifo, &offset, &size);
if (status == NV_OK) if (status == NV_OK)
{
offsets[*numAreas] = offset;
sizes[*numAreas] = size;
(*numAreas)++; (*numAreas)++;
} }
if (*numAreas != 0)
{
NvU32 i = 0;
NvU64 *tmpOffset, *tmpSize;
rmStatus = os_alloc_mem((void **)offsets, sizeof(NvU64) * (*numAreas));
if (rmStatus != NV_OK)
goto cleanup;
rmStatus = os_alloc_mem((void **)sizes, sizeof (NvU64) * (*numAreas));
if (rmStatus != NV_OK)
{
os_free_mem(*offsets);
goto cleanup;
}
tmpOffset = *offsets;
tmpSize = *sizes;
if (bar0TmrMapSize != 0)
{
tmpOffset[i] = bar0TmrMapOffset;
tmpSize[i] = bar0TmrMapSize;
i++;
}
if (bar0FifoMapSize != 0)
{
tmpOffset[i] = bar0FifoMapOffset;
tmpSize[i] = bar0FifoMapSize;
} }
} }
}
}
cleanup:
// UNLOCK: release API lock
rmapiLockRelease();
}
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus; return rmStatus;
} }
NV_STATUS NV_API_CALL nv_vgpu_update_request( NV_STATUS nv_vgpu_rm_get_bar_info
nvidia_stack_t *sp , (
OBJGPU *pGpu,
const NvU8 *pMdevUuid, const NvU8 *pMdevUuid,
VGPU_DEVICE_STATE deviceState, NvU64 *barSizes,
NvU64 *offsets, NvU64 *sparseOffsets,
NvU64 *sizes, NvU64 *sparseSizes,
const char *configParams NvU32 *sparseCount,
NvBool *isBar064bit,
NvU8 *configParams
)
{
KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice;
NV_STATUS rmStatus;
NvU32 i = 0;
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
is_bar_64bit(pGpu, NV_VFIO_PCI_BAR0_REGION_INDEX, isBar064bit),
exit);
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
kvgpumgrGetHostVgpuDeviceFromMdevUuid(pGpu->gpuId,
pMdevUuid,
&pKernelHostVgpuDevice),
exit);
for (i = 0; i < NVA081_MAX_BAR_REGION_COUNT; i++)
{
/*
* For SRIOV, only VF BAR1 is queried via RM, other BARs are directly
* queried via VF config space in vgpu-vfio
*/
if (gpuIsSriovEnabled(pGpu) && (i != NV_VFIO_PCI_BAR1_REGION_INDEX))
{
barSizes[i] = 0;
continue;
}
rmStatus = _nv_vgpu_get_bar_size(pGpu, pKernelHostVgpuDevice, i,
&barSizes[i], configParams);
if (rmStatus != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Failed to query BAR size for index %u 0x%x\n",
i, rmStatus);
goto exit;
}
}
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT,
_nv_vgpu_get_sparse_mmap(pGpu, pKernelHostVgpuDevice,
sparseOffsets, sparseSizes,
sparseCount, configParams),
exit);
exit:
return rmStatus;
}
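A hedged sketch of the reworked entry point from the caller's side; the output buffers are sized with the NVA081 limits used above, while sp, nv, mdevUuid and the contents of configParams are assumed to be supplied by the surrounding vGPU-VFIO code.

// Illustrative caller: fetch every BAR size plus the BAR0 sparse-mmap layout in one call.
NvU64  barSizes[NVA081_MAX_BAR_REGION_COUNT];
NvU64  sparseOffsets[NVA081_MAX_SPARSE_REGION_COUNT];
NvU64  sparseSizes[NVA081_MAX_SPARSE_REGION_COUNT];
NvU32  sparseCount = 0;
NvBool isBar064bit = NV_FALSE;
NvU8   configParams[NVA081_CONFIG_PARAMS_MAX_LENGTH] = {0};  // vGPU config string, if any
NV_STATUS status;

status = nv_vgpu_get_bar_info(sp, nv, mdevUuid, barSizes,
                              sparseOffsets, sparseSizes, &sparseCount,
                              &isBar064bit, configParams);
if (status != NV_OK)
    return status;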
NV_STATUS NV_API_CALL nv_gpu_unbind_event
(
nvidia_stack_t *sp,
NvU32 gpuId,
NvBool *isEventNotified
) )
{ {
THREAD_STATE_NODE threadState; THREAD_STATE_NODE threadState;
NV_STATUS rmStatus = NV_ERR_OBJECT_NOT_FOUND; NV_STATUS rmStatus = NV_OK;
void *fp = NULL; void *fp = NULL;
REQUEST_VGPU_INFO_NODE *pRequestVgpu = NULL;
OBJSYS *pSys = SYS_GET_INSTANCE();
KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
NV_ENTER_RM_RUNTIME(sp,fp); NV_ENTER_RM_RUNTIME(sp,fp);
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
if (offsets != NULL)
os_free_mem(offsets);
if (sizes != NULL)
os_free_mem(sizes);
// LOCK: acquire API lock // LOCK: acquire API lock
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK) if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
{ {
for (pRequestVgpu = listHead(&pKernelVgpuMgr->listRequestVgpuHead); /*
pRequestVgpu != NULL; * Send gpu_id in "status" field of the event so that nvidia-vgpu-mgr
pRequestVgpu = listNext(&pKernelVgpuMgr->listRequestVgpuHead, pRequestVgpu)) * daemon knows which GPU is being unbound
{ */
if (portMemCmp(pRequestVgpu->mdevUuid, pMdevUuid, VGPU_UUID_SIZE) == 0) CliAddSystemEvent(NV0000_NOTIFIERS_GPU_UNBIND_EVENT, gpuId, isEventNotified);
{
if (configParams != NULL)
portStringCopy(pRequestVgpu->configParams,
sizeof(pRequestVgpu->configParams),
configParams, (portStringLength(configParams) + 1));
pRequestVgpu->deviceState = deviceState;
rmStatus = NV_OK;
}
}
// UNLOCK: release API lock // UNLOCK: release API lock
rmapiLockRelease(); rmapiLockRelease();
@ -1050,7 +812,7 @@ NV_STATUS NV_API_CALL nv_gpu_bind_event(
// LOCK: acquire API lock // LOCK: acquire API lock
if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK) if ((rmStatus = rmapiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_HYPERVISOR)) == NV_OK)
{ {
CliAddSystemEvent(NV0000_NOTIFIERS_GPU_BIND_EVENT, 0); CliAddSystemEvent(NV0000_NOTIFIERS_GPU_BIND_EVENT, 0, NULL);
// UNLOCK: release API lock // UNLOCK: release API lock
rmapiLockRelease(); rmapiLockRelease();
@ -1062,103 +824,6 @@ NV_STATUS NV_API_CALL nv_gpu_bind_event(
return rmStatus; return rmStatus;
} }
NV_STATUS osVgpuInjectInterrupt(void *vgpuVfioRef)
{
vgpu_vfio_info vgpu_info;
vgpu_info.vgpuVfioRef = vgpuVfioRef;
return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VGPU_VFIO_INJECT_INTERRUPT);
}
NV_STATUS osVgpuRegisterMdev
(
OS_GPU_INFO *pOsGpuInfo
)
{
NV_STATUS status = NV_OK;
vgpu_vfio_info vgpu_info = {0};
OBJSYS *pSys = SYS_GET_INSTANCE();
KernelVgpuMgr *pKernelVgpuMgr = SYS_GET_KERNEL_VGPUMGR(pSys);
KERNEL_PHYS_GPU_INFO *pPhysGpuInfo;
NvU32 pgpuIndex, i;
OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys);
status = kvgpumgrGetPgpuIndex(pKernelVgpuMgr, pOsGpuInfo->gpu_id, &pgpuIndex);
if (status != NV_OK)
return status;
pPhysGpuInfo = &(pKernelVgpuMgr->pgpuInfo[pgpuIndex]);
vgpu_info.numVgpuTypes = pKernelVgpuMgr->pgpuInfo[pgpuIndex].numVgpuTypes;
status = os_alloc_mem((void **)&vgpu_info.vgpuTypeIds,
((vgpu_info.numVgpuTypes) * sizeof(NvU32)));
if (status != NV_OK)
goto free_mem;
status = os_alloc_mem((void **)&vgpu_info.vgpuNames,
((vgpu_info.numVgpuTypes) * sizeof(char *)));
if (status != NV_OK)
goto free_mem;
vgpu_info.nv = pOsGpuInfo;
for (i = 0; i < pPhysGpuInfo->numVgpuTypes; i++)
{
status = os_alloc_mem((void *)&vgpu_info.vgpuNames[i], (VGPU_STRING_BUFFER_SIZE * sizeof(char)));
if (status != NV_OK)
goto free_mem;
vgpu_info.vgpuTypeIds[i] = pPhysGpuInfo->vgpuTypes[i]->vgpuTypeId;
os_snprintf((char *) vgpu_info.vgpuNames[i], VGPU_STRING_BUFFER_SIZE, "%s\n", pPhysGpuInfo->vgpuTypes[i]->vgpuName);
}
if ((!pPhysGpuInfo->sriovEnabled) ||
(pHypervisor->getProperty(pHypervisor, PDB_PROP_HYPERVISOR_DRIVERVM_ENABLED)))
{
vgpu_info.is_virtfn = NV_FALSE;
status = os_call_vgpu_vfio((void *)&vgpu_info, CMD_VGPU_VFIO_REGISTER_MDEV);
}
else
{
for (i = 0; i < MAX_VF_COUNT_PER_GPU; i++)
{
if (pPhysGpuInfo->vfPciInfo[i].isNvidiaAttached)
{
vgpu_info.is_virtfn = NV_TRUE;
vgpu_info.domain = pPhysGpuInfo->vfPciInfo[i].domain;
vgpu_info.bus = pPhysGpuInfo->vfPciInfo[i].bus;
vgpu_info.slot = pPhysGpuInfo->vfPciInfo[i].slot;
vgpu_info.function = pPhysGpuInfo->vfPciInfo[i].function;
status = os_call_vgpu_vfio((void *)&vgpu_info, CMD_VGPU_VFIO_REGISTER_MDEV);
if (status == NV_OK)
{
pPhysGpuInfo->vfPciInfo[i].isMdevAttached = NV_TRUE;
}
}
}
}
free_mem:
if (vgpu_info.vgpuTypeIds)
os_free_mem(vgpu_info.vgpuTypeIds);
if (vgpu_info.vgpuNames)
{
for (i = 0; i < pPhysGpuInfo->numVgpuTypes; i++)
{
if (vgpu_info.vgpuNames[i])
{
os_free_mem(vgpu_info.vgpuNames[i]);
}
}
os_free_mem(vgpu_info.vgpuNames);
}
return status;
}
NV_STATUS osIsVgpuVfioPresent(void) NV_STATUS osIsVgpuVfioPresent(void)
{ {
vgpu_vfio_info vgpu_info; vgpu_vfio_info vgpu_info;
@ -1173,6 +838,19 @@ NV_STATUS osIsVfioPciCorePresent(void)
return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VFIO_PCI_CORE_PRESENT); return os_call_vgpu_vfio((void *) &vgpu_info, CMD_VFIO_PCI_CORE_PRESENT);
} }
void osWakeRemoveVgpu(NvU32 gpuId, NvU32 returnStatus)
{
vgpu_vfio_info vgpu_info;
vgpu_info.return_status = returnStatus;
vgpu_info.domain = gpuDecodeDomain(gpuId);
vgpu_info.bus = gpuDecodeBus(gpuId);
vgpu_info.device = gpuDecodeDevice(gpuId);
os_call_vgpu_vfio((void *)&vgpu_info, CMD_VFIO_WAKE_REMOVE_GPU);
}
NvU32 osGetGridCspSupport(void) NvU32 osGetGridCspSupport(void)
{ {
return os_get_grid_csp_support(); return os_get_grid_csp_support();

View File

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -871,13 +871,14 @@ NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_clear(nvidia_stack_t *sp,
return rmStatus; return rmStatus;
} }
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_update(nvidia_stack_t *sp, NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_key(nvidia_stack_t *sp,
struct ccslContext_t *ctx) UvmCslContext *contextList[],
NvU32 contextListCount)
{ {
NV_STATUS rmStatus; NV_STATUS rmStatus;
void *fp; void *fp;
NV_ENTER_RM_RUNTIME(sp,fp); NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsCcslContextUpdate(ctx); rmStatus = nvGpuOpsCcslRotateKey(contextList, contextListCount);
NV_EXIT_RM_RUNTIME(sp,fp); NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus; return rmStatus;
} }
@ -930,6 +931,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *sp,
NvU32 bufferSize, NvU32 bufferSize,
NvU8 const *inputBuffer, NvU8 const *inputBuffer,
NvU8 const *decryptIv, NvU8 const *decryptIv,
NvU32 keyRotationId,
NvU8 *outputBuffer, NvU8 *outputBuffer,
NvU8 const *addAuthData, NvU8 const *addAuthData,
NvU32 addAuthDataSize, NvU32 addAuthDataSize,
@ -938,7 +940,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *sp,
NV_STATUS rmStatus; NV_STATUS rmStatus;
void *fp; void *fp;
NV_ENTER_RM_RUNTIME(sp,fp); NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsCcslDecrypt(ctx, bufferSize, inputBuffer, decryptIv, outputBuffer, rmStatus = nvGpuOpsCcslDecrypt(ctx, bufferSize, inputBuffer, decryptIv, keyRotationId, outputBuffer,
addAuthData, addAuthDataSize, authTagData); addAuthData, addAuthDataSize, authTagData);
NV_EXIT_RM_RUNTIME(sp,fp); NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus; return rmStatus;
@ -986,14 +988,15 @@ NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_increment_iv(nvidia_stack_t *sp,
return rmStatus; return rmStatus;
} }
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_log_device_encryption(nvidia_stack_t *sp, NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_log_encryption(nvidia_stack_t *sp,
struct ccslContext_t *ctx, struct ccslContext_t *ctx,
NvU8 direction,
NvU32 bufferSize) NvU32 bufferSize)
{ {
NV_STATUS rmStatus; NV_STATUS rmStatus;
void *fp; void *fp;
NV_ENTER_RM_RUNTIME(sp,fp); NV_ENTER_RM_RUNTIME(sp,fp);
rmStatus = nvGpuOpsLogDeviceEncryption(ctx, bufferSize); rmStatus = nvGpuOpsLogEncryption(ctx, direction, bufferSize);
NV_EXIT_RM_RUNTIME(sp,fp); NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus; return rmStatus;
} }
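With the rename, both traffic directions are recorded through one entry point, and decryption now carries an explicit key rotation generation. A hedged call-site sketch; the direction value is assumed to use the same encoding as rm_gpu_ops_ccsl_increment_iv, and sp, ctx and the buffers come from the caller.

// Illustrative: account an encrypted transfer of bufferSize bytes in the given direction.
status = rm_gpu_ops_ccsl_log_encryption(sp, ctx, direction, bufferSize);
if (status != NV_OK)
    return status;

// Illustrative: decrypt against a specific key rotation generation.
status = rm_gpu_ops_ccsl_decrypt(sp, ctx, bufferSize, inputBuffer, decryptIv,
                                 keyRotationId, outputBuffer,
                                 addAuthData, addAuthDataSize, authTagData);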

View File

@ -129,12 +129,12 @@
--undefined=rm_gpu_ops_ccsl_encrypt_with_iv --undefined=rm_gpu_ops_ccsl_encrypt_with_iv
--undefined=rm_gpu_ops_ccsl_context_init --undefined=rm_gpu_ops_ccsl_context_init
--undefined=rm_gpu_ops_ccsl_context_clear --undefined=rm_gpu_ops_ccsl_context_clear
--undefined=rm_gpu_ops_ccsl_context_update --undefined=rm_gpu_ops_ccsl_rotate_key
--undefined=rm_gpu_ops_ccsl_rotate_iv --undefined=rm_gpu_ops_ccsl_rotate_iv
--undefined=rm_gpu_ops_ccsl_decrypt --undefined=rm_gpu_ops_ccsl_decrypt
--undefined=rm_gpu_ops_ccsl_query_message_pool --undefined=rm_gpu_ops_ccsl_query_message_pool
--undefined=rm_gpu_ops_ccsl_increment_iv --undefined=rm_gpu_ops_ccsl_increment_iv
--undefined=rm_gpu_ops_ccsl_log_device_encryption --undefined=rm_gpu_ops_ccsl_log_encryption
--undefined=rm_log_gpu_crash --undefined=rm_log_gpu_crash
--undefined=rm_kernel_rmapi_op --undefined=rm_kernel_rmapi_op
--undefined=nv_get_hypervisor_type --undefined=nv_get_hypervisor_type
@ -186,13 +186,11 @@
--undefined=nv_vgpu_delete --undefined=nv_vgpu_delete
--undefined=nv_vgpu_get_bar_info --undefined=nv_vgpu_get_bar_info
--undefined=nv_vgpu_get_hbm_info --undefined=nv_vgpu_get_hbm_info
--undefined=nv_vgpu_start
--undefined=nv_vgpu_get_type_ids --undefined=nv_vgpu_get_type_ids
--undefined=nv_vgpu_get_type_info --undefined=nv_vgpu_get_type_info
--undefined=nv_vgpu_get_sparse_mmap
--undefined=nv_vgpu_update_request
--undefined=nv_vgpu_process_vf_info --undefined=nv_vgpu_process_vf_info
--undefined=nv_gpu_bind_event --undefined=nv_gpu_bind_event
--undefined=nv_gpu_unbind_event
--undefined=rm_check_for_gpu_surprise_removal --undefined=rm_check_for_gpu_surprise_removal
--undefined=rm_set_external_kernel_client_count --undefined=rm_set_external_kernel_client_count
--undefined=rm_schedule_gpu_wakeup --undefined=rm_schedule_gpu_wakeup

View File

@ -39,8 +39,8 @@
// //
static BINDATA_CONST NvU8 kgspBinArchiveConcatenatedFMCDesc_GH100_ucode_desc_prod_data[] = static BINDATA_CONST NvU8 kgspBinArchiveConcatenatedFMCDesc_GH100_ucode_desc_prod_data[] =
{ {
0x63, 0x65, 0x20, 0x02, 0x70, 0x41, 0xf1, 0x72, 0x20, 0xde, 0x08, 0xc4, 0x37, 0x19, 0x19, 0x18, 0x63, 0x65, 0x20, 0x02, 0x70, 0x41, 0xf1, 0x0a, 0x20, 0xde, 0x04, 0xc4, 0x37, 0x19, 0x19, 0x18,
0xf1, 0xe8, 0x03, 0x00, 0x1e, 0x4d, 0xae, 0xcc, 0x54, 0x00, 0x00, 0x00, 0xf1, 0xe8, 0x03, 0x00, 0x92, 0x10, 0x68, 0x6c, 0x54, 0x00, 0x00, 0x00,
}; };
#endif // defined(BINDATA_INCLUDE_DATA) #endif // defined(BINDATA_INCLUDE_DATA)

View File

@ -7,7 +7,7 @@ extern "C" {
#endif #endif
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -90,11 +90,11 @@ NV_STATUS __nvoc_objCreate_Ccsl(Ccsl**, Dynamic*, NvU32);
#define __objCreate_Ccsl(ppNewObj, pParent, createFlags) \ #define __objCreate_Ccsl(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_Ccsl((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) __nvoc_objCreate_Ccsl((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
NV_STATUS ccslContextInitViaChannel_IMPL(pCcslContext *ppCtx, NvHandle hClient, NvHandle hChannel); NV_STATUS ccslContextInitViaChannel_IMPL(pCcslContext *ppCtx, NvHandle hClient, NvHandle hSubdevice, NvHandle hChannel);
#define ccslContextInitViaChannel(ppCtx, hClient, hChannel) ccslContextInitViaChannel_IMPL(ppCtx, hClient, hChannel) #define ccslContextInitViaChannel(ppCtx, hClient, hSubdevice, hChannel) ccslContextInitViaChannel_IMPL(ppCtx, hClient, hSubdevice, hChannel)
#define ccslContextInitViaChannel_HAL(ppCtx, hClient, hChannel) ccslContextInitViaChannel(ppCtx, hClient, hChannel) #define ccslContextInitViaChannel_HAL(ppCtx, hClient, hSubdevice, hChannel) ccslContextInitViaChannel(ppCtx, hClient, hSubdevice, hChannel)
NV_STATUS ccslContextInitViaKeyId_KERNEL(struct ConfidentialCompute *pConfCompute, pCcslContext *ppCtx, NvU32 globalKeyId); NV_STATUS ccslContextInitViaKeyId_KERNEL(struct ConfidentialCompute *pConfCompute, pCcslContext *ppCtx, NvU32 globalKeyId);
@ -126,11 +126,23 @@ NV_STATUS ccslEncrypt_KERNEL(pCcslContext ctx, NvU32 bufferSize, const NvU8 *inp
#define ccslEncrypt(ctx, bufferSize, inputBuffer, aadBuffer, aadSize, outputBuffer, authTagBuffer) ccslEncrypt_KERNEL(ctx, bufferSize, inputBuffer, aadBuffer, aadSize, outputBuffer, authTagBuffer) #define ccslEncrypt(ctx, bufferSize, inputBuffer, aadBuffer, aadSize, outputBuffer, authTagBuffer) ccslEncrypt_KERNEL(ctx, bufferSize, inputBuffer, aadBuffer, aadSize, outputBuffer, authTagBuffer)
#define ccslEncrypt_HAL(ctx, bufferSize, inputBuffer, aadBuffer, aadSize, outputBuffer, authTagBuffer) ccslEncrypt(ctx, bufferSize, inputBuffer, aadBuffer, aadSize, outputBuffer, authTagBuffer) #define ccslEncrypt_HAL(ctx, bufferSize, inputBuffer, aadBuffer, aadSize, outputBuffer, authTagBuffer) ccslEncrypt(ctx, bufferSize, inputBuffer, aadBuffer, aadSize, outputBuffer, authTagBuffer)
NV_STATUS ccslDecrypt_KERNEL(pCcslContext ctx, NvU32 bufferSize, const NvU8 *inputBuffer, const NvU8 *decryptIv, const NvU8 *aadBuffer, NvU32 aadSize, NvU8 *outputBuffer, const NvU8 *authTagBuffer); NV_STATUS ccslEncryptWithRotationChecks_KERNEL(pCcslContext pCtx, NvU32 bufferSize, const NvU8 *inputBuffer, const NvU8 *aadBuffer, NvU32 aadSize, NvU8 *outputBuffer, NvU8 *authTagBuffer);
#define ccslDecrypt(ctx, bufferSize, inputBuffer, decryptIv, aadBuffer, aadSize, outputBuffer, authTagBuffer) ccslDecrypt_KERNEL(ctx, bufferSize, inputBuffer, decryptIv, aadBuffer, aadSize, outputBuffer, authTagBuffer) #define ccslEncryptWithRotationChecks(pCtx, bufferSize, inputBuffer, aadBuffer, aadSize, outputBuffer, authTagBuffer) ccslEncryptWithRotationChecks_KERNEL(pCtx, bufferSize, inputBuffer, aadBuffer, aadSize, outputBuffer, authTagBuffer)
#define ccslDecrypt_HAL(ctx, bufferSize, inputBuffer, decryptIv, aadBuffer, aadSize, outputBuffer, authTagBuffer) ccslDecrypt(ctx, bufferSize, inputBuffer, decryptIv, aadBuffer, aadSize, outputBuffer, authTagBuffer) #define ccslEncryptWithRotationChecks_HAL(pCtx, bufferSize, inputBuffer, aadBuffer, aadSize, outputBuffer, authTagBuffer) ccslEncryptWithRotationChecks(pCtx, bufferSize, inputBuffer, aadBuffer, aadSize, outputBuffer, authTagBuffer)
NV_STATUS ccslDecrypt_KERNEL(pCcslContext ctx, NvU32 bufferSize, const NvU8 *inputBuffer, const NvU8 *decryptIv, NvU32 keyRotationId, const NvU8 *aadBuffer, NvU32 aadSize, NvU8 *outputBuffer, const NvU8 *authTagBuffer);
#define ccslDecrypt(ctx, bufferSize, inputBuffer, decryptIv, keyRotationId, aadBuffer, aadSize, outputBuffer, authTagBuffer) ccslDecrypt_KERNEL(ctx, bufferSize, inputBuffer, decryptIv, keyRotationId, aadBuffer, aadSize, outputBuffer, authTagBuffer)
#define ccslDecrypt_HAL(ctx, bufferSize, inputBuffer, decryptIv, keyRotationId, aadBuffer, aadSize, outputBuffer, authTagBuffer) ccslDecrypt(ctx, bufferSize, inputBuffer, decryptIv, keyRotationId, aadBuffer, aadSize, outputBuffer, authTagBuffer)
NV_STATUS ccslDecryptWithRotationChecks_KERNEL(pCcslContext pCtx, NvU32 bufferSize, const NvU8 *inputBuffer, const NvU8 *decryptIv, const NvU8 *aadBuffer, NvU32 aadSize, NvU8 *outputBuffer, const NvU8 *authTagBuffer);
#define ccslDecryptWithRotationChecks(pCtx, bufferSize, inputBuffer, decryptIv, aadBuffer, aadSize, outputBuffer, authTagBuffer) ccslDecryptWithRotationChecks_KERNEL(pCtx, bufferSize, inputBuffer, decryptIv, aadBuffer, aadSize, outputBuffer, authTagBuffer)
#define ccslDecryptWithRotationChecks_HAL(pCtx, bufferSize, inputBuffer, decryptIv, aadBuffer, aadSize, outputBuffer, authTagBuffer) ccslDecryptWithRotationChecks(pCtx, bufferSize, inputBuffer, decryptIv, aadBuffer, aadSize, outputBuffer, authTagBuffer)
NV_STATUS ccslSign_IMPL(pCcslContext ctx, NvU32 bufferSize, const NvU8 *inputBuffer, NvU8 *authTagBuffer); NV_STATUS ccslSign_IMPL(pCcslContext ctx, NvU32 bufferSize, const NvU8 *inputBuffer, NvU8 *authTagBuffer);
@ -150,11 +162,19 @@ NV_STATUS ccslIncrementIv_IMPL(pCcslContext pCtx, NvU8 direction, NvU64 incremen
#define ccslIncrementIv(pCtx, direction, increment, iv) ccslIncrementIv_IMPL(pCtx, direction, increment, iv) #define ccslIncrementIv(pCtx, direction, increment, iv) ccslIncrementIv_IMPL(pCtx, direction, increment, iv)
#define ccslIncrementIv_HAL(pCtx, direction, increment, iv) ccslIncrementIv(pCtx, direction, increment, iv) #define ccslIncrementIv_HAL(pCtx, direction, increment, iv) ccslIncrementIv(pCtx, direction, increment, iv)
NV_STATUS ccslLogDeviceEncryption_IMPL(pCcslContext pCtx, NvU32 bufferSize); NV_STATUS ccslLogEncryption_IMPL(pCcslContext pCtx, NvU8 direction, NvU32 bufferSize);
#define ccslLogDeviceEncryption(pCtx, bufferSize) ccslLogDeviceEncryption_IMPL(pCtx, bufferSize) #define ccslLogEncryption(pCtx, direction, bufferSize) ccslLogEncryption_IMPL(pCtx, direction, bufferSize)
#define ccslLogDeviceEncryption_HAL(pCtx, bufferSize) ccslLogDeviceEncryption(pCtx, bufferSize) #define ccslLogEncryption_HAL(pCtx, direction, bufferSize) ccslLogEncryption(pCtx, direction, bufferSize)
static inline void ccslUpdateRotationThreshold_b3696a(NvU64 threshold) {
return;
}
#define ccslUpdateRotationThreshold(threshold) ccslUpdateRotationThreshold_b3696a(threshold)
#define ccslUpdateRotationThreshold_HAL(threshold) ccslUpdateRotationThreshold(threshold)
void ccslContextClear_IMPL(pCcslContext ctx); void ccslContextClear_IMPL(pCcslContext ctx);
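A hedged sketch of the updated channel-based initialization; hClient, hSubdevice and hChannel are assumed to identify the confidential-compute channel being set up.

// Illustrative: the subdevice handle is now passed so CCSL can reach per-GPU
// key rotation state when building the channel's encryption context.
pCcslContext pCtx = NULL;
NV_STATUS status = ccslContextInitViaChannel(&pCtx, hClient, hSubdevice, hChannel);
if (status != NV_OK)
    return status;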

View File

@ -1551,21 +1551,6 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
#endif #endif
}, },
{ /* [90] */ { /* [90] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) cliresCtrlCmdVgpuGetStartData_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*flags=*/ 0x10u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xc01u,
/*paramSize=*/ sizeof(NV0000_CTRL_VGPU_GET_START_DATA_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "cliresCtrlCmdVgpuGetStartData"
#endif
},
{ /* [91] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
/*pFunc=*/ (void (*)(void)) NULL, /*pFunc=*/ (void (*)(void)) NULL,
#else #else
@ -1580,7 +1565,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
/*func=*/ "cliresCtrlCmdClientGetAddrSpaceType" /*func=*/ "cliresCtrlCmdClientGetAddrSpaceType"
#endif #endif
}, },
{ /* [92] */ { /* [91] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
/*pFunc=*/ (void (*)(void)) NULL, /*pFunc=*/ (void (*)(void)) NULL,
#else #else
@ -1595,7 +1580,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
/*func=*/ "cliresCtrlCmdClientGetHandleInfo" /*func=*/ "cliresCtrlCmdClientGetHandleInfo"
#endif #endif
}, },
{ /* [93] */ { /* [92] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL, /*pFunc=*/ (void (*)(void)) NULL,
#else #else
@ -1610,7 +1595,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
/*func=*/ "cliresCtrlCmdClientGetAccessRights" /*func=*/ "cliresCtrlCmdClientGetAccessRights"
#endif #endif
}, },
{ /* [94] */ { /* [93] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL, /*pFunc=*/ (void (*)(void)) NULL,
#else #else
@ -1625,7 +1610,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
/*func=*/ "cliresCtrlCmdClientSetInheritedSharePolicy" /*func=*/ "cliresCtrlCmdClientSetInheritedSharePolicy"
#endif #endif
}, },
{ /* [95] */ { /* [94] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL, /*pFunc=*/ (void (*)(void)) NULL,
#else #else
@ -1640,7 +1625,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
/*func=*/ "cliresCtrlCmdClientGetChildHandle" /*func=*/ "cliresCtrlCmdClientGetChildHandle"
#endif #endif
}, },
{ /* [96] */ { /* [95] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL, /*pFunc=*/ (void (*)(void)) NULL,
#else #else
@ -1655,7 +1640,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
/*func=*/ "cliresCtrlCmdClientShareObject" /*func=*/ "cliresCtrlCmdClientShareObject"
#endif #endif
}, },
{ /* [97] */ { /* [96] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
/*pFunc=*/ (void (*)(void)) NULL, /*pFunc=*/ (void (*)(void)) NULL,
#else #else
@ -1670,7 +1655,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
/*func=*/ "cliresCtrlCmdObjectsAreDuplicates" /*func=*/ "cliresCtrlCmdObjectsAreDuplicates"
#endif #endif
}, },
{ /* [98] */ { /* [97] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
/*pFunc=*/ (void (*)(void)) NULL, /*pFunc=*/ (void (*)(void)) NULL,
#else #else
@ -1685,7 +1670,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
/*func=*/ "cliresCtrlCmdClientSubscribeToImexChannel" /*func=*/ "cliresCtrlCmdClientSubscribeToImexChannel"
#endif #endif
}, },
{ /* [99] */ { /* [98] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL, /*pFunc=*/ (void (*)(void)) NULL,
#else #else
@ -1700,7 +1685,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
/*func=*/ "cliresCtrlCmdOsUnixFlushUserCache" /*func=*/ "cliresCtrlCmdOsUnixFlushUserCache"
#endif #endif
}, },
{ /* [100] */ { /* [99] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL, /*pFunc=*/ (void (*)(void)) NULL,
#else #else
@ -1715,7 +1700,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
/*func=*/ "cliresCtrlCmdOsUnixExportObjectToFd" /*func=*/ "cliresCtrlCmdOsUnixExportObjectToFd"
#endif #endif
}, },
{ /* [101] */ { /* [100] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL, /*pFunc=*/ (void (*)(void)) NULL,
#else #else
@ -1730,7 +1715,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
/*func=*/ "cliresCtrlCmdOsUnixImportObjectFromFd" /*func=*/ "cliresCtrlCmdOsUnixImportObjectFromFd"
#endif #endif
}, },
{ /* [102] */ { /* [101] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
/*pFunc=*/ (void (*)(void)) NULL, /*pFunc=*/ (void (*)(void)) NULL,
#else #else
@ -1745,7 +1730,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
/*func=*/ "cliresCtrlCmdOsUnixGetExportObjectInfo" /*func=*/ "cliresCtrlCmdOsUnixGetExportObjectInfo"
#endif #endif
}, },
{ /* [103] */ { /* [102] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL, /*pFunc=*/ (void (*)(void)) NULL,
#else #else
@ -1760,7 +1745,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
/*func=*/ "cliresCtrlCmdOsUnixCreateExportObjectFd" /*func=*/ "cliresCtrlCmdOsUnixCreateExportObjectFd"
#endif #endif
}, },
{ /* [104] */ { /* [103] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL, /*pFunc=*/ (void (*)(void)) NULL,
#else #else
@ -1775,7 +1760,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
/*func=*/ "cliresCtrlCmdOsUnixExportObjectsToFd" /*func=*/ "cliresCtrlCmdOsUnixExportObjectsToFd"
#endif #endif
}, },
{ /* [105] */ { /* [104] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) #if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL, /*pFunc=*/ (void (*)(void)) NULL,
#else #else
@ -1795,7 +1780,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClient
const struct NVOC_EXPORT_INFO __nvoc_export_info_RmClientResource = const struct NVOC_EXPORT_INFO __nvoc_export_info_RmClientResource =
{ {
/*numEntries=*/ 106, /*numEntries=*/ 105,
/*pExportEntries=*/ __nvoc_exported_method_def_RmClientResource /*pExportEntries=*/ __nvoc_exported_method_def_RmClientResource
}; };
@ -2226,10 +2211,6 @@ static void __nvoc_init_funcTable_RmClientResource_1(RmClientResource *pThis) {
pThis->__cliresCtrlCmdSyncGpuBoostGroupInfo__ = &cliresCtrlCmdSyncGpuBoostGroupInfo_IMPL; pThis->__cliresCtrlCmdSyncGpuBoostGroupInfo__ = &cliresCtrlCmdSyncGpuBoostGroupInfo_IMPL;
#endif #endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__cliresCtrlCmdVgpuGetStartData__ = &cliresCtrlCmdVgpuGetStartData_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) #if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__cliresCtrlCmdVgpuGetVgpuVersion__ = &cliresCtrlCmdVgpuGetVgpuVersion_IMPL; pThis->__cliresCtrlCmdVgpuGetVgpuVersion__ = &cliresCtrlCmdVgpuGetVgpuVersion_IMPL;
#endif #endif
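The bracketed indices in the exported-method table above are generated comments; as I understand NVOC dispatch, entries are matched by their methodId, not by array position. A self-contained sketch of that style of lookup, with invented names and values:

#include <stdio.h>
#include <stddef.h>

typedef int (*DemoCtrlFunc)(void *params);

typedef struct {
    unsigned int  methodId;   /* identifier callers pass in */
    size_t        paramSize;  /* expected parameter struct size */
    DemoCtrlFunc  pFunc;      /* handler, NULL if compiled out */
} DemoExportedMethod;

static int demoGetVgpuVersion(void *params) { (void)params; return 0; }
static int demoFlushUserCache(void *params) { (void)params; return 0; }

/* Entries are identified by methodId; their position carries no meaning. */
static const DemoExportedMethod demoTable[] = {
    { 0x0c02u, 16, demoGetVgpuVersion },
    { 0x3d02u, 32, demoFlushUserCache },
};

static const DemoExportedMethod *demoLookup(unsigned int methodId)
{
    for (size_t i = 0; i < sizeof(demoTable) / sizeof(demoTable[0]); i++) {
        if (demoTable[i].methodId == methodId)
            return &demoTable[i];
    }
    return NULL; /* unknown control */
}

int main(void)
{
    const DemoExportedMethod *m = demoLookup(0x3d02u);
    printf("found=%d\n", m != NULL);
    return 0;
}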

View File

@ -176,7 +176,6 @@ struct RmClientResource {
NV_STATUS (*__cliresCtrlCmdSyncGpuBoostGroupCreate__)(struct RmClientResource *, NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS *); NV_STATUS (*__cliresCtrlCmdSyncGpuBoostGroupCreate__)(struct RmClientResource *, NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSyncGpuBoostGroupDestroy__)(struct RmClientResource *, NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS *); NV_STATUS (*__cliresCtrlCmdSyncGpuBoostGroupDestroy__)(struct RmClientResource *, NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSyncGpuBoostGroupInfo__)(struct RmClientResource *, NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS *); NV_STATUS (*__cliresCtrlCmdSyncGpuBoostGroupInfo__)(struct RmClientResource *, NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS *);
NV_STATUS (*__cliresCtrlCmdVgpuGetStartData__)(struct RmClientResource *, NV0000_CTRL_VGPU_GET_START_DATA_PARAMS *);
NV_STATUS (*__cliresCtrlCmdVgpuGetVgpuVersion__)(struct RmClientResource *, NV0000_CTRL_VGPU_GET_VGPU_VERSION_PARAMS *); NV_STATUS (*__cliresCtrlCmdVgpuGetVgpuVersion__)(struct RmClientResource *, NV0000_CTRL_VGPU_GET_VGPU_VERSION_PARAMS *);
NV_STATUS (*__cliresCtrlCmdVgpuSetVgpuVersion__)(struct RmClientResource *, NV0000_CTRL_VGPU_SET_VGPU_VERSION_PARAMS *); NV_STATUS (*__cliresCtrlCmdVgpuSetVgpuVersion__)(struct RmClientResource *, NV0000_CTRL_VGPU_SET_VGPU_VERSION_PARAMS *);
NV_STATUS (*__cliresCtrlCmdSystemNVPCFGetPowerModeInfo__)(struct RmClientResource *, NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *); NV_STATUS (*__cliresCtrlCmdSystemNVPCFGetPowerModeInfo__)(struct RmClientResource *, NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *);
@ -335,7 +334,6 @@ NV_STATUS __nvoc_objCreate_RmClientResource(RmClientResource**, Dynamic*, NvU32,
#define cliresCtrlCmdSyncGpuBoostGroupCreate(pRmCliRes, pParams) cliresCtrlCmdSyncGpuBoostGroupCreate_DISPATCH(pRmCliRes, pParams) #define cliresCtrlCmdSyncGpuBoostGroupCreate(pRmCliRes, pParams) cliresCtrlCmdSyncGpuBoostGroupCreate_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdSyncGpuBoostGroupDestroy(pRmCliRes, pParams) cliresCtrlCmdSyncGpuBoostGroupDestroy_DISPATCH(pRmCliRes, pParams) #define cliresCtrlCmdSyncGpuBoostGroupDestroy(pRmCliRes, pParams) cliresCtrlCmdSyncGpuBoostGroupDestroy_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdSyncGpuBoostGroupInfo(pRmCliRes, pParams) cliresCtrlCmdSyncGpuBoostGroupInfo_DISPATCH(pRmCliRes, pParams) #define cliresCtrlCmdSyncGpuBoostGroupInfo(pRmCliRes, pParams) cliresCtrlCmdSyncGpuBoostGroupInfo_DISPATCH(pRmCliRes, pParams)
#define cliresCtrlCmdVgpuGetStartData(pRmCliRes, pVgpuStartParams) cliresCtrlCmdVgpuGetStartData_DISPATCH(pRmCliRes, pVgpuStartParams)
#define cliresCtrlCmdVgpuGetVgpuVersion(pRmCliRes, vgpuVersionInfo) cliresCtrlCmdVgpuGetVgpuVersion_DISPATCH(pRmCliRes, vgpuVersionInfo) #define cliresCtrlCmdVgpuGetVgpuVersion(pRmCliRes, vgpuVersionInfo) cliresCtrlCmdVgpuGetVgpuVersion_DISPATCH(pRmCliRes, vgpuVersionInfo)
#define cliresCtrlCmdVgpuSetVgpuVersion(pRmCliRes, vgpuVersionInfo) cliresCtrlCmdVgpuSetVgpuVersion_DISPATCH(pRmCliRes, vgpuVersionInfo) #define cliresCtrlCmdVgpuSetVgpuVersion(pRmCliRes, vgpuVersionInfo) cliresCtrlCmdVgpuSetVgpuVersion_DISPATCH(pRmCliRes, vgpuVersionInfo)
#define cliresCtrlCmdSystemNVPCFGetPowerModeInfo(pRmCliRes, pParams) cliresCtrlCmdSystemNVPCFGetPowerModeInfo_DISPATCH(pRmCliRes, pParams) #define cliresCtrlCmdSystemNVPCFGetPowerModeInfo(pRmCliRes, pParams) cliresCtrlCmdSystemNVPCFGetPowerModeInfo_DISPATCH(pRmCliRes, pParams)
@ -949,12 +947,6 @@ static inline NV_STATUS cliresCtrlCmdSyncGpuBoostGroupInfo_DISPATCH(struct RmCli
return pRmCliRes->__cliresCtrlCmdSyncGpuBoostGroupInfo__(pRmCliRes, pParams); return pRmCliRes->__cliresCtrlCmdSyncGpuBoostGroupInfo__(pRmCliRes, pParams);
} }
NV_STATUS cliresCtrlCmdVgpuGetStartData_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_VGPU_GET_START_DATA_PARAMS *pVgpuStartParams);
static inline NV_STATUS cliresCtrlCmdVgpuGetStartData_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_VGPU_GET_START_DATA_PARAMS *pVgpuStartParams) {
return pRmCliRes->__cliresCtrlCmdVgpuGetStartData__(pRmCliRes, pVgpuStartParams);
}
NV_STATUS cliresCtrlCmdVgpuGetVgpuVersion_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_VGPU_GET_VGPU_VERSION_PARAMS *vgpuVersionInfo); NV_STATUS cliresCtrlCmdVgpuGetVgpuVersion_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_VGPU_GET_VGPU_VERSION_PARAMS *vgpuVersionInfo);
static inline NV_STATUS cliresCtrlCmdVgpuGetVgpuVersion_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_VGPU_GET_VGPU_VERSION_PARAMS *vgpuVersionInfo) { static inline NV_STATUS cliresCtrlCmdVgpuGetVgpuVersion_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_VGPU_GET_VGPU_VERSION_PARAMS *vgpuVersionInfo) {

View File

@ -322,6 +322,21 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Confiden
/*pClassInfo=*/ &(__nvoc_class_def_ConfidentialComputeApi.classInfo), /*pClassInfo=*/ &(__nvoc_class_def_ConfidentialComputeApi.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED #if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "confComputeApiCtrlCmdGpuGetNumSecureChannels" /*func=*/ "confComputeApiCtrlCmdGpuGetNumSecureChannels"
#endif
},
{ /* [10] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) confComputeApiCtrlCmdGpuGetKeyRotationState_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*flags=*/ 0x10u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xcb33010cu,
/*paramSize=*/ sizeof(NV_CONF_COMPUTE_CTRL_CMD_GPU_GET_KEY_ROTATION_STATE_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_ConfidentialComputeApi.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "confComputeApiCtrlCmdGpuGetKeyRotationState"
#endif #endif
}, },
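The new entry is registered under methodId 0xcb33010cu. If it helps to read that value, RM control IDs conventionally pack the class in the upper 16 bits and the command index in the lower 16 bits; treat that layout, and the macro names below, as my assumption rather than a quotation of the headers.

#include <stdio.h>

/* Illustrative helpers, assuming the usual RM layout: class ID in the upper
 * 16 bits of a control command, command index in the lower 16 bits. */
#define DEMO_CTRL_CLASS(cmd)  (((cmd) >> 16) & 0xFFFFu)
#define DEMO_CTRL_INDEX(cmd)  ((cmd) & 0xFFFFu)

int main(void)
{
    unsigned int cmd = 0xcb33010cu; /* methodId registered above */
    printf("class = 0x%04x, command = 0x%04x\n",
           DEMO_CTRL_CLASS(cmd), DEMO_CTRL_INDEX(cmd));
    /* expected output: class = 0xcb33, command = 0x010c */
    return 0;
}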
@ -329,7 +344,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Confiden
const struct NVOC_EXPORT_INFO __nvoc_export_info_ConfidentialComputeApi = const struct NVOC_EXPORT_INFO __nvoc_export_info_ConfidentialComputeApi =
{ {
/*numEntries=*/ 10, /*numEntries=*/ 11,
/*pExportEntries=*/ __nvoc_exported_method_def_ConfidentialComputeApi /*pExportEntries=*/ __nvoc_exported_method_def_ConfidentialComputeApi
}; };
@ -406,6 +421,10 @@ static void __nvoc_init_funcTable_ConfidentialComputeApi_1(ConfidentialComputeAp
pThis->__confComputeApiCtrlCmdSystemSetSecurityPolicy__ = &confComputeApiCtrlCmdSystemSetSecurityPolicy_IMPL; pThis->__confComputeApiCtrlCmdSystemSetSecurityPolicy__ = &confComputeApiCtrlCmdSystemSetSecurityPolicy_IMPL;
#endif #endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
pThis->__confComputeApiCtrlCmdGpuGetKeyRotationState__ = &confComputeApiCtrlCmdGpuGetKeyRotationState_IMPL;
#endif
pThis->__confComputeApiShareCallback__ = &__nvoc_thunk_RmResource_confComputeApiShareCallback; pThis->__confComputeApiShareCallback__ = &__nvoc_thunk_RmResource_confComputeApiShareCallback;
pThis->__confComputeApiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_confComputeApiCheckMemInterUnmap; pThis->__confComputeApiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_confComputeApiCheckMemInterUnmap;

View File

@ -7,7 +7,7 @@ extern "C" {
#endif #endif
/* /*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT * SPDX-License-Identifier: MIT
* *
* Permission is hereby granted, free of charge, to any person obtaining a * Permission is hereby granted, free of charge, to any person obtaining a
@ -76,6 +76,7 @@ struct ConfidentialComputeApi {
NV_STATUS (*__confComputeApiCtrlCmdGpuGetNumSecureChannels__)(struct ConfidentialComputeApi *, NV_CONF_COMPUTE_CTRL_CMD_GPU_GET_NUM_SECURE_CHANNELS_PARAMS *); NV_STATUS (*__confComputeApiCtrlCmdGpuGetNumSecureChannels__)(struct ConfidentialComputeApi *, NV_CONF_COMPUTE_CTRL_CMD_GPU_GET_NUM_SECURE_CHANNELS_PARAMS *);
NV_STATUS (*__confComputeApiCtrlCmdSystemGetSecurityPolicy__)(struct ConfidentialComputeApi *, NV_CONF_COMPUTE_CTRL_GET_SECURITY_POLICY_PARAMS *); NV_STATUS (*__confComputeApiCtrlCmdSystemGetSecurityPolicy__)(struct ConfidentialComputeApi *, NV_CONF_COMPUTE_CTRL_GET_SECURITY_POLICY_PARAMS *);
NV_STATUS (*__confComputeApiCtrlCmdSystemSetSecurityPolicy__)(struct ConfidentialComputeApi *, NV_CONF_COMPUTE_CTRL_SET_SECURITY_POLICY_PARAMS *); NV_STATUS (*__confComputeApiCtrlCmdSystemSetSecurityPolicy__)(struct ConfidentialComputeApi *, NV_CONF_COMPUTE_CTRL_SET_SECURITY_POLICY_PARAMS *);
NV_STATUS (*__confComputeApiCtrlCmdGpuGetKeyRotationState__)(struct ConfidentialComputeApi *, NV_CONF_COMPUTE_CTRL_CMD_GPU_GET_KEY_ROTATION_STATE_PARAMS *);
NvBool (*__confComputeApiShareCallback__)(struct ConfidentialComputeApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); NvBool (*__confComputeApiShareCallback__)(struct ConfidentialComputeApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
NV_STATUS (*__confComputeApiCheckMemInterUnmap__)(struct ConfidentialComputeApi *, NvBool); NV_STATUS (*__confComputeApiCheckMemInterUnmap__)(struct ConfidentialComputeApi *, NvBool);
NV_STATUS (*__confComputeApiControl__)(struct ConfidentialComputeApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); NV_STATUS (*__confComputeApiControl__)(struct ConfidentialComputeApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
@ -138,6 +139,7 @@ NV_STATUS __nvoc_objCreate_ConfidentialComputeApi(ConfidentialComputeApi**, Dyna
#define confComputeApiCtrlCmdGpuGetNumSecureChannels(pConfComputeApi, pParams) confComputeApiCtrlCmdGpuGetNumSecureChannels_DISPATCH(pConfComputeApi, pParams) #define confComputeApiCtrlCmdGpuGetNumSecureChannels(pConfComputeApi, pParams) confComputeApiCtrlCmdGpuGetNumSecureChannels_DISPATCH(pConfComputeApi, pParams)
#define confComputeApiCtrlCmdSystemGetSecurityPolicy(pConfComputeApi, pParams) confComputeApiCtrlCmdSystemGetSecurityPolicy_DISPATCH(pConfComputeApi, pParams) #define confComputeApiCtrlCmdSystemGetSecurityPolicy(pConfComputeApi, pParams) confComputeApiCtrlCmdSystemGetSecurityPolicy_DISPATCH(pConfComputeApi, pParams)
#define confComputeApiCtrlCmdSystemSetSecurityPolicy(pConfComputeApi, pParams) confComputeApiCtrlCmdSystemSetSecurityPolicy_DISPATCH(pConfComputeApi, pParams) #define confComputeApiCtrlCmdSystemSetSecurityPolicy(pConfComputeApi, pParams) confComputeApiCtrlCmdSystemSetSecurityPolicy_DISPATCH(pConfComputeApi, pParams)
#define confComputeApiCtrlCmdGpuGetKeyRotationState(pConfComputeApi, pParams) confComputeApiCtrlCmdGpuGetKeyRotationState_DISPATCH(pConfComputeApi, pParams)
#define confComputeApiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) confComputeApiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) #define confComputeApiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) confComputeApiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy)
#define confComputeApiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) confComputeApiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) #define confComputeApiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) confComputeApiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define confComputeApiControl(pResource, pCallContext, pParams) confComputeApiControl_DISPATCH(pResource, pCallContext, pParams) #define confComputeApiControl(pResource, pCallContext, pParams) confComputeApiControl_DISPATCH(pResource, pCallContext, pParams)
@ -219,6 +221,12 @@ static inline NV_STATUS confComputeApiCtrlCmdSystemSetSecurityPolicy_DISPATCH(st
return pConfComputeApi->__confComputeApiCtrlCmdSystemSetSecurityPolicy__(pConfComputeApi, pParams); return pConfComputeApi->__confComputeApiCtrlCmdSystemSetSecurityPolicy__(pConfComputeApi, pParams);
} }
NV_STATUS confComputeApiCtrlCmdGpuGetKeyRotationState_IMPL(struct ConfidentialComputeApi *pConfComputeApi, NV_CONF_COMPUTE_CTRL_CMD_GPU_GET_KEY_ROTATION_STATE_PARAMS *pParams);
static inline NV_STATUS confComputeApiCtrlCmdGpuGetKeyRotationState_DISPATCH(struct ConfidentialComputeApi *pConfComputeApi, NV_CONF_COMPUTE_CTRL_CMD_GPU_GET_KEY_ROTATION_STATE_PARAMS *pParams) {
return pConfComputeApi->__confComputeApiCtrlCmdGpuGetKeyRotationState__(pConfComputeApi, pParams);
}
static inline NvBool confComputeApiShareCallback_DISPATCH(struct ConfidentialComputeApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { static inline NvBool confComputeApiShareCallback_DISPATCH(struct ConfidentialComputeApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
return pResource->__confComputeApiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); return pResource->__confComputeApiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy);
} }
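The _DISPATCH wrappers above all share one shape: the object carries a function pointer that the generated init code fills in, and the inline helper simply calls through it. A minimal stand-alone sketch of that shape, with invented names:

#include <stdio.h>

typedef int NV_STATUS_DEMO;
#define DEMO_OK 0

typedef struct DemoParams { int state; } DemoParams;

typedef struct DemoApi {
    /* per-object function pointer, filled in at init time */
    NV_STATUS_DEMO (*__demoGetKeyRotationState__)(struct DemoApi *, DemoParams *);
} DemoApi;

/* The concrete implementation (the role played by *_IMPL in the generated code). */
static NV_STATUS_DEMO demoGetKeyRotationState_IMPL(DemoApi *pApi, DemoParams *pParams)
{
    (void)pApi;
    pParams->state = 1; /* pretend "rotation in progress" */
    return DEMO_OK;
}

/* The DISPATCH helper just indirects through the object's pointer. */
static inline NV_STATUS_DEMO demoGetKeyRotationState_DISPATCH(DemoApi *pApi, DemoParams *pParams)
{
    return pApi->__demoGetKeyRotationState__(pApi, pParams);
}

static void demoInitFuncTable(DemoApi *pApi)
{
    pApi->__demoGetKeyRotationState__ = demoGetKeyRotationState_IMPL;
}

int main(void)
{
    DemoApi api;
    DemoParams params = { 0 };
    demoInitFuncTable(&api);
    demoGetKeyRotationState_DISPATCH(&api, &params);
    printf("state=%d\n", params.state);
    return 0;
}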

View File

@ -165,7 +165,8 @@ void __nvoc_init_dataField_ConfidentialCompute(ConfidentialCompute *pThis, RmHal
pThis->setProperty(pThis, PDB_PROP_CONFCOMPUTE_SPDM_ENABLED, ((NvBool)(0 != 0))); pThis->setProperty(pThis, PDB_PROP_CONFCOMPUTE_SPDM_ENABLED, ((NvBool)(0 != 0)));
pThis->setProperty(pThis, PDB_PROP_CONFCOMPUTE_MULTI_GPU_PROTECTED_PCIE_MODE_ENABLED, ((NvBool)(0 != 0))); pThis->setProperty(pThis, PDB_PROP_CONFCOMPUTE_MULTI_GPU_PROTECTED_PCIE_MODE_ENABLED, ((NvBool)(0 != 0)));
pThis->setProperty(pThis, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED, ((NvBool)(0 != 0))); pThis->setProperty(pThis, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED, ((NvBool)(0 != 0)));
pThis->setProperty(pThis, PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED, ((NvBool)(0 != 0))); pThis->setProperty(pThis, PDB_PROP_CONFCOMPUTE_KEY_ROTATION_ENABLED, ((NvBool)(0 != 0)));
pThis->setProperty(pThis, PDB_PROP_CONFCOMPUTE_INTERNAL_KEY_ROTATION_ENABLED, ((NvBool)(0 != 0)));
} }
NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* );
@ -322,6 +323,17 @@ static void __nvoc_init_funcTable_ConfidentialCompute_1(ConfidentialCompute *pTh
pThis->__confComputeGlobalKeyIsKernelPriv__ = &confComputeGlobalKeyIsKernelPriv_491d52; pThis->__confComputeGlobalKeyIsKernelPriv__ = &confComputeGlobalKeyIsKernelPriv_491d52;
} }
// Hal function -- confComputeGlobalKeyIsUvmKey
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__confComputeGlobalKeyIsUvmKey__ = &confComputeGlobalKeyIsUvmKey_GH100;
}
// default
else
{
pThis->__confComputeGlobalKeyIsUvmKey__ = &confComputeGlobalKeyIsUvmKey_491d52;
}
// Hal function -- confComputeGetKeyPairByChannel // Hal function -- confComputeGetKeyPairByChannel
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */ if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{ {
@ -351,6 +363,17 @@ static void __nvoc_init_funcTable_ConfidentialCompute_1(ConfidentialCompute *pTh
} }
} }
// Hal function -- confComputeGetKeyPairForKeySpace
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__confComputeGetKeyPairForKeySpace__ = &confComputeGetKeyPairForKeySpace_GH100;
}
// default
else
{
pThis->__confComputeGetKeyPairForKeySpace__ = &confComputeGetKeyPairForKeySpace_b3696a;
}
// Hal function -- confComputeEnableKeyRotationCallback // Hal function -- confComputeEnableKeyRotationCallback
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */ if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */
{ {
@ -387,6 +410,24 @@ static void __nvoc_init_funcTable_ConfidentialCompute_1(ConfidentialCompute *pTh
} }
} }
// Hal function -- confComputeEnableInternalKeyRotationSupport
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000001UL) )) /* RmVariantHal: VF */
{
pThis->__confComputeEnableInternalKeyRotationSupport__ = &confComputeEnableInternalKeyRotationSupport_56cd7a;
}
else
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__confComputeEnableInternalKeyRotationSupport__ = &confComputeEnableInternalKeyRotationSupport_GH100;
}
// default
else
{
pThis->__confComputeEnableInternalKeyRotationSupport__ = &confComputeEnableInternalKeyRotationSupport_56cd7a;
}
}
// Hal function -- confComputeIsDebugModeEnabled // Hal function -- confComputeIsDebugModeEnabled
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */ if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{ {
@ -431,6 +472,28 @@ static void __nvoc_init_funcTable_ConfidentialCompute_1(ConfidentialCompute *pTh
pThis->__confComputeKeyStoreDepositIvMask__ = &confComputeKeyStoreDepositIvMask_b3696a; pThis->__confComputeKeyStoreDepositIvMask__ = &confComputeKeyStoreDepositIvMask_b3696a;
} }
// Hal function -- confComputeKeyStoreUpdateKey
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__confComputeKeyStoreUpdateKey__ = &confComputeKeyStoreUpdateKey_GH100;
}
// default
else
{
pThis->__confComputeKeyStoreUpdateKey__ = &confComputeKeyStoreUpdateKey_46f6a7;
}
// Hal function -- confComputeKeyStoreIsValidGlobalKeyId
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__confComputeKeyStoreIsValidGlobalKeyId__ = &confComputeKeyStoreIsValidGlobalKeyId_GH100;
}
// default
else
{
pThis->__confComputeKeyStoreIsValidGlobalKeyId__ = &confComputeKeyStoreIsValidGlobalKeyId_491d52;
}
// Hal function -- confComputeKeyStoreInit // Hal function -- confComputeKeyStoreInit
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */ if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{ {
@ -486,17 +549,6 @@ static void __nvoc_init_funcTable_ConfidentialCompute_1(ConfidentialCompute *pTh
pThis->__confComputeKeyStoreClearExportMasterKey__ = &confComputeKeyStoreClearExportMasterKey_b3696a; pThis->__confComputeKeyStoreClearExportMasterKey__ = &confComputeKeyStoreClearExportMasterKey_b3696a;
} }
// Hal function -- confComputeKeyStoreUpdateKey
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__confComputeKeyStoreUpdateKey__ = &confComputeKeyStoreUpdateKey_GH100;
}
// default
else
{
pThis->__confComputeKeyStoreUpdateKey__ = &confComputeKeyStoreUpdateKey_46f6a7;
}
pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_ConfidentialCompute_engstateConstructEngine; pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_ConfidentialCompute_engstateConstructEngine;
pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreInitLocked__ = &__nvoc_thunk_ConfidentialCompute_engstateStatePreInitLocked; pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreInitLocked__ = &__nvoc_thunk_ConfidentialCompute_engstateStatePreInitLocked;
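The repeated ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) test above is a word-and-bit membership check: the variant index is split into a 32-bit word number (idx >> 5) and a bit position (idx & 0x1f), and the mask marks which variants (here GH100) get the chip-specific function instead of the default stub. A small sketch; the specific index values are invented for illustration.

#include <stdio.h>

/* Returns nonzero if variant `idx` is flagged in a (word, mask) pair.
 * Mirrors the generated test: word = idx >> 5, bit = idx & 0x1f. */
static int demoVariantInMask(unsigned int idx, unsigned int word, unsigned int mask)
{
    return ((idx >> 5) == word) && ((1u << (idx & 0x1f)) & mask);
}

static const char *demoPickUpdateKeyFn(unsigned int chipIdx)
{
    /* Assume, purely for illustration, that the GH100 variant lives at
     * word 1, bit 28, i.e. mask 0x10000000, as in the table above. */
    if (demoVariantInMask(chipIdx, 1u, 0x10000000u))
        return "confComputeKeyStoreUpdateKey_GH100";
    return "confComputeKeyStoreUpdateKey_46f6a7 (default stub)";
}

int main(void)
{
    unsigned int gh100Idx = 32u + 28u;  /* word 1, bit 28 */
    unsigned int otherIdx = 32u + 3u;   /* word 1, bit 3  */
    printf("%s\n", demoPickUpdateKeyFn(gh100Idx));
    printf("%s\n", demoPickUpdateKeyFn(otherIdx));
    return 0;
}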

View File

@ -42,6 +42,7 @@ extern "C" {
#include "ctrl/ctrlc56f.h" #include "ctrl/ctrlc56f.h"
#include "cc_drv.h" #include "cc_drv.h"
#include "conf_compute/cc_keystore.h" #include "conf_compute/cc_keystore.h"
#include "conf_compute/cc_keyrotation.h"
#include "kernel/gpu/fifo/kernel_channel.h" #include "kernel/gpu/fifo/kernel_channel.h"
#include "kernel/gpu/fifo/kernel_fifo.h" #include "kernel/gpu/fifo/kernel_fifo.h"
#include "kernel/gpu/intr/engine_idx.h" #include "kernel/gpu/intr/engine_idx.h"
@ -55,12 +56,6 @@ extern "C" {
****************************************************************************/ ****************************************************************************/
// //
// Temp threshold values until we move to using
// encryption statistics buffers.
//
#define KEY_ROTATION_UPPER_THRESHOLD 30
#define KEY_ROTATION_LOWER_THRESHOLD 20
// Per-key info regarding encryption ops // Per-key info regarding encryption ops
typedef struct typedef struct
{ {
@ -76,6 +71,13 @@ typedef struct
KEY_ROTATION_STATUS status; KEY_ROTATION_STATUS status;
} KEY_ROTATION_WORKITEM_INFO; } KEY_ROTATION_WORKITEM_INFO;
// Info needed by timer to setup timeouts for key rotation
typedef struct
{
TMR_EVENT *pTimer;
NvU64 timeLeftNs; // time left before hitting timeout
} KEY_ROTATION_TIMEOUT_INFO;
// Private field names are wrapped in PRIVATE_FIELD, which does nothing for // Private field names are wrapped in PRIVATE_FIELD, which does nothing for
// the matching C source file, but causes diagnostics to be issued if another // the matching C source file, but causes diagnostics to be issued if another
@ -106,20 +108,24 @@ struct ConfidentialCompute {
NvBool (*__confComputeIsSpdmEnabled__)(struct OBJGPU *, struct ConfidentialCompute *); NvBool (*__confComputeIsSpdmEnabled__)(struct OBJGPU *, struct ConfidentialCompute *);
RM_ENGINE_TYPE (*__confComputeGetEngineIdFromKeySpace__)(struct ConfidentialCompute *, NvU32); RM_ENGINE_TYPE (*__confComputeGetEngineIdFromKeySpace__)(struct ConfidentialCompute *, NvU32);
NvBool (*__confComputeGlobalKeyIsKernelPriv__)(struct ConfidentialCompute *, NvU32); NvBool (*__confComputeGlobalKeyIsKernelPriv__)(struct ConfidentialCompute *, NvU32);
NvBool (*__confComputeGlobalKeyIsUvmKey__)(struct ConfidentialCompute *, NvU32);
NV_STATUS (*__confComputeGetKeyPairByChannel__)(struct OBJGPU *, struct ConfidentialCompute *, struct KernelChannel *, NvU32 *, NvU32 *); NV_STATUS (*__confComputeGetKeyPairByChannel__)(struct OBJGPU *, struct ConfidentialCompute *, struct KernelChannel *, NvU32 *, NvU32 *);
NV_STATUS (*__confComputeTriggerKeyRotation__)(struct OBJGPU *, struct ConfidentialCompute *); NV_STATUS (*__confComputeTriggerKeyRotation__)(struct OBJGPU *, struct ConfidentialCompute *);
void (*__confComputeGetKeyPairForKeySpace__)(struct OBJGPU *, struct ConfidentialCompute *, NvU32, NvBool, NvU32 *, NvU32 *);
NV_STATUS (*__confComputeEnableKeyRotationCallback__)(struct OBJGPU *, struct ConfidentialCompute *, NvBool); NV_STATUS (*__confComputeEnableKeyRotationCallback__)(struct OBJGPU *, struct ConfidentialCompute *, NvBool);
NV_STATUS (*__confComputeEnableKeyRotationSupport__)(struct OBJGPU *, struct ConfidentialCompute *); NV_STATUS (*__confComputeEnableKeyRotationSupport__)(struct OBJGPU *, struct ConfidentialCompute *);
NV_STATUS (*__confComputeEnableInternalKeyRotationSupport__)(struct OBJGPU *, struct ConfidentialCompute *);
NvBool (*__confComputeIsDebugModeEnabled__)(struct OBJGPU *, struct ConfidentialCompute *); NvBool (*__confComputeIsDebugModeEnabled__)(struct OBJGPU *, struct ConfidentialCompute *);
NvBool (*__confComputeIsGpuCcCapable__)(struct OBJGPU *, struct ConfidentialCompute *); NvBool (*__confComputeIsGpuCcCapable__)(struct OBJGPU *, struct ConfidentialCompute *);
NV_STATUS (*__confComputeEstablishSpdmSessionAndKeys__)(struct OBJGPU *, struct ConfidentialCompute *); NV_STATUS (*__confComputeEstablishSpdmSessionAndKeys__)(struct OBJGPU *, struct ConfidentialCompute *);
void (*__confComputeKeyStoreDepositIvMask__)(struct ConfidentialCompute *, NvU32, void *); void (*__confComputeKeyStoreDepositIvMask__)(struct ConfidentialCompute *, NvU32, void *);
NV_STATUS (*__confComputeKeyStoreUpdateKey__)(struct ConfidentialCompute *, NvU32);
NvBool (*__confComputeKeyStoreIsValidGlobalKeyId__)(struct ConfidentialCompute *, NvU32);
NV_STATUS (*__confComputeKeyStoreInit__)(struct ConfidentialCompute *); NV_STATUS (*__confComputeKeyStoreInit__)(struct ConfidentialCompute *);
void (*__confComputeKeyStoreDeinit__)(struct ConfidentialCompute *); void (*__confComputeKeyStoreDeinit__)(struct ConfidentialCompute *);
void *(*__confComputeKeyStoreGetExportMasterKey__)(struct ConfidentialCompute *); void *(*__confComputeKeyStoreGetExportMasterKey__)(struct ConfidentialCompute *);
NV_STATUS (*__confComputeKeyStoreDeriveKey__)(struct ConfidentialCompute *, NvU32); NV_STATUS (*__confComputeKeyStoreDeriveKey__)(struct ConfidentialCompute *, NvU32);
void (*__confComputeKeyStoreClearExportMasterKey__)(struct ConfidentialCompute *); void (*__confComputeKeyStoreClearExportMasterKey__)(struct ConfidentialCompute *);
NV_STATUS (*__confComputeKeyStoreUpdateKey__)(struct ConfidentialCompute *, NvU32);
NV_STATUS (*__confComputeStateLoad__)(POBJGPU, struct ConfidentialCompute *, NvU32); NV_STATUS (*__confComputeStateLoad__)(POBJGPU, struct ConfidentialCompute *, NvU32);
NV_STATUS (*__confComputeStateUnload__)(POBJGPU, struct ConfidentialCompute *, NvU32); NV_STATUS (*__confComputeStateUnload__)(POBJGPU, struct ConfidentialCompute *, NvU32);
NV_STATUS (*__confComputeStatePreLoad__)(POBJGPU, struct ConfidentialCompute *, NvU32); NV_STATUS (*__confComputeStatePreLoad__)(POBJGPU, struct ConfidentialCompute *, NvU32);
@ -138,7 +144,8 @@ struct ConfidentialCompute {
NvBool PDB_PROP_CONFCOMPUTE_SPDM_ENABLED; NvBool PDB_PROP_CONFCOMPUTE_SPDM_ENABLED;
NvBool PDB_PROP_CONFCOMPUTE_MULTI_GPU_PROTECTED_PCIE_MODE_ENABLED; NvBool PDB_PROP_CONFCOMPUTE_MULTI_GPU_PROTECTED_PCIE_MODE_ENABLED;
NvBool PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED; NvBool PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED;
NvBool PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED; NvBool PDB_PROP_CONFCOMPUTE_KEY_ROTATION_ENABLED;
NvBool PDB_PROP_CONFCOMPUTE_INTERNAL_KEY_ROTATION_ENABLED;
NvU32 gspProxyRegkeys; NvU32 gspProxyRegkeys;
struct Spdm *pSpdm; struct Spdm *pSpdm;
NV2080_CTRL_INTERNAL_CONF_COMPUTE_GET_STATIC_INFO_PARAMS ccStaticInfo; NV2080_CTRL_INTERNAL_CONF_COMPUTE_GET_STATIC_INFO_PARAMS ccStaticInfo;
@ -147,24 +154,23 @@ struct ConfidentialCompute {
struct ccslContext_t *pNonReplayableFaultCcslCtx; struct ccslContext_t *pNonReplayableFaultCcslCtx;
struct ccslContext_t *pReplayableFaultCcslCtx; struct ccslContext_t *pReplayableFaultCcslCtx;
struct ccslContext_t *pGspSec2RpcCcslCtx; struct ccslContext_t *pGspSec2RpcCcslCtx;
NvU32 keyRotationCallbackCount;
NvU32 keyRotationChannelRefCount;
NvBool bAcceptClientRequest; NvBool bAcceptClientRequest;
PTMR_EVENT pGspHeartbeatTimer; PTMR_EVENT pGspHeartbeatTimer;
NvU32 heartbeatPeriodSec; NvU32 heartbeatPeriodSec;
NvU32 keyRotationEnableMask; NvU32 keyRotationEnableMask;
KEY_ROTATION_STATS_INFO lowerThreshold; NvU64 keyRotationInternalThreshold;
KEY_ROTATION_STATS_INFO upperThreshold;
NvU64 attackerAdvantage; NvU64 attackerAdvantage;
KEY_ROTATION_STATS_INFO aggregateStats[64];
NvU8 PRIVATE_FIELD(m_exportMasterKey)[32]; NvU8 PRIVATE_FIELD(m_exportMasterKey)[32];
void *PRIVATE_FIELD(m_keySlot); void *PRIVATE_FIELD(m_keySlot);
KEY_ROTATION_STATUS PRIVATE_FIELD(keyRotationState)[62]; KEY_ROTATION_STATUS PRIVATE_FIELD(keyRotationState)[64];
KEY_ROTATION_STATS_INFO PRIVATE_FIELD(aggregateStats)[62]; KEY_ROTATION_STATS_INFO PRIVATE_FIELD(freedChannelAggregateStats)[64];
KEY_ROTATION_STATS_INFO PRIVATE_FIELD(freedChannelAggregateStats)[62]; KEY_ROTATION_TIMEOUT_INFO PRIVATE_FIELD(keyRotationTimeoutInfo)[64];
PTMR_EVENT PRIVATE_FIELD(ppKeyRotationTimer)[62]; NvU32 PRIVATE_FIELD(keyRotationCount)[64];
NvU64 PRIVATE_FIELD(keyRotationLimitDelta); NvU32 PRIVATE_FIELD(keyRotationTimeout);
NvU64 PRIVATE_FIELD(keyRotationUpperLimit); NvU64 PRIVATE_FIELD(keyRotationThresholdDelta);
NvU64 PRIVATE_FIELD(keyRotationLowerLimit); NvU64 PRIVATE_FIELD(keyRotationUpperThreshold);
NvU64 PRIVATE_FIELD(keyRotationLowerThreshold);
}; };
#ifndef __NVOC_CLASS_ConfidentialCompute_TYPEDEF__ #ifndef __NVOC_CLASS_ConfidentialCompute_TYPEDEF__
@ -190,14 +196,16 @@ extern const struct NVOC_CLASS_DEF __nvoc_class_def_ConfidentialCompute;
#define PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED_BASE_CAST #define PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED_BASE_CAST
#define PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED_BASE_NAME PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED #define PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED_BASE_NAME PDB_PROP_CONFCOMPUTE_KEY_ROTATION_SUPPORTED
#define PDB_PROP_CONFCOMPUTE_INTERNAL_KEY_ROTATION_ENABLED_BASE_CAST
#define PDB_PROP_CONFCOMPUTE_INTERNAL_KEY_ROTATION_ENABLED_BASE_NAME PDB_PROP_CONFCOMPUTE_INTERNAL_KEY_ROTATION_ENABLED
#define PDB_PROP_CONFCOMPUTE_KEY_ROTATION_ENABLED_BASE_CAST
#define PDB_PROP_CONFCOMPUTE_KEY_ROTATION_ENABLED_BASE_NAME PDB_PROP_CONFCOMPUTE_KEY_ROTATION_ENABLED
#define PDB_PROP_CONFCOMPUTE_APM_FEATURE_ENABLED_BASE_CAST #define PDB_PROP_CONFCOMPUTE_APM_FEATURE_ENABLED_BASE_CAST
#define PDB_PROP_CONFCOMPUTE_APM_FEATURE_ENABLED_BASE_NAME PDB_PROP_CONFCOMPUTE_APM_FEATURE_ENABLED #define PDB_PROP_CONFCOMPUTE_APM_FEATURE_ENABLED_BASE_NAME PDB_PROP_CONFCOMPUTE_APM_FEATURE_ENABLED
#define PDB_PROP_CONFCOMPUTE_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. #define PDB_PROP_CONFCOMPUTE_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
#define PDB_PROP_CONFCOMPUTE_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING #define PDB_PROP_CONFCOMPUTE_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
#define PDB_PROP_CONFCOMPUTE_ENABLE_EARLY_INIT_BASE_CAST #define PDB_PROP_CONFCOMPUTE_ENABLE_EARLY_INIT_BASE_CAST
#define PDB_PROP_CONFCOMPUTE_ENABLE_EARLY_INIT_BASE_NAME PDB_PROP_CONFCOMPUTE_ENABLE_EARLY_INIT #define PDB_PROP_CONFCOMPUTE_ENABLE_EARLY_INIT_BASE_NAME PDB_PROP_CONFCOMPUTE_ENABLE_EARLY_INIT
#define PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED_BASE_CAST
#define PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED_BASE_NAME PDB_PROP_CONFCOMPUTE_DUMMY_KEY_ROTATION_ENABLED
#define PDB_PROP_CONFCOMPUTE_GPUS_READY_CHECK_ENABLED_BASE_CAST #define PDB_PROP_CONFCOMPUTE_GPUS_READY_CHECK_ENABLED_BASE_CAST
#define PDB_PROP_CONFCOMPUTE_GPUS_READY_CHECK_ENABLED_BASE_NAME PDB_PROP_CONFCOMPUTE_GPUS_READY_CHECK_ENABLED #define PDB_PROP_CONFCOMPUTE_GPUS_READY_CHECK_ENABLED_BASE_NAME PDB_PROP_CONFCOMPUTE_GPUS_READY_CHECK_ENABLED
#define PDB_PROP_CONFCOMPUTE_ENABLED_BASE_CAST #define PDB_PROP_CONFCOMPUTE_ENABLED_BASE_CAST
@ -242,14 +250,20 @@ NV_STATUS __nvoc_objCreate_ConfidentialCompute(ConfidentialCompute**, Dynamic*,
#define confComputeGetEngineIdFromKeySpace_HAL(pConfCompute, keySpace) confComputeGetEngineIdFromKeySpace_DISPATCH(pConfCompute, keySpace) #define confComputeGetEngineIdFromKeySpace_HAL(pConfCompute, keySpace) confComputeGetEngineIdFromKeySpace_DISPATCH(pConfCompute, keySpace)
#define confComputeGlobalKeyIsKernelPriv(pConfCompute, keyId) confComputeGlobalKeyIsKernelPriv_DISPATCH(pConfCompute, keyId) #define confComputeGlobalKeyIsKernelPriv(pConfCompute, keyId) confComputeGlobalKeyIsKernelPriv_DISPATCH(pConfCompute, keyId)
#define confComputeGlobalKeyIsKernelPriv_HAL(pConfCompute, keyId) confComputeGlobalKeyIsKernelPriv_DISPATCH(pConfCompute, keyId) #define confComputeGlobalKeyIsKernelPriv_HAL(pConfCompute, keyId) confComputeGlobalKeyIsKernelPriv_DISPATCH(pConfCompute, keyId)
#define confComputeGlobalKeyIsUvmKey(pConfCompute, keyId) confComputeGlobalKeyIsUvmKey_DISPATCH(pConfCompute, keyId)
#define confComputeGlobalKeyIsUvmKey_HAL(pConfCompute, keyId) confComputeGlobalKeyIsUvmKey_DISPATCH(pConfCompute, keyId)
#define confComputeGetKeyPairByChannel(pGpu, pConfCompute, pKernelChannel, pH2DKey, pD2HKey) confComputeGetKeyPairByChannel_DISPATCH(pGpu, pConfCompute, pKernelChannel, pH2DKey, pD2HKey) #define confComputeGetKeyPairByChannel(pGpu, pConfCompute, pKernelChannel, pH2DKey, pD2HKey) confComputeGetKeyPairByChannel_DISPATCH(pGpu, pConfCompute, pKernelChannel, pH2DKey, pD2HKey)
#define confComputeGetKeyPairByChannel_HAL(pGpu, pConfCompute, pKernelChannel, pH2DKey, pD2HKey) confComputeGetKeyPairByChannel_DISPATCH(pGpu, pConfCompute, pKernelChannel, pH2DKey, pD2HKey) #define confComputeGetKeyPairByChannel_HAL(pGpu, pConfCompute, pKernelChannel, pH2DKey, pD2HKey) confComputeGetKeyPairByChannel_DISPATCH(pGpu, pConfCompute, pKernelChannel, pH2DKey, pD2HKey)
#define confComputeTriggerKeyRotation(pGpu, pConfCompute) confComputeTriggerKeyRotation_DISPATCH(pGpu, pConfCompute) #define confComputeTriggerKeyRotation(pGpu, pConfCompute) confComputeTriggerKeyRotation_DISPATCH(pGpu, pConfCompute)
#define confComputeTriggerKeyRotation_HAL(pGpu, pConfCompute) confComputeTriggerKeyRotation_DISPATCH(pGpu, pConfCompute) #define confComputeTriggerKeyRotation_HAL(pGpu, pConfCompute) confComputeTriggerKeyRotation_DISPATCH(pGpu, pConfCompute)
#define confComputeGetKeyPairForKeySpace(pGpu, pConfCompute, arg0, arg1, arg2, arg3) confComputeGetKeyPairForKeySpace_DISPATCH(pGpu, pConfCompute, arg0, arg1, arg2, arg3)
#define confComputeGetKeyPairForKeySpace_HAL(pGpu, pConfCompute, arg0, arg1, arg2, arg3) confComputeGetKeyPairForKeySpace_DISPATCH(pGpu, pConfCompute, arg0, arg1, arg2, arg3)
#define confComputeEnableKeyRotationCallback(pGpu, pConfCompute, bEnable) confComputeEnableKeyRotationCallback_DISPATCH(pGpu, pConfCompute, bEnable) #define confComputeEnableKeyRotationCallback(pGpu, pConfCompute, bEnable) confComputeEnableKeyRotationCallback_DISPATCH(pGpu, pConfCompute, bEnable)
#define confComputeEnableKeyRotationCallback_HAL(pGpu, pConfCompute, bEnable) confComputeEnableKeyRotationCallback_DISPATCH(pGpu, pConfCompute, bEnable) #define confComputeEnableKeyRotationCallback_HAL(pGpu, pConfCompute, bEnable) confComputeEnableKeyRotationCallback_DISPATCH(pGpu, pConfCompute, bEnable)
#define confComputeEnableKeyRotationSupport(pGpu, pConfCompute) confComputeEnableKeyRotationSupport_DISPATCH(pGpu, pConfCompute) #define confComputeEnableKeyRotationSupport(pGpu, pConfCompute) confComputeEnableKeyRotationSupport_DISPATCH(pGpu, pConfCompute)
#define confComputeEnableKeyRotationSupport_HAL(pGpu, pConfCompute) confComputeEnableKeyRotationSupport_DISPATCH(pGpu, pConfCompute) #define confComputeEnableKeyRotationSupport_HAL(pGpu, pConfCompute) confComputeEnableKeyRotationSupport_DISPATCH(pGpu, pConfCompute)
#define confComputeEnableInternalKeyRotationSupport(pGpu, pConfCompute) confComputeEnableInternalKeyRotationSupport_DISPATCH(pGpu, pConfCompute)
#define confComputeEnableInternalKeyRotationSupport_HAL(pGpu, pConfCompute) confComputeEnableInternalKeyRotationSupport_DISPATCH(pGpu, pConfCompute)
#define confComputeIsDebugModeEnabled(pGpu, pConfCompute) confComputeIsDebugModeEnabled_DISPATCH(pGpu, pConfCompute) #define confComputeIsDebugModeEnabled(pGpu, pConfCompute) confComputeIsDebugModeEnabled_DISPATCH(pGpu, pConfCompute)
#define confComputeIsDebugModeEnabled_HAL(pGpu, pConfCompute) confComputeIsDebugModeEnabled_DISPATCH(pGpu, pConfCompute) #define confComputeIsDebugModeEnabled_HAL(pGpu, pConfCompute) confComputeIsDebugModeEnabled_DISPATCH(pGpu, pConfCompute)
#define confComputeIsGpuCcCapable(pGpu, pConfCompute) confComputeIsGpuCcCapable_DISPATCH(pGpu, pConfCompute) #define confComputeIsGpuCcCapable(pGpu, pConfCompute) confComputeIsGpuCcCapable_DISPATCH(pGpu, pConfCompute)
@ -258,6 +272,10 @@ NV_STATUS __nvoc_objCreate_ConfidentialCompute(ConfidentialCompute**, Dynamic*,
#define confComputeEstablishSpdmSessionAndKeys_HAL(pGpu, pConfCompute) confComputeEstablishSpdmSessionAndKeys_DISPATCH(pGpu, pConfCompute) #define confComputeEstablishSpdmSessionAndKeys_HAL(pGpu, pConfCompute) confComputeEstablishSpdmSessionAndKeys_DISPATCH(pGpu, pConfCompute)
#define confComputeKeyStoreDepositIvMask(pConfCompute, globalKeyId, ivMask) confComputeKeyStoreDepositIvMask_DISPATCH(pConfCompute, globalKeyId, ivMask) #define confComputeKeyStoreDepositIvMask(pConfCompute, globalKeyId, ivMask) confComputeKeyStoreDepositIvMask_DISPATCH(pConfCompute, globalKeyId, ivMask)
#define confComputeKeyStoreDepositIvMask_HAL(pConfCompute, globalKeyId, ivMask) confComputeKeyStoreDepositIvMask_DISPATCH(pConfCompute, globalKeyId, ivMask) #define confComputeKeyStoreDepositIvMask_HAL(pConfCompute, globalKeyId, ivMask) confComputeKeyStoreDepositIvMask_DISPATCH(pConfCompute, globalKeyId, ivMask)
#define confComputeKeyStoreUpdateKey(pConfCompute, globalKeyId) confComputeKeyStoreUpdateKey_DISPATCH(pConfCompute, globalKeyId)
#define confComputeKeyStoreUpdateKey_HAL(pConfCompute, globalKeyId) confComputeKeyStoreUpdateKey_DISPATCH(pConfCompute, globalKeyId)
#define confComputeKeyStoreIsValidGlobalKeyId(pConfCompute, globalKeyId) confComputeKeyStoreIsValidGlobalKeyId_DISPATCH(pConfCompute, globalKeyId)
#define confComputeKeyStoreIsValidGlobalKeyId_HAL(pConfCompute, globalKeyId) confComputeKeyStoreIsValidGlobalKeyId_DISPATCH(pConfCompute, globalKeyId)
#define confComputeKeyStoreInit(pConfCompute) confComputeKeyStoreInit_DISPATCH(pConfCompute) #define confComputeKeyStoreInit(pConfCompute) confComputeKeyStoreInit_DISPATCH(pConfCompute)
#define confComputeKeyStoreInit_HAL(pConfCompute) confComputeKeyStoreInit_DISPATCH(pConfCompute) #define confComputeKeyStoreInit_HAL(pConfCompute) confComputeKeyStoreInit_DISPATCH(pConfCompute)
#define confComputeKeyStoreDeinit(pConfCompute) confComputeKeyStoreDeinit_DISPATCH(pConfCompute) #define confComputeKeyStoreDeinit(pConfCompute) confComputeKeyStoreDeinit_DISPATCH(pConfCompute)
@ -268,8 +286,6 @@ NV_STATUS __nvoc_objCreate_ConfidentialCompute(ConfidentialCompute**, Dynamic*,
#define confComputeKeyStoreDeriveKey_HAL(pConfCompute, globalKeyId) confComputeKeyStoreDeriveKey_DISPATCH(pConfCompute, globalKeyId) #define confComputeKeyStoreDeriveKey_HAL(pConfCompute, globalKeyId) confComputeKeyStoreDeriveKey_DISPATCH(pConfCompute, globalKeyId)
#define confComputeKeyStoreClearExportMasterKey(pConfCompute) confComputeKeyStoreClearExportMasterKey_DISPATCH(pConfCompute) #define confComputeKeyStoreClearExportMasterKey(pConfCompute) confComputeKeyStoreClearExportMasterKey_DISPATCH(pConfCompute)
#define confComputeKeyStoreClearExportMasterKey_HAL(pConfCompute) confComputeKeyStoreClearExportMasterKey_DISPATCH(pConfCompute) #define confComputeKeyStoreClearExportMasterKey_HAL(pConfCompute) confComputeKeyStoreClearExportMasterKey_DISPATCH(pConfCompute)
#define confComputeKeyStoreUpdateKey(pConfCompute, globalKeyId) confComputeKeyStoreUpdateKey_DISPATCH(pConfCompute, globalKeyId)
#define confComputeKeyStoreUpdateKey_HAL(pConfCompute, globalKeyId) confComputeKeyStoreUpdateKey_DISPATCH(pConfCompute, globalKeyId)
#define confComputeStateLoad(pGpu, pEngstate, arg0) confComputeStateLoad_DISPATCH(pGpu, pEngstate, arg0) #define confComputeStateLoad(pGpu, pEngstate, arg0) confComputeStateLoad_DISPATCH(pGpu, pEngstate, arg0)
#define confComputeStateUnload(pGpu, pEngstate, arg0) confComputeStateUnload_DISPATCH(pGpu, pEngstate, arg0) #define confComputeStateUnload(pGpu, pEngstate, arg0) confComputeStateUnload_DISPATCH(pGpu, pEngstate, arg0)
#define confComputeStatePreLoad(pGpu, pEngstate, arg0) confComputeStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) #define confComputeStatePreLoad(pGpu, pEngstate, arg0) confComputeStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
@ -427,6 +443,16 @@ static inline NvBool confComputeGlobalKeyIsKernelPriv_DISPATCH(struct Confidenti
return pConfCompute->__confComputeGlobalKeyIsKernelPriv__(pConfCompute, keyId); return pConfCompute->__confComputeGlobalKeyIsKernelPriv__(pConfCompute, keyId);
} }
NvBool confComputeGlobalKeyIsUvmKey_GH100(struct ConfidentialCompute *pConfCompute, NvU32 keyId);
static inline NvBool confComputeGlobalKeyIsUvmKey_491d52(struct ConfidentialCompute *pConfCompute, NvU32 keyId) {
return ((NvBool)(0 != 0));
}
static inline NvBool confComputeGlobalKeyIsUvmKey_DISPATCH(struct ConfidentialCompute *pConfCompute, NvU32 keyId) {
return pConfCompute->__confComputeGlobalKeyIsUvmKey__(pConfCompute, keyId);
}
NV_STATUS confComputeGetKeyPairByChannel_GH100(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, NvU32 *pH2DKey, NvU32 *pD2HKey); NV_STATUS confComputeGetKeyPairByChannel_GH100(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, NvU32 *pH2DKey, NvU32 *pD2HKey);
static inline NV_STATUS confComputeGetKeyPairByChannel_46f6a7(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, NvU32 *pH2DKey, NvU32 *pD2HKey) { static inline NV_STATUS confComputeGetKeyPairByChannel_46f6a7(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, struct KernelChannel *pKernelChannel, NvU32 *pH2DKey, NvU32 *pD2HKey) {
@ -451,6 +477,16 @@ static inline NV_STATUS confComputeTriggerKeyRotation_DISPATCH(struct OBJGPU *pG
return pConfCompute->__confComputeTriggerKeyRotation__(pGpu, pConfCompute); return pConfCompute->__confComputeTriggerKeyRotation__(pGpu, pConfCompute);
} }
void confComputeGetKeyPairForKeySpace_GH100(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 arg0, NvBool arg1, NvU32 *arg2, NvU32 *arg3);
static inline void confComputeGetKeyPairForKeySpace_b3696a(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 arg0, NvBool arg1, NvU32 *arg2, NvU32 *arg3) {
return;
}
static inline void confComputeGetKeyPairForKeySpace_DISPATCH(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 arg0, NvBool arg1, NvU32 *arg2, NvU32 *arg3) {
pConfCompute->__confComputeGetKeyPairForKeySpace__(pGpu, pConfCompute, arg0, arg1, arg2, arg3);
}
NV_STATUS confComputeEnableKeyRotationCallback_GH100(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvBool bEnable); NV_STATUS confComputeEnableKeyRotationCallback_GH100(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvBool bEnable);
static inline NV_STATUS confComputeEnableKeyRotationCallback_56cd7a(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvBool bEnable) { static inline NV_STATUS confComputeEnableKeyRotationCallback_56cd7a(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvBool bEnable) {
@ -471,6 +507,16 @@ static inline NV_STATUS confComputeEnableKeyRotationSupport_DISPATCH(struct OBJG
return pConfCompute->__confComputeEnableKeyRotationSupport__(pGpu, pConfCompute); return pConfCompute->__confComputeEnableKeyRotationSupport__(pGpu, pConfCompute);
} }
NV_STATUS confComputeEnableInternalKeyRotationSupport_GH100(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute);
static inline NV_STATUS confComputeEnableInternalKeyRotationSupport_56cd7a(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute) {
return NV_OK;
}
static inline NV_STATUS confComputeEnableInternalKeyRotationSupport_DISPATCH(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute) {
return pConfCompute->__confComputeEnableInternalKeyRotationSupport__(pGpu, pConfCompute);
}
NvBool confComputeIsDebugModeEnabled_GH100(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute); NvBool confComputeIsDebugModeEnabled_GH100(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute);
static inline NvBool confComputeIsDebugModeEnabled_491d52(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute) { static inline NvBool confComputeIsDebugModeEnabled_491d52(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute) {
@ -511,6 +557,26 @@ static inline void confComputeKeyStoreDepositIvMask_DISPATCH(struct Confidential
pConfCompute->__confComputeKeyStoreDepositIvMask__(pConfCompute, globalKeyId, ivMask); pConfCompute->__confComputeKeyStoreDepositIvMask__(pConfCompute, globalKeyId, ivMask);
} }
NV_STATUS confComputeKeyStoreUpdateKey_GH100(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId);
static inline NV_STATUS confComputeKeyStoreUpdateKey_46f6a7(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId) {
return NV_ERR_NOT_SUPPORTED;
}
static inline NV_STATUS confComputeKeyStoreUpdateKey_DISPATCH(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId) {
return pConfCompute->__confComputeKeyStoreUpdateKey__(pConfCompute, globalKeyId);
}
NvBool confComputeKeyStoreIsValidGlobalKeyId_GH100(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId);
static inline NvBool confComputeKeyStoreIsValidGlobalKeyId_491d52(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId) {
return ((NvBool)(0 != 0));
}
static inline NvBool confComputeKeyStoreIsValidGlobalKeyId_DISPATCH(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId) {
return pConfCompute->__confComputeKeyStoreIsValidGlobalKeyId__(pConfCompute, globalKeyId);
}
NV_STATUS confComputeKeyStoreInit_GH100(struct ConfidentialCompute *pConfCompute); NV_STATUS confComputeKeyStoreInit_GH100(struct ConfidentialCompute *pConfCompute);
static inline NV_STATUS confComputeKeyStoreInit_46f6a7(struct ConfidentialCompute *pConfCompute) { static inline NV_STATUS confComputeKeyStoreInit_46f6a7(struct ConfidentialCompute *pConfCompute) {
@ -561,16 +627,6 @@ static inline void confComputeKeyStoreClearExportMasterKey_DISPATCH(struct Confi
pConfCompute->__confComputeKeyStoreClearExportMasterKey__(pConfCompute); pConfCompute->__confComputeKeyStoreClearExportMasterKey__(pConfCompute);
} }
NV_STATUS confComputeKeyStoreUpdateKey_GH100(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId);
static inline NV_STATUS confComputeKeyStoreUpdateKey_46f6a7(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId) {
return NV_ERR_NOT_SUPPORTED;
}
static inline NV_STATUS confComputeKeyStoreUpdateKey_DISPATCH(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId) {
return pConfCompute->__confComputeKeyStoreUpdateKey__(pConfCompute, globalKeyId);
}
static inline NV_STATUS confComputeStateLoad_DISPATCH(POBJGPU pGpu, struct ConfidentialCompute *pEngstate, NvU32 arg0) { static inline NV_STATUS confComputeStateLoad_DISPATCH(POBJGPU pGpu, struct ConfidentialCompute *pEngstate, NvU32 arg0) {
return pEngstate->__confComputeStateLoad__(pGpu, pEngstate, arg0); return pEngstate->__confComputeStateLoad__(pGpu, pEngstate, arg0);
} }
@ -651,26 +707,37 @@ static inline NV_STATUS confComputeGetKeySlotFromGlobalKeyId(struct Confidential
#define confComputeGetKeySlotFromGlobalKeyId(pConfCompute, globalKeyId, pSlot) confComputeGetKeySlotFromGlobalKeyId_IMPL(pConfCompute, globalKeyId, pSlot) #define confComputeGetKeySlotFromGlobalKeyId(pConfCompute, globalKeyId, pSlot) confComputeGetKeySlotFromGlobalKeyId_IMPL(pConfCompute, globalKeyId, pSlot)
#endif //__nvoc_conf_compute_h_disabled #endif //__nvoc_conf_compute_h_disabled
NV_STATUS confComputeCheckAndScheduleKeyRotation_IMPL(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey); NV_STATUS confComputeCheckAndPerformKeyRotation_IMPL(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey);
#ifdef __nvoc_conf_compute_h_disabled #ifdef __nvoc_conf_compute_h_disabled
static inline NV_STATUS confComputeCheckAndScheduleKeyRotation(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey) { static inline NV_STATUS confComputeCheckAndPerformKeyRotation(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey) {
NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!"); NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
return NV_ERR_NOT_SUPPORTED; return NV_ERR_NOT_SUPPORTED;
} }
#else //__nvoc_conf_compute_h_disabled #else //__nvoc_conf_compute_h_disabled
#define confComputeCheckAndScheduleKeyRotation(pGpu, pConfCompute, h2dKey, d2hKey) confComputeCheckAndScheduleKeyRotation_IMPL(pGpu, pConfCompute, h2dKey, d2hKey) #define confComputeCheckAndPerformKeyRotation(pGpu, pConfCompute, h2dKey, d2hKey) confComputeCheckAndPerformKeyRotation_IMPL(pGpu, pConfCompute, h2dKey, d2hKey)
#endif //__nvoc_conf_compute_h_disabled #endif //__nvoc_conf_compute_h_disabled
NV_STATUS confComputeScheduleKeyRotationWorkItem_IMPL(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey); NV_STATUS confComputePerformKeyRotation_IMPL(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey, NvBool bWorkItem);
#ifdef __nvoc_conf_compute_h_disabled #ifdef __nvoc_conf_compute_h_disabled
static inline NV_STATUS confComputeScheduleKeyRotationWorkItem(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey) { static inline NV_STATUS confComputePerformKeyRotation(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey, NvBool bWorkItem) {
NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!"); NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
return NV_ERR_NOT_SUPPORTED; return NV_ERR_NOT_SUPPORTED;
} }
#else //__nvoc_conf_compute_h_disabled #else //__nvoc_conf_compute_h_disabled
#define confComputeScheduleKeyRotationWorkItem(pGpu, pConfCompute, h2dKey, d2hKey) confComputeScheduleKeyRotationWorkItem_IMPL(pGpu, pConfCompute, h2dKey, d2hKey) #define confComputePerformKeyRotation(pGpu, pConfCompute, h2dKey, d2hKey, bWorkItem) confComputePerformKeyRotation_IMPL(pGpu, pConfCompute, h2dKey, d2hKey, bWorkItem)
#endif //__nvoc_conf_compute_h_disabled
NV_STATUS confComputeForceKeyRotation_IMPL(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey);
#ifdef __nvoc_conf_compute_h_disabled
static inline NV_STATUS confComputeForceKeyRotation(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey, NvU32 d2hKey) {
NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_conf_compute_h_disabled
#define confComputeForceKeyRotation(pGpu, pConfCompute, h2dKey, d2hKey) confComputeForceKeyRotation_IMPL(pGpu, pConfCompute, h2dKey, d2hKey)
#endif //__nvoc_conf_compute_h_disabled #endif //__nvoc_conf_compute_h_disabled
NV_STATUS confComputeSetKeyRotationStatus_IMPL(struct ConfidentialCompute *pConfCompute, NvU32 globalKey, KEY_ROTATION_STATUS status); NV_STATUS confComputeSetKeyRotationStatus_IMPL(struct ConfidentialCompute *pConfCompute, NvU32 globalKey, KEY_ROTATION_STATUS status);
@@ -716,6 +783,39 @@ static inline NV_STATUS confComputeUpdateFreedChannelStats(struct OBJGPU *pGpu,
#define confComputeUpdateFreedChannelStats(pGpu, pConfCompute, pKernelChannel) confComputeUpdateFreedChannelStats_IMPL(pGpu, pConfCompute, pKernelChannel) #define confComputeUpdateFreedChannelStats(pGpu, pConfCompute, pKernelChannel) confComputeUpdateFreedChannelStats_IMPL(pGpu, pConfCompute, pKernelChannel)
#endif //__nvoc_conf_compute_h_disabled #endif //__nvoc_conf_compute_h_disabled
NV_STATUS confComputeStartKeyRotationTimer_IMPL(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey);
#ifdef __nvoc_conf_compute_h_disabled
static inline NV_STATUS confComputeStartKeyRotationTimer(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey) {
NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_conf_compute_h_disabled
#define confComputeStartKeyRotationTimer(pGpu, pConfCompute, h2dKey) confComputeStartKeyRotationTimer_IMPL(pGpu, pConfCompute, h2dKey)
#endif //__nvoc_conf_compute_h_disabled
NV_STATUS confComputeStopKeyRotationTimer_IMPL(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey);
#ifdef __nvoc_conf_compute_h_disabled
static inline NV_STATUS confComputeStopKeyRotationTimer(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute, NvU32 h2dKey) {
NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_conf_compute_h_disabled
#define confComputeStopKeyRotationTimer(pGpu, pConfCompute, h2dKey) confComputeStopKeyRotationTimer_IMPL(pGpu, pConfCompute, h2dKey)
#endif //__nvoc_conf_compute_h_disabled
NvBool confComputeIsUvmKeyRotationPending_IMPL(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute);
#ifdef __nvoc_conf_compute_h_disabled
static inline NvBool confComputeIsUvmKeyRotationPending(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute) {
NV_ASSERT_FAILED_PRECOMP("ConfidentialCompute was disabled!");
return NV_FALSE;
}
#else //__nvoc_conf_compute_h_disabled
#define confComputeIsUvmKeyRotationPending(pGpu, pConfCompute) confComputeIsUvmKeyRotationPending_IMPL(pGpu, pConfCompute)
#endif //__nvoc_conf_compute_h_disabled
NV_STATUS confComputeSetKeyRotationThreshold_IMPL(struct ConfidentialCompute *pConfCompute, NvU64 attackerAdvantage); NV_STATUS confComputeSetKeyRotationThreshold_IMPL(struct ConfidentialCompute *pConfCompute, NvU64 attackerAdvantage);
#ifdef __nvoc_conf_compute_h_disabled #ifdef __nvoc_conf_compute_h_disabled
@@ -749,6 +849,9 @@ static inline NvBool confComputeIsLowerThresholdCrossed(struct ConfidentialCompu
#define confComputeIsLowerThresholdCrossed(pConfCompute, pStatsInfo) confComputeIsLowerThresholdCrossed_IMPL(pConfCompute, pStatsInfo) #define confComputeIsLowerThresholdCrossed(pConfCompute, pStatsInfo) confComputeIsLowerThresholdCrossed_IMPL(pConfCompute, pStatsInfo)
#endif //__nvoc_conf_compute_h_disabled #endif //__nvoc_conf_compute_h_disabled
NvBool confComputeIsGivenThresholdCrossed_IMPL(const CC_CRYPTOBUNDLE_STATS *pStatsInfo, NvU64 threshold, NvBool bEncrypt);
#define confComputeIsGivenThresholdCrossed(pStatsInfo, threshold, bEncrypt) confComputeIsGivenThresholdCrossed_IMPL(pStatsInfo, threshold, bEncrypt)
#undef PRIVATE_FIELD #undef PRIVATE_FIELD
#ifndef NVOC_CONF_COMPUTE_H_PRIVATE_ACCESS_ALLOWED #ifndef NVOC_CONF_COMPUTE_H_PRIVATE_ACCESS_ALLOWED
@@ -782,22 +885,6 @@ void NVOC_PRIVATE_FUNCTION(confComputeKeyStoreClearExportMasterKey)(struct Confi
#undef confComputeKeyStoreClearExportMasterKey_HAL #undef confComputeKeyStoreClearExportMasterKey_HAL
void NVOC_PRIVATE_FUNCTION(confComputeKeyStoreClearExportMasterKey_HAL)(struct ConfidentialCompute *pConfCompute); void NVOC_PRIVATE_FUNCTION(confComputeKeyStoreClearExportMasterKey_HAL)(struct ConfidentialCompute *pConfCompute);
#undef confComputeKeyStoreUpdateKey
NV_STATUS NVOC_PRIVATE_FUNCTION(confComputeKeyStoreUpdateKey)(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId);
#undef confComputeKeyStoreUpdateKey_HAL
NV_STATUS NVOC_PRIVATE_FUNCTION(confComputeKeyStoreUpdateKey_HAL)(struct ConfidentialCompute *pConfCompute, NvU32 globalKeyId);
#ifndef __nvoc_conf_compute_h_disabled
#undef confComputeIsUpperThresholdCrossed
NvBool NVOC_PRIVATE_FUNCTION(confComputeIsUpperThresholdCrossed)(struct ConfidentialCompute *pConfCompute, const KEY_ROTATION_STATS_INFO *pStatsInfo);
#endif //__nvoc_conf_compute_h_disabled
#ifndef __nvoc_conf_compute_h_disabled
#undef confComputeIsLowerThresholdCrossed
NvBool NVOC_PRIVATE_FUNCTION(confComputeIsLowerThresholdCrossed)(struct ConfidentialCompute *pConfCompute, const KEY_ROTATION_STATS_INFO *pStatsInfo);
#endif //__nvoc_conf_compute_h_disabled
#endif // NVOC_CONF_COMPUTE_H_PRIVATE_ACCESS_ALLOWED #endif // NVOC_CONF_COMPUTE_H_PRIVATE_ACCESS_ALLOWED
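For context: the conf_compute hunks above add a family of key-rotation entry points (check-and-perform, force, per-key timer start/stop, a UVM-pending query, and a raw threshold check), each wrapped in the usual __nvoc_conf_compute_h_disabled stub so callers compile whether or not Confidential Compute support is built. A minimal caller sketch, assuming only the signatures declared above; the helper name, bForce flag, and control flow are hypothetical, not taken from the driver:

/* Hypothetical sketch: exercises the key-rotation entry points declared above. */
static NV_STATUS
ccKeyRotationSketch(struct OBJGPU *pGpu, struct ConfidentialCompute *pConfCompute,
                    NvU32 h2dKey, NvU32 d2hKey, NvBool bForce)
{
    NV_STATUS status;

    if (bForce)
    {
        /* Rotate the key pair unconditionally, ignoring usage statistics. */
        status = confComputeForceKeyRotation(pGpu, pConfCompute, h2dKey, d2hKey);
    }
    else
    {
        /* Rotate only if the pair's usage statistics call for it. */
        status = confComputeCheckAndPerformKeyRotation(pGpu, pConfCompute, h2dKey, d2hKey);
    }

    if (status != NV_OK)
        return status;

    /* Re-arm the per-key timer that drives timeout-based rotation (illustrative). */
    return confComputeStartKeyRotationTimer(pGpu, pConfCompute, h2dKey);
}

When __nvoc_conf_compute_h_disabled is defined, each call above resolves to an inline stub that asserts and returns NV_ERR_NOT_SUPPORTED, so the same caller builds in both configurations.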

View File

@@ -124,6 +124,16 @@ static void __nvoc_init_funcTable_CrashCatReport_1(CrashCatReport *pThis) {
PORT_UNREFERENCED_VARIABLE(reportHal); PORT_UNREFERENCED_VARIABLE(reportHal);
PORT_UNREFERENCED_VARIABLE(reportHal_HalVarIdx); PORT_UNREFERENCED_VARIABLE(reportHal_HalVarIdx);
// Hal function -- crashcatReportSourceContainment
if (( ((reportHal_HalVarIdx >> 5) == 0UL) && ((1UL << (reportHal_HalVarIdx & 0x1f)) & 0x00000004UL) )) /* CrashCatReportHal: V1_LIBOS3 */
{
pThis->__crashcatReportSourceContainment__ = &crashcatReportSourceContainment_V1_LIBOS3;
}
else
{
pThis->__crashcatReportSourceContainment__ = &crashcatReportSourceContainment_3e9f29;
}
// Hal function -- crashcatReportLogReporter // Hal function -- crashcatReportLogReporter
if (( ((reportHal_HalVarIdx >> 5) == 0UL) && ((1UL << (reportHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* CrashCatReportHal: V1_LIBOS2 */ if (( ((reportHal_HalVarIdx >> 5) == 0UL) && ((1UL << (reportHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* CrashCatReportHal: V1_LIBOS2 */
{ {

View File

@@ -87,6 +87,7 @@ struct CrashCatReport {
struct Object __nvoc_base_Object; struct Object __nvoc_base_Object;
struct Object *__nvoc_pbase_Object; struct Object *__nvoc_pbase_Object;
struct CrashCatReport *__nvoc_pbase_CrashCatReport; struct CrashCatReport *__nvoc_pbase_CrashCatReport;
NV_CRASHCAT_CONTAINMENT (*__crashcatReportSourceContainment__)(struct CrashCatReport *);
void (*__crashcatReportLogReporter__)(struct CrashCatReport *); void (*__crashcatReportLogReporter__)(struct CrashCatReport *);
void (*__crashcatReportLogSource__)(struct CrashCatReport *); void (*__crashcatReportLogSource__)(struct CrashCatReport *);
struct CrashCatReportHal reportHal; struct CrashCatReportHal reportHal;
@@ -124,6 +125,8 @@ NV_STATUS __nvoc_objCreate_CrashCatReport(CrashCatReport**, Dynamic*, NvU32,
#define __objCreate_CrashCatReport(ppNewObj, pParent, createFlags, CrashCatReportHal_version, CrashCatReportHal_implementer, arg_ppReportBytes, arg_bytesRemaining) \ #define __objCreate_CrashCatReport(ppNewObj, pParent, createFlags, CrashCatReportHal_version, CrashCatReportHal_implementer, arg_ppReportBytes, arg_bytesRemaining) \
__nvoc_objCreate_CrashCatReport((ppNewObj), staticCast((pParent), Dynamic), (createFlags), CrashCatReportHal_version, CrashCatReportHal_implementer, arg_ppReportBytes, arg_bytesRemaining) __nvoc_objCreate_CrashCatReport((ppNewObj), staticCast((pParent), Dynamic), (createFlags), CrashCatReportHal_version, CrashCatReportHal_implementer, arg_ppReportBytes, arg_bytesRemaining)
#define crashcatReportSourceContainment(arg0) crashcatReportSourceContainment_DISPATCH(arg0)
#define crashcatReportSourceContainment_HAL(arg0) crashcatReportSourceContainment_DISPATCH(arg0)
#define crashcatReportLogReporter(arg0) crashcatReportLogReporter_DISPATCH(arg0) #define crashcatReportLogReporter(arg0) crashcatReportLogReporter_DISPATCH(arg0)
#define crashcatReportLogReporter_HAL(arg0) crashcatReportLogReporter_DISPATCH(arg0) #define crashcatReportLogReporter_HAL(arg0) crashcatReportLogReporter_DISPATCH(arg0)
#define crashcatReportLogSource(arg0) crashcatReportLogSource_DISPATCH(arg0) #define crashcatReportLogSource(arg0) crashcatReportLogSource_DISPATCH(arg0)
@@ -268,6 +271,16 @@ static inline void crashcatReportLogIo32State(struct CrashCatReport *arg0) {
#define crashcatReportLogIo32State_HAL(arg0) crashcatReportLogIo32State(arg0) #define crashcatReportLogIo32State_HAL(arg0) crashcatReportLogIo32State(arg0)
static inline NV_CRASHCAT_CONTAINMENT crashcatReportSourceContainment_3e9f29(struct CrashCatReport *arg0) {
return NV_CRASHCAT_CONTAINMENT_UNSPECIFIED;
}
NV_CRASHCAT_CONTAINMENT crashcatReportSourceContainment_V1_LIBOS3(struct CrashCatReport *arg0);
static inline NV_CRASHCAT_CONTAINMENT crashcatReportSourceContainment_DISPATCH(struct CrashCatReport *arg0) {
return arg0->__crashcatReportSourceContainment__(arg0);
}
void crashcatReportLogReporter_V1_GENERIC(struct CrashCatReport *arg0); void crashcatReportLogReporter_V1_GENERIC(struct CrashCatReport *arg0);
void crashcatReportLogReporter_V1_LIBOS2(struct CrashCatReport *arg0); void crashcatReportLogReporter_V1_LIBOS2(struct CrashCatReport *arg0);
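For context: this file gains a crashcatReportSourceContainment HAL. Per the function table above, LIBOS3-format reports dispatch to crashcatReportSourceContainment_V1_LIBOS3(), while every other report variant takes the generated default and reports NV_CRASHCAT_CONTAINMENT_UNSPECIFIED. A trivial consumer sketch; the wrapper name is hypothetical:

/* Hypothetical sketch: query the containment level of a decoded crash report. */
static NV_CRASHCAT_CONTAINMENT
crashcatContainmentSketch(struct CrashCatReport *pReport)
{
    /* Dispatches through the new __crashcatReportSourceContainment__ pointer. */
    return crashcatReportSourceContainment(pReport);
}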

View File

@@ -558,7 +558,7 @@ void notifyDestruct_IMPL(struct Notifier *pNotifier);
#undef PRIVATE_FIELD #undef PRIVATE_FIELD
void CliAddSystemEvent(NvU32, NvU32); void CliAddSystemEvent(NvU32, NvU32, NvBool *);
NvBool CliDelObjectEvents(NvHandle hClient, NvHandle hObject); NvBool CliDelObjectEvents(NvHandle hClient, NvHandle hObject);
NvBool CliGetEventInfo(NvHandle hClient, NvHandle hEvent, struct Event **ppEvent); NvBool CliGetEventInfo(NvHandle hClient, NvHandle hEvent, struct Event **ppEvent);
NV_STATUS CliGetEventNotificationList(NvHandle hClient, NvHandle hObject, NV_STATUS CliGetEventNotificationList(NvHandle hClient, NvHandle hObject,

View File

@@ -1108,6 +1108,17 @@ static void __nvoc_init_funcTable_OBJGPU_1(OBJGPU *pThis) {
pThis->__gpuIsDevModeEnabledInHw__ = &gpuIsDevModeEnabledInHw_491d52; pThis->__gpuIsDevModeEnabledInHw__ = &gpuIsDevModeEnabledInHw_491d52;
} }
// Hal function -- gpuIsProtectedPcieEnabledInHw
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__gpuIsProtectedPcieEnabledInHw__ = &gpuIsProtectedPcieEnabledInHw_GH100;
}
// default
else
{
pThis->__gpuIsProtectedPcieEnabledInHw__ = &gpuIsProtectedPcieEnabledInHw_491d52;
}
// Hal function -- gpuIsCtxBufAllocInPmaSupported // Hal function -- gpuIsCtxBufAllocInPmaSupported
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x11f0fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 | GH100 */ if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x11f0fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 | GH100 */
{ {

View File

@@ -971,6 +971,7 @@ struct OBJGPU {
NvBool (*__gpuIsSliCapableWithoutDisplay__)(struct OBJGPU *); NvBool (*__gpuIsSliCapableWithoutDisplay__)(struct OBJGPU *);
NvBool (*__gpuIsCCEnabledInHw__)(struct OBJGPU *); NvBool (*__gpuIsCCEnabledInHw__)(struct OBJGPU *);
NvBool (*__gpuIsDevModeEnabledInHw__)(struct OBJGPU *); NvBool (*__gpuIsDevModeEnabledInHw__)(struct OBJGPU *);
NvBool (*__gpuIsProtectedPcieEnabledInHw__)(struct OBJGPU *);
NvBool (*__gpuIsCtxBufAllocInPmaSupported__)(struct OBJGPU *); NvBool (*__gpuIsCtxBufAllocInPmaSupported__)(struct OBJGPU *);
NV_STATUS (*__gpuUpdateErrorContainmentState__)(struct OBJGPU *, NV_ERROR_CONT_ERR_ID, NV_ERROR_CONT_LOCATION, NvU32 *); NV_STATUS (*__gpuUpdateErrorContainmentState__)(struct OBJGPU *, NV_ERROR_CONT_ERR_ID, NV_ERROR_CONT_LOCATION, NvU32 *);
NV_STATUS (*__gpuWaitForGfwBootComplete__)(struct OBJGPU *); NV_STATUS (*__gpuWaitForGfwBootComplete__)(struct OBJGPU *);
@@ -1053,6 +1054,7 @@ struct OBJGPU {
NvBool PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK; NvBool PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK;
NvBool PDB_PROP_GPU_C2C_SYSMEM; NvBool PDB_PROP_GPU_C2C_SYSMEM;
NvBool PDB_PROP_GPU_IN_TCC_MODE; NvBool PDB_PROP_GPU_IN_TCC_MODE;
NvBool PDB_PROP_GPU_SUPPORTS_TDR_EVENT;
NvBool PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE; NvBool PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE;
NvBool PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K; NvBool PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K;
NvBool PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT; NvBool PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT;
@@ -1132,6 +1134,8 @@ struct OBJGPU {
NvS32 computeModeRefCount; NvS32 computeModeRefCount;
NvHandle hComputeModeReservation; NvHandle hComputeModeReservation;
NvBool bIsDebugModeEnabled; NvBool bIsDebugModeEnabled;
NvU64 lastCallbackTime;
volatile NvU32 bCallbackQueued;
NvU32 masterFromSLIConfig; NvU32 masterFromSLIConfig;
NvU32 sliStatus; NvU32 sliStatus;
struct OBJOS *pOS; struct OBJOS *pOS;
@@ -1444,6 +1448,8 @@ extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU;
#define PDB_PROP_GPU_IN_PM_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_PM_CODEPATH #define PDB_PROP_GPU_IN_PM_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_PM_CODEPATH
#define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_CAST #define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_CAST
#define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED #define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED
#define PDB_PROP_GPU_SUPPORTS_TDR_EVENT_BASE_CAST
#define PDB_PROP_GPU_SUPPORTS_TDR_EVENT_BASE_NAME PDB_PROP_GPU_SUPPORTS_TDR_EVENT
#define PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE_BASE_CAST #define PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE_BASE_CAST
#define PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE_BASE_NAME PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE #define PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE_BASE_NAME PDB_PROP_GPU_IS_VGPU_HETEROGENEOUS_MODE
#define PDB_PROP_GPU_BEHIND_BR03_BASE_CAST #define PDB_PROP_GPU_BEHIND_BR03_BASE_CAST
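For context: the OBJGPU hunks above add a PDB_PROP_GPU_SUPPORTS_TDR_EVENT property (plus the lastCallbackTime / bCallbackQueued bookkeeping fields). A sketch of how such a property is typically read, assuming the standard NVOC getProperty accessor used elsewhere in the RM; the wrapper name is hypothetical and the flag's exact semantics are not shown in this diff:

/* Hypothetical sketch: standard PDB property read. */
static NvBool
gpuSupportsTdrEventSketch(struct OBJGPU *pGpu)
{
    return pGpu->getProperty(pGpu, PDB_PROP_GPU_SUPPORTS_TDR_EVENT);
}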
@@ -1629,6 +1635,8 @@ NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU**, Dynamic*, NvU32,
#define gpuIsCCEnabledInHw_HAL(pGpu) gpuIsCCEnabledInHw_DISPATCH(pGpu) #define gpuIsCCEnabledInHw_HAL(pGpu) gpuIsCCEnabledInHw_DISPATCH(pGpu)
#define gpuIsDevModeEnabledInHw(pGpu) gpuIsDevModeEnabledInHw_DISPATCH(pGpu) #define gpuIsDevModeEnabledInHw(pGpu) gpuIsDevModeEnabledInHw_DISPATCH(pGpu)
#define gpuIsDevModeEnabledInHw_HAL(pGpu) gpuIsDevModeEnabledInHw_DISPATCH(pGpu) #define gpuIsDevModeEnabledInHw_HAL(pGpu) gpuIsDevModeEnabledInHw_DISPATCH(pGpu)
#define gpuIsProtectedPcieEnabledInHw(pGpu) gpuIsProtectedPcieEnabledInHw_DISPATCH(pGpu)
#define gpuIsProtectedPcieEnabledInHw_HAL(pGpu) gpuIsProtectedPcieEnabledInHw_DISPATCH(pGpu)
#define gpuIsCtxBufAllocInPmaSupported(pGpu) gpuIsCtxBufAllocInPmaSupported_DISPATCH(pGpu) #define gpuIsCtxBufAllocInPmaSupported(pGpu) gpuIsCtxBufAllocInPmaSupported_DISPATCH(pGpu)
#define gpuIsCtxBufAllocInPmaSupported_HAL(pGpu) gpuIsCtxBufAllocInPmaSupported_DISPATCH(pGpu) #define gpuIsCtxBufAllocInPmaSupported_HAL(pGpu) gpuIsCtxBufAllocInPmaSupported_DISPATCH(pGpu)
#define gpuUpdateErrorContainmentState(pGpu, arg0, arg1, arg2) gpuUpdateErrorContainmentState_DISPATCH(pGpu, arg0, arg1, arg2) #define gpuUpdateErrorContainmentState(pGpu, arg0, arg1, arg2) gpuUpdateErrorContainmentState_DISPATCH(pGpu, arg0, arg1, arg2)
@@ -3147,6 +3155,16 @@ static inline NvBool gpuIsDevModeEnabledInHw_DISPATCH(struct OBJGPU *pGpu) {
return pGpu->__gpuIsDevModeEnabledInHw__(pGpu); return pGpu->__gpuIsDevModeEnabledInHw__(pGpu);
} }
NvBool gpuIsProtectedPcieEnabledInHw_GH100(struct OBJGPU *pGpu);
static inline NvBool gpuIsProtectedPcieEnabledInHw_491d52(struct OBJGPU *pGpu) {
return ((NvBool)(0 != 0));
}
static inline NvBool gpuIsProtectedPcieEnabledInHw_DISPATCH(struct OBJGPU *pGpu) {
return pGpu->__gpuIsProtectedPcieEnabledInHw__(pGpu);
}
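For context: gpuIsProtectedPcieEnabledInHw is a new ChipHal dispatch; per the function-table hunk earlier in this diff, GH100 routes to gpuIsProtectedPcieEnabledInHw_GH100() and all other chips take the default stub, which returns NV_FALSE. A one-line caller sketch; the wrapper name is hypothetical:

/* Hypothetical sketch: query protected-PCIe state via the new HAL dispatch. */
static NvBool
gpuProtectedPcieSketch(struct OBJGPU *pGpu)
{
    return gpuIsProtectedPcieEnabledInHw_HAL(pGpu);
}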
NvBool gpuIsCtxBufAllocInPmaSupported_GA100(struct OBJGPU *pGpu); NvBool gpuIsCtxBufAllocInPmaSupported_GA100(struct OBJGPU *pGpu);
static inline NvBool gpuIsCtxBufAllocInPmaSupported_491d52(struct OBJGPU *pGpu) { static inline NvBool gpuIsCtxBufAllocInPmaSupported_491d52(struct OBJGPU *pGpu) {

Some files were not shown because too many files have changed in this diff.