525.85.05

Author: Andy Ritger, 2023-01-19 10:41:59 -08:00
Commit: 811073c51e (parent: dac2350c7f)
GPG Key ID: 6D466BB75E006CFC (no known key found for this signature in database)
90 changed files with 1937 additions and 668 deletions

View File

@ -2,6 +2,12 @@
## Release 525 Entries
### [525.85.05] 2023-01-19
#### Fixed
- Fix build problems with Clang 15.0, [#377](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/377) by @ptr1337
### [525.78.01] 2023-01-05
### [525.60.13] 2022-12-05

View File

@ -1,7 +1,7 @@
# NVIDIA Linux Open GPU Kernel Module Source
This is the source release of the NVIDIA Linux open GPU kernel modules,
version 525.78.01.
version 525.85.05.
## How to Build
@ -17,7 +17,7 @@ as root:
Note that the kernel modules built here must be used with GSP
firmware and user-space NVIDIA GPU driver components from a corresponding
525.78.01 driver release. This can be achieved by installing
525.85.05 driver release. This can be achieved by installing
the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
option. E.g.,
@ -167,7 +167,7 @@ for the target kernel.
## Compatible GPUs
The open-gpu-kernel-modules can be used on any Turing or later GPU
(see the table below). However, in the 525.78.01 release,
(see the table below). However, in the 525.85.05 release,
GeForce and Workstation support is still considered alpha-quality.
To enable use of the open kernel modules on GeForce and Workstation GPUs,
@ -175,7 +175,7 @@ set the "NVreg_OpenRmEnableUnsupportedGpus" nvidia.ko kernel module
parameter to 1. For more details, see the NVIDIA GPU driver end user
README here:
https://us.download.nvidia.com/XFree86/Linux-x86_64/525.78.01/README/kernel_open.html
https://us.download.nvidia.com/XFree86/Linux-x86_64/525.85.05/README/kernel_open.html
In the below table, if three IDs are listed, the first is the PCI Device
ID, the second is the PCI Subsystem Vendor ID, and the third is the PCI
@ -720,7 +720,11 @@ Subsystem Device ID.
| NVIDIA A10 | 2236 10DE 1482 |
| NVIDIA A10G | 2237 10DE 152F |
| NVIDIA A10M | 2238 10DE 1677 |
| NVIDIA H100 80GB HBM3 | 2330 10DE 16C0 |
| NVIDIA H100 80GB HBM3 | 2330 10DE 16C1 |
| NVIDIA H100 PCIe | 2331 10DE 1626 |
| NVIDIA H100 80GB HBM2e | 2336 10DE 16C2 |
| NVIDIA H100 80GB HBM2e | 2336 10DE 16C7 |
| NVIDIA GeForce RTX 3060 Ti | 2414 |
| NVIDIA GeForce RTX 3080 Ti Laptop GPU | 2420 |
| NVIDIA RTX A5500 Laptop GPU | 2438 |
@ -784,6 +788,7 @@ Subsystem Device ID.
| NVIDIA GeForce RTX 2050 | 25A7 |
| NVIDIA GeForce RTX 2050 | 25A9 |
| NVIDIA GeForce MX570 A | 25AA |
| NVIDIA GeForce RTX 2050 | 25AD |
| NVIDIA A16 | 25B6 10DE 14A9 |
| NVIDIA A2 | 25B6 10DE 157E |
| NVIDIA RTX A2000 Laptop GPU | 25B8 |
@ -793,6 +798,7 @@ Subsystem Device ID.
| NVIDIA GeForce RTX 3050 Ti Laptop GPU | 25E0 |
| NVIDIA GeForce RTX 3050 Laptop GPU | 25E2 |
| NVIDIA GeForce RTX 3050 Laptop GPU | 25E5 |
| NVIDIA GeForce RTX 2050 | 25ED |
| NVIDIA RTX A1000 Embedded GPU | 25F9 |
| NVIDIA RTX A2000 Embedded GPU | 25FA |
| NVIDIA RTX A500 Embedded GPU | 25FB |

View File

@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
EXTRA_CFLAGS += -I$(src)
EXTRA_CFLAGS += -Wall -MD $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args
EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"525.78.01\"
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"525.85.05\"
EXTRA_CFLAGS += -Wno-unused-function

View File

@ -101,7 +101,8 @@ test_header_presence() {
build_cflags() {
BASE_CFLAGS="-O2 -D__KERNEL__ \
-DKBUILD_BASENAME=\"#conftest$$\" -DKBUILD_MODNAME=\"#conftest$$\" \
-nostdinc -isystem $ISYSTEM"
-nostdinc -isystem $ISYSTEM \
-Wno-implicit-function-declaration -Wno-strict-prototypes"
if [ "$OUTPUT" != "$SOURCES" ]; then
OUTPUT_CFLAGS="-I$OUTPUT/include2 -I$OUTPUT/include"

View File

@ -131,16 +131,19 @@ static int __nv_drm_put_back_post_fence_fd(
const struct NvKmsKapiLayerReplyConfig *layer_reply_config)
{
int fd = layer_reply_config->postSyncptFd;
int ret = 0;
if ((fd >= 0) && (plane_state->fd_user_ptr != NULL)) {
if (put_user(fd, plane_state->fd_user_ptr)) {
return -EFAULT;
ret = copy_to_user(plane_state->fd_user_ptr, &fd, sizeof(fd));
if (ret != 0) {
return ret;
}
/*! set back to Null and let set_property specify it again */
plane_state->fd_user_ptr = NULL;
}
return 0;
return ret;
}
static int __nv_drm_get_syncpt_data(

View File

@ -34,7 +34,7 @@ static int uvm_debug_prints = UVM_IS_DEBUG() || UVM_IS_DEVELOP();
module_param(uvm_debug_prints, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(uvm_debug_prints, "Enable uvm debug prints.");
bool uvm_debug_prints_enabled()
bool uvm_debug_prints_enabled(void)
{
return uvm_debug_prints != 0;
}
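
Many of the hunks that follow repeat the pattern in the hunk above: C functions declared with empty parameter lists, `()`, gain explicit `(void)` prototypes. Clang 15.0 diagnoses the old style (strict-prototypes / deprecated non-prototype declarations), which is the build breakage fixed per the changelog entry for issue #377; the conftest.sh hunk above similarly adds -Wno-implicit-function-declaration and -Wno-strict-prototypes to the flags used for its generated test programs. A minimal sketch of the before/after, using an illustrative function name not taken from this commit:

    /* Before: empty parentheses declare a function with unspecified
     * parameters; Clang 15 warns under -Wstrict-prototypes and
     * -Wdeprecated-non-prototype.
     *
     *     int get_count();
     */

    /* After: an explicit (void) prototype states that the function takes
     * no arguments, matching the definition below. */
    int get_count(void);

    int get_count(void)
    {
        return 42;
    }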

View File

@ -1718,7 +1718,7 @@ bool uvm_va_space_has_access_counter_migrations(uvm_va_space_t *va_space)
return atomic_read(&va_space_access_counters->params.enable_mimc_migrations);
}
NV_STATUS uvm_perf_access_counters_init()
NV_STATUS uvm_perf_access_counters_init(void)
{
uvm_perf_module_init("perf_access_counters",
UVM_PERF_MODULE_TYPE_ACCESS_COUNTERS,
@ -1729,7 +1729,7 @@ NV_STATUS uvm_perf_access_counters_init()
return NV_OK;
}
void uvm_perf_access_counters_exit()
void uvm_perf_access_counters_exit(void)
{
}

View File

@ -334,7 +334,7 @@ bool __uvm_check_all_unlocked(uvm_thread_context_lock_t *uvm_context)
return false;
}
bool __uvm_thread_check_all_unlocked()
bool __uvm_thread_check_all_unlocked(void)
{
return __uvm_check_all_unlocked(uvm_thread_context_lock_get());
}

View File

@ -795,7 +795,7 @@ static NV_STATUS semaphore_release(NvU64 semaphore_address,
return semaphore_release_from_gpu(gpu, semaphore_pool, semaphore_address, semaphore_payload, tracker_ptr);
}
NV_STATUS uvm_migrate_init()
NV_STATUS uvm_migrate_init(void)
{
NV_STATUS status = uvm_migrate_pageable_init();
if (status != NV_OK)
@ -821,7 +821,7 @@ NV_STATUS uvm_migrate_init()
return NV_OK;
}
void uvm_migrate_exit()
void uvm_migrate_exit(void)
{
uvm_migrate_pageable_exit();
}

View File

@ -1001,7 +1001,7 @@ NV_STATUS uvm_migrate_pageable(uvm_migrate_args_t *uvm_migrate_args)
return status;
}
NV_STATUS uvm_migrate_pageable_init()
NV_STATUS uvm_migrate_pageable_init(void)
{
g_uvm_migrate_vma_state_cache = NV_KMEM_CACHE_CREATE("migrate_vma_state_t", migrate_vma_state_t);
if (!g_uvm_migrate_vma_state_cache)
@ -1010,7 +1010,7 @@ NV_STATUS uvm_migrate_pageable_init()
return NV_OK;
}
void uvm_migrate_pageable_exit()
void uvm_migrate_pageable_exit(void)
{
kmem_cache_destroy_safe(&g_uvm_migrate_vma_state_cache);
}

View File

@ -28,7 +28,7 @@
#include "uvm_gpu_access_counters.h"
#include "uvm_va_space.h"
NV_STATUS uvm_perf_heuristics_init()
NV_STATUS uvm_perf_heuristics_init(void)
{
NV_STATUS status;
@ -47,7 +47,7 @@ NV_STATUS uvm_perf_heuristics_init()
return NV_OK;
}
void uvm_perf_heuristics_exit()
void uvm_perf_heuristics_exit(void)
{
uvm_perf_access_counters_exit();
uvm_perf_thrashing_exit();

View File

@ -425,7 +425,7 @@ void uvm_perf_prefetch_get_hint(uvm_va_block_t *va_block,
}
}
NV_STATUS uvm_perf_prefetch_init()
NV_STATUS uvm_perf_prefetch_init(void)
{
g_uvm_perf_prefetch_enable = uvm_perf_prefetch_enable != 0;

View File

@ -1943,7 +1943,7 @@ NV_STATUS uvm_perf_thrashing_register_gpu(uvm_va_space_t *va_space, uvm_gpu_t *g
return NV_OK;
}
NV_STATUS uvm_perf_thrashing_init()
NV_STATUS uvm_perf_thrashing_init(void)
{
NV_STATUS status;
@ -2002,7 +2002,7 @@ error:
return status;
}
void uvm_perf_thrashing_exit()
void uvm_perf_thrashing_exit(void)
{
cpu_thrashing_stats_exit();

View File

@ -46,7 +46,7 @@ static struct proc_dir_entry *uvm_proc_dir;
static struct proc_dir_entry *uvm_proc_gpus;
static struct proc_dir_entry *uvm_proc_cpu;
NV_STATUS uvm_procfs_init()
NV_STATUS uvm_procfs_init(void)
{
if (!uvm_procfs_is_enabled())
return NV_OK;
@ -66,17 +66,17 @@ NV_STATUS uvm_procfs_init()
return NV_OK;
}
void uvm_procfs_exit()
void uvm_procfs_exit(void)
{
proc_remove(uvm_proc_dir);
}
struct proc_dir_entry *uvm_procfs_get_gpu_base_dir()
struct proc_dir_entry *uvm_procfs_get_gpu_base_dir(void)
{
return uvm_proc_gpus;
}
struct proc_dir_entry *uvm_procfs_get_cpu_base_dir()
struct proc_dir_entry *uvm_procfs_get_cpu_base_dir(void)
{
return uvm_proc_cpu;
}

View File

@ -289,12 +289,12 @@ NV_STATUS __uvm_push_begin_acquire_on_reserved_channel_with_info(uvm_channel_t *
return status;
}
bool uvm_push_info_is_tracking_descriptions()
bool uvm_push_info_is_tracking_descriptions(void)
{
return uvm_debug_enable_push_desc != 0;
}
bool uvm_push_info_is_tracking_acquires()
bool uvm_push_info_is_tracking_acquires(void)
{
return uvm_debug_enable_push_acquire_info != 0;
}

View File

@ -101,7 +101,7 @@ static DEFINE_PER_CPU(uvm_thread_context_lock_acquired_t, interrupt_thread_conte
static void thread_context_non_interrupt_remove(uvm_thread_context_t *thread_context,
uvm_thread_context_table_entry_t *thread_context_entry);
bool uvm_thread_context_wrapper_is_used()
bool uvm_thread_context_wrapper_is_used(void)
{
// The wrapper contains lock information. While uvm_record_lock_X
// routines are a no-op outside of debug mode, unit tests do invoke their

View File

@ -2204,7 +2204,7 @@ NV_STATUS uvm_api_tools_get_processor_uuid_table(UVM_TOOLS_GET_PROCESSOR_UUID_TA
return NV_OK;
}
void uvm_tools_flush_events()
void uvm_tools_flush_events(void)
{
tools_schedule_completed_events();

View File

@ -560,7 +560,7 @@ void nvlink_assert(int cond)
}
}
void * nvlink_allocLock()
void * nvlink_allocLock(void)
{
struct semaphore *sema;

View File

@ -1170,14 +1170,14 @@ void NV_API_CALL os_dbg_breakpoint(void)
#endif // CONFIG_X86_REMOTE_DEBUG || CONFIG_KGDB || CONFIG_XMON
}
NvU32 NV_API_CALL os_get_cpu_number()
NvU32 NV_API_CALL os_get_cpu_number(void)
{
NvU32 cpu_id = get_cpu();
put_cpu();
return cpu_id;
}
NvU32 NV_API_CALL os_get_cpu_count()
NvU32 NV_API_CALL os_get_cpu_count(void)
{
return NV_NUM_CPUS();
}
@ -1273,7 +1273,7 @@ void NV_API_CALL os_get_screen_info(
#endif
}
void NV_API_CALL os_dump_stack()
void NV_API_CALL os_dump_stack(void)
{
dump_stack();
}

View File

@ -36,25 +36,25 @@
// and then checked back in. You cannot make changes to these sections without
// corresponding changes to the buildmeister script
#ifndef NV_BUILD_BRANCH
#define NV_BUILD_BRANCH r527_92
#define NV_BUILD_BRANCH r528_10
#endif
#ifndef NV_PUBLIC_BRANCH
#define NV_PUBLIC_BRANCH r527_92
#define NV_PUBLIC_BRANCH r528_10
#endif
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r525/r527_92-225"
#define NV_BUILD_CHANGELIST_NUM (32231446)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r525/r528_10-249"
#define NV_BUILD_CHANGELIST_NUM (32293795)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "rel/gpu_drv/r525/r527_92-225"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32231446)
#define NV_BUILD_NAME "rel/gpu_drv/r525/r528_10-249"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32293795)
#else /* Windows builds */
#define NV_BUILD_BRANCH_VERSION "r527_92-2"
#define NV_BUILD_CHANGELIST_NUM (32226812)
#define NV_BUILD_BRANCH_VERSION "r528_10-6"
#define NV_BUILD_CHANGELIST_NUM (32293795)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "528.02"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32226812)
#define NV_BUILD_NAME "528.22"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32293795)
#define NV_BUILD_BRANCH_BASE_VERSION R525
#endif
// End buildmeister python edited section

View File

@ -4,7 +4,7 @@
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
(defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
#define NV_VERSION_STRING "525.78.01"
#define NV_VERSION_STRING "525.85.05"
#else

View File

@ -3,7 +3,7 @@
#define NV_COMPANY_NAME_STRING_SHORT "NVIDIA"
#define NV_COMPANY_NAME_STRING_FULL "NVIDIA Corporation"
#define NV_COMPANY_NAME_STRING NV_COMPANY_NAME_STRING_FULL
#define NV_COPYRIGHT_YEAR "2022"
#define NV_COPYRIGHT_YEAR "2023"
#define NV_COPYRIGHT "(C) " NV_COPYRIGHT_YEAR " NVIDIA Corporation. All rights reserved." // Please do not use the non-ascii copyright symbol for (C).
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \

View File

@ -75,6 +75,7 @@ enum
{
RM_SOE_THERM_MSG_ID_SLOWDOWN_STATUS,
RM_SOE_THERM_MSG_ID_SHUTDOWN_STATUS,
RM_SOE_THERM_MSG_ID_ACK_FORCE_SLOWDOWN,
};
/*!
@ -102,6 +103,7 @@ typedef struct
NvBool bSlowdown;
NvTemp maxTemperature;
NvTemp warnThreshold;
NvBool bLinksL1Status;
struct
{

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -565,6 +565,7 @@ NvlStatus nvswitch_launch_ALI(nvswitch_device *device);
NvlStatus nvswitch_launch_ALI_link_training(nvswitch_device *device, nvlink_link *link, NvBool bSync);
NvlStatus nvswitch_inband_read_data(nvswitch_device *device, NvU8 *dest, NvU32 linkId, NvU32 *dataSize);
void nvswitch_filter_messages(nvswitch_device *device, NvU32 linkId);
NvlStatus nvswitch_reset_and_train_link(nvswitch_device *device, nvlink_link *link);
NvlStatus nvswitch_set_training_mode(nvswitch_device *device);
NvBool nvswitch_is_link_in_reset(nvswitch_device *device, nvlink_link *link);
void nvswitch_apply_recal_settings(nvswitch_device *device, nvlink_link *link);

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2018-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -239,6 +239,7 @@
_op(NvlStatus, nvswitch_ctrl_get_rb_stall_busy, (nvswitch_device *device, NVSWITCH_GET_RB_STALL_BUSY *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_multicast_id_error_vector, (nvswitch_device *device, NVSWITCH_GET_MULTICAST_ID_ERROR_VECTOR *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_clear_multicast_id_error_vector, (nvswitch_device *device, NVSWITCH_CLEAR_MULTICAST_ID_ERROR_VECTOR *p), _arch) \
_op(NvlStatus, nvswitch_reset_and_train_link, (nvswitch_device *device, nvlink_link *link), _arch) \
//
// Declare HAL function pointer table

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2018-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -657,6 +657,7 @@ NvlStatus nvswitch_ctrl_get_board_part_number_lr10(nvswitch_device *device, NVSW
NvlStatus nvswitch_ctrl_set_mc_rid_table_lr10(nvswitch_device *device, NVSWITCH_SET_MC_RID_TABLE_PARAMS *p);
NvlStatus nvswitch_ctrl_get_mc_rid_table_lr10(nvswitch_device *device, NVSWITCH_GET_MC_RID_TABLE_PARAMS *p);
NvlStatus nvswitch_launch_ALI_lr10(nvswitch_device *device);
NvlStatus nvswitch_reset_and_train_link_lr10(nvswitch_device *device, nvlink_link *link);
NvlStatus nvswitch_ctrl_get_bios_info_lr10(nvswitch_device *device, NVSWITCH_GET_BIOS_INFO_PARAMS *p);

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -993,6 +993,7 @@ void nvswitch_service_minion_all_links_ls10(nvswitch_device *device);
NvBool nvswitch_is_inforom_supported_ls10(nvswitch_device *device);
void nvswitch_set_error_rate_threshold_ls10(nvlink_link *link, NvBool bIsDefault);
void nvswitch_configure_error_rate_threshold_interrupt_ls10(nvlink_link *link, NvBool bEnable);
NvlStatus nvswitch_reset_and_train_link_ls10(nvswitch_device *device, nvlink_link *link);
#endif //_LS10_H_

View File

@ -39,6 +39,8 @@ void nvswitch_cci_soe_callback_ls10(nvswitch_device *device, RM_FLCN_MSG *pGenMs
void *pParams, NvU32 seqDesc, NV_STATUS status);
NvlStatus nvswitch_set_nport_tprod_state_ls10(nvswitch_device *device, NvU32 nport);
void nvswitch_soe_unregister_events_ls10(nvswitch_device *device);
void nvswitch_therm_soe_callback_ls10(nvswitch_device *device, union RM_FLCN_MSG *pMsg,
void *pParams, NvU32 seqDesc, NV_STATUS status);
NvlStatus nvswitch_soe_register_event_callbacks_ls10(nvswitch_device *device);
NvlStatus nvswitch_soe_restore_nport_state_ls10(nvswitch_device *device, NvU32 nport);
NvlStatus nvswitch_soe_issue_nport_reset_ls10(nvswitch_device *device, NvU32 nport);

View File

@ -569,7 +569,7 @@ const NvU32 soe_ucode_data_lr10_dbg[] = {
0x328908f4, 0xfbfa324f, 0xbf02f971, 0xbcb0b2b9, 0xb9a6b0c9, 0xe41708f4, 0xbcffffd9, 0xfba6f09b,
0x980b08f4, 0xf9a60109, 0xf8050df4, 0xb2dc7202, 0x28d77eed, 0xb201fb00, 0x05ab98b9, 0xdeb2cfb2,
0xfd729cb2, 0x0042a97e, 0xf0fc00f8, 0xf9fc30f4, 0xbf62f9f0, 0x08e1b0b9, 0xd4b2a5b2, 0xa630c9bc,
0x1d08f439, 0xa6f0d3bc, 0x1508f4f3, 0xa601b998, 0x0d0cf4f9, 0x24bd0101, 0x763efc06, 0x02f80043,
0x1d08f439, 0xa6f0d3bc, 0x1508f4f3, 0xa601b998, 0x0d0cf4f9, 0x010124bd, 0x763efc06, 0x02f80043,
0x853e0101, 0x42bc0043, 0x0096b192, 0x060df401, 0x90010049, 0x96ff0399, 0x0b947e04, 0xb23bb200,
0xdd0c725a, 0x00001200, 0x7e3030bc, 0x320028d7, 0x00a433a1, 0x08b0b434, 0xb209c0b4, 0x1200da2d,
0x20bc0000, 0x01004e20, 0x0021367e, 0x0a00a033, 0x853e02f8, 0x00da0043, 0xbd000012, 0x01004cb4,
@ -859,7 +859,7 @@ const NvU32 soe_ucode_data_lr10_dbg[] = {
0x4c99c700, 0x6a0090b3, 0xf60190b3, 0xde3e02f8, 0xf9cf0066, 0x4c99c700, 0x3a0090b3, 0xf60190b3,
0xf23e02f8, 0x00d90066, 0xbf000014, 0x014bfe9a, 0xbb90080c, 0x006e7e14, 0x0149fe00, 0xbf1c9990,
0x05dcd99f, 0x99bf0000, 0x0bf5f9a6, 0xe03e00ba, 0x00890067, 0x99cf01c2, 0x0608de00, 0x9fc70000,
0x1899c710, 0x3515ef35, 0x063e14e9, 0xff900067, 0xc1008960, 0x009ff601, 0x0000f1df, 0x0099b880,
0x1899c710, 0x3516ef35, 0x063e15e9, 0xff900067, 0xc1008960, 0x009ff601, 0x0000f1df, 0x0099b880,
0x9ff70201, 0x009fcf00, 0xf23e9fb2, 0x00890066, 0x99cf01c2, 0x009fe400, 0xff94f120, 0x00fdb33f,
0xc13eff40, 0x448f0066, 0x0089066f, 0x9ff601c1, 0x00f1df00, 0x99b88000, 0xf7020100, 0x9fcf009f,
0x3e9fb200, 0x890066a8, 0xcf01c200, 0x9fe40099, 0x94f12000, 0xfdb33fff, 0x3efec700, 0x8f006681,
@ -2269,8 +2269,8 @@ const NvU32 soe_ucode_data_lr10_dbg[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xb32dc4cc, 0x58018cca, 0x7c52cad0, 0x4a5277fe, 0x62f5c2c4, 0xc41c2f31, 0x9af0cbcc, 0xb7efe098,
0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0x5f24a73a, 0x55cea6d1, 0x59205a69, 0x18a31f2d,
0xb32dc4cc, 0x58018cca, 0x7c52cad0, 0x4a5277fe, 0xb22438cf, 0xcfd90bc8, 0xf23ebc55, 0x2e5c0e40,
0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0x0d4a5d7d, 0x9c31ffb3, 0x95bc604f, 0x40cc834d,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,

View File

@ -569,7 +569,7 @@ const NvU32 soe_ucode_data_lr10_prd[] = {
0x328908f4, 0xfbfa324f, 0xbf02f971, 0xbcb0b2b9, 0xb9a6b0c9, 0xe41708f4, 0xbcffffd9, 0xfba6f09b,
0x980b08f4, 0xf9a60109, 0xf8050df4, 0xb2dc7202, 0x28d77eed, 0xb201fb00, 0x05ab98b9, 0xdeb2cfb2,
0xfd729cb2, 0x0042a97e, 0xf0fc00f8, 0xf9fc30f4, 0xbf62f9f0, 0x08e1b0b9, 0xd4b2a5b2, 0xa630c9bc,
0x1d08f439, 0xa6f0d3bc, 0x1508f4f3, 0xa601b998, 0x0d0cf4f9, 0x24bd0101, 0x763efc06, 0x02f80043,
0x1d08f439, 0xa6f0d3bc, 0x1508f4f3, 0xa601b998, 0x0d0cf4f9, 0x010124bd, 0x763efc06, 0x02f80043,
0x853e0101, 0x42bc0043, 0x0096b192, 0x060df401, 0x90010049, 0x96ff0399, 0x0b947e04, 0xb23bb200,
0xdd0c725a, 0x00001200, 0x7e3030bc, 0x320028d7, 0x00a433a1, 0x08b0b434, 0xb209c0b4, 0x1200da2d,
0x20bc0000, 0x01004e20, 0x0021367e, 0x0a00a033, 0x853e02f8, 0x00da0043, 0xbd000012, 0x01004cb4,
@ -859,7 +859,7 @@ const NvU32 soe_ucode_data_lr10_prd[] = {
0x4c99c700, 0x6a0090b3, 0xf60190b3, 0xde3e02f8, 0xf9cf0066, 0x4c99c700, 0x3a0090b3, 0xf60190b3,
0xf23e02f8, 0x00d90066, 0xbf000014, 0x014bfe9a, 0xbb90080c, 0x006e7e14, 0x0149fe00, 0xbf1c9990,
0x05dcd99f, 0x99bf0000, 0x0bf5f9a6, 0xe03e00ba, 0x00890067, 0x99cf01c2, 0x0608de00, 0x9fc70000,
0x1899c710, 0x3515ef35, 0x063e14e9, 0xff900067, 0xc1008960, 0x009ff601, 0x0000f1df, 0x0099b880,
0x1899c710, 0x3516ef35, 0x063e15e9, 0xff900067, 0xc1008960, 0x009ff601, 0x0000f1df, 0x0099b880,
0x9ff70201, 0x009fcf00, 0xf23e9fb2, 0x00890066, 0x99cf01c2, 0x009fe400, 0xff94f120, 0x00fdb33f,
0xc13eff40, 0x448f0066, 0x0089066f, 0x9ff601c1, 0x00f1df00, 0x99b88000, 0xf7020100, 0x9fcf009f,
0x3e9fb200, 0x890066a8, 0xcf01c200, 0x9fe40099, 0x94f12000, 0xfdb33fff, 0x3efec700, 0x8f006681,
@ -2269,8 +2269,8 @@ const NvU32 soe_ucode_data_lr10_prd[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xb32dc4cc, 0x58018cca, 0x7c52cad0, 0x4a5277fe, 0x62f5c2c4, 0xc41c2f31, 0x9af0cbcc, 0xb7efe098,
0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0x5f24a73a, 0x55cea6d1, 0x59205a69, 0x18a31f2d,
0xb32dc4cc, 0x58018cca, 0x7c52cad0, 0x4a5277fe, 0xb22438cf, 0xcfd90bc8, 0xf23ebc55, 0x2e5c0e40,
0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0x0d4a5d7d, 0x9c31ffb3, 0x95bc604f, 0x40cc834d,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2018-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -2369,3 +2369,13 @@ nvswitch_launch_ALI_link_training_lr10
{
return NVL_ERR_NOT_IMPLEMENTED;
}
NvlStatus
nvswitch_reset_and_train_link_lr10
(
nvswitch_device *device,
nvlink_link *link
)
{
return NVL_ERR_NOT_IMPLEMENTED;
}

View File

@ -6611,12 +6611,12 @@ _nvswitch_service_nvlw_fatal_ls10
status[5] = _nvswitch_service_nvlipt_link_fatal_ls10(device, instance);
if (status[0] != NVL_SUCCESS &&
status[1] != NVL_SUCCESS &&
status[2] != NVL_SUCCESS &&
status[3] != NVL_SUCCESS &&
status[4] != NVL_SUCCESS &&
status[5] != NVL_SUCCESS)
if (status[0] != NVL_SUCCESS && status[0] != -NVL_NOT_FOUND &&
status[1] != NVL_SUCCESS && status[1] != -NVL_NOT_FOUND &&
status[2] != NVL_SUCCESS && status[2] != -NVL_NOT_FOUND &&
status[3] != NVL_SUCCESS && status[3] != -NVL_NOT_FOUND &&
status[4] != NVL_SUCCESS && status[4] != -NVL_NOT_FOUND &&
status[5] != NVL_SUCCESS && status[5] != -NVL_NOT_FOUND)
{
return -NVL_MORE_PROCESSING_REQUIRED;
}

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -496,6 +496,34 @@ nvswitch_corelib_get_rx_detect_ls10
return NVL_SUCCESS;
}
static NvBool
_nvswitch_is_tlc_in_reset
(
nvswitch_device *device,
nvlink_link *link
)
{
NvU32 clkStatus;
clkStatus = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber,
NVLIPT_LNK, _NVLIPT_LNK, _CTRL_CLK_CTRL);
//
// TLC is in reset if any of the per-link clocks are off
// -- if TX and RX clocks are off then link is not powered on
// -- if TX/RX clocks are on but NCISOC clock is off, DL layer
// is on but TLC is still off
//
if (FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_CLK_CTRL, _RXCLK_STS, _OFF, clkStatus) ||
FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_CLK_CTRL, _TXCLK_STS, _OFF, clkStatus) ||
FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_CLK_CTRL, _NCISOCCLK_STS, _OFF, clkStatus))
{
return NV_TRUE;
}
return NV_FALSE;
}
void
nvswitch_reset_persistent_link_hw_state_ls10
(
@ -509,14 +537,21 @@ nvswitch_reset_persistent_link_hw_state_ls10
return;
}
// SETUPTC called with HW Reset
(void)nvswitch_minion_send_command(device, linkNumber, NV_MINION_NVLINK_DL_CMD_COMMAND_SETUPTC , 0x4);
// clear TLC TP Counters
(void)nvswitch_minion_send_command(device, linkNumber, NV_MINION_NVLINK_DL_CMD_COMMAND_CLR_TLC_MISC_REGS, 0);
// clear DL error counters
(void)nvswitch_minion_send_command(device, linkNumber, NV_MINION_NVLINK_DL_CMD_COMMAND_DLSTAT_CLR_DLERRCNT, 0);
// If TLC is not up then return
if (_nvswitch_is_tlc_in_reset(device, link))
{
return;
}
// SETUPTC called to reset and setup throughput counters
(void)nvswitch_minion_send_command(device, linkNumber, NV_MINION_NVLINK_DL_CMD_COMMAND_SETUPTC , 0x4);
// clear miscellaneous TLC counters and registers
(void)nvswitch_minion_send_command(device, linkNumber, NV_MINION_NVLINK_DL_CMD_COMMAND_CLR_TLC_MISC_REGS, 0);
}
NvlStatus
@ -1469,3 +1504,82 @@ nvswitch_execute_unilateral_link_shutdown_ls10
return;
}
NvlStatus
nvswitch_reset_and_train_link_ls10
(
nvswitch_device *device,
nvlink_link *link
)
{
NvlStatus status = NVL_SUCCESS;
NvU32 retry_count = 3;
NvU32 link_state_request;
NvU32 link_state;
NvU32 stat_data;
NvU32 link_intr_subcode;
nvswitch_execute_unilateral_link_shutdown_ls10(link);
nvswitch_corelib_clear_link_state_ls10(link);
do
{
status = nvswitch_request_tl_link_state_ls10(link,
NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_RESET, NV_TRUE);
if (status == NVL_SUCCESS)
{
break;
}
else
{
link_state_request = NVSWITCH_LINK_RD32_LS10(device, link->linkNumber,
NVLIPT_LNK , _NVLIPT_LNK , _CTRL_LINK_STATE_REQUEST);
link_state = DRF_VAL(_NVLIPT_LNK, _CTRL_LINK_STATE_REQUEST, _STATUS,
link_state_request);
if (nvswitch_minion_get_dl_status(device, link->linkNumber,
NV_NVLSTAT_MN00, 0, &stat_data) == NVL_SUCCESS)
{
link_intr_subcode = DRF_VAL(_NVLSTAT, _MN00, _LINK_INTR_SUBCODE, stat_data);
}
if ((link_state == NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_STATUS_MINION_REQUEST_FAIL) &&
(link_intr_subcode == MINION_ALARM_BUSY))
{
status = nvswitch_request_tl_link_state_ls10(link,
NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_RESET, NV_TRUE);
//
// We retry the shutdown sequence 3 times when we see a MINION_REQUEST_FAIL
// or MINION_ALARM_BUSY
//
retry_count--;
}
else
{
break;
}
}
} while(retry_count);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR,
"%s: NvLink Reset has failed for link %d\n",
__FUNCTION__, link->linkNumber);
// Re-register links.
status = nvlink_lib_register_link(device->nvlink_device, link);
if (status != NVL_SUCCESS)
{
nvswitch_destroy_link(link);
return status;
}
return status;
}
return NVL_SUCCESS;
}

View File

@ -31,7 +31,6 @@
#include "common_nvswitch.h"
#include "ls10/ls10.h"
#include "ls10/soe_ls10.h"
#include "lr10/soe_lr10.h"
#include "nvswitch/ls10/dev_soe_ip.h"
#include "nvswitch/ls10/dev_soe_ip_addendum.h"
@ -555,7 +554,7 @@ nvswitch_soe_register_event_callbacks_ls10
device, pFlcn,
RM_SOE_UNIT_THERM,
NULL,
nvswitch_therm_soe_callback_lr10,
nvswitch_therm_soe_callback_ls10,
NULL,
&pSoe->thermEvtDesc);
if (status != NV_OK)

View File

@ -28,6 +28,7 @@
#include "ls10/therm_ls10.h"
#include "error_nvswitch.h"
#include "soe/soeiftherm.h"
#include "rmflcncmdif_nvswitch.h"
#include "nvswitch/ls10/dev_therm.h"
@ -356,3 +357,100 @@ nvswitch_monitor_thermal_alert_ls10
return;
}
/*
* @brief Callback function to receive thermal messages from SOE.
*/
void
nvswitch_therm_soe_callback_ls10
(
nvswitch_device *device,
RM_FLCN_MSG *pGenMsg,
void *pParams,
NvU32 seqDesc,
NV_STATUS status
)
{
RM_SOE_THERM_MSG_SLOWDOWN_STATUS slowdown_status;
RM_SOE_THERM_MSG_SHUTDOWN_STATUS shutdown_status;
RM_FLCN_MSG_SOE *pMsg = (RM_FLCN_MSG_SOE *)pGenMsg;
NvU32 temperature;
NvU32 threshold;
switch (pMsg->msg.soeTherm.msgType)
{
case RM_SOE_THERM_MSG_ID_SLOWDOWN_STATUS:
{
slowdown_status = pMsg->msg.soeTherm.slowdown;
if (slowdown_status.bSlowdown)
{
if (slowdown_status.source.bTsense) // TSENSE_THERM_ALERT
{
temperature = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(slowdown_status.maxTemperature);
threshold = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(slowdown_status.warnThreshold);
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_START,
"NVSWITCH Temperature %dC | TSENSE WARN Threshold %dC\n",
temperature, threshold);
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_START,
"Thermal Slowdown Engaged | Temp higher than WARN Threshold\n");
}
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_START,
"Thermal Slowdown Engaged | Links Thermal Mode %s\n", (slowdown_status.bLinksL1Status ? "ON" : "OFF"));
if (slowdown_status.source.bPmgr) // PMGR_THERM_ALERT
{
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_START,
"Thermal Slowdown Engaged | PMGR WARN Threshold reached\n");
}
}
else // REVERT_SLOWDOWN
{
temperature = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(slowdown_status.maxTemperature);
threshold = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(slowdown_status.warnThreshold);
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_END,
"NVSWITCH Temperature %dC | TSENSE WARN Threshold %dC\n",
temperature, threshold);
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_END,
"Thermal Slowdown Disengaged | Links Thermal Mode %s\n", (slowdown_status.bLinksL1Status ? "ON" : "OFF"));
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_END,
"Thermal slowdown Disengaged\n");
}
break;
}
case RM_SOE_THERM_MSG_ID_SHUTDOWN_STATUS:
{
shutdown_status = pMsg->msg.soeTherm.shutdown;
if (shutdown_status.source.bTsense) // TSENSE_THERM_SHUTDOWN
{
temperature = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(shutdown_status.maxTemperature);
threshold = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(shutdown_status.overtThreshold);
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_SHUTDOWN,
"NVSWITCH Temperature %dC | OVERT Threshold %dC\n",
temperature, threshold);
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_SHUTDOWN,
"TSENSE OVERT Threshold reached. Shutting Down\n");
}
if (shutdown_status.source.bPmgr) // PMGR_THERM_SHUTDOWN
{
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_START,
"PMGR OVERT Threshold reached. Shutting Down\n");
}
break;
}
default:
{
NVSWITCH_PRINT(device, ERROR, "%s Unknown message Id\n", __FUNCTION__);
NVSWITCH_ASSERT(0);
}
}
}

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -1336,6 +1336,7 @@ nvswitch_lib_initialize_device
NvU8 link_num;
nvlink_link *link = NULL;
NvBool is_blacklisted_by_os = NV_FALSE;
NvU64 mode;
if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device))
{
@ -1497,6 +1498,19 @@ nvswitch_lib_initialize_device
}
nvswitch_reset_persistent_link_hw_state(device, link_num);
if(_nvswitch_corelib_get_dl_link_mode(link, &mode) != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "%s: nvlipt_lnk_status: Failed to check link mode! LinkId %d\n",
__FUNCTION__, link_num);
}
else if(mode == NVLINK_LINKSTATE_FAULT)
{
NVSWITCH_PRINT(device, INFO, "%s: retraining LinkId %d\n",
__FUNCTION__, link_num);
nvswitch_reset_and_train_link(device, link);
}
}
retval = nvswitch_set_training_mode(device);
@ -3004,11 +3018,17 @@ _nvswitch_ctrl_unregister_link
return -NVL_BAD_ARGS;
}
// With ALI in FW, links can be unregistered while Active
if (!device->nvlink_device->enableALI)
{
if (device->hal.nvswitch_is_link_in_use(device, params->portNum))
{
return -NVL_ERR_STATE_IN_USE;
}
}
nvlink_lib_unregister_link(link);
nvswitch_destroy_link(link);
@ -4632,6 +4652,16 @@ nvswitch_launch_ALI_link_training
return device->hal.nvswitch_launch_ALI_link_training(device, link, bSync);
}
NvlStatus
nvswitch_reset_and_train_link
(
nvswitch_device *device,
nvlink_link *link
)
{
return device->hal.nvswitch_reset_and_train_link(device, link);
}
static NvlStatus
_nvswitch_ctrl_get_err_info
(

View File

@ -40,7 +40,8 @@ extern "C" {
#define NVA084_NOTIFIERS_EVENT_GUEST_DRIVER_LOADED (3)
#define NVA084_NOTIFIERS_EVENT_GUEST_DRIVER_UNLOADED (4)
#define NVA084_NOTIFIERS_EVENT_PRINT_ERROR_MESSAGE (5)
#define NVA084_NOTIFIERS_MAXCOUNT (6)
#define NVA084_NOTIFIERS_EVENT_GUEST_LICENSE_STATE_CHANGED (6)
#define NVA084_NOTIFIERS_MAXCOUNT (7)
#define NVA084_NOTIFICATION_STATUS_IN_PROGRESS (0x8000)
#define NVA084_NOTIFICATION_STATUS_BAD_ARGUMENT (0x4000)

View File

@ -2068,30 +2068,18 @@ typedef struct NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS {
*
* NVPCF is an acronym for Nvidia Platform Controllers and Framework
* which implements platform level policies. NVPCF is implemented in
* a kernel driver on windows. It is implemented in a user mode app
* a kernel driver on windows. It is implemented in a user mode app
* called nvidia-powerd on Linux.
*
* gpuId
* GPU ID
* tpp
* Total processing power including CPU and GPU
* ratedTgp
* Rated total GPU Power
* subFunc
* NVPCF subfunction id
* ctgpOffsetmW
* Configurable TGP offset, in mW
* targetTppOffsetmW
* TPP, as offset in mW.
* maxOutputOffsetmW
* Maximum allowed output, as offset in mW.
* minOutputOffsetmW;
* Minimum allowed output, as offset in mW.
*
* Valid subFunc ids for NVPCF 1x include :
* NVPCF0100_CTRL_CONFIG_DSM_1X_FUNC_GET_SUPPORTED
* NVPCF0100_CTRL_CONFIG_DSM_1X_FUNC_GET_DYNAMIC_PARAMS
*
* Valid subFunc ids for NVPCF 2x include :
* NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_SUPPORTED
* NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_DYNAMIC_PARAMS
* NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_STATIC_CONFIG_TABLES
*
* Possible status values returned are:
* NV_OK
* NV_ERR_INVALID_REQUEST
@ -2102,25 +2090,89 @@ typedef struct NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS {
#define NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS_MESSAGE_ID (0x3BU)
typedef struct NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS {
/* GPU ID */
NvU32 gpuId;
/* Total processing power including CPU and GPU */
NvU32 tpp;
/* Rated total GPU Power */
NvU32 ratedTgp;
/* NVPCF subfunction id */
NvU32 subFunc;
/* Configurable TGP offset, in mW */
NvU32 ctgpOffsetmW;
/* TPP, as offset in mW */
NvU32 targetTppOffsetmW;
/* Maximum allowed output, as offset in mW */
NvU32 maxOutputOffsetmW;
/* Minimum allowed output, as offset in mW */
NvU32 minOutputOffsetmW;
/* The System Controller Table Version */
NvU8 version;
/* Base sampling period */
NvU16 samplingPeriodmS;
/* Sampling Multiplier */
NvU16 samplingMulti;
/* Filter function type */
NvU8 filterType;
union {
/* weight */
NvU8 weight;
/* windowSize */
NvU8 windowSize;
} filterParam;
/* Reserved */
NvU16 filterReserved;
/* Controller Type Dynamic Boost Controller */
NvBool bIsBoostController;
/* Increase power limit ratio */
NvU16 incRatio;
/* Decrease power limit ratio */
NvU16 decRatio;
/* Dynamic Boost Controller DC Support */
NvBool bSupportBatt;
/* CPU type(Intel/AMD) */
NvU8 cpuType;
/* GPU type(Nvidia) */
NvU8 gpuType;
} NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS;
/* Define the filter types */
#define CONTROLLER_FILTER_TYPE_EMWA 0U
#define CONTROLLER_FILTER_TYPE_MOVING_MAX 1U
/* Valid NVPCF subfunction case */
#define NVPCF0100_CTRL_CONFIG_DSM_1X_FUNC_GET_SUPPORTED_CASE 0U
#define NVPCF0100_CTRL_CONFIG_DSM_1X_FUNC_GET_DYNAMIC_CASE 1U
#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_SUPPORTED_CASE 2U
#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_DYNAMIC_CASE 3U
#define NVPCF0100_CTRL_CONFIG_DSM_1X_FUNC_GET_SUPPORTED_CASE 0U
#define NVPCF0100_CTRL_CONFIG_DSM_1X_FUNC_GET_DYNAMIC_CASE 1U
#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_SUPPORTED_CASE 2U
#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_DYNAMIC_CASE 3U
/* NVPCF subfunction to get the static data tables */
#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_STATIC_CASE 4U
/* Valid NVPCF subfunction ids */
#define NVPCF0100_CTRL_CONFIG_DSM_1X_FUNC_GET_SUPPORTED (0x00000000)
#define NVPCF0100_CTRL_CONFIG_DSM_1X_FUNC_GET_DYNAMIC_PARAMS (0x00000002)
#define NVPCF0100_CTRL_CONFIG_DSM_1X_FUNC_GET_SUPPORTED (0x00000000)
#define NVPCF0100_CTRL_CONFIG_DSM_1X_FUNC_GET_DYNAMIC_PARAMS (0x00000002)
/*
* Defines for get supported sub functions bit fields
@ -2132,9 +2184,15 @@ typedef struct NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS {
/*!
* Config DSM 2x version specific defines
*/
#define NVPCF0100_CTRL_CONFIG_DSM_2X_VERSION (0x00000200)
#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_SUPPORTED (0x00000000)
#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_DYNAMIC_PARAMS (0x00000002)
#define NVPCF0100_CTRL_CONFIG_DSM_2X_VERSION (0x00000200)
#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_SUPPORTED (0x00000000)
#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_STATIC_CONFIG_TABLES (0x00000001)
#define NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_DYNAMIC_PARAMS (0x00000002)
/*!
* Defines the max buffer size for config
*/
#define NVPCF0100_CTRL_CONFIG_2X_BUFF_SIZE_MAX (255)
/*
* NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT
@ -2148,7 +2206,7 @@ typedef struct NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS {
* Possible status values returned are:
* NV_OK
*/
#define NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT (0x13cU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS_MESSAGE_ID" */
#define NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT (0x13cU) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS_MESSAGE_ID" */
#define NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS_MESSAGE_ID (0x3CU)

View File

@ -2860,29 +2860,39 @@ typedef struct NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_PARAMS {
NvU32 dataSize;
} NV2080_CTRL_NVLINK_INBAND_RECEIVED_DATA_PARAMS;
#define NV2080_CTRL_NVLINK_L1_THRESHOLD_SET (0x00000000U)
#define NV2080_CTRL_NVLINK_L1_THRESHOLD_GET (0x00000001U)
/*
* NV2080_CTRL_CMD_NVLINK_L1_THRESHOLD
* NV2080_CTRL_CMD_NVLINK_SET_L1_THRESHOLD
*
* This command is used to get/set the L1 threshold value
* This command is used to set the L1 threshold value
*
* [in] flag
* Whether to set or get the L1 threshold value
*
* [in/out] l1Threshold
* Used to set or get the L1 threshold value
* [in] l1Threshold
* Used to set the L1 threshold value
*
*/
#define NV2080_CTRL_NVLINK_L1_THRESHOLD_PARAMS_MESSAGE_ID (0x3eU)
#define NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS_MESSAGE_ID (0x3eU)
typedef struct NV2080_CTRL_NVLINK_L1_THRESHOLD_PARAMS {
NvU32 flag;
typedef struct NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS {
NvU32 l1Threshold;
} NV2080_CTRL_NVLINK_L1_THRESHOLD_PARAMS;
} NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS;
#define NV2080_CTRL_CMD_NVLINK_L1_THRESHOLD (0x2080303eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_L1_THRESHOLD_PARAMS_MESSAGE_ID" */
#define NV2080_CTRL_CMD_NVLINK_SET_L1_THRESHOLD (0x2080303eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS_MESSAGE_ID" */
/*
* NV2080_CTRL_CMD_NVLINK_GET_L1_THRESHOLD
*
* This command is used to get the L1 threshold value
*
* [out] l1Threshold
* Used to get the L1 threshold value
*
*/
#define NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS_MESSAGE_ID (0x3fU)
typedef struct NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS {
NvU32 l1Threshold;
} NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS;
#define NV2080_CTRL_CMD_NVLINK_GET_L1_THRESHOLD (0x2080303fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS_MESSAGE_ID" */
/*
* NV2080_CTRL_CMD_NVLINK_INBAND_SEND_DATA
@ -2894,14 +2904,14 @@ typedef struct NV2080_CTRL_NVLINK_L1_THRESHOLD_PARAMS {
* [In] dataSize
* Size of valid data in data array
*/
#define NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS_MESSAGE_ID (0x3fU)
#define NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS_MESSAGE_ID (0x40U)
typedef struct NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS {
NvU8 buffer[NV2080_CTRL_NVLINK_INBAND_MAX_MSG_SIZE];
NvU32 dataSize;
} NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS;
#define NV2080_CTRL_CMD_NVLINK_INBAND_SEND_DATA (0x2080303fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS_MESSAGE_ID" */
#define NV2080_CTRL_CMD_NVLINK_INBAND_SEND_DATA (0x20803040U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS_MESSAGE_ID" */
/*
* NV2080_CTRL_CMD_NVLINK_IS_GPU_DEGRADED
@ -2913,14 +2923,14 @@ typedef struct NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS {
* [In] bIsGpuDegraded
* Boolean to track corresponding GPU is degraded or not
*/
#define NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS_MESSAGE_ID (0x40U)
#define NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS_MESSAGE_ID (0x41U)
typedef struct NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS {
NvU32 linkId;
NvBool bIsGpuDegraded;
} NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS;
#define NV2080_CTRL_CMD_NVLINK_IS_GPU_DEGRADED (0x20803040U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS_MESSAGE_ID" */
#define NV2080_CTRL_CMD_NVLINK_IS_GPU_DEGRADED (0x20803041U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS_MESSAGE_ID" */
/*
* NV2080_CTRL_CMD_NVLINK_DIRECT_CONNECT_CHECK
@ -2935,7 +2945,7 @@ typedef struct NV2080_CTRL_NVLINK_IS_GPU_DEGRADED_PARAMS {
* [out] bridgePresenceMask
* Bit mask of NVLink bridges's presence
*/
#define NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS_MESSAGE_ID (0x41U)
#define NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS_MESSAGE_ID (0x42U)
typedef struct NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS {
NvBool bIsEnoughNvLink;
@ -2943,7 +2953,7 @@ typedef struct NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS {
NvU32 bridgePresenceMask;
} NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS;
#define NV2080_CTRL_CMD_NVLINK_DIRECT_CONNECT_CHECK (0x20803041U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS_MESSAGE_ID" */
#define NV2080_CTRL_CMD_NVLINK_DIRECT_CONNECT_CHECK (0x20803042U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS_MESSAGE_ID" */
/*
* NV2080_CTRL_CMD_NVLINK_POST_FAULT_UP
@ -2953,13 +2963,13 @@ typedef struct NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS {
* [out] linkId
* Link number which the sequence should be triggered
*/
#define NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS_MESSAGE_ID (0x42U)
#define NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS_MESSAGE_ID (0x43U)
typedef struct NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS {
NvU32 linkId;
} NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS;
#define NV2080_CTRL_CMD_NVLINK_POST_FAULT_UP (0x20803042U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS_MESSAGE_ID" */
#define NV2080_CTRL_CMD_NVLINK_POST_FAULT_UP (0x20803043U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS_MESSAGE_ID" */
/* _ctrl2080nvlink_h_ */
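
The L1-threshold hunk above replaces the single flag-selected NV2080_CTRL_CMD_NVLINK_L1_THRESHOLD control with dedicated SET and GET controls, and renumbers the message IDs of the controls that follow. A minimal usage sketch, assuming the structs above are visible and using a hypothetical issue_subdevice_ctrl() wrapper in place of whatever RM control path a real caller already has:

    /* Sketch only: issue_subdevice_ctrl() is a hypothetical stand-in for the
     * caller's existing RM control mechanism; the structs and command IDs
     * come from the header hunk above. */
    extern void issue_subdevice_ctrl(NvU32 cmd, void *pParams, NvU32 paramsSize);

    static NvU32 set_then_get_l1_threshold(NvU32 newThreshold)
    {
        NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS setParams = { 0 };
        NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS getParams = { 0 };

        setParams.l1Threshold = newThreshold;   /* [in] value to program */
        issue_subdevice_ctrl(NV2080_CTRL_CMD_NVLINK_SET_L1_THRESHOLD,
                             &setParams, sizeof(setParams));

        issue_subdevice_ctrl(NV2080_CTRL_CMD_NVLINK_GET_L1_THRESHOLD,
                             &getParams, sizeof(getParams));
        return getParams.l1Threshold;           /* [out] current value */
    }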

View File

@ -31,6 +31,9 @@ extern "C" {
#endif
void nvDpyProbeMaxPixelClock(NVDpyEvoPtr pDpyEvo);
NVEvoPassiveDpDongleType nvDpyGetPassiveDpDongleType(
const NVDpyEvoRec *pDpyEvo,
NvU32 *passiveDpDongleMaxPclkKHz);
void nvDpySetValidSyncsEvo(const NVDpyEvoRec *pDpyEvo,
struct NvKmsModeValidationValidSyncs *pValidSyncs);
NVDpyEvoPtr nvAllocDpyEvo(NVDispEvoPtr pDispEvo,

View File

@ -59,6 +59,7 @@ NvBool nvInitHdmiLibrary(NVDevEvoRec *pDevEvo);
void nvTeardownHdmiLibrary(NVDevEvoRec *pDevEvo);
NvBool nvHdmiFrlAssessLink(NVDpyEvoPtr pDpyEvo);
NvBool nvHdmiDpySupportsFrl(const NVDpyEvoRec *pDpyEvo);
NvBool nvHdmiFrlQueryConfig(const NVDpyEvoRec *pDpyEvo,
const NvModeTimings *pModeTimings,
NVHwModeTimingsEvo *pTimings,

View File

@ -54,9 +54,6 @@ static void DpyGetDynamicDfpProperties(
NVDpyEvoPtr pDpyEvo,
const NvBool disableACPIBrightnessHotkeys);
static NVEvoPassiveDpDongleType
DpyGetPassiveDpDongleType(const NVDpyEvoRec *pDpyEvo,
NvU32 *passiveDpDongleMaxPclkKHz);
static void
CreateParsedEdidFromNVT_TIMING(NVT_TIMING *pTimings,
NvU8 bpc,
@ -676,53 +673,37 @@ void nvDpyProbeMaxPixelClock(NVDpyEvoPtr pDpyEvo)
nvkms_memset(&pDpyEvo->hdmi.srcCaps, 0, sizeof(pDpyEvo->hdmi.srcCaps));
nvkms_memset(&pDpyEvo->hdmi.sinkCaps, 0, sizeof(pDpyEvo->hdmi.sinkCaps));
if (pDevEvo->hal->caps.supportsHDMIFRL) {
if (nvHdmiDpySupportsFrl(pDpyEvo)) {
/*
* This function is called multiple times for each pDpyEvo:
* - Once when the dpy is created
* - Once when the dpy is connected
* - Once when the dpy is disconnected
* In the first and third cases, we don't yet have an EDID so
* we don't know if the sink supports HDMI FRL. Assume it
* doesn't, since if we try to set a mode anyway there won't be
* a sink to do link training with.
* An SOR needs to be assigned temporarily to do FRL training.
*
* Since the only other SORs in use at the moment (if any) are
* those driving heads, we don't need to exclude RM from
* selecting any SOR, so an sorExcludeMask of 0 is appropriate.
*/
if (pDpyEvo->parsedEdid.valid &&
pDpyEvo->parsedEdid.info.hdmiForumInfo.max_FRL_Rate) {
if (nvAssignSOREvo(pConnectorEvo, 0) &&
nvHdmiFrlAssessLink(pDpyEvo)) {
/*
* An SOR needs to be assigned temporarily to do FRL
* training.
* Since the only other SORs in use at the moment (if any)
* are those driving heads, we don't need to exclude RM
* from selecting any SOR, so an sorExcludeMask of 0 is
* appropriate.
* Note that although we "assessed" the link above, the
* maximum pixel clock set here doesn't take that into
* account -- it's the maximum the GPU hardware is capable
* of on the most capable link, mostly for reporting
* purposes.
*
* The calculation for if a given mode can fit in the
* assessed FRL configuration is complex and depends on
* things like the amount of blanking, rather than a simple
* pclk cutoff. So, we query the hdmi library when
* validating each individual mode, when we know actual
* timings.
*/
if (nvAssignSOREvo(pConnectorEvo, 0)) {
if (nvHdmiFrlAssessLink(pDpyEvo)) {
/*
* Note that although we "assessed" the link above,
* the maximum pixel clock set here doesn't take
* that into account -- it's the maximum the GPU
* hardware is capable of on the most capable link,
* mostly for reporting purposes.
*
* The calculation for if a given mode can fit in
* the assessed FRL configuration is complex and
* depends on things like the amount of blanking,
* rather than a simple pclk cutoff. So, we query
* the hdmi library when validating each individual
* mode, when we know actual timings.
*/
pDpyEvo->maxPixelClockKHz =
/*
* This comes from the Windows display driver:
* (4 lanes * 12Gb per lane *
* FRL encoding i.e 16/18) / 1K
*/
((4 * 12 * 1000 * 1000 * 16) / 18);
}
}
/*
* This comes from the Windows display driver: (4 lanes *
* 12Gb per lane * FRL encoding i.e 16/18) / 1K
*/
pDpyEvo->maxPixelClockKHz =
((4 * 12 * 1000 * 1000 * 16) / 18);
}
}
} else {
@ -754,8 +735,8 @@ void nvDpyProbeMaxPixelClock(NVDpyEvoPtr pDpyEvo)
* restrictive than the one described above. Check whether one of
* these dongles is in use, and override the limit accordingly.
*/
passiveDpDongleType = DpyGetPassiveDpDongleType(pDpyEvo,
&passiveDpDongleMaxPclkKHz);
passiveDpDongleType =
nvDpyGetPassiveDpDongleType(pDpyEvo, &passiveDpDongleMaxPclkKHz);
if (passiveDpDongleType != NV_EVO_PASSIVE_DP_DONGLE_UNUSED) {
pDpyEvo->maxPixelClockKHz = NV_MIN(passiveDpDongleMaxPclkKHz,
@ -832,9 +813,9 @@ static NvBool IsConnectorTMDS(NVConnectorEvoPtr pConnectorEvo)
* Query RM for the passive Displayport dongle type; this can influence
* the maximum pixel clock allowed on that display.
*/
static NVEvoPassiveDpDongleType
DpyGetPassiveDpDongleType(const NVDpyEvoRec *pDpyEvo,
NvU32 *passiveDpDongleMaxPclkKHz)
NVEvoPassiveDpDongleType
nvDpyGetPassiveDpDongleType(const NVDpyEvoRec *pDpyEvo,
NvU32 *passiveDpDongleMaxPclkKHz)
{
NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS params = { 0 };
NvU32 ret;

View File

@ -1866,6 +1866,8 @@ NvBool nvHdmiFrlAssessLink(NVDpyEvoPtr pDpyEvo)
NVHDMIPKT_RESULT ret;
const NvU32 displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
nvAssert(nvDpyIsHdmiEvo(pDpyEvo));
/* HDMI dpys are not dynamic dpys, so the connector should have a dpyId. */
nvAssert(displayId != 0);
nvAssert(pDpyEvo->parsedEdid.valid);
@ -1884,33 +1886,54 @@ NvBool nvHdmiFrlAssessLink(NVDpyEvoPtr pDpyEvo)
return pDpyEvo->hdmi.sinkCaps.linkMaxFRLRate != HDMI_FRL_DATA_RATE_NONE;
}
/* Determine if HDMI FRL is needed to drive the given timings on the given dpy. */
static NvBool TimingsNeedFRL(const NVDpyEvoRec *pDpyEvo,
const NVHwModeTimingsEvo *pTimings)
/*
* Determine if the given HDMI dpy supports FRL.
*
* Returns TRUE if the dpy supports FRL, or FALSE otherwise.
*/
NvBool nvHdmiDpySupportsFrl(const NVDpyEvoRec *pDpyEvo)
{
NvU32 passiveDpDongleMaxPclkKHz;
const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo;
/* Can't use FRL if the display hardware doesn't support it */
nvAssert(nvDpyIsHdmiEvo(pDpyEvo));
/* Can't use FRL if the display hardware doesn't support it. */
if (!pDevEvo->hal->caps.supportsHDMIFRL) {
return FALSE;
}
/* Can only use FRL for HDMI devices. */
if (!nvDpyIsHdmiEvo(pDpyEvo)) {
return FALSE;
}
/* Can only use FRL if the HDMI sink supports it. */
/* Can't use FRL if the HDMI sink doesn't support it. */
if (!pDpyEvo->parsedEdid.valid ||
!pDpyEvo->parsedEdid.info.hdmiForumInfo.max_FRL_Rate) {
return FALSE;
}
/* Can't use FRL if we are using a passive DP to HDMI dongle. */
if (nvDpyGetPassiveDpDongleType(pDpyEvo, &passiveDpDongleMaxPclkKHz) !=
NV_EVO_PASSIVE_DP_DONGLE_UNUSED) {
return FALSE;
}
return TRUE;
}
/*
* Determine if HDMI FRL is needed to drive timings with the given pixel clock
* on the given dpy.
*
* Returns TRUE if FRL is needed, or FALSE otherwise.
* */
static NvBool HdmiTimingsNeedFrl(const NVDpyEvoRec *pDpyEvo,
const NvU32 pixelClock)
{
nvAssert(nvDpyIsHdmiEvo(pDpyEvo));
/*
* For HDMI, maxSingleLinkPixelClockKHz is the maximum non-FRL rate.
* If the rate is higher than that, try to use FRL for the mode.
*/
return pTimings->pixelClock > pDpyEvo->maxSingleLinkPixelClockKHz;
return pixelClock > pDpyEvo->maxSingleLinkPixelClockKHz;
}
NvBool nvHdmiFrlQueryConfig(
@ -1927,10 +1950,15 @@ NvBool nvHdmiFrlQueryConfig(
NVT_TIMING nvtTiming = { };
NVHDMIPKT_RESULT ret;
if (!TimingsNeedFRL(pDpyEvo, pHwTimings)) {
if (!nvDpyIsHdmiEvo(pDpyEvo) ||
!HdmiTimingsNeedFrl(pDpyEvo, pHwTimings->pixelClock)) {
return TRUE;
}
if (!nvHdmiDpySupportsFrl(pDpyEvo)) {
return FALSE;
}
/* See if we can find an NVT_TIMING for this mode from the EDID. */
pNvtTiming = nvFindEdidNVT_TIMING(pDpyEvo, pModeTimings, pValidationParams);

View File

@ -1,65 +0,0 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef EFI_CONSOLE_H
#define EFI_CONSOLE_H
#include "gpu/disp/kern_disp_max.h"
struct OBJGPU;
typedef struct
{
NvBool isDispStateSave;
NvU32 activeDisplayId[OBJ_MAX_HEADS];
struct
{
NvU32 displayId;
struct {
NvU32 index;
NvU32 subLinkMask;
} sorXBar;
struct {
NvU32 linkBw;
NvU32 laneCount;
NvU32 linkCtl;
} displayPort;
} activeDfpState[OBJ_MAX_DFPS];
NvU32 numDfps;
struct
{
NvU32 coreChannelClass;
NvU32 cacheSize;
NvU32 *pCache;
} display;
} nv_efi_t;
void RmSaveEFIDisplayState (OBJGPU *pGpu);
void RmRestoreEFIDisplayState (OBJGPU *pGpu);
#endif // EFI_CONSOLE_H

View File

@ -30,8 +30,6 @@
#include <gpu/disp/kern_disp_max.h>
#include <gpu/disp/kern_disp_type.h>
#include <efi-console.h>
#define NV_PRIV_REG_WR08(b,o,d) (*((volatile NvV8*)&(b)->Reg008[(o)/1])=(NvV8)(d))
#define NV_PRIV_REG_WR16(b,o,d) (*((volatile NvV16*)&(b)->Reg016[(o)/2])=(NvV16)(d))
#define NV_PRIV_REG_WR32(b,o,d) (*((volatile NvV32*)&(b)->Reg032[(o)/4])=(NvV32)(d))
@ -331,8 +329,6 @@ typedef struct
nv_vga_t vga;
nv_efi_t efi;
NvU32 flags;
NvU32 status;

View File

@ -88,12 +88,12 @@ struct OS_RM_CAPS
nv_cap_t **caps;
};
NvBool osIsRaisedIRQL()
NvBool osIsRaisedIRQL(void)
{
return (!os_semaphore_may_sleep());
}
NvBool osIsISR()
NvBool osIsISR(void)
{
return os_is_isr();
}
@ -1783,7 +1783,7 @@ NV_STATUS osPackageRegistry(
return RmPackageRegistry(nv, pRegTable, pSize);
}
NvU32 osGetCpuCount()
NvU32 osGetCpuCount(void)
{
return os_get_cpu_count(); // Total number of logical CPUs.
}
@ -1834,7 +1834,7 @@ void osGetTimeoutParams(OBJGPU *pGpu, NvU32 *pTimeoutUs, NvU32 *pScale, NvU32 *p
return;
}
void osFlushLog()
void osFlushLog(void)
{
// Not implemented
}
@ -2671,7 +2671,7 @@ NV_STATUS osGpuLocksQueueRelease(OBJGPU *pGpu, NvU32 dpcGpuLocksRelease)
return NV_SEMA_RELEASE_FAILED;
}
void osSyncWithRmDestroy()
void osSyncWithRmDestroy(void)
{
}
@ -3511,7 +3511,7 @@ osGetGpuRailVoltageInfo
* @return pointer to the security token.
*/
PSECURITY_TOKEN
osGetSecurityToken()
osGetSecurityToken(void)
{
NV_STATUS rmStatus;
TOKEN_USER *pTokenUser;
@ -4177,7 +4177,7 @@ osWaitForIbmnpuRsync
}
NvU32
osGetPageSize()
osGetPageSize(void)
{
return os_page_size;
}

View File

@ -1213,8 +1213,6 @@ void RmClearPrivateState(
void *pVbiosCopy = NULL;
void *pRegistryCopy = NULL;
NvU32 vbiosSize;
NvU32 *pEfiDisplayCache;
NvU32 efiDisplayCacheSize;
nv_i2c_adapter_entry_t i2c_adapters[MAX_I2C_ADAPTERS];
nv_dynamic_power_t dynamicPowerCopy;
NvU32 x = 0;
@ -1234,8 +1232,6 @@ void RmClearPrivateState(
pVbiosCopy = nvp->pVbiosCopy;
vbiosSize = nvp->vbiosSize;
pRegistryCopy = nvp->pRegistry;
pEfiDisplayCache = nvp->efi.display.pCache;
efiDisplayCacheSize = nvp->efi.display.cacheSize;
dynamicPowerCopy = nvp->dynamic_power;
pmc_boot_0 = nvp->pmc_boot_0;
pmc_boot_42 = nvp->pmc_boot_42;
@ -1251,8 +1247,6 @@ void RmClearPrivateState(
nvp->pVbiosCopy = pVbiosCopy;
nvp->vbiosSize = vbiosSize;
nvp->pRegistry = pRegistryCopy;
nvp->efi.display.pCache = pEfiDisplayCache;
nvp->efi.display.cacheSize = efiDisplayCacheSize;
nvp->dynamic_power = dynamicPowerCopy;
nvp->pmc_boot_0 = pmc_boot_0;
nvp->pmc_boot_42 = pmc_boot_42;
@ -1280,7 +1274,6 @@ void RmFreePrivateState(
if (nvp != NULL)
{
portMemFree(nvp->pVbiosCopy);
portMemFree(nvp->efi.display.pCache);
os_free_mem(nvp);
}

View File

@ -192,21 +192,6 @@ static NvBool __nvoc_thunk_RmResource_deviceAccessCallback(struct Device *pResou
static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[] =
{
{ /* [0] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) deviceCtrlCmdBifReset_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*flags=*/ 0x204u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x800102u,
/*paramSize=*/ sizeof(NV0080_CTRL_BIF_RESET_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "deviceCtrlCmdBifReset"
#endif
},
{ /* [1] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -221,7 +206,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdBifGetDmaBaseSysmemAddr"
#endif
},
{ /* [2] */
{ /* [1] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -236,7 +221,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdBifAspmFeatureSupported"
#endif
},
{ /* [3] */
{ /* [2] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -251,7 +236,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdBifAspmCyaUpdate"
#endif
},
{ /* [4] */
{ /* [3] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -266,7 +251,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetClasslist"
#endif
},
{ /* [5] */
{ /* [4] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -281,7 +266,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetNumSubdevices"
#endif
},
{ /* [6] */
{ /* [5] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -296,7 +281,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuModifyGpuSwStatePersistence"
#endif
},
{ /* [7] */
{ /* [6] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -311,7 +296,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuQueryGpuSwStatePersistence"
#endif
},
{ /* [8] */
{ /* [7] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -326,7 +311,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetVirtualizationMode"
#endif
},
{ /* [9] */
{ /* [8] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -341,7 +326,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetSparseTextureComputeMode"
#endif
},
{ /* [10] */
{ /* [9] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -356,7 +341,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuSetSparseTextureComputeMode"
#endif
},
{ /* [11] */
{ /* [10] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -371,7 +356,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetVgxCaps"
#endif
},
{ /* [12] */
{ /* [11] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -386,7 +371,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetSriovCaps"
#endif
},
{ /* [13] */
{ /* [12] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -401,7 +386,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetClasslistV2"
#endif
},
{ /* [14] */
{ /* [13] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -416,7 +401,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetFindSubDeviceHandle"
#endif
},
{ /* [15] */
{ /* [14] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -431,7 +416,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuGetBrandCaps"
#endif
},
{ /* [16] */
{ /* [15] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -446,7 +431,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdGpuSetVgpuVfBar1Size"
#endif
},
{ /* [17] */
{ /* [16] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -461,7 +446,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdKGrGetCaps"
#endif
},
{ /* [18] */
{ /* [17] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -476,7 +461,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdKGrGetInfo"
#endif
},
{ /* [19] */
{ /* [18] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -491,7 +476,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdKGrGetTpcPartitionMode"
#endif
},
{ /* [20] */
{ /* [19] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -506,7 +491,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdKGrSetTpcPartitionMode"
#endif
},
{ /* [21] */
{ /* [20] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -521,7 +506,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdKGrGetCapsV2"
#endif
},
{ /* [22] */
{ /* [21] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -536,7 +521,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdKGrGetInfoV2"
#endif
},
{ /* [23] */
{ /* [22] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -551,7 +536,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFbGetCaps"
#endif
},
{ /* [24] */
{ /* [23] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -566,7 +551,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFbGetCompbitStoreInfo"
#endif
},
{ /* [25] */
{ /* [24] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -581,7 +566,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFbGetCapsV2"
#endif
},
{ /* [26] */
{ /* [25] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -596,7 +581,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdHostGetCaps"
#endif
},
{ /* [27] */
{ /* [26] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -611,7 +596,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdHostGetCapsV2"
#endif
},
{ /* [28] */
{ /* [27] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -626,7 +611,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetCaps"
#endif
},
{ /* [29] */
{ /* [28] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -641,7 +626,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoStartSelectedChannels"
#endif
},
{ /* [30] */
{ /* [29] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -656,7 +641,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetEngineContextProperties"
#endif
},
{ /* [31] */
{ /* [30] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -671,7 +656,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetChannelList"
#endif
},
{ /* [32] */
{ /* [31] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2211u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -686,7 +671,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetLatencyBufferSize"
#endif
},
{ /* [33] */
{ /* [32] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -701,7 +686,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoSetChannelProperties"
#endif
},
{ /* [34] */
{ /* [33] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -716,7 +701,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoStopRunlist"
#endif
},
{ /* [35] */
{ /* [34] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -731,7 +716,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoStartRunlist"
#endif
},
{ /* [36] */
{ /* [35] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -746,7 +731,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoGetCapsV2"
#endif
},
{ /* [37] */
{ /* [36] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -761,7 +746,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdFifoIdleChannels"
#endif
},
{ /* [38] */
{ /* [37] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -776,7 +761,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaGetPteInfo"
#endif
},
{ /* [39] */
{ /* [38] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -791,7 +776,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaFlush"
#endif
},
{ /* [40] */
{ /* [39] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -806,7 +791,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaAdvSchedGetVaCaps"
#endif
},
{ /* [41] */
{ /* [40] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -821,7 +806,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaGetPdeInfo"
#endif
},
{ /* [42] */
{ /* [41] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -836,7 +821,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetPteInfo"
#endif
},
{ /* [43] */
{ /* [42] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -851,7 +836,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaInvalidateTLB"
#endif
},
{ /* [44] */
{ /* [43] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -866,7 +851,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaGetCaps"
#endif
},
{ /* [45] */
{ /* [44] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -881,7 +866,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetVASpaceSize"
#endif
},
{ /* [46] */
{ /* [45] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -896,7 +881,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaUpdatePde2"
#endif
},
{ /* [47] */
{ /* [46] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -911,7 +896,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaEnablePrivilegedRange"
#endif
},
{ /* [48] */
{ /* [47] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1c0000u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -926,7 +911,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetDefaultVASpace"
#endif
},
{ /* [49] */
{ /* [48] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x140004u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -941,7 +926,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaSetPageDirectory"
#endif
},
{ /* [50] */
{ /* [49] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x140004u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -956,7 +941,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdDmaUnsetPageDirectory"
#endif
},
{ /* [51] */
{ /* [50] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -971,7 +956,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdMsencGetCaps"
#endif
},
{ /* [52] */
{ /* [51] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -986,7 +971,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdBspGetCapsV2"
#endif
},
{ /* [53] */
{ /* [52] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1001,7 +986,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdOsUnixVTSwitch"
#endif
},
{ /* [54] */
{ /* [53] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1016,7 +1001,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdOsUnixVTGetFBInfo"
#endif
},
{ /* [55] */
{ /* [54] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1031,7 +1016,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdNvjpgGetCapsV2"
#endif
},
{ /* [56] */
{ /* [55] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1046,7 +1031,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdInternalPerfCudaLimitDisable"
#endif
},
{ /* [57] */
{ /* [56] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1061,7 +1046,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
/*func=*/ "deviceCtrlCmdInternalPerfGetUnderpoweredGpuCount"
#endif
},
{ /* [58] */
{ /* [57] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe10u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -1081,7 +1066,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[]
const struct NVOC_EXPORT_INFO __nvoc_export_info_Device =
{
/*numEntries=*/ 59,
/*numEntries=*/ 58,
/*pExportEntries=*/ __nvoc_exported_method_def_Device
};
@ -1122,10 +1107,6 @@ static void __nvoc_init_funcTable_Device_1(Device *pThis) {
pThis->__deviceInternalControlForward__ = &deviceInternalControlForward_IMPL;
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
pThis->__deviceCtrlCmdBifReset__ = &deviceCtrlCmdBifReset_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
pThis->__deviceCtrlCmdBifGetDmaBaseSysmemAddr__ = &deviceCtrlCmdBifGetDmaBaseSysmemAddr_IMPL;
#endif

View File

@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -83,7 +83,6 @@ struct Device {
struct Device *__nvoc_pbase_Device;
NV_STATUS (*__deviceControl__)(struct Device *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
NV_STATUS (*__deviceInternalControlForward__)(struct Device *, NvU32, void *, NvU32);
NV_STATUS (*__deviceCtrlCmdBifReset__)(struct Device *, NV0080_CTRL_BIF_RESET_PARAMS *);
NV_STATUS (*__deviceCtrlCmdBifGetDmaBaseSysmemAddr__)(struct Device *, NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS *);
NV_STATUS (*__deviceCtrlCmdBifAspmFeatureSupported__)(struct Device *, NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS *);
NV_STATUS (*__deviceCtrlCmdBifAspmCyaUpdate__)(struct Device *, NV0080_CTRL_BIF_ASPM_CYA_UPDATE_PARAMS *);
@ -213,7 +212,6 @@ NV_STATUS __nvoc_objCreate_Device(Device**, Dynamic*, NvU32, struct CALL_CONTEXT
#define deviceControl(pResource, pCallContext, pParams) deviceControl_DISPATCH(pResource, pCallContext, pParams)
#define deviceInternalControlForward(pDevice, command, pParams, size) deviceInternalControlForward_DISPATCH(pDevice, command, pParams, size)
#define deviceCtrlCmdBifReset(pDevice, pBifResetParams) deviceCtrlCmdBifReset_DISPATCH(pDevice, pBifResetParams)
#define deviceCtrlCmdBifGetDmaBaseSysmemAddr(pDevice, pBifDmaBaseSysmemParams) deviceCtrlCmdBifGetDmaBaseSysmemAddr_DISPATCH(pDevice, pBifDmaBaseSysmemParams)
#define deviceCtrlCmdBifAspmFeatureSupported(pDevice, pBifAspmParams) deviceCtrlCmdBifAspmFeatureSupported_DISPATCH(pDevice, pBifAspmParams)
#define deviceCtrlCmdBifAspmCyaUpdate(pDevice, pBifAspmCyaParams) deviceCtrlCmdBifAspmCyaUpdate_DISPATCH(pDevice, pBifAspmCyaParams)
@ -305,12 +303,6 @@ static inline NV_STATUS deviceInternalControlForward_DISPATCH(struct Device *pDe
return pDevice->__deviceInternalControlForward__(pDevice, command, pParams, size);
}
NV_STATUS deviceCtrlCmdBifReset_IMPL(struct Device *pDevice, NV0080_CTRL_BIF_RESET_PARAMS *pBifResetParams);
static inline NV_STATUS deviceCtrlCmdBifReset_DISPATCH(struct Device *pDevice, NV0080_CTRL_BIF_RESET_PARAMS *pBifResetParams) {
return pDevice->__deviceCtrlCmdBifReset__(pDevice, pBifResetParams);
}
NV_STATUS deviceCtrlCmdBifGetDmaBaseSysmemAddr_IMPL(struct Device *pDevice, NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS *pBifDmaBaseSysmemParams);
static inline NV_STATUS deviceCtrlCmdBifGetDmaBaseSysmemAddr_DISPATCH(struct Device *pDevice, NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS *pBifDmaBaseSysmemParams) {

View File

@ -533,11 +533,11 @@ static void __nvoc_init_funcTable_OBJGPU_1(OBJGPU *pThis) {
// Hal function -- gpuClearFbhubPoisonIntrForBug2924523
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000400UL) )) /* ChipHal: GA100 | GH100 */
{
pThis->__gpuClearFbhubPoisonIntrForBug2924523__ = &gpuClearFbhubPoisonIntrForBug2924523_GA100_KERNEL;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x11f0fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 | GH100 */
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f0fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->__gpuClearFbhubPoisonIntrForBug2924523__ = &gpuClearFbhubPoisonIntrForBug2924523_56cd7a;
}
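
The HAL checks above are two-level membership tests: the upper bits of the variant index select a 32-bit mask word and the low five bits select a bit within it, so extending the GA100 case to also cover GH100 only ORs one more bit (0x10000000) into the mask. A small self-contained sketch with hypothetical index values:

#include <stdio.h>

/* Hypothetical illustration of the (idx >> 5) / (1UL << (idx & 0x1f))
 * membership test used by the generated HAL checks above. */
static int in_hal_mask(unsigned int varIdx, unsigned int word, unsigned long mask)
{
    return ((varIdx >> 5) == word) &&
           (((1UL << (varIdx & 0x1f)) & mask) != 0);
}

int main(void)
{
    unsigned int idxBit10 = 32u + 10u;  /* word 1, bit 10 -> 0x00000400 */
    unsigned int idxBit28 = 32u + 28u;  /* word 1, bit 28 -> 0x10000000 */

    printf("%d\n", in_hal_mask(idxBit10, 1u, 0x10000400UL)); /* 1: in mask   */
    printf("%d\n", in_hal_mask(idxBit28, 1u, 0x10000400UL)); /* 1: in mask   */
    printf("%d\n", in_hal_mask(idxBit28, 1u, 0x00000400UL)); /* 0: not in it */
    return 0;
}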

View File

@ -78,6 +78,10 @@ static NV_STATUS __nvoc_thunk_KernelBus_engstateStateInitLocked(OBJGPU *pGpu, st
return kbusStateInitLocked(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset));
}
static NV_STATUS __nvoc_thunk_KernelBus_engstateStatePreLoad(OBJGPU *pGpu, struct OBJENGSTATE *pKernelBus, NvU32 arg0) {
return kbusStatePreLoad(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_KernelBus_engstateStateLoad(OBJGPU *pGpu, struct OBJENGSTATE *pKernelBus, NvU32 arg0) {
return kbusStateLoad(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0);
}
@ -102,10 +106,6 @@ static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusReconcileTunableState(POBJGPU pGpu
return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), pTunableState);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusStatePreLoad(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) {
return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0);
}
static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusStatePostUnload(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) {
return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0);
}
@ -278,6 +278,12 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner *
pThis->__kbusStateInitLocked__ = &kbusStateInitLocked_IMPL;
// Hal function -- kbusStatePreLoad
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
pThis->__kbusStatePreLoad__ = &kbusStatePreLoad_56cd7a;
}
// Hal function -- kbusStateLoad
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
@ -300,12 +306,9 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner *
}
// Hal function -- kbusStateUnload
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x11f0ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 | GH100 */
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x11f0ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 | GH100 */
{
pThis->__kbusStateUnload__ = &kbusStateUnload_GM107;
}
pThis->__kbusStateUnload__ = &kbusStateUnload_GM107;
}
// Hal function -- kbusStateDestroy
@ -550,6 +553,21 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner *
pThis->__kbusAllocateFlaVaspace__ = &kbusAllocateFlaVaspace_395e98;
}
// Hal function -- kbusGetFlaRange
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */
{
pThis->__kbusGetFlaRange__ = &kbusGetFlaRange_GA100;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__kbusGetFlaRange__ = &kbusGetFlaRange_GH100;
}
// default
else
{
pThis->__kbusGetFlaRange__ = &kbusGetFlaRange_395e98;
}
// Hal function -- kbusAllocateLegacyFlaVaspace
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000400UL) )) /* ChipHal: GA100 | GH100 */
{
@ -816,6 +834,8 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner *
pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelBus_engstateStateInitLocked;
pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreLoad__ = &__nvoc_thunk_KernelBus_engstateStatePreLoad;
pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelBus_engstateStateLoad;
pThis->__nvoc_base_OBJENGSTATE.__engstateStatePostLoad__ = &__nvoc_thunk_KernelBus_engstateStatePostLoad;
@ -828,8 +848,6 @@ static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner *
pThis->__kbusReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbusReconcileTunableState;
pThis->__kbusStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kbusStatePreLoad;
pThis->__kbusStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kbusStatePostUnload;
pThis->__kbusStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kbusStateInitUnlocked;
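
The `__nvoc_thunk_*` helpers assigned above adapt calls between the OBJENGSTATE base object and the embedding KernelBus object by adding or subtracting a fixed byte offset before forwarding. A minimal sketch of that pointer-adjustment idea, using hypothetical types:

#include <stddef.h>

/* Hypothetical reduced example of the offset-adjusting thunks above:
 * the derived object embeds the base object at a fixed offset, and the
 * thunk recovers the derived pointer from the base pointer before
 * forwarding the call. */
struct EngState { int reserved; };

struct Bus {
    int             busData;
    struct EngState base;                 /* embedded base object */
};

static int busStateLoad(struct Bus *pBus, unsigned int flags)
{
    return pBus->busData + (int)flags;
}

/* thunk: receives the base pointer, subtracts the embedding offset */
static int thunk_engstateStateLoad(struct EngState *pBase, unsigned int flags)
{
    struct Bus *pBus = (struct Bus *)
        ((unsigned char *)pBase - offsetof(struct Bus, base));
    return busStateLoad(pBus, flags);
}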

View File

@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -297,6 +297,7 @@ struct KernelBus {
NV_STATUS (*__kbusConstructEngine__)(OBJGPU *, struct KernelBus *, ENGDESCRIPTOR);
NV_STATUS (*__kbusStatePreInitLocked__)(OBJGPU *, struct KernelBus *);
NV_STATUS (*__kbusStateInitLocked__)(OBJGPU *, struct KernelBus *);
NV_STATUS (*__kbusStatePreLoad__)(OBJGPU *, struct KernelBus *, NvU32);
NV_STATUS (*__kbusStateLoad__)(OBJGPU *, struct KernelBus *, NvU32);
NV_STATUS (*__kbusStatePostLoad__)(OBJGPU *, struct KernelBus *, NvU32);
NV_STATUS (*__kbusStatePreUnload__)(OBJGPU *, struct KernelBus *, NvU32);
@ -323,6 +324,7 @@ struct KernelBus {
NV_STATUS (*__kbusCheckFlaSupportedAndInit__)(OBJGPU *, struct KernelBus *, NvU64, NvU64);
NV_STATUS (*__kbusDetermineFlaRangeAndAllocate__)(OBJGPU *, struct KernelBus *, NvU64, NvU64);
NV_STATUS (*__kbusAllocateFlaVaspace__)(OBJGPU *, struct KernelBus *, NvU64, NvU64);
NV_STATUS (*__kbusGetFlaRange__)(OBJGPU *, struct KernelBus *, NvU64 *, NvU64 *, NvBool);
NV_STATUS (*__kbusAllocateLegacyFlaVaspace__)(OBJGPU *, struct KernelBus *, NvU64, NvU64);
NV_STATUS (*__kbusAllocateHostManagedFlaVaspace__)(OBJGPU *, struct KernelBus *, NvHandle, NvHandle, NvHandle, NvHandle, NvU64, NvU64, NvU32);
void (*__kbusDestroyFla__)(OBJGPU *, struct KernelBus *);
@ -347,7 +349,6 @@ struct KernelBus {
void (*__kbusUnmapCoherentCpuMapping__)(OBJGPU *, struct KernelBus *, PMEMORY_DESCRIPTOR);
void (*__kbusTeardownCoherentCpuMapping__)(OBJGPU *, struct KernelBus *, NvBool);
NV_STATUS (*__kbusReconcileTunableState__)(POBJGPU, struct KernelBus *, void *);
NV_STATUS (*__kbusStatePreLoad__)(POBJGPU, struct KernelBus *, NvU32);
NV_STATUS (*__kbusStatePostUnload__)(POBJGPU, struct KernelBus *, NvU32);
NV_STATUS (*__kbusStateInitUnlocked__)(POBJGPU, struct KernelBus *);
void (*__kbusInitMissing__)(POBJGPU, struct KernelBus *);
@ -451,6 +452,8 @@ NV_STATUS __nvoc_objCreate_KernelBus(KernelBus**, Dynamic*, NvU32);
#define kbusStatePreInitLocked(pGpu, pKernelBus) kbusStatePreInitLocked_DISPATCH(pGpu, pKernelBus)
#define kbusStatePreInitLocked_HAL(pGpu, pKernelBus) kbusStatePreInitLocked_DISPATCH(pGpu, pKernelBus)
#define kbusStateInitLocked(pGpu, pKernelBus) kbusStateInitLocked_DISPATCH(pGpu, pKernelBus)
#define kbusStatePreLoad(pGpu, pKernelBus, arg0) kbusStatePreLoad_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusStatePreLoad_HAL(pGpu, pKernelBus, arg0) kbusStatePreLoad_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusStateLoad(pGpu, pKernelBus, arg0) kbusStateLoad_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusStateLoad_HAL(pGpu, pKernelBus, arg0) kbusStateLoad_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusStatePostLoad(pGpu, pKernelBus, arg0) kbusStatePostLoad_DISPATCH(pGpu, pKernelBus, arg0)
@ -505,6 +508,8 @@ NV_STATUS __nvoc_objCreate_KernelBus(KernelBus**, Dynamic*, NvU32);
#define kbusDetermineFlaRangeAndAllocate_HAL(pGpu, pKernelBus, base, size) kbusDetermineFlaRangeAndAllocate_DISPATCH(pGpu, pKernelBus, base, size)
#define kbusAllocateFlaVaspace(pGpu, pKernelBus, arg0, arg1) kbusAllocateFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0, arg1)
#define kbusAllocateFlaVaspace_HAL(pGpu, pKernelBus, arg0, arg1) kbusAllocateFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0, arg1)
#define kbusGetFlaRange(pGpu, pKernelBus, arg0, arg1, arg2) kbusGetFlaRange_DISPATCH(pGpu, pKernelBus, arg0, arg1, arg2)
#define kbusGetFlaRange_HAL(pGpu, pKernelBus, arg0, arg1, arg2) kbusGetFlaRange_DISPATCH(pGpu, pKernelBus, arg0, arg1, arg2)
#define kbusAllocateLegacyFlaVaspace(pGpu, pKernelBus, arg0, arg1) kbusAllocateLegacyFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0, arg1)
#define kbusAllocateLegacyFlaVaspace_HAL(pGpu, pKernelBus, arg0, arg1) kbusAllocateLegacyFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0, arg1)
#define kbusAllocateHostManagedFlaVaspace(pGpu, pKernelBus, arg0, arg1, arg2, arg3, arg4, arg5, arg6) kbusAllocateHostManagedFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0, arg1, arg2, arg3, arg4, arg5, arg6)
@ -552,7 +557,6 @@ NV_STATUS __nvoc_objCreate_KernelBus(KernelBus**, Dynamic*, NvU32);
#define kbusTeardownCoherentCpuMapping(pGpu, pKernelBus, arg0) kbusTeardownCoherentCpuMapping_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusTeardownCoherentCpuMapping_HAL(pGpu, pKernelBus, arg0) kbusTeardownCoherentCpuMapping_DISPATCH(pGpu, pKernelBus, arg0)
#define kbusReconcileTunableState(pGpu, pEngstate, pTunableState) kbusReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define kbusStatePreLoad(pGpu, pEngstate, arg0) kbusStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kbusStatePostUnload(pGpu, pEngstate, arg0) kbusStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kbusStateInitUnlocked(pGpu, pEngstate) kbusStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kbusInitMissing(pGpu, pEngstate) kbusInitMissing_DISPATCH(pGpu, pEngstate)
@ -1601,6 +1605,14 @@ static inline NV_STATUS kbusStateInitLocked_DISPATCH(OBJGPU *pGpu, struct Kernel
return pKernelBus->__kbusStateInitLocked__(pGpu, pKernelBus);
}
static inline NV_STATUS kbusStatePreLoad_56cd7a(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) {
return NV_OK;
}
static inline NV_STATUS kbusStatePreLoad_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) {
return pKernelBus->__kbusStatePreLoad__(pGpu, pKernelBus, arg0);
}
NV_STATUS kbusStateLoad_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0);
static inline NV_STATUS kbusStateLoad_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) {
@ -1837,6 +1849,18 @@ static inline NV_STATUS kbusAllocateFlaVaspace_DISPATCH(OBJGPU *pGpu, struct Ker
return pKernelBus->__kbusAllocateFlaVaspace__(pGpu, pKernelBus, arg0, arg1);
}
NV_STATUS kbusGetFlaRange_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 *arg0, NvU64 *arg1, NvBool arg2);
NV_STATUS kbusGetFlaRange_GH100(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 *arg0, NvU64 *arg1, NvBool arg2);
static inline NV_STATUS kbusGetFlaRange_395e98(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 *arg0, NvU64 *arg1, NvBool arg2) {
return NV_ERR_NOT_SUPPORTED;
}
static inline NV_STATUS kbusGetFlaRange_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 *arg0, NvU64 *arg1, NvBool arg2) {
return pKernelBus->__kbusGetFlaRange__(pGpu, pKernelBus, arg0, arg1, arg2);
}
NV_STATUS kbusAllocateLegacyFlaVaspace_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 arg0, NvU64 arg1);
static inline NV_STATUS kbusAllocateLegacyFlaVaspace_395e98(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 arg0, NvU64 arg1) {
@ -2069,10 +2093,6 @@ static inline NV_STATUS kbusReconcileTunableState_DISPATCH(POBJGPU pGpu, struct
return pEngstate->__kbusReconcileTunableState__(pGpu, pEngstate, pTunableState);
}
static inline NV_STATUS kbusStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) {
return pEngstate->__kbusStatePreLoad__(pGpu, pEngstate, arg0);
}
static inline NV_STATUS kbusStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) {
return pEngstate->__kbusStatePostUnload__(pGpu, pEngstate, arg0);
}
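
The new `kbusGetFlaRange` entries follow the generated dispatch pattern visible throughout this header: a per-object function pointer, a `*_DISPATCH` inline that calls through it, and an init-time selection of the chip-specific implementation, with a stub such as `kbusGetFlaRange_395e98` returning `NV_ERR_NOT_SUPPORTED` as the default. A reduced sketch of the same shape, with hypothetical names and the chip selection simplified to a boolean:

/* Hypothetical reduced example of the generated dispatch pattern above. */
typedef int NV_STATUS;
#define NV_OK                 0
#define NV_ERR_NOT_SUPPORTED (-1)

struct ExampleBus {
    NV_STATUS (*__getRange__)(struct ExampleBus *, unsigned long long *pSize);
};

/* chip-specific implementation */
static NV_STATUS getRange_CHIPA(struct ExampleBus *pBus, unsigned long long *pSize)
{
    (void)pBus;
    *pSize = 1ULL << 36;
    return NV_OK;
}

/* default stub, like the *_395e98 "not supported" bodies */
static NV_STATUS getRange_stub(struct ExampleBus *pBus, unsigned long long *pSize)
{
    (void)pBus;
    (void)pSize;
    return NV_ERR_NOT_SUPPORTED;
}

/* analogous to the *_DISPATCH inlines: call through the pointer */
static inline NV_STATUS exampleGetRange(struct ExampleBus *pBus, unsigned long long *pSize)
{
    return pBus->__getRange__(pBus, pSize);
}

/* analogous to __nvoc_init_funcTable_*: pick the implementation once */
static void exampleInitFuncTable(struct ExampleBus *pBus, int isChipA)
{
    pBus->__getRange__ = isChipA ? getRange_CHIPA : getRange_stub;
}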

View File

@ -7,7 +7,7 @@ extern "C" {
#endif
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a

View File

@ -149,7 +149,7 @@ struct KernelHostVgpuDeviceApi {
NV_STATUS (*__kernelhostvgpudeviceapiMap__)(struct KernelHostVgpuDeviceApi *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *);
NvBool (*__kernelhostvgpudeviceapiAccessCallback__)(struct KernelHostVgpuDeviceApi *, struct RsClient *, void *, RsAccessRight);
struct KernelHostVgpuDeviceShr *pShared;
NvU32 notifyActions[6];
NvU32 notifyActions[7];
};
#ifndef __NVOC_CLASS_KernelHostVgpuDeviceApi_TYPEDEF__

View File

@ -884,7 +884,11 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2236, 0x1482, 0x10de, "NVIDIA A10" },
{ 0x2237, 0x152f, 0x10de, "NVIDIA A10G" },
{ 0x2238, 0x1677, 0x10de, "NVIDIA A10M" },
{ 0x2330, 0x16c0, 0x10de, "NVIDIA H100 80GB HBM3" },
{ 0x2330, 0x16c1, 0x10de, "NVIDIA H100 80GB HBM3" },
{ 0x2331, 0x1626, 0x10de, "NVIDIA H100 PCIe" },
{ 0x2336, 0x16c2, 0x10de, "NVIDIA H100 80GB HBM2e" },
{ 0x2336, 0x16c7, 0x10de, "NVIDIA H100 80GB HBM2e" },
{ 0x2414, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" },
{ 0x2420, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" },
{ 0x2438, 0x0000, 0x0000, "NVIDIA RTX A5500 Laptop GPU" },
@ -948,6 +952,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x25A7, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" },
{ 0x25A9, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" },
{ 0x25AA, 0x0000, 0x0000, "NVIDIA GeForce MX570 A" },
{ 0x25AD, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" },
{ 0x25B6, 0x14a9, 0x10de, "NVIDIA A16" },
{ 0x25B6, 0x157e, 0x10de, "NVIDIA A2" },
{ 0x25B8, 0x0000, 0x0000, "NVIDIA RTX A2000 Laptop GPU" },
@ -957,6 +962,7 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x25E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" },
{ 0x25E2, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" },
{ 0x25E5, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" },
{ 0x25ED, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" },
{ 0x25F9, 0x0000, 0x0000, "NVIDIA RTX A1000 Embedded GPU" },
{ 0x25FA, 0x0000, 0x0000, "NVIDIA RTX A2000 Embedded GPU" },
{ 0x25FB, 0x0000, 0x0000, "NVIDIA RTX A500 Embedded GPU" },
@ -1700,20 +1706,20 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x2238, 0x16b8, 0x10DE, "NVIDIA A10M-10C" },
{ 0x2238, 0x16b9, 0x10DE, "NVIDIA A10M-20C" },
{ 0x2238, 0x16e6, 0x10DE, "NVIDIA A10M-1" },
{ 0x2322, 0x17e2, 0x10DE, "NVIDIA H800-1-10CME" },
{ 0x2322, 0x17e3, 0x10DE, "NVIDIA H800-1-10C" },
{ 0x2322, 0x17e4, 0x10DE, "NVIDIA H800-2-20C" },
{ 0x2322, 0x17e5, 0x10DE, "NVIDIA H800-3-40C" },
{ 0x2322, 0x17e6, 0x10DE, "NVIDIA H800-4-40C" },
{ 0x2322, 0x17e7, 0x10DE, "NVIDIA H800-7-80C" },
{ 0x2322, 0x17e8, 0x10DE, "NVIDIA H800-4C" },
{ 0x2322, 0x17e9, 0x10DE, "NVIDIA H800-5C" },
{ 0x2322, 0x17ea, 0x10DE, "NVIDIA H800-8C" },
{ 0x2322, 0x17eb, 0x10DE, "NVIDIA H800-10C" },
{ 0x2322, 0x17ec, 0x10DE, "NVIDIA H800-16C" },
{ 0x2322, 0x17ed, 0x10DE, "NVIDIA H800-20C" },
{ 0x2322, 0x17ee, 0x10DE, "NVIDIA H800-40C" },
{ 0x2322, 0x17ef, 0x10DE, "NVIDIA H800-80C" },
{ 0x2322, 0x17e2, 0x10DE, "NVIDIA GPU-2322-17E2" },
{ 0x2322, 0x17e3, 0x10DE, "NVIDIA GPU-2322-17E3" },
{ 0x2322, 0x17e4, 0x10DE, "NVIDIA GPU-2322-17E4" },
{ 0x2322, 0x17e5, 0x10DE, "NVIDIA GPU-2322-17E5" },
{ 0x2322, 0x17e6, 0x10DE, "NVIDIA GPU-2322-17E6" },
{ 0x2322, 0x17e7, 0x10DE, "NVIDIA GPU-2322-17E7" },
{ 0x2322, 0x17e8, 0x10DE, "NVIDIA GPU-2322-17E8" },
{ 0x2322, 0x17e9, 0x10DE, "NVIDIA GPU-2322-17E9" },
{ 0x2322, 0x17ea, 0x10DE, "NVIDIA GPU-2322-17EA" },
{ 0x2322, 0x17eb, 0x10DE, "NVIDIA GPU-2322-17EB" },
{ 0x2322, 0x17ec, 0x10DE, "NVIDIA GPU-2322-17EC" },
{ 0x2322, 0x17ed, 0x10DE, "NVIDIA GPU-2322-17ED" },
{ 0x2322, 0x17ee, 0x10DE, "NVIDIA GPU-2322-17EE" },
{ 0x2322, 0x17ef, 0x10DE, "NVIDIA GPU-2322-17EF" },
{ 0x2331, 0x16d3, 0x10DE, "NVIDIA H100-1-10C" },
{ 0x2331, 0x16d4, 0x10DE, "NVIDIA H100-2-20C" },
{ 0x2331, 0x16d5, 0x10DE, "NVIDIA H100-3-40C" },
@ -1761,45 +1767,45 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x25B6, 0x1655, 0x10DE, "NVIDIA A2-4C" },
{ 0x25B6, 0x1656, 0x10DE, "NVIDIA A2-8C" },
{ 0x25B6, 0x1657, 0x10DE, "NVIDIA A2-16C" },
{ 0x26B1, 0x1708, 0x10DE, "NVIDIA RTX 6000 Ada-1B" },
{ 0x26B1, 0x1709, 0x10DE, "NVIDIA RTX 6000 Ada-2B" },
{ 0x26B1, 0x170a, 0x10DE, "NVIDIA RTX 6000 Ada-1Q" },
{ 0x26B1, 0x170b, 0x10DE, "NVIDIA RTX 6000 Ada-2Q" },
{ 0x26B1, 0x170c, 0x10DE, "NVIDIA RTX 6000 Ada-3Q" },
{ 0x26B1, 0x170d, 0x10DE, "NVIDIA RTX 6000 Ada-4Q" },
{ 0x26B1, 0x170e, 0x10DE, "NVIDIA RTX 6000 Ada-6Q" },
{ 0x26B1, 0x170f, 0x10DE, "NVIDIA RTX 6000 Ada-8Q" },
{ 0x26B1, 0x1710, 0x10DE, "NVIDIA RTX 6000 Ada-12Q" },
{ 0x26B1, 0x1711, 0x10DE, "NVIDIA RTX 6000 Ada-16Q" },
{ 0x26B1, 0x1712, 0x10DE, "NVIDIA RTX 6000 Ada-24Q" },
{ 0x26B1, 0x1713, 0x10DE, "NVIDIA RTX 6000 Ada-48Q" },
{ 0x26B1, 0x1714, 0x10DE, "NVIDIA RTX 6000 Ada-1A" },
{ 0x26B1, 0x1715, 0x10DE, "NVIDIA RTX 6000 Ada-2A" },
{ 0x26B1, 0x1716, 0x10DE, "NVIDIA RTX 6000 Ada-3A" },
{ 0x26B1, 0x1717, 0x10DE, "NVIDIA RTX 6000 Ada-4A" },
{ 0x26B1, 0x1718, 0x10DE, "NVIDIA RTX 6000 Ada-6A" },
{ 0x26B1, 0x1719, 0x10DE, "NVIDIA RTX 6000 Ada-8A" },
{ 0x26B1, 0x171a, 0x10DE, "NVIDIA RTX 6000 Ada-12A" },
{ 0x26B1, 0x171b, 0x10DE, "NVIDIA RTX 6000 Ada-16A" },
{ 0x26B1, 0x171c, 0x10DE, "NVIDIA RTX 6000 Ada-24A" },
{ 0x26B1, 0x171d, 0x10DE, "NVIDIA RTX 6000 Ada-48A" },
{ 0x26B1, 0x171e, 0x10DE, "NVIDIA RTX 6000 Ada-1" },
{ 0x26B1, 0x171f, 0x10DE, "NVIDIA RTX 6000 Ada-2" },
{ 0x26B1, 0x1720, 0x10DE, "NVIDIA RTX 6000 Ada-3" },
{ 0x26B1, 0x1721, 0x10DE, "NVIDIA RTX 6000 Ada-4" },
{ 0x26B1, 0x1722, 0x10DE, "NVIDIA RTX 6000 Ada-6" },
{ 0x26B1, 0x1723, 0x10DE, "NVIDIA RTX 6000 Ada-8" },
{ 0x26B1, 0x1724, 0x10DE, "NVIDIA RTX 6000 Ada-12" },
{ 0x26B1, 0x1725, 0x10DE, "NVIDIA RTX 6000 Ada-16" },
{ 0x26B1, 0x1726, 0x10DE, "NVIDIA RTX 6000 Ada-24" },
{ 0x26B1, 0x1727, 0x10DE, "NVIDIA RTX 6000 Ada-48" },
{ 0x26B1, 0x1728, 0x10DE, "NVIDIA RTX 6000 Ada-4C" },
{ 0x26B1, 0x1729, 0x10DE, "NVIDIA RTX 6000 Ada-6C" },
{ 0x26B1, 0x172a, 0x10DE, "NVIDIA RTX 6000 Ada-8C" },
{ 0x26B1, 0x172b, 0x10DE, "NVIDIA RTX 6000 Ada-12C" },
{ 0x26B1, 0x172c, 0x10DE, "NVIDIA RTX 6000 Ada-16C" },
{ 0x26B1, 0x172d, 0x10DE, "NVIDIA RTX 6000 Ada-24C" },
{ 0x26B1, 0x172e, 0x10DE, "NVIDIA RTX 6000 Ada-48C" },
{ 0x26B1, 0x1708, 0x10DE, "NVIDIA RTX6000-Ada-1B" },
{ 0x26B1, 0x1709, 0x10DE, "NVIDIA RTX6000-Ada-2B" },
{ 0x26B1, 0x170a, 0x10DE, "NVIDIA RTX6000-Ada-1Q" },
{ 0x26B1, 0x170b, 0x10DE, "NVIDIA RTX6000-Ada-2Q" },
{ 0x26B1, 0x170c, 0x10DE, "NVIDIA RTX6000-Ada-3Q" },
{ 0x26B1, 0x170d, 0x10DE, "NVIDIA RTX6000-Ada-4Q" },
{ 0x26B1, 0x170e, 0x10DE, "NVIDIA RTX6000-Ada-6Q" },
{ 0x26B1, 0x170f, 0x10DE, "NVIDIA RTX6000-Ada-8Q" },
{ 0x26B1, 0x1710, 0x10DE, "NVIDIA RTX6000-Ada-12Q" },
{ 0x26B1, 0x1711, 0x10DE, "NVIDIA RTX6000-Ada-16Q" },
{ 0x26B1, 0x1712, 0x10DE, "NVIDIA RTX6000-Ada-24Q" },
{ 0x26B1, 0x1713, 0x10DE, "NVIDIA RTX6000-Ada-48Q" },
{ 0x26B1, 0x1714, 0x10DE, "NVIDIA RTX6000-Ada-1A" },
{ 0x26B1, 0x1715, 0x10DE, "NVIDIA RTX6000-Ada-2A" },
{ 0x26B1, 0x1716, 0x10DE, "NVIDIA RTX6000-Ada-3A" },
{ 0x26B1, 0x1717, 0x10DE, "NVIDIA RTX6000-Ada-4A" },
{ 0x26B1, 0x1718, 0x10DE, "NVIDIA RTX6000-Ada-6A" },
{ 0x26B1, 0x1719, 0x10DE, "NVIDIA RTX6000-Ada-8A" },
{ 0x26B1, 0x171a, 0x10DE, "NVIDIA RTX6000-Ada-12A" },
{ 0x26B1, 0x171b, 0x10DE, "NVIDIA RTX6000-Ada-16A" },
{ 0x26B1, 0x171c, 0x10DE, "NVIDIA RTX6000-Ada-24A" },
{ 0x26B1, 0x171d, 0x10DE, "NVIDIA RTX6000-Ada-48A" },
{ 0x26B1, 0x171e, 0x10DE, "NVIDIA RTX6000-Ada-1" },
{ 0x26B1, 0x171f, 0x10DE, "NVIDIA RTX6000-Ada-2" },
{ 0x26B1, 0x1720, 0x10DE, "NVIDIA RTX6000-Ada-3" },
{ 0x26B1, 0x1721, 0x10DE, "NVIDIA RTX6000-Ada-4" },
{ 0x26B1, 0x1722, 0x10DE, "NVIDIA RTX6000-Ada-6" },
{ 0x26B1, 0x1723, 0x10DE, "NVIDIA RTX6000-Ada-8" },
{ 0x26B1, 0x1724, 0x10DE, "NVIDIA RTX6000-Ada-12" },
{ 0x26B1, 0x1725, 0x10DE, "NVIDIA RTX6000-Ada-16" },
{ 0x26B1, 0x1726, 0x10DE, "NVIDIA RTX6000-Ada-24" },
{ 0x26B1, 0x1727, 0x10DE, "NVIDIA RTX6000-Ada-48" },
{ 0x26B1, 0x1728, 0x10DE, "NVIDIA RTX6000-Ada-4C" },
{ 0x26B1, 0x1729, 0x10DE, "NVIDIA RTX6000-Ada-6C" },
{ 0x26B1, 0x172a, 0x10DE, "NVIDIA RTX6000-Ada-8C" },
{ 0x26B1, 0x172b, 0x10DE, "NVIDIA RTX6000-Ada-12C" },
{ 0x26B1, 0x172c, 0x10DE, "NVIDIA RTX6000-Ada-16C" },
{ 0x26B1, 0x172d, 0x10DE, "NVIDIA RTX6000-Ada-24C" },
{ 0x26B1, 0x172e, 0x10DE, "NVIDIA RTX6000-Ada-48C" },
{ 0x26B5, 0x176d, 0x10DE, "NVIDIA L40-1B" },
{ 0x26B5, 0x176e, 0x10DE, "NVIDIA L40-2B" },
{ 0x26B5, 0x176f, 0x10DE, "NVIDIA L40-1Q" },
@ -1870,37 +1876,37 @@ static const CHIPS_RELEASED sChipsReleased[] = {
{ 0x26B8, 0x176a, 0x10DE, "NVIDIA L40G-8C" },
{ 0x26B8, 0x176b, 0x10DE, "NVIDIA L40G-12C" },
{ 0x26B8, 0x176c, 0x10DE, "NVIDIA L40G-24C" },
{ 0x27B8, 0x172f, 0x10DE, "NVIDIA GPU 27B8-172F" },
{ 0x27B8, 0x1730, 0x10DE, "NVIDIA GPU 27B8-1730" },
{ 0x27B8, 0x1731, 0x10DE, "NVIDIA GPU 27B8-1731" },
{ 0x27B8, 0x1732, 0x10DE, "NVIDIA GPU 27B8-1732" },
{ 0x27B8, 0x1733, 0x10DE, "NVIDIA GPU 27B8-1733" },
{ 0x27B8, 0x1734, 0x10DE, "NVIDIA GPU 27B8-1734" },
{ 0x27B8, 0x1735, 0x10DE, "NVIDIA GPU 27B8-1735" },
{ 0x27B8, 0x1736, 0x10DE, "NVIDIA GPU 27B8-1736" },
{ 0x27B8, 0x1737, 0x10DE, "NVIDIA GPU 27B8-1737" },
{ 0x27B8, 0x1738, 0x10DE, "NVIDIA GPU 27B8-1738" },
{ 0x27B8, 0x1739, 0x10DE, "NVIDIA GPU 27B8-1739" },
{ 0x27B8, 0x173a, 0x10DE, "NVIDIA GPU 27B8-173A" },
{ 0x27B8, 0x173b, 0x10DE, "NVIDIA GPU 27B8-173B" },
{ 0x27B8, 0x173c, 0x10DE, "NVIDIA GPU 27B8-173C" },
{ 0x27B8, 0x173d, 0x10DE, "NVIDIA GPU 27B8-173D" },
{ 0x27B8, 0x173e, 0x10DE, "NVIDIA GPU 27B8-173E" },
{ 0x27B8, 0x173f, 0x10DE, "NVIDIA GPU 27B8-173F" },
{ 0x27B8, 0x1740, 0x10DE, "NVIDIA GPU 27B8-1740" },
{ 0x27B8, 0x1741, 0x10DE, "NVIDIA GPU 27B8-1741" },
{ 0x27B8, 0x1742, 0x10DE, "NVIDIA GPU 27B8-1742" },
{ 0x27B8, 0x1743, 0x10DE, "NVIDIA GPU 27B8-1743" },
{ 0x27B8, 0x1744, 0x10DE, "NVIDIA GPU 27B8-1744" },
{ 0x27B8, 0x1745, 0x10DE, "NVIDIA GPU 27B8-1745" },
{ 0x27B8, 0x1746, 0x10DE, "NVIDIA GPU 27B8-1746" },
{ 0x27B8, 0x1747, 0x10DE, "NVIDIA GPU 27B8-1747" },
{ 0x27B8, 0x1748, 0x10DE, "NVIDIA GPU 27B8-1748" },
{ 0x27B8, 0x1749, 0x10DE, "NVIDIA GPU 27B8-1749" },
{ 0x27B8, 0x174a, 0x10DE, "NVIDIA GPU 27B8-174A" },
{ 0x27B8, 0x174b, 0x10DE, "NVIDIA GPU 27B8-174B" },
{ 0x27B8, 0x174c, 0x10DE, "NVIDIA GPU 27B8-174C" },
{ 0x27B8, 0x174d, 0x10DE, "NVIDIA GPU 27B8-174D" },
{ 0x27B8, 0x172f, 0x10DE, "NVIDIA GPU-27B8-172F" },
{ 0x27B8, 0x1730, 0x10DE, "NVIDIA GPU-27B8-1730" },
{ 0x27B8, 0x1731, 0x10DE, "NVIDIA GPU-27B8-1731" },
{ 0x27B8, 0x1732, 0x10DE, "NVIDIA GPU-27B8-1732" },
{ 0x27B8, 0x1733, 0x10DE, "NVIDIA GPU-27B8-1733" },
{ 0x27B8, 0x1734, 0x10DE, "NVIDIA GPU-27B8-1734" },
{ 0x27B8, 0x1735, 0x10DE, "NVIDIA GPU-27B8-1735" },
{ 0x27B8, 0x1736, 0x10DE, "NVIDIA GPU-27B8-1736" },
{ 0x27B8, 0x1737, 0x10DE, "NVIDIA GPU-27B8-1737" },
{ 0x27B8, 0x1738, 0x10DE, "NVIDIA GPU-27B8-1738" },
{ 0x27B8, 0x1739, 0x10DE, "NVIDIA GPU-27B8-1739" },
{ 0x27B8, 0x173a, 0x10DE, "NVIDIA GPU-27B8-173A" },
{ 0x27B8, 0x173b, 0x10DE, "NVIDIA GPU-27B8-173B" },
{ 0x27B8, 0x173c, 0x10DE, "NVIDIA GPU-27B8-173C" },
{ 0x27B8, 0x173d, 0x10DE, "NVIDIA GPU-27B8-173D" },
{ 0x27B8, 0x173e, 0x10DE, "NVIDIA GPU-27B8-173E" },
{ 0x27B8, 0x173f, 0x10DE, "NVIDIA GPU-27B8-173F" },
{ 0x27B8, 0x1740, 0x10DE, "NVIDIA GPU-27B8-1740" },
{ 0x27B8, 0x1741, 0x10DE, "NVIDIA GPU-27B8-1741" },
{ 0x27B8, 0x1742, 0x10DE, "NVIDIA GPU-27B8-1742" },
{ 0x27B8, 0x1743, 0x10DE, "NVIDIA GPU-27B8-1743" },
{ 0x27B8, 0x1744, 0x10DE, "NVIDIA GPU-27B8-1744" },
{ 0x27B8, 0x1745, 0x10DE, "NVIDIA GPU-27B8-1745" },
{ 0x27B8, 0x1746, 0x10DE, "NVIDIA GPU-27B8-1746" },
{ 0x27B8, 0x1747, 0x10DE, "NVIDIA GPU-27B8-1747" },
{ 0x27B8, 0x1748, 0x10DE, "NVIDIA GPU-27B8-1748" },
{ 0x27B8, 0x1749, 0x10DE, "NVIDIA GPU-27B8-1749" },
{ 0x27B8, 0x174a, 0x10DE, "NVIDIA GPU-27B8-174A" },
{ 0x27B8, 0x174b, 0x10DE, "NVIDIA GPU-27B8-174B" },
{ 0x27B8, 0x174c, 0x10DE, "NVIDIA GPU-27B8-174C" },
{ 0x27B8, 0x174d, 0x10DE, "NVIDIA GPU-27B8-174D" },
};
#endif // G_NV_NAME_RELEASED_H

View File

@ -6503,18 +6503,33 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkL1Threshold_IMPL,
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkSetL1Threshold_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*flags=*/ 0x204u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x2080303eu,
/*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_L1_THRESHOLD_PARAMS),
/*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "subdeviceCtrlCmdNvlinkL1Threshold"
/*func=*/ "subdeviceCtrlCmdNvlinkSetL1Threshold"
#endif
},
{ /* [419] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkGetL1Threshold_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x2080303fu,
/*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "subdeviceCtrlCmdNvlinkGetL1Threshold"
#endif
},
{ /* [420] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1240u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6522,14 +6537,14 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1240u)
/*flags=*/ 0x1240u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x2080303fu,
/*methodId=*/ 0x20803040u,
/*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "subdeviceCtrlCmdNvlinkInbandSendData"
#endif
},
{ /* [420] */
{ /* [421] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6537,14 +6552,14 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*flags=*/ 0x210u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x20803041u,
/*methodId=*/ 0x20803042u,
/*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "subdeviceCtrlCmdNvlinkDirectConnectCheck"
#endif
},
{ /* [421] */
{ /* [422] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6552,14 +6567,14 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*flags=*/ 0x200u,
/*accessRight=*/0x0u,
/*methodId=*/ 0x20803042u,
/*methodId=*/ 0x20803043u,
/*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS),
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
/*func=*/ "subdeviceCtrlCmdNvlinkPostFaultUp"
#endif
},
{ /* [422] */
{ /* [423] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6574,7 +6589,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetDmemUsage"
#endif
},
{ /* [423] */
{ /* [424] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6589,7 +6604,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetEngineArch"
#endif
},
{ /* [424] */
{ /* [425] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6604,7 +6619,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnUstreamerQueueInfo"
#endif
},
{ /* [425] */
{ /* [426] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6619,7 +6634,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnUstreamerControlGet"
#endif
},
{ /* [426] */
{ /* [427] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6634,7 +6649,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnUstreamerControlSet"
#endif
},
{ /* [427] */
{ /* [428] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6649,7 +6664,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetCtxBufferInfo"
#endif
},
{ /* [428] */
{ /* [429] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6664,7 +6679,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlcnGetCtxBufferSize"
#endif
},
{ /* [429] */
{ /* [430] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6679,7 +6694,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdEccGetClientExposedCounters"
#endif
},
{ /* [430] */
{ /* [431] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6694,7 +6709,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaRange"
#endif
},
{ /* [431] */
{ /* [432] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x102204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6709,7 +6724,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaSetupInstanceMemBlock"
#endif
},
{ /* [432] */
{ /* [433] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x100004u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6724,7 +6739,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaGetRange"
#endif
},
{ /* [433] */
{ /* [434] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1810u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6739,7 +6754,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdFlaGetFabricMemStats"
#endif
},
{ /* [434] */
{ /* [435] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6754,7 +6769,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGspGetFeatures"
#endif
},
{ /* [435] */
{ /* [436] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6769,7 +6784,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGrmgrGetGrFsInfo"
#endif
},
{ /* [436] */
{ /* [437] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6784,7 +6799,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixGc6BlockerRefCnt"
#endif
},
{ /* [437] */
{ /* [438] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6799,7 +6814,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixAllowDisallowGcoff"
#endif
},
{ /* [438] */
{ /* [439] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6814,7 +6829,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixAudioDynamicPower"
#endif
},
{ /* [439] */
{ /* [440] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6829,7 +6844,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixVidmemPersistenceStatus"
#endif
},
{ /* [440] */
{ /* [441] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6844,7 +6859,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdOsUnixUpdateTgpStatus"
#endif
},
{ /* [441] */
{ /* [442] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6859,7 +6874,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalBootloadGspVgpuPluginTask"
#endif
},
{ /* [442] */
{ /* [443] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6874,7 +6889,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalShutdownGspVgpuPluginTask"
#endif
},
{ /* [443] */
{ /* [444] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6889,7 +6904,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalPgpuAddVgpuType"
#endif
},
{ /* [444] */
{ /* [445] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6904,7 +6919,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalEnumerateVgpuPerPgpu"
#endif
},
{ /* [445] */
{ /* [446] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6919,7 +6934,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalClearGuestVmInfo"
#endif
},
{ /* [446] */
{ /* [447] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6934,7 +6949,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetVgpuFbUsage"
#endif
},
{ /* [447] */
{ /* [448] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6949,7 +6964,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalSetVgpuEncoderCapacity"
#endif
},
{ /* [448] */
{ /* [449] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6964,7 +6979,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalCleanupGspVgpuPluginResources"
#endif
},
{ /* [449] */
{ /* [450] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6979,7 +6994,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetPgpuFsEncoding"
#endif
},
{ /* [450] */
{ /* [451] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -6994,7 +7009,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalGetPgpuMigrationSupport"
#endif
},
{ /* [451] */
{ /* [452] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7009,7 +7024,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdVgpuMgrInternalSetVgpuMgrConfig"
#endif
},
{ /* [452] */
{ /* [453] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7024,7 +7039,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
/*func=*/ "subdeviceCtrlCmdGetAvailableHshubMask"
#endif
},
{ /* [453] */
{ /* [454] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
@ -7044,7 +7059,7 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevic
const struct NVOC_EXPORT_INFO __nvoc_export_info_Subdevice =
{
/*numEntries=*/ 454,
/*numEntries=*/ 455,
/*pExportEntries=*/ __nvoc_exported_method_def_Subdevice
};
@ -7489,13 +7504,17 @@ static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner *
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
pThis->__subdeviceCtrlCmdNvlinkL1Threshold__ = &subdeviceCtrlCmdNvlinkL1Threshold_IMPL;
pThis->__subdeviceCtrlCmdNvlinkSetL1Threshold__ = &subdeviceCtrlCmdNvlinkSetL1Threshold_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
pThis->__subdeviceCtrlCmdNvlinkDirectConnectCheck__ = &subdeviceCtrlCmdNvlinkDirectConnectCheck_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
pThis->__subdeviceCtrlCmdNvlinkGetL1Threshold__ = &subdeviceCtrlCmdNvlinkGetL1Threshold_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u)
pThis->__subdeviceCtrlCmdI2cReadBuffer__ = &subdeviceCtrlCmdI2cReadBuffer_IMPL;
#endif
@ -8151,10 +8170,6 @@ static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner *
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x844u)
pThis->__subdeviceCtrlCmdGpuSetComputeModeRules__ = &subdeviceCtrlCmdGpuSetComputeModeRules_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
pThis->__subdeviceCtrlCmdGpuQueryComputeModeRules__ = &subdeviceCtrlCmdGpuQueryComputeModeRules_IMPL;
#endif
}
static void __nvoc_init_funcTable_Subdevice_2(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) {
@ -8165,6 +8180,10 @@ static void __nvoc_init_funcTable_Subdevice_2(Subdevice *pThis, RmHalspecOwner *
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
pThis->__subdeviceCtrlCmdGpuQueryComputeModeRules__ = &subdeviceCtrlCmdGpuQueryComputeModeRules_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
pThis->__subdeviceCtrlCmdGpuAcquireComputeModeReservation__ = &subdeviceCtrlCmdGpuAcquireComputeModeReservation_IMPL;
#endif


@ -215,8 +215,9 @@ struct Subdevice {
NV_STATUS (*__subdeviceCtrlCmdNvlinkInbandSendData__)(struct Subdevice *, NV2080_CTRL_NVLINK_INBAND_SEND_DATA_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdNvlinkPostFaultUp__)(struct Subdevice *, NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdNvlinkEomControl__)(struct Subdevice *, NV2080_CTRL_NVLINK_EOM_CONTROL_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdNvlinkL1Threshold__)(struct Subdevice *, NV2080_CTRL_NVLINK_L1_THRESHOLD_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdNvlinkSetL1Threshold__)(struct Subdevice *, NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdNvlinkDirectConnectCheck__)(struct Subdevice *, NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdNvlinkGetL1Threshold__)(struct Subdevice *, NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdI2cReadBuffer__)(struct Subdevice *, NV2080_CTRL_I2C_READ_BUFFER_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdI2cWriteBuffer__)(struct Subdevice *, NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS *);
NV_STATUS (*__subdeviceCtrlCmdI2cReadReg__)(struct Subdevice *, NV2080_CTRL_I2C_RW_REG_PARAMS *);
@ -755,8 +756,9 @@ NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_C
#define subdeviceCtrlCmdNvlinkInbandSendData(pSubdevice, pParams) subdeviceCtrlCmdNvlinkInbandSendData_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdNvlinkPostFaultUp(pSubdevice, pParams) subdeviceCtrlCmdNvlinkPostFaultUp_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdNvlinkEomControl(pSubdevice, pParams) subdeviceCtrlCmdNvlinkEomControl_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdNvlinkL1Threshold(pSubdevice, pParams) subdeviceCtrlCmdNvlinkL1Threshold_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdNvlinkSetL1Threshold(pSubdevice, pParams) subdeviceCtrlCmdNvlinkSetL1Threshold_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdNvlinkDirectConnectCheck(pSubdevice, pParams) subdeviceCtrlCmdNvlinkDirectConnectCheck_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdNvlinkGetL1Threshold(pSubdevice, pParams) subdeviceCtrlCmdNvlinkGetL1Threshold_DISPATCH(pSubdevice, pParams)
#define subdeviceCtrlCmdI2cReadBuffer(pSubdevice, pI2cParams) subdeviceCtrlCmdI2cReadBuffer_DISPATCH(pSubdevice, pI2cParams)
#define subdeviceCtrlCmdI2cWriteBuffer(pSubdevice, pI2cParams) subdeviceCtrlCmdI2cWriteBuffer_DISPATCH(pSubdevice, pI2cParams)
#define subdeviceCtrlCmdI2cReadReg(pSubdevice, pI2cParams) subdeviceCtrlCmdI2cReadReg_DISPATCH(pSubdevice, pI2cParams)
@ -1736,10 +1738,10 @@ static inline NV_STATUS subdeviceCtrlCmdNvlinkEomControl_DISPATCH(struct Subdevi
return pSubdevice->__subdeviceCtrlCmdNvlinkEomControl__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdNvlinkL1Threshold_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_L1_THRESHOLD_PARAMS *pParams);
NV_STATUS subdeviceCtrlCmdNvlinkSetL1Threshold_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS *pParams);
static inline NV_STATUS subdeviceCtrlCmdNvlinkL1Threshold_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_L1_THRESHOLD_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdNvlinkL1Threshold__(pSubdevice, pParams);
static inline NV_STATUS subdeviceCtrlCmdNvlinkSetL1Threshold_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_SET_L1_THRESHOLD_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdNvlinkSetL1Threshold__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdNvlinkDirectConnectCheck_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS *pParams);
@ -1748,6 +1750,12 @@ static inline NV_STATUS subdeviceCtrlCmdNvlinkDirectConnectCheck_DISPATCH(struct
return pSubdevice->__subdeviceCtrlCmdNvlinkDirectConnectCheck__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdNvlinkGetL1Threshold_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS *pParams);
static inline NV_STATUS subdeviceCtrlCmdNvlinkGetL1Threshold_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_L1_THRESHOLD_PARAMS *pParams) {
return pSubdevice->__subdeviceCtrlCmdNvlinkGetL1Threshold__(pSubdevice, pParams);
}
NV_STATUS subdeviceCtrlCmdI2cReadBuffer_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_I2C_READ_BUFFER_PARAMS *pI2cParams);
static inline NV_STATUS subdeviceCtrlCmdI2cReadBuffer_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_I2C_READ_BUFFER_PARAMS *pI2cParams) {


@ -320,7 +320,7 @@ typedef struct
#else // PORT_MEM_TRACK_USE_CALLERINFO
#define PORT_MEM_CALLERINFO_PARAM
#define PORT_MEM_CALLERINFO_TYPE_PARAM
#define PORT_MEM_CALLERINFO_TYPE_PARAM void
#define PORT_MEM_CALLERINFO_COMMA_PARAM
#define PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM
#define PORT_MEM_CALLINFO_FUNC(f) f


@ -138,7 +138,7 @@ PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros32(NvU32 n)
#if NVCPU_IS_FAMILY_X86 && !defined(NV_MODS)
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter()
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void)
{
NvU32 lo;
NvU32 hi;
@ -148,7 +148,7 @@ PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter()
#define portUtilExReadTimestampCounter_SUPPORTED 1
#elif NVCPU_IS_AARCH64 && !defined(NV_MODS)
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter()
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void)
{
NvU64 ts = 0;
__asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (ts));
@ -157,7 +157,7 @@ PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter()
#define portUtilExReadTimestampCounter_SUPPORTED 1
#elif NVCPU_IS_PPC64LE && !defined(NV_MODS)
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter()
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void)
{
NvU64 ts;
__asm__ __volatile__ ("mfspr %0,268" : "=r"(ts));
@ -166,7 +166,7 @@ PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter()
#define portUtilExReadTimestampCounter_SUPPORTED 1
#elif NVCPU_IS_PPC && !defined(NV_MODS)
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter()
PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void)
{
NvU32 lo, hi, tmp;
__asm__ __volatile__ (


@ -26,6 +26,226 @@
#include "ctrl/ctrl0000/ctrl0000system.h"
/*
* Definitions for the static params table.
*/
/*!
* Layout of SysDev 2x data used for static config
*/
#define NVPCF_SYSDEV_STATIC_TABLE_VERSION_2X (0x20)
#define NVPCF_SYSDEV_STATIC_TABLE_HEADER_2X_SIZE_03 (0x03U)
#define NVPCF_SYSDEV_STATIC_TABLE_HEADER_2X_FMT_SIZE_03 ("3b")
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_SIZE_01 (0x01U)
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_FMT_SIZE_01 ("1b")
/*!
* Static system dev header table, unpacked
*/
typedef struct
{
/*
* System device Table Version.
*/
NvU32 version;
/*
* Size of device Table Header in bytes.
*/
NvU32 headerSize;
/*
* Size of common entry in bytes.
*/
NvU32 commonSize;
} SYSDEV_STATIC_TABLE_HEADER_2X;
/*!
* Static system dev common entry
*/
typedef struct
{
NvU32 param0;
} SYSDEV_STATIC_TABLE_COMMON_2X;
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_CPU_TYPE 3:0
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_CPU_TYPE_INTEL (0x00000000)
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_CPU_TYPE_AMD (0x00000001)
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_CPU_TYPE_NVIDIA (0x00000002)
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_GPU_TYPE 7:4
#define NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_PARAM0_GPU_TYPE_NVIDIA (0x00000000)
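/*
 * Illustrative sketch, not part of this change: how the PARAM0 bit-fields
 * above are consumed. The helpers below are hypothetical, assume the DRF_VAL
 * macro from nvmisc.h and NV_INLINE, and mirror how the parser later in this
 * change reads the field; e.g. param0 = 0x01 decodes to CPU_TYPE_AMD with
 * GPU_TYPE_NVIDIA.
 */
static NV_INLINE NvU32 sketchSysdevCpuType(NvU32 param0)
{
    // CPU type lives in bits 3:0 of param0
    return DRF_VAL(PCF_SYSDEV_STATIC_TABLE_COMMON_2X, _PARAM0, _CPU_TYPE, param0);
}
static NV_INLINE NvU32 sketchSysdevGpuType(NvU32 param0)
{
    // GPU type lives in bits 7:4 of param0
    return DRF_VAL(PCF_SYSDEV_STATIC_TABLE_COMMON_2X, _PARAM0, _GPU_TYPE, param0);
}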
/*!
* Layout of Controller 2x data used for static config
*/
#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_20 (0x20)
#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_21 (0x21)
#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_22 (0x22)
#define NVPCF_CONTROLLER_STATIC_TABLE_VERSION_23 (0x23)
// format for 2.0 and 2.1
#define NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V20_SIZE_05 (0x05U)
#define NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V20_FMT_SIZE_05 ("5b")
#define NVPCF_CONTROLLER_STATIC_TABLE_COMMON_V20_SIZE_02 (0x02U)
#define NVPCF_CONTROLLER_STATIC_TABLE_COMMON_V20_FMT_SIZE_02 ("1w")
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_SIZE_0F (0x0FU)
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FMT_SIZE_0F ("1b1w3d")
/*!
* Static system controller header table v2.0/2.1, unpacked
*/
typedef struct
{
/*
* System controller Table Version.
*/
NvU32 version;
/*
* Size of controller Table Header in bytes.
*/
NvU32 headerSize;
/*
* Size of controller Table Common/Global Entry in bytes.
*/
NvU32 commonSize;
/*
* Size of controller Table Entry in bytes.
*/
NvU32 entrySize;
/*
* Number of controller Entries
*/
NvU32 entryCount;
} CONTROLLER_STATIC_TABLE_HEADER_V20;
/*!
* Static system controller common/global entry v2.0/2.1, unpacked
*/
typedef struct
{
/*
* Base sampling period in ms
*/
NvU32 samplingPeriodms;
} CONTROLLER_STATIC_TABLE_COMMON_V20;
/*!
* Static system controller entry v2.0/2.1, unpacked
*/
typedef struct
{
/*
* System controller entry type specific flag (Flags0).
*/
NvU32 flags0;
/*
* Sampling Multiplier.
*/
NvU32 samplingMulti;
/*
* System controller entry filter parameters.
*/
NvU32 filterParams;
/*
* System controller entry Usage-Specific Parameter (Param0).
*/
NvU32 param0;
/*
* System controller entry Usage-Specific Parameter (Param1).
*/
NvU32 param1;
} CONTROLLER_STATIC_TABLE_ENTRY_V20;
// FLAGS0
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS 3:0
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS_DISABLED (0x00000000)
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS_PPAB (0x00000001)
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS_CTGP (0x00000002)
// Filter
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FILTER_TYPE 7:0
// filterType = EWMA
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FILTERPARAM_EWMA_WEIGHT 15:8
// filterType = MAX, others
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FILTERPARAM_WINDOW_SIZE 15:8
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FILTER_RESERVED 31:16
// Param0
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_PARAM0_QBOOST_INCREASE_GAIN 15:0
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_PARAM0_QBOOST_DECREASE_GAIN 31:16
// Param1
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_PARAM1_QBOOST_DC_SUPPORT 0:0
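/*
 * Worked example (illustrative only, not part of this change): with the
 * fields above, filterParams = 0x0000E800 decodes as FILTER_TYPE = 0 (EWMA)
 * with an EWMA weight of 0xE8 (232/256), and param0 = 0x10000E66 decodes as a
 * Qboost increase gain of 0x0E66 (3686, ~90% in UFXP4_12) and a decrease gain
 * of 0x1000 (4096, 100%).
 */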
// format for 2.2
#define NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V22_SIZE_04 (0x04U)
#define NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V22_FMT_SIZE_04 ("4b")
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_SIZE_05 (0x05U)
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FMT_SIZE_05 ("1b1d")
/*!
* Static system controller header table v2.2, unpacked
*/
typedef struct
{
/*
* System controller Table Version.
*/
NvU32 version;
/*
* Size of controller Table Header in bytes.
*/
NvU32 headerSize;
/*
* Size of controller Table Entry in bytes.
*/
NvU32 entrySize;
/*
* Number of controller Entries
*/
NvU32 entryCount;
} CONTROLLER_STATIC_TABLE_HEADER_V22;
/*!
* Static system controller entry v2.2, unpacked
*/
typedef struct
{
/*
* System controller entry type specific flag (Flags0).
*/
NvU32 flags0;
/*
* System controller entry Usage-Specific Parameter (Param0).
*/
NvU32 param0;
} CONTROLLER_STATIC_TABLE_ENTRY_V22;
// FLAGS0
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS 3:0
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS_DISABLED (0x00000000)
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS_PPAB (0x00000001)
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS_CTGP (0x00000002)
// Param0
#define NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_PARAM0_QBOOST_DC_SUPPORT 0:0
/*
* Definitions for the dynamic params table.
*/
@ -70,8 +290,14 @@ typedef struct
/*!
* Config DSM NVPCF 2x version specific defines
*/
/*
* Definitions for the dynamic params table.
*/
#define NVPCF_DYNAMIC_PARAMS_20_VERSION (0x20)
#define NVPCF_DYNAMIC_PARAMS_21_VERSION (0x21)
#define NVPCF_DYNAMIC_PARAMS_22_VERSION (0x22)
#define NVPCF_DYNAMIC_PARAMS_23_VERSION (0x23)
#define NVPCF_DYNAMIC_PARAMS_2X_HEADER_SIZE_05 (0x05U)
#define NVPCF_DYNAMIC_PARAMS_2X_HEADER_FMT_SIZE_05 ("5b")
#define NVPCF_DYNAMIC_PARAMS_2X_COMMON_SIZE_10 (0x10U)


@ -135,6 +135,7 @@
#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_RM_ERROR 0xFF100005 /* RW--V */
#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_VMIOP_ERROR 0xFF100006 /* RW--V */
#define NV_VGPU_MSG_RESULT_RPC_RESERVED_HANDLE 0xFF100007 /* RW--V */
#define NV_VGPU_MSG_RESULT_RPC_CUDA_PROFILING_DISABLED 0xFF100008 /* RW--V */
/* RPC-specific code in result for incomplete request */
#define NV_VGPU_MSG_RESULT_RPC_PENDING 0xFFFFFFFF /* RW--V */
/* shared union field */


@ -92,11 +92,11 @@ nvlogInit(void *pData)
return NV_OK;
}
void nvlogUpdate() {
void nvlogUpdate(void) {
}
NV_STATUS
nvlogDestroy()
nvlogDestroy(void)
{
NvU32 i;


@ -59,7 +59,7 @@ enum {
// nvDbgBreakpointEnabled - Returns true if triggering a breakpoint is allowed
//
NvBool osDbgBreakpointEnabled(void);
NvBool nvDbgBreakpointEnabled()
NvBool nvDbgBreakpointEnabled(void)
{
OBJSYS *pSys = SYS_GET_INSTANCE();
if (pSys != NULL)


@ -704,6 +704,7 @@ _kbifInitRegistryOverrides
NV_PRINTF(LEVEL_INFO, "allow peermapping reg key = %d\n", data32);
pKernelBif->peerMappingOverride = !!data32;
}
}
/*!


@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -1348,3 +1348,30 @@ kbusSetupUnbindFla_GA100
return status;
}
NV_STATUS
kbusGetFlaRange_GA100
(
OBJGPU *pGpu,
KernelBus *pKernelBus,
NvU64 *ucFlaBase,
NvU64 *ucFlaSize,
NvBool bIsConntectedToNvswitch
)
{
if (gpuIsSriovEnabled(pGpu) && bIsConntectedToNvswitch)
{
if (pKernelBus->flaInfo.bFlaRangeRegistered)
{
*ucFlaBase = pKernelBus->flaInfo.base;
*ucFlaSize = pKernelBus->flaInfo.size;
}
}
else // direct connected system
{
*ucFlaSize = gpuGetFlaVasSize_HAL(pGpu, NV_FALSE);
*ucFlaBase = pGpu->gpuInstance * (*ucFlaSize);
}
return NV_OK;
}
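/*
 * Illustrative sketch, not part of this change: in the direct-connected path
 * above each GPU instance receives a disjoint, equally sized FLA window. For
 * a hypothetical 64GB FLA VAS size:
 *   gpuInstance 0 -> base 0,     range [0GB,   64GB)
 *   gpuInstance 1 -> base 64GB,  range [64GB,  128GB)
 *   gpuInstance 2 -> base 128GB, range [128GB, 192GB)
 */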


@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -1595,3 +1595,36 @@ kbusSetupUnbindFla_GH100
return status;
}
NV_STATUS
kbusGetFlaRange_GH100
(
OBJGPU *pGpu,
KernelBus *pKernelBus,
NvU64 *ucFlaBase,
NvU64 *ucFlaSize,
NvBool bIsConntectedToNvswitch
)
{
if (!GPU_IS_NVSWITCH_DETECTED(pGpu))
{
*ucFlaSize = gpuGetFlaVasSize_HAL(pGpu, NV_FALSE);
*ucFlaBase = pGpu->gpuInstance * (*ucFlaSize);
}
else
{
FABRIC_VASPACE *pFabricVAS = dynamicCast(pGpu->pFabricVAS, FABRIC_VASPACE);
NvU64 ucFlaLimit;
if (pFabricVAS == NULL)
return NV_ERR_INVALID_STATE;
ucFlaLimit = fabricvaspaceGetUCFlaLimit(pFabricVAS);
if (ucFlaLimit == 0)
return NV_ERR_INVALID_STATE;
*ucFlaBase = fabricvaspaceGetUCFlaStart(pFabricVAS);
*ucFlaSize = ucFlaLimit - *ucFlaBase + 1;
}
return NV_OK;
}


@ -460,6 +460,9 @@ kbusStateUnload_GM107
NV_STATUS status = NV_OK;
KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu);
if (IS_VIRTUAL(pGpu) && !(flags & GPU_STATE_FLAGS_PRESERVING))
return NV_OK;
if ((pKernelBif != NULL)
&&
(!pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_READS_DISABLED) ||
@ -476,9 +479,12 @@ kbusStateUnload_GM107
{
if (!IS_GPU_GC6_STATE_ENTERING(pGpu))
{
status = kbusTeardownBar2CpuAperture_HAL(pGpu, pKernelBus, GPU_GFID_PF);
// Do not use BAR2 physical mode for bootstrapping BAR2 across S/R.
pKernelBus->bUsePhysicalBar2InitPagetable = NV_FALSE;
status = kbusTeardownBar2CpuAperture_HAL(pGpu, pKernelBus, GPU_GFID_PF);
if (!IS_VIRTUAL_WITH_SRIOV(pGpu))
{
// Do not use BAR2 physical mode for bootstrapping BAR2 across S/R.
pKernelBus->bUsePhysicalBar2InitPagetable = NV_FALSE;
}
}
}
else


@ -1242,3 +1242,12 @@ kbusIsGpuP2pAlive_IMPL
{
return (pKernelBus->totalP2pObjectsAliveRefCount > 0);
}
/**
* @brief Setup VF BAR2 during hibernate resume
*
* @param[in] pGpu
* @param[in] pKernelBus
* @param[in] flags
*/


@ -145,6 +145,8 @@ kchannelConstruct_IMPL
NvBool bTsgAllocated = NV_FALSE;
NvHandle hChanGrp = NV01_NULL_OBJECT;
RsResourceRef *pDeviceRef = NULL;
RsResourceRef *pVASpaceRef = NULL;
KernelGraphicsContext *pKernelGraphicsContext = NULL;
NvBool bMIGInUse;
KernelChannelGroup *pKernelChannelGroup = NULL;
NvU32 chID = ~0;
@ -694,7 +696,7 @@ kchannelConstruct_IMPL
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id %d for hClient %d hKernelChannel %d \n",
NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id 0x%x for hClient 0x%x hKernelChannel 0x%x \n",
chID, hClient, pResourceRef->hResource);
DBG_BREAKPOINT();
goto cleanup;
@ -729,7 +731,7 @@ kchannelConstruct_IMPL
if (status != NV_OK)
{
NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id %d for hClient %d hKernelChannel %d \n",
NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id 0x%x for hClient 0x%x hKernelChannel 0x%x \n",
chID, hClient, pResourceRef->hResource);
chID = ~0;
DBG_BREAKPOINT();
@ -852,8 +854,6 @@ kchannelConstruct_IMPL
// We depend on VASpace if it was provided
if (pChannelGpfifoParams->hVASpace != NV01_NULL_OBJECT)
{
RsResourceRef *pVASpaceRef = NULL;
NV_ASSERT_OK_OR_GOTO(status, clientGetResourceRef(pRsClient, pChannelGpfifoParams->hVASpace, &pVASpaceRef), cleanup);
NV_ASSERT_OR_ELSE(pVASpaceRef != NULL, status = NV_ERR_INVALID_OBJECT; goto cleanup);
@ -875,8 +875,6 @@ kchannelConstruct_IMPL
pKernelChannel->hKernelGraphicsContext = pKernelChannelGroupApi->hKernelGraphicsContext;
if (pKernelChannel->hKernelGraphicsContext != NV01_NULL_OBJECT)
{
KernelGraphicsContext *pKernelGraphicsContext;
NV_ASSERT_OK_OR_GOTO(status,
kgrctxFromKernelChannel(pKernelChannel, &pKernelGraphicsContext),
cleanup);
@ -920,6 +918,24 @@ cleanup:
_kchannelCleanupNotifyActions(pKernelChannel);
}
// Remove any dependencies we may have added; we don't want our destructor called when freeing anything below
if (pKernelGraphicsContext != NULL)
{
refRemoveDependant(RES_GET_REF(pKernelGraphicsContext), pResourceRef);
}
if (pKernelChannel->pKernelCtxShareApi != NULL)
{
refRemoveDependant(RES_GET_REF(pKernelChannel->pKernelCtxShareApi), pResourceRef);
}
if (pVASpaceRef != NULL)
{
refRemoveDependant(pVASpaceRef, pResourceRef);
}
if (bTsgAllocated)
{
refRemoveDependant(pChanGrpRef, pResourceRef);
}
if (bAddedToGroup)
{
kchangrpRemoveChannel(pGpu, pKernelChannelGroup, pKernelChannel);


@ -1058,6 +1058,7 @@ kfspSendBootCommands_GH100
}
pCotPayload = portMemAllocNonPaged(sizeof(NVDM_PAYLOAD_COT));
NV_CHECK_OR_RETURN(LEVEL_ERROR, pCotPayload != NULL, NV_ERR_NO_MEMORY);
portMemSet(pCotPayload, 0, sizeof(NVDM_PAYLOAD_COT));
frtsSize = NV_PGC6_AON_FRTS_INPUT_WPR_SIZE_SECURE_SCRATCH_GROUP_03_0_WPR_SIZE_1MB_IN_4K << 12;


@ -367,6 +367,7 @@ kfspReadMessage
}
pPacketBuffer = portMemAllocNonPaged(kfspGetRmChannelSize_HAL(pGpu, pKernelFsp));
NV_CHECK_OR_RETURN(LEVEL_ERROR, pPacketBuffer != NULL, NV_ERR_NO_MEMORY);
while ((packetState != MCTP_PACKET_STATE_END) && (packetState != MCTP_PACKET_STATE_SINGLE_PACKET))
{
@ -483,6 +484,7 @@ kfspSendPacket_IMPL
// Pad to align size to 4-bytes boundary since EMEMC increments by DWORDS
paddedSize = NV_ALIGN_UP(packetSize, sizeof(NvU32));
pBuffer = portMemAllocNonPaged(paddedSize);
NV_CHECK_OR_RETURN(LEVEL_ERROR, pBuffer != NULL, NV_ERR_NO_MEMORY);
portMemSet(pBuffer, 0, paddedSize);
portMemCopy(pBuffer, paddedSize, pPacket, paddedSize);
@ -537,6 +539,7 @@ kfspSendAndReadMessage_IMPL
// Allocate buffer of same size as channel
fspEmemRmChannelSize = kfspGetRmChannelSize_HAL(pGpu, pKernelFsp);
pBuffer = portMemAllocNonPaged(fspEmemRmChannelSize);
NV_CHECK_OR_RETURN(LEVEL_ERROR, pBuffer != NULL, NV_ERR_NO_MEMORY);
portMemSet(pBuffer, 0, fspEmemRmChannelSize);
//


@ -2181,13 +2181,15 @@ gpuStateLoad_IMPL
return status;
}
// It is a no-op on baremetal and inside non SRIOV guest.
rmStatus = gpuCreateDefaultClientShare_HAL(pGpu);
if (rmStatus != NV_OK)
if (!(flags & GPU_STATE_FLAGS_PRESERVING))
{
return rmStatus;
// It is a no-op on baremetal and inside non SRIOV guest.
rmStatus = gpuCreateDefaultClientShare_HAL(pGpu);
if (rmStatus != NV_OK)
{
return rmStatus;
}
}
NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
rmStatus = gpuStatePreLoad(pGpu, flags);
@ -2823,7 +2825,8 @@ gpuStateUnload_IMPL
rmStatus = gpuStatePostUnload(pGpu, flags);
NV_ASSERT_OK(rmStatus);
gpuDestroyDefaultClientShare_HAL(pGpu);
if(!(flags & GPU_STATE_FLAGS_PRESERVING))
gpuDestroyDefaultClientShare_HAL(pGpu);
// De-init SRIOV
gpuDeinitSriov_HAL(pGpu);


@ -502,6 +502,7 @@ gpuNotifySubDeviceEvent_IMPL
for (i = 0; i < pGpu->numSubdeviceBackReferences; i++)
{
Subdevice *pSubdevice = pGpu->pSubdeviceBackReferences[i];
NV_ASSERT_OR_RETURN_VOID(pSubdevice != NULL);
INotifier *pNotifier = staticCast(pSubdevice, INotifier);
GPU_RES_SET_THREAD_BC_STATE(pSubdevice);


@ -1139,7 +1139,7 @@ _kgspRpcRecvPoll
KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu);
NV_STATUS nvStatus;
RMTIMEOUT timeout;
NvU32 timeoutUs = GPU_TIMEOUT_DEFAULT;
NvU32 timeoutUs;
NvBool bSlowGspRpc = IS_EMULATION(pGpu) || IS_SIMULATION(pGpu);
//
@ -1172,12 +1172,20 @@ _kgspRpcRecvPoll
}
else
{
// We should only ever timeout this when GSP is in really bad state, so if it just
// happens to timeout on default timeout it should be OK for us to give it a little
// more time - make this timeout 1.5 of the default to allow some leeway.
NvU32 defaultus = pGpu->timeoutData.defaultus;
timeoutUs = defaultus + defaultus / 2;
if (IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu))
{
// Ensure at least 3.1s for vGPU-GSP before adding leeway (Bug 3928607)
timeoutUs = NV_MAX(3100 * 1000, defaultus) + (defaultus / 2);
}
else
{
// We should only ever timeout this when GSP is in really bad state, so if it just
// happens to timeout on default timeout it should be OK for us to give it a little
// more time - make this timeout 1.5 of the default to allow some leeway.
timeoutUs = defaultus + defaultus / 2;
}
}
NV_ASSERT(rmDeviceGpuLockIsOwner(pGpu->gpuInstance));
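/*
 * Illustrative worked example, not part of this change: with a hypothetical
 * default timeout of 4s, both paths above yield 4s + 2s = 6s; with a 2s
 * default, the vGPU-GSP path still guarantees NV_MAX(3.1s, 2s) + 1s = 4.1s,
 * while the non-vGPU path would allow only 3s.
 */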


@ -124,7 +124,7 @@ kmigmgrMakeCIReference_IMPL
/*! @brief create a Ref referencing no GI/CI */
MIG_INSTANCE_REF
kmigmgrMakeNoMIGReference_IMPL()
kmigmgrMakeNoMIGReference_IMPL(void)
{
MIG_INSTANCE_REF ref = { NULL, NULL };
return ref;


@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2018-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a


@ -178,7 +178,7 @@ initAPIOSFunctionPointers(OBJOS *pOS)
//
// Function to find the maximum number of cores in the system
//
NvU32 osGetMaximumCoreCount()
NvU32 osGetMaximumCoreCount(void)
{
//
// Windows provides an API to query this that supports CPU hotadd that our
@ -599,7 +599,7 @@ osMemGetFilter(NvUPtr address)
* full call stack that is much helpful for debugging.
*/
void osPagedSegmentAccessCheck()
void osPagedSegmentAccessCheck(void)
{
OBJSYS *pSys = SYS_GET_INSTANCE();
OBJOS *pOS = SYS_GET_OS(pSys);


@ -3364,7 +3364,7 @@ void initNbsiObject(NBSI_OBJ *pNbsiObj)
}
}
NBSI_OBJ *getNbsiObject()
NBSI_OBJ *getNbsiObject(void)
{
OBJSYS *pSys = SYS_GET_INSTANCE();
OBJPFM *pPfm = SYS_GET_PFM(pSys);


@ -69,6 +69,26 @@
#include "gpu/gsp/kernel_gsp.h"
#include "power/gpu_boost_mgr.h"
#define CONFIG_2X_BUFF_SIZE_MIN (2)
//
// Controller Table v2.2 has removed some params, set them using these
// default values instead
//
// EWMA retention weight (232/256) results in tau being 10x the sampling period
//
#define CONTROLLER_GRP_DEFAULT_BASE_SAMPLING_PERIOD_MS (100)
#define CONTROLLER_GRP_DEFAULT_SAMPLING_MULTIPLIER (1)
#define CONTROLLER_GRP_DEFAULT_EWMA_WEIGHT (232)
#define CONTROLLER_GRP_DEFAULT_INCREASE_GAIN_UFXP4_12 (3686)
#define CONTROLLER_GRP_DEFAULT_DECREASE_GAIN_UFXP4_12 (4096)
/*!
* Define the filter types.
*/
#define NVPCF0100_CTRL_CONTROLLER_FILTER_TYPE_EMWA (0)
#define NVPCF0100_CTRL_CONTROLLER_FILTER_TYPE_MOVING_MAX (1)
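/*
 * Worked check of the defaults above (illustrative only, not part of this
 * change): an EWMA with retention weight w = 232/256 ~= 0.906 has a time
 * constant tau = -1/ln(w) ~= 10 sampling periods, which is where the
 * "tau being 10x the sampling period" comment comes from. Likewise the
 * UFXP4_12 gains are 3686/4096 ~= 90% (increase) and 4096/4096 = 100%
 * (decrease).
 */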
NV_STATUS
cliresConstruct_IMPL
(
@ -1899,6 +1919,580 @@ cliresCtrlCmdGpuAcctGetAccountingPids_IMPL
}
/*!
* Helper to build config data from unpacked table data,
* static config v2.0/2.1.
*
* @param[in] pEntry Unpacked data from static table
* @param[out] pParams Structure to fill parsed info
*
*/
static void
_controllerBuildConfig_StaticTable_v20
(
CONTROLLER_STATIC_TABLE_ENTRY_V20 *pEntry,
NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
pParams->samplingMulti =
(NvU16)pEntry->samplingMulti;
pParams->filterType =
(NvU8)DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _FILTER, _TYPE,
pEntry->filterParams);
pParams->filterReserved =
(NvU16)DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _FILTER, _RESERVED,
pEntry->filterParams);
// Get the filter param based on filter type
switch (pParams->filterType)
{
case NVPCF0100_CTRL_CONTROLLER_FILTER_TYPE_EMWA:
{
pParams->filterParam.weight =
(NvU8)DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _FILTERPARAM, _EWMA_WEIGHT,
pEntry->filterParams);
break;
}
case NVPCF0100_CTRL_CONTROLLER_FILTER_TYPE_MOVING_MAX:
default:
{
pParams->filterParam.windowSize =
(NvU8)DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _FILTERPARAM, _WINDOW_SIZE,
pEntry->filterParams);
break;
}
}
}
/*!
* Helper to build Qboost's config data from unpacked table data,
* static config v2.0/2.1.
*
* @param[in] pEntry Unpacked data from static table
* @param[out] pParams Structure to fill parsed info
*
*/
static void
_controllerBuildQboostConfig_StaticTable_v20
(
CONTROLLER_STATIC_TABLE_ENTRY_V20 *pEntry,
NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
pParams->bIsBoostController = NV_TRUE;
// Type-specific param0
pParams->incRatio =
(NvUFXP4_12)DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _PARAM0, _QBOOST_INCREASE_GAIN,
pEntry->param0);
pParams->decRatio =
(NvUFXP4_12)DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _PARAM0, _QBOOST_DECREASE_GAIN,
pEntry->param0);
// Type-specific param1
pParams->bSupportBatt =
(NvBool)(DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _PARAM1, _QBOOST_DC_SUPPORT,
pEntry->param1));
}
/*!
* Helper to build config data from unpacked table data,
* static config v2.2.
*
* @param[in] pEntry Unpacked data from static table
* @param[out] pParams Structure to fill parsed info
*
*/
static void
_controllerBuildConfig_StaticTable_v22
(
CONTROLLER_STATIC_TABLE_ENTRY_V22 *pEntry,
NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
pParams->samplingMulti = CONTROLLER_GRP_DEFAULT_SAMPLING_MULTIPLIER;
pParams->filterType = NVPCF0100_CTRL_CONTROLLER_FILTER_TYPE_EMWA;
pParams->filterParam.weight = CONTROLLER_GRP_DEFAULT_EWMA_WEIGHT;
}
/*!
* Helper to build Qboost's config data from unpacked table data,
* static config v2.2.
*
* @param[in] pEntry Unpacked data from static table
* @param[out] pParams Structure to fill parsed info
*
*/
static void
_controllerBuildQboostConfig_StaticTable_v22
(
CONTROLLER_STATIC_TABLE_ENTRY_V22 *pEntry,
NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
pParams->bIsBoostController = NV_TRUE;
// Use increase gain of 90%, decrease gain of 100%
pParams->incRatio = CONTROLLER_GRP_DEFAULT_INCREASE_GAIN_UFXP4_12;
pParams->decRatio = CONTROLLER_GRP_DEFAULT_DECREASE_GAIN_UFXP4_12;
// Type-specific param0
pParams->bSupportBatt =
(NvBool)(DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V22, _PARAM0, _QBOOST_DC_SUPPORT,
pEntry->param0));
}
/*!
* Helper to build CTGP controller's config data from unpacked table data,
* static config 2x version. Re-uses struct types from normal Qboost
* controller.
*
* @param[out] pParams Structure to fill parsed info
*
*/
static void
_controllerBuildCtgpConfig_StaticTable_2x
(
NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
//
// Sampling period really only affects the delay in handling
// CTGP changes, so just set sampling period multiplier to 1
//
// Force EWMA filter type with weight 0, since currently the reading
// and filtering of CPU power is still done
//
pParams->samplingMulti = CONTROLLER_GRP_DEFAULT_SAMPLING_MULTIPLIER;
pParams->filterType = NVPCF0100_CTRL_CONTROLLER_FILTER_TYPE_EMWA;
pParams->filterParam.weight = 0;
// Inform apps that there is no Dynamic Boost support
pParams->bIsBoostController = NV_FALSE;
pParams->incRatio = 0;
pParams->decRatio = 0;
pParams->bSupportBatt = NV_FALSE;
}
/*!
* Attempts to parse the static controller table, as v2.0 or v2.1.
*
* @param[in] pData Pointer to start (header) of the table
* @param[in] dataSize Size of entire table, including header
* @param[out] pEntryCount Number of controller entries found
* @param[out] pParams Structure to fill parsed info
*
* @return NV_OK
* Table was successfully parsed; caller should remember to free object array
* @return NV_ERR_NOT_SUPPORTED
* Failed to detect correct table version, no output
* @return Other errors
* NV_ERR_INVALID_DATA or errors propagated up from functions called
*/
static NV_STATUS
_controllerParseStaticTable_v20
(
NvU8 *pData,
NvU32 dataSize,
NvU8 *pEntryCount,
NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
const char *pHeaderFmt = NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V20_FMT_SIZE_05;
const char *pCommonFmt = NVPCF_CONTROLLER_STATIC_TABLE_COMMON_V20_FMT_SIZE_02;
const char *pEntryFmt = NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FMT_SIZE_0F;
NvU32 loop = 0;
NV_STATUS status = NV_OK;
CONTROLLER_STATIC_TABLE_HEADER_V20 header = { 0 };
CONTROLLER_STATIC_TABLE_COMMON_V20 common = { 0 };
// Check if we can safely parse the header
if (dataSize < NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V20_SIZE_05)
{
status = NV_ERR_NOT_SUPPORTED;
goto _controllerParseStaticTable_v20_exit;
}
// Unpack the table header
configReadStructure(pData, &header, 0, pHeaderFmt);
switch (header.version)
{
case NVPCF_CONTROLLER_STATIC_TABLE_VERSION_20:
case NVPCF_CONTROLLER_STATIC_TABLE_VERSION_21:
{
NvU32 expectedSize;
// check rest of header
if ((header.headerSize != NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V20_SIZE_05)
|| (header.commonSize != NVPCF_CONTROLLER_STATIC_TABLE_COMMON_V20_SIZE_02)
|| (header.entrySize != NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_SIZE_0F))
{
status = NV_ERR_INVALID_DATA;
goto _controllerParseStaticTable_v20_exit;
}
// must have at least one entry
if (header.entryCount == 0)
{
status = NV_ERR_INVALID_DATA;
goto _controllerParseStaticTable_v20_exit;
}
// check data size
expectedSize = header.headerSize + header.commonSize
+ (header.entryCount * header.entrySize);
if (expectedSize != dataSize)
{
status = NV_ERR_INVALID_DATA;
goto _controllerParseStaticTable_v20_exit;
}
break;
}
default:
{
status = NV_ERR_NOT_SUPPORTED;
goto _controllerParseStaticTable_v20_exit;
}
}
// Unpack the common data, base sampling period cannot be 0
configReadStructure(pData, &common, header.headerSize, pCommonFmt);
if (common.samplingPeriodms == 0)
{
status = NV_ERR_INVALID_DATA;
goto _controllerParseStaticTable_v20_exit;
}
pParams->samplingPeriodmS = (NvU16)common.samplingPeriodms;
// Parse each entry
for (loop = 0; loop < header.entryCount; loop++)
{
CONTROLLER_STATIC_TABLE_ENTRY_V20 entry = { 0 };
NvU32 offset = header.headerSize + header.commonSize +
(loop * NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_SIZE_0F);
// Unpack the controller entry
configReadStructure(pData, &entry, offset, pEntryFmt);
switch (DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V20, _FLAGS0, _CLASS,
entry.flags0))
{
case NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS_PPAB:
{
_controllerBuildConfig_StaticTable_v20(&entry, pParams);
_controllerBuildQboostConfig_StaticTable_v20(&entry, pParams);
break;
}
case NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS_CTGP:
{
_controllerBuildCtgpConfig_StaticTable_2x(pParams);
break;
}
case NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V20_FLAGS0_CLASS_DISABLED:
default:
{
}
}
}
pParams->version = (NvU8)header.version;
*pEntryCount = (NvU8)header.entryCount;
_controllerParseStaticTable_v20_exit:
return status;
}
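/*
 * Illustrative sketch, not part of this change: the smallest blob the v2.0
 * parser above accepts, with made-up but self-consistent values (a "5b"
 * header, a "1w" common block and one "1b1w3d" entry; multi-byte fields are
 * little-endian).
 */
static const NvU8 sketchControllerTable_v20[] =
{
    0x20, 0x05, 0x02, 0x0F, 0x01,   // header: v2.0, headerSize 5, commonSize 2, entrySize 15, 1 entry
    0x64, 0x00,                     // common: base sampling period = 100 ms
    0x01,                           // entry 0 flags0: CLASS = PPAB
    0x01, 0x00,                     // samplingMulti = 1
    0x00, 0xE8, 0x00, 0x00,         // filterParams: EWMA filter, weight 232
    0x66, 0x0E, 0x00, 0x10,         // param0: increase gain 3686, decrease gain 4096
    0x01, 0x00, 0x00, 0x00,         // param1: DC support enabled
};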
/*!
* Attempts to parse the static controller table, as v2.2.
*
* @param[in] pData Pointer to start (header) of the table
* @param[in] dataSize Size of entire table, including header
* @param[out] pEntryCount Number of controller entries found
* @param[out] pParams Structure to fill parsed info
*
* @return NV_OK
* Table was successfully parsed; caller should remember to free object array
* @return NV_ERR_NOT_SUPPORTED
* Failed to detect correct table version, no output
* @return Other errors
* NV_ERR_INVALID_DATA or errors propagated up from functions called
*/
static NV_STATUS
_controllerParseStaticTable_v22
(
NvU8 *pData,
NvU32 dataSize,
NvU8 *pEntryCount,
NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
const char *pHeaderFmt = NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V22_FMT_SIZE_04;
const char *pEntryFmt = NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FMT_SIZE_05;
NV_STATUS status = NV_OK;
NvU32 loop = 0;
CONTROLLER_STATIC_TABLE_HEADER_V22 header = { 0 };
// Check if we can safely parse the header
if (dataSize < NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V22_SIZE_04)
{
status = NV_ERR_NOT_SUPPORTED;
goto _controllerParseStaticTable_v22_exit;
}
// Unpack the table header
configReadStructure(pData, &header, 0, pHeaderFmt);
switch (header.version)
{
case NVPCF_CONTROLLER_STATIC_TABLE_VERSION_23:
case NVPCF_CONTROLLER_STATIC_TABLE_VERSION_22:
{
NvU32 expectedSize;
// check rest of header
if ((header.headerSize != NVPCF_CONTROLLER_STATIC_TABLE_HEADER_V22_SIZE_04)
|| (header.entrySize != NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_SIZE_05))
{
status = NV_ERR_INVALID_DATA;
goto _controllerParseStaticTable_v22_exit;
}
// must have at least one entry
if (header.entryCount == 0)
{
status = NV_ERR_INVALID_DATA;
goto _controllerParseStaticTable_v22_exit;
}
// check data size
expectedSize = header.headerSize + (header.entryCount * header.entrySize);
if (expectedSize != dataSize)
{
status = NV_ERR_INVALID_DATA;
goto _controllerParseStaticTable_v22_exit;
}
break;
}
default:
{
status = NV_ERR_NOT_SUPPORTED;
goto _controllerParseStaticTable_v22_exit;
}
}
// Parse each entry
for (loop = 0; loop < header.entryCount; loop++)
{
CONTROLLER_STATIC_TABLE_ENTRY_V22 entry = { 0 };
NvU32 offset = header.headerSize +
(loop * NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_SIZE_05);
// Unpack the controller entry
configReadStructure(pData, &entry, offset, pEntryFmt);
switch (DRF_VAL(PCF_CONTROLLER_STATIC_TABLE_ENTRY_V22, _FLAGS0, _CLASS,
entry.flags0))
{
case NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS_PPAB:
{
_controllerBuildConfig_StaticTable_v22(&entry, pParams);
_controllerBuildQboostConfig_StaticTable_v22(&entry, pParams);
break;
}
case NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS_CTGP:
{
_controllerBuildCtgpConfig_StaticTable_2x(pParams);
break;
}
case NVPCF_CONTROLLER_STATIC_TABLE_ENTRY_V22_FLAGS0_CLASS_DISABLED:
default:
{
}
}
}
pParams->version = (NvU8)header.version;
pParams->samplingPeriodmS = CONTROLLER_GRP_DEFAULT_BASE_SAMPLING_PERIOD_MS;
*pEntryCount = (NvU8)header.entryCount;
_controllerParseStaticTable_v22_exit:
return status;
}
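/*
 * Illustrative sketch, not part of this change: the smallest blob the v2.2
 * parser above accepts (a "4b" header plus one "1b1d" entry), with made-up
 * values; param0 is little-endian.
 */
static const NvU8 sketchControllerTable_v22[] =
{
    0x22, 0x04, 0x05, 0x01,         // header: v2.2, headerSize 4, entrySize 5, 1 entry
    0x01,                           // entry 0 flags0: CLASS = PPAB
    0x01, 0x00, 0x00, 0x00,         // param0: DC support enabled
};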
static NV_STATUS
_sysDeviceParseStaticTable_2x
(
NvU8 *pData,
NvU32 *dataSize,
NvU32 *controllerTableOffset,
NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
NV_STATUS status = NV_OK;
NvU32 deviceTableOffset = 0;
SYSDEV_STATIC_TABLE_HEADER_2X sysdevHeader = { 0 };
SYSDEV_STATIC_TABLE_COMMON_2X common = { 0 };
const char *pSzSysDevHeaderFmt =
NVPCF_SYSDEV_STATIC_TABLE_HEADER_2X_FMT_SIZE_03;
const char *pSzCommonFmt =
NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_FMT_SIZE_01;
// Unpack the table header
configReadStructure(pData, &sysdevHeader, deviceTableOffset, pSzSysDevHeaderFmt);
// Check the header version and sizes
if ((sysdevHeader.version != NVPCF_SYSDEV_STATIC_TABLE_VERSION_2X) ||
(sysdevHeader.headerSize != NVPCF_SYSDEV_STATIC_TABLE_HEADER_2X_SIZE_03) ||
(sysdevHeader.commonSize != NVPCF_SYSDEV_STATIC_TABLE_COMMON_2X_SIZE_01))
{
NV_PRINTF(LEVEL_ERROR, "NVPCF: %s: Unsupported header\n",
__FUNCTION__);
status = NV_ERR_INVALID_DATA;
goto _sysDeviceParseStaticTable_2x_exit;
}
// Update controller table pointer based on sysdev header data
*controllerTableOffset = deviceTableOffset + sysdevHeader.headerSize + sysdevHeader.commonSize;
configReadStructure(pData,
&common,
deviceTableOffset + sysdevHeader.headerSize,
pSzCommonFmt);
pParams->cpuType = (DRF_VAL(PCF_SYSDEV_STATIC_TABLE_COMMON_2X, _PARAM0, _CPU_TYPE,
common.param0));
pParams->gpuType = (DRF_VAL(PCF_SYSDEV_STATIC_TABLE_COMMON_2X, _PARAM0, _GPU_TYPE,
common.param0));
_sysDeviceParseStaticTable_2x_exit:
return status;
}
static NV_STATUS
_controllerParseStaticTable_2x
(
NvU8 *pData,
NvU32 dataSize,
NV0000_CTRL_CMD_SYSTEM_NVPCF_GET_POWER_MODE_INFO_PARAMS *pParams
)
{
NvU32 controllerTableOffset = 0;
NvU8 entryCount = 0;
NV_STATUS status = NV_OK;
// Make sure we can safely parse the sysdev header
if (dataSize < NVPCF_SYSDEV_STATIC_TABLE_HEADER_2X_SIZE_03)
{
status = NV_ERR_NOT_SUPPORTED;
goto _controllerParseStaticTable_exit;
}
_sysDeviceParseStaticTable_2x(pData, &dataSize, &controllerTableOffset, pParams);
// Make sure data size is at least the controller table offset
if (dataSize < controllerTableOffset)
{
status = NV_ERR_NOT_SUPPORTED;
goto _controllerParseStaticTable_exit;
}
pData += controllerTableOffset;
dataSize -= controllerTableOffset;
status = _controllerParseStaticTable_v22(pData,
dataSize,
&entryCount,
pParams);
if (status == NV_ERR_NOT_SUPPORTED)
{
status = _controllerParseStaticTable_v20(pData,
dataSize,
&entryCount,
pParams);
}
if (status != NV_OK)
{
goto _controllerParseStaticTable_exit;
}
_controllerParseStaticTable_exit:
return status;
}
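/*
 * Illustrative summary, not part of this change: layout of the static config
 * blob handled above, after the caller has validated and stripped the
 * trailing checksum byte.
 *   bytes 0..2  sysdev header 2x (version 0x20, headerSize 3, commonSize 1)
 *   byte  3     sysdev common param0 (CPU type in bits 3:0, GPU type in 7:4)
 *   bytes 4..   controller table, parsed as v2.2 first, then v2.0/2.1 fallback
 */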
/*!
* Helper function to validate the config static data that can be
* received from various sources, using a one-byte two's complement
* checksum. The computed checksum is matched against the last byte of
* the data, where the original checksum byte is stored.
*
* @param[in/out] pData NvU8 data buffer pointer
* @param[in] pDataSize NvU32 pointer to the data size in bytes
*
* @return NV_OK
* Checksum successfully matched.
*
* @return NV_ERR_INVALID_POINTER
* Invalid input data buffer pointer.
*
* @return NV_ERR_INVALID_DATA
* Checksum failure or wrong data size.
*/
static NV_STATUS
_validateConfigStaticTable_2x
(
NvU8 *pData,
NvU16 *pDataSize
)
{
NV_STATUS status = NV_OK;
NvU8 checkSum;
NvU8 idx;
NV_ASSERT_OR_RETURN(pData != NULL, NV_ERR_INVALID_POINTER);
NV_ASSERT_OR_RETURN(pDataSize != NULL, NV_ERR_INVALID_POINTER);
//
// Check data size length for static2x data. Must be min 2 bytes
// (CONFIG_2X_BUFF_SIZE_MIN) including 1 byte for checksum. The
// max allowed for static2x is CONFIG_2X_BUFF_SIZE_MAX.
//
if ((*pDataSize < CONFIG_2X_BUFF_SIZE_MIN) ||
(*pDataSize > NVPCF0100_CTRL_CONFIG_2X_BUFF_SIZE_MAX))
{
status = NV_ERR_INVALID_DATA;
goto validateConfigStaticTable_2x_exit;
}
checkSum = 0;
for (idx = 0; idx < (*pDataSize - 1); idx++)
{
checkSum += pData[idx];
}
checkSum = (~checkSum) + 1;
// Match with the original checksum
if (checkSum != pData[*pDataSize - 1])
{
status = NV_ERR_INVALID_DATA;
goto validateConfigStaticTable_2x_exit;
}
validateConfigStaticTable_2x_exit:
return status;
}
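/*
 * Illustrative sketch, not part of this change: how the one-byte two's
 * complement checksum verified above is produced. Appending this value as the
 * last byte makes the sum of all bytes, including the checksum, 0 modulo 256.
 */
static NvU8 sketchComputeChecksum2x(const NvU8 *pPayload, NvU16 payloadSize)
{
    NvU8  sum = 0;
    NvU16 idx;

    for (idx = 0; idx < payloadSize; idx++)
    {
        sum += pPayload[idx];
    }
    return (NvU8)(~sum + 1);
}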
NV_STATUS
cliresCtrlCmdSystemNVPCFGetPowerModeInfo_IMPL
(
@ -2026,7 +2620,7 @@ cliresCtrlCmdSystemNVPCFGetPowerModeInfo_IMPL
portMemSet(&header, 0, sizeof(header));
header.version = NVPCF_DYNAMIC_PARAMS_20_VERSION;
header.version = pParams->version;
header.headerSize = NVPCF_DYNAMIC_PARAMS_2X_HEADER_SIZE_05;
header.commonSize = NVPCF_DYNAMIC_PARAMS_2X_COMMON_SIZE_10;
header.entrySize = NVPCF_DYNAMIC_PARAMS_2X_ENTRY_SIZE_1C;
@ -2073,6 +2667,12 @@ cliresCtrlCmdSystemNVPCFGetPowerModeInfo_IMPL
// Unpack the header part
configReadStructure(pData, (void *)&headerOut, 0, pSzHeaderFmt);
if (headerOut.version != pParams->version)
{
status = NV_ERR_INVALID_DATA;
goto nvpcf2xGetDynamicParams_exit;
}
if ((headerOut.headerSize != NVPCF_DYNAMIC_PARAMS_2X_HEADER_SIZE_05) ||
(headerOut.commonSize != NVPCF_DYNAMIC_PARAMS_2X_COMMON_SIZE_10) ||
(headerOut.entrySize != NVPCF_DYNAMIC_PARAMS_2X_ENTRY_SIZE_1C))
@ -2127,8 +2727,51 @@ cliresCtrlCmdSystemNVPCFGetPowerModeInfo_IMPL
nvpcf2xGetDynamicParams_exit:
portMemFree(pData);
break;
}
case NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_STATIC_CASE:
{
NvU8 *pData = NULL;
NvU16 dataSize = NVPCF0100_CTRL_CONFIG_2X_BUFF_SIZE_MAX;
pData = portMemAllocNonPaged(dataSize);
if ((rc = pOS->osCallACPI_DSM(pGpu,
ACPI_DSM_FUNCTION_NVPCF_2X,
NVPCF0100_CTRL_CONFIG_DSM_2X_FUNC_GET_STATIC_CONFIG_TABLES,
(NvU32 *)pData,
&dataSize)) != NV_OK)
{
NV_PRINTF(LEVEL_WARNING,
"Unable to retrieve NVPCF Static data. Possibly not supported by SBIOS"
"rc = %x\n", rc);
status = NV_ERR_NOT_SUPPORTED;
goto nvpcf2xGetStaticParams_exit;
}
status = _validateConfigStaticTable_2x(pData, &dataSize);
if (NV_OK != status)
{
NV_PRINTF(LEVEL_WARNING, "Config Static Data checksum failed\n");
status = NV_ERR_NOT_SUPPORTED;
goto nvpcf2xGetStaticParams_exit;
}
// Subtract 1 byte for the checksum
dataSize--;
status = _controllerParseStaticTable_2x(pData, dataSize, pParams);
if (NV_OK != status)
{
status = NV_ERR_NOT_SUPPORTED;
}
nvpcf2xGetStaticParams_exit:
portMemFree(pData);
break;
}
default:
{
NV_PRINTF(LEVEL_INFO, "Inavlid NVPCF subFunc : 0x%x\n", pParams->subFunc);


@ -662,6 +662,12 @@ NV_STATUS serverControl_ValidateCookie
OBJGPU *pGpu;
CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
if (pCallContext == NULL)
{
NV_PRINTF(LEVEL_ERROR, "Calling context is NULL!\n");
return NV_ERR_INVALID_PARAMETER;
}
if (RMCFG_FEATURE_PLATFORM_GSP)
{
pGpu = gpumgrGetSomeGpu();


@ -125,7 +125,7 @@ NvBool rmapiCmdIsCacheable(NvU32 cmd, NvBool bAllowInternal)
return rmapiControlIsCacheable(flags, accessRight, bAllowInternal);
}
NV_STATUS rmapiControlCacheInit()
NV_STATUS rmapiControlCacheInit(void)
{
#if defined(DEBUG)
RmapiControlCache.mode = NV0000_CTRL_SYSTEM_RMCTRL_CACHE_MODE_CTRL_MODE_VERIFY_ONLY;
@ -492,4 +492,4 @@ void rmapiControlCacheSetMode(NvU32 mode)
NvU32 rmapiControlCacheGetMode(void)
{
return RmapiControlCache.mode;
}
}


@ -40,7 +40,7 @@ static PORT_STATE portState;
/// @todo Add better way to initialize all modules
NV_STATUS portInitialize()
NV_STATUS portInitialize(void)
{
if (PORT_INC(portState.initCount) == 1)
{
@ -66,7 +66,7 @@ NV_STATUS portInitialize()
return NV_OK;
}
void portShutdown()
void portShutdown(void)
{
if (PORT_DEC(portState.initCount) == 0)
{
@ -92,7 +92,7 @@ void portShutdown()
}
}
NvBool portIsInitialized()
NvBool portIsInitialized(void)
{
return portState.initCount > 0;
}


@ -53,7 +53,7 @@ struct PORT_CRYPTO_PRNG
};
PORT_CRYPTO_PRNG *portCryptoDefaultGenerator;
void portCryptoInitialize()
void portCryptoInitialize(void)
{
NvU64 seed;
#if defined(PORT_CRYPTO_PRNG_SEED)
@ -73,7 +73,7 @@ void portCryptoInitialize()
portCryptoPseudoRandomSetSeed(seed);
}
void portCryptoShutdown()
void portCryptoShutdown(void)
{
portCryptoPseudoRandomGeneratorDestroy(portCryptoDefaultGenerator);
portCryptoDefaultGenerator = NULL;
@ -174,12 +174,12 @@ void portCryptoPseudoRandomSetSeed(NvU64 seed)
portCryptoDefaultGenerator = portCryptoPseudoRandomGeneratorCreate(seed);
}
NvU32 portCryptoPseudoRandomGetU32()
NvU32 portCryptoPseudoRandomGetU32(void)
{
return portCryptoPseudoRandomGeneratorGetU32(portCryptoDefaultGenerator);
}
NvU64 portCryptoPseudoRandomGetU64()
NvU64 portCryptoPseudoRandomGetU64(void)
{
return portCryptoPseudoRandomGeneratorGetU64(portCryptoDefaultGenerator);
}


@ -34,45 +34,15 @@
#error "DEBUG module must be present for memory tracking"
#endif
#if !PORT_IS_MODULE_SUPPORTED(atomic)
#error "ATOMIC module must be present for memory tracking"
#endif
#if PORT_MEM_TRACK_USE_LIMIT
#include "os/os.h"
#define PORT_MEM_LIMIT_MAX_PIDS 32
#endif
#if NVOS_IS_LIBOS
#define PORT_MEM_THREAD_SAFE_ALLOCATIONS 0
#else
#define PORT_MEM_THREAD_SAFE_ALLOCATIONS 1
#endif
#if PORT_MEM_THREAD_SAFE_ALLOCATIONS && !PORT_IS_MODULE_SUPPORTED(atomic)
#error "ATOMIC module must be present for memory tracking"
#endif
#if PORT_MEM_THREAD_SAFE_ALLOCATIONS
#define PORT_MEM_ATOMIC_ADD_SIZE portAtomicAddSize
#define PORT_MEM_ATOMIC_SUB_SIZE portAtomicSubSize
#define PORT_MEM_ATOMIC_DEC_U32 portAtomicDecrementU32
#define PORT_MEM_ATOMIC_INC_U32 portAtomicIncrementU32
#define PORT_MEM_ATOMIC_SET_U32 portAtomicSetU32
#define PORT_MEM_ATOMIC_CAS_SIZE portAtomicCompareAndSwapSize
#define PORT_MEM_ATOMIC_CAS_U32 portAtomicCompareAndSwapU32
#else
//
// We can just stub out the atomic operations for non-atomic ones and not waste
// waste cycles on synchronization
//
#define PORT_MEM_ATOMIC_ADD_SIZE(pVal, val) (*((NvSPtr *)pVal) += val)
#define PORT_MEM_ATOMIC_SUB_SIZE(pVal, val) (*((NvSPtr *)pVal) -= val)
#define PORT_MEM_ATOMIC_DEC_U32(pVal) (--(*((NvU32 *)pVal)))
#define PORT_MEM_ATOMIC_INC_U32(pVal) (++(*((NvU32 *)pVal)))
#define PORT_MEM_ATOMIC_SET_U32(pVal, val) (*((NvU32 *)pVal) = val)
#define PORT_MEM_ATOMIC_CAS_SIZE(pVal, newVal, oldVal) \
((*pVal == oldVal) ? ((*((NvSPtr *)pVal) = newVal), NV_TRUE) : NV_FALSE)
#define PORT_MEM_ATOMIC_CAS_U32(pVal, newVal, oldVal) \
((*pVal == oldVal) ? ((*((NvU32 *)pVal) = newVal), NV_TRUE) : NV_FALSE)
#endif // !PORT_MEM_THREAD_SAFE_ALLOCATIONS
struct PORT_MEM_ALLOCATOR_IMPL
{
PORT_MEM_ALLOCATOR_TRACKING tracking;
@ -108,11 +78,11 @@ static NV_STATUS portSyncSpinlockInitialize(PORT_SPINLOCK *pSpinlock)
}
static void portSyncSpinlockAcquire(PORT_SPINLOCK *pSpinlock)
{
while (!PORT_MEM_ATOMIC_CAS_U32(pSpinlock, 1, 0));
while (!portAtomicCompareAndSwapU32(pSpinlock, 1, 0));
}
static void portSyncSpinlockRelease(PORT_SPINLOCK *pSpinlock)
{
PORT_MEM_ATOMIC_SET_U32(pSpinlock, 0);
portAtomicSetU32(pSpinlock, 0);
}
static void portSyncSpinlockDestroy(PORT_SPINLOCK *pSpinlock)
{
@ -180,13 +150,13 @@ _portMemCounterInc
NvU32 activeAllocs;
NvLength activeSize = 0;
activeAllocs = PORT_MEM_ATOMIC_INC_U32(&pCounter->activeAllocs);
PORT_MEM_ATOMIC_INC_U32(&pCounter->totalAllocs);
activeAllocs = portAtomicIncrementU32(&pCounter->activeAllocs);
portAtomicIncrementU32(&pCounter->totalAllocs);
if (PORT_MEM_TRACK_USE_FENCEPOSTS)
{
activeSize = PORT_MEM_ATOMIC_ADD_SIZE(&pCounter->activeSize, size);
activeSize = portAtomicAddSize(&pCounter->activeSize, size);
}
PORT_MEM_ATOMIC_ADD_SIZE(&pCounter->totalSize, size);
portAtomicAddSize(&pCounter->totalSize, size);
// Atomically compare the peak value with the active, and update if greater.
while (1)
@ -194,14 +164,14 @@ _portMemCounterInc
NvU32 peakAllocs = pCounter->peakAllocs;
if (activeAllocs <= peakAllocs)
break;
PORT_MEM_ATOMIC_CAS_U32(&pCounter->peakAllocs, activeAllocs, peakAllocs);
portAtomicCompareAndSwapU32(&pCounter->peakAllocs, activeAllocs, peakAllocs);
}
while (1)
{
NvLength peakSize = pCounter->peakSize;
if (activeSize <= peakSize)
break;
PORT_MEM_ATOMIC_CAS_SIZE(&pCounter->peakSize, activeSize, peakSize);
portAtomicCompareAndSwapSize(&pCounter->peakSize, activeSize, peakSize);
}
}
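/*
 * Illustrative sketch, not part of this change: the lock-free "atomic max"
 * idiom used above for peak tracking, isolated as a hypothetical helper. The
 * loop re-reads the published peak and retries the compare-and-swap until the
 * candidate is no longer greater or the swap succeeds, so the peak only ever
 * grows and no lock is needed.
 */
static NV_INLINE void sketchAtomicMaxU32(volatile NvU32 *pPeak, NvU32 candidate)
{
    while (1)
    {
        NvU32 peak = *pPeak;
        if (candidate <= peak)
            break;
        if (portAtomicCompareAndSwapU32(pPeak, candidate, peak))
            break;
    }
}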
static NV_INLINE void
@ -211,11 +181,11 @@ _portMemCounterDec
void *pMem
)
{
PORT_MEM_ATOMIC_DEC_U32(&pCounter->activeAllocs);
portAtomicDecrementU32(&pCounter->activeAllocs);
if (PORT_MEM_TRACK_USE_FENCEPOSTS)
{
PORT_MEM_ATOMIC_SUB_SIZE(&pCounter->activeSize,
((PORT_MEM_FENCE_HEAD *)pMem-1)->blockSize);
portAtomicSubSize(&pCounter->activeSize,
((PORT_MEM_FENCE_HEAD *)pMem-1)->blockSize);
}
}
@ -303,7 +273,7 @@ _portMemListAdd
PORT_MEM_LIST *pList = &pHead->list;
pList->pNext = pList;
pList->pPrev = pList;
if (!PORT_MEM_ATOMIC_CAS_SIZE(&pTracking->pFirstAlloc, pList, NULL))
if (!portAtomicCompareAndSwapSize(&pTracking->pFirstAlloc, pList, NULL))
{
PORT_LOCKED_LIST_LINK(pTracking->pFirstAlloc, pList, pTracking->listLock);
}
@ -318,11 +288,11 @@ _portMemListRemove
PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1;
PORT_MEM_LIST *pList = &pHead->list;
if (!PORT_MEM_ATOMIC_CAS_SIZE(&pList->pNext, NULL, pList))
if (!portAtomicCompareAndSwapSize(&pList->pNext, NULL, pList))
{
PORT_LOCKED_LIST_UNLINK(pTracking->pFirstAlloc, pList, pTracking->listLock);
}
PORT_MEM_ATOMIC_CAS_SIZE(&pTracking->pFirstAlloc, pList->pNext, pList);
portAtomicCompareAndSwapSize(&pTracking->pFirstAlloc, pList->pNext, pList);
}
static NV_INLINE PORT_MEM_HEADER *
@ -417,7 +387,7 @@ typedef struct PORT_MEM_LOG_ENTRY
#define PORT_MEM_LOG_ENTRIES 4096
static void
_portMemLogInit()
_portMemLogInit(void)
{
NVLOG_BUFFER_HANDLE hBuffer;
nvlogAllocBuffer(PORT_MEM_LOG_ENTRIES * sizeof(PORT_MEM_LOG_ENTRY),
@ -426,7 +396,7 @@ _portMemLogInit()
}
static void
_portMemLogDestroy()
_portMemLogDestroy(void)
{
NVLOG_BUFFER_HANDLE hBuffer;
nvlogGetBufferHandleFromTag(PORT_MEM_TRACK_LOG_TAG, &hBuffer);
@ -547,7 +517,7 @@ _portMemLimitInc(NvU32 pid, void *pMem, NvU64 size)
{
NvU32 pidIdx = pid - 1;
pMemHeader->blockSize = size;
PORT_MEM_ATOMIC_ADD_SIZE(&portMemGlobals.counterPid[pidIdx], size);
portAtomicAddSize(&portMemGlobals.counterPid[pidIdx], size);
}
}
}
@ -571,7 +541,7 @@ _portMemLimitDec(void *pMem)
}
else
{
PORT_MEM_ATOMIC_SUB_SIZE(&portMemGlobals.counterPid[pidIdx], pMemHeader->blockSize);
portAtomicSubSize(&portMemGlobals.counterPid[pidIdx], pMemHeader->blockSize);
}
}
}
@ -626,7 +596,7 @@ portMemInitialize(void)
#if PORT_MEM_TRACK_USE_CALLERINFO
PORT_MEM_CALLERINFO_TYPE_PARAM = PORT_MEM_CALLERINFO_MAKE;
#endif
if (PORT_MEM_ATOMIC_INC_U32(&portMemGlobals.initCount) != 1)
if (portAtomicIncrementU32(&portMemGlobals.initCount) != 1)
return;
portMemGlobals.mainTracking.pAllocator = NULL;
@ -679,7 +649,7 @@ void
portMemShutdown(NvBool bForceSilent)
{
PORT_UNREFERENCED_VARIABLE(bForceSilent);
if (PORT_MEM_ATOMIC_DEC_U32(&portMemGlobals.initCount) != 0)
if (portAtomicDecrementU32(&portMemGlobals.initCount) != 0)
return;
#if (PORT_MEM_TRACK_PRINT_LEVEL > PORT_MEM_TRACK_PRINT_LEVEL_SILENT)
@ -880,7 +850,7 @@ portMemInitializeAllocatorTracking
PORT_MEM_COUNTER_INIT(&pTracking->counter);
PORT_MEM_LIST_INIT(pTracking);
PORT_MEM_CALLERINFO_INIT_TRACKING(pTracking);
PORT_MEM_ATOMIC_INC_U32(&portMemGlobals.totalAllocators);
portAtomicIncrementU32(&portMemGlobals.totalAllocators);
}
#if PORT_MEM_TRACK_USE_LIMIT
@ -1231,7 +1201,7 @@ _portMemTrackingRelease
PORT_LOCKED_LIST_UNLINK(&portMemGlobals.mainTracking, pTracking, portMemGlobals.trackingLock);
PORT_MEM_LIST_DESTROY(pTracking);
PORT_MEM_ATOMIC_DEC_U32(&portMemGlobals.totalAllocators);
portAtomicDecrementU32(&portMemGlobals.totalAllocators);
}
static void

View File

@ -315,15 +315,17 @@ portSyncRwLockReleaseWrite
os_release_rwlock_write(pRwLock->rwlock);
}
NvBool portSyncExSafeToSleep()
NvBool portSyncExSafeToSleep(void)
{
return os_semaphore_may_sleep();
}
NvBool portSyncExSafeToWake()
NvBool portSyncExSafeToWake(void)
{
return NV_TRUE;
}
NvU64 portSyncExGetInterruptLevel()
NvU64 portSyncExGetInterruptLevel(void)
{
return !os_semaphore_may_sleep();
}


@ -46,14 +46,14 @@ const PORT_THREAD PORT_THREAD_INVALID = {0ULL};
// Invalid value for process.
const PORT_PROCESS PORT_PROCESS_INVALID = {0ULL};
NvU64 portThreadGetCurrentThreadId()
NvU64 portThreadGetCurrentThreadId(void)
{
NvU64 tid = 0;
os_get_current_thread(&tid);
return tid;
}
void portThreadYield()
void portThreadYield(void)
{
os_schedule();
}

View File

@ -74,7 +74,7 @@ portUtilExGetStackTrace
}
#endif
NV_NOINLINE NvUPtr portUtilGetIPAddress()
NV_NOINLINE NvUPtr portUtilGetIPAddress(void)
{
return portUtilGetReturnAddress();
}


@ -130,18 +130,6 @@ NvU32 osGetMaximumCoreCount(void);
#endif
#endif
#if NVOS_IS_LIBOS
//
// On LibOS we have at most one passive thread (task_rm) and one ISR
// (task_interrupt) active at once (on same CPU core). Since these two will
// use different maps, we don't need to protect them with spinlocks.
//
#define TLS_SPINLOCK_ACQUIRE(x)
#define TLS_SPINLOCK_RELEASE(x)
#else
#define TLS_SPINLOCK_ACQUIRE(x) portSyncSpinlockAcquire(x)
#define TLS_SPINLOCK_RELEASE(x) portSyncSpinlockRelease(x)
#endif // NVOS_IS_LIBOS
#if !PORT_IS_FUNC_SUPPORTED(portSyncExSafeToSleep)
#define portSyncExSafeToSleep() NV_TRUE
@ -158,7 +146,7 @@ NvU32 osGetMaximumCoreCount(void);
NV_STATUS tlsInitialize()
NV_STATUS tlsInitialize(void)
{
NV_STATUS status;
@ -216,7 +204,7 @@ done:
return status;
}
void tlsShutdown()
void tlsShutdown(void)
{
if (portAtomicDecrementU32(&tlsDatabase.initCount) != 0)
{
@ -313,7 +301,7 @@ PORT_MEM_ALLOCATOR *tlsIsrAllocatorGet(void)
return _tlsIsrAllocatorGet();
}
NvU64 tlsEntryAlloc()
NvU64 tlsEntryAlloc(void)
{
NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL);
return portAtomicExIncrementU64(&tlsDatabase.lastEntryId);
@ -427,7 +415,7 @@ NvU32 tlsEntryUnreference(NvU64 entryId)
static ThreadEntry *
_tlsThreadEntryGet()
_tlsThreadEntryGet(void)
{
ThreadEntry *pThreadEntry;
@ -438,16 +426,16 @@ _tlsThreadEntryGet()
else
{
NvU64 threadId = portThreadGetCurrentThreadId();
TLS_SPINLOCK_ACQUIRE(tlsDatabase.pLock);
pThreadEntry = mapFind(&tlsDatabase.threadEntries, threadId);
TLS_SPINLOCK_RELEASE(tlsDatabase.pLock);
portSyncSpinlockAcquire(tlsDatabase.pLock);
pThreadEntry = mapFind(&tlsDatabase.threadEntries, threadId);
portSyncSpinlockRelease(tlsDatabase.pLock);
}
return pThreadEntry;
}
static ThreadEntry *
_tlsThreadEntryGetOrAlloc()
_tlsThreadEntryGetOrAlloc(void)
{
ThreadEntry* pThreadEntry = NULL;
@ -460,11 +448,11 @@ _tlsThreadEntryGetOrAlloc()
{
pThreadEntry->key.threadId = portThreadGetCurrentThreadId();
mapInitIntrusive(&pThreadEntry->map);
TLS_SPINLOCK_ACQUIRE(tlsDatabase.pLock);
mapInsertExisting(&tlsDatabase.threadEntries,
pThreadEntry->key.threadId,
pThreadEntry);
TLS_SPINLOCK_RELEASE(tlsDatabase.pLock);
portSyncSpinlockAcquire(tlsDatabase.pLock);
mapInsertExisting(&tlsDatabase.threadEntries,
pThreadEntry->key.threadId,
pThreadEntry);
portSyncSpinlockRelease(tlsDatabase.pLock);
}
}
@ -522,9 +510,9 @@ _tlsEntryRelease
{
NV_ASSERT(portMemExSafeForNonPagedAlloc());
mapDestroy(&pThreadEntry->map);
TLS_SPINLOCK_ACQUIRE(tlsDatabase.pLock);
mapRemove(&tlsDatabase.threadEntries, pThreadEntry);
TLS_SPINLOCK_RELEASE(tlsDatabase.pLock);
portSyncSpinlockAcquire(tlsDatabase.pLock);
mapRemove(&tlsDatabase.threadEntries, pThreadEntry);
portSyncSpinlockRelease(tlsDatabase.pLock);
PORT_FREE(tlsDatabase.pAllocator, pThreadEntry);
}
}
@ -549,7 +537,7 @@ static PORT_MEM_ALLOCATOR *_tlsAllocatorGet(void)
#if TLS_ISR_CAN_USE_LOCK
static NV_STATUS _tlsIsrEntriesInit()
static NV_STATUS _tlsIsrEntriesInit(void)
{
tlsDatabase.pIsrLock = portSyncSpinlockCreate(tlsDatabase.pAllocator);
if (tlsDatabase.pLock == NULL)
@ -559,7 +547,7 @@ static NV_STATUS _tlsIsrEntriesInit()
mapInitIntrusive(&tlsDatabase.isrEntries);
return NV_OK;
}
static void _tlsIsrEntriesDestroy()
static void _tlsIsrEntriesDestroy(void)
{
if (tlsDatabase.pIsrLock)
portSyncSpinlockDestroy(tlsDatabase.pIsrLock);
@ -567,40 +555,40 @@ static void _tlsIsrEntriesDestroy()
}
static void _tlsIsrEntriesInsert(ThreadEntry *pThreadEntry)
{
TLS_SPINLOCK_ACQUIRE(tlsDatabase.pIsrLock);
mapInsertExisting(&tlsDatabase.isrEntries, pThreadEntry->key.sp, pThreadEntry);
TLS_SPINLOCK_RELEASE(tlsDatabase.pIsrLock);
portSyncSpinlockAcquire(tlsDatabase.pIsrLock);
mapInsertExisting(&tlsDatabase.isrEntries, pThreadEntry->key.sp, pThreadEntry);
portSyncSpinlockRelease(tlsDatabase.pIsrLock);
}
static ThreadEntry *_tlsIsrEntriesRemove(NvU64 sp)
{
ThreadEntry *pThreadEntry;
TLS_SPINLOCK_ACQUIRE(tlsDatabase.pIsrLock);
pThreadEntry = mapFind(&tlsDatabase.isrEntries, sp);
mapRemove(&tlsDatabase.isrEntries, pThreadEntry);
TLS_SPINLOCK_RELEASE(tlsDatabase.pIsrLock);
portSyncSpinlockAcquire(tlsDatabase.pIsrLock);
pThreadEntry = mapFind(&tlsDatabase.isrEntries, sp);
mapRemove(&tlsDatabase.isrEntries, pThreadEntry);
portSyncSpinlockRelease(tlsDatabase.pIsrLock);
return pThreadEntry;
}
static ThreadEntry *_tlsIsrEntriesFind(NvU64 approxSp)
{
ThreadEntry *pThreadEntry;
TLS_SPINLOCK_ACQUIRE(tlsDatabase.pIsrLock);
portSyncSpinlockAcquire(tlsDatabase.pIsrLock);
#if STACK_GROWS_DOWNWARD
pThreadEntry = mapFindGEQ(&tlsDatabase.isrEntries, approxSp);
pThreadEntry = mapFindGEQ(&tlsDatabase.isrEntries, approxSp);
#else
pThreadEntry = mapFindLEQ(&tlsDatabase.isrEntries, approxSp);
pThreadEntry = mapFindLEQ(&tlsDatabase.isrEntries, approxSp);
#endif
TLS_SPINLOCK_RELEASE(tlsDatabase.pIsrLock);
portSyncSpinlockRelease(tlsDatabase.pIsrLock);
return pThreadEntry;
}
#else // Lockless
static NV_STATUS _tlsIsrEntriesInit()
static NV_STATUS _tlsIsrEntriesInit(void)
{
portMemSet(tlsDatabase.isrEntries, 0, sizeof(tlsDatabase.isrEntries));
return NV_OK;
}
static void _tlsIsrEntriesDestroy()
static void _tlsIsrEntriesDestroy(void)
{
portMemSet(tlsDatabase.isrEntries, 0, sizeof(tlsDatabase.isrEntries));
}
@ -656,7 +644,7 @@ static ThreadEntry *_tlsIsrEntriesFind(NvU64 approxSp)
static NvBool _tlsIsIsr()
static NvBool _tlsIsIsr(void)
{
#if defined (TLS_ISR_UNIT_TEST)
// In unit tests we simulate ISR tests in different ways, so tests define this


@ -40,7 +40,7 @@ AR ?= ar
CFLAGS ?= -Wall
# always set these -f CFLAGS
CFLAGS += -fno-strict-aliasing -fno-omit-frame-pointer -Wformat=2
CC_ONLY_CFLAGS ?=
CC_ONLY_CFLAGS ?= -Wstrict-prototypes -Wold-style-definition
CXX_ONLY_CFLAGS ?=
LDFLAGS ?=
BIN_LDFLAGS ?=
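/*
 * Illustrative C example, not part of this change: what the newly added
 * -Wstrict-prototypes / -Wold-style-definition flags catch, and why the empty
 * parameter lists elsewhere in this change become (void).
 */
static int sketchOldStyle()     { return 0; }  /* "()" declares unspecified arguments in C; now warned */
static int sketchNewStyle(void) { return 0; }  /* explicit "no arguments" prototype; clean */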


@ -1,4 +1,4 @@
NVIDIA_VERSION = 525.78.01
NVIDIA_VERSION = 525.85.05
# This file.
VERSION_MK_FILE := $(lastword $(MAKEFILE_LIST))