525.78.01

Andy Ritger 2023-01-05 10:40:27 -08:00
parent 9594cc0169
commit dac2350c7f
No known key found for this signature in database
GPG Key ID: 6D466BB75E006CFC
180 changed files with 9465 additions and 4853 deletions

View File

@ -2,6 +2,8 @@
## Release 525 Entries
### [525.78.01] 2023-01-05
### [525.60.13] 2022-12-05
### [525.60.11] 2022-11-28

View File

@ -1,7 +1,7 @@
# NVIDIA Linux Open GPU Kernel Module Source
This is the source release of the NVIDIA Linux open GPU kernel modules,
version 525.60.13.
version 525.78.01.
## How to Build
@ -17,7 +17,7 @@ as root:
Note that the kernel modules built here must be used with GSP
firmware and user-space NVIDIA GPU driver components from a corresponding
525.60.13 driver release. This can be achieved by installing
525.78.01 driver release. This can be achieved by installing
the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
option. E.g.,
@ -167,7 +167,7 @@ for the target kernel.
## Compatible GPUs
The open-gpu-kernel-modules can be used on any Turing or later GPU
(see the table below). However, in the 525.60.13 release,
(see the table below). However, in the 525.78.01 release,
GeForce and Workstation support is still considered alpha-quality.
To enable use of the open kernel modules on GeForce and Workstation GPUs,
@ -175,7 +175,7 @@ set the "NVreg_OpenRmEnableUnsupportedGpus" nvidia.ko kernel module
parameter to 1. For more details, see the NVIDIA GPU driver end user
README here:
https://us.download.nvidia.com/XFree86/Linux-x86_64/525.60.13/README/kernel_open.html
https://us.download.nvidia.com/XFree86/Linux-x86_64/525.78.01/README/kernel_open.html
In the below table, if three IDs are listed, the first is the PCI Device
ID, the second is the PCI Subsystem Vendor ID, and the third is the PCI
@ -645,8 +645,8 @@ Subsystem Device ID.
| NVIDIA A100-SXM4-80GB | 20B2 10DE 147F |
| NVIDIA A100-SXM4-80GB | 20B2 10DE 1622 |
| NVIDIA A100-SXM4-80GB | 20B2 10DE 1623 |
| NVIDIA PG506-242 | 20B3 10DE 14A7 |
| NVIDIA PG506-243 | 20B3 10DE 14A8 |
| NVIDIA A100-SXM-64GB | 20B3 10DE 14A7 |
| NVIDIA A100-SXM-64GB | 20B3 10DE 14A8 |
| NVIDIA A100 80GB PCIe | 20B5 10DE 1533 |
| NVIDIA A100 80GB PCIe | 20B5 10DE 1642 |
| NVIDIA PG506-232 | 20B6 10DE 1492 |
@ -771,6 +771,7 @@ Subsystem Device ID.
| NVIDIA RTX A2000 12GB | 2571 103C 1611 |
| NVIDIA RTX A2000 12GB | 2571 10DE 1611 |
| NVIDIA RTX A2000 12GB | 2571 17AA 1611 |
| NVIDIA GeForce RTX 3050 | 2582 |
| NVIDIA GeForce RTX 3050 Ti Laptop GPU | 25A0 |
| NVIDIA GeForce RTX 3050Ti Laptop GPU | 25A0 103C 8928 |
| NVIDIA GeForce RTX 3050Ti Laptop GPU | 25A0 103C 89F9 |
@ -795,3 +796,11 @@ Subsystem Device ID.
| NVIDIA RTX A1000 Embedded GPU | 25F9 |
| NVIDIA RTX A2000 Embedded GPU | 25FA |
| NVIDIA RTX A500 Embedded GPU | 25FB |
| NVIDIA GeForce RTX 4090 | 2684 |
| NVIDIA RTX 6000 Ada Generation | 26B1 1028 16A1 |
| NVIDIA RTX 6000 Ada Generation | 26B1 103C 16A1 |
| NVIDIA RTX 6000 Ada Generation | 26B1 10DE 16A1 |
| NVIDIA RTX 6000 Ada Generation | 26B1 17AA 16A1 |
| NVIDIA L40 | 26B5 10DE 169D |
| NVIDIA GeForce RTX 4080 | 2704 |
| NVIDIA GeForce RTX 4070 Ti | 2782 |

View File

@ -72,7 +72,7 @@ EXTRA_CFLAGS += -I$(src)/common/inc
EXTRA_CFLAGS += -I$(src)
EXTRA_CFLAGS += -Wall -MD $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args
EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"525.60.13\"
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"525.78.01\"
EXTRA_CFLAGS += -Wno-unused-function

View File

@ -958,7 +958,6 @@ NV_STATUS NV_API_CALL rm_log_gpu_crash (nv_stack_t *, nv_state_t *);
void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd);
NvBool NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id);
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults_unlocked(nvidia_stack_t *, nv_state_t *, NvU32 *);
NV_STATUS NV_API_CALL rm_gpu_handle_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
NvBool NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *);
NvBool NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *);

View File

@ -1059,6 +1059,23 @@ compile_test() {
compile_check_conftest "$CODE" "NV_MDEV_DRIVER_HAS_SUPPORTED_TYPE_GROUPS" "" "types"
;;
vfio_device_ops_has_dma_unmap)
#
# Determine if 'vfio_device_ops' struct has 'dma_unmap' field.
#
# Added by commit ce4b4657ff18 ("vfio: Replace the DMA unmapping
# notifier with a callback") in v6.0
#
CODE="
#include <linux/pci.h>
#include <linux/vfio.h>
int conftest_vfio_device_ops_has_dma_unmap(void) {
return offsetof(struct vfio_device_ops, dma_unmap);
}"
compile_check_conftest "$CODE" "NV_VFIO_DEVICE_OPS_HAS_DMA_UNMAP" "" "types"
;;
pci_irq_vector_helpers)
#
# Determine if pci_alloc_irq_vectors(), pci_free_irq_vectors()
@ -2611,7 +2628,7 @@ compile_test() {
fi
;;
vfio_pin_pages)
vfio_pin_pages_has_vfio_device_arg)
#
# Determine if vfio_pin_pages() kABI accepts "struct vfio_device *"
# argument instead of "struct device *"
@ -2642,6 +2659,37 @@ compile_test() {
fi
;;
vfio_pin_pages_has_pages_arg)
#
# Determine if the vfio_pin_pages() kABI accepts a "struct page **"
# argument instead of "unsigned long *phys_pfn".
#
# Replaced "unsigned long *phys_pfn" with "struct page **pages"
# in commit 34a255e676159 ("vfio: Replace phys_pfn with pages for
# vfio_pin_pages()") in v6.0.
#
echo "$CONFTEST_PREAMBLE
#include <linux/pci.h>
#include <linux/vfio.h>
int vfio_pin_pages(struct vfio_device *device,
dma_addr_t iova,
int npage,
int prot,
struct page **pages) {
return 0;
}" > conftest$$.c
$CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
rm -f conftest$$.c
if [ -f conftest$$.o ]; then
echo "#define NV_VFIO_PIN_PAGES_HAS_PAGES_ARG" | append_conftest "functions"
rm -f conftest$$.o
else
echo "#undef NV_VFIO_PIN_PAGES_HAS_PAGES_ARG" | append_conftest "functions"
fi
;;
pci_driver_has_driver_managed_dma)
#
# Determine if "struct pci_driver" has .driver_managed_dma member.
@ -5404,6 +5452,28 @@ compile_test() {
compile_check_conftest "$CODE" "NV_ACPI_VIDEO_BACKLIGHT_USE_NATIVE" "" "functions"
;;
drm_connector_has_override_edid)
#
# Determine if 'struct drm_connector' has an 'override_edid' member.
#
# Removed by commit 90b575f52c6ab ("drm/edid: detach debugfs EDID
# override from EDID property update") in linux-next, expected in
# v6.2-rc1.
#
CODE="
#if defined(NV_DRM_DRM_CRTC_H_PRESENT)
#include <drm/drm_crtc.h>
#endif
#if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT)
#include <drm/drm_connector.h>
#endif
int conftest_drm_connector_has_override_edid(void) {
return offsetof(struct drm_connector, override_edid);
}"
compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_HAS_OVERRIDE_EDID" "" "types"
;;
# When adding a new conftest entry, please use the correct format for
# specifying the relevant upstream Linux kernel commit.
#

View File

@ -42,6 +42,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
static void nv_drm_connector_destroy(struct drm_connector *connector)
{
@ -98,7 +99,11 @@ __nv_drm_detect_encoder(struct NvKmsKapiDynamicDisplayParams *pDetectParams,
break;
}
#if defined(NV_DRM_CONNECTOR_HAS_OVERRIDE_EDID)
if (connector->override_edid) {
#else
if (drm_edid_override_connector_update(connector) > 0) {
#endif
const struct drm_property_blob *edid = connector->edid_blob_ptr;
if (edid->length <= sizeof(pDetectParams->edid.buffer)) {

View File

@ -257,10 +257,6 @@ nv_drm_init_mode_config(struct nv_drm_device *nv_dev,
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
/* Currently unused. Update when needed. */
dev->mode_config.fb_base = 0;
#if defined(NV_DRM_CRTC_STATE_HAS_ASYNC_FLIP) || \
defined(NV_DRM_CRTC_STATE_HAS_PAGEFLIP_FLAGS)
dev->mode_config.async_page_flip = true;

View File

@ -123,3 +123,4 @@ NV_CONFTEST_TYPE_COMPILE_TESTS += drm_has_hdr_output_metadata
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_add_fence
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_reserve_fences
NV_CONFTEST_TYPE_COMPILE_TESTS += reservation_object_reserve_shared_has_num_fences_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_has_override_edid

View File

@ -288,8 +288,9 @@ nvswitch_i2c_add_adapter
rc = nvswitch_os_snprintf(adapter->name,
sizeof(adapter->name),
"NVIDIA NVSwitch i2c adapter %u at %x:%02x.%u",
"NVIDIA NVSwitch i2c adapter %u at %04x:%x:%02x.%u",
port,
NV_PCI_DOMAIN_NUMBER(pci_dev),
NV_PCI_BUS_NUMBER(pci_dev),
NV_PCI_SLOT_NUMBER(pci_dev),
PCI_FUNC(pci_dev->devfn));

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -245,6 +245,7 @@ namespace DisplayPort
public:
virtual bool getOuiSupported() = 0;
virtual AuxRetry::status setOuiSource(unsigned ouiId, const char * model, size_t modelNameLength, NvU8 chipRevision) = 0;
virtual bool getOuiSource(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision) = 0;
virtual bool getOuiSink(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision) = 0;
};

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -89,6 +89,11 @@ namespace DisplayPort
Timer * timer; // OS provided timer services
Connector::EventSink * sink; // Event Sink
// Cached Source OUI for restoring eDP OUI when powering up
unsigned cachedSourceOUI;
char cachedSourceModelName[NV_DPCD_SOURCE_DEV_ID_STRING__SIZE + 1];
NvU8 cachedSourceChipRevision;
unsigned ouiId; // Sink ouiId
char modelName[NV_DPCD_SOURCE_DEV_ID_STRING__SIZE + 1]; // Device Model-name
bool bIgnoreSrcOuiHandshake; // Skip writing source OUI
@ -294,6 +299,8 @@ namespace DisplayPort
bool bEnableFastLT;
NvU32 maxLinkRateFromRegkey;
bool bEnableOuiRestoring;
//
// Latency(ms) to apply between link-train and FEC enable for bug
// 2561206.
@ -322,6 +329,12 @@ namespace DisplayPort
//
bool bDscCapBasedOnParent;
//
// MST device connected to a dock may issue an IRQ for link lost.
// Send PowerDown path msg to suppress that.
//
bool bPowerDownPhyBeforeD3;
void sharedInit();
ConnectorImpl(MainLink * main, AuxBus * auxBus, Timer * timer, Connector::EventSink * sink);

View File

@ -57,7 +57,7 @@ namespace DisplayPort
bool videoSink; // Should be true when a video sink is supported
NvU64 maxTmdsClkRate;
Device():peerDevice(None),SDPStreams(0),SDPStreamSinks(0),dirty(false),videoSink(false)
Device():peerDevice(None),SDPStreams(0),SDPStreamSinks(0),dirty(false),videoSink(false),maxTmdsClkRate(0)
{
portMap.validMap = portMap.inputMap = portMap.internalMap = 0;
}

View File

@ -385,6 +385,11 @@ namespace DisplayPort
void pbnRequired(const ModesetInfo & modesetInfo, unsigned & base_pbn, unsigned & slots, unsigned & slots_pbn)
{
base_pbn = pbnForMode(modesetInfo);
if (bEnableFEC)
{
// If FEC is enabled, we need to consider 3% overhead as per the DP1.4 spec.
base_pbn = (NvU32)(divide_ceil(base_pbn * 100, 97));
}
slots = slotsForPBN(base_pbn);
slots_pbn = PBNForSlots(slots);
}
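For context, the divide_ceil(base_pbn * 100, 97) step above is how the 3% DP1.4 FEC overhead gets folded into the PBN requirement. A minimal standalone sketch of that arithmetic (not part of this commit; divide_ceil is restated here as plain integer math):

```c
#include <stdint.h>

/* Ceiling division, equivalent to the driver's divide_ceil() helper. */
static inline uint32_t div_ceil_u64(uint64_t num, uint64_t den)
{
    return (uint32_t)((num + den - 1) / den);
}

/* Fold the 3% FEC overhead into a PBN value: pbn * 100 / 97, rounded up. */
static uint32_t apply_fec_overhead(uint32_t base_pbn)
{
    return div_ceil_u64((uint64_t)base_pbn * 100, 97);
}

/* Example: apply_fec_overhead(1000) == 1031, since 1000 * 100 / 97 = 1030.9... */
```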

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -59,6 +59,12 @@
#define NV_DP_REGKEY_FORCE_EDP_ILR "DP_BYPASS_EDP_ILR_REV_CHECK"
// Regkey to enable OUI caching/restoring in release branch.
#define NV_DP_REGKEY_ENABLE_OUI_RESTORING "DP_ENABLE_OUI_RESTORING"
// Message to power down the video stream before powering down the link (set D3)
#define NV_DP_REGKEY_POWER_DOWN_PHY "DP_POWER_DOWN_PHY"
//
// DSC capability of downstream device should be decided based on device's own
// and its parent's DSC capability.
@ -96,6 +102,8 @@ struct DP_REGKEY_DATABASE
bool bOptLinkKeptAliveSst;
bool bBypassEDPRevCheck;
bool bDscMstCapBug3143315;
bool bEnableOuiRestoring;
bool bPowerDownPhyBeforeD3;
};
#endif //INCLUDED_DP_REGKEYDATABASE_H

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -863,6 +863,42 @@ struct DPCDHALImpl : DPCDHAL
return bus.write(NV_DPCD_SOURCE_IEEE_OUI, &ouiBuffer[0], sizeof ouiBuffer);
}
virtual bool getOuiSource(unsigned &ouiId, char * modelName,
size_t modelNameBufferSize, NvU8 & chipRevision)
{
NvU8 ouiBuffer[16];
int address = NV_DPCD_SOURCE_IEEE_OUI;
if (caps.revisionMajor <= 0)
DP_ASSERT(0 && "Something is wrong, revision major should be > 0");
// If buffer size is larger than dev_id size, the extras are not used.
// If the buffer size is smaller, then we can only get that many bytes.
if (modelNameBufferSize > NV_DPCD_SOURCE_DEV_ID_STRING__SIZE)
{
modelNameBufferSize = NV_DPCD_SOURCE_DEV_ID_STRING__SIZE;
}
if (AuxRetry::ack != bus.read(address, &ouiBuffer[0], sizeof ouiBuffer))
{
*modelName = 0;
ouiId = 0;
chipRevision = 0;
return false;
}
// The first 3 bytes are IEEE_OUI. 2 hex digits per register.
ouiId = ouiBuffer[0] | (ouiBuffer[1] << 8) | (ouiBuffer[2] << 16);
// Next 6 bytes are Device Identification String, copy as much as we can (limited buffer case).
unsigned int i;
for (i = 0; i < modelNameBufferSize; i++)
modelName[i] = ouiBuffer[3+i];
chipRevision = ouiBuffer[9];
return true;
}
virtual bool getOuiSink(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision)
{
NvU8 ouiBuffer[16];

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -188,6 +188,8 @@ void ConnectorImpl::applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatab
this->bDisableSSC = dpRegkeyDatabase.bSscDisabled;
this->bEnableFastLT = dpRegkeyDatabase.bFastLinkTrainingEnabled;
this->bDscMstCapBug3143315 = dpRegkeyDatabase.bDscMstCapBug3143315;
this->bEnableOuiRestoring = dpRegkeyDatabase.bEnableOuiRestoring;
this->bPowerDownPhyBeforeD3 = dpRegkeyDatabase.bPowerDownPhyBeforeD3;
}
void ConnectorImpl::setPolicyModesetOrderMitigation(bool enabled)
@ -704,6 +706,13 @@ create:
newDev->applyOUIOverrides();
if (main->isEDP() && this->bEnableOuiRestoring)
{
// Save Source OUI information for eDP.
hal->getOuiSource(cachedSourceOUI, &cachedSourceModelName[0],
sizeof(cachedSourceModelName), cachedSourceChipRevision);
}
fireEvents();
}
@ -1178,7 +1187,7 @@ bool ConnectorImpl::compoundQueryAttach(Group * target,
this->isFECSupported() && // If GPU supports FEC
pDscParams && // If client sent DSC info
pDscParams->bCheckWithDsc && // If client wants to check with DSC
(dev && dev->isDSCPossible()) && // Either device or its parent supports DSC
(dev && dev->devDoingDscDecompression) && // Either device or its parent supports DSC
bFecCapable && // If path up to dsc decoding device supports FEC
(modesetParams.modesetInfo.bitsPerComponent != 6)) // DSC doesn't support bpc = 6
{
@ -1239,9 +1248,13 @@ bool ConnectorImpl::compoundQueryAttach(Group * target,
(modesetParams.colorFormat == dpColorFormat_YCbCr444 && !dev->parent->dscCaps.dscDecoderColorFormatCaps.bYCbCr444) ||
(modesetParams.colorFormat == dpColorFormat_YCbCr422 && !dev->parent->dscCaps.dscDecoderColorFormatCaps.bYCbCrSimple422))
{
if (pDscParams->forceDsc == DSC_FORCE_ENABLE)
if ((pDscParams->forceDsc == DSC_FORCE_ENABLE) ||
(modesetParams.modesetInfo.mode == DSC_DUAL))
{
// If DSC is force enabled then return failure here
//
// If DSC is force enabled or DSC_DUAL mode is requested,
// then return failure here
//
compoundQueryResult = false;
pDscParams->bEnableDsc = false;
return false;
@ -1270,9 +1283,24 @@ bool ConnectorImpl::compoundQueryAttach(Group * target,
(NvU32*)(PPS),
(NvU32*)(&bitsPerPixelX16))) != NVT_STATUS_SUCCESS)
{
if (pDscParams->forceDsc == DSC_FORCE_ENABLE)
//
// If generating PPS failed
// AND
// (DSC is force enabled
// OR
// the requested DSC mode = DUAL)
// then
// return failure here
// Else
// we will check if non DSC path is possible.
//
// If dsc mode = DUAL failed to generate PPS and if we pursue
// non DSC path, DD will still follow 2Head1OR modeset path with
// DSC disabled, eventually leading to HW hang. Bug 3632901
//
if ((pDscParams->forceDsc == DSC_FORCE_ENABLE) ||
(modesetParams.modesetInfo.mode == DSC_DUAL))
{
// If DSC is force enabled then return failure here
compoundQueryResult = false;
pDscParams->bEnableDsc = false;
return false;
@ -2667,6 +2695,21 @@ bool ConnectorImpl::notifyAttachBegin(Group * target, // Gr
this->bFECEnable |= bEnableFEC;
highestAssessedLC.enableFEC(this->bFECEnable);
if (main->isEDP() && this->bEnableOuiRestoring)
{
// Power-up eDP and restore eDP OUI if it's powered off now.
bool bPanelPowerOn;
main->getEdpPowerData(&bPanelPowerOn, NULL);
if (!bPanelPowerOn)
{
main->configurePowerState(true);
hal->setOuiSource(cachedSourceOUI,
&cachedSourceModelName[0],
6 /* string length of ieeeOuiDevId */,
cachedSourceChipRevision);
}
}
// if failed, we're guaranteed that assessed link rate didn't meet the mode requirements
// isZombie() will catch this
bLinkTrainingStatus = trainLinkOptimized(getMaxLinkConfig());
@ -3248,6 +3291,22 @@ void ConnectorImpl::powerdownLink(bool bPowerdownPanel)
powerOff.lanes = 0;
// Inform Sink about Main Link Power Down.
if (linkUseMultistream() && bPowerDownPhyBeforeD3)
{
PowerDownPhyMessage powerDownPhyMsg;
NakData nack;
for (Device * i = enumDevices(0); i; i=enumDevices(i))
{
if (i->isPlugged() && i->isVideoSink())
{
Address devAddress = ((DeviceImpl*)i)->address;
powerDownPhyMsg.set(devAddress.parent(), devAddress.tail(), NV_TRUE);
this->messageManager->send(&powerDownPhyMsg, nack);
}
}
}
//
// 1> If it is eDP and the power is not on, we don't need to put it into D3 here
// 2> If FEC is enabled then we have to put panel in D3 after powering down mainlink

View File

@ -920,23 +920,31 @@ void DeviceImpl::applyOUIOverrides()
if ((buffer[3] == 0x53) && (buffer[4] == 0x59) && (buffer[5] == 0x4E) && (buffer[6] == 0x41))
{
// Synaptics VMM5331 and VMM5320 only support MSA-Over-MST for DP with Firmware 5.4.5 or later
if (buffer[7] == 0x53 &&
(buffer[8] == 0x31 || buffer[8] == 0x20))
if (buffer[7] == 0x53)
{
this->bSdpExtCapable = False;
//
// This flag will be checked only in DSC Pass through cases (MST).
// All Synaptics VMM53XX chips which support pass through can only support
// color formats that are listed in 0x69h even in pass through mode.
//
this->bDscPassThroughColorFormatWar = true;
//
// Check firmware version
// 0x50A: FW/SW Major Revision.
// 0x50B: FW/SW Minor Revision.
// 0x50C: Build Number.
//
if ((buffer[10] >= 0x06) ||
((buffer[10] == 0x05) && (buffer[11] >= 0x05)) ||
((buffer[10] == 0x05) && (buffer[11] == 0x04) && (buffer[12] >= 0x05)))
if ((buffer[8] == 0x31) || (buffer[8] == 0x20))
{
this->bSdpExtCapable = True;
this->bSdpExtCapable = False;
//
// Check firmware version
// 0x50A: FW/SW Major Revision.
// 0x50B: FW/SW Minor Revision.
// 0x50C: Build Number.
//
if ((buffer[10] >= 0x06) ||
((buffer[10] == 0x05) && (buffer[11] >= 0x05)) ||
((buffer[10] == 0x05) && (buffer[11] == 0x04) && (buffer[12] >= 0x05)))
{
this->bSdpExtCapable = True;
}
}
}
}

View File

@ -93,7 +93,9 @@ const struct
{NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE_MST, &dpRegkeyDatabase.bOptLinkKeptAliveMst, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE_SST, &dpRegkeyDatabase.bOptLinkKeptAliveSst, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_FORCE_EDP_ILR, &dpRegkeyDatabase.bBypassEDPRevCheck, DP_REG_VAL_BOOL},
{NV_DP_DSC_MST_CAP_BUG_3143315, &dpRegkeyDatabase.bDscMstCapBug3143315, DP_REG_VAL_BOOL}
{NV_DP_DSC_MST_CAP_BUG_3143315, &dpRegkeyDatabase.bDscMstCapBug3143315, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_ENABLE_OUI_RESTORING, &dpRegkeyDatabase.bEnableOuiRestoring, DP_REG_VAL_BOOL},
{NV_DP_REGKEY_POWER_DOWN_PHY, &dpRegkeyDatabase.bPowerDownPhyBeforeD3, DP_REG_VAL_BOOL}
};
EvoMainLink::EvoMainLink(EvoInterface * provider, Timer * timer) :

View File

@ -852,21 +852,16 @@ bool DisplayPort::isModePossibleMSTWithFEC
unsigned DisplayPort::pbnForMode(const ModesetInfo & modesetInfo)
{
// When DSC is enabled, the depth will be multiplied by 16
unsigned dsc_factor = modesetInfo.bEnableDsc ? 16 : 1;
//
// Calculate PBN in units of 54/64 MByte/sec,
// rounded up by 0.6% for spread de-rate. Note: even if we're not spreading our link,
// this MUST still be counted; it also allows downstream links to be spread.
//
unsigned pbnForMode = (NvU32)(divide_ceil(modesetInfo.pixelClockHz * modesetInfo.depth * 1006 * 64 / 8,
(NvU64)54000000 *1000));
if(modesetInfo.bEnableDsc)
{
//
// When DSC is enabled, the depth is multiplied by 16; also account for the 3% FEC
// overhead as per the DP1.4 spec
pbnForMode = (NvU32)(divide_ceil(pbnForMode * 100, 97 * DSC_DEPTH_FACTOR));
}
(NvU64)54000000 * 1000 * dsc_factor));
return pbnForMode;
}
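To make the constants concrete: 1006/1000 is the 0.6% spread de-rate, 64/8 converts bits to the 54/64 MByte/sec PBN unit, and dsc_factor compensates for the depth already being scaled by 16 when DSC is enabled. A hedged standalone sketch of the same calculation (the 1080p figures below are an illustrative assumption, not taken from this commit):

```c
#include <stdint.h>

/*
 * Sketch of the PBN formula above. depth is bits per pixel; when DSC is
 * enabled the caller passes depth already scaled by 16, so dividing by
 * dsc_factor == 16 compensates, as in the diff.
 */
static uint32_t pbn_for_mode(uint64_t pixel_clock_hz, uint32_t depth, int dsc_enabled)
{
    uint64_t dsc_factor = dsc_enabled ? 16 : 1;
    uint64_t num = pixel_clock_hz * depth * 1006 * 64 / 8; /* +0.6% de-rate */
    uint64_t den = 54000000ULL * 1000 * dsc_factor;        /* 54/64 MByte/sec units */
    return (uint32_t)((num + den - 1) / den);              /* divide_ceil */
}

/* Example: 1920x1080@60 (148.5 MHz pixel clock), 24 bpp, no DSC:
 * pbn_for_mode(148500000, 24, 0) == 532. */
```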

View File

@ -36,25 +36,25 @@
// and then checked back in. You cannot make changes to these sections without
// corresponding changes to the buildmeister script
#ifndef NV_BUILD_BRANCH
#define NV_BUILD_BRANCH r526_91
#define NV_BUILD_BRANCH r527_92
#endif
#ifndef NV_PUBLIC_BRANCH
#define NV_PUBLIC_BRANCH r526_91
#define NV_PUBLIC_BRANCH r527_92
#endif
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r525/r526_91-183"
#define NV_BUILD_CHANGELIST_NUM (32139144)
#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r525/r527_92-225"
#define NV_BUILD_CHANGELIST_NUM (32231446)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "rel/gpu_drv/r525/r526_91-183"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32139144)
#define NV_BUILD_NAME "rel/gpu_drv/r525/r527_92-225"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32231446)
#else /* Windows builds */
#define NV_BUILD_BRANCH_VERSION "r526_91-9"
#define NV_BUILD_CHANGELIST_NUM (32103636)
#define NV_BUILD_BRANCH_VERSION "r527_92-2"
#define NV_BUILD_CHANGELIST_NUM (32226812)
#define NV_BUILD_TYPE "Official"
#define NV_BUILD_NAME "527.27"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32103636)
#define NV_BUILD_NAME "528.02"
#define NV_LAST_OFFICIAL_CHANGELIST_NUM (32226812)
#define NV_BUILD_BRANCH_BASE_VERSION R525
#endif
// End buildmeister python edited section

View File

@ -120,6 +120,7 @@ static const PNPVendorId PNPVendorIds[] =
{ "BCD", _VENDOR_NAME_ENTRY("Dr. Seufert GmbH") },
{ "BEO", _VENDOR_NAME_ENTRY("Bang & Olufsen") },
{ "BGT", _VENDOR_NAME_ENTRY("Budzetron") },
{ "BIG", _VENDOR_NAME_ENTRY("Bigscreen, Inc.") },
{ "BMM", _VENDOR_NAME_ENTRY("MAG Technology") },
{ "BNQ", _VENDOR_NAME_ENTRY("BenQ") },
{ "BOE", _VENDOR_NAME_ENTRY("BOE Technology Group Co., Ltd") },
@ -276,6 +277,7 @@ static const PNPVendorId PNPVendorIds[] =
{ "IMA", _VENDOR_NAME_ENTRY("Imagraph") },
{ "IMC", _VENDOR_NAME_ENTRY("IMC Networks") },
{ "IMP", _VENDOR_NAME_ENTRY("Impression") },
{ "IMX", _VENDOR_NAME_ENTRY("Arpara Technology Co., Ltd.") },
{ "INF", _VENDOR_NAME_ENTRY("Inframetrics") },
{ "INL", _VENDOR_NAME_ENTRY("InnoLux Display Corporation") },
{ "INP", _VENDOR_NAME_ENTRY("Interphase") },

View File

@ -4,7 +4,7 @@
#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \
(defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1)
#define NV_VERSION_STRING "525.60.13"
#define NV_VERSION_STRING "525.78.01"
#else

View File

@ -0,0 +1,29 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __ad102_dev_gc6_island_h__
#define __ad102_dev_gc6_island_h__
#define NV_PGC6_BSI_SECURE_SCRATCH_15 0x001180fc /* RW-4R */
#endif // __ad102_dev_gc6_island_h__

View File

@ -0,0 +1,32 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __ad102_dev_gc6_island_addendum_h__
#define __ad102_dev_gc6_island_addendum_h__
#define NV_PGC6_BSI_VPR_SECURE_SCRATCH_15 NV_PGC6_BSI_SECURE_SCRATCH_15
#define NV_PGC6_BSI_VPR_SECURE_SCRATCH_15_SCRUBBER_HANDOFF 31:29
#define NV_PGC6_BSI_VPR_SECURE_SCRATCH_15_SCRUBBER_HANDOFF_DONE 0x00000003
#endif // __ad102_dev_gc6_island_addendum_h__

View File

@ -32,6 +32,8 @@
#include "edid.h"
PUSH_SEGMENTS
#define EIA_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rrx1k,ip,aspect,rep,format) \
@ -1111,10 +1113,12 @@ void parseCta861VsdbBlocks(NVT_EDID_CEA861_INFO *pExt861,
case NVT_CEA861_NVDA_IEEE_ID:
parseEdidNvidiaVSDBBlock((VSDB_DATA *)(&pExt861->vsdb[i]), pNvVsdb);
pExt861->valid.nvda_vsdb = 1;
break;
case NVT_CEA861_MSFT_IEEE_ID:
parseEdidMsftVsdbBlock((VSDB_DATA *)(&pExt861->vsdb[i]), pMsftVsdb);
pExt861->valid.msft_vsdb = 1;
break;
}
@ -1233,12 +1237,6 @@ NVT_STATUS get861ExtInfo(NvU8 *p, NvU32 size, NVT_EDID_CEA861_INFO *p861info)
return NVT_STATUS_ERR;
}
// DTD offset sanity check
if (p[2] >= 1 && p[2] <= 3)
{
return NVT_STATUS_ERR;
}
// don't do anything further if p861info is NULL
if (p861info == NULL)
{

View File

@ -245,13 +245,20 @@ parseDisplayId20EDIDExtDataBlocks(
// copy all the vendor-specific data blocks from DisplayId20 to pEdidInfo
// NOTE: mixed CTA extension blocks and DID2.0 extension blocks are not handled
NVMISC_MEMCPY(&pEdidInfo->hdmiLlcInfo, &pDisplayId20Info->vendor_specific.hdmiLlc, sizeof(NVT_HDMI_LLC_INFO));
NVMISC_MEMCPY(&pEdidInfo->hdmiForumInfo, &pDisplayId20Info->vendor_specific.hfvs, sizeof(NVT_HDMI_FORUM_INFO));
NVMISC_MEMCPY(&pEdidInfo->nvdaVsdbInfo, &pDisplayId20Info->vendor_specific.nvVsdb, sizeof(NVDA_VSDB_PARSED_INFO));
NVMISC_MEMCPY(&pEdidInfo->msftVsdbInfo, &pDisplayId20Info->vendor_specific.msftVsdb, sizeof(MSFT_VSDB_PARSED_INFO));
NVMISC_MEMCPY(&pEdidInfo->hdr_static_metadata_info, &pDisplayId20Info->cta.hdrInfo, sizeof(NVT_HDR_STATIC_METADATA));
NVMISC_MEMCPY(&pEdidInfo->dv_static_metadata_info, &pDisplayId20Info->cta.dvInfo, sizeof(NVT_DV_STATIC_METADATA));
NVMISC_MEMCPY(&pEdidInfo->hdr10PlusInfo, &pDisplayId20Info->cta.hdr10PlusInfo, sizeof(NVT_HDR10PLUS_INFO));
if (pEdidInfo->ext861.valid.H14B_VSDB == 0 && pEdidInfo->ext861_2.valid.H14B_VSDB == 0 && pDisplayId20Info->cta.cta861_info.valid.H14B_VSDB)
NVMISC_MEMCPY(&pEdidInfo->hdmiLlcInfo, &pDisplayId20Info->vendor_specific.hdmiLlc, sizeof(NVT_HDMI_LLC_INFO));
if (pEdidInfo->ext861.valid.H20_HF_VSDB == 0 && pEdidInfo->ext861_2.valid.H20_HF_VSDB == 0 && pDisplayId20Info->cta.cta861_info.valid.H20_HF_VSDB)
NVMISC_MEMCPY(&pEdidInfo->hdmiForumInfo, &pDisplayId20Info->vendor_specific.hfvs, sizeof(NVT_HDMI_FORUM_INFO));
if (pEdidInfo->ext861.valid.nvda_vsdb == 0 && pEdidInfo->ext861_2.valid.nvda_vsdb == 0 && pDisplayId20Info->cta.cta861_info.valid.nvda_vsdb)
NVMISC_MEMCPY(&pEdidInfo->nvdaVsdbInfo, &pDisplayId20Info->vendor_specific.nvVsdb, sizeof(NVDA_VSDB_PARSED_INFO));
if (pEdidInfo->ext861.valid.msft_vsdb == 0 && pEdidInfo->ext861_2.valid.msft_vsdb == 0 && pDisplayId20Info->cta.cta861_info.valid.msft_vsdb)
NVMISC_MEMCPY(&pEdidInfo->msftVsdbInfo, &pDisplayId20Info->vendor_specific.msftVsdb, sizeof(MSFT_VSDB_PARSED_INFO));
if (pEdidInfo->ext861.valid.hdr_static_metadata == 0 && pEdidInfo->ext861_2.valid.hdr_static_metadata == 0 && pDisplayId20Info->cta.cta861_info.valid.hdr_static_metadata)
NVMISC_MEMCPY(&pEdidInfo->hdr_static_metadata_info, &pDisplayId20Info->cta.hdrInfo, sizeof(NVT_HDR_STATIC_METADATA));
if (pEdidInfo->ext861.valid.dv_static_metadata == 0 && pEdidInfo->ext861_2.valid.dv_static_metadata == 0 && pDisplayId20Info->cta.cta861_info.valid.dv_static_metadata)
NVMISC_MEMCPY(&pEdidInfo->dv_static_metadata_info, &pDisplayId20Info->cta.dvInfo, sizeof(NVT_DV_STATIC_METADATA));
if (pEdidInfo->ext861.valid.hdr10Plus == 0 && pEdidInfo->ext861_2.valid.hdr10Plus == 0 && pDisplayId20Info->cta.cta861_info.valid.hdr10Plus)
NVMISC_MEMCPY(&pEdidInfo->hdr10PlusInfo, &pDisplayId20Info->cta.hdr10PlusInfo, sizeof(NVT_HDR10PLUS_INFO));
// If the CTA861 extension already existed, we need to sync the revision/basic_caps to the CTA block embedded in DID20
if (pEdidInfo->ext861.revision >= NVT_CEA861_REV_B)
@ -262,7 +269,10 @@ parseDisplayId20EDIDExtDataBlocks(
}
// this is the DisplayID20 Extension, so we need to copy the raw CTA data in DID20 into the EDID's CTA block
NVMISC_MEMCPY(&pEdidInfo->ext861, &pDisplayId20Info->cta.cta861_info, sizeof(NVT_EDID_CEA861_INFO));
if (pEdidInfo->ext861.revision == 0)
NVMISC_MEMCPY(&pEdidInfo->ext861, &pDisplayId20Info->cta.cta861_info, sizeof(NVT_EDID_CEA861_INFO));
else if (pEdidInfo->ext861_2.revision == 0)
NVMISC_MEMCPY(&pEdidInfo->ext861_2, &pDisplayId20Info->cta.cta861_info, sizeof(NVT_EDID_CEA861_INFO));
break;
case DISPLAYID_2_0_BLOCK_TYPE_DISPLAY_PARAM:

View File

@ -1123,6 +1123,8 @@ typedef struct tagNVT_VALID_EXTENDED_BLOCKS
NvU32 hdr10Plus : 1;
NvU32 SCDB : 1;
NvU32 HF_EEODB : 1;
NvU32 nvda_vsdb : 1;
NvU32 msft_vsdb : 1;
} NVT_VALID_EXTENDED_BLOCKS;

View File

@ -141,6 +141,20 @@ struct nvlink_link_change
enum nvlink_link_change_type change_type;
};
//
// Structure representing Nvlink Error Threshold
//
struct nvlink_link_error_threshold
{
NvU8 thresholdMan;
NvU8 thresholdExp;
NvU8 timescaleMan;
NvU8 timescaleExp;
NvBool bInterruptEn;
NvBool bUserConfig;
NvBool bInterruptTrigerred; // Error threshold interrupt generated
};
// nvlink link state
struct nvlink_link
{
@ -233,6 +247,8 @@ struct nvlink_link
//seed data for given nvlink
NvU32 seedData[NVLINK_MAX_SEED_BUFFER_SIZE];
struct nvlink_link_error_threshold errorThreshold;
};
// nvlink link handler ops

View File

@ -1047,7 +1047,7 @@ nvlink_core_powerdown_floorswept_conns_to_off
if (numConnsToShutdown != 0)
{
nvlink_core_powerdown_intranode_conns_from_active_to_off(connsToShutdown, numConnsToShutdown, 0);
nvlink_core_reset_intranode_conns(connsToShutdown, numConnsToShutdown, NVLINK_STATE_CHANGE_ASYNC);
nvlink_core_reset_intranode_conns(connsToShutdown, numConnsToShutdown, NVLINK_STATE_CHANGE_SYNC);
for (j = 0; j < numConnsToShutdown; ++j)
{
@ -1056,5 +1056,8 @@ nvlink_core_powerdown_floorswept_conns_to_off
}
}
nvlink_free(visitedConns);
nvlink_free(connsToShutdown);
return NVL_SUCCESS;
}

View File

@ -70,7 +70,18 @@ enum
/*!
* Set NPORT TPROD state
*/
RM_SOE_CORE_CMD_SET_NPORT_TPROD_STATE
RM_SOE_CORE_CMD_SET_NPORT_TPROD_STATE,
/*!
* Read VRs
* Needs to stay in sync with the chips_a defines
*/
RM_SOE_CORE_CMD_GET_VOLTAGE_VALUES,
/*!
* Init PLM2 protected registers
*/
RM_SOE_CORE_CMD_INIT_L2_STATE
};
// Timeout for SOE reset callback function
@ -132,6 +143,11 @@ typedef struct
NvU32 nport;
} RM_SOE_CORE_CMD_NPORT_TPROD_STATE;
typedef struct
{
NvU8 cmdType;
} RM_SOE_CORE_CMD_L2_STATE;
typedef union
{
NvU8 cmdType;
@ -141,5 +157,6 @@ typedef union
RM_SOE_CORE_CMD_NPORT_RESET nportReset;
RM_SOE_CORE_CMD_NPORT_STATE nportState;
RM_SOE_CORE_CMD_NPORT_TPROD_STATE nportTprodState;
RM_SOE_CORE_CMD_L2_STATE l2State;
} RM_SOE_CORE_CMD;
#endif // _SOECORE_H_

View File

@ -99,7 +99,7 @@ typedef enum nvswitch_get_info_index
NVSWITCH_GET_INFO_INDEX_PCI_DOMAIN = 0x300,
NVSWITCH_GET_INFO_INDEX_PCI_BUS,
NVSWITCH_GET_INFO_INDEX_PCI_DEVICE,
NVSWITCH_GET_INFO_INDEX_PCI_FUNCTION
NVSWITCH_GET_INFO_INDEX_PCI_FUNCTION,
/* See enum modification guidelines at the top of this file */
} NVSWITCH_GET_INFO_INDEX;
@ -3285,6 +3285,23 @@ typedef struct nvswitch_inband_pending_data_stats_params
NV_DECLARE_ALIGNED(NvU64 linkMask, 8);
} NVSWITCH_INBAND_PENDING_DATA_STATS_PARAMS;
/*
* CTRL_NVSWITCH_GET_BOARD_PART_NUMBER
*
* Control for querying the board part number
*
* Parameters:
* [out] data[]
* Byte vector of the board part number.
*/
#define NVSWITCH_BOARD_PART_NUMBER_SIZE_IN_BYTES 20
typedef struct nvswitch_get_board_part_number_vector
{
NvU8 data[NVSWITCH_BOARD_PART_NUMBER_SIZE_IN_BYTES];
} NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR;
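A hypothetical caller-side sketch of consuming the returned byte vector; only the struct and the size constant come from this commit, while the helper name and the assumption about termination are illustrative:

```c
#include <stdio.h>

/* Assume 'vec' was already filled in via CTRL_NVSWITCH_GET_BOARD_PART_NUMBER.
 * The vector is not guaranteed to be NUL-terminated, so bound the loop by the
 * size constant and stop early if a NUL appears. */
static void print_board_part_number(const NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *vec)
{
    int i;

    for (i = 0; i < NVSWITCH_BOARD_PART_NUMBER_SIZE_IN_BYTES && vec->data[i] != 0; i++)
    {
        putchar(vec->data[i]);
    }
    putchar('\n');
}
```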
#define NVSWITCH_GET_SW_INFO_COUNT_MAX 32
typedef enum nvswitch_get_sw_info_index
@ -3603,6 +3620,58 @@ typedef struct
NvU32 val; // in: register value to write
} NVSWITCH_REGISTER_WRITE;
typedef struct
{
NvU8 thresholdMan;
NvU8 thresholdExp;
NvU8 timescaleMan;
NvU8 timescaleExp;
NvBool bInterruptEn;
NvBool bInterruptTrigerred;
NvU32 flags;
} NVSWITCH_NVLINK_ERROR_THRESHOLD_VALUES;
#define NVSWITCH_NVLINK_ERROR_THRESHOLD_RESET 0x1
/*
* CTRL_NVSWITCH_SET_NVLINK_ERROR_THRESHOLD
*
* Set the Nvlink Error Rate Threshold.
*
* Parameters:
* linkMask [IN]
* A valid link mask for which we need to set the Error Threshold
*
* errorThreshold [IN]
* Threshold values, interrupt enable/disable and flags
*/
typedef struct
{
NV_DECLARE_ALIGNED(NvU64 link_mask, 8);
NVSWITCH_NVLINK_ERROR_THRESHOLD_VALUES errorThreshold[NVSWITCH_NVLINK_MAX_LINKS];
} NVSWITCH_SET_NVLINK_ERROR_THRESHOLD_PARAMS;
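A hedged usage sketch for the SET control above. The per-link indexing of errorThreshold[] and the mantissa/exponent decoding are assumptions, not stated in the diff; only the struct layout, NV_TRUE, nvswitch_os_memset, and the RESET flag come from this source:

```c
/* Fill the SET params to arm a threshold on link 0 with the interrupt enabled.
 * Assumption: errorThreshold[] is indexed by link number, and the threshold
 * decodes roughly as thresholdMan * 10^thresholdExp errors per
 * timescaleMan * 10^timescaleExp time units. */
static void fill_error_threshold_for_link0(NVSWITCH_SET_NVLINK_ERROR_THRESHOLD_PARAMS *p)
{
    nvswitch_os_memset(p, 0, sizeof(*p));

    p->link_mask = 1ULL << 0;                 /* only link 0 */
    p->errorThreshold[0].thresholdMan = 1;
    p->errorThreshold[0].thresholdExp = 2;    /* threshold on the order of 1e2 */
    p->errorThreshold[0].timescaleMan = 1;
    p->errorThreshold[0].timescaleExp = 0;
    p->errorThreshold[0].bInterruptEn  = NV_TRUE;
    p->errorThreshold[0].flags = 0;           /* NVSWITCH_NVLINK_ERROR_THRESHOLD_RESET presumably restores defaults */
}
```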
/*
* CTRL_NVSWITCH_GET_NVLINK_ERROR_THRESHOLD
*
* Control to query the NVLink Error Rate Threshold configuration.
*
* Parameters:
* linkMask [IN]
* A valid link mask for which we need to get the Error Threshold
*
* errorThreshold [OUT]
* Threshold values, interrupt enable/disable and flags
*/
typedef struct
{
NV_DECLARE_ALIGNED(NvU64 link_mask, 8);
NVSWITCH_NVLINK_ERROR_THRESHOLD_VALUES errorThreshold[NVSWITCH_NVLINK_MAX_LINKS];
} NVSWITCH_GET_NVLINK_ERROR_THRESHOLD_PARAMS;
#define REGISTER_RW_ENGINE_RAW 0x00
#define REGISTER_RW_ENGINE_CLKS 0x10
@ -3731,6 +3800,9 @@ typedef struct
#define CTRL_NVSWITCH_GET_INFOROM_VERSION 0x4F
#define CTRL_NVSWITCH_GET_ERR_INFO 0x50
#define CTRL_NVSWITCH_CLEAR_COUNTERS 0x51
#define CTRL_NVSWITCH_SET_NVLINK_ERROR_THRESHOLD 0x52
#define CTRL_NVSWITCH_GET_NVLINK_ERROR_THRESHOLD 0x53
#define CTRL_NVSWITCH_GET_BOARD_PART_NUMBER 0x54
#ifdef __cplusplus
}

View File

@ -23,6 +23,7 @@
#include "flcn/haldefs_flcn_nvswitch.h"
#include "flcn/flcn_nvswitch.h"
#include "rmflcncmdif_nvswitch.h"
#include "flcnifcmn.h"
@ -109,7 +110,10 @@ flcnQueueCmdPostBlocking
if (status != NV_OK)
{
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_COMMAND_QUEUE,
"Fatal, Failed to post command to SOE\n");
"Fatal, Failed to post command to SOE. Data {0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x}\n",
pCmd->cmdGen.hdr.unitId, pCmd->cmdGen.hdr.size, pCmd->cmdGen.hdr.ctrlFlags,
pCmd->cmdGen.hdr.seqNumId, pCmd->cmdGen.cmd, (NvU8)pCmd->cmdGen.cmd);
return status;
}
@ -117,7 +121,9 @@ flcnQueueCmdPostBlocking
if (status == NV_ERR_TIMEOUT)
{
NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_TIMEOUT,
"Fatal, Timed out while waiting for SOE command completion\n");
"Fatal, Timed out while waiting for SOE command completion. Data {0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x}\n",
pCmd->cmdGen.hdr.unitId, pCmd->cmdGen.hdr.size, pCmd->cmdGen.hdr.ctrlFlags,
pCmd->cmdGen.hdr.seqNumId, pCmd->cmdGen.cmd, (NvU8)pCmd->cmdGen.cmd);
flcnQueueCmdCancel(device, pFlcn, *pSeqDesc);
}

View File

@ -119,6 +119,7 @@
_op(NvlStatus, nvswitch_deassert_link_reset, (nvswitch_device *device, nvlink_link *link), _arch) \
_op(NvBool, nvswitch_is_soe_supported, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_init_soe, (nvswitch_device *device), _arch) \
_op(void, nvswitch_soe_init_l2_state, (nvswitch_device *device), _arch) \
_op(NvBool, nvswitch_is_inforom_supported, (nvswitch_device *device), _arch) \
_op(NvBool, nvswitch_is_spi_supported, (nvswitch_device *device), _arch) \
_op(NvBool, nvswitch_is_smbpbi_supported, (nvswitch_device *device), _arch) \
@ -147,6 +148,7 @@
_op(void, nvswitch_initialize_oms_state, (nvswitch_device *device, INFOROM_OMS_STATE *pOmsState), _arch) \
_op(NvlStatus, nvswitch_oms_inforom_flush, (nvswitch_device *device), _arch) \
_op(void, nvswitch_inforom_ecc_get_total_errors, (nvswitch_device *device, INFOROM_ECC_OBJECT *pEccGeneric, NvU64 *corCount, NvU64 *uncCount), _arch) \
_op(NvlStatus, nvswitch_inforom_load_obd, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_bbx_add_sxid, (nvswitch_device *device, NvU32 exceptionType, NvU32 data0, NvU32 data1, NvU32 data2), _arch) \
_op(NvlStatus, nvswitch_bbx_unload, (nvswitch_device *device), _arch) \
_op(NvlStatus, nvswitch_bbx_load, (nvswitch_device *device, NvU64 time_ns, NvU8 osType, NvU32 osVersion), _arch) \
@ -218,7 +220,10 @@
_op(NvlStatus, nvswitch_service_minion_link, (nvswitch_device *device, NvU32 link_id), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_sw_info, (nvswitch_device *device, NVSWITCH_GET_SW_INFO_PARAMS *p), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_err_info, (nvswitch_device *device, NVSWITCH_NVLINK_GET_ERR_INFO_PARAMS *ret), _arch) \
_op(NvlStatus, nvswitch_ctrl_clear_counters, (nvswitch_device *device, NVSWITCH_NVLINK_CLEAR_COUNTERS_PARAMS *ret), _arch)
_op(NvlStatus, nvswitch_ctrl_clear_counters, (nvswitch_device *device, NVSWITCH_NVLINK_CLEAR_COUNTERS_PARAMS *ret), _arch) \
_op(NvlStatus, nvswitch_ctrl_set_nvlink_error_threshold, (nvswitch_device *device, NVSWITCH_SET_NVLINK_ERROR_THRESHOLD_PARAMS *pParams), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_nvlink_error_threshold, (nvswitch_device *device, NVSWITCH_GET_NVLINK_ERROR_THRESHOLD_PARAMS *pParams), _arch) \
_op(NvlStatus, nvswitch_ctrl_get_board_part_number, (nvswitch_device *device, NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p), _arch)
#define NVSWITCH_HAL_FUNCTION_LIST_LS10(_op, _arch) \
_op(NvlStatus, nvswitch_launch_ALI, (nvswitch_device *device), _arch) \

View File

@ -74,8 +74,15 @@ struct inforom
struct
{
NvBool bValid;
NvU8 packedObject[INFOROM_OBD_OBJECT_V1_XX_PACKED_SIZE];
INFOROM_OBD_OBJECT_V1_XX object;
union {
NvU8 v1[INFOROM_OBD_OBJECT_V1_XX_PACKED_SIZE];
NvU8 v2[INFOROM_OBD_OBJECT_V2_XX_PACKED_SIZE];
} packedObject;
union {
INFOROM_OBJECT_HEADER_V1_00 header;
INFOROM_OBD_OBJECT_V1_XX v1;
INFOROM_OBD_OBJECT_V2_XX v2;
} object;
} OBD;
struct

View File

@ -108,6 +108,12 @@ nvswitch_oms_inforom_flush_lr10
struct nvswitch_device *device
);
NvlStatus
nvswitch_inforom_load_obd_lr10
(
nvswitch_device *device
);
NvlStatus
nvswitch_bbx_add_sxid_lr10
(

View File

@ -652,6 +652,7 @@ NvlStatus nvswitch_ctrl_get_sw_info_lr10(nvswitch_device *device, NVSWITCH_GET_S
void nvswitch_setup_link_system_registers_lr10(nvswitch_device *device, nvlink_link *link);
void nvswitch_load_link_disable_settings_lr10(nvswitch_device *device, nvlink_link *link);
NvBool nvswitch_is_smbpbi_supported_lr10(nvswitch_device *device);
NvlStatus nvswitch_ctrl_get_board_part_number_lr10(nvswitch_device *device, NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p);
NvlStatus nvswitch_ctrl_set_mc_rid_table_lr10(nvswitch_device *device, NVSWITCH_SET_MC_RID_TABLE_PARAMS *p);
NvlStatus nvswitch_ctrl_get_mc_rid_table_lr10(nvswitch_device *device, NVSWITCH_GET_MC_RID_TABLE_PARAMS *p);

View File

@ -62,4 +62,5 @@ void nvswitch_soe_unregister_events_lr10(nvswitch_device *device);
void nvswitch_therm_soe_callback_lr10(nvswitch_device *device, union RM_FLCN_MSG *pMsg,
void *pParams, NvU32 seqDesc, NV_STATUS status);
NvlStatus nvswitch_soe_register_event_callbacks_lr10(nvswitch_device *device);
void nvswitch_soe_init_l2_state_lr10(nvswitch_device *device);
#endif //_SOE_LR10_H_

View File

@ -79,6 +79,12 @@ nvswitch_inforom_ecc_get_total_errors_ls10
NvU64 *pUncorrectedTotal
);
NvlStatus
nvswitch_inforom_load_obd_ls10
(
nvswitch_device *device
);
NvlStatus
nvswitch_bbx_add_sxid_ls10
(

View File

@ -371,6 +371,7 @@ typedef struct
_op(XAL, ) \
_op(XPL, ) \
_op(XTL, ) \
_op(XTL_CONFIG, ) \
_op(SAW, ) \
_op(SOE, ) \
_op(SMR, ) \
@ -962,12 +963,14 @@ NvBool nvswitch_i2c_is_device_access_allowed_ls10(nvswitch_device *device, Nv
NvlStatus nvswitch_minion_get_ali_debug_registers_ls10(nvswitch_device *device, nvlink_link *link, NVSWITCH_MINION_ALI_DEBUG_REGISTERS *params);
void nvswitch_setup_link_system_registers_ls10(nvswitch_device *device, nvlink_link *link);
void nvswitch_load_link_disable_settings_ls10(nvswitch_device *device, nvlink_link *link);
void nvswitch_link_disable_interrupts_ls10(nvswitch_device *device, NvU32 link);
void nvswitch_execute_unilateral_link_shutdown_ls10(nvlink_link *link);
void nvswitch_init_dlpl_interrupts_ls10(nvlink_link *link);
NvlStatus nvswitch_reset_and_drain_links_ls10(nvswitch_device *device, NvU64 link_mask);
void nvswitch_service_minion_all_links_ls10(nvswitch_device *device);
NvlStatus nvswitch_ctrl_get_board_part_number_ls10(nvswitch_device *device, NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p);
//
// SU generated functions
@ -988,6 +991,8 @@ NvlStatus nvswitch_reset_and_drain_links_ls10(nvswitch_device *device, NvU64 lin
void nvswitch_service_minion_all_links_ls10(nvswitch_device *device);
NvBool nvswitch_is_inforom_supported_ls10(nvswitch_device *device);
void nvswitch_set_error_rate_threshold_ls10(nvlink_link *link, NvBool bIsDefault);
void nvswitch_configure_error_rate_threshold_interrupt_ls10(nvlink_link *link, NvBool bEnable);
#endif //_LS10_H_

View File

@ -42,5 +42,6 @@ void nvswitch_soe_unregister_events_ls10(nvswitch_device *device);
NvlStatus nvswitch_soe_register_event_callbacks_ls10(nvswitch_device *device);
NvlStatus nvswitch_soe_restore_nport_state_ls10(nvswitch_device *device, NvU32 nport);
NvlStatus nvswitch_soe_issue_nport_reset_ls10(nvswitch_device *device, NvU32 nport);
void nvswitch_soe_init_l2_state_ls10(nvswitch_device *device);
#endif //_SOE_LS10_H_

View File

@ -272,8 +272,8 @@ const NvU32 soe_ucode_data_lr10_dbg[] = {
0xa6b0001d, 0x240cf409, 0x001da03e, 0x0049190f, 0x009ff711, 0x00f802f8, 0xb50294b6, 0x00f804b9,
0xb602af92, 0xb9bc0294, 0xf400f8f9, 0x82f9d430, 0x301590b4, 0xc1b027e1, 0x0ad1b00b, 0x94b6f4bd,
0x0c91b002, 0x900149fe, 0x9fa04499, 0x20079990, 0x0b99929f, 0x95b29fa0, 0xa0049992, 0x9297b29f,
0x9fa00499, 0x0005dcdf, 0x90ffbf00, 0x4efe1499, 0xa0a6b201, 0x34ee909f, 0xb4b20209, 0x14bde9a0,
0x34bd84bd, 0x001eef3e, 0x277e6ab2, 0x49bf001a, 0x4bfea2b2, 0x014cfe01, 0x9044bb90, 0x95f94bcc,
0x9fa00499, 0x0005dcdf, 0x90ffbf00, 0x4efe1499, 0xa0a6b201, 0x34ee909f, 0xb4b20209, 0x84bde9a0,
0x14bd34bd, 0x001eef3e, 0x277e6ab2, 0x49bf001a, 0x4bfea2b2, 0x014cfe01, 0x9044bb90, 0x95f94bcc,
0xb31100b4, 0x008e0209, 0x9e0309b3, 0x010db300, 0x499800a8, 0xb27cb201, 0xfe5bb22a, 0xdd90014d,
0x3295f938, 0x0be0b40c, 0xa53ed4bd, 0x5fbf001e, 0xf9a6e9bf, 0x34381bf4, 0xe89827b0, 0x987fbf01,
0xb03302e9, 0xb0b40a00, 0x90b9bc0c, 0x1bf4f9a6, 0x1444df1e, 0xf9180000, 0x0094330c, 0x90f1b206,
@ -569,7 +569,7 @@ const NvU32 soe_ucode_data_lr10_dbg[] = {
0x328908f4, 0xfbfa324f, 0xbf02f971, 0xbcb0b2b9, 0xb9a6b0c9, 0xe41708f4, 0xbcffffd9, 0xfba6f09b,
0x980b08f4, 0xf9a60109, 0xf8050df4, 0xb2dc7202, 0x28d77eed, 0xb201fb00, 0x05ab98b9, 0xdeb2cfb2,
0xfd729cb2, 0x0042a97e, 0xf0fc00f8, 0xf9fc30f4, 0xbf62f9f0, 0x08e1b0b9, 0xd4b2a5b2, 0xa630c9bc,
0x1d08f439, 0xa6f0d3bc, 0x1508f4f3, 0xa601b998, 0x0d0cf4f9, 0x010124bd, 0x763efc06, 0x02f80043,
0x1d08f439, 0xa6f0d3bc, 0x1508f4f3, 0xa601b998, 0x0d0cf4f9, 0x24bd0101, 0x763efc06, 0x02f80043,
0x853e0101, 0x42bc0043, 0x0096b192, 0x060df401, 0x90010049, 0x96ff0399, 0x0b947e04, 0xb23bb200,
0xdd0c725a, 0x00001200, 0x7e3030bc, 0x320028d7, 0x00a433a1, 0x08b0b434, 0xb209c0b4, 0x1200da2d,
0x20bc0000, 0x01004e20, 0x0021367e, 0x0a00a033, 0x853e02f8, 0x00da0043, 0xbd000012, 0x01004cb4,
@ -2269,8 +2269,8 @@ const NvU32 soe_ucode_data_lr10_dbg[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xb32dc4cc, 0x58018cca, 0x7c52cad0, 0x4a5277fe, 0xa1f0af45, 0xc2521354, 0x427cca67, 0x3b102336,
0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0xcdd28e1e, 0x6000ae0f, 0x492dfb26, 0x422cf074,
0xb32dc4cc, 0x58018cca, 0x7c52cad0, 0x4a5277fe, 0x62f5c2c4, 0xc41c2f31, 0x9af0cbcc, 0xb7efe098,
0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0x5f24a73a, 0x55cea6d1, 0x59205a69, 0x18a31f2d,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,

View File

@ -272,8 +272,8 @@ const NvU32 soe_ucode_data_lr10_prd[] = {
0xa6b0001d, 0x240cf409, 0x001da03e, 0x0049190f, 0x009ff711, 0x00f802f8, 0xb50294b6, 0x00f804b9,
0xb602af92, 0xb9bc0294, 0xf400f8f9, 0x82f9d430, 0x301590b4, 0xc1b027e1, 0x0ad1b00b, 0x94b6f4bd,
0x0c91b002, 0x900149fe, 0x9fa04499, 0x20079990, 0x0b99929f, 0x95b29fa0, 0xa0049992, 0x9297b29f,
0x9fa00499, 0x0005dcdf, 0x90ffbf00, 0x4efe1499, 0xa0a6b201, 0x34ee909f, 0xb4b20209, 0x14bde9a0,
0x34bd84bd, 0x001eef3e, 0x277e6ab2, 0x49bf001a, 0x4bfea2b2, 0x014cfe01, 0x9044bb90, 0x95f94bcc,
0x9fa00499, 0x0005dcdf, 0x90ffbf00, 0x4efe1499, 0xa0a6b201, 0x34ee909f, 0xb4b20209, 0x84bde9a0,
0x14bd34bd, 0x001eef3e, 0x277e6ab2, 0x49bf001a, 0x4bfea2b2, 0x014cfe01, 0x9044bb90, 0x95f94bcc,
0xb31100b4, 0x008e0209, 0x9e0309b3, 0x010db300, 0x499800a8, 0xb27cb201, 0xfe5bb22a, 0xdd90014d,
0x3295f938, 0x0be0b40c, 0xa53ed4bd, 0x5fbf001e, 0xf9a6e9bf, 0x34381bf4, 0xe89827b0, 0x987fbf01,
0xb03302e9, 0xb0b40a00, 0x90b9bc0c, 0x1bf4f9a6, 0x1444df1e, 0xf9180000, 0x0094330c, 0x90f1b206,
@ -569,7 +569,7 @@ const NvU32 soe_ucode_data_lr10_prd[] = {
0x328908f4, 0xfbfa324f, 0xbf02f971, 0xbcb0b2b9, 0xb9a6b0c9, 0xe41708f4, 0xbcffffd9, 0xfba6f09b,
0x980b08f4, 0xf9a60109, 0xf8050df4, 0xb2dc7202, 0x28d77eed, 0xb201fb00, 0x05ab98b9, 0xdeb2cfb2,
0xfd729cb2, 0x0042a97e, 0xf0fc00f8, 0xf9fc30f4, 0xbf62f9f0, 0x08e1b0b9, 0xd4b2a5b2, 0xa630c9bc,
0x1d08f439, 0xa6f0d3bc, 0x1508f4f3, 0xa601b998, 0x0d0cf4f9, 0x010124bd, 0x763efc06, 0x02f80043,
0x1d08f439, 0xa6f0d3bc, 0x1508f4f3, 0xa601b998, 0x0d0cf4f9, 0x24bd0101, 0x763efc06, 0x02f80043,
0x853e0101, 0x42bc0043, 0x0096b192, 0x060df401, 0x90010049, 0x96ff0399, 0x0b947e04, 0xb23bb200,
0xdd0c725a, 0x00001200, 0x7e3030bc, 0x320028d7, 0x00a433a1, 0x08b0b434, 0xb209c0b4, 0x1200da2d,
0x20bc0000, 0x01004e20, 0x0021367e, 0x0a00a033, 0x853e02f8, 0x00da0043, 0xbd000012, 0x01004cb4,
@ -2269,8 +2269,8 @@ const NvU32 soe_ucode_data_lr10_prd[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xb32dc4cc, 0x58018cca, 0x7c52cad0, 0x4a5277fe, 0xa1f0af45, 0xc2521354, 0x427cca67, 0x3b102336,
0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0xcdd28e1e, 0x6000ae0f, 0x492dfb26, 0x422cf074,
0xb32dc4cc, 0x58018cca, 0x7c52cad0, 0x4a5277fe, 0x62f5c2c4, 0xc41c2f31, 0x9af0cbcc, 0xb7efe098,
0x705ea2e7, 0x0577e70f, 0xcf75f41f, 0xfe6e071a, 0x5f24a73a, 0x55cea6d1, 0x59205a69, 0x18a31f2d,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,

View File

@ -38,10 +38,7 @@ nvswitch_inforom_read_only_objects_load
return -NVL_ERR_NOT_SUPPORTED;
}
status = nvswitch_inforom_load_object(device, pInforom, "OBD",
INFOROM_OBD_OBJECT_V1_XX_FMT,
pInforom->OBD.packedObject,
&pInforom->OBD.object);
status = device->hal.nvswitch_inforom_load_obd(device);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR, "Failed to load OBD object, rc:%d\n",

View File

@ -493,7 +493,7 @@ _nvswitch_inforom_read_file
nvswitch_os_memset(pDmaBuf, 0, transferSize);
cmdSeqDesc = 0;
nvswitch_timeout_create(NVSWITCH_INTERVAL_750MSEC_IN_NS, &timeout);
nvswitch_timeout_create(NVSWITCH_INTERVAL_4SEC_IN_NS, &timeout);
status = flcnQueueCmdPostBlocking(device, pFlcn, (PRM_FLCN_CMD)&soeCmd, NULL, NULL,
SOE_RM_CMDQ_LOG_ID, &cmdSeqDesc, &timeout);
if (status != NV_OK)
@ -591,7 +591,8 @@ _nvswitch_inforom_write_file
}
cmdSeqDesc = 0;
nvswitch_timeout_create(NVSWITCH_INTERVAL_750MSEC_IN_NS, &timeout);
nvswitch_timeout_create(NVSWITCH_INTERVAL_4SEC_IN_NS, &timeout);
status = flcnQueueCmdPostBlocking(device, pFlcn, (PRM_FLCN_CMD)&soeCmd, NULL, NULL,
SOE_RM_CMDQ_LOG_ID, &cmdSeqDesc, &timeout);
if (status != NV_OK)
@ -899,6 +900,7 @@ done:
/*!
* Fill in the static identification data structure for use by the SOE
* to be passed on to a BMC over the I2CS interface.
* This is LR10 only, so no HAL is needed.
*
* @param[in] device switch device pointer
* @param[in] pInforom INFOROM object pointer
@ -916,13 +918,13 @@ nvswitch_inforom_read_static_data
#define _INFOROM_TO_SOE_STRING_COPY(obj, irName, soeName) \
{ \
NvU32 _i; \
ct_assert(NV_ARRAY_ELEMENTS(pInforom->obj.object.irName) <= \
ct_assert(NV_ARRAY_ELEMENTS(pInforom->obj.object.v1.irName) <= \
NV_ARRAY_ELEMENTS(pData->obj.soeName)); \
for (_i = 0; _i < NV_ARRAY_ELEMENTS(pInforom->obj.object.irName); ++_i) \
for (_i = 0; _i < NV_ARRAY_ELEMENTS(pInforom->obj.object.v1.irName); ++_i) \
{ \
pData->obj.soeName[_i] = (NvU8)(pInforom->obj.object.irName[_i] & 0xff); \
pData->obj.soeName[_i] = (NvU8)(pInforom->obj.object.v1.irName[_i] & 0xff); \
} \
if (NV_ARRAY_ELEMENTS(pInforom->obj.object.irName) < \
if (NV_ARRAY_ELEMENTS(pInforom->obj.object.v1.irName) < \
NV_ARRAY_ELEMENTS(pData->obj.soeName)) \
{ \
do \
@ -935,21 +937,25 @@ nvswitch_inforom_read_static_data
if (pInforom->OBD.bValid)
{
pData->OBD.bValid = NV_TRUE;
pData->OBD.buildDate = (NvU32)pInforom->OBD.object.buildDate;
nvswitch_inforom_string_copy(pInforom->OBD.object.marketingName,
pData->OBD.marketingName,
NV_ARRAY_ELEMENTS(pData->OBD.marketingName));
/* This should be called for LR10 (i.e., version 1.xx) only */
if ((pInforom->OBD.object.header.version & 0xFF) == 1)
{
pData->OBD.bValid = NV_TRUE;
pData->OBD.buildDate = (NvU32)pInforom->OBD.object.v1.buildDate;
nvswitch_inforom_string_copy(pInforom->OBD.object.v1.marketingName,
pData->OBD.marketingName,
NV_ARRAY_ELEMENTS(pData->OBD.marketingName));
nvswitch_inforom_string_copy(pInforom->OBD.object.serialNumber,
pData->OBD.serialNum,
NV_ARRAY_ELEMENTS(pData->OBD.serialNum));
nvswitch_inforom_string_copy(pInforom->OBD.object.v1.serialNumber,
pData->OBD.serialNum,
NV_ARRAY_ELEMENTS(pData->OBD.serialNum));
//
// boardPartNum requires special handling, as its size exceeds that
// of its InfoROM representation
//
_INFOROM_TO_SOE_STRING_COPY(OBD, productPartNumber, boardPartNum);
//
// boardPartNum requires special handling, as its size exceeds that
// of its InfoROM representation
//
_INFOROM_TO_SOE_STRING_COPY(OBD, productPartNumber, boardPartNum);
}
}
if (pInforom->OEM.bValid)

View File

@ -747,6 +747,25 @@ nvswitch_oms_set_device_disable_lr10
_oms_update_entry_checksum(pVerData->pNext);
}
NvlStatus
nvswitch_inforom_load_obd_lr10
(
nvswitch_device *device
)
{
struct inforom *pInforom = device->pInforom;
if (pInforom == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
return nvswitch_inforom_load_object(device, pInforom, "OBD",
INFOROM_OBD_OBJECT_V1_XX_FMT,
pInforom->OBD.packedObject.v1,
&pInforom->OBD.object.v1);
}
NvlStatus
nvswitch_bbx_add_sxid_lr10
(

View File

@ -872,7 +872,7 @@ nvswitch_corelib_set_dl_link_mode_lr10
NVSWITCH_PRINT(device, ERROR,
"%s: link #%d is still in reset, cannot change link state\n",
__FUNCTION__, link->linkNumber);
return NVL_ERR_INVALID_STATE;
return -NVL_ERR_INVALID_STATE;
}
val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _LINK_STATE);

View File

@ -6403,8 +6403,22 @@ nvswitch_ctrl_get_fom_values_lr10
{
NvlStatus status;
NvU32 statData;
nvlink_link *link;
NVSWITCH_ASSERT(p->linkId < nvswitch_get_num_links(device));
link = nvswitch_get_link(device, p->linkId);
if (link == NULL)
{
NVSWITCH_PRINT(device, ERROR, "%s: link #%d invalid\n",
__FUNCTION__, p->linkId);
return -NVL_BAD_ARGS;
}
if (nvswitch_is_link_in_reset(device, link))
{
NVSWITCH_PRINT(device, ERROR, "%s: link #%d is in reset\n",
__FUNCTION__, p->linkId);
return -NVL_ERR_INVALID_STATE;
}
status = nvswitch_minion_get_dl_status(device, p->linkId,
NV_NVLSTAT_TR16, 0, &statData);
@ -6791,6 +6805,8 @@ nvswitch_post_init_device_setup_lr10
NVSWITCH_PRINT(device, SETUP, "Skipping INFOROM init\n");
}
nvswitch_soe_init_l2_state(device);
return NVL_SUCCESS;
}
@ -7373,6 +7389,52 @@ nvswitch_ctrl_inband_read_data_lr10
return -NVL_ERR_NOT_SUPPORTED;
}
/*
* CTRL_NVSWITCH_GET_BOARD_PART_NUMBER
*/
NvlStatus
nvswitch_ctrl_get_board_part_number_lr10
(
nvswitch_device *device,
NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p
)
{
struct inforom *pInforom = device->pInforom;
INFOROM_OBD_OBJECT_V1_XX *pOBDObj;
int byteIdx;
if (pInforom == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
if (!pInforom->OBD.bValid)
{
NVSWITCH_PRINT(device, ERROR, "OBD data is not available\n");
return -NVL_ERR_GENERIC;
}
pOBDObj = &pInforom->OBD.object.v1;
if (sizeof(p->data) != sizeof(pOBDObj->productPartNumber)/sizeof(inforom_U008))
{
NVSWITCH_PRINT(device, ERROR,
"board part number available size %lu is not same as the request size %lu\n",
sizeof(pOBDObj->productPartNumber)/sizeof(inforom_U008), sizeof(p->data));
return -NVL_ERR_GENERIC;
}
nvswitch_os_memset(p, 0, sizeof(NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR));
/* Copy board type data */
for (byteIdx = 0; byteIdx < NVSWITCH_BOARD_PART_NUMBER_SIZE_IN_BYTES; byteIdx++)
{
p->data[byteIdx] =(NvU8)(pOBDObj->productPartNumber[byteIdx] & 0xFF);
}
return NVL_SUCCESS;
}
/*
* @brief: This function retrieves the NVLIPT public ID for a given global link idx
* @params[in] device reference to current nvswitch device
@ -7765,6 +7827,26 @@ nvswitch_ctrl_clear_counters_lr10
return status;
}
NvlStatus
nvswitch_ctrl_set_nvlink_error_threshold_lr10
(
nvswitch_device *device,
NVSWITCH_SET_NVLINK_ERROR_THRESHOLD_PARAMS *ret
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
static NvlStatus
nvswitch_ctrl_get_nvlink_error_threshold_lr10
(
nvswitch_device *device,
NVSWITCH_GET_NVLINK_ERROR_THRESHOLD_PARAMS *ret
)
{
return -NVL_ERR_NOT_SUPPORTED;
}
//
// This function auto creates the lr10 HAL connectivity from the NVSWITCH_INIT_HAL
// macro in haldef_nvswitch.h

View File

@ -872,6 +872,17 @@ nvswitch_init_soe_lr10
return status;
}
void
nvswitch_soe_init_l2_state_lr10
(
nvswitch_device *device
)
{
NVSWITCH_PRINT(device, WARN,
"%s: Function not implemented on lr10\n",
__FUNCTION__);
}
/**
* @brief SOE construct
*

View File

@ -1316,6 +1316,30 @@ nvswitch_process_discovery_ls10
NVSWITCH_ENG_VALID_LS10(device, NVLIPT, NVSWITCH_GET_LINK_ENG_INST(device, i, NVLIPT));
}
//
// Process common engine information
//
// Mark all entries as invalid
for (i = 0; i < NVSWITCH_ENGINE_ID_SIZE; i++)
{
chip_device->io.common[i].eng_name = "";
chip_device->io.common[i].eng_id = NVSWITCH_ENGINE_ID_SIZE; // Out of range
chip_device->io.common[i].eng_count = 0;
for (j = 0; j < NVSWITCH_ENGINE_DESCRIPTOR_UC_SIZE; j++)
{
chip_device->io.common[i].uc_addr[j] = NVSWITCH_BASE_ADDR_INVALID;
}
chip_device->io.common[i].bc_addr = NVSWITCH_BASE_ADDR_INVALID;
for (j = 0; j < NVSWITCH_ENGINE_DESCRIPTOR_MC_SIZE; j++)
{
chip_device->io.common[i].mc_addr[j] = NVSWITCH_BASE_ADDR_INVALID;
}
chip_device->io.common[i].mc_addr_count = 0;
}
NVSWITCH_LIST_LS10_ENGINES(NVSWITCH_PROCESS_COMMON)
//
// Disable engines requested by regkey "LinkEnableMask".
// All the links are enabled by default.
@ -1330,6 +1354,7 @@ nvswitch_process_discovery_ls10
NVSWITCH_PRINT(device, SETUP,
"%s: Disable link #%d\n",
__FUNCTION__, i);
nvswitch_link_disable_interrupts_ls10(device, i);
device->link[i].valid = NV_FALSE;
chip_device->engNPORT[i].valid = NV_FALSE;
chip_device->engNPORT_PERFMON[i].valid = NV_FALSE;
@ -1362,30 +1387,6 @@ nvswitch_process_discovery_ls10
}
}
//
// Process common engine information
//
// Mark all entries as invalid
for (i = 0; i < NVSWITCH_ENGINE_ID_SIZE; i++)
{
chip_device->io.common[i].eng_name = "";
chip_device->io.common[i].eng_id = NVSWITCH_ENGINE_ID_SIZE; // Out of range
chip_device->io.common[i].eng_count = 0;
for (j = 0; j < NVSWITCH_ENGINE_DESCRIPTOR_UC_SIZE; j++)
{
chip_device->io.common[i].uc_addr[j] = NVSWITCH_BASE_ADDR_INVALID;
}
chip_device->io.common[i].bc_addr = NVSWITCH_BASE_ADDR_INVALID;
for (j = 0; j < NVSWITCH_ENGINE_DESCRIPTOR_MC_SIZE; j++)
{
chip_device->io.common[i].mc_addr[j] = NVSWITCH_BASE_ADDR_INVALID;
}
chip_device->io.common[i].mc_addr_count = 0;
}
NVSWITCH_LIST_LS10_ENGINES(NVSWITCH_PROCESS_COMMON)
return retval;
}

View File

@ -277,8 +277,8 @@ _flcnDbgInfoCaptureRiscvPcTrace_LS10
PFLCN pFlcn
)
{
NvU32 ctl, ridx, widx, count, bufferSize;
NvBool full;
NvU32 ctl, ridx, widx, bufferSize;
NvBool bWasFull;
// Only supported on riscv
if (!UPROC_ENG_ARCH_FALCON_RISCV(pFlcn))
@ -290,23 +290,22 @@ _flcnDbgInfoCaptureRiscvPcTrace_LS10
return;
}
flcnRiscvRegWrite_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACECTL,
DRF_DEF(_PRISCV_RISCV, _TRACECTL, _MODE, _FULL) |
DRF_DEF(_PRISCV_RISCV, _TRACECTL, _UMODE_ENABLE, _TRUE) |
DRF_DEF(_PRISCV_RISCV, _TRACECTL, _MMODE_ENABLE, _TRUE) |
DRF_DEF(_PRISCV_RISCV, _TRACECTL, _INTR_ENABLE, _FALSE) |
DRF_DEF(_PRISCV_RISCV, _TRACECTL, _HIGH_THSHD, _INIT));
ctl = flcnRiscvRegRead_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACECTL);
if (ctl == 0)
{
NVSWITCH_PRINT(device, ERROR, "Trace buffer is disabled.\n");
return;
}
full = FLD_TEST_DRF_NUM(_PRISCV_RISCV, _TRACECTL,_FULL, 1, ctl);
if (full)
bWasFull = FLD_TEST_DRF_NUM(_PRISCV_RISCV, _TRACECTL,_FULL, 1, ctl);
if (bWasFull)
{
NVSWITCH_PRINT(device, INFO, "%s: Trace buffer full. Entries may have been lost.\n", __FUNCTION__);
}
// Reset and disable buffer, we don't need it during dump
// Reset and disable buffer, we don't need it during dump (and if core is running)
flcnRiscvRegWrite_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACECTL, 0);
widx = flcnRiscvRegRead_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACE_WTIDX);
@ -316,35 +315,48 @@ _flcnDbgInfoCaptureRiscvPcTrace_LS10
bufferSize = DRF_VAL(_PRISCV_RISCV, _TRACE_RDIDX, _MAXIDX, ridx);
ridx = DRF_VAL(_PRISCV_RISCV, _TRACE_RDIDX, _RDIDX, ridx);
count = widx > ridx ? widx - ridx : bufferSize + widx - ridx;
//
// Trace buffer is full when write idx == read idx and full is set,
// otherwise it is empty.
//
if (widx == ridx && !full)
count = 0;
if (count)
if (bufferSize > 0)
{
NvU32 entry;
NVSWITCH_PRINT(device, INFO, "%s: Tracebuffer has %d entries. Starting with latest.\n", __FUNCTION__, count);
switch (DRF_VAL(_PRISCV_RISCV, _TRACECTL, _MODE, ctl))
{
case NV_PRISCV_RISCV_TRACECTL_MODE_FULL:
NVSWITCH_PRINT(device, ERROR, "Tracebuffer is in full mode.\n");
break;
case NV_PRISCV_RISCV_TRACECTL_MODE_REDUCED:
NVSWITCH_PRINT(device, ERROR, "Tracebuffer is in reduced mode.\n");
break;
case NV_PRISCV_RISCV_TRACECTL_MODE_STACK:
NVSWITCH_PRINT(device, ERROR, "Tracebuffer is in stack mode.\n");
break;
default:
NVSWITCH_PRINT(device, ERROR, "Tracebuffer is in unknown mode.\n");
}
NVSWITCH_PRINT(device, ERROR, "Entries (most recent first):\n");
ridx = widx;
for (entry = 0; entry < count; ++entry)
for (entry = 0; entry < bufferSize; entry++)
{
NvU64 pc;
ridx = ridx > 0 ? ridx - 1 : bufferSize - 1;
flcnRiscvRegWrite_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACE_RDIDX, DRF_NUM(_PRISCV_RISCV, _TRACE_RDIDX, _RDIDX, ridx));
flcnRiscvRegWrite_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACE_RDIDX,
DRF_NUM(_PRISCV_RISCV, _TRACE_RDIDX, _RDIDX, ridx));
pc = flcnRiscvRegRead_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACEPC_HI);
pc = (pc << 32) | flcnRiscvRegRead_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACEPC_LO);
NVSWITCH_PRINT(device, INFO, "%s: TRACE[%d] = 0x%16llx\n", __FUNCTION__, entry, pc);
// Odd (non-mod-2) PC values are invalid here, so stop (this likely indicates an init-marker value)
if (NvU64_LO32(pc) % 2U != 0U)
{
break;
}
NVSWITCH_PRINT(device, ERROR, "%s: TRACE[%d] = 0x%16llx\n", __FUNCTION__, entry, pc);
}
}
else
{
NVSWITCH_PRINT(device, INFO, "%s: Trace buffer is empty.\n", __FUNCTION__);
}
// reset trace buffer
flcnRiscvRegWrite_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACE_RDIDX, 0);
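As a side note, the most-recent-first walk above relies on a wrap-around decrement of the read index. A minimal standalone sketch of that index step, assuming bufferSize > 0:

// Step the trace read index back by one slot, wrapping from 0 to
// bufferSize - 1. Starting from the write index, repeated calls visit
// entries most recent first, exactly as in the dump loop above.
static NvU32 trace_prev_idx(NvU32 idx, NvU32 bufferSize)
{
    return (idx > 0) ? (idx - 1) : (bufferSize - 1);
}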

View File

@ -108,6 +108,25 @@ nvswitch_inforom_ecc_get_total_errors_ls10
return;
}
NvlStatus
nvswitch_inforom_load_obd_ls10
(
nvswitch_device *device
)
{
struct inforom *pInforom = device->pInforom;
if (pInforom == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
return nvswitch_inforom_load_object(device, pInforom, "OBD",
INFOROM_OBD_OBJECT_V2_XX_FMT,
pInforom->OBD.packedObject.v2,
&pInforom->OBD.object.v2);
}
NvlStatus
nvswitch_bbx_add_sxid_ls10
(
@ -178,7 +197,7 @@ nvswitch_bbx_unload_ls10
NVSWITCH_TIMEOUT timeout;
pFlcn = device->pSoe->pFlcn;
nvswitch_timeout_create(NVSWITCH_INTERVAL_750MSEC_IN_NS, &timeout);
nvswitch_timeout_create(NVSWITCH_INTERVAL_4SEC_IN_NS, &timeout);
nvswitch_os_memset(&bbxCmd, 0, sizeof(bbxCmd));
bbxCmd.hdr.unitId = RM_SOE_UNIT_IFR;
@ -217,7 +236,7 @@ nvswitch_bbx_load_ls10
NVSWITCH_TIMEOUT timeout;
pFlcn = device->pSoe->pFlcn;
nvswitch_timeout_create(NVSWITCH_INTERVAL_750MSEC_IN_NS, &timeout);
nvswitch_timeout_create(NVSWITCH_INTERVAL_4SEC_IN_NS, &timeout);
nvswitch_os_memset(&bbxCmd, 0, sizeof(bbxCmd));
bbxCmd.hdr.unitId = RM_SOE_UNIT_IFR;
@ -254,4 +273,3 @@ nvswitch_bbx_get_sxid_ls10
{
return -NVL_ERR_NOT_SUPPORTED;
}

View File

@ -455,6 +455,8 @@ _nvswitch_initialize_nport_interrupts_ls10
nvswitch_device *device
)
{
// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
NvU32 val;
val =
@ -462,6 +464,7 @@ _nvswitch_initialize_nport_interrupts_ls10
DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 1) |
DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 1);
NVSWITCH_NPORT_BCAST_WR32_LS10(device, _NPORT, _ERR_CONTROL_COMMON_NPORT, val);
#endif // 0
_nvswitch_initialize_route_interrupts(device);
_nvswitch_initialize_ingress_interrupts(device);
@ -494,7 +497,10 @@ _nvswitch_initialize_nxbar_interrupts_ls10
DRF_NUM(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _INGRESS_SIDEBAND_PARITY_ERROR, 1) |
DRF_NUM(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _INGRESS_REDUCTION_PKT_ERROR, 1);
// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
NVSWITCH_BCAST_WR32_LS10(device, NXBAR, _NXBAR_TILE, _ERR_FATAL_INTR_EN, report_fatal);
#endif // 0
chip_device->intr_mask.tile.fatal = report_fatal;
chip_device->intr_mask.tile.nonfatal = 0;
@ -509,7 +515,10 @@ _nvswitch_initialize_nxbar_interrupts_ls10
DRF_NUM(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _INGRESS_BURST_GT_9_DATA_VC, 1) |
DRF_NUM(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _EGRESS_CDT_PARITY_ERROR, 1);
// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
NVSWITCH_BCAST_WR32_LS10(device, NXBAR, _NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, report_fatal);
#endif // 0
chip_device->intr_mask.tileout.fatal = report_fatal;
chip_device->intr_mask.tileout.nonfatal = 0;
@ -5341,6 +5350,10 @@ _nvswitch_emit_link_errors_nvldl_nonfatal_link_ls10
bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_SHORT_ERROR_RATE, 1);
if (nvswitch_test_flags(pending, bit))
{
// Disable further interrupts
nvlink_link *nvlink = nvswitch_get_link(device, link);
nvlink->errorThreshold.bInterruptTrigerred = NV_TRUE;
nvswitch_configure_error_rate_threshold_interrupt_ls10(nvlink, NV_FALSE);
NVSWITCH_REPORT_NONFATAL(_HW_DLPL_RX_SHORT_ERROR_RATE, "RX Short Error Rate");
}
}
@ -6295,18 +6308,24 @@ _nvswitch_service_nvlipt_link_nonfatal_ls10
NvU32 interruptingLinks = 0;
NvU32 lnkStatusChangeLinks = 0;
NvlStatus status;
NvU64 link_enable_mask;
link_enable_mask = ((NvU64)device->regkeys.link_enable_mask2 << 32 |
(NvU64)device->regkeys.link_enable_mask);
for (i = 0; i < NVSWITCH_LINKS_PER_NVLIPT_LS10; ++i)
{
globalLink = (instance * NVSWITCH_LINKS_PER_NVLIPT_LS10) + i;
if ((NVBIT64(globalLink) & link_enable_mask) == 0)
{
continue;
}
intrLink = NVSWITCH_LINK_RD32(device, globalLink, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0);
if(intrLink)
{
interruptingLinks |= NVBIT(i);
}
intrLink = NVSWITCH_LINK_RD32(device, globalLink, NVLIPT_LNK, _NVLIPT_LNK, _INTR_STATUS);
if(intrLink)

View File

@ -1070,17 +1070,16 @@ nvswitch_store_topology_information_ls10
}
void
nvswitch_init_dlpl_interrupts_ls10
nvswitch_set_error_rate_threshold_ls10
(
nvlink_link *link
nvlink_link *link,
NvBool bSetDefault
)
{
nvswitch_device *device = link->dev->pDevInfo;
NvU32 linkNumber = link->linkNumber;
NvU32 intrRegVal;
NvU32 crcRegVal;
NvU32 shortRateMask;
NvU32 crcShortRegkeyVal = device->regkeys.crc_bit_error_rate_short;
nvswitch_device *device = link->dev->pDevInfo;
NvU32 linkNumber = link->linkNumber;
NvU32 crcShortRegkeyVal = device->regkeys.crc_bit_error_rate_short;
NvU32 crcRegVal;
ct_assert(DRF_BASE(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_MAN) ==
DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_THRESHOLD_MAN));
@ -1099,6 +1098,124 @@ nvswitch_init_dlpl_interrupts_ls10
ct_assert(DRF_EXTENT(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_EXP) ==
DRF_EXTENT(NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_TIMESCALE_EXP));
crcRegVal = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL,
_NVLDL_RX, _ERROR_RATE_CTRL);
//
// Case 1: When a Regkey is provided. We use it to calculate crcRegVal.
//
// Case 2: When the bSetDefault variable is set to NV_FALSE. This can happen
// when a client/application such as NSCQ provides specific values for
// the error threshold. In this case we use those values to calculate crcRegVal.
//
// Case 3: In all other cases, we want the default values to be used, which are
// provided in Bug 3365481.
//
if(crcShortRegkeyVal != NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_DEFAULT)
{
NvU32 shortRateMask;
shortRateMask = DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_MAN) |
DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_EXP) |
DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_MAN) |
DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_EXP);
crcRegVal &= ~shortRateMask;
crcRegVal |= crcShortRegkeyVal;
link->errorThreshold.bUserConfig = NV_FALSE;
link->errorThreshold.bInterruptTrigerred = NV_FALSE;
}
else if (!bSetDefault)
{
crcRegVal = FLD_SET_DRF_NUM(_NVLDL_RX, _ERROR_RATE_CTRL, _SHORT_THRESHOLD_MAN,
link->errorThreshold.thresholdMan,
crcRegVal);
crcRegVal = FLD_SET_DRF_NUM(_NVLDL_RX, _ERROR_RATE_CTRL, _SHORT_THRESHOLD_EXP,
link->errorThreshold.thresholdExp,
crcRegVal);
crcRegVal = FLD_SET_DRF_NUM(_NVLDL_RX, _ERROR_RATE_CTRL, _SHORT_TIMESCALE_MAN,
link->errorThreshold.timescaleMan,
crcRegVal);
crcRegVal = FLD_SET_DRF_NUM(_NVLDL_RX, _ERROR_RATE_CTRL, _SHORT_TIMESCALE_EXP,
link->errorThreshold.timescaleExp,
crcRegVal);
}
else
{
//
// Please refer to Bug 3365481 for details about the CRC_BIT_ERROR_RATE_SHORT
// default values used below.
//
link->errorThreshold.thresholdMan =
NV_NVLDL_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_MAN_DEFAULT;
link->errorThreshold.thresholdExp =
NV_NVLDL_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_EXP_DEFAULT;
link->errorThreshold.timescaleMan =
NV_NVLDL_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_MAN_DEFAULT;
link->errorThreshold.timescaleExp =
NV_NVLDL_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_EXP_DEFAULT;
link->errorThreshold.bUserConfig = NV_FALSE;
link->errorThreshold.bInterruptTrigerred = NV_FALSE;
crcRegVal = FLD_SET_DRF_NUM(_NVLDL_RX, _ERROR_RATE_CTRL, _SHORT_THRESHOLD_MAN,
link->errorThreshold.thresholdMan,
crcRegVal);
crcRegVal = FLD_SET_DRF_NUM(_NVLDL_RX, _ERROR_RATE_CTRL, _SHORT_THRESHOLD_EXP,
link->errorThreshold.thresholdExp,
crcRegVal);
crcRegVal = FLD_SET_DRF_NUM(_NVLDL_RX, _ERROR_RATE_CTRL, _SHORT_TIMESCALE_MAN,
link->errorThreshold.timescaleMan,
crcRegVal);
crcRegVal = FLD_SET_DRF_NUM(_NVLDL_RX, _ERROR_RATE_CTRL, _SHORT_TIMESCALE_EXP,
link->errorThreshold.timescaleExp,
crcRegVal);
}
NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL,
_NVLDL_RX, _ERROR_RATE_CTRL, crcRegVal);
}
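As an illustration of case 1 above, a regkey value could be assembled from the same DRF fields that the ct_assert()s tie to the NVLDL register layout. This is a sketch only; the mantissa/exponent numbers are placeholders, not recommended settings.

// Illustration only: build a non-default CRC_BIT_ERROR_RATE_SHORT regkey value
// (case 1 above) from threshold/timescale mantissa and exponent components.
NvU32 regkeyVal =
    DRF_NUM(_SWITCH_REGKEY, _CRC_BIT_ERROR_RATE_SHORT, _THRESHOLD_MAN, 0x1) |
    DRF_NUM(_SWITCH_REGKEY, _CRC_BIT_ERROR_RATE_SHORT, _THRESHOLD_EXP, 0x2) |
    DRF_NUM(_SWITCH_REGKEY, _CRC_BIT_ERROR_RATE_SHORT, _TIMESCALE_MAN, 0x1) |
    DRF_NUM(_SWITCH_REGKEY, _CRC_BIT_ERROR_RATE_SHORT, _TIMESCALE_EXP, 0x3);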
void
nvswitch_configure_error_rate_threshold_interrupt_ls10
(
nvlink_link *link,
NvBool bEnable
)
{
nvswitch_device *device = link->dev->pDevInfo;
NvU32 linkNumber = link->linkNumber;
NvU32 intrRegVal;
link->errorThreshold.bInterruptEn = bEnable;
intrRegVal = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL,
_NVLDL_TOP, _INTR_NONSTALL_EN);
if (bEnable)
{
link->errorThreshold.bInterruptTrigerred = NV_FALSE;
intrRegVal = FLD_SET_DRF_NUM(_NVLDL_TOP, _INTR_NONSTALL_EN, _RX_SHORT_ERROR_RATE, 1,
intrRegVal);
}
else
{
intrRegVal = FLD_SET_DRF_NUM(_NVLDL_TOP, _INTR_NONSTALL_EN, _RX_SHORT_ERROR_RATE, 0,
intrRegVal);
}
NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL,
_NVLDL_TOP, _INTR_NONSTALL_EN, intrRegVal);
}
void
nvswitch_init_dlpl_interrupts_ls10
(
nvlink_link *link
)
{
nvswitch_device *device = link->dev->pDevInfo;
NvU32 linkNumber = link->linkNumber;
// W1C any stale state.
NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TOP, _INTR, 0xffffffff);
NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL, _NVLDL_TOP, _INTR_SW2, 0xffffffff);
@ -1136,44 +1253,8 @@ nvswitch_init_dlpl_interrupts_ls10
DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _LTSSM_PROTOCOL, _DISABLE) |
DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _MINION_REQUEST, _DISABLE));
intrRegVal = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL,
_NVLDL_TOP, _INTR_NONSTALL_EN);
crcRegVal = NVSWITCH_LINK_RD32_LS10(device, linkNumber, NVLDL,
_NVLDL_RX, _ERROR_RATE_CTRL);
//
// Enable RX error rate short interrupt.
// Please refer to Bug 3365481 for details about the CRC_BIT_ERROR_RATE_SHORT
// values used below.
//
// Enable RX error rate short interrupt if the regkey is set
if (crcShortRegkeyVal != NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_DEFAULT)
{
shortRateMask = DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_MAN) |
DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_EXP) |
DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_MAN) |
DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_EXP);
intrRegVal |= DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _RX_SHORT_ERROR_RATE, _ENABLE);
crcRegVal &= ~shortRateMask;
crcRegVal |= crcShortRegkeyVal;
}
else
{
shortRateMask = DRF_SHIFTMASK(NV_NVLDL_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_MAN) |
DRF_SHIFTMASK(NV_NVLDL_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_EXP) |
DRF_SHIFTMASK(NV_NVLDL_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_MAN) |
DRF_SHIFTMASK(NV_NVLDL_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_EXP);
intrRegVal |= DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _RX_SHORT_ERROR_RATE, _ENABLE);
crcRegVal &= ~shortRateMask;
}
NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL,
_NVLDL_TOP, _INTR_NONSTALL_EN, intrRegVal);
NVSWITCH_LINK_WR32_LS10(device, linkNumber, NVLDL,
_NVLDL_RX, _ERROR_RATE_CTRL, crcRegVal);
nvswitch_set_error_rate_threshold_ls10(link, NV_TRUE);
nvswitch_configure_error_rate_threshold_interrupt_ls10(link, NV_TRUE);
}
static NvU32
@ -1318,12 +1399,12 @@ nvswitch_execute_unilateral_link_shutdown_ls10
)
{
nvswitch_device *device = link->dev->pDevInfo;
NvlStatus status = NVL_SUCCESS;
NvlStatus status = NVL_SUCCESS;
NvU32 retry_count = 3;
NvU32 link_state_request;
NvU32 link_state;
NvU32 stat_data;
NvU32 link_intr_subcode;
NvU32 stat_data = 0;
NvU32 link_intr_subcode = 0;
if (!NVSWITCH_IS_LINK_ENG_VALID_LS10(device, NVLDL, link->linkNumber))
{

View File

@ -65,7 +65,7 @@
#include "ls10/minion_nvlink_defines_public_ls10.h"
#define NVSWITCH_IFR_MIN_BIOS_VER_LS10 0x9610170000ull
#define NVSWITCH_SMBPBI_MIN_BIOS_VER_LS10 0x9610170000ull
#define NVSWITCH_SMBPBI_MIN_BIOS_VER_LS10 0x9610220000ull
void *
nvswitch_alloc_chipdevice_ls10
@ -1091,8 +1091,8 @@ _nvswitch_portstat_reset_latency_counters_ls10
/*
* Disable interrupts coming from NPG & NVLW blocks.
*/
static void
_nvswitch_link_disable_interrupts_ls10
void
nvswitch_link_disable_interrupts_ls10
(
nvswitch_device *device,
NvU32 link
@ -1434,7 +1434,7 @@ nvswitch_reset_and_drain_links_ls10
//
// Step 2.0 : Disable NPG & NVLW interrupts
//
_nvswitch_link_disable_interrupts_ls10(device, link);
nvswitch_link_disable_interrupts_ls10(device, link);
//
// Step 3.0 :
@ -1575,6 +1575,7 @@ nvswitch_set_nport_port_config_ls10
)
{
NvU32 val;
NvlStatus status = NVL_SUCCESS;
if (p->requesterLinkID >= NVBIT(
DRF_SIZE(NV_NPORT_REQLINKID_REQROUTINGID) +
@ -1624,7 +1625,7 @@ nvswitch_set_nport_port_config_ls10
if (p->type == CONNECT_TRUNK_SWITCH)
{
if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device))
if (!nvswitch_is_soe_supported(device))
{
// Set trunk-specific settings (TPROD) on PRE-SILICON
@ -1773,7 +1774,13 @@ nvswitch_set_nport_port_config_ls10
else
{
// Set trunk specific settings (TPROD) in SOE
// nvswitch_set_nport_tprod_state_ls10(device, p->portNum);
status = nvswitch_set_nport_tprod_state_ls10(device, p->portNum);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR,
"%s: Failed to set NPORT TPROD state\n",
__FUNCTION__);
}
}
}
else
@ -1784,7 +1791,7 @@ nvswitch_set_nport_port_config_ls10
NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _NPORT, _SRC_PORT_TYPE0, NvU64_LO32(p->trunkSrcMask));
NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _NPORT, _SRC_PORT_TYPE1, NvU64_HI32(p->trunkSrcMask));
return NVL_SUCCESS;
return status;
}
/*
@ -4216,6 +4223,8 @@ _nvswitch_init_nport_ecc_control_ls10
nvswitch_device *device
)
{
// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
// Set ingress ECC error limits
NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER,
DRF_NUM(_INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0));
@ -4274,6 +4283,7 @@ _nvswitch_init_nport_ecc_control_ls10
NVSWITCH_ENG_WR32(device, NPORT, _BCAST, 0, _SOURCETRACK, _ERR_ECC_CTRL,
DRF_DEF(_SOURCETRACK, _ERR_ECC_CTRL, _CREQ_TCEN0_CRUMBSTORE_ECC_ENABLE, __PROD));
#endif // 0
}
NvlStatus
@ -4306,6 +4316,8 @@ nvswitch_init_nport_ls10
_nvswitch_init_nport_ecc_control_ls10(device);
// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
if (DRF_VAL(_SWITCH_REGKEY, _ATO_CONTROL, _DISABLE, device->regkeys.ato_control) ==
NV_SWITCH_REGKEY_ATO_CONTROL_DISABLE_TRUE)
{
@ -4329,7 +4341,7 @@ nvswitch_init_nport_ls10
DRF_NUM(_TSTATE, _ATO_TIMER_LIMIT, _LIMIT, timeout));
}
}
#endif // 0
if (DRF_VAL(_SWITCH_REGKEY, _STO_CONTROL, _DISABLE, device->regkeys.sto_control) ==
NV_SWITCH_REGKEY_STO_CONTROL_DISABLE_TRUE)
{
@ -4366,17 +4378,7 @@ nvswitch_init_nxbar_ls10
nvswitch_device *device
)
{
NvlStatus status = NVL_SUCCESS;
status = nvswitch_apply_prod_nxbar_ls10(device);
if (status != NVL_SUCCESS)
{
NVSWITCH_PRINT(device, ERROR,
"%s: NXBAR PRODs failed\n",
__FUNCTION__);
return status;
}
NVSWITCH_PRINT(device, WARN, "%s: Function not implemented\n", __FUNCTION__);
return NVL_SUCCESS;
}
@ -5314,6 +5316,52 @@ nvswitch_ctrl_inband_read_data_ls10
return nvswitch_inband_read_data(device, p->buffer, p->linkId, &p->dataSize);
}
/*
* CTRL_NVSWITCH_GET_BOARD_PART_NUMBER
*/
NvlStatus
nvswitch_ctrl_get_board_part_number_ls10
(
nvswitch_device *device,
NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p
)
{
struct inforom *pInforom = device->pInforom;
INFOROM_OBD_OBJECT_V2_XX *pOBDObj;
int byteIdx;
if (pInforom == NULL)
{
return -NVL_ERR_NOT_SUPPORTED;
}
if (!pInforom->OBD.bValid)
{
NVSWITCH_PRINT(device, ERROR, "OBD data is not available\n");
return -NVL_ERR_GENERIC;
}
pOBDObj = &pInforom->OBD.object.v2;
if (sizeof(p->data) != sizeof(pOBDObj->productPartNumber)/sizeof(inforom_U008))
{
NVSWITCH_PRINT(device, ERROR,
"board part number available size %lu is not same as the request size %lu\n",
sizeof(pOBDObj->productPartNumber)/sizeof(inforom_U008), sizeof(p->data));
return -NVL_ERR_GENERIC;
}
nvswitch_os_memset(p, 0, sizeof(NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR));
/* Copy board type data */
for (byteIdx = 0; byteIdx < NVSWITCH_BOARD_PART_NUMBER_SIZE_IN_BYTES; byteIdx++)
{
p->data[byteIdx] = (NvU8)(pOBDObj->productPartNumber[byteIdx] & 0xFF);
}
return NVL_SUCCESS;
}
NvlStatus
nvswitch_ctrl_get_nvlink_lp_counters_ls10
(
@ -5466,6 +5514,103 @@ nvswitch_ctrl_clear_counters_ls10
return status;
}
NvlStatus
nvswitch_ctrl_set_nvlink_error_threshold_ls10
(
nvswitch_device *device,
NVSWITCH_SET_NVLINK_ERROR_THRESHOLD_PARAMS *pParams
)
{
nvlink_link *link;
NvU8 i;
FOR_EACH_INDEX_IN_MASK(64, i, pParams->link_mask)
{
link = nvswitch_get_link(device, i);
if (link == NULL)
{
continue;
}
if (pParams->errorThreshold[link->linkNumber].flags & NVSWITCH_NVLINK_ERROR_THRESHOLD_RESET)
{
link->errorThreshold.bUserConfig = NV_FALSE;
// Disable the interrupt
nvswitch_configure_error_rate_threshold_interrupt_ls10(link, NV_FALSE);
// Set to default value
nvswitch_set_error_rate_threshold_ls10(link, NV_TRUE);
// Enable the interrupt
nvswitch_configure_error_rate_threshold_interrupt_ls10(link, NV_TRUE);
}
else
{
link->errorThreshold.thresholdMan =
pParams->errorThreshold[link->linkNumber].thresholdMan;
link->errorThreshold.thresholdExp =
pParams->errorThreshold[link->linkNumber].thresholdExp;
link->errorThreshold.timescaleMan =
pParams->errorThreshold[link->linkNumber].timescaleMan;
link->errorThreshold.timescaleExp =
pParams->errorThreshold[link->linkNumber].timescaleExp;
link->errorThreshold.bInterruptEn =
pParams->errorThreshold[link->linkNumber].bInterruptEn;
link->errorThreshold.bUserConfig = NV_TRUE;
// Disable the interrupt
nvswitch_configure_error_rate_threshold_interrupt_ls10(link, NV_FALSE);
// Set the Error threshold
nvswitch_set_error_rate_threshold_ls10(link, NV_FALSE);
// Configure the interrupt
nvswitch_configure_error_rate_threshold_interrupt_ls10(link,
link->errorThreshold.bInterruptEn);
}
}
FOR_EACH_INDEX_IN_MASK_END;
return NVL_SUCCESS;
}
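For context, a hedged sketch of how a caller might populate the parameter struct before issuing this control. The field names come from the code above; the numeric values and the link index are illustrative.

// Illustration only: request a custom error threshold on link 0 with the
// interrupt enabled. All numeric values are placeholders.
NVSWITCH_SET_NVLINK_ERROR_THRESHOLD_PARAMS params;

nvswitch_os_memset(&params, 0, sizeof(params));
params.link_mask = NVBIT64(0);
params.errorThreshold[0].thresholdMan = 1;
params.errorThreshold[0].thresholdExp = 2;
params.errorThreshold[0].timescaleMan = 1;
params.errorThreshold[0].timescaleExp = 3;
params.errorThreshold[0].bInterruptEn = NV_TRUE;
// Or, to restore the defaults for the link instead:
// params.errorThreshold[0].flags = NVSWITCH_NVLINK_ERROR_THRESHOLD_RESET;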
NvlStatus
nvswitch_ctrl_get_nvlink_error_threshold_ls10
(
nvswitch_device *device,
NVSWITCH_GET_NVLINK_ERROR_THRESHOLD_PARAMS *pParams
)
{
nvlink_link *link;
NvU8 i;
FOR_EACH_INDEX_IN_MASK(64, i, pParams->link_mask)
{
link = nvswitch_get_link(device, i);
if (link == NULL)
{
continue;
}
pParams->errorThreshold[link->linkNumber].thresholdMan =
link->errorThreshold.thresholdMan;
pParams->errorThreshold[link->linkNumber].thresholdExp =
link->errorThreshold.thresholdExp;
pParams->errorThreshold[link->linkNumber].timescaleMan =
link->errorThreshold.timescaleMan;
pParams->errorThreshold[link->linkNumber].timescaleExp =
link->errorThreshold.timescaleExp;
pParams->errorThreshold[link->linkNumber].bInterruptEn =
link->errorThreshold.bInterruptEn;
pParams->errorThreshold[link->linkNumber].bInterruptTrigerred =
link->errorThreshold.bInterruptTrigerred;
}
FOR_EACH_INDEX_IN_MASK_END;
return NVL_SUCCESS;
}
NvlStatus
nvswitch_read_vbios_link_entries_ls10
(

View File

@ -337,6 +337,13 @@ nvswitch_set_nport_tprod_state_ls10
NVSWITCH_TIMEOUT timeout;
RM_SOE_CORE_CMD_NPORT_TPROD_STATE *nportTprodState;
if (!NVSWITCH_ENG_IS_VALID(device, NPORT, nport))
{
NVSWITCH_PRINT(device, ERROR, "%s: NPORT #%d invalid\n",
__FUNCTION__, nport);
return -NVL_BAD_ARGS;
}
nvswitch_os_memset(&cmd, 0, sizeof(cmd));
cmd.hdr.unitId = RM_SOE_UNIT_CORE;
@ -364,6 +371,56 @@ nvswitch_set_nport_tprod_state_ls10
return NVL_SUCCESS;
}
/*
* @Brief : INIT L2 register state in SOE
*
* @param[in] device
*/
void
nvswitch_soe_init_l2_state_ls10
(
nvswitch_device *device
)
{
FLCN *pFlcn;
NvU32 cmdSeqDesc = 0;
NV_STATUS status;
RM_FLCN_CMD_SOE cmd;
NVSWITCH_TIMEOUT timeout;
RM_SOE_CORE_CMD_L2_STATE *pL2State;
if (!nvswitch_is_soe_supported(device))
{
NVSWITCH_PRINT(device, INFO, "%s: SOE is not supported. skipping!\n",
__FUNCTION__);
return;
}
pFlcn = device->pSoe->pFlcn;
nvswitch_os_memset(&cmd, 0, sizeof(cmd));
cmd.hdr.unitId = RM_SOE_UNIT_CORE;
cmd.hdr.size = sizeof(cmd);
pL2State = &cmd.cmd.core.l2State;
pL2State->cmdType = RM_SOE_CORE_CMD_INIT_L2_STATE;
nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS, &timeout);
status = flcnQueueCmdPostBlocking(device, pFlcn,
(PRM_FLCN_CMD)&cmd,
NULL, // pMsg
NULL, // pPayload
SOE_RM_CMDQ_LOG_ID,
&cmdSeqDesc,
&timeout);
if (status != NV_OK)
{
NVSWITCH_PRINT(device, ERROR, "%s: Failed to send INIT_L2_STATE command to SOE, status 0x%x\n",
__FUNCTION__, status);
}
}
/*
* @Brief : Init sequence for SOE FSP RISCV image
*
@ -424,6 +481,14 @@ nvswitch_init_soe_ls10
return status;
}
//
// Set TRACEPC to stack mode for better ucode trace
// In Vulcan CR firmware, this is set to reduced mode in the SOE's manifest
//
data = flcnRiscvRegRead_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACECTL);
data = FLD_SET_DRF(_PRISCV, _RISCV_TRACECTL, _MODE, _STACK, data);
flcnRiscvRegWrite_HAL(device, pFlcn, NV_PRISCV_RISCV_TRACECTL, data);
// Sanity-check the command and message queues as a final step
if (_nvswitch_soe_send_test_cmd(device) != NV_OK)
{

View File

@ -706,8 +706,8 @@ nvswitch_apply_prod_nxbar_ls10
nvswitch_device *device
)
{
// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
// .NXBAR PROD value application
NVSWITCH_ENG_WR32(device, TILEOUT, _BCAST, 0, _NXBAR_TILEOUT, _CTRL0,
@ -724,6 +724,7 @@ nvswitch_apply_prod_nxbar_ls10
DRF_DEF(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _INGRESS_BURST_GT_9_DATA_VC, __PROD) |
DRF_DEF(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _INGRESS_NON_BURSTY_PKT, __PROD) |
DRF_DEF(_NXBAR_TILEOUT, _ERR_FATAL_INTR_EN, _INGRESS_NON_STICKY_PKT, __PROD));
#endif // 0
NVSWITCH_ENG_WR32(device, TILEOUT, _BCAST, 0, _NXBAR_TILEOUT, _PRI_NXBAR_TILEOUT_CG,
DRF_DEF(_NXBAR_TILEOUT, _PRI_NXBAR_TILEOUT_CG, _DI_DT_SKEW_VAL, __PROD) |
@ -742,7 +743,9 @@ nvswitch_apply_prod_nxbar_ls10
NVSWITCH_ENG_WR32(device, TILEOUT, _BCAST, 0, _NXBAR_TILEOUT, _PRI_NXBAR_TILEOUT_CG1,
DRF_DEF(_NXBAR_TILEOUT, _PRI_NXBAR_TILEOUT_CG1, _MONITOR_CG_EN, __PROD));
// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
NVSWITCH_ENG_WR32(device, TILE, _BCAST, 0, _NXBAR_TILE, _CTRL0,
DRF_DEF(_NXBAR_TILE, _CTRL0, _MULTI_VALID_XFN_CTRL, _ENABLE) |
DRF_DEF(_NXBAR_TILE, _CTRL0, _PARTIAL_RAM_WR_CTRL, _ENABLE) |
@ -760,7 +763,7 @@ nvswitch_apply_prod_nxbar_ls10
DRF_DEF(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _INGRESS_PKT_PARITY_ERROR, __PROD) |
DRF_DEF(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _INGRESS_REDUCTION_PKT_ERROR, __PROD) |
DRF_DEF(_NXBAR_TILE, _ERR_FATAL_INTR_EN, _INGRESS_SIDEBAND_PARITY_ERROR, __PROD));
#endif // 0
NVSWITCH_ENG_WR32(device, TILE, _BCAST, 0, _NXBAR_TILE, _PRI_NXBAR_TILE_CG,
DRF_DEF(_NXBAR_TILE, _PRI_NXBAR_TILE_CG, _DI_DT_SKEW_VAL, __PROD) |
DRF_DEF(_NXBAR_TILE, _PRI_NXBAR_TILE_CG, _HALT_CG_EN, __PROD) |
@ -801,7 +804,8 @@ nvswitch_nvs_top_prod_ls10
NvU32 i;
// .NVS_TOP PROD application
// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
NVSWITCH_ENG_WR32(device, CLKS_P0, , 0, _CLOCK_NVSW_PRT, _NVLINK_UPHY0_PLL0_SLCG,
DRF_DEF(_CLOCK_NVSW_PRT, _NVLINK_UPHY0_PLL0_SLCG, _CFGSM, __PROD));
@ -813,7 +817,7 @@ nvswitch_nvs_top_prod_ls10
NVSWITCH_ENG_WR32(device, CLKS_P0, , 3, _CLOCK_NVSW_PRT, _NVLINK_UPHY0_PLL0_SLCG,
DRF_DEF(_CLOCK_NVSW_PRT, _NVLINK_UPHY0_PLL0_SLCG, _CFGSM, __PROD));
#endif // 0
NVSWITCH_ENG_WR32(device, GIN, , 0, _CTRL, _PRI_CTRL_CG1,
DRF_DEF(_CTRL, _PRI_CTRL_CG1, _SLCG_CTRLPRI, __PROD) |
DRF_DEF(_CTRL, _PRI_CTRL_CG1, _SLCG_MSIX, __PROD));
@ -855,16 +859,21 @@ nvswitch_nvs_top_prod_ls10
NVSWITCH_ENG_WR32(device, PTIMER, , 0, _PTIMER, _PRI_TMR_CG1,
DRF_DEF(_PTIMER, _PRI_TMR_CG1, _MONITOR_CG_EN, __PROD) |
DRF_DEF(_PTIMER, _PRI_TMR_CG1, _SLCG, __PROD));
// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
NVSWITCH_ENG_WR32(device, SAW, , 0, _NVLSAW, _CTRL_CLOCK_GATING,
DRF_DEF(_NVLSAW, _CTRL_CLOCK_GATING, _CG1_SLCG_PCIE, __PROD) |
DRF_DEF(_NVLSAW, _CTRL_CLOCK_GATING, _CG1_SLCG_SAW, __PROD));
#endif // 0
NVSWITCH_ENG_WR32(device, SAW, , 0, _NVLSAW, _GLBLLATENCYTIMERCTRL,
DRF_DEF(_NVLSAW, _GLBLLATENCYTIMERCTRL, _ENABLE, __PROD));
// Moving this L2 register access to SOE. Refer bug #3747687
#if 0
NVSWITCH_ENG_WR32(device, SAW, , 0, _NVLSAW, _PCIE_PRI_CLOCK_GATING,
DRF_DEF(_NVLSAW, _PCIE_PRI_CLOCK_GATING, _CG1_SLCG, __PROD));
#endif // 0
NVSWITCH_REG_WR32(device, _PSE, _CG1,
DRF_DEF(_PSE, _CG1, _SLCG, __PROD));

View File

@ -743,6 +743,15 @@ nvswitch_init_soe
return device->hal.nvswitch_init_soe(device);
}
void
nvswitch_soe_init_l2_state
(
nvswitch_device *device
)
{
device->hal.nvswitch_soe_init_l2_state(device);
}
static NvlStatus
_nvswitch_construct_soe
(
@ -3217,6 +3226,22 @@ _nvswitch_ctrl_inband_pending_data_stats
return NVL_SUCCESS;
}
static NvlStatus
_nvswitch_ctrl_get_board_part_number
(
nvswitch_device *device,
NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR *p
)
{
if (!nvswitch_is_inforom_supported(device))
{
NVSWITCH_PRINT(device, ERROR, "InfoROM is not supported\n");
return -NVL_ERR_NOT_SUPPORTED;
}
return device->hal.nvswitch_ctrl_get_board_part_number(device, p);
}
static NvlStatus
_nvswitch_ctrl_i2c_smbus_command
(
@ -4647,6 +4672,26 @@ nvswitch_load_link_disable_settings
device->hal.nvswitch_load_link_disable_settings(device, link);
}
static NvlStatus
_nvswitch_ctrl_set_nvlink_error_threshold
(
nvswitch_device *device,
NVSWITCH_SET_NVLINK_ERROR_THRESHOLD_PARAMS *pParams
)
{
return device->hal.nvswitch_ctrl_set_nvlink_error_threshold(device, pParams);
}
static NvlStatus
_nvswitch_ctrl_get_nvlink_error_threshold
(
nvswitch_device *device,
NVSWITCH_GET_NVLINK_ERROR_THRESHOLD_PARAMS *pParams
)
{
return device->hal.nvswitch_ctrl_get_nvlink_error_threshold(device, pParams);
}
NvlStatus
nvswitch_lib_ctrl
(
@ -4952,6 +4997,9 @@ nvswitch_lib_ctrl
_nvswitch_ctrl_inband_pending_data_stats,
NVSWITCH_INBAND_PENDING_DATA_STATS_PARAMS,
osPrivate, flags);
NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_BOARD_PART_NUMBER,
_nvswitch_ctrl_get_board_part_number,
NVSWITCH_GET_BOARD_PART_NUMBER_VECTOR);
NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(
CTRL_NVSWITCH_GET_SW_INFO,
_nvswitch_ctrl_get_sw_info,
@ -4973,6 +5021,13 @@ nvswitch_lib_ctrl
_nvswitch_ctrl_clear_counters,
NVSWITCH_NVLINK_CLEAR_COUNTERS_PARAMS,
osPrivate, flags);
NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(CTRL_NVSWITCH_SET_NVLINK_ERROR_THRESHOLD,
_nvswitch_ctrl_set_nvlink_error_threshold,
NVSWITCH_SET_NVLINK_ERROR_THRESHOLD_PARAMS,
osPrivate, flags);
NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_NVLINK_ERROR_THRESHOLD,
_nvswitch_ctrl_get_nvlink_error_threshold,
NVSWITCH_GET_NVLINK_ERROR_THRESHOLD_PARAMS);
default:
nvswitch_os_print(NVSWITCH_DBG_LEVEL_INFO, "unknown ioctl %x\n", cmd);

View File

@ -73,5 +73,9 @@ typedef struct NVA084_ALLOC_PARAMETERS {
NvHandle guestFbHandleList[NVA084_MAX_VMMU_SEGMENTS];
NvHandle hPluginHeapMemory;
NV_DECLARE_ALIGNED(NvU64 ctrlBuffOffset, 8);
NV_DECLARE_ALIGNED(NvU64 initTaskLogBuffOffset, 8);
NV_DECLARE_ALIGNED(NvU64 initTaskLogBuffSize, 8);
NV_DECLARE_ALIGNED(NvU64 vgpuTaskLogBuffOffset, 8);
NV_DECLARE_ALIGNED(NvU64 vgpuTaskLogBuffSize, 8);
NvBool bDeviceProfilingEnabled;
} NVA084_ALLOC_PARAMETERS;

View File

@ -39,7 +39,8 @@ extern "C" {
#define NVA084_NOTIFIERS_EVENT_VGPU_PLUGIN_TASK_CRASHED (2)
#define NVA084_NOTIFIERS_EVENT_GUEST_DRIVER_LOADED (3)
#define NVA084_NOTIFIERS_EVENT_GUEST_DRIVER_UNLOADED (4)
#define NVA084_NOTIFIERS_MAXCOUNT (5)
#define NVA084_NOTIFIERS_EVENT_PRINT_ERROR_MESSAGE (5)
#define NVA084_NOTIFIERS_MAXCOUNT (6)
#define NVA084_NOTIFICATION_STATUS_IN_PROGRESS (0x8000)
#define NVA084_NOTIFICATION_STATUS_BAD_ARGUMENT (0x4000)

View File

@ -296,8 +296,7 @@ typedef struct NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS {
/* Generic types */
#define NV0000_CTRL_SYSTEM_CPU_TYPE_ARMV8A_GENERIC (0xA00FF000U)
#define NV0000_CTRL_SYSTEM_CPU_TYPE_ARMV9A_GENERIC (0xA00FF001U)
/* processor capabilities */
#define NV0000_CTRL_SYSTEM_CPU_CAP_MMX (0x00000001U)

View File

@ -2945,6 +2945,22 @@ typedef struct NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS {
#define NV2080_CTRL_CMD_NVLINK_DIRECT_CONNECT_CHECK (0x20803041U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_DIRECT_CONNECT_CHECK_PARAMS_MESSAGE_ID" */
/*
* NV2080_CTRL_CMD_NVLINK_POST_FAULT_UP
*
* This command triggers the next sequence after the 10-second delay
*
* [out] linkId
*     Link number on which the sequence should be triggered
*/
#define NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS_MESSAGE_ID (0x42U)
typedef struct NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS {
NvU32 linkId;
} NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS;
#define NV2080_CTRL_CMD_NVLINK_POST_FAULT_UP (0x20803042U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS_MESSAGE_ID" */
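A hedged sketch of issuing this control from RM client code. Only the command ID and parameter struct come from this header; the pRmApi/handle pattern and names are assumptions about the caller's context.

// Hypothetical caller sketch: issue the control and read back params.linkId.
// pRmApi, hClient and hSubdevice are assumed to exist in the caller's setup.
NV2080_CTRL_NVLINK_POST_FAULT_UP_PARAMS params = { 0 };
NV_STATUS status;

status = pRmApi->Control(pRmApi, hClient, hSubdevice,
                         NV2080_CTRL_CMD_NVLINK_POST_FAULT_UP,
                         &params, sizeof(params));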
/* _ctrl2080nvlink_h_ */

View File

@ -79,6 +79,10 @@ typedef struct NV2080_CTRL_VGPU_MGR_INTERNAL_BOOTLOAD_GSP_VGPU_PLUGIN_TASK_PARAM
NV_DECLARE_ALIGNED(NvU64 pluginHeapMemoryPhysAddr, 8);
NV_DECLARE_ALIGNED(NvU64 pluginHeapMemoryLength, 8);
NV_DECLARE_ALIGNED(NvU64 ctrlBuffOffset, 8);
NV_DECLARE_ALIGNED(NvU64 initTaskLogBuffOffset, 8);
NV_DECLARE_ALIGNED(NvU64 initTaskLogBuffSize, 8);
NV_DECLARE_ALIGNED(NvU64 vgpuTaskLogBuffOffset, 8);
NV_DECLARE_ALIGNED(NvU64 vgpuTaskLogBuffSize, 8);
NvBool bDeviceProfilingEnabled;
} NV2080_CTRL_VGPU_MGR_INTERNAL_BOOTLOAD_GSP_VGPU_PLUGIN_TASK_PARAMS;

View File

@ -625,6 +625,9 @@ typedef struct NVA080_CTRL_UPDATE_SYSMEM_BITMAP_PARAMS {
#define NVA080_CTRL_CMD_VGPU_GET_CONFIG_PARAMS_VGPU_DEV_CAPS_TIMESLICE_OVERRIDE_ENABLED 20:20
#define NVA080_CTRL_CMD_VGPU_GET_CONFIG_PARAMS_VGPU_DEV_CAPS_TIMESLICE_OVERRIDE_ENABLED_FALSE (0x00000000)
#define NVA080_CTRL_CMD_VGPU_GET_CONFIG_PARAMS_VGPU_DEV_CAPS_TIMESLICE_OVERRIDE_ENABLED_TRUE (0x00000001)
#define NVA080_CTRL_CMD_VGPU_GET_CONFIG_PARAMS_VGPU_DEV_CAPS_USE_NON_STALL_LINUX_EVENTS 21:21
#define NVA080_CTRL_CMD_VGPU_GET_CONFIG_PARAMS_VGPU_DEV_CAPS_USE_NON_STALL_LINUX_EVENTS_FALSE (0x00000000)
#define NVA080_CTRL_CMD_VGPU_GET_CONFIG_PARAMS_VGPU_DEV_CAPS_USE_NON_STALL_LINUX_EVENTS_TRUE (0x00000001)
/* UVM supported features */
#define NVA080_CTRL_CMD_VGPU_GET_CONFIG_PARAMS_UVM_FEATURES_REPLAYABLE_FAULTS_ENABLED 0:0

View File

@ -40,7 +40,7 @@
* FINN compiler version
*/
#define FINN_VERSION_MAJOR 1
#define FINN_VERSION_MINOR 14
#define FINN_VERSION_MINOR 15
#define FINN_VERSION_PATCH 0
typedef struct FINN_RM_API
@ -130,7 +130,7 @@ NV_STATUS FinnRmApiSerializeDown(NvU64 interface, NvU64 message, const void *src
*/
/**@{*/
NV_STATUS FinnRmApiDeserializeDown(NvU8 **src, NvLength src_size, void *dst, NvLength dst_size);
NV_STATUS FinnRmApiDeserializeUp(NvU8 * const *src, NvLength src_size, void *dst, NvLength dst_size);
NV_STATUS FinnRmApiDeserializeUp(NvU8 **src, NvLength src_size, void *dst, NvLength dst_size);
/**@}*/
/*!

View File

@ -1446,7 +1446,7 @@ typedef struct
#define NVOS32_ALLOC_FLAGS_BANK_GROW_UP 0x00000000
#define NVOS32_ALLOC_FLAGS_BANK_GROW_DOWN 0x00000200
#define NVOS32_ALLOC_FLAGS_LAZY 0x00000400
// unused 0x00000800
#define NVOS32_ALLOC_FLAGS_FORCE_REVERSE_ALLOC 0x00000800
#define NVOS32_ALLOC_FLAGS_NO_SCANOUT 0x00001000
#define NVOS32_ALLOC_FLAGS_PITCH_FORCE 0x00002000
#define NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED 0x00004000

View File

@ -141,6 +141,11 @@ typedef struct
char lineBuffer[LIBOS_LOG_LINE_BUFFER_SIZE];
NvBool bSynchronousBuffer;
NvBool bPtrSymbolResolve;
// Attempt to decode %s format arguments
// This requires all %s libos log arguments to be present in the logging elf, otherwise
// garbage output may be produced.
NvBool bDecodeStrFmt;
#endif // LIBOS_LOG_DECODE_ENABLE
#if defined(NVSYM_STANDALONE) && !defined(PROTODMP_BUILD)
@ -168,11 +173,13 @@ void libosLogAddLog(LIBOS_LOG_DECODE *logDecode, void *buffer, NvU64 bufferSize,
#if LIBOS_LOG_DECODE_ENABLE
void libosLogInit(LIBOS_LOG_DECODE *logDecode, LibosElf64Header *elf, NvU64 elfSize);
void libosLogInitEx(
LIBOS_LOG_DECODE *logDecode, LibosElf64Header *elf, NvBool bSynchronousBuffer, NvBool bPtrSymbolResolve, NvU64 elfSize);
LIBOS_LOG_DECODE *logDecode, LibosElf64Header *elf, NvBool bSynchronousBuffer,
NvBool bPtrSymbolResolve, NvBool bDecodeStrFmt, NvU64 elfSize);
#else
void libosLogInit(LIBOS_LOG_DECODE *logDecode, void *elf, NvU64 elfSize);
void libosLogInitEx(
LIBOS_LOG_DECODE *logDecode, void *elf, NvBool bSynchronousBuffer, NvBool bPtrSymbolResolve, NvU64 elfSize);
LIBOS_LOG_DECODE *logDecode, void *elf, NvBool bSynchronousBuffer, NvBool bPtrSymbolResolve,
NvBool bDecodeStrFmt, NvU64 elfSize);
#endif // LIBOS_LOG_DECODE_ENABLE
void libosLogSymbolicateAddress(LIBOS_LOG_DECODE *logDecode, char *decodedLine, NvLength decodedLineSize, NvUPtr addr);
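A brief usage sketch of the extended initializer with the new bDecodeStrFmt flag; elf and elfSize are assumed to be prepared by the caller as before.

// Hypothetical call site (decode-enabled build): enable %s format-argument
// decoding, leaving the synchronous-buffer and pointer-symbol options off.
LIBOS_LOG_DECODE logDecode;
libosLogInitEx(&logDecode, elf,
               NV_FALSE, /* bSynchronousBuffer */
               NV_FALSE, /* bPtrSymbolResolve  */
               NV_TRUE,  /* bDecodeStrFmt      */
               elfSize);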

View File

@ -195,6 +195,10 @@ NvBool LibosDebugResolveSymbolToName(
{
LibosElf64Symbol *i = (LibosElf64Symbol *)pThis->symtabStart;
NvU64 count = (pThis->symtabEnd - pThis->symtabStart) / sizeof(LibosElf64Symbol);
if (i == NULL)
return NV_FALSE;
while (count--)
{
if (i->name && (symbolAddress == i->value || (symbolAddress >= i->value && symbolAddress < i->value + i->size)))
@ -732,6 +736,9 @@ static void libosDwarfBuildTables(LibosDebugResolver *pThis)
pThis->arangeTable = NULL;
pThis->nARangeEntries = 0;
if (pThis->debugARangesStart == NULL || pThis->debugLineStart == NULL)
return;
// Run through the .debug_aranges elf section to get a count of consolidated ranges.
dwarfBuildARangeTable(pThis);

View File

@ -725,7 +725,7 @@ static int libos_printf_a(
}
goto print_string;
case 's':
a = (char *)LibosElfMapVirtualString(&pRec->log->elfImage, (NvUPtr)arg.p, NV_FALSE);
a = (char *)LibosElfMapVirtualString(&pRec->log->elfImage, (NvUPtr)arg.p, logDecode->bDecodeStrFmt);
if (!a)
a = (char *)"(bad-pointer)";
print_string:
@ -1467,8 +1467,8 @@ void libosLogInit(LIBOS_LOG_DECODE *logDecode, LibosElf64Header *elf, NvU64 elfS
// This will allow us to calculate for max possible number of log entries,
// i.e. if none of them have args and are thus the smallest size possible.
//
NvU64 minLogBufferEntryLength = 0;
minLogBufferEntryLength++; // account for metadata pointer
NvU64 minLogBufferEntryLength = 1; // account for metadata pointer
if (!logDecode->bSynchronousBuffer)
{
minLogBufferEntryLength++; // account for timestamp
@ -1530,11 +1530,13 @@ void libosLogInit(LIBOS_LOG_DECODE *logDecode, LibosElf64Header *elf, NvU64 elfS
}
void libosLogInitEx(
LIBOS_LOG_DECODE *logDecode, LibosElf64Header *elf, NvBool bSynchronousBuffer, NvBool bPtrSymbolResolve, NvU64 elfSize)
LIBOS_LOG_DECODE *logDecode, LibosElf64Header *elf, NvBool bSynchronousBuffer,
NvBool bPtrSymbolResolve, NvBool bDecodeStrFmt, NvU64 elfSize)
{
// Set extended config
logDecode->bSynchronousBuffer = bSynchronousBuffer;
logDecode->bPtrSymbolResolve = bPtrSymbolResolve;
logDecode->bDecodeStrFmt = bDecodeStrFmt;
// Complete init
libosLogInit(logDecode, elf, elfSize);
@ -1546,7 +1548,7 @@ void libosLogInit(LIBOS_LOG_DECODE *logDecode, void *elf, NvU64 elfSize) {}
void libosLogInitEx(
LIBOS_LOG_DECODE *logDecode, void *elf,
NvBool bSynchronousBuffer, NvBool bPtrSymbolResolve, NvU64 elfSize)
NvBool bSynchronousBuffer, NvBool bPtrSymbolResolve, NvBool bDecodeStrFmt, NvU64 elfSize)
{
// No extended config to set when decode is disabled
}
@ -1589,6 +1591,9 @@ void libosLogDestroy(LIBOS_LOG_DECODE *logDecode)
logDecode->scratchBuffer = NULL;
}
#endif // LIBOS_LOG_DECODE_ENABLE
logDecode->numLogBuffers = 0;
logDecode->bIsDecodable = NV_FALSE;
}
void libosExtractLogs(LIBOS_LOG_DECODE *logDecode, NvBool bSyncNvLog)

View File

@ -58,6 +58,9 @@ static inline NvU32 nvKmsSizeOfNotifier(enum NvKmsNIsoFormat format,
}
}
void nvKmsSetNotifier(enum NvKmsNIsoFormat format, NvBool overlay,
NvU32 index, void *base, NvU64 timeStamp);
void nvKmsResetNotifier(enum NvKmsNIsoFormat format, NvBool overlay,
NvU32 index, void *base);

View File

@ -75,46 +75,59 @@ static void GetNotifierTimeStamp(volatile const NvU32 *notif,
} while (1);
}
static void ResetNotifierLegacy(NvBool overlay, volatile void *in)
static void SetNotifierLegacy(NvBool overlay, volatile void *in, NvBool begun,
NvU64 timeStamp)
{
volatile NvU32 *notif = in;
if (overlay) {
notif[NV_DISP_NOTIFICATION_2_INFO16_3] =
notif[NV_DISP_NOTIFICATION_2_INFO16_3] = begun ?
DRF_DEF(_DISP, _NOTIFICATION_2__3, _STATUS, _BEGUN) :
DRF_DEF(_DISP, _NOTIFICATION_2__3, _STATUS, _NOT_BEGUN);
notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_0] =
notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_0] = begun ? NvU64_LO32(timeStamp) :
NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID;
notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_1] = begun ? NvU64_HI32(timeStamp) :
NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_HI_INVALID;
} else {
notif[NV_DISP_BASE_NOTIFIER_1__0] =
notif[NV_DISP_BASE_NOTIFIER_1__0] = begun ?
DRF_DEF(_DISP, _BASE_NOTIFIER_1__0, _STATUS, _BEGUN) :
DRF_DEF(_DISP, _BASE_NOTIFIER_1__0, _STATUS, _NOT_BEGUN);
}
}
static void ResetNotifierFourWord(volatile void *in)
static void SetNotifierFourWord(volatile void *in, NvBool begun,
NvU64 timeStamp)
{
volatile NvU32 *notif = in;
notif[NV_DISP_NOTIFICATION_2_INFO16_3] =
notif[NV_DISP_NOTIFICATION_2_INFO16_3] = begun ?
DRF_DEF(_DISP, _NOTIFICATION_2__3, _STATUS, _BEGUN) :
DRF_DEF(_DISP, _NOTIFICATION_2__3, _STATUS, _NOT_BEGUN);
notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_0] =
notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_0] = begun ? NvU64_LO32(timeStamp) :
NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID;
notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_1] = begun ? NvU64_HI32(timeStamp) :
NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_HI_INVALID;
}
static void ResetNotifierFourWordNVDisplay(volatile void *in)
static void SetNotifierFourWordNVDisplay(volatile void *in, NvBool begun,
NvU64 timeStamp)
{
volatile NvU32 *notif = in;
notif[NV_DISP_NOTIFIER__0] =
notif[NV_DISP_NOTIFIER__0] = begun ?
DRF_DEF(_DISP, _NOTIFIER__0, _STATUS, _BEGUN) :
DRF_DEF(_DISP, _NOTIFIER__0, _STATUS, _NOT_BEGUN);
notif[NV_DISP_NOTIFIER__2] =
notif[NV_DISP_NOTIFIER__2] = begun ? NvU64_LO32(timeStamp) :
NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID;
notif[NV_DISP_NOTIFIER__3] = begun ? NvU64_HI32(timeStamp) :
NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_HI_INVALID;
}
void nvKmsResetNotifier(enum NvKmsNIsoFormat format, NvBool overlay,
NvU32 index, void *base)
static void SetNotifier(enum NvKmsNIsoFormat format, NvBool overlay,
NvU32 index, void *base, NvBool begun, NvU64 timeStamp)
{
const NvU32 sizeInBytes = nvKmsSizeOfNotifier(format, overlay);
void *notif =
@ -122,17 +135,29 @@ void nvKmsResetNotifier(enum NvKmsNIsoFormat format, NvBool overlay,
switch (format) {
case NVKMS_NISO_FORMAT_LEGACY:
ResetNotifierLegacy(overlay, notif);
SetNotifierLegacy(overlay, notif, begun, timeStamp);
break;
case NVKMS_NISO_FORMAT_FOUR_WORD:
ResetNotifierFourWord(notif);
SetNotifierFourWord(notif, begun, timeStamp);
break;
case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY:
ResetNotifierFourWordNVDisplay(notif);
SetNotifierFourWordNVDisplay(notif, begun, timeStamp);
break;
}
}
void nvKmsSetNotifier(enum NvKmsNIsoFormat format, NvBool overlay,
NvU32 index, void *base, NvU64 timeStamp)
{
SetNotifier(format, overlay, index, base, NV_TRUE, timeStamp);
}
void nvKmsResetNotifier(enum NvKmsNIsoFormat format, NvBool overlay,
NvU32 index, void *base)
{
SetNotifier(format, overlay, index, base, NV_FALSE, 0);
}
static void ParseNotifierLegacy(NvBool overlay, volatile const void *in,
struct nvKmsParsedNotifier *out)
{

View File

@ -325,13 +325,6 @@ static NvBool HsIoctlFlipValidateOneHwState(
const NVFlipChannelEvoHwState *pHwState,
const NvU32 sd)
{
/* HeadSurface does not support completion notifiers, yet. */
if ((pHwState->completionNotifier.surface.pSurfaceEvo != NULL) ||
(pHwState->completionNotifier.awaken)) {
return FALSE;
}
/* The semaphore surface must have a CPU mapping. */
if (!pHwState->syncObject.usingSyncpt) {
@ -406,6 +399,17 @@ static NvBool HsIoctlFlipAssignHwStateOneHead(
continue;
}
/*
* HeadSurface only supports client notifiers when running in
* swapgroup mode where each flip IOCTL will result in a real
* flip in HW.
*/
if (((pFlipState->layer[layer].completionNotifier.surface.pSurfaceEvo != NULL) ||
pFlipState->layer[layer].completionNotifier.awaken) &&
!pHsChannel->config.neededForSwapGroup) {
return FALSE;
}
if (!HsIoctlFlipValidateOneHwState(&pFlipState->layer[layer], sd)) {
return FALSE;
}

View File

@ -1960,6 +1960,53 @@ static NvBool IsPreviousFrameDone(NVHsChannelEvoPtr pHsChannel)
}
}
/*!
* If the client provided a notifier surface with a real flip
* request while swap groups were enabled, write to that
* notifier with the BEGUN status and the most recent
* headsurface notifier timestamp to emulate what the client
* would observe if their notifier was used in hardware.
*/
static void HsUpdateClientNotifier(NVHsChannelEvoPtr pHsChannel)
{
const NVDispEvoRec *pDispEvo = pHsChannel->pDispEvo;
const NvU32 apiHead = pHsChannel->apiHead;
const NvU32 sd = pDispEvo->displayOwner;
const NVHsDeviceEvoRec *pHsDevice = pDispEvo->pDevEvo->pHsDevice;
const NVHsNotifiersRec *pHsNotifiers = &pHsDevice->notifiers;
const NVHsNotifiersOneSdRec *pHsNotifiersOneSd = pHsNotifiers->sd[sd].ptr;
const NvU8 nextSlot = pHsNotifiers->sd[sd].apiHead[apiHead].nextSlot;
struct nvKmsParsedNotifier parsed = { };
NVFlipNIsoSurfaceEvoHwState *pClientNotifier =
&pHsChannel->flipQueue[NVKMS_MAIN_LAYER].current.completionNotifier.surface;
if (pClientNotifier->pSurfaceEvo == NULL) {
return;
}
const NvU8 prevSlot =
A_minus_b_with_wrap_U8(nextSlot, 1,
NVKMS_HEAD_SURFACE_MAX_NOTIFIERS_PER_HEAD);
nvKmsParseNotifier(pHsNotifiers->nIsoFormat, FALSE /* overlay */,
prevSlot, pHsNotifiersOneSd->notifier[apiHead], &parsed);
nvAssert(parsed.status == NVKMS_NOTIFIER_STATUS_BEGUN);
/*
* XXX NVKMS HEADSURFACE TODO: Get valid timestamp through other means to
* support this on platforms with legacy HW semaphores without valid
* HW notifier timestamps in the main channel.
*/
nvAssert(parsed.timeStampValid);
nvKmsSetNotifier(pClientNotifier->format,
FALSE /* overlay */,
pClientNotifier->offsetInWords / 4,
pClientNotifier->pSurfaceEvo->cpuAddress[sd],
parsed.timeStamp);
}
/*!
* Check if all flips completed for this SwapGroup. If so, release the
* SwapGroup.
@ -2004,8 +2051,9 @@ static void HsCheckSwapGroupFlipDone(
}
/*
* The SwapGroup is ready: increment nextIndex for all active heads, so that
* subsequent frames of headSurface render to the next buffer.
* The SwapGroup is ready: update client notifiers if necessary and
* increment nextIndex for all active heads, so that subsequent frames of
* headSurface render to the next buffer.
*/
FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
NvU32 apiHead;
@ -2022,6 +2070,7 @@ static void HsCheckSwapGroupFlipDone(
nvAssert(pHsChannel->config.neededForSwapGroup);
nvAssert(IsPreviousFlipDone(pHsChannel));
HsUpdateClientNotifier(pHsChannel);
HsIncrementNextIndex(pHsDevice, pHsChannel);
}
}

View File

@ -53,6 +53,8 @@
#define NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(i) ((i)*2)
#define NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(i) (((i)*2) + 1)
#define NV_CTRL_INTR_LEAF_IDX_TO_SUBTREE(i) ((i)/2)
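A quick worked example of the subtree/leaf index mapping defined above, written as compile-time checks (ct_assert is already used elsewhere in this tree):

// Subtree 3 owns LEAF registers 6 and 7, and LEAF 7 maps back to subtree 3.
ct_assert(NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(3) == 6);
ct_assert(NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(3) == 7);
ct_assert(NV_CTRL_INTR_LEAF_IDX_TO_SUBTREE(7) == 3);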
//
// Given a LEAF register index, the below macros give us the range of GPU
// interrupt vectors that correspond to those leafs.

View File

@ -139,17 +139,22 @@ typedef struct
NvU32 elfCodeSize;
NvU32 elfDataSize;
// Bit 0 is used to check if [VGPU-GSP] mode is active in init partition
NvU8 driverModel;
// Pad structure to exactly 256 bytes. Can replace padding with additional
// fields without incrementing revision. Padding initialized to 0.
NvU32 padding[3];
NvU8 padding[11];
// BL to use for verification (i.e. Booter says OK to boot)
NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
} GspFwWprMeta;
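Layout note (an inference from the fields shown, not stated in the change): the new NvU8 driverModel byte plus NvU8 padding[11] occupy the same 12 bytes as the previous NvU32 padding[3], so the structure stays at exactly 256 bytes and GSP_FW_WPR_META_REVISION can remain unchanged.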
#define GSP_FW_WPR_META_VERIFIED 0xa0a0a0a0a0a0a0a0ULL
#define GSP_FW_WPR_META_REVISION 1
#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
// Bit 0 is used to check if [VGPU-GSP] mode is active in init partition
#define DRIVERMODEL_VGPU 0
#endif // GSP_FW_WPR_META_H_

View File

@ -28,6 +28,7 @@
#include "inforom/ifrecc.h"
#include "inforom/ifrdem.h"
#include "inforom/omsdef.h"
#define INFOROM_OBD_OBJECT_V1_XX_PACKED_SIZE 128
@ -49,6 +50,29 @@ struct INFOROM_OBD_OBJECT_V1_XX
#define INFOROM_OBD_OBJECT_V1_XX_FMT INFOROM_OBJECT_HEADER_V1_00_FMT "d116b"
typedef struct INFOROM_OBD_OBJECT_V1_XX INFOROM_OBD_OBJECT_V1_XX;
#define INFOROM_OBD_OBJECT_V2_XX_PACKED_SIZE 224
struct INFOROM_OBD_OBJECT_V2_XX
{
INFOROM_OBJECT_HEADER_V1_00 header;
inforom_U032 buildDate;
inforom_U008 marketingName[24];
inforom_U008 serialNumber[16];
inforom_U008 memoryManufacturer;
inforom_U008 memoryPartID[20];
inforom_U008 memoryDateCode[6];
inforom_U008 productPartNumber[20];
inforom_U008 boardRevision[3];
inforom_U008 boardType;
inforom_U008 board699PartNumber[20];
inforom_U032 productLength;
inforom_U032 productWidth;
inforom_U032 productHeight;
inforom_U008 reserved[89];
};
#define INFOROM_OBD_OBJECT_V2_XX_FMT INFOROM_OBJECT_HEADER_V1_00_FMT "d111b3d89b"
typedef struct INFOROM_OBD_OBJECT_V2_XX INFOROM_OBD_OBJECT_V2_XX;
//
// OEM 1.0
//

View File

@ -35,6 +35,29 @@
#define INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_FORCE_DEVICE_DISABLE_NO 0
#define INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_FORCE_DEVICE_DISABLE_YES 1
#define INFOROM_OMS_OBJECT_V1G_SETTINGS_ENTRY_DATA_RESERVED 23:2
#define INFOROM_OMS_OBJECT_V1G_SETTINGS_ENTRY_DATA_ENTRY_CHECKSUM 31:24
typedef struct INFOROM_OMS_OBJECT_V1G_SETTINGS_ENTRY
{
inforom_U032 data;
inforom_U032 clockLimitMin;
inforom_U032 clockLimitMax;
} INFOROM_OMS_OBJECT_V1G_SETTINGS_ENTRY;
#define INFOROM_OMS_OBJECT_V1G_NUM_SETTINGS_ENTRIES 8
typedef struct INFOROM_OMS_OBJECT_V1G
{
INFOROM_OBJECT_HEADER_V1_00 header;
inforom_U032 lifetimeRefreshCount;
INFOROM_OMS_OBJECT_V1G_SETTINGS_ENTRY settings[
INFOROM_OMS_OBJECT_V1G_NUM_SETTINGS_ENTRIES];
inforom_U032 reserved;
} INFOROM_OMS_OBJECT_V1G;
#define INFOROM_OMS_OBJECT_V1G_FMT INFOROM_OBJECT_HEADER_V1_00_FMT "26d"
#define INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY_DATA_RESERVED 7:2
#define INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY_DATA_ENTRY_CHECKSUM 15:8
@ -62,15 +85,24 @@ typedef struct INFOROM_OMS_V1S_DATA
INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY *pNext;
} INFOROM_OMS_V1S_DATA;
typedef struct INFOROM_OMS_V1G_DATA
{
INFOROM_OMS_OBJECT_V1G_SETTINGS_ENTRY *pIter;
INFOROM_OMS_OBJECT_V1G_SETTINGS_ENTRY prev;
INFOROM_OMS_OBJECT_V1G_SETTINGS_ENTRY *pNext;
} INFOROM_OMS_V1G_DATA;
typedef union
{
INFOROM_OBJECT_HEADER_V1_00 header;
INFOROM_OMS_OBJECT_V1S v1s;
INFOROM_OMS_OBJECT_V1G v1g;
} INFOROM_OMS_OBJECT;
typedef union
{
INFOROM_OMS_V1S_DATA v1s;
INFOROM_OMS_V1G_DATA v1g;
} INFOROM_OMS_DATA;
typedef struct

View File

@ -290,6 +290,7 @@ VENDORNAME vendorName[] =
{PCI_VENDOR_ID_FUJITSU, "Fujitsu"},
{PCI_VENDOR_ID_CADENCE, "Cadence"},
{PCI_VENDOR_ID_ARM, "ARM"},
{PCI_VENDOR_ID_ALIBABA, "Alibaba"},
{0, "Unknown"} // Indicates end of the table
};
@ -345,6 +346,7 @@ ARMCSALLOWLISTINFO armChipsetAllowListInfo[] =
{PCI_VENDOR_ID_ARM, 0x0100, CS_ARM_NEOVERSEN1}, // Arm Neoverse N1
{PCI_VENDOR_ID_MARVELL, 0xA02D, CS_MARVELL_OCTEON_CN96XX}, // Marvell OCTEON CN96xx
{PCI_VENDOR_ID_MARVELL, 0xA02D, CS_MARVELL_OCTEON_CN98XX}, // Marvell OCTEON CN98xx
{PCI_VENDOR_ID_ALIBABA, 0x8000, CS_ALIBABA_YITIAN}, // Alibaba Yitian
// last element must have chipset CS_UNKNOWN (zero)
{0, 0, CS_UNKNOWN}

View File

@ -145,6 +145,7 @@
#define PCI_VENDOR_ID_CADENCE 0x17CD
#define PCI_VENDOR_ID_ARM 0x13B5
#define PCI_VENDOR_ID_HYGON 0x1D94
#define PCI_VENDOR_ID_ALIBABA 0x1DED
#define NV_PCI_DEVID_DEVICE 31:16 /* RW--F */
#define NV_PCI_SUBID_DEVICE 31:16 /* RW--F */
@ -642,6 +643,7 @@ enum {
, CS_HYGON_C86
, CS_PHYTIUM_S2500
, CS_MELLANOX_BLUEFIELD2
, CS_ALIBABA_YITIAN
, CS_INTEL_1B81
, CS_INTEL_18DC
, CS_INTEL_7A04

View File

@ -111,6 +111,7 @@
#define NV_MSGBOX_CMD_ARG1_TEMP_NUM_SENSORS 8
#define NV_MSGBOX_CMD_ARG1_POWER_TOTAL 0x00000000
#define NV_MSGBOX_CMD_ARG1_SMBPBI_POWER 0x00000001
#define NV_MSGBOX_CMD_ARG1_POWER_FB 0x00000002
/* SysId info type encodings for opcode NV_MSGBOX_CMD_OPCODE_GET_SYS_ID_DATA (0x05) */
#define NV_MSGBOX_CMD_ARG1_BOARD_PART_NUM_V1 0x00000000
#define NV_MSGBOX_CMD_ARG1_OEM_INFO_V1 0x00000001
@ -171,6 +172,8 @@
#define NV_MSGBOX_CMD_ARG1_GET_CLOCK_FREQ_INFO_MIN 0x00000001
#define NV_MSGBOX_CMD_ARG1_GET_CLOCK_FREQ_INFO_MAX 0x00000002
#define NV_MSGBOX_CMD_ARG1_GET_CLOCK_FREQ_INFO_PAGE_3 0x00000003
#define NV_MSGBOX_CMD_ARG1_GET_SUPPORTED_CLOCK_THROTTLE_REASONS 0x00000004
#define NV_MSGBOX_CMD_ARG1_GET_CURRENT_CLOCK_THROTTLE_REASONS 0x00000005
#define NV_MSGBOX_CMD_ARG1_REMAP_ROWS_RAW_COUNTS 0x00000000
#define NV_MSGBOX_CMD_ARG1_REMAP_ROWS_STATE_FLAGS 0x00000001
#define NV_MSGBOX_CMD_ARG1_REMAP_ROWS_HISTOGRAM 0x00000002
@ -639,6 +642,9 @@
#define NV_MSGBOX_DATA_CAP_0_GET_FABRIC_STATE_FLAGS 29:29
#define NV_MSGBOX_DATA_CAP_0_GET_FABRIC_STATE_FLAGS_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_GET_FABRIC_STATE_FLAGS_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_0_POWER_FB 30:30
#define NV_MSGBOX_DATA_CAP_0_POWER_FB_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_0_POWER_FB_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_1 1
#define NV_MSGBOX_DATA_CAP_1_BOARD_PART_NUM_V1 0:0
@ -731,6 +737,9 @@
#define NV_MSGBOX_DATA_CAP_1_ECC_V6 30:30
#define NV_MSGBOX_DATA_CAP_1_ECC_V6_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_1_ECC_V6_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_1_CLOCK_THROTTLE_REASON 31:31
#define NV_MSGBOX_DATA_CAP_1_CLOCK_THROTTLE_REASON_NOT_AVAILABLE 0x00000000
#define NV_MSGBOX_DATA_CAP_1_CLOCK_THROTTLE_REASON_AVAILABLE 0x00000001
#define NV_MSGBOX_DATA_CAP_2 2
#define NV_MSGBOX_DATA_CAP_2_GPU_DRIVER 0:0
@ -1057,6 +1066,21 @@
#define NV_MSGBOX_DATA_GET_CLOCK_FREQ_INFO_PAGE_3_CURRENT_PSTATE 3:0
#define NV_MSGBOX_DATA_GET_CLOCK_FREQ_INFO_PAGE_3_CURRENT_PSTATE_INVALID 0x0000000F
/**
* Response to
* NV_MSGBOX_CMD_ARG1_GET_SUPPORTED_CLOCK_THROTTLE_REASONS and
* NV_MSGBOX_CMD_ARG1_GET_CURRENT_CLOCK_THROTTLE_REASONS
*/
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON 31:0
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_NONE 0x00000000
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_SW_POWER_CAP 0x00000001
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_HW_SLOWDOWN 0x00000002
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_SYNC_BOOST 0x00000004
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_SW_THERMAL_SLOWDOWN_TLIMIT 0x00000008
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_SW_THERMAL_SLOWDOWN_TAVG 0x00000010
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_SW_THERMAL_SLOWDOWN_TMEM 0x00000020
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_HW_THERMAL_SLOWDOWN 0x00000040
#define NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_HW_POWER_BREAK_SLOWDOWN 0x00000080
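A small worked example of decoding the reason word, treating the per-reason defines above as plain bit values (which matches their encodings):

// Illustration: a hypothetical response word of 0x00000041 decodes as
// HW_THERMAL_SLOWDOWN (0x40) | SW_POWER_CAP (0x01).
NvU32 reasons = 0x00000041;

if (reasons & NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_SW_POWER_CAP)
{
    // SW power cap is limiting clocks.
}
if (reasons & NV_MSGBOX_DATA_CLOCK_THROTTLE_REASON_HW_THERMAL_SLOWDOWN)
{
    // HW thermal slowdown is engaged.
}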
/*
* Number of Nvlink data outputs (dataOut, extData) for
* NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_LINK queries
@ -2525,7 +2549,7 @@ typedef union {
NV_MSGBOX_CMD(_GPU_PERFORMANCE_MONITORING, 0, 0) | \
DRF_DEF(_MSGBOX, _CMD, _ARG1_GPM_ACTION, type) | \
DRF_NUM(_MSGBOX, _CMD, _ARG1_GPM_METRIC, metric) | \
DRF_NUM(_MSGBOX, _CMD, _ARG2_GPM_PARTITION, partition) \
DRF_NUM(_MSGBOX, _CMD, _ARG2_GPM_PARTITION_INDEX, partition) \
)
#define NV_MSGBOX_CMD_GPM_SET_MULTIPLIER(multiplier) \

View File

@ -112,7 +112,6 @@ typedef struct
* scanning of the whole of memory (e.g. when something goes wrong).
*/
#define RM_IFR_GC6_CTX_END_OFFSET_MAX 0x1000000 // 16MB
#define RM_IFR_GC6_CTX_END_OFFSET_MAX_WITH_GSP 0x10000000 // 256MB
/*!
* Maximum size of the context data in bytes.

View File

@ -958,7 +958,6 @@ NV_STATUS NV_API_CALL rm_log_gpu_crash (nv_stack_t *, nv_state_t *);
void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd);
NvBool NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id);
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults_unlocked(nvidia_stack_t *, nv_state_t *, NvU32 *);
NV_STATUS NV_API_CALL rm_gpu_handle_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *);
NvBool NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *);
NvBool NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *);

View File

@ -1221,12 +1221,11 @@ static void postEvent(
NvBool dataValid
)
{
nv_state_t *nv = nv_get_ctl_state();
portSyncSpinlockAcquire(nv->event_spinlock);
if (event->active)
nv_post_event(event, hEvent, notifyIndex,
info32, info16, dataValid);
portSyncSpinlockRelease(nv->event_spinlock);
if (osReferenceObjectCount(event) != NV_OK)
return;
nv_post_event(event, hEvent, notifyIndex,
info32, info16, dataValid);
osDereferenceObjectCount(event);
}
NvU32 osSetEvent
@ -1445,6 +1444,12 @@ NV_STATUS osReferenceObjectCount(void *pEvent)
nv_event_t *event = pEvent;
portSyncSpinlockAcquire(nv->event_spinlock);
// If event->active is false, don't allow any more reference
if (!event->active)
{
portSyncSpinlockRelease(nv->event_spinlock);
return NV_ERR_INVALID_EVENT;
}
++event->refcount;
portSyncSpinlockRelease(nv->event_spinlock);
return NV_OK;
@ -1457,11 +1462,10 @@ NV_STATUS osDereferenceObjectCount(void *pOSEvent)
portSyncSpinlockAcquire(nv->event_spinlock);
NV_ASSERT(event->refcount > 0);
--event->refcount;
// If event->refcount == 0 but event->active is true, the client
// has not yet freed the OS event. free_os_event will free its
// memory when they do, or else when the client itself is freed.
if (event->refcount == 0 && !event->active)
if (--event->refcount == 0 && !event->active)
portMemFree(event);
portSyncSpinlockRelease(nv->event_spinlock);
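
For illustration only (not from this commit): a standalone sketch of the reference-counting scheme the reworked postEvent / osReferenceObjectCount / osDereferenceObjectCount path implements. A pthread mutex stands in for nv->event_spinlock and the types are simplified; none of the demo_* names are the driver's.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_mutex_t g_event_lock = PTHREAD_MUTEX_INITIALIZER;

typedef struct {
    bool         active;    /* cleared once the client frees the OS event  */
    unsigned int refcount;  /* number of in-flight posts holding the event */
} demo_event;

/* Take a reference before posting; refuse if teardown already started. */
static bool demo_event_ref(demo_event *e)
{
    bool ok;
    pthread_mutex_lock(&g_event_lock);
    ok = e->active;
    if (ok)
        e->refcount++;
    pthread_mutex_unlock(&g_event_lock);
    return ok;
}

/* Drop a reference; free once the event is both unreferenced and inactive. */
static void demo_event_unref(demo_event *e)
{
    bool do_free;
    pthread_mutex_lock(&g_event_lock);
    do_free = (--e->refcount == 0) && !e->active;
    pthread_mutex_unlock(&g_event_lock);
    if (do_free)
        free(e);
}

/* Posting mirrors the new postEvent() flow: ref, deliver, unref. */
static void demo_post(demo_event *e)
{
    if (!demo_event_ref(e))
        return;             /* event is being torn down; drop the post */
    /* ... deliver the notification here ... */
    demo_event_unref(e);
}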

View File

@ -354,9 +354,7 @@ static void free_os_event_under_lock(nv_event_t *event)
// If refcount > 0, event will be freed by osDereferenceObjectCount
// when the last associated RM event is freed.
if (event->refcount == 0)
{
portMemFree(event);
}
}
static void free_os_events(
@ -2910,23 +2908,21 @@ static NV_STATUS RmRunNanoTimerCallback(
void *pTmrEvent
)
{
OBJSYS *pSys = SYS_GET_INSTANCE();
POBJTMR pTmr = GPU_GET_TIMER(pGpu);
THREAD_STATE_NODE threadState;
NV_STATUS status = NV_OK;
// LOCK: try to acquire GPUs lock
if ((status = rmGpuLocksAcquire(GPU_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_TMR)) != NV_OK)
{
PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT) pTmrEvent;
// We failed to acquire the lock; schedule a timer to try again.
return osStartNanoTimer(pGpu->pOsGpuInfo, pEvent->super.pOSTmrCBdata, 1000);
}
TMR_EVENT *pEvent = (TMR_EVENT *)pTmrEvent;
if ((status = osCondAcquireRmSema(pSys->pSema)) != NV_OK)
{
// UNLOCK: release GPUs lock
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
return status;
//
// We failed to acquire the lock - depending on what's holding it,
// the lock could be held for a while, so try again soon, but not so
// soon that we keep the lock owner from making forward progress.
//
return osStartNanoTimer(pGpu->pOsGpuInfo, pEvent->pOSTmrCBdata,
osGetTickResolution());
}
threadStateInitISRAndDeferredIntHandler(&threadState, pGpu,
@ -2939,7 +2935,6 @@ static NV_STATUS RmRunNanoTimerCallback(
threadStateFreeISRAndDeferredIntHandler(&threadState,
pGpu, THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
osReleaseRmSema(pSys->pSema, NULL);
// UNLOCK: release GPUs lock
rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, pGpu);
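
For illustration only (not from this commit): a compilable sketch of the retry pattern used above, with a pthread trylock standing in for the conditional GPU-lock acquire and a plain loop standing in for re-arming the nano-timer.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns nonzero when the callback could not get the lock and wants to be
 * re-armed (the real code re-schedules itself via osStartNanoTimer). */
static int demo_timer_callback(void)
{
    if (pthread_mutex_trylock(&g_lock) != 0)
        return 1;   /* holder may run for a while; retry after one tick */

    printf("lock held, running deferred timer work\n");
    pthread_mutex_unlock(&g_lock);
    return 0;
}

int main(void)
{
    while (demo_timer_callback() != 0)
        ;           /* a real timer would sleep one tick between retries */
    return 0;
}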

View File

@ -608,36 +608,19 @@ done:
// Use this call when MMU faults need to be copied
// outside of the RM lock.
//
NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults_unlocked(
nvidia_stack_t *sp,
nv_state_t *nv,
NvU32 *faultsCopied
static NV_STATUS _rm_gpu_copy_mmu_faults_unlocked(
OBJGPU *pGpu,
NvU32 *pFaultsCopied,
THREAD_STATE_NODE *pThreadState
)
{
OBJGPU *pGpu;
void *fp;
NV_STATUS status = NV_OK;
NV_ENTER_RM_RUNTIME(sp,fp);
pGpu = NV_GET_NV_PRIV_PGPU(nv);
if (pGpu == NULL || faultsCopied == NULL)
{
status = NV_ERR_OBJECT_NOT_FOUND;
goto done;
}
// Non-replayable faults are copied to the client shadow buffer by GSP-RM.
if (IS_GSP_CLIENT(pGpu))
{
status = NV_ERR_NOT_SUPPORTED;
goto done;
return NV_ERR_NOT_SUPPORTED;
}
done:
NV_EXIT_RM_RUNTIME(sp,fp);
return status;
return NV_OK;
}
//
@ -650,10 +633,12 @@ NV_STATUS rm_gpu_handle_mmu_faults(
)
{
NvU32 status = NV_OK;
OBJGPU *pGpu;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
*faultsCopied = 0;
OBJGPU *pGpu;
pGpu = NV_GET_NV_PRIV_PGPU(nv);
if (pGpu == NULL)
@ -661,40 +646,50 @@ NV_STATUS rm_gpu_handle_mmu_faults(
return NV_ERR_OBJECT_NOT_FOUND;
}
if (IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && !IS_VIRTUAL(pGpu))
{
KernelGmmu *pKernelGmmu;
PORT_MEM_ALLOCATOR *pIsrAllocator;
THREAD_STATE_NODE threadState;
NvU8 stackAllocator[TLS_ISR_ALLOCATOR_SIZE]; // ISR allocations come from this buffer
KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu);
Intr *pIntr = GPU_GET_INTR(pGpu);
NvU32 hw_put = 0;
NvU32 hw_get = 0;
pIsrAllocator = portMemAllocatorCreateOnExistingBlock(stackAllocator, sizeof(stackAllocator));
tlsIsrInit(pIsrAllocator);
threadStateInitISRLockless(&threadState, pGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);
kgmmuReadFaultBufferPutPtr_HAL(pGpu, pKernelGmmu, NON_REPLAYABLE_FAULT_BUFFER,
&hw_put, &threadState);
pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu);
kgmmuReadFaultBufferGetPtr_HAL(pGpu, pKernelGmmu, NON_REPLAYABLE_FAULT_BUFFER,
&hw_get, &threadState);
if(hw_get != hw_put)
if (IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && !IS_VIRTUAL(pGpu))
{
// We have to clear the top level interrupt bit here since otherwise
// the bottom half will attempt to service the interrupt on the CPU
// side before GSP receives the notification and services it
kgmmuClearNonReplayableFaultIntr(pGpu, pKernelGmmu, &threadState);
status = intrTriggerPrivDoorbell_HAL(pGpu, pIntr, NV_DOORBELL_NOTIFY_LEAF_SERVICE_NON_REPLAYABLE_FAULT_HANDLE);
Intr *pIntr = GPU_GET_INTR(pGpu);
if (kgmmuIsNonReplayableFaultPending_HAL(pGpu, pKernelGmmu, &threadState))
{
// We have to clear the top level interrupt bit here since otherwise
// the bottom half will attempt to service the interrupt on the CPU
// side before GSP receives the notification and services it
kgmmuClearNonReplayableFaultIntr_HAL(pGpu, pKernelGmmu, &threadState);
status = intrTriggerPrivDoorbell_HAL(pGpu, pIntr, NV_DOORBELL_NOTIFY_LEAF_SERVICE_NON_REPLAYABLE_FAULT_HANDLE);
}
}
else if (IS_VIRTUAL_WITH_SRIOV(pGpu))
{
if (kgmmuIsNonReplayableFaultPending_HAL(pGpu, pKernelGmmu, &threadState))
{
status = _rm_gpu_copy_mmu_faults_unlocked(pGpu, faultsCopied, &threadState);
}
}
else
{
status = _rm_gpu_copy_mmu_faults_unlocked(pGpu, faultsCopied, &threadState);
}
threadStateFreeISRLockless(&threadState, pGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS);
tlsIsrDestroy(pIsrAllocator);
portMemAllocatorRelease(pIsrAllocator);
}
else
{
status = rm_gpu_copy_mmu_faults_unlocked(sp, nv, faultsCopied);
}
NV_EXIT_RM_RUNTIME(sp,fp);
return status;
}

View File

@ -154,7 +154,6 @@
--undefined=nvswitch_lib_i2c_transfer
--undefined=rm_gpu_copy_mmu_faults
--undefined=rm_gpu_handle_mmu_faults
--undefined=rm_gpu_copy_mmu_faults_unlocked
--undefined=rm_gpu_need_4k_page_isolation
--undefined=rm_is_chipset_io_coherent
--undefined=rm_get_device_remove_flag

View File

@ -69,6 +69,7 @@ struct BINDATA_STORAGE_PVT_ALL
#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA102.c"
#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_AD102.c"
#include "g_bindata_ksec2GetBinArchiveBlUcode_TU102.c"
#include "g_bindata_ksec2GetBinArchiveSecurescrubUcode_AD10X.c"
#undef BINDATA_INCLUDE_STORAGE_PVT_DECL
#undef BINDATA_NO_SEGMENTS
@ -97,6 +98,7 @@ struct BINDATA_STORAGE_PVT_ALL
#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA102.c"
#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_AD102.c"
#include "g_bindata_ksec2GetBinArchiveBlUcode_TU102.c"
#include "g_bindata_ksec2GetBinArchiveSecurescrubUcode_AD10X.c"
#undef BINDATA_INCLUDE_DATA
@ -123,6 +125,7 @@ BINDATA_CONST struct BINDATA_STORAGE_PVT_ALL g_bindata_pvt =
#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA102.c"
#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_AD102.c"
#include "g_bindata_ksec2GetBinArchiveBlUcode_TU102.c"
#include "g_bindata_ksec2GetBinArchiveSecurescrubUcode_AD10X.c"
#undef BINDATA_INCLUDE_STORAGE_PVT_DEFN
#undef BINDATA_NO_SEGMENTS
@ -152,6 +155,7 @@ const NvU32 g_bindata_pvt_count = sizeof(g_bindata_pvt) / sizeof(BINDATA_STORAGE
#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA102.c"
#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_AD102.c"
#include "g_bindata_ksec2GetBinArchiveBlUcode_TU102.c"
#include "g_bindata_ksec2GetBinArchiveSecurescrubUcode_AD10X.c"
#undef BINDATA_INCLUDE_FUNCTION
#undef BINDATA_INCLUDE_ARCHIVE

File diff suppressed because it is too large

View File

@ -538,6 +538,11 @@ NV_STATUS engineNonStallIntrNotify(OBJGPU *, RM_ENGINE_TYPE);
NV_STATUS notifyEvents(OBJGPU*, EVENTNOTIFICATION*, NvU32, NvU32, NvU32, NV_STATUS, NvU32);
NV_STATUS engineNonStallIntrNotifyEvent(OBJGPU *, RM_ENGINE_TYPE, NvHandle);
typedef struct GpuEngineEventNotificationList GpuEngineEventNotificationList;
NV_STATUS gpuEngineEventNotificationListCreate(OBJGPU *, GpuEngineEventNotificationList **);
void gpuEngineEventNotificationListDestroy(OBJGPU *, GpuEngineEventNotificationList *);
#endif // _EVENT_H_
#ifdef __cplusplus
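
For illustration only (not from this commit): a hedged sketch of how the two new helpers declared above might be paired. Only the two signatures come from the diff; the demo* wrapper names, the error handling, and the calling context are assumptions, and the snippet presumes the RM headers rather than being standalone.

/* Sketch of pairing the new create/destroy helpers (calling context assumed). */
static NV_STATUS demoSetupNonStallNotifications(OBJGPU *pGpu,
                                                GpuEngineEventNotificationList **ppList)
{
    NV_STATUS status = gpuEngineEventNotificationListCreate(pGpu, ppList);
    if (status != NV_OK)
        return status;

    /* ... notifications for this engine are posted against *ppList ... */
    return NV_OK;
}

static void demoTeardownNonStallNotifications(OBJGPU *pGpu,
                                              GpuEngineEventNotificationList *pList)
{
    if (pList != NULL)
        gpuEngineEventNotificationListDestroy(pGpu, pList);
}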

View File

@ -126,7 +126,7 @@ void __nvoc_init_dataField_OBJGPU(OBJGPU *pThis) {
}
// NVOC Property Hal field -- PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f0f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->setProperty(pThis, PDB_PROP_GPU_BUG_3007008_EMULATE_VF_MMU_TLB_INVALIDATE, ((NvBool)(0 == 0)));
}
@ -199,7 +199,15 @@ void __nvoc_init_dataField_OBJGPU(OBJGPU *pThis) {
}
// NVOC Property Hal field -- PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE
pThis->setProperty(pThis, PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE, ((NvBool)(0 != 0)));
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f00000UL) )) /* ChipHal: AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->setProperty(pThis, PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE, ((NvBool)(0 == 0)));
}
// default
else
{
pThis->setProperty(pThis, PDB_PROP_GPU_VGPU_OFFLOAD_CAPABLE, ((NvBool)(0 != 0)));
}
// NVOC Property Hal field -- PDB_PROP_GPU_SKIP_CE_MAPPINGS_NO_NVLINK
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */

View File

@ -609,21 +609,6 @@ typedef struct // GPU specific data for core logic object, stored in GPU object
#define GPU_STATE_DEFAULT 0 // Default flags for destructive state loads
// and unloads
typedef struct engine_event_node
{
PEVENTNOTIFICATION pEventNotify;
struct Memory *pMemory;
struct engine_event_node *pNext;
} ENGINE_EVENT_NODE;
// Linked list of per engine non-stall event nodes
typedef struct
{
ENGINE_EVENT_NODE *pEventNode;
// lock to protect above list
PORT_SPINLOCK *pSpinlock;
} ENGINE_EVENT_LIST;
struct OBJHWBC;
typedef struct hwbc_list
{
@ -936,7 +921,7 @@ struct OBJGPU {
OS_RM_CAPS *pOsRmCaps;
NvU32 halImpl;
void *hPci;
ENGINE_EVENT_LIST engineNonstallIntr[62];
GpuEngineEventNotificationList *engineNonstallIntrEventNotifications[62];
NvBool bIsSOC;
NvU32 gpuInstance;
NvU32 gpuDisabled;

View File

@ -874,19 +874,19 @@ static inline void kgmmuWriteMmuFaultStatus(OBJGPU *pGpu, struct KernelGmmu *pKe
#define kgmmuWriteMmuFaultStatus_HAL(pGpu, pKernelGmmu, arg0) kgmmuWriteMmuFaultStatus(pGpu, pKernelGmmu, arg0)
NvBool kgmmuIsNonReplayableFaultPending_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);
NvBool kgmmuIsNonReplayableFaultPending_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0);
#ifdef __nvoc_kern_gmmu_h_disabled
static inline NvBool kgmmuIsNonReplayableFaultPending(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) {
static inline NvBool kgmmuIsNonReplayableFaultPending(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct THREAD_STATE_NODE *arg0) {
NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!");
return NV_FALSE;
}
#else //__nvoc_kern_gmmu_h_disabled
#define kgmmuIsNonReplayableFaultPending(pGpu, pKernelGmmu) kgmmuIsNonReplayableFaultPending_TU102(pGpu, pKernelGmmu)
#define kgmmuIsNonReplayableFaultPending(pGpu, pKernelGmmu, arg0) kgmmuIsNonReplayableFaultPending_TU102(pGpu, pKernelGmmu, arg0)
#endif //__nvoc_kern_gmmu_h_disabled
#define kgmmuIsNonReplayableFaultPending_HAL(pGpu, pKernelGmmu) kgmmuIsNonReplayableFaultPending(pGpu, pKernelGmmu)
#define kgmmuIsNonReplayableFaultPending_HAL(pGpu, pKernelGmmu, arg0) kgmmuIsNonReplayableFaultPending(pGpu, pKernelGmmu, arg0)
NV_STATUS kgmmuClientShadowFaultBufferAlloc_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu);

View File

@ -673,6 +673,16 @@ NV_STATUS kceGetFaultMethodBufferSize_IMPL(OBJGPU *pGpu, NvU32 *size);
NV_STATUS kceGetAvailableHubPceMask_IMPL(OBJGPU *pGpu, NVLINK_TOPOLOGY_PARAMS *pTopoParams);
#define kceGetAvailableHubPceMask(pGpu, pTopoParams) kceGetAvailableHubPceMask_IMPL(pGpu, pTopoParams)
void kceGetNvlinkCaps_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, NvU8 *pKCeCaps);
#ifdef __nvoc_kernel_ce_h_disabled
static inline void kceGetNvlinkCaps(OBJGPU *pGpu, struct KernelCE *pKCe, NvU8 *pKCeCaps) {
NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!");
}
#else //__nvoc_kernel_ce_h_disabled
#define kceGetNvlinkCaps(pGpu, pKCe, pKCeCaps) kceGetNvlinkCaps_IMPL(pGpu, pKCe, pKCeCaps)
#endif //__nvoc_kernel_ce_h_disabled
NV_STATUS kceGetDeviceCaps_IMPL(OBJGPU *gpu, struct KernelCE *pKCe, RM_ENGINE_TYPE rmEngineType, NvU8 *ceCaps);
#ifdef __nvoc_kernel_ce_h_disabled

View File

@ -740,12 +740,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_KernelCh
#endif
},
{ /* [34] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x102204u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x142204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) kchannelCtrlCmdSetChannelHwState_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x102204u)
/*flags=*/ 0x102204u,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x142204u)
/*flags=*/ 0x142204u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb06f0110u,
/*paramSize=*/ sizeof(NVB06F_CTRL_SET_CHANNEL_HW_STATE_PARAMS),
@ -770,12 +770,12 @@ static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_KernelCh
#endif
},
{ /* [36] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x102204u)
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x142204u)
/*pFunc=*/ (void (*)(void)) NULL,
#else
/*pFunc=*/ (void (*)(void)) kchannelCtrlCmdRestoreEngineCtxData_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x102204u)
/*flags=*/ 0x102204u,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x142204u)
/*flags=*/ 0x142204u,
/*accessRight=*/0x0u,
/*methodId=*/ 0xb06f0112u,
/*paramSize=*/ sizeof(NVB06F_CTRL_RESTORE_ENGINE_CTX_DATA_PARAMS),
@ -1166,7 +1166,7 @@ static void __nvoc_init_funcTable_KernelChannel_1(KernelChannel *pThis, RmHalspe
pThis->__kchannelCtrlCmdGetChannelHwState__ = &kchannelCtrlCmdGetChannelHwState_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x102204u)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x142204u)
pThis->__kchannelCtrlCmdSetChannelHwState__ = &kchannelCtrlCmdSetChannelHwState_IMPL;
#endif
@ -1174,7 +1174,7 @@ static void __nvoc_init_funcTable_KernelChannel_1(KernelChannel *pThis, RmHalspe
pThis->__kchannelCtrlCmdSaveEngineCtxData__ = &kchannelCtrlCmdSaveEngineCtxData_IMPL;
#endif
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x102204u)
#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x142204u)
pThis->__kchannelCtrlCmdRestoreEngineCtxData__ = &kchannelCtrlCmdRestoreEngineCtxData_IMPL;
#endif

View File

@ -268,6 +268,7 @@ struct channel_iterator
NvU32 numRunlists;
NvU32 physicalChannelID;
NvU32 runlistId;
EMEMBLOCK *pFifoDataBlock;
CHANNEL_NODE channelNode;
};
@ -455,6 +456,7 @@ struct KernelFifo {
FifoSchedulingHandlerEntryList preSchedulingDisableHandlerList;
NvBool bUseChidHeap;
NvBool bUsePerRunlistChram;
NvBool bDisableChidIsolation;
NvBool bIsPerRunlistChramSupportedInHw;
NvBool bHostEngineExpansion;
NvBool bHostHasLbOverflow;

View File

@ -497,6 +497,19 @@ static void __nvoc_init_funcTable_KernelGsp_1(KernelGsp *pThis, RmHalspecOwner *
}
}
// Hal function -- kgspGetPrescrubbedTopFbSize
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f0fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->__kgspGetPrescrubbedTopFbSize__ = &kgspGetPrescrubbedTopFbSize_e1e623;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000400UL) )) /* ChipHal: GA100 | GH100 */
{
pThis->__kgspGetPrescrubbedTopFbSize__ = &kgspGetPrescrubbedTopFbSize_dd2c0b;
}
}
// Hal function -- kgspExtractVbiosFromRom
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
@ -536,6 +549,19 @@ static void __nvoc_init_funcTable_KernelGsp_1(KernelGsp *pThis, RmHalspecOwner *
}
}
// Hal function -- kgspExecuteScrubberIfNeeded
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f00000UL) )) /* ChipHal: AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->__kgspExecuteScrubberIfNeeded__ = &kgspExecuteScrubberIfNeeded_AD102;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x1000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | GH100 */
{
pThis->__kgspExecuteScrubberIfNeeded__ = &kgspExecuteScrubberIfNeeded_5baef9;
}
}
// Hal function -- kgspExecuteBooterLoad
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
@ -653,13 +679,43 @@ static void __nvoc_init_funcTable_KernelGsp_1(KernelGsp *pThis, RmHalspecOwner *
// Hal function -- kgspGetWprHeapSize
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x1000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | GH100 */
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x100007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GH100 */
{
pThis->__kgspGetWprHeapSize__ = &kgspGetWprHeapSize_e77d51;
pThis->__kgspGetWprHeapSize__ = &kgspGetWprHeapSize_5661b8;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */
{
pThis->__kgspGetWprHeapSize__ = &kgspGetWprHeapSize_15390a;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f00000UL) )) /* ChipHal: AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->__kgspGetWprHeapSize__ = &kgspGetWprHeapSize_38f3bc;
pThis->__kgspGetWprHeapSize__ = &kgspGetWprHeapSize_AD102;
}
}
// Hal function -- kgspInitVgpuPartitionLogging
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x1000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | GH100 */
{
pThis->__kgspInitVgpuPartitionLogging__ = &kgspInitVgpuPartitionLogging_395e98;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f00000UL) )) /* ChipHal: AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->__kgspInitVgpuPartitionLogging__ = &kgspInitVgpuPartitionLogging_IMPL;
}
}
// Hal function -- kgspFreeVgpuPartitionLogging
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x1000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 | GH100 */
{
pThis->__kgspFreeVgpuPartitionLogging__ = &kgspFreeVgpuPartitionLogging_395e98;
}
else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x01f00000UL) )) /* ChipHal: AD102 | AD103 | AD104 | AD106 | AD107 */
{
pThis->__kgspFreeVgpuPartitionLogging__ = &kgspFreeVgpuPartitionLogging_IMPL;
}
}
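
For illustration only (not from this commit): the generated HAL wiring above selects an implementation by first matching the index group (the `>> 5` comparison) and then testing one bit per chip in a 32-bit mask. A compilable sketch of that selection idiom, with made-up chip indices and mask values:

#include <stdio.h>

/* Made-up chip indices for the sketch; the real values are generated by NVOC. */
enum { DEMO_GA102 = 11, DEMO_AD102 = 20, DEMO_AD103 = 21 };

/* Mirrors the generated test: same 32-entry group, then a per-chip bit. */
static int demo_chip_in_group(unsigned idx, unsigned long mask)
{
    return ((idx >> 5) == 0UL) && ((1UL << (idx & 0x1f)) & mask);
}

int main(void)
{
    unsigned long ada_mask = (1UL << DEMO_AD102) | (1UL << DEMO_AD103);
    printf("AD102 selected: %d\n", demo_chip_in_group(DEMO_AD102, ada_mask));
    printf("GA102 selected: %d\n", demo_chip_in_group(DEMO_GA102, ada_mask));
    return 0;
}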

View File

@ -49,6 +49,7 @@ extern "C" {
#include "gpu/gsp/gsp_static_config.h"
#include "gpu/gsp/gsp_init_args.h"
#include "nv-firmware.h"
#include "nv_sriov_defines.h"
#include "rmRiscvUcode.h"
#include "libos_init_args.h"
@ -213,7 +214,7 @@ typedef struct GSP_FIRMWARE
enum
{
LOGIDX_INIT,
LOGIDX_VGPU,
LOGIDX_INTR,
LOGIDX_RM,
LOGIDX_SIZE
};
@ -267,9 +268,11 @@ struct KernelGsp {
NV_STATUS (*__kgspResetHw__)(struct OBJGPU *, struct KernelGsp *);
NvBool (*__kgspIsEngineInReset__)(struct OBJGPU *, struct KernelGsp *);
NvU32 (*__kgspGetFrtsSize__)(struct OBJGPU *, struct KernelGsp *);
NvU64 (*__kgspGetPrescrubbedTopFbSize__)(struct OBJGPU *, struct KernelGsp *);
NV_STATUS (*__kgspExtractVbiosFromRom__)(struct OBJGPU *, struct KernelGsp *, KernelGspVbiosImg **);
NV_STATUS (*__kgspExecuteFwsecFrts__)(struct OBJGPU *, struct KernelGsp *, KernelGspFlcnUcode *, const NvU64);
NV_STATUS (*__kgspExecuteFwsecSb__)(struct OBJGPU *, struct KernelGsp *, KernelGspFlcnUcode *);
NV_STATUS (*__kgspExecuteScrubberIfNeeded__)(struct OBJGPU *, struct KernelGsp *);
NV_STATUS (*__kgspExecuteBooterLoad__)(struct OBJGPU *, struct KernelGsp *, const NvU64);
NV_STATUS (*__kgspExecuteBooterUnloadIfNeeded__)(struct OBJGPU *, struct KernelGsp *);
NV_STATUS (*__kgspExecuteHsFalcon__)(struct OBJGPU *, struct KernelGsp *, KernelGspFlcnUcode *, struct KernelFalcon *, NvU32 *, NvU32 *);
@ -277,6 +280,8 @@ struct KernelGsp {
const BINDATA_ARCHIVE *(*__kgspGetBinArchiveBooterLoadUcode__)(struct KernelGsp *);
const BINDATA_ARCHIVE *(*__kgspGetBinArchiveBooterUnloadUcode__)(struct KernelGsp *);
NvU64 (*__kgspGetWprHeapSize__)(struct OBJGPU *, struct KernelGsp *);
NV_STATUS (*__kgspInitVgpuPartitionLogging__)(struct OBJGPU *, struct KernelGsp *, NvU32, NvU64, NvU64, NvU64, NvU64);
NV_STATUS (*__kgspFreeVgpuPartitionLogging__)(struct OBJGPU *, struct KernelGsp *, NvU32);
const char *(*__kgspGetSignatureSectionNamePrefix__)(struct OBJGPU *, struct KernelGsp *);
NV_STATUS (*__kgspSetupGspFmcArgs__)(struct OBJGPU *, struct KernelGsp *, GSP_FIRMWARE *);
void (*__kgspStateDestroy__)(POBJGPU, struct KernelGsp *);
@ -302,6 +307,7 @@ struct KernelGsp {
NV_STATUS (*__kgspSetTunableState__)(POBJGPU, struct KernelGsp *, void *);
struct OBJRPC *pRpc;
KernelGspFlcnUcode *pFwsecUcode;
KernelGspFlcnUcode *pScrubberUcode;
KernelGspFlcnUcode *pBooterLoadUcode;
KernelGspFlcnUcode *pBooterUnloadUcode;
MEMORY_DESCRIPTOR *pWprMetaDescriptor;
@ -324,8 +330,12 @@ struct KernelGsp {
MEMORY_DESCRIPTOR *pGspUCodeRadix3Descriptor;
MEMORY_DESCRIPTOR *pSignatureMemdesc;
LIBOS_LOG_DECODE logDecode;
LIBOS_LOG_DECODE logDecodeVgpuPartition[32];
RM_LIBOS_LOG_MEM rmLibosLogMem[3];
RM_LIBOS_LOG_MEM gspPluginInitTaskLogMem[32];
RM_LIBOS_LOG_MEM gspPluginVgpuTaskLogMem[32];
void *pLogElf;
NvU64 logElfDataSize;
NvBool bLibosLogsPollingEnabled;
NvBool bInInit;
NvBool bPollingForRpcResponse;
@ -402,12 +412,16 @@ NV_STATUS __nvoc_objCreate_KernelGsp(KernelGsp**, Dynamic*, NvU32);
#define kgspIsEngineInReset_HAL(pGpu, pKernelGsp) kgspIsEngineInReset_DISPATCH(pGpu, pKernelGsp)
#define kgspGetFrtsSize(pGpu, pKernelGsp) kgspGetFrtsSize_DISPATCH(pGpu, pKernelGsp)
#define kgspGetFrtsSize_HAL(pGpu, pKernelGsp) kgspGetFrtsSize_DISPATCH(pGpu, pKernelGsp)
#define kgspGetPrescrubbedTopFbSize(pGpu, pKernelGsp) kgspGetPrescrubbedTopFbSize_DISPATCH(pGpu, pKernelGsp)
#define kgspGetPrescrubbedTopFbSize_HAL(pGpu, pKernelGsp) kgspGetPrescrubbedTopFbSize_DISPATCH(pGpu, pKernelGsp)
#define kgspExtractVbiosFromRom(pGpu, pKernelGsp, ppVbiosImg) kgspExtractVbiosFromRom_DISPATCH(pGpu, pKernelGsp, ppVbiosImg)
#define kgspExtractVbiosFromRom_HAL(pGpu, pKernelGsp, ppVbiosImg) kgspExtractVbiosFromRom_DISPATCH(pGpu, pKernelGsp, ppVbiosImg)
#define kgspExecuteFwsecFrts(pGpu, pKernelGsp, pFwsecUcode, frtsOffset) kgspExecuteFwsecFrts_DISPATCH(pGpu, pKernelGsp, pFwsecUcode, frtsOffset)
#define kgspExecuteFwsecFrts_HAL(pGpu, pKernelGsp, pFwsecUcode, frtsOffset) kgspExecuteFwsecFrts_DISPATCH(pGpu, pKernelGsp, pFwsecUcode, frtsOffset)
#define kgspExecuteFwsecSb(pGpu, pKernelGsp, pFwsecUcode) kgspExecuteFwsecSb_DISPATCH(pGpu, pKernelGsp, pFwsecUcode)
#define kgspExecuteFwsecSb_HAL(pGpu, pKernelGsp, pFwsecUcode) kgspExecuteFwsecSb_DISPATCH(pGpu, pKernelGsp, pFwsecUcode)
#define kgspExecuteScrubberIfNeeded(pGpu, pKernelGsp) kgspExecuteScrubberIfNeeded_DISPATCH(pGpu, pKernelGsp)
#define kgspExecuteScrubberIfNeeded_HAL(pGpu, pKernelGsp) kgspExecuteScrubberIfNeeded_DISPATCH(pGpu, pKernelGsp)
#define kgspExecuteBooterLoad(pGpu, pKernelGsp, gspFwWprMetaOffset) kgspExecuteBooterLoad_DISPATCH(pGpu, pKernelGsp, gspFwWprMetaOffset)
#define kgspExecuteBooterLoad_HAL(pGpu, pKernelGsp, gspFwWprMetaOffset) kgspExecuteBooterLoad_DISPATCH(pGpu, pKernelGsp, gspFwWprMetaOffset)
#define kgspExecuteBooterUnloadIfNeeded(pGpu, pKernelGsp) kgspExecuteBooterUnloadIfNeeded_DISPATCH(pGpu, pKernelGsp)
@ -422,6 +436,10 @@ NV_STATUS __nvoc_objCreate_KernelGsp(KernelGsp**, Dynamic*, NvU32);
#define kgspGetBinArchiveBooterUnloadUcode_HAL(pKernelGsp) kgspGetBinArchiveBooterUnloadUcode_DISPATCH(pKernelGsp)
#define kgspGetWprHeapSize(pGpu, pKernelGsp) kgspGetWprHeapSize_DISPATCH(pGpu, pKernelGsp)
#define kgspGetWprHeapSize_HAL(pGpu, pKernelGsp) kgspGetWprHeapSize_DISPATCH(pGpu, pKernelGsp)
#define kgspInitVgpuPartitionLogging(pGpu, pKernelGsp, gfid, initTaskLogBUffOffset, initTaskLogBUffSize, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize) kgspInitVgpuPartitionLogging_DISPATCH(pGpu, pKernelGsp, gfid, initTaskLogBUffOffset, initTaskLogBUffSize, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize)
#define kgspInitVgpuPartitionLogging_HAL(pGpu, pKernelGsp, gfid, initTaskLogBUffOffset, initTaskLogBUffSize, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize) kgspInitVgpuPartitionLogging_DISPATCH(pGpu, pKernelGsp, gfid, initTaskLogBUffOffset, initTaskLogBUffSize, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize)
#define kgspFreeVgpuPartitionLogging(pGpu, pKernelGsp, gfid) kgspFreeVgpuPartitionLogging_DISPATCH(pGpu, pKernelGsp, gfid)
#define kgspFreeVgpuPartitionLogging_HAL(pGpu, pKernelGsp, gfid) kgspFreeVgpuPartitionLogging_DISPATCH(pGpu, pKernelGsp, gfid)
#define kgspGetSignatureSectionNamePrefix(pGpu, pKernelGsp) kgspGetSignatureSectionNamePrefix_DISPATCH(pGpu, pKernelGsp)
#define kgspGetSignatureSectionNamePrefix_HAL(pGpu, pKernelGsp) kgspGetSignatureSectionNamePrefix_DISPATCH(pGpu, pKernelGsp)
#define kgspSetupGspFmcArgs(pGpu, pKernelGsp, pGspFw) kgspSetupGspFmcArgs_DISPATCH(pGpu, pKernelGsp, pGspFw)
@ -681,6 +699,18 @@ static inline NvU32 kgspGetFrtsSize_DISPATCH(struct OBJGPU *pGpu, struct KernelG
return pKernelGsp->__kgspGetFrtsSize__(pGpu, pKernelGsp);
}
static inline NvU64 kgspGetPrescrubbedTopFbSize_e1e623(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return 256 * 1024 * 1024;
}
static inline NvU64 kgspGetPrescrubbedTopFbSize_dd2c0b(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return (NvU64)(-1);
}
static inline NvU64 kgspGetPrescrubbedTopFbSize_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return pKernelGsp->__kgspGetPrescrubbedTopFbSize__(pGpu, pKernelGsp);
}
NV_STATUS kgspExtractVbiosFromRom_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspVbiosImg **ppVbiosImg);
static inline NV_STATUS kgspExtractVbiosFromRom_395e98(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspVbiosImg **ppVbiosImg) {
@ -711,6 +741,16 @@ static inline NV_STATUS kgspExecuteFwsecSb_DISPATCH(struct OBJGPU *pGpu, struct
return pKernelGsp->__kgspExecuteFwsecSb__(pGpu, pKernelGsp, pFwsecUcode);
}
NV_STATUS kgspExecuteScrubberIfNeeded_AD102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp);
static inline NV_STATUS kgspExecuteScrubberIfNeeded_5baef9(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED);
}
static inline NV_STATUS kgspExecuteScrubberIfNeeded_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return pKernelGsp->__kgspExecuteScrubberIfNeeded__(pGpu, pKernelGsp);
}
NV_STATUS kgspExecuteBooterLoad_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, const NvU64 gspFwWprMetaOffset);
static inline NV_STATUS kgspExecuteBooterLoad_5baef9(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, const NvU64 gspFwWprMetaOffset) {
@ -787,18 +827,40 @@ static inline const BINDATA_ARCHIVE *kgspGetBinArchiveBooterUnloadUcode_DISPATCH
return pKernelGsp->__kgspGetBinArchiveBooterUnloadUcode__(pKernelGsp);
}
static inline NvU64 kgspGetWprHeapSize_e77d51(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return (64 * 1024 * 1024);
static inline NvU64 kgspGetWprHeapSize_5661b8(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return 64 * 1024 * 1024;
}
static inline NvU64 kgspGetWprHeapSize_38f3bc(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return (80 * 1024 * 1024);
static inline NvU64 kgspGetWprHeapSize_15390a(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return 80 * 1024 * 1024;
}
NvU64 kgspGetWprHeapSize_AD102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp);
static inline NvU64 kgspGetWprHeapSize_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
return pKernelGsp->__kgspGetWprHeapSize__(pGpu, pKernelGsp);
}
static inline NV_STATUS kgspInitVgpuPartitionLogging_395e98(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid, NvU64 initTaskLogBUffOffset, NvU64 initTaskLogBUffSize, NvU64 vgpuTaskLogBUffOffset, NvU64 vgpuTaskLogBuffSize) {
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS kgspInitVgpuPartitionLogging_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid, NvU64 initTaskLogBUffOffset, NvU64 initTaskLogBUffSize, NvU64 vgpuTaskLogBUffOffset, NvU64 vgpuTaskLogBuffSize);
static inline NV_STATUS kgspInitVgpuPartitionLogging_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid, NvU64 initTaskLogBUffOffset, NvU64 initTaskLogBUffSize, NvU64 vgpuTaskLogBUffOffset, NvU64 vgpuTaskLogBuffSize) {
return pKernelGsp->__kgspInitVgpuPartitionLogging__(pGpu, pKernelGsp, gfid, initTaskLogBUffOffset, initTaskLogBUffSize, vgpuTaskLogBUffOffset, vgpuTaskLogBuffSize);
}
static inline NV_STATUS kgspFreeVgpuPartitionLogging_395e98(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid) {
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS kgspFreeVgpuPartitionLogging_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid);
static inline NV_STATUS kgspFreeVgpuPartitionLogging_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 gfid) {
return pKernelGsp->__kgspFreeVgpuPartitionLogging__(pGpu, pKernelGsp, gfid);
}
const char *kgspGetSignatureSectionNamePrefix_GH100(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp);
static inline const char *kgspGetSignatureSectionNamePrefix_789efb(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) {
@ -1034,6 +1096,17 @@ static inline NV_STATUS kgspParseFwsecUcodeFromVbiosImg(struct OBJGPU *pGpu, str
#define kgspParseFwsecUcodeFromVbiosImg(pGpu, pKernelGsp, pVbiosImg, ppFwsecUcode) kgspParseFwsecUcodeFromVbiosImg_IMPL(pGpu, pKernelGsp, pVbiosImg, ppFwsecUcode)
#endif //__nvoc_kernel_gsp_h_disabled
NV_STATUS kgspAllocateScrubberUcodeImage_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode **ppScrubberUcode);
#ifdef __nvoc_kernel_gsp_h_disabled
static inline NV_STATUS kgspAllocateScrubberUcodeImage(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode **ppScrubberUcode) {
NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!");
return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kernel_gsp_h_disabled
#define kgspAllocateScrubberUcodeImage(pGpu, pKernelGsp, ppScrubberUcode) kgspAllocateScrubberUcodeImage_IMPL(pGpu, pKernelGsp, ppScrubberUcode)
#endif //__nvoc_kernel_gsp_h_disabled
NV_STATUS kgspAllocateBooterLoadUcodeImage_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode **ppBooterLoadUcode);
#ifdef __nvoc_kernel_gsp_h_disabled

View File

@ -149,7 +149,7 @@ struct KernelHostVgpuDeviceApi {
NV_STATUS (*__kernelhostvgpudeviceapiMap__)(struct KernelHostVgpuDeviceApi *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *);
NvBool (*__kernelhostvgpudeviceapiAccessCallback__)(struct KernelHostVgpuDeviceApi *, struct RsClient *, void *, RsAccessRight);
struct KernelHostVgpuDeviceShr *pShared;
NvU32 notifyActions[5];
NvU32 notifyActions[6];
};
#ifndef __NVOC_CLASS_KernelHostVgpuDeviceApi_TYPEDEF__

View File

@ -341,6 +341,20 @@ static void __nvoc_init_funcTable_KernelNvlink_1(KernelNvlink *pThis, RmHalspecO
}
}
// Hal function -- knvlinkHandleFaultUpInterrupt
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{
if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x10000000UL) )) /* ChipHal: GH100 */
{
pThis->__knvlinkHandleFaultUpInterrupt__ = &knvlinkHandleFaultUpInterrupt_GH100;
}
// default
else
{
pThis->__knvlinkHandleFaultUpInterrupt__ = &knvlinkHandleFaultUpInterrupt_46f6a7;
}
}
// Hal function -- knvlinkValidateFabricBaseAddress
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
{

View File

@ -38,6 +38,7 @@ extern "C" {
#include "core/locks.h"
#include "gpu/eng_state.h"
#include "lib/ref_count.h"
#include "objtmr.h"
#include "nvCpuUuid.h"
#include "gpu/bus/kern_bus.h"
@ -98,6 +99,9 @@ typedef struct _def_knvlink_conn_info
#define NVLINK_INITOPTIMIZE_POLL_TIMEOUT_EMU 20000000
#define NVLINK_INITOPTIMIZE_POLL_COUNT_DELAY_MS 1000
// Link Retrain after reset time = 10s
#define NVLINK_RETRAIN_TIME 10000000000
/**********************************************************/
// NvGpu identifier in nvlink core library
@ -179,6 +183,8 @@ typedef struct _def_knvlink_link
// RXDET per-lane status
NvU32 laneRxdetStatusMask;
TMR_EVENT *pTmrEvent;
} KNVLINK_RM_LINK, *PKNVLINK_RM_LINK;
typedef struct NVLINK_INBAND_CALLBACK
@ -189,6 +195,12 @@ typedef struct NVLINK_INBAND_CALLBACK
NvU32 wqItemFlags;
} NVLINK_INBAND_MSG_CALLBACK;
typedef struct
{
NvU8 linkId;
} NVLINK_ID, *PNVLINK_ID;
MAKE_LIST(FaultUpList, NVLINK_ID);
/*!
* KernelNvlink is a logical abstraction of the GPU Nvlink Engine. The
@ -215,6 +227,7 @@ struct KernelNvlink {
NV_STATUS (*__knvlinkStatePostUnload__)(OBJGPU *, struct KernelNvlink *, NvU32);
NvBool (*__knvlinkIsPresent__)(OBJGPU *, struct KernelNvlink *);
NV_STATUS (*__knvlinkSetUniqueFabricBaseAddress__)(OBJGPU *, struct KernelNvlink *, NvU64);
NV_STATUS (*__knvlinkHandleFaultUpInterrupt__)(OBJGPU *, struct KernelNvlink *, NvU32);
NV_STATUS (*__knvlinkValidateFabricBaseAddress__)(OBJGPU *, struct KernelNvlink *, NvU64);
NvU32 (*__knvlinkGetConnectedLinksMask__)(OBJGPU *, struct KernelNvlink *);
NV_STATUS (*__knvlinkEnableLinksPostTopology__)(OBJGPU *, struct KernelNvlink *, NvU32);
@ -283,6 +296,7 @@ struct KernelNvlink {
NvU32 bridgeSensableLinks;
NvU32 bridgedLinks;
NvU32 enabledLinks;
FaultUpList faultUpLinks;
NvU32 initializedLinks;
KNVLINK_RM_LINK nvlinkLinks[18];
NvBool bIsGpuDegraded;
@ -381,6 +395,8 @@ NV_STATUS __nvoc_objCreate_KernelNvlink(KernelNvlink**, Dynamic*, NvU32);
#define knvlinkIsPresent(arg0, arg1) knvlinkIsPresent_DISPATCH(arg0, arg1)
#define knvlinkSetUniqueFabricBaseAddress(pGpu, pKernelNvlink, arg0) knvlinkSetUniqueFabricBaseAddress_DISPATCH(pGpu, pKernelNvlink, arg0)
#define knvlinkSetUniqueFabricBaseAddress_HAL(pGpu, pKernelNvlink, arg0) knvlinkSetUniqueFabricBaseAddress_DISPATCH(pGpu, pKernelNvlink, arg0)
#define knvlinkHandleFaultUpInterrupt(pGpu, pKernelNvlink, arg0) knvlinkHandleFaultUpInterrupt_DISPATCH(pGpu, pKernelNvlink, arg0)
#define knvlinkHandleFaultUpInterrupt_HAL(pGpu, pKernelNvlink, arg0) knvlinkHandleFaultUpInterrupt_DISPATCH(pGpu, pKernelNvlink, arg0)
#define knvlinkValidateFabricBaseAddress(pGpu, pKernelNvlink, arg0) knvlinkValidateFabricBaseAddress_DISPATCH(pGpu, pKernelNvlink, arg0)
#define knvlinkValidateFabricBaseAddress_HAL(pGpu, pKernelNvlink, arg0) knvlinkValidateFabricBaseAddress_DISPATCH(pGpu, pKernelNvlink, arg0)
#define knvlinkGetConnectedLinksMask(pGpu, pKernelNvlink) knvlinkGetConnectedLinksMask_DISPATCH(pGpu, pKernelNvlink)
@ -1351,6 +1367,16 @@ static inline NV_STATUS knvlinkSetUniqueFabricBaseAddress_DISPATCH(OBJGPU *pGpu,
return pKernelNvlink->__knvlinkSetUniqueFabricBaseAddress__(pGpu, pKernelNvlink, arg0);
}
NV_STATUS knvlinkHandleFaultUpInterrupt_GH100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0);
static inline NV_STATUS knvlinkHandleFaultUpInterrupt_46f6a7(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0) {
return NV_ERR_NOT_SUPPORTED;
}
static inline NV_STATUS knvlinkHandleFaultUpInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0) {
return pKernelNvlink->__knvlinkHandleFaultUpInterrupt__(pGpu, pKernelNvlink, arg0);
}
NV_STATUS knvlinkValidateFabricBaseAddress_GA100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0);
NV_STATUS knvlinkValidateFabricBaseAddress_GH100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0);
@ -1640,7 +1666,7 @@ NV_STATUS knvlinkRetrainLinkFromSafe(OBJGPU *pGpu, struct KernelNvlink *pKernelN
// NVLINK Callback functions from core library
//
#if defined(INCLUDE_NVLINK_LIB)
// Device callback functions
NvlStatus knvlinkCoreAddDeviceCallback (nvlink_device *dev);
@ -1674,6 +1700,8 @@ NvlStatus knvlinkCoreAliTrainingCallback (nvlink_link *link);
// NVLINK Utility Functions
void knvlinkUtoa(NvU8 *, NvU64, NvU64);
NV_STATUS ioctrlFaultUpTmrHandler(OBJGPU *, struct OBJTMR *,TMR_EVENT *);
#endif // _KERNEL_NVLINK_H_
#ifdef __cplusplus

Some files were not shown because too many files have changed in this diff