First whack at radeon driver.

No hardware to test yet, but it builds.
riastradh 2014-07-16 20:59:57 +00:00
parent 77b5597a2b
commit b81c64e1b2
139 changed files with 7158 additions and 429 deletions

View File

@ -1,6 +1,7 @@
include "arch/amd64/conf/NO_DRM"
i915drmkms* at pci? dev ? function ?
radeondrmkms* at pci? dev ? function ?
no options DIAGNOSTIC
options DIAGNOSTIC # expensive kernel consistency check

View File

@ -1,4 +1,4 @@
/* $NetBSD: kernel.h,v 1.3 2014/07/16 20:56:24 riastradh Exp $ */
/* $NetBSD: kernel.h,v 1.4 2014/07/16 20:59:57 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -82,6 +82,8 @@
#define round_up(X, N) ((((X) - 1) | ((N) - 1)) + 1)
#define round_down(X, N) ((X) & ~(uintmax_t)((N) - 1))
#define IS_ALIGNED(X, N) (((X) & ((N) - 1)) == 0)
/*
* These select 32-bit halves of what may be 32- or 64-bit quantities,
* for which straight 32-bit shifts may be undefined behaviour (and do
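The hunk cuts off mid-comment and the macro bodies are not shown. As a sketch, this is the double-shift idiom the comment alludes to, assuming the standard Linux names lower_32_bits/upper_32_bits that this compat header emulates:

/* Sketch, not part of the diff: shifting by 16 twice avoids a single
 * shift by 32, which is undefined behaviour when X is a 32-bit type. */
#define lower_32_bits(X)	((uint32_t)(X))
#define upper_32_bits(X)	((uint32_t)(((X) >> 16) >> 16))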

View File

@ -1,4 +1,4 @@
/* $NetBSD: list.h,v 1.3 2014/07/16 20:56:24 riastradh Exp $ */
/* $NetBSD: list.h,v 1.4 2014/07/16 20:59:57 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -223,6 +223,12 @@ list_del_init(struct list_head *node)
&(VAR)->FIELD != (HEAD); \
(VAR) = list_next_entry((VAR), FIELD))
#define list_for_each_entry_safe_from(VAR, NEXT, HEAD, FIELD) \
for (; \
(&(VAR)->FIELD != (HEAD)) && \
((NEXT) = list_next_entry((VAR), FIELD)); \
(VAR) = (NEXT))
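A minimal usage sketch of the new _safe_from variant (the element type and field names are hypothetical): iteration continues from the caller-positioned VAR, and the latched NEXT makes it safe to delete the current node in the body.

struct item {
	int key;
	struct list_head entry;
};

static void
prune_from(struct item *start, struct list_head *head)
{
	struct item *it = start, *next;

	/* Safe against list_del() of the current element, because the
	 * successor is latched in `next' before the body runs. */
	list_for_each_entry_safe_from(it, next, head, entry) {
		if (it->key < 0)
			list_del(&it->entry);
	}
}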
/*
* `H'ead-only/`H'ash-table doubly-linked lists.
*/

View File

@ -30,7 +30,7 @@
#ifdef ATOM_DEBUG
#define ATOM_OP_NAMES_CNT 123
static char *atom_op_names[ATOM_OP_NAMES_CNT] = {
static const char *atom_op_names[ATOM_OP_NAMES_CNT] = {
"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL",
"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC",
"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG",
@ -56,7 +56,7 @@ static char *atom_op_names[ATOM_OP_NAMES_CNT] = {
};
#define ATOM_TABLE_NAMES_CNT 74
static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
static const char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit",
"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit",
"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl",
@ -85,7 +85,7 @@ static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
};
#define ATOM_IO_NAMES_CNT 5
static char *atom_io_names[ATOM_IO_NAMES_CNT] = {
static const char *atom_io_names[ATOM_IO_NAMES_CNT] = {
"MM", "PLL", "MC", "PCIE", "PCIE PORT",
};

View File

@ -92,6 +92,15 @@ static void debug_print_spaces(int n)
printk(" ");
}
#ifdef __NetBSD__ /* XXX */
/*
* Kludge: NetBSD defines DEBUG to mean debugging is enabled. Since
* we're not going to include any more header files, it's OK for it to
* be defined unconditionally after this.
*/
#undef DEBUG
#endif
#define DEBUG(...) do if (atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
#define SDEBUG(...) do if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
#else

View File

@ -125,7 +125,7 @@ struct card_info {
struct atom_context {
struct card_info *card;
struct mutex mutex;
void *bios;
uint8_t *bios;
uint32_t cmd_table, data_table;
uint16_t *iio;

View File

@ -28,6 +28,7 @@
#include <drm/radeon_drm.h>
#include <drm/drm_fixed.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "atom-bits.h"

View File

@ -36,10 +36,10 @@
#define DP_LINK_CONFIGURATION_SIZE 9
#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
static char *voltage_names[] = {
static const char *voltage_names[] = {
"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
static const char *pre_emph_names[] = {
"0dB", "3.5dB", "6dB", "9.5dB"
};
@ -404,7 +404,6 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
u8 tmp;
@ -415,8 +414,6 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
if (!radeon_connector->con_priv)
return panel_mode;
dig_connector = radeon_connector->con_priv;
if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
/* DP bridge chips */
if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,

View File

@ -27,6 +27,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include <linux/backlight.h>

View File

@ -63,10 +63,8 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
memcpy(&out, &buf[1], num);
args.lpI2CDataOut = cpu_to_le16(out);
} else {
if (num > ATOM_MAX_HW_I2C_READ) {
DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
return -EINVAL;
}
CTASSERT(ATOM_MAX_HW_I2C_READ <
(uintmax_t)1 << (CHAR_BIT*sizeof(num)));
args.ucRegIndex = 0;
args.lpI2CDataOut = 0;
}
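The runtime range check becomes a compile-time one: what can be requested is bounded by the width of num's type (CHAR_BIT*sizeof(num) bits), and the CTASSERT fails the build if ATOM_MAX_HW_I2C_READ ever falls outside that range. Roughly how such an assertion expands (illustrative sketch only; the real CTASSERT lives in <sys/cdefs.h>):

/* Illustrative sketch: a negative array size rejects the build
 * whenever the asserted condition is false at compile time. */
#define EXAMPLE_CTASSERT(x)	typedef char example_ctassert[(x) ? 1 : -1]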

View File

@ -24,6 +24,7 @@
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "btcd.h"
#include "r600_dpm.h"
#include "cypress_dpm.h"
@ -2761,6 +2762,7 @@ void btc_dpm_fini(struct radeon_device *rdev)
r600_free_extended_power_table(rdev);
}
#ifdef CONFIG_DEBUG_FS
void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
@ -2791,6 +2793,7 @@ void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
}
}
}
#endif
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low)
{

View File

@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_ucode.h"
#include "cikd.h"
#include "r600_dpm.h"
@ -181,8 +182,10 @@ static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
struct atom_voltage_table_entry *voltage_table,
u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
#ifndef __NetBSD__ /* XXX unused? */
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
u32 target_tdp);
#endif
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
@ -691,6 +694,7 @@ static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
return ret;
}
#ifndef __NetBSD__ /* XXX unused? */
static int ci_power_control_set_level(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
@ -714,6 +718,7 @@ static int ci_power_control_set_level(struct radeon_device *rdev)
return ret;
}
#endif
void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
@ -995,6 +1000,8 @@ static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
tmp &= DPM_EVENT_SRC_MASK;
tmp |= DPM_EVENT_SRC(dpm_event_src);
WREG32_SMC(CG_THERMAL_CTRL, tmp);
#else
(void)dpm_event_src;
#endif
tmp = RREG32_SMC(GENERAL_PWRMGT);
@ -1245,6 +1252,7 @@ static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev
return ci_send_msg_to_smc(rdev, msg);
}
#ifndef __NetBSD__ /* XXX unused? */
static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
PPSMC_Msg msg, u32 *parameter)
{
@ -1257,6 +1265,7 @@ static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rd
return smc_result;
}
#endif
static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
{
@ -1314,6 +1323,7 @@ static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
return 0;
}
#ifndef __NetBSD__ /* XXX unused? */
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
u32 target_tdp)
{
@ -1328,7 +1338,9 @@ static int ci_set_boot_state(struct radeon_device *rdev)
{
return ci_enable_sclk_mclk_dpm(rdev, false);
}
#endif
#ifdef CONFIG_DEBUG_FS
static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
{
u32 sclk_freq;
@ -1354,6 +1366,7 @@ static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
return mclk_freq;
}
#endif
static void ci_dpm_start_smc(struct radeon_device *rdev)
{
@ -4815,6 +4828,7 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
return 0;
}
#ifndef __NetBSD__ /* XXX unused? */
int ci_dpm_power_control_set_level(struct radeon_device *rdev)
{
return ci_power_control_set_level(rdev);
@ -4824,6 +4838,7 @@ void ci_dpm_reset_asic(struct radeon_device *rdev)
{
ci_set_boot_state(rdev);
}
#endif
void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
{
@ -5083,18 +5098,22 @@ int ci_dpm_init(struct radeon_device *rdev)
u8 frev, crev;
struct ci_power_info *pi;
int ret;
#ifndef __NetBSD__
u32 mask;
#endif
pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
if (pi == NULL)
return -ENOMEM;
rdev->pm.dpm.priv = pi;
#ifndef __NetBSD__
ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
if (ret)
pi->sys_pcie_mask = 0;
else
pi->sys_pcie_mask = mask;
#endif
pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
@ -5276,6 +5295,7 @@ int ci_dpm_init(struct radeon_device *rdev)
return 0;
}
#ifdef CONFIG_DEBUG_FS
void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
@ -5285,6 +5305,7 @@ void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
seq_printf(m, "power level avg sclk: %u mclk: %u\n",
sclk, mclk);
}
#endif
void ci_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *rps)

View File

@ -1628,7 +1628,12 @@ u32 cik_get_xclk(struct radeon_device *rdev)
u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index)
{
if (index < rdev->doorbell.num_doorbells) {
#ifdef __NetBSD__
return bus_space_read_4(rdev->doorbell.bst, rdev->doorbell.bsh,
index*4);
#else
return readl(rdev->doorbell.ptr + index);
#endif
} else {
DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
return 0;
@ -1648,7 +1653,12 @@ u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index)
void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v)
{
if (index < rdev->doorbell.num_doorbells) {
#ifdef __NetBSD__
bus_space_write_4(rdev->doorbell.bst, rdev->doorbell.bsh,
index*4, v);
#else
writel(v, rdev->doorbell.ptr + index);
#endif
} else {
DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
}
@ -1859,7 +1869,11 @@ static int cik_init_microcode(struct radeon_device *rdev)
mc_req_size = BONAIRE_MC_UCODE_SIZE * 4;
mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(BONAIRE_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
#endif
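/*
 * (Editorial note: NetBSD's ALIGN() from <sys/param.h> is a
 * one-argument pointer-alignment macro, not Linux's two-argument
 * round-up-to-multiple, hence the round_up() spelling in the
 * __NetBSD__ branches here and below.)
 */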
break;
case CHIP_HAWAII:
chip_name = "HAWAII";
@ -1871,7 +1885,11 @@ static int cik_init_microcode(struct radeon_device *rdev)
mc_req_size = HAWAII_MC_UCODE_SIZE * 4;
mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(HAWAII_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_KAVERI:
chip_name = "KAVERI";
@ -3254,7 +3272,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
static void cik_gpu_init(struct radeon_device *rdev)
{
u32 gb_addr_config = RREG32(GB_ADDR_CONFIG);
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 mc_shared_chmap __unused, mc_arb_ramcfg;
u32 hdp_host_path_cntl;
u32 tmp;
int i, j;
@ -7446,8 +7464,15 @@ restart_ih:
if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[0]) {
drm_handle_vblank(rdev->ddev, 0);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
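/*
 * (Editorial note: on NetBSD the vblank_sync update and the wakeup
 * are done under vblank_lock because DRM_SPIN_WAKEUP_ONE is the
 * spin-interlocked wakeup primitive; the same pattern repeats for
 * each CRTC below and in the other IRQ handlers.)
 */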
if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
@ -7472,8 +7497,15 @@ restart_ih:
if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[1]) {
drm_handle_vblank(rdev->ddev, 1);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
@ -7498,8 +7530,15 @@ restart_ih:
if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[2]) {
drm_handle_vblank(rdev->ddev, 2);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[2]))
radeon_crtc_handle_flip(rdev, 2);
@ -7524,8 +7563,15 @@ restart_ih:
if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[3]) {
drm_handle_vblank(rdev->ddev, 3);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[3]))
radeon_crtc_handle_flip(rdev, 3);
@ -7550,8 +7596,15 @@ restart_ih:
if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[4]) {
drm_handle_vblank(rdev->ddev, 4);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[4]))
radeon_crtc_handle_flip(rdev, 4);
@ -7576,8 +7629,15 @@ restart_ih:
if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[5]) {
drm_handle_vblank(rdev->ddev, 5);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[5]))
radeon_crtc_handle_flip(rdev, 5);
@ -9145,6 +9205,7 @@ int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
#ifndef __NetBSD__ /* XXX radeon pcie */
struct pci_dev *root = rdev->pdev->bus->self;
int bridge_pos, gpu_pos;
u32 speed_cntl, mask, current_data_rate;
@ -9298,6 +9359,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
break;
udelay(1);
}
#endif
}
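/*
 * (Editorial note, assumption: the PCIe gen3/ASPM bodies are compiled
 * out under __NetBSD__ because they walk Linux pci_dev and
 * pcie_capability helpers that presumably have no NetBSD equivalents
 * wired up yet; see the matching "XXX radeon pcie" stubs below.)
 */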
static void cik_program_aspm(struct radeon_device *rdev)
@ -9378,13 +9440,17 @@ static void cik_program_aspm(struct radeon_device *rdev)
WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
if (!disable_clkreq) {
#ifndef __NetBSD__ /* XXX radeon pcie */
struct pci_dev *root = rdev->pdev->bus->self;
u32 lnkcap;
#endif
clk_req_support = false;
#ifndef __NetBSD__ /* XXX radeon pcie */
pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
clk_req_support = true;
#endif
} else {
clk_req_support = false;
}

View File

@ -572,6 +572,37 @@ int cik_copy_dma(struct radeon_device *rdev,
return r;
}
#ifdef __NetBSD__
/*
* XXX Can't use bus_space here because this is all mapped through the
* radeon_bo abstraction. Can't assume we're x86 because this is
* AMD/ATI Radeon, not Intel.
*/
# define __iomem volatile
# define readl fake_readl
# define writel fake_writel
static inline uint32_t
fake_readl(const void __iomem *ptr)
{
uint32_t v;
v = *(const uint32_t __iomem *)ptr;
membar_consumer();
return v;
}
static inline void
fake_writel(uint32_t v, void __iomem *ptr)
{
membar_producer();
*(uint32_t __iomem *)ptr = v;
}
#endif
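/*
 * (Editorial note: the barrier placement follows the usual pattern:
 * membar_producer() before the store makes earlier writes visible
 * before the store lands, and membar_consumer() after the load orders
 * the load before later dependent reads.)
 */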
/**
* cik_sdma_ring_test - simple async dma engine test
*
@ -587,7 +618,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
{
unsigned i;
int r;
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
void __iomem *ptr = rdev->vram_scratch.ptr;
u32 tmp;
if (!ptr) {
@ -641,7 +672,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
struct radeon_ib ib;
unsigned i;
int r;
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
void __iomem *ptr = rdev->vram_scratch.ptr;
u32 tmp = 0;
if (!ptr) {
@ -692,6 +723,12 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
#ifdef __NetBSD__
# undef fake_writel
# undef fake_readl
# undef __iomem
#endif
/**
* cik_sdma_is_lockup - Check if the DMA engine is locked up
*

View File

@ -24,6 +24,7 @@
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"
#include "r600_dpm.h"
#include "cypress_dpm.h"
@ -2004,11 +2005,13 @@ int cypress_dpm_set_power_state(struct radeon_device *rdev)
return 0;
}
#ifndef __NetBSD__ /* XXX unused? */
void cypress_dpm_reset_asic(struct radeon_device *rdev)
{
rv770_restrict_performance_levels_before_switch(rdev);
rv770_set_boot_state(rdev);
}
#endif
void cypress_dpm_display_configuration_changed(struct radeon_device *rdev)
{

View File

@ -21,6 +21,7 @@
*
* Authors: Alex Deucher
*/
#include <linux/bitops.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@ -1175,6 +1176,7 @@ int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
#ifndef __NetBSD__ /* XXX radeon pcie */
int readrq;
u16 v;
@ -1185,6 +1187,7 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
*/
if ((v == 0) || (v == 6) || (v == 7))
pcie_set_readrq(rdev->pdev, 512);
#endif
}
void dce4_program_fmt(struct drm_encoder *encoder)
@ -3006,7 +3009,7 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
static void evergreen_gpu_init(struct radeon_device *rdev)
{
u32 gb_addr_config;
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 mc_shared_chmap __unused, mc_arb_ramcfg;
u32 sx_debug_1;
u32 smx_dc_ctl0;
u32 sq_config;
@ -3022,7 +3025,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
u32 vgt_cache_invalidation;
u32 hdp_host_path_cntl, tmp;
u32 disabled_rb_mask;
int i, j, num_shader_engines, ps_thread_count;
int i, j, ps_thread_count;
switch (rdev->family) {
case CHIP_CYPRESS:
@ -3320,8 +3323,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.tile_config |=
((gb_addr_config & 0x30000000) >> 28) << 12;
num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1;
if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
u32 efuse_straps_4;
u32 efuse_straps_3;
@ -4047,7 +4048,7 @@ int sumo_rlc_init(struct radeon_device *rdev)
return r;
}
r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)__UNVOLATILE(&rdev->rlc.sr_ptr));
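	/*
	 * (Editorial note: __UNVOLATILE strips the volatile qualifier
	 * that the NetBSD declarations of these kmap targets carry,
	 * presumably because they map device-visible memory, so the
	 * address can pass through radeon_bo_kmap's void ** argument.)
	 */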
if (r) {
dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
sumo_rlc_fini(rdev);
@ -4125,7 +4126,7 @@ int sumo_rlc_init(struct radeon_device *rdev)
return r;
}
r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)__UNVOLATILE(&rdev->rlc.cs_ptr));
if (r) {
dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
sumo_rlc_fini(rdev);
@ -4201,7 +4202,7 @@ int sumo_rlc_init(struct radeon_device *rdev)
sumo_rlc_fini(rdev);
return r;
}
r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)&rdev->rlc.cp_table_ptr);
r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)__UNVOLATILE(&rdev->rlc.cp_table_ptr));
if (r) {
dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r);
sumo_rlc_fini(rdev);
@ -4806,8 +4807,15 @@ restart_ih:
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[0]) {
drm_handle_vblank(rdev->ddev, 0);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
@ -4832,8 +4840,15 @@ restart_ih:
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[1]) {
drm_handle_vblank(rdev->ddev, 1);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
@ -4858,8 +4873,15 @@ restart_ih:
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[2]) {
drm_handle_vblank(rdev->ddev, 2);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[2]))
radeon_crtc_handle_flip(rdev, 2);
@ -4884,8 +4906,15 @@ restart_ih:
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[3]) {
drm_handle_vblank(rdev->ddev, 3);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[3]))
radeon_crtc_handle_flip(rdev, 3);
@ -4910,8 +4939,15 @@ restart_ih:
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[4]) {
drm_handle_vblank(rdev->ddev, 4);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[4]))
radeon_crtc_handle_flip(rdev, 4);
@ -4936,8 +4972,15 @@ restart_ih:
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[5]) {
drm_handle_vblank(rdev->ddev, 5);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[5]))
radeon_crtc_handle_flip(rdev, 5);
@ -5501,6 +5544,7 @@ void evergreen_fini(struct radeon_device *rdev)
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
#ifndef __NetBSD__ /* XXX radeon pcie */
u32 link_width_cntl, speed_cntl;
if (radeon_pcie_gen2 == 0)
@ -5560,6 +5604,7 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
#endif
}
void evergreen_program_aspm(struct radeon_device *rdev)

View File

@ -31,8 +31,10 @@
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))
#ifndef __NetBSD__
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))
#endif
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
@ -818,7 +820,11 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
/* align height */
evergreen_surface_check(p, &surf, NULL);
#ifdef __NetBSD__ /* XXX ALIGN means something else */
surf.nby = round_up(surf.nby, surf.halign);
#else
surf.nby = ALIGN(surf.nby, surf.halign);
#endif
r = evergreen_surface_check(p, &surf, "texture");
if (r) {
@ -891,8 +897,13 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
__func__, __LINE__, surf.mode);
return -EINVAL;
}
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
surf.nbx = round_up(surf.nbx, surf.palign);
surf.nby = round_up(surf.nby, surf.halign);
#else
surf.nbx = ALIGN(surf.nbx, surf.palign);
surf.nby = ALIGN(surf.nby, surf.halign);
#endif
r = evergreen_surface_check(p, &surf, "mipmap");
if (r) {
@ -945,7 +956,7 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
(u64)track->vgt_strmout_size[i];
if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
DRM_ERROR("streamout %d bo too small: 0x%"PRIx64", 0x%lx\n",
i, offset,
radeon_bo_size(track->vgt_strmout_bo[i]));
return -EINVAL;
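/*
 * (Editorial note: on LP64 NetBSD, u64 is plain "unsigned long", so
 * the %llx/%llu conversions are replaced with the portable PRIx64 and
 * PRIu64 macros from <inttypes.h> throughout these files.)
 */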
@ -2109,7 +2120,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
dev_warn(p->dev, "CP DMA src buffer too small (%"PRIu64" %lu)\n",
tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@ -2147,7 +2158,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
dev_warn(p->dev, "CP DMA dst buffer too small (%"PRIu64" %lu)\n",
tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@ -2441,7 +2452,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%"PRIx64", 0x%lx\n",
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@ -2460,7 +2471,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%"PRIx64", 0x%lx\n",
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@ -2489,7 +2500,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
DRM_ERROR("bad MEM_WRITE bo too small: 0x%"PRIx64", 0x%lx\n",
offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@ -2514,7 +2525,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
DRM_ERROR("bad COPY_DW src bo too small: 0x%"PRIx64", 0x%lx\n",
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@ -2538,7 +2549,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
DRM_ERROR("bad COPY_DW dst bo too small: 0x%"PRIx64", 0x%lx\n",
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@ -2734,7 +2745,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA write buffer too small (%"PRIu64" %lu)\n",
dst_offset, radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
@ -2759,12 +2770,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2L, dw src buffer too small (%"PRIu64" %lu)\n",
src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%"PRIu64" %lu)\n",
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
@ -2799,12 +2810,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2T, src buffer too small (%"PRIu64" %lu)\n",
src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2T, dst buffer too small (%"PRIu64" %lu)\n",
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
@ -2818,12 +2829,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2L, byte src buffer too small (%"PRIu64" %lu)\n",
src_offset + count, radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%"PRIu64" %lu)\n",
dst_offset + count, radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
@ -2862,17 +2873,17 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+3);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%"PRIu64" %lu)\n",
src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%"PRIu64" %lu)\n",
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%"PRIu64" %lu)\n",
dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
@ -2902,17 +2913,17 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+8);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%"PRIu64" %lu)\n",
src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%"PRIu64" %lu)\n",
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%"PRIu64" %lu)\n",
dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
@ -2964,17 +2975,17 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+8);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%"PRIu64" %lu)\n",
src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%"PRIu64" %lu)\n",
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%"PRIu64" %lu)\n",
dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
@ -3010,12 +3021,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%"PRIu64" %lu)\n",
src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%"PRIu64" %lu)\n",
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
@ -3051,17 +3062,17 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+8);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%"PRIu64" %lu)\n",
src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%"PRIu64" %lu)\n",
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%"PRIu64" %lu)\n",
dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
@ -3085,7 +3096,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA constant fill buffer too small (%"PRIu64" %lu)\n",
dst_offset, radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}

View File

@ -215,15 +215,15 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
* build a HDMI Video Info Frame
*/
static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
void *buffer, size_t size)
const void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
uint8_t *frame = buffer + 3;
uint8_t *header = buffer;
const uint8_t *frame = (const uint8_t *)buffer + 3;
const uint8_t *header = buffer;
WREG32(AFMT_AVI_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));

View File

@ -2753,6 +2753,7 @@ int kv_dpm_init(struct radeon_device *rdev)
return 0;
}
#ifdef CONFIG_DEBUG_FS
void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
@ -2774,6 +2775,7 @@ void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
current_index, sclk, vddc);
}
}
#endif /* CONFIG_DEBUG_FS */
void kv_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *rps)

View File

@ -700,7 +700,11 @@ int ni_init_microcode(struct radeon_device *rdev)
me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
mc_req_size = BTC_MC_UCODE_SIZE * 4;
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(BARTS_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_TURKS:
chip_name = "TURKS";
@ -709,7 +713,11 @@ int ni_init_microcode(struct radeon_device *rdev)
me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
mc_req_size = BTC_MC_UCODE_SIZE * 4;
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(TURKS_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_CAICOS:
chip_name = "CAICOS";
@ -718,7 +726,11 @@ int ni_init_microcode(struct radeon_device *rdev)
me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
mc_req_size = BTC_MC_UCODE_SIZE * 4;
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(CAICOS_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_CAYMAN:
chip_name = "CAYMAN";
@ -727,7 +739,11 @@ int ni_init_microcode(struct radeon_device *rdev)
me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(CAYMAN_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_ARUBA:
chip_name = "ARUBA";
@ -841,7 +857,7 @@ int tn_get_temp(struct radeon_device *rdev)
static void cayman_gpu_init(struct radeon_device *rdev)
{
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 mc_shared_chmap __unused, mc_arb_ramcfg;
u32 cgts_tcc_disable;
u32 sx_debug_1;
u32 smx_dc_ctl0;
@ -2327,7 +2343,7 @@ void cayman_vm_decode_fault(struct radeon_device *rdev,
u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
char *block;
const char *block;
switch (mc_id) {
case 32:

View File

@ -4319,6 +4319,7 @@ void ni_dpm_print_power_state(struct radeon_device *rdev,
r600_dpm_print_ps_status(rdev, rps);
}
#ifdef CONFIG_DEBUG_FS
void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
@ -4339,6 +4340,7 @@ void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
}
}
#endif /* CONFIG_DEBUG_FS */
u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low)
{

View File

@ -790,8 +790,15 @@ int r100_irq_process(struct radeon_device *rdev)
if (status & RADEON_CRTC_VBLANK_STAT) {
if (rdev->irq.crtc_vblank_int[0]) {
drm_handle_vblank(rdev->ddev, 0);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
@ -799,8 +806,15 @@ int r100_irq_process(struct radeon_device *rdev)
if (status & RADEON_CRTC2_VBLANK_STAT) {
if (rdev->irq.crtc_vblank_int[1]) {
drm_handle_vblank(rdev->ddev, 1);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
@ -1092,7 +1106,7 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
if (rdev->me_fw) {
size = rdev->me_fw->size / 4;
fw_data = (const __be32 *)&rdev->me_fw->data[0];
fw_data = (const __be32 *)rdev->me_fw->data;
WREG32(RADEON_CP_ME_RAM_ADDR, 0);
for (i = 0; i < size; i += 2) {
WREG32(RADEON_CP_ME_RAM_DATAH,
@ -4089,6 +4103,23 @@ int r100_init(struct radeon_device *rdev)
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
bool always_indirect)
{
#ifdef __NetBSD__
if (reg < rdev->rmmio_size && !always_indirect) {
return bus_space_read_4(rdev->rmmio_bst, rdev->rmmio_bsh, reg);
} else {
unsigned long flags;
uint32_t ret;
spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
bus_space_write_4(rdev->rmmio_bst, rdev->rmmio_bsh,
RADEON_MM_INDEX, reg);
ret = bus_space_read_4(rdev->rmmio_bst, rdev->rmmio_bsh,
RADEON_MM_DATA);
spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
return ret;
}
#else
if (reg < rdev->rmmio_size && !always_indirect)
return readl(((void __iomem *)rdev->rmmio) + reg);
else {
@ -4102,11 +4133,26 @@ uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
return ret;
}
#endif
}
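/*
 * (Editorial note: registers beyond the mapped MMIO window are reached
 * indirectly by writing the register number to RADEON_MM_INDEX and
 * then accessing RADEON_MM_DATA; mmio_idx_lock serializes that
 * two-step sequence.)
 */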
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
bool always_indirect)
{
#ifdef __NetBSD__
if (reg < rdev->rmmio_size && !always_indirect) {
bus_space_write_4(rdev->rmmio_bst, rdev->rmmio_bsh, reg, v);
} else {
unsigned long flags;
spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
bus_space_write_4(rdev->rmmio_bst, rdev->rmmio_bsh,
RADEON_MM_INDEX, reg);
bus_space_write_4(rdev->rmmio_bst, rdev->rmmio_bsh,
RADEON_MM_DATA, v);
spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
}
#else
if (reg < rdev->rmmio_size && !always_indirect)
writel(v, ((void __iomem *)rdev->rmmio) + reg);
else {
@ -4117,24 +4163,49 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
}
#endif
}
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
#ifdef __NetBSD__
if (reg < rdev->rio_mem_size) {
return bus_space_read_4(rdev->rio_mem_bst, rdev->rio_mem_bsh,
reg);
} else {
bus_space_write_4(rdev->rio_mem_bst, rdev->rio_mem_bsh,
RADEON_MM_INDEX, reg);
return bus_space_read_4(rdev->rio_mem_bst, rdev->rio_mem_bsh,
RADEON_MM_DATA);
}
#else
if (reg < rdev->rio_mem_size)
return ioread32(rdev->rio_mem + reg);
else {
iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
return ioread32(rdev->rio_mem + RADEON_MM_DATA);
}
#endif
}
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
#ifdef __NetBSD__
if (reg < rdev->rio_mem_size) {
bus_space_write_4(rdev->rio_mem_bst, rdev->rio_mem_bsh, reg,
v);
} else {
bus_space_write_4(rdev->rio_mem_bst, rdev->rio_mem_bsh,
RADEON_MM_INDEX, reg);
bus_space_write_4(rdev->rio_mem_bst, rdev->rio_mem_bsh,
RADEON_MM_DATA, v);
}
#else
if (reg < rdev->rio_mem_size)
iowrite32(v, rdev->rio_mem + reg);
else {
iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
}
#endif
}

View File

@ -72,6 +72,25 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)
#ifdef __NetBSD__
/*
* XXX Can't use bus_space here because this is all mapped through the
* radeon_bo abstraction. Can't assume we're x86 because this is
* AMD/ATI Radeon, not Intel.
*/
# define __iomem volatile
# define writel fake_writel
static inline void
fake_writel(uint32_t v, void __iomem *ptr)
{
membar_producer();
*(uint32_t __iomem *)ptr = v;
}
#endif
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
void __iomem *ptr = rdev->gart.ptr;
@ -85,10 +104,15 @@ int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
/* on x86 we want this to be CPU endian, on powerpc
* on powerpc without HW swappers, it'll get swapped on way
* into VRAM - so no need for cpu_to_le32 on VRAM tables */
writel(addr, ((void __iomem *)ptr) + (i * 4));
writel(addr, (uint8_t __iomem *)ptr + (i * 4));
return 0;
}
#ifdef __NetBSD__
# undef __iomem
# undef writel
#endif
int rv370_pcie_gart_init(struct radeon_device *rdev)
{
int r;

View File

@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
@ -898,6 +899,28 @@ void r600_hpd_fini(struct radeon_device *rdev)
radeon_irq_kms_disable_hpd(rdev, disable);
}
#ifdef __NetBSD__
/*
* XXX Can't use bus_space here because this is all mapped through the
* radeon_bo abstraction. Can't assume we're x86 because this is
* AMD/ATI Radeon, not Intel.
*/
# define __iomem volatile
# define readl fake_readl
static inline uint32_t
fake_readl(const void __iomem *ptr)
{
uint32_t v;
v = *(const uint32_t __iomem *)ptr;
membar_consumer();
return v;
}
#endif
/*
* R600 PCIE GART
*/
@ -909,8 +932,7 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
/* flush hdp cache so updates hit vram */
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
!(rdev->flags & RADEON_IS_AGP)) {
void __iomem *ptr = (void *)rdev->gart.ptr;
u32 tmp;
void __iomem *ptr = rdev->gart.ptr;
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
@ -918,7 +940,7 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
* method for them.
*/
WREG32(HDP_DEBUG1, 0);
tmp = readl((void __iomem *)ptr);
(void)readl(ptr);
} else
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
@ -940,6 +962,11 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
}
}
#ifdef __NetBSD__
# undef __iomem
# undef readl
#endif
int r600_pcie_gart_init(struct radeon_device *rdev)
{
int r;
@ -1243,7 +1270,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
mc->vram_start = mc->gtt_end + 1;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
dev_info(rdev->dev, "VRAM: %"PRIu64"M 0x%08"PRIX64" - 0x%08"PRIX64" (%"PRIu64"M used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
} else {
@ -1355,7 +1382,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
return r;
}
r = radeon_bo_kmap(rdev->vram_scratch.robj,
(void **)&rdev->vram_scratch.ptr);
(void **)__UNVOLATILE(&rdev->vram_scratch.ptr));
if (r)
radeon_bo_unpin(rdev->vram_scratch.robj);
radeon_bo_unreserve(rdev->vram_scratch.robj);
@ -2304,50 +2331,82 @@ int r600_init_microcode(struct radeon_device *rdev)
chip_name = "RV770";
rlc_chip_name = "R700";
smc_chip_name = "RV770";
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(RV770_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_RV730:
chip_name = "RV730";
rlc_chip_name = "R700";
smc_chip_name = "RV730";
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(RV730_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_RV710:
chip_name = "RV710";
rlc_chip_name = "R700";
smc_chip_name = "RV710";
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(RV710_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_RV740:
chip_name = "RV730";
rlc_chip_name = "R700";
smc_chip_name = "RV740";
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(RV740_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_CEDAR:
chip_name = "CEDAR";
rlc_chip_name = "CEDAR";
smc_chip_name = "CEDAR";
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(CEDAR_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_REDWOOD:
chip_name = "REDWOOD";
rlc_chip_name = "REDWOOD";
smc_chip_name = "REDWOOD";
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(REDWOOD_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_JUNIPER:
chip_name = "JUNIPER";
rlc_chip_name = "JUNIPER";
smc_chip_name = "JUNIPER";
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(JUNIPER_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
chip_name = "CYPRESS";
rlc_chip_name = "CYPRESS";
smc_chip_name = "CYPRESS";
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(CYPRESS_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_PALM:
chip_name = "PALM";
@ -3242,7 +3301,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
return r;
}
r = radeon_bo_kmap(rdev->ih.ring_obj,
(void **)&rdev->ih.ring);
(void **)__UNVOLATILE(&rdev->ih.ring));
radeon_bo_unreserve(rdev->ih.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
@ -3872,8 +3931,15 @@ restart_ih:
if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[0]) {
drm_handle_vblank(rdev->ddev, 0);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
@ -3898,8 +3964,15 @@ restart_ih:
if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[1]) {
drm_handle_vblank(rdev->ddev, 1);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
@ -4084,6 +4157,11 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
#endif
}
#ifdef __NetBSD__
# define __iomem volatile
# define readl fake_readl
#endif
/**
* r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
* rdev: radeon device structure
@ -4103,15 +4181,19 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
*/
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
u32 tmp;
void __iomem *ptr = rdev->vram_scratch.ptr;
WREG32(HDP_DEBUG1, 0);
tmp = readl((void __iomem *)ptr);
(void)readl(ptr);
} else
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
#ifdef __NetBSD__
# undef __iomem
# undef readl
#endif
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
u32 link_width_cntl, mask;
@ -4204,6 +4286,7 @@ int r600_get_pcie_lanes(struct radeon_device *rdev)
static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
#ifndef __NetBSD__ /* XXX radeon pcie */
u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
u16 link_cntl2;
@ -4314,6 +4397,7 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
#endif
}
/**

View File

@ -349,7 +349,7 @@ static void r600_cs_track_init(struct r600_cs_track *track)
static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
struct r600_cs_track *track = p->track;
u32 slice_tile_max, size, tmp;
u32 slice_tile_max, size __unused, tmp;
u32 height, height_align, pitch, pitch_align, depth_align;
u64 base_offset, base_align;
struct array_mode_checker array_check;
@ -420,7 +420,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
dev_warn(p->dev, "%s offset[%d] 0x%"PRIx64" 0x%"PRIx64", %d not aligned\n", __func__, i,
base_offset, base_align, array_mode);
return -EINVAL;
}
@ -448,7 +448,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
* broken userspace.
*/
} else {
dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
dev_warn(p->dev, "%s offset[%d] %d %"PRIu64" %d %lu too big (%d %d) (%d %d %d)\n",
__func__, i, array_mode,
track->cb_color_bo_offset[i], tmp,
radeon_bo_size(track->cb_color_bo[i]),
@ -480,7 +480,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
if (bytes + track->cb_color_frag_offset[i] >
radeon_bo_size(track->cb_color_frag_bo[i])) {
dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
"(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
"(tile_max=%u, bytes=%u, offset=%"PRIu64", bo_size=%lu)\n",
__func__, tile_max, bytes,
track->cb_color_frag_offset[i],
radeon_bo_size(track->cb_color_frag_bo[i]));
@ -498,7 +498,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
if (bytes + track->cb_color_tile_offset[i] >
radeon_bo_size(track->cb_color_tile_bo[i])) {
dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
"(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
"(block_max=%u, bytes=%u, offset=%"PRIu64", bo_size=%lu)\n",
__func__, block_max, bytes,
track->cb_color_tile_offset[i],
radeon_bo_size(track->cb_color_tile_bo[i]));
@ -516,7 +516,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
{
struct r600_cs_track *track = p->track;
u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
u32 nviews, bpe, ntiles, size __unused, slice_tile_max, tmp;
u32 height_align, pitch_align, depth_align;
u32 pitch = 8192;
u32 height = 8192;
@ -611,7 +611,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
dev_warn(p->dev, "%s offset 0x%"PRIx64", 0x%"PRIx64", %d not aligned\n", __func__,
base_offset, base_align, array_mode);
return -EINVAL;
}
@ -720,7 +720,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
(u64)track->vgt_strmout_size[i];
if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
DRM_ERROR("streamout %d bo too small: 0x%"PRIx64", 0x%lx\n",
i, offset,
radeon_bo_size(track->vgt_strmout_bo[i]));
return -EINVAL;
@ -1569,12 +1569,12 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
dev_warn(p->dev, "%s:%d tex base offset (0x%"PRIx64", 0x%"PRIx64", %d) invalid\n",
__func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
return -EINVAL;
}
if (!IS_ALIGNED(mip_offset, base_align)) {
dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
dev_warn(p->dev, "%s:%d tex mip offset (0x%"PRIx64", 0x%"PRIx64", %d) invalid\n",
__func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
return -EINVAL;
}
@ -1598,7 +1598,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
w0, h0, pitch_align, height_align,
array_check.array_mode, format, word2,
l0_size, radeon_bo_size(texture));
dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
dev_warn(p->dev, "alignments %d %d %d %"PRIu64"\n", pitch, pitch_align, height_align, base_align);
return -EINVAL;
}
/* using get ib will give us the offset into the mipmap bo */
@ -1808,7 +1808,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
dev_warn(p->dev, "CP DMA src buffer too small (%"PRIu64" %lu)\n",
tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@ -1838,7 +1838,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
dev_warn(p->dev, "CP DMA dst buffer too small (%"PRIu64" %lu)\n",
tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@ -2108,13 +2108,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset = radeon_get_ib_value(p, idx+1) << 8;
if (offset != track->vgt_strmout_bo_offset[idx_value]) {
DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%"PRIx64", 0x%x\n",
offset, track->vgt_strmout_bo_offset[idx_value]);
return -EINVAL;
}
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%"PRIx64", 0x%lx\n",
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@ -2147,7 +2147,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%"PRIx64", 0x%lx\n",
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@ -2166,7 +2166,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%"PRIx64", 0x%lx\n",
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@ -2195,7 +2195,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
DRM_ERROR("bad MEM_WRITE bo too small: 0x%"PRIx64", 0x%lx\n",
offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@ -2220,7 +2220,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
DRM_ERROR("bad COPY_DW src bo too small: 0x%"PRIx64", 0x%lx\n",
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@ -2244,7 +2244,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
DRM_ERROR("bad COPY_DW dst bo too small: 0x%"PRIx64", 0x%lx\n",
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@ -2439,7 +2439,7 @@ void r600_cs_legacy_init(void)
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc)
{
struct radeon_cs_chunk *relocs_chunk;
struct radeon_cs_chunk *relocs_chunk __unused;
unsigned idx;
*cs_reloc = NULL;
@ -2516,7 +2516,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += count + 3;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA write buffer too small (%"PRIu64" %lu)\n",
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
@ -2583,12 +2583,12 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
}
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA copy src buffer too small (%"PRIu64" %lu)\n",
src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA write dst buffer too small (%"PRIu64" %lu)\n",
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
@ -2606,7 +2606,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
dev_warn(p->dev, "DMA constant fill buffer too small (%"PRIu64" %lu)\n",
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}

View File

@ -225,6 +225,36 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
#ifdef __NetBSD__
/*
* XXX Can't use bus_space here because this is all mapped through the
* radeon_bo abstraction. Can't assume we're x86 because this is
* AMD/ATI Radeon, not Intel.
*/
# define __iomem volatile
# define readl fake_readl
# define writel fake_writel
static inline uint32_t
fake_readl(const void __iomem *ptr)
{
uint32_t v;
v = *(const uint32_t __iomem *)ptr;
membar_consumer();
return v;
}
static inline void
fake_writel(uint32_t v, void __iomem *ptr)
{
membar_producer();
*(uint32_t __iomem *)ptr = v;
}
#endif
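/*
 * Illustrative sketch, not part of this commit: the sentinel-polling
 * pattern the ring tests below build out of this shim.  fake_writel()
 * issues membar_producer() before the store and fake_readl() issues
 * membar_consumer() after the load, so the handshake with the DMA
 * engine stays ordered even though the scratch page is not mapped
 * through bus_space.
 */
static inline int
scratch_poll_sketch(struct radeon_device *rdev, void __iomem *ptr)
{
	unsigned i;

	writel(0xCAFEDEAD, ptr);
	/* ... submit a DMA packet that overwrites *ptr with 0xDEADBEEF ... */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (readl(ptr) == 0xDEADBEEF)
			return 0;
		DRM_UDELAY(1);
	}
	return -ETIMEDOUT;
}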
/**
* r600_dma_ring_test - simple async dma engine test
@ -241,7 +271,7 @@ int r600_dma_ring_test(struct radeon_device *rdev,
{
unsigned i;
int r;
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
void __iomem *ptr = rdev->vram_scratch.ptr;
u32 tmp;
if (!ptr) {
@ -280,6 +310,12 @@ int r600_dma_ring_test(struct radeon_device *rdev,
return r;
}
#ifdef __NetBSD__
# undef __iomem
# undef fake_readl
# undef fake_writel
#endif
/**
* r600_dma_fence_ring_emit - emit a fence on the DMA ring
*
@ -331,6 +367,12 @@ bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
return true;
}
#ifdef __NetBSD__
# define __iomem volatile
# define readl fake_readl
# define writel fake_writel
#endif
/**
* r600_dma_ib_test - test an IB on the DMA engine
*
@ -345,7 +387,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
struct radeon_ib ib;
unsigned i;
int r;
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
void __iomem *ptr = rdev->vram_scratch.ptr;
u32 tmp = 0;
if (!ptr) {
@ -395,6 +437,12 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
#ifdef __NetBSD__
# undef __iomem
# undef fake_readl
# undef fake_writel
#endif
/**
* r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
*

View File

@ -163,7 +163,7 @@ static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
uint8_t *frame = buffer + 3;
const uint8_t *frame = (const uint8_t *)buffer + 3;
uint8_t *header = buffer;
WREG32(HDMI0_AVI_INFO0 + offset,
@ -187,7 +187,7 @@ static void r600_hdmi_update_audio_infoframe(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
const u8 *frame = buffer + 3;
const u8 *frame = (const u8 *)buffer + 3;
WREG32(HDMI0_AUDIO_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));

View File

@ -60,10 +60,16 @@
* are considered as fatal)
*/
#include <asm/byteorder.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/device.h>
#include <linux/log2.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/rwsem.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@ -106,7 +112,7 @@ extern int radeon_hard_reset;
* symbol;
*/
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
#define RADEON_FENCE_JIFFIES_TIMEOUT (DRM_HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_COMPONENTS 32
@ -237,7 +243,12 @@ bool radeon_get_bios(struct radeon_device *rdev);
* Dummy page
*/
struct radeon_dummy_page {
#ifdef __NetBSD__
bus_dma_segment_t rdp_seg;
bus_dmamap_t rdp_map;
#else
struct page *page;
#endif
dma_addr_t addr;
};
int radeon_dummy_page_init(struct radeon_device *rdev);
@ -475,7 +486,9 @@ struct radeon_bo {
struct drm_gem_object gem_base;
struct ttm_bo_kmap_obj dma_buf_vmap;
#ifndef __NetBSD__ /* XXX pid??? */
pid_t pid;
#endif
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@ -505,7 +518,12 @@ int radeon_gem_debugfs_init(struct radeon_device *rdev);
* alignment).
*/
struct radeon_sa_manager {
#ifdef __NetBSD__
spinlock_t wq_lock;
drm_waitqueue_t wq;
#else
wait_queue_head_t wq;
#endif
struct radeon_bo *bo;
struct list_head *hole;
struct list_head flist[RADEON_NUM_RINGS];
@ -587,6 +605,10 @@ struct radeon_mc;
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
struct radeon_gart {
#ifdef __NetBSD__
bus_dma_segment_t rg_table_seg;
bus_dmamap_t rg_table_map;
#endif
dma_addr_t table_addr;
struct radeon_bo *robj;
void *ptr;
@ -606,11 +628,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev);
void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
int radeon_gart_init(struct radeon_device *rdev);
void radeon_gart_fini(struct radeon_device *rdev);
#ifdef __NetBSD__
void radeon_gart_unbind(struct radeon_device *rdev, unsigned gpu_start,
unsigned npages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned gpu_start,
unsigned npages, struct page **pages,
bus_dmamap_t dmamap);
#else
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, struct page **pagelist,
dma_addr_t *dma_addr);
#endif
void radeon_gart_restore(struct radeon_device *rdev);
@ -664,9 +694,18 @@ struct radeon_doorbell {
/* doorbell mmio */
resource_size_t base;
resource_size_t size;
#ifdef __NetBSD__
bus_space_tag_t bst;
bus_space_handle_t bsh;
#else
u32 __iomem *ptr;
#endif
u32 num_doorbells; /* Number of doorbells actually reserved for radeon. */
#ifdef __NetBSD__
unsigned long used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, CHAR_BIT*sizeof(unsigned long))];
#else
unsigned long used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)];
#endif
};
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
@ -755,7 +794,12 @@ struct radeon_irq {
atomic_t ring_int[RADEON_NUM_RINGS];
bool crtc_vblank_int[RADEON_MAX_CRTCS];
atomic_t pflip[RADEON_MAX_CRTCS];
#ifdef __NetBSD__
spinlock_t vblank_lock;
drm_waitqueue_t vblank_queue;
#else
wait_queue_head_t vblank_queue;
#endif
bool hpd[RADEON_MAX_HPD_PINS];
bool afmt[RADEON_MAX_AFMT_BLOCKS];
union radeon_irq_stat_regs stat_regs;
@ -2211,8 +2255,10 @@ struct radeon_device {
uint16_t bios_header_start;
struct radeon_bo *stollen_vga_memory;
/* Register mmio */
#ifndef __NetBSD__
resource_size_t rmmio_base;
resource_size_t rmmio_size;
#endif
/* protects concurrent MM_INDEX/DATA based register access */
spinlock_t mmio_idx_lock;
/* protects concurrent SMC based register access */
@ -2237,7 +2283,14 @@ struct radeon_device {
spinlock_t didt_idx_lock;
/* protects concurrent ENDPOINT (audio) register access */
spinlock_t end_idx_lock;
#ifdef __NetBSD__
bus_space_tag_t rmmio_bst;
bus_space_handle_t rmmio_bsh;
bus_addr_t rmmio_addr;
bus_size_t rmmio_size;
#else
void __iomem *rmmio;
#endif
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
radeon_rreg_t pll_rreg;
@ -2246,8 +2299,14 @@ struct radeon_device {
radeon_rreg_t pciep_rreg;
radeon_wreg_t pciep_wreg;
/* io port */
#ifdef __NetBSD__
bus_space_tag_t rio_mem_bst;
bus_space_handle_t rio_mem_bsh;
bus_size_t rio_mem_size;
#else
void __iomem *rio_mem;
resource_size_t rio_mem_size;
#endif
struct radeon_clock clock;
struct radeon_mc mc;
struct radeon_gart gart;
@ -2256,7 +2315,12 @@ struct radeon_device {
struct radeon_doorbell doorbell;
struct radeon_mman mman;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
#ifdef __NetBSD__
spinlock_t fence_lock;
drm_waitqueue_t fence_queue;
#else
wait_queue_head_t fence_queue;
#endif
struct mutex ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
bool ib_pool_ready;
@ -2354,10 +2418,17 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
/*
* Registers read & write functions.
*/
#ifdef __NetBSD__
#define RREG8(r) bus_space_read_1(rdev->rmmio_bst, rdev->rmmio_bsh, (r))
#define WREG8(r, v) bus_space_write_1(rdev->rmmio_bst, rdev->rmmio_bsh, (r), (v))
#define RREG16(r) bus_space_read_2(rdev->rmmio_bst, rdev->rmmio_bsh, (r))
#define WREG16(r, v) bus_space_write_2(rdev->rmmio_bst, rdev->rmmio_bsh, (r), (v))
#else
#define RREG8(reg) readb((rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
#define RREG16(reg) readw((rdev->rmmio) + (reg))
#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
#endif
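/*
 * Sketch of the indirect path behind RREG32_IDX() below (an
 * assumption; r100_mm_rreg() itself is not in this diff): registers
 * beyond the mapped MMIO window are reached through the
 * RADEON_MM_INDEX/RADEON_MM_DATA pair, serialized by mmio_idx_lock:
 *
 *	spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
 *	WREG32(RADEON_MM_INDEX, reg);
 *	ret = RREG32(RADEON_MM_DATA);
 *	spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
 */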
#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))

View File

@ -149,11 +149,11 @@ int radeon_agp_init(struct radeon_device *rdev)
return ret;
}
if (rdev->ddev->agp->agp_info.aper_size < 32) {
if (rdev->ddev->agp->agp_info.aki_info.ai_aperture_size >> 20 < 32) {
drm_agp_release(rdev->ddev);
dev_warn(rdev->dev, "AGP aperture too small (%zuM) "
"need at least 32M, disabling AGP\n",
rdev->ddev->agp->agp_info.aper_size);
rdev->ddev->agp->agp_info.aki_info.ai_aperture_size >> 20);
return -EINVAL;
}
@ -241,11 +241,11 @@ int radeon_agp_init(struct radeon_device *rdev)
return ret;
}
rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
rdev->mc.agp_base = rdev->ddev->agp->agp_info.aki_info.ai_aperture_base;
rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aki_info.ai_aperture_size;
rdev->mc.gtt_start = rdev->mc.agp_base;
rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1;
dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
dev_info(rdev->dev, "GTT: %"PRIu64"M 0x%08"PRIX64" - 0x%08"PRIX64"\n",
rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end);
/* workaround some hw issues */

View File

@ -1055,7 +1055,9 @@ static struct radeon_asic rv6xx_asic = {
.get_sclk = &rv6xx_dpm_get_sclk,
.get_mclk = &rv6xx_dpm_get_mclk,
.print_power_state = &rv6xx_dpm_print_power_state,
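	/*
	 * Assumption: the port compiles the
	 * debugfs_print_current_performance_level member only under
	 * CONFIG_DEBUGFS, hence the guard repeated for each asic below.
	 */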
#ifdef CONFIG_DEBUGFS
.debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level,
#endif
.force_performance_level = &rv6xx_dpm_force_performance_level,
},
.pflip = {
@ -1146,7 +1148,9 @@ static struct radeon_asic rs780_asic = {
.get_sclk = &rs780_dpm_get_sclk,
.get_mclk = &rs780_dpm_get_mclk,
.print_power_state = &rs780_dpm_print_power_state,
#ifdef CONFIG_DEBUGFS
.debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
#endif
.force_performance_level = &rs780_dpm_force_performance_level,
},
.pflip = {
@ -1251,7 +1255,9 @@ static struct radeon_asic rv770_asic = {
.get_sclk = &rv770_dpm_get_sclk,
.get_mclk = &rv770_dpm_get_mclk,
.print_power_state = &rv770_dpm_print_power_state,
#ifdef CONFIG_DEBUGFS
.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
#endif
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &rv770_dpm_vblank_too_short,
},
@ -1370,7 +1376,9 @@ static struct radeon_asic evergreen_asic = {
.get_sclk = &rv770_dpm_get_sclk,
.get_mclk = &rv770_dpm_get_mclk,
.print_power_state = &rv770_dpm_print_power_state,
#ifdef CONFIG_DEBUGFS
.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
#endif
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &cypress_dpm_vblank_too_short,
},
@ -1463,7 +1471,9 @@ static struct radeon_asic sumo_asic = {
.get_sclk = &sumo_dpm_get_sclk,
.get_mclk = &sumo_dpm_get_mclk,
.print_power_state = &sumo_dpm_print_power_state,
#ifdef CONFIG_DEBUGFS
.debugfs_print_current_performance_level = &sumo_dpm_debugfs_print_current_performance_level,
#endif
.force_performance_level = &sumo_dpm_force_performance_level,
},
.pflip = {
@ -1555,7 +1565,9 @@ static struct radeon_asic btc_asic = {
.get_sclk = &btc_dpm_get_sclk,
.get_mclk = &btc_dpm_get_mclk,
.print_power_state = &rv770_dpm_print_power_state,
#ifdef CONFIG_DEBUGFS
.debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
#endif
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &btc_dpm_vblank_too_short,
},
@ -1699,7 +1711,9 @@ static struct radeon_asic cayman_asic = {
.get_sclk = &ni_dpm_get_sclk,
.get_mclk = &ni_dpm_get_mclk,
.print_power_state = &ni_dpm_print_power_state,
#ifdef CONFIG_DEBUGFS
.debugfs_print_current_performance_level = &ni_dpm_debugfs_print_current_performance_level,
#endif
.force_performance_level = &ni_dpm_force_performance_level,
.vblank_too_short = &ni_dpm_vblank_too_short,
},
@ -1800,7 +1814,9 @@ static struct radeon_asic trinity_asic = {
.get_sclk = &trinity_dpm_get_sclk,
.get_mclk = &trinity_dpm_get_mclk,
.print_power_state = &trinity_dpm_print_power_state,
#ifdef CONFIG_DEBUGFS
.debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
#endif
.force_performance_level = &trinity_dpm_force_performance_level,
.enable_bapm = &trinity_dpm_enable_bapm,
},
@ -1931,7 +1947,9 @@ static struct radeon_asic si_asic = {
.get_sclk = &ni_dpm_get_sclk,
.get_mclk = &ni_dpm_get_mclk,
.print_power_state = &ni_dpm_print_power_state,
#ifdef CONFIG_DEBUGFS
.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
#endif
.force_performance_level = &si_dpm_force_performance_level,
.vblank_too_short = &ni_dpm_vblank_too_short,
},
@ -2093,7 +2111,9 @@ static struct radeon_asic ci_asic = {
.get_sclk = &ci_dpm_get_sclk,
.get_mclk = &ci_dpm_get_mclk,
.print_power_state = &ci_dpm_print_power_state,
#ifdef CONFIG_DEBUGFS
.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
#endif
.force_performance_level = &ci_dpm_force_performance_level,
.vblank_too_short = &ci_dpm_vblank_too_short,
.powergate_uvd = &ci_dpm_powergate_uvd,
@ -2198,7 +2218,9 @@ static struct radeon_asic kv_asic = {
.get_sclk = &kv_dpm_get_sclk,
.get_mclk = &kv_dpm_get_mclk,
.print_power_state = &kv_dpm_print_power_state,
#ifdef CONFIG_DEBUGFS
.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
#endif
.force_performance_level = &kv_dpm_force_performance_level,
.powergate_uvd = &kv_dpm_powergate_uvd,
.enable_bapm = &kv_dpm_enable_bapm,

View File

@ -187,7 +187,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
if (i2c.valid) {
sprintf(stmp, "0x%x", i2c.i2c_id);
snprintf(stmp, sizeof stmp, "0x%x", i2c.i2c_id);
rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
}
gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
@ -563,7 +563,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
path_size += le16_to_cpu(path->usSize);
if (device_support & le16_to_cpu(path->usDeviceTag)) {
uint8_t con_obj_id, con_obj_num, con_obj_type;
uint8_t con_obj_id, con_obj_num, con_obj_type __unused;
con_obj_id =
(le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
@ -641,7 +641,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
router.ddc_valid = false;
router.cd_valid = false;
for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
uint8_t grph_obj_id __unused, grph_obj_num __unused, grph_obj_type;
grph_obj_id =
(le16_to_cpu(path->usGraphicObjIds[j]) &

View File

@ -78,7 +78,7 @@ exit_do_move:
static void radeon_benchmark_log_results(int n, unsigned size,
unsigned int time,
unsigned sdomain, unsigned ddomain,
char *kind)
const char *kind)
{
unsigned int throughput = (n * (size >> 10)) / time;
DRM_INFO("radeon: %s %u bo moves of %u kB from"

View File

@ -33,6 +33,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/string.h>
/*
* BIOS.
*/
@ -45,15 +46,39 @@
*/
static bool igp_read_bios_from_vram(struct radeon_device *rdev)
{
#ifdef __NetBSD__
bus_space_tag_t bst;
bus_space_handle_t bsh;
bus_size_t size;
#else
uint8_t __iomem *bios;
resource_size_t vram_base;
resource_size_t size = 256 * 1024; /* ??? */
#endif
if (!(rdev->flags & RADEON_IS_IGP))
	return false;
if (!radeon_card_posted(rdev))
	return false;
rdev->bios = NULL;
#ifdef __NetBSD__
if (pci_mapreg_map(&rdev->pdev->pd_pa, PCI_BAR(0),
/* XXX Dunno what type to expect here; fill me in... */
pci_mapreg_type(rdev->pdev->pd_pa.pa_pc,
rdev->pdev->pd_pa.pa_tag, PCI_BAR(0)),
0, &bst, &bsh, NULL, &size))
return false;
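	/* A valid PCI expansion ROM image begins with the 0x55 0xaa signature. */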
if ((size == 0) ||
(size < 256 * 1024) ||
(bus_space_read_1(bst, bsh, 0) != 0x55) ||
(bus_space_read_1(bst, bsh, 1) != 0xaa) ||
((rdev->bios = kmalloc(size, GFP_KERNEL)) == NULL)) {
bus_space_unmap(bst, bsh, size);
return false;
}
bus_space_read_region_1(bst, bsh, 0, rdev->bios, size);
bus_space_unmap(bst, bsh, size);
#else
vram_base = pci_resource_start(rdev->pdev, 0);
bios = ioremap(vram_base, size);
if (!bios) {
@ -71,9 +96,14 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
}
memcpy_fromio(rdev->bios, bios, size);
iounmap(bios);
#endif
return true;
}
#ifdef __NetBSD__
#define __iomem __pci_rom_iomem
#endif
static bool radeon_read_bios(struct radeon_device *rdev)
{
uint8_t __iomem *bios;
@ -99,8 +129,15 @@ static bool radeon_read_bios(struct radeon_device *rdev)
return true;
}
#ifdef __NetBSD__
#undef __iomem
#endif
static bool radeon_read_platform_bios(struct radeon_device *rdev)
{
#ifdef __NetBSD__ /* XXX radeon platform bios */
return false;
#else
uint8_t __iomem *bios;
size_t size;
@ -120,8 +157,10 @@ static bool radeon_read_platform_bios(struct radeon_device *rdev)
}
return true;
#endif
}
/* XXX radeon acpi */
#ifdef CONFIG_ACPI
/* ATRM is used to get the BIOS on the discrete cards in
* dual-gpu systems.

View File

@ -2631,7 +2631,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
u16 offset, misc, misc2 = 0;
u8 rev, blocks, tmp;
u8 rev, blocks __unused, tmp;
int state_index = 0;
struct radeon_i2c_bus_rec i2c_bus;

View File

@ -33,6 +33,7 @@
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include <linux/bitops.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
@ -260,6 +261,10 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
*/
static int radeon_doorbell_init(struct radeon_device *rdev)
{
#ifdef __NetBSD__
int r;
#endif
/* doorbell bar mapping */
rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
@ -268,10 +273,20 @@ static int radeon_doorbell_init(struct radeon_device *rdev)
if (rdev->doorbell.num_doorbells == 0)
return -EINVAL;
#ifdef __NetBSD__
/* XXX errno NetBSD->Linux */
rdev->doorbell.bst = rdev->pdev->pd_pa.pa_memt;
r = -bus_space_map(rdev->doorbell.bst, rdev->doorbell.base,
(rdev->doorbell.num_doorbells * sizeof(uint32_t)),
0, &rdev->doorbell.bsh);
if (r)
return r;
#else
rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
if (rdev->doorbell.ptr == NULL) {
return -ENOMEM;
}
#endif
DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
@ -289,8 +304,13 @@ static int radeon_doorbell_init(struct radeon_device *rdev)
*/
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
#ifdef __NetBSD__
bus_space_unmap(rdev->doorbell.bst, rdev->doorbell.bsh,
(rdev->doorbell.num_doorbells * sizeof(uint32_t)));
#else
iounmap(rdev->doorbell.ptr);
rdev->doorbell.ptr = NULL;
#endif
}
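/*
 * Hypothetical sketch of the NetBSD half of cik_mm_wdoorbell() (the
 * Linux half stores through doorbell.ptr): a single 32-bit write at
 * index*4 into the doorbell BAR mapped above.
 */
static inline void
radeon_doorbell_write_sketch(struct radeon_device *rdev, u32 index, u32 v)
{
	if (index < rdev->doorbell.num_doorbells)
		bus_space_write_4(rdev->doorbell.bst, rdev->doorbell.bsh,
		    index * sizeof(uint32_t), v);
	else
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
}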
/**
@ -403,7 +423,7 @@ int radeon_wb_init(struct radeon_device *rdev)
radeon_wb_fini(rdev);
return r;
}
r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)__UNVOLATILE(&rdev->wb.wb));
radeon_bo_unreserve(rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
@ -413,7 +433,7 @@ int radeon_wb_init(struct radeon_device *rdev)
}
/* clear wb memory */
memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
memset(__UNVOLATILE(rdev->wb.wb), 0, RADEON_GPU_PAGE_SIZE);
/* disable event_write fences */
rdev->wb.use_event = false;
/* disabled via module param */
@ -505,7 +525,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
if (limit && limit < mc->real_vram_size)
mc->real_vram_size = limit;
dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
dev_info(rdev->dev, "VRAM: %"PRIu64"M 0x%016"PRIX64" - 0x%016"PRIX64" (%"PRIu64"M used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
}
@ -542,7 +562,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
}
mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
dev_info(rdev->dev, "GTT: %"PRIu64"M 0x%016"PRIX64" - 0x%016"PRIX64"\n",
mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
@ -562,11 +582,13 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
#ifndef __NetBSD__ /* XXX radeon efi */
/* required for EFI mode on macbook2,1 which uses an r5xx asic */
if (efi_enabled(EFI_BOOT) &&
(rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
(rdev->family < CHIP_R600))
return false;
#endif
if (ASIC_IS_NODCE(rdev))
goto check_memsize;
@ -680,6 +702,41 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev)
*/
int radeon_dummy_page_init(struct radeon_device *rdev)
{
#ifdef __NetBSD__
int rsegs;
int error;
/* XXX Can this be called more than once?? */
if (rdev->dummy_page.rdp_map != NULL)
return 0;
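	/*
	 * Allocate one DMA-safe page without ever mapping it into KVA:
	 * only its bus address (dm_segs[0].ds_addr) is needed, as the
	 * placeholder target for unbound GART entries.
	 */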
error = bus_dmamem_alloc(rdev->ddev->dmat, PAGE_SIZE, PAGE_SIZE, 0,
&rdev->dummy_page.rdp_seg, 1, &rsegs, BUS_DMA_WAITOK);
if (error)
goto fail0;
KASSERT(rsegs == 1);
error = bus_dmamap_create(rdev->ddev->dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
BUS_DMA_WAITOK, &rdev->dummy_page.rdp_map);
if (error)
goto fail1;
error = bus_dmamap_load_raw(rdev->ddev->dmat, rdev->dummy_page.rdp_map,
&rdev->dummy_page.rdp_seg, 1, PAGE_SIZE, BUS_DMA_WAITOK);
if (error)
goto fail2;
/* Success! */
rdev->dummy_page.addr = rdev->dummy_page.rdp_map->dm_segs[0].ds_addr;
return 0;
fail3: __unused
bus_dmamap_unload(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
fail2: bus_dmamap_destroy(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
fail1: bus_dmamem_free(rdev->ddev->dmat, &rdev->dummy_page.rdp_seg, 1);
fail0: KASSERT(error);
rdev->dummy_page.rdp_map = NULL;
/* XXX errno NetBSD->Linux */
return -error;
#else
if (rdev->dummy_page.page)
return 0;
rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
@ -694,6 +751,7 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
return -ENOMEM;
}
return 0;
#endif
}
/**
@ -705,12 +763,22 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
*/
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
#ifdef __NetBSD__
if (rdev->dummy_page.rdp_map == NULL)
return;
bus_dmamap_unload(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
bus_dmamap_destroy(rdev->ddev->dmat, rdev->dummy_page.rdp_map);
bus_dmamem_free(rdev->ddev->dmat, &rdev->dummy_page.rdp_seg, 1);
rdev->dummy_page.rdp_map = NULL;
#else
if (rdev->dummy_page.page == NULL)
return;
pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
__free_page(rdev->dummy_page.page);
rdev->dummy_page.page = NULL;
#endif
}
@ -882,7 +950,12 @@ int radeon_atombios_init(struct radeon_device *rdev)
atom_card_info->reg_read = cail_reg_read;
atom_card_info->reg_write = cail_reg_write;
/* needed for iio ops */
if (rdev->rio_mem) {
#ifdef __NetBSD__
if (rdev->rio_mem_size)
#else
if (rdev->rio_mem)
#endif
{
atom_card_info->ioreg_read = cail_ioreg_read;
atom_card_info->ioreg_write = cail_ioreg_write;
} else {
@ -901,7 +974,11 @@ int radeon_atombios_init(struct radeon_device *rdev)
return -ENOMEM;
}
#ifdef __NetBSD__
linux_mutex_init(&rdev->mode_info.atom_context->mutex);
#else
mutex_init(&rdev->mode_info.atom_context->mutex);
#endif
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
return 0;
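/*
 * Sketch, assuming the rio_mem_bst/bsh pair above (cail_ioreg_read
 * itself is not shown in this diff): the NetBSD I/O-port callback
 * would replace Linux's ioread32(rdev->rio_mem + reg) with
 * bus_space_read_4.
 */
static uint32_t
cail_ioreg_read_sketch(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;

	return bus_space_read_4(rdev->rio_mem_bst, rdev->rio_mem_bsh, reg);
}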
@ -919,6 +996,11 @@ int radeon_atombios_init(struct radeon_device *rdev)
void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
#ifdef __NetBSD__
linux_mutex_destroy(&rdev->mode_info.atom_context->mutex);
#else
mutex_destroy(&rdev->mode_info.atom_context->mutex);
#endif
kfree(rdev->mode_info.atom_context->scratch);
}
kfree(rdev->mode_info.atom_context);
@ -961,6 +1043,7 @@ void radeon_combios_fini(struct radeon_device *rdev)
{
}
#ifndef __NetBSD__ /* XXX radeon vga */
/* if we get transitioned to only one device, take VGA back */
/**
* radeon_vga_set_decode - enable/disable vga decode
@ -981,6 +1064,7 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
else
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
#endif
/**
* radeon_check_pot_argument - check that argument is a power of two
@ -1054,6 +1138,7 @@ static void radeon_check_arguments(struct radeon_device *rdev)
}
}
#ifndef __NetBSD__ /* XXX radeon vga */
/**
* radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
* needed for waking up.
@ -1139,6 +1224,7 @@ static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
.reprobe = NULL,
.can_switch = radeon_switcheroo_can_switch,
};
#endif
/**
* radeon_device_init - initialize the driver
@ -1159,7 +1245,9 @@ int radeon_device_init(struct radeon_device *rdev,
{
int r, i;
int dma_bits;
#ifndef __NetBSD__
bool runtime = false;
#endif
rdev->shutdown = false;
rdev->dev = &pdev->dev;
@ -1182,16 +1270,33 @@ int radeon_device_init(struct radeon_device *rdev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
#ifdef __NetBSD__
linux_mutex_init(&rdev->ring_lock);
linux_mutex_init(&rdev->dc_hw_i2c_mutex);
#else
mutex_init(&rdev->ring_lock);
mutex_init(&rdev->dc_hw_i2c_mutex);
#endif
atomic_set(&rdev->ih.lock, 0);
#ifdef __NetBSD__
linux_mutex_init(&rdev->gem.mutex);
linux_mutex_init(&rdev->pm.mutex);
linux_mutex_init(&rdev->gpu_clock_mutex);
linux_mutex_init(&rdev->srbm_mutex);
#else
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
mutex_init(&rdev->srbm_mutex);
#endif
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
#ifdef __NetBSD__
spin_lock_init(&rdev->irq.vblank_lock);
DRM_INIT_WAITQUEUE(&rdev->irq.vblank_queue, "radvblnk");
#else
init_waitqueue_head(&rdev->irq.vblank_queue);
#endif
r = radeon_gem_init(rdev);
if (r)
return r;
@ -1245,6 +1350,11 @@ int radeon_device_init(struct radeon_device *rdev,
rdev->need_dma32 = true;
dma_bits = rdev->need_dma32 ? 32 : 40;
#ifdef __NetBSD__
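	/*
	 * __BITS(dma_bits - 1, 0) is the NetBSD spelling of
	 * DMA_BIT_MASK(dma_bits): an inclusive mask of the low 32 or 40 bits.
	 */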
r = drm_limit_dma_space(rdev->ddev, 0, __BITS(dma_bits - 1, 0));
if (r)
DRM_ERROR("No suitable DMA available.\n");
#else
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
rdev->need_dma32 = true;
@ -1256,9 +1366,11 @@ int radeon_device_init(struct radeon_device *rdev,
pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
printk(KERN_WARNING "radeon: No coherent DMA available.\n");
}
#endif
/* Registers mapping */
/* TODO: block userspace mapping of io register */
/* XXX Destroy these locks on detach... */
spin_lock_init(&rdev->mmio_idx_lock);
spin_lock_init(&rdev->smc_idx_lock);
spin_lock_init(&rdev->pll_idx_lock);
@ -1271,6 +1383,27 @@ int radeon_device_init(struct radeon_device *rdev,
spin_lock_init(&rdev->rcu_idx_lock);
spin_lock_init(&rdev->didt_idx_lock);
spin_lock_init(&rdev->end_idx_lock);
#ifdef __NetBSD__
{
pcireg_t bar;
if (rdev->family >= CHIP_BONAIRE)
bar = 5;
else
bar = 2;
if (pci_mapreg_map(&rdev->pdev->pd_pa, PCI_BAR(bar),
pci_mapreg_type(rdev->pdev->pd_pa.pa_pc,
rdev->pdev->pd_pa.pa_tag, PCI_BAR(bar)),
0,
&rdev->rmmio_bst, &rdev->rmmio_bsh,
&rdev->rmmio_addr, &rdev->rmmio_size))
return -EIO;
}
DRM_INFO("register mmio base: 0x%"PRIxMAX"\n",
(uintmax_t)rdev->rmmio_addr);
DRM_INFO("register mmio size: %"PRIuMAX"\n",
(uintmax_t)rdev->rmmio_size);
#else
if (rdev->family >= CHIP_BONAIRE) {
rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
@ -1284,6 +1417,7 @@ int radeon_device_init(struct radeon_device *rdev,
}
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
#endif
/* doorbell bar mapping */
if (rdev->family >= CHIP_BONAIRE)
@ -1291,15 +1425,30 @@ int radeon_device_init(struct radeon_device *rdev,
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
#ifdef __NetBSD__
if (pci_mapreg_map(&rdev->pdev->pd_pa, PCI_BAR(i),
PCI_MAPREG_TYPE_IO, 0,
&rdev->rio_mem_bst, &rdev->rio_mem_bsh,
NULL, &rdev->rio_mem_size))
continue;
break;
#else
if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
break;
}
#endif
}
#ifdef __NetBSD__
if (i == DEVICE_COUNT_RESOURCE)
DRM_ERROR("Unable to find PCI I/O BAR\n");
#else
if (rdev->rio_mem == NULL)
DRM_ERROR("Unable to find PCI I/O BAR\n");
#endif
#ifndef __NetBSD__ /* XXX radeon vga */
/* if we have > 1 VGA cards, then disable the radeon VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
@ -1310,6 +1459,7 @@ int radeon_device_init(struct radeon_device *rdev,
vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
if (runtime)
vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
#endif
r = radeon_init(rdev);
if (r)
@ -1374,16 +1524,46 @@ void radeon_device_fini(struct radeon_device *rdev)
/* evict vram memory */
radeon_bo_evict_vram(rdev);
radeon_fini(rdev);
#ifndef __NetBSD__
vga_switcheroo_unregister_client(rdev->pdev);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
#endif
#ifdef __NetBSD__
if (rdev->rio_mem_size)
bus_space_unmap(rdev->rio_mem_bst, rdev->rio_mem_bsh,
rdev->rio_mem_size);
rdev->rio_mem_size = 0;
bus_space_unmap(rdev->rmmio_bst, rdev->rmmio_bsh, rdev->rmmio_size);
#else
if (rdev->rio_mem)
pci_iounmap(rdev->pdev, rdev->rio_mem);
rdev->rio_mem = NULL;
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
#endif
if (rdev->family >= CHIP_BONAIRE)
radeon_doorbell_fini(rdev);
radeon_debugfs_remove_files(rdev);
#ifdef __NetBSD__
DRM_DESTROY_WAITQUEUE(&rdev->irq.vblank_queue);
spin_lock_destroy(&rdev->irq.vblank_lock);
destroy_rwsem(&rdev->exclusive_lock);
destroy_rwsem(&rdev->pm.mclk_lock);
linux_mutex_destroy(&rdev->srbm_mutex);
linux_mutex_destroy(&rdev->gpu_clock_mutex);
linux_mutex_destroy(&rdev->pm.mutex);
linux_mutex_destroy(&rdev->gem.mutex);
linux_mutex_destroy(&rdev->dc_hw_i2c_mutex);
linux_mutex_destroy(&rdev->ring_lock);
#else
mutex_destroy(&rdev->srbm_mutex);
mutex_destroy(&rdev->gpu_clock_mutex);
mutex_destroy(&rdev->pm.mutex);
mutex_destroy(&rdev->gem.mutex);
mutex_destroy(&rdev->dc_hw_i2c_mutex);
mutex_destroy(&rdev->ring_lock);
#endif
}
@ -1466,18 +1646,22 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
radeon_agp_suspend(rdev);
#ifndef __NetBSD__ /* pmf handles this for us. */
pci_save_state(dev->pdev);
if (suspend) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
}
#endif
#ifndef __NetBSD__ /* XXX radeon fb */
if (fbcon) {
console_lock();
radeon_fbdev_set_suspend(rdev, 1);
console_unlock();
}
#endif
return 0;
}
@ -1499,9 +1683,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
#ifndef __NetBSD__ /* XXX radeon fb */
if (fbcon) {
console_lock();
}
#endif
#ifndef __NetBSD__ /* pmf handles this for us. */
if (resume) {
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
@ -1511,6 +1698,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
return -1;
}
}
#endif
/* resume AGP if in use */
radeon_agp_resume(rdev);
radeon_resume(rdev);
@ -1562,10 +1750,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
radeon_pm_compute_clocks(rdev);
#ifndef __NetBSD__ /* XXX radeon fb */
if (fbcon) {
radeon_fbdev_set_suspend(rdev, 0);
console_unlock();
}
#endif
return 0;
}

View File

@ -30,6 +30,7 @@
#include "atom.h"
#include <asm/div64.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
@ -1030,7 +1031,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
/* pre-avivo */
static inline uint32_t radeon_div(uint64_t n, uint32_t d)
{
uint64_t mod;
uint64_t mod __unused;
n += d / 2;
@ -1063,7 +1064,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
uint32_t post_div;
u32 pll_out_min, pll_out_max;
DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
DRM_DEBUG_KMS("PLL freq %"PRIu64" %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
freq = freq * 1000;
if (pll->flags & RADEON_PLL_IS_LCD) {

View File

@ -36,6 +36,7 @@
#include <drm/drm_pciids.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include "drm_crtc_helper.h"
@ -105,7 +106,7 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
void radeon_gem_object_free(struct drm_gem_object *obj);
int radeon_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file_priv);
@ -118,7 +119,9 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
extern bool radeon_is_px(struct drm_device *dev);
extern const struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
#ifndef __NetBSD__
int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
#endif
int radeon_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
@ -328,6 +331,14 @@ static struct drm_driver driver_old = {
static struct drm_driver kms_driver;
#ifdef __NetBSD__
struct drm_driver *const radeon_drm_driver = &kms_driver;
const struct pci_device_id *const radeon_device_ids = pciidlist;
const size_t radeon_n_device_ids = __arraycount(pciidlist);
#else
static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
@ -513,6 +524,7 @@ static const struct file_operations radeon_driver_kms_fops = {
.compat_ioctl = radeon_kms_compat_ioctl,
#endif
};
#endif /* __NetBSD__ */
static struct drm_driver kms_driver = {
.driver_features =
@ -546,8 +558,13 @@ static struct drm_driver kms_driver = {
.dumb_create = radeon_mode_dumb_create,
.dumb_map_offset = radeon_mode_dumb_mmap,
.dumb_destroy = drm_gem_dumb_destroy,
#ifdef __NetBSD__
.fops = NULL,
#else
.fops = &radeon_driver_kms_fops,
#endif
#ifndef __NetBSD__ /* XXX drm prime */
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
@ -558,6 +575,7 @@ static struct drm_driver kms_driver = {
.gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
.gem_prime_vmap = radeon_gem_prime_vmap,
.gem_prime_vunmap = radeon_gem_prime_vunmap,
#endif
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@ -567,6 +585,8 @@ static struct drm_driver kms_driver = {
.patchlevel = KMS_DRIVER_PATCHLEVEL,
};
#ifndef __NetBSD__
static struct drm_driver *driver;
static struct pci_driver *pdriver;
@ -628,6 +648,8 @@ static void __exit radeon_exit(void)
radeon_unregister_atpx_handler();
}
#endif
module_init(radeon_init);
module_exit(radeon_exit);

View File

@ -48,6 +48,7 @@ struct radeon_fbdev {
struct radeon_device *rdev;
};
#ifndef __NetBSD__
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
@ -61,6 +62,7 @@ static struct fb_ops radeonfb_ops = {
.fb_debug_enter = drm_fb_helper_debug_enter,
.fb_debug_leave = drm_fb_helper_debug_leave,
};
#endif
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
@ -122,9 +124,17 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
fb_tiled) * ((bpp + 1) / 8);
if (rdev->family >= CHIP_R600)
#ifdef __NetBSD__
height = round_up(mode_cmd->height, 8);
#else
height = ALIGN(mode_cmd->height, 8);
#endif
size = mode_cmd->pitches[0] * height;
#ifdef __NetBSD__
aligned_size = round_up(size, PAGE_SIZE);
#else
aligned_size = ALIGN(size, PAGE_SIZE);
#endif
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
false, true,
@ -192,14 +202,20 @@ static int radeonfb_create(struct drm_fb_helper *helper,
{
struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
struct radeon_device *rdev = rfbdev->rdev;
#ifndef __NetBSD__
struct fb_info *info;
#endif
struct drm_framebuffer *fb = NULL;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
#ifndef __NetBSD__
struct device *device = &rdev->pdev->dev;
#endif
int ret;
#ifndef __NetBSD__
unsigned long tmp;
#endif
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
@ -219,6 +235,23 @@ static int radeonfb_create(struct drm_fb_helper *helper,
rbo = gem_to_radeon_bo(gobj);
#ifdef __NetBSD__
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto out_unref;
}
(void)memset(rbo->kptr, 0, radeon_bo_size(rbo));
ret = radeon_genfb_attach(rdev->ddev, helper, sizes, rbo);
if (ret) {
DRM_ERROR("failed to attach genfb: %d\n", ret);
goto out_unref;
}
helper->genfb_attached = true;
fb = &rfbdev->rfb.base;
rfbdev->helper.fb = fb;
#else
/* okay we have an object now allocate the framebuffer */
info = framebuffer_alloc(0, device);
if (info == NULL) {
@ -286,6 +319,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
#endif
return 0;
out_unref:
@ -308,9 +342,17 @@ void radeon_fb_output_poll_changed(struct radeon_device *rdev)
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
#ifndef __NetBSD__
struct fb_info *info;
#endif
struct radeon_framebuffer *rfb = &rfbdev->rfb;
#ifdef __NetBSD__
if (rfbdev->helper.genfb_attached) {
/* XXX detach genfb for real... */
(void)config_detach_children(dev->dev, DETACH_FORCE);
}
#else
if (rfbdev->helper.fbdev) {
info = rfbdev->helper.fbdev;
@ -319,6 +361,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
#endif
if (rfb->obj) {
radeonfb_destroy_pinned_object(rfb->obj);
@ -384,7 +427,9 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
{
#ifndef __NetBSD__ /* XXX radeon fb suspend */
fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
#endif
}
int radeon_fbdev_total_size(struct radeon_device *rdev)

View File

@ -191,7 +191,11 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
if (wake)
#ifdef __NetBSD__
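	/* DRM_SPIN_WAKEUP_ALL expects fence_lock to be held by the caller. */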
DRM_SPIN_WAKEUP_ALL(&rdev->fence_queue, &rdev->fence_lock);
#else
wake_up_all(&rdev->fence_queue);
#endif
}
/**
@ -316,6 +320,22 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
radeon_irq_kms_sw_irq_get(rdev, i);
}
#ifdef __NetBSD__
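	/*
	 * Assumed to mirror wait_event*_timeout: r > 0 on success, 0 on
	 * timeout, negative if interrupted; the condition is evaluated
	 * with fence_lock held.
	 */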
spin_lock(&rdev->fence_lock);
if (intr)
DRM_SPIN_TIMED_WAIT_UNTIL(r, &rdev->fence_queue,
&rdev->fence_lock, RADEON_FENCE_JIFFIES_TIMEOUT,
((signaled = radeon_fence_any_seq_signaled(rdev,
target_seq))
|| rdev->needs_reset));
else
DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(r, &rdev->fence_queue,
&rdev->fence_lock, RADEON_FENCE_JIFFIES_TIMEOUT,
((signaled = radeon_fence_any_seq_signaled(rdev,
target_seq))
|| rdev->needs_reset));
spin_unlock(&rdev->fence_lock);
#else
if (intr) {
r = wait_event_interruptible_timeout(rdev->fence_queue, (
(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
@ -325,6 +345,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
|| rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
}
#endif
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!target_seq[i])
@ -368,13 +389,21 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
if (i < RADEON_NUM_RINGS) {
/* good news we believe it's a lockup */
dev_warn(rdev->dev, "GPU lockup (waiting for "
"0x%016llx last fence id 0x%016llx on"
"0x%016"PRIx64" last fence id 0x%016"PRIx64" on"
" ring %d)\n",
target_seq[i], last_seq[i], i);
/* remember that we need an reset */
#ifdef __NetBSD__
spin_lock(&rdev->fence_lock);
rdev->needs_reset = true;
DRM_SPIN_WAKEUP_ALL(&rdev->fence_queue,
&rdev->fence_lock);
spin_unlock(&rdev->fence_lock);
#else
rdev->needs_reset = true;
wake_up_all(&rdev->fence_queue);
#endif
return -EDEADLK;
}
}
@ -666,8 +695,12 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
} else {
/* put fence directly behind firmware */
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
index = round_up(rdev->uvd_fw->size, 8);
#else
index = ALIGN(rdev->uvd_fw->size, 8);
rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
#endif
rdev->fence_drv[ring].cpu_addr = (uint32_t *)((uint8_t *)rdev->uvd.cpu_addr + index);
rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
}
@ -685,7 +718,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
}
radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
rdev->fence_drv[ring].initialized = true;
dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016"PRIx64" and cpu addr 0x%p\n",
ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
return 0;
}
@ -729,7 +762,12 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
{
int ring;
#ifdef __NetBSD__
spin_lock_init(&rdev->fence_lock);
DRM_INIT_WAITQUEUE(&rdev->fence_queue, "radfence");
#else
init_waitqueue_head(&rdev->fence_queue);
#endif
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
radeon_fence_driver_init_ring(rdev, ring);
}
@ -760,11 +798,22 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
/* no need to trigger GPU reset as we are unloading */
radeon_fence_driver_force_completion(rdev);
}
#ifdef __NetBSD__
spin_lock(&rdev->fence_lock);
DRM_SPIN_WAKEUP_ALL(&rdev->fence_queue, &rdev->fence_lock);
spin_unlock(&rdev->fence_lock);
#else
wake_up_all(&rdev->fence_queue);
#endif
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
rdev->fence_drv[ring].initialized = false;
}
mutex_unlock(&rdev->ring_lock);
#ifdef __NetBSD__
DRM_DESTROY_WAITQUEUE(&rdev->fence_queue);
spin_lock_destroy(&rdev->fence_lock);
#endif
}
/**
@ -807,12 +856,12 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
seq_printf(m, "--- ring %d ---\n", i);
seq_printf(m, "Last signaled fence 0x%016llx\n",
(unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
seq_printf(m, "Last emitted 0x%016llx\n",
seq_printf(m, "Last emitted 0x%016"PRIx64"\n",
rdev->fence_drv[i].sync_seq[i]);
for (j = 0; j < RADEON_NUM_RINGS; ++j) {
if (i != j && rdev->fence_drv[j].initialized)
seq_printf(m, "Last sync to ring %d 0x%016llx\n",
seq_printf(m, "Last sync to ring %d 0x%016"PRIx64"\n",
j, rdev->fence_drv[i].sync_seq[j]);
}
}

View File

@ -64,6 +64,44 @@
*/
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
#ifdef __NetBSD__
int rsegs;
int error;
error = bus_dmamem_alloc(rdev->ddev->dmat, rdev->gart.table_size,
PAGE_SIZE, 0, &rdev->gart.rg_table_seg, 1, &rsegs, BUS_DMA_WAITOK);
if (error)
goto fail0;
KASSERT(rsegs == 1);
error = bus_dmamap_create(rdev->ddev->dmat, rdev->gart.table_size, 1,
rdev->gart.table_size, 0, BUS_DMA_WAITOK,
&rdev->gart.rg_table_map);
if (error)
goto fail1;
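	/*
	 * Map the table BUS_DMA_NOCACHE: the GPU fetches GART entries
	 * straight from this memory, so CPU stores must not linger in
	 * the cache.
	 */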
error = bus_dmamem_map(rdev->ddev->dmat, &rdev->gart.rg_table_seg, 1,
rdev->gart.table_size, &rdev->gart.ptr,
BUS_DMA_WAITOK|BUS_DMA_NOCACHE);
if (error)
goto fail2;
error = bus_dmamap_load(rdev->ddev->dmat, rdev->gart.rg_table_map,
rdev->gart.ptr, rdev->gart.table_size, NULL, BUS_DMA_WAITOK);
if (error)
goto fail3;
/* Success! */
rdev->gart.table_addr = rdev->gart.rg_table_map->dm_segs[0].ds_addr;
return 0;
fail4: __unused
bus_dmamap_unload(rdev->ddev->dmat, rdev->gart.rg_table_map);
fail3: bus_dmamem_unmap(rdev->ddev->dmat, rdev->gart.ptr,
rdev->gart.table_size);
fail2: bus_dmamap_destroy(rdev->ddev->dmat, rdev->gart.rg_table_map);
fail1: bus_dmamem_free(rdev->ddev->dmat, &rdev->gart.rg_table_seg, 1);
fail0: KASSERT(error);
/* XXX errno NetBSD->Linux */
return -error;
#else
void *ptr;
ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
@ -81,6 +119,7 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
rdev->gart.ptr = ptr;
memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
return 0;
#endif
}
/**
@ -97,6 +136,13 @@ void radeon_gart_table_ram_free(struct radeon_device *rdev)
if (rdev->gart.ptr == NULL) {
return;
}
#ifdef __NetBSD__
bus_dmamap_unload(rdev->ddev->dmat, rdev->gart.rg_table_map);
bus_dmamem_unmap(rdev->ddev->dmat, rdev->gart.ptr,
rdev->gart.table_size);
bus_dmamap_destroy(rdev->ddev->dmat, rdev->gart.rg_table_map);
bus_dmamem_free(rdev->ddev->dmat, &rdev->gart.rg_table_seg, 1);
#else
#ifdef CONFIG_X86
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
@ -109,6 +155,7 @@ void radeon_gart_table_ram_free(struct radeon_device *rdev)
rdev->gart.table_addr);
rdev->gart.ptr = NULL;
rdev->gart.table_addr = 0;
#endif
}
/**
@ -209,9 +256,69 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev)
radeon_bo_unref(&rdev->gart.robj);
}
#ifdef __NetBSD__
static void
radeon_gart_pre_update(struct radeon_device *rdev, unsigned gpu_pgstart,
unsigned gpu_npages)
{
if (rdev->gart.rg_table_map != NULL)
bus_dmamap_sync(rdev->ddev->dmat, rdev->gart.rg_table_map,
gpu_pgstart*4, gpu_npages*4, BUS_DMASYNC_PREWRITE);
}
static void
radeon_gart_post_update(struct radeon_device *rdev, unsigned gpu_pgstart,
unsigned gpu_npages)
{
membar_sync(); /* XXX overkill */
if (rdev->gart.rg_table_map != NULL)
bus_dmamap_sync(rdev->ddev->dmat, rdev->gart.rg_table_map,
gpu_pgstart*4, gpu_npages*4, BUS_DMASYNC_PREWRITE);
radeon_gart_tlb_flush(rdev);
}
#endif
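/*
 * Worked sketch of the CPU/GPU page arithmetic below, assuming
 * RADEON_GPU_PAGE_SIZE is 4096: with 4 KB CPU pages gpu_per_cpu is 1,
 * with 8 KB pages it is 2, so CPU page pgno of a binding that starts
 * at GPU page gpu_pgstart covers GART entries
 *
 *	gpu_pgstart + pgno*gpu_per_cpu + gpu_pgno,  0 <= gpu_pgno < gpu_per_cpu
 *
 * each pointing RADEON_GPU_PAGE_SIZE bytes further into the CPU page.
 */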
/*
* Common gart functions.
*/
#ifdef __NetBSD__
void
radeon_gart_unbind(struct radeon_device *rdev, unsigned gpu_start,
unsigned npages)
{
const unsigned gpu_per_cpu = (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
const unsigned gpu_npages = (npages / gpu_per_cpu);
const unsigned gpu_pgstart = (gpu_start / RADEON_GPU_PAGE_SIZE);
const unsigned pgstart = (gpu_pgstart / gpu_per_cpu);
unsigned pgno, gpu_pgno;
KASSERT(pgstart == (gpu_start / PAGE_SIZE));
KASSERT(npages <= rdev->gart.num_cpu_pages);
KASSERT(gpu_npages <= rdev->gart.num_cpu_pages);
if (!rdev->gart.ready) {
WARN(1, "trying to bind memory to uninitialized GART !\n");
return;
}
radeon_gart_pre_update(rdev, gpu_pgstart, gpu_npages);
for (pgno = 0; pgno < npages; pgno++) {
if (rdev->gart.pages[pgstart + pgno] == NULL)
continue;
rdev->gart.pages[pgstart + pgno] = NULL;
rdev->gart.pages_addr[pgstart + pgno] = rdev->dummy_page.addr;
if (rdev->gart.ptr == NULL)
continue;
for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++)
radeon_gart_set_page(rdev,
    gpu_pgstart + pgno*gpu_per_cpu + gpu_pgno,
    (rdev->dummy_page.addr + gpu_pgno*RADEON_GPU_PAGE_SIZE));
}
radeon_gart_post_update(rdev, gpu_pgstart, gpu_npages);
}
#else
/**
* radeon_gart_unbind - unbind pages from the gart page table
*
@ -252,7 +359,47 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
mb();
radeon_gart_tlb_flush(rdev);
}
#endif
#ifdef __NetBSD__
int
radeon_gart_bind(struct radeon_device *rdev, unsigned gpu_start,
unsigned npages, struct page **pages, bus_dmamap_t dmamap)
{
const unsigned gpu_per_cpu = (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
const unsigned gpu_npages = (npages / gpu_per_cpu);
const unsigned gpu_pgstart = (gpu_start / RADEON_GPU_PAGE_SIZE);
const unsigned pgstart = (gpu_pgstart / gpu_per_cpu);
unsigned pgno, gpu_pgno;
KASSERT(pgstart == (gpu_start / PAGE_SIZE));
KASSERT(npages == dmamap->dm_nsegs);
KASSERT(npages <= rdev->gart.num_cpu_pages);
KASSERT(gpu_npages <= rdev->gart.num_cpu_pages);
if (!rdev->gart.ready) {
WARN(1, "trying to bind memory to uninitialized GART !\n");
return -EINVAL;
}
radeon_gart_pre_update(rdev, gpu_pgstart, gpu_npages);
for (pgno = 0; pgno < npages; pgno++) {
const bus_addr_t addr = dmamap->dm_segs[pgno].ds_addr;
KASSERT(dmamap->dm_segs[pgno].ds_len == PAGE_SIZE);
rdev->gart.pages[pgstart + pgno] = pages[pgno];
rdev->gart.pages_addr[pgstart + pgno] = addr;
if (rdev->gart.ptr == NULL)
continue;
for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++)
radeon_gart_set_page(rdev,
    gpu_pgstart + pgno*gpu_per_cpu + gpu_pgno,
    (addr + gpu_pgno*RADEON_GPU_PAGE_SIZE));
}
radeon_gart_post_update(rdev, gpu_pgstart, gpu_npages);
return 0;
}
#else
/**
* radeon_gart_bind - bind pages into the gart page table
*
@ -296,6 +443,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
radeon_gart_tlb_flush(rdev);
return 0;
}
#endif
/**
* radeon_gart_restore - bind all pages in the gart page table
@ -307,6 +455,22 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
*/
void radeon_gart_restore(struct radeon_device *rdev)
{
#ifdef __NetBSD__
const unsigned gpu_per_cpu = (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
unsigned pgno, gpu_pgno;
if (rdev->gart.ptr == NULL)
return;
radeon_gart_pre_update(rdev, 0, rdev->gart.num_gpu_pages);
for (pgno = 0; pgno < rdev->gart.num_cpu_pages; pgno++) {
const bus_addr_t addr = rdev->gart.pages_addr[pgno];
for (gpu_pgno = 0; gpu_pgno < gpu_per_cpu; gpu_pgno++)
radeon_gart_set_page(rdev, pgno*gpu_per_cpu + gpu_pgno,
(addr + gpu_pgno*RADEON_GPU_PAGE_SIZE));
}
radeon_gart_post_update(rdev, 0, rdev->gart.num_gpu_pages);
#else
int i, j, t;
u64 page_base;
@ -322,6 +486,7 @@ void radeon_gart_restore(struct radeon_device *rdev)
}
mb();
radeon_gart_tlb_flush(rdev);
#endif
}
/**

View File

@ -34,8 +34,10 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
struct radeon_bo *robj = gem_to_radeon_bo(gobj);
if (robj) {
#ifndef __NetBSD__ /* XXX drm prime */
if (robj->gem_base.import_attach)
drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
#endif
radeon_bo_unref(&robj);
}
}
@ -77,7 +79,9 @@ retry:
return r;
}
*obj = &robj->gem_base;
#ifndef __NetBSD__
robj->pid = task_pid_nr(current);
#endif
mutex_lock(&rdev->gem.mutex);
list_add_tail(&robj->list, &rdev->gem.objects);
@ -569,7 +573,11 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
args->size = args->pitch * args->height;
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
args->size = round_up(args->size, PAGE_SIZE);
#else
args->size = ALIGN(args->size, PAGE_SIZE);
#endif
r = radeon_gem_object_create(rdev, args->size, 0,
RADEON_GEM_DOMAIN_VRAM,

View File

@ -24,6 +24,7 @@
* Alex Deucher
*/
#include <linux/export.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>

View File

@ -181,7 +181,7 @@ static u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_dis
* tied to dma at all, this is just a hangover from dri prehistory.
*/
irqreturn_t radeon_driver_irq_handler(int irq, void *arg)
irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_radeon_private_t *dev_priv =

View File

@ -45,7 +45,7 @@
* radeon_irq_process is a macro that points to the per-asic
* irq handler callback.
*/
irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
struct radeon_device *rdev = dev->dev_private;

View File

@ -58,8 +58,14 @@ int radeon_driver_unload_kms(struct drm_device *dev)
if (rdev == NULL)
return 0;
#ifdef __NetBSD__
/* XXX ugh */
if (rdev->rmmio_size)
goto done_free;
#else
if (rdev->rmmio == NULL)
goto done_free;
#endif
pm_runtime_get_sync(dev->dev);
@ -537,7 +543,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
*/
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
#ifndef __NetBSD__ /* XXX radeon vga */
vga_switcheroo_process_delayed_switch();
#endif
}
/**
@ -564,7 +572,6 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv;
struct radeon_bo_va *bo_va;
int r;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
if (unlikely(!fpriv)) {

View File

@ -431,7 +431,7 @@ static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
u16 p1, p2, h_inc;
bool h_changed;
const struct radeon_tv_mode_constants *const_ptr;
struct radeon_pll *pll;
struct radeon_pll *pll __unused;
radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
if (radeon_crtc->crtc_id == 1)
@ -544,7 +544,7 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
uint32_t tv_master_cntl, tv_rgb_cntl, tv_dac_cntl;
uint32_t tv_modulator_cntl1, tv_modulator_cntl2;
uint32_t tv_vscaler_cntl1, tv_vscaler_cntl2;
uint32_t tv_pll_cntl, tv_pll_cntl1, tv_ftotal;
uint32_t tv_pll_cntl, tv_pll_cntl1 __unused, tv_ftotal;
uint32_t tv_y_fall_cntl, tv_y_rise_cntl, tv_y_saw_tooth_cntl;
uint32_t m, n, p;
const uint16_t *hor_timing;

View File

@ -909,4 +909,12 @@ void radeon_fb_output_poll_changed(struct radeon_device *rdev);
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
#ifdef __NetBSD__
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
int radeon_genfb_attach(struct drm_device *, struct drm_fb_helper *,
const struct drm_fb_helper_surface_size *, struct radeon_bo *);
#endif
#endif

View File

@ -154,7 +154,11 @@ int radeon_bo_create(struct radeon_device *rdev,
size_t acc_size;
int r;
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
size = round_up(size, PAGE_SIZE);
#else
size = ALIGN(size, PAGE_SIZE);
#endif
if (kernel) {
type = ttm_bo_type_kernel;
@ -362,7 +366,7 @@ int radeon_bo_init(struct radeon_device *rdev)
rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
rdev->mc.aper_size);
}
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
DRM_INFO("Detected VRAM RAM=%"PRIx64"M, BAR=%lluM\n",
rdev->mc.mc_vram_size >> 20,
(unsigned long long)rdev->mc.aper_size >> 20);
DRM_INFO("RAM width %dbits %cDR\n",
@ -490,11 +494,15 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
return 0;
}
#ifdef __NetBSD__
/* XXX Fill me in! */
#else
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma)
{
return ttm_fbdev_mmap(vma, &bo->tbo);
}
#endif
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{

View File

@ -165,7 +165,7 @@ static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
{
return sa_bo->manager->cpu_ptr + sa_bo->soffset;
return (char *)sa_bo->manager->cpu_ptr + sa_bo->soffset;
}
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,

View File

@ -154,10 +154,22 @@ static void radeon_unmap_vram_bos(struct radeon_device *rdev)
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
if (rdev->pm.active_crtcs) {
#ifdef __NetBSD__
int ret __unused;
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = false;
DRM_SPIN_TIMED_WAIT_UNTIL(ret, &rdev->irq.vblank_queue,
&rdev->irq.vblank_lock,
msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT),
rdev->pm.vblank_sync);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = false;
wait_event_timeout(
rdev->irq.vblank_queue, rdev->pm.vblank_sync,
msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
#endif
}
}
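
This conversion pattern recurs throughout the change: wait_event_timeout() needs no lock of its own because Linux waitqueues embed one, whereas the DRM_SPIN_* wait macros take an explicit spin lock that the waker must hold too. The matching wakeup, as it appears in the interrupt handlers later in the change:

	spin_lock(&rdev->irq.vblank_lock);
	rdev->pm.vblank_sync = true;
	DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
	spin_unlock(&rdev->irq.vblank_lock);

Holding vblank_lock across both the flag update and the wakeup keeps the timed wait from missing a vblank that fires between its test of vblank_sync and its sleep.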
@ -338,6 +350,7 @@ static void radeon_pm_print_states(struct radeon_device *rdev)
}
}
#ifndef __NetBSD__ /* XXX radeon power */
static ssize_t radeon_get_pm_profile(struct device *dev,
struct device_attribute *attr,
char *buf)
@ -562,7 +575,9 @@ static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, rad
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
radeon_get_dpm_forced_performance_level,
radeon_set_dpm_forced_performance_level);
#endif
#ifndef __NetBSD__ /* XXX radeon hwmon */
static ssize_t radeon_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
@ -635,11 +650,13 @@ static const struct attribute_group *hwmon_groups[] = {
&hwmon_attrgroup,
NULL
};
#endif
static int radeon_hwmon_init(struct radeon_device *rdev)
{
int err = 0;
#ifndef __NetBSD__ /* XXX radeon hwmon */
switch (rdev->pm.int_thermal_type) {
case THERMAL_TYPE_RV6XX:
case THERMAL_TYPE_RV770:
@ -663,14 +680,17 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
default:
break;
}
#endif
return err;
}
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
#ifndef __NetBSD__ /* XXX radeon hwmon */
if (rdev->pm.int_hwmon_dev)
hwmon_device_unregister(rdev->pm.int_hwmon_dev);
#endif
}
static void radeon_dpm_thermal_work_handler(struct work_struct *work)
@ -1178,6 +1198,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
#ifndef __NetBSD__ /* XXX radeon power */
if (rdev->pm.num_power_states > 1) {
/* where's the best place to put these? */
ret = device_create_file(rdev->dev, &dev_attr_power_profile);
@ -1193,6 +1214,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
DRM_INFO("radeon: power management initialized\n");
}
#endif
return 0;
}
@ -1244,6 +1266,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
goto dpm_failed;
rdev->pm.dpm_enabled = true;
#ifndef __NetBSD__ /* XXX radeon power */
ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
if (ret)
DRM_ERROR("failed to create device file for dpm state\n");
@ -1257,6 +1280,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
ret = device_create_file(rdev->dev, &dev_attr_power_method);
if (ret)
DRM_ERROR("failed to create device file for power method\n");
#endif
if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
@ -1390,8 +1414,10 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
#ifndef __NetBSD__ /* XXX radeon power */
device_remove_file(rdev->dev, &dev_attr_power_profile);
device_remove_file(rdev->dev, &dev_attr_power_method);
#endif
}
radeon_hwmon_fini(rdev);
@ -1407,11 +1433,13 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
radeon_dpm_disable(rdev);
mutex_unlock(&rdev->pm.mutex);
#ifndef __NetBSD__ /* XXX radeon power */
device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
/* XXX backwards compat */
device_remove_file(rdev->dev, &dev_attr_power_profile);
device_remove_file(rdev->dev, &dev_attr_power_method);
#endif
}
radeon_dpm_fini(rdev);

View File

@ -26,6 +26,7 @@
* Jerome Glisse
* Christian König
*/
#include <linux/jiffies.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
@ -515,7 +516,7 @@ bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *rin
elapsed = jiffies_to_msecs(jiffies_64 - last);
if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
dev_err(rdev->dev, "ring %d stalled for more than %llumsec\n",
dev_err(rdev->dev, "ring %d stalled for more than %"PRIu64"msec\n",
ring->idx, elapsed);
return true;
}
@ -657,7 +658,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
return r;
}
r = radeon_bo_kmap(ring->ring_obj,
(void **)&ring->ring);
(void **)__UNVOLATILE(&ring->ring));
radeon_bo_unreserve(ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring map failed\n", r);

View File

@ -53,7 +53,12 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
{
int i, r;
#ifdef __NetBSD__
spin_lock_init(&sa_manager->wq_lock);
DRM_INIT_WAITQUEUE(&sa_manager->wq, "radsabom");
#else
init_waitqueue_head(&sa_manager->wq);
#endif
sa_manager->bo = NULL;
sa_manager->size = size;
sa_manager->domain = domain;
@ -91,6 +96,10 @@ void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
}
radeon_bo_unref(&sa_manager->bo);
sa_manager->size = 0;
#ifdef __NetBSD__
DRM_DESTROY_WAITQUEUE(&sa_manager->wq);
spin_lock_destroy(&sa_manager->wq_lock);
#endif
}
int radeon_sa_bo_manager_start(struct radeon_device *rdev,
@ -330,7 +339,11 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
INIT_LIST_HEAD(&(*sa_bo)->olist);
INIT_LIST_HEAD(&(*sa_bo)->flist);
#ifdef __NetBSD__
spin_lock(&sa_manager->wq_lock);
#else
spin_lock(&sa_manager->wq.lock);
#endif
do {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
fences[i] = NULL;
@ -342,13 +355,27 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
size, align)) {
#ifdef __NetBSD__
spin_unlock(&sa_manager->wq_lock);
#else
spin_unlock(&sa_manager->wq.lock);
#endif
return 0;
}
/* see if we can skip over some allocations */
} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
#ifdef __NetBSD__
spin_unlock(&sa_manager->wq_lock);
r = radeon_fence_wait_any(rdev, fences, false);
spin_lock(&sa_manager->wq_lock);
/* if we have nothing to wait for block */
if (r == -ENOENT)
DRM_SPIN_WAIT_UNTIL(r, &sa_manager->wq,
&sa_manager->wq_lock,
radeon_sa_event(sa_manager, size, align));
#else
spin_unlock(&sa_manager->wq.lock);
r = radeon_fence_wait_any(rdev, fences, false);
spin_lock(&sa_manager->wq.lock);
@ -359,10 +386,15 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
radeon_sa_event(sa_manager, size, align)
);
}
#endif
} while (!r);
#ifdef __NetBSD__
spin_unlock(&sa_manager->wq_lock);
#else
spin_unlock(&sa_manager->wq.lock);
#endif
kfree(*sa_bo);
*sa_bo = NULL;
return r;
@ -378,7 +410,11 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
}
sa_manager = (*sa_bo)->manager;
#ifdef __NetBSD__
spin_lock(&sa_manager->wq_lock);
#else
spin_lock(&sa_manager->wq.lock);
#endif
if (fence && !radeon_fence_signaled(fence)) {
(*sa_bo)->fence = radeon_fence_ref(fence);
list_add_tail(&(*sa_bo)->flist,
@ -386,8 +422,13 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
} else {
radeon_sa_bo_remove_locked(*sa_bo);
}
#ifdef __NetBSD__
DRM_SPIN_WAKEUP_ALL(&sa_manager->wq, &sa_manager->wq_lock);
spin_unlock(&sa_manager->wq_lock);
#else
wake_up_all_locked(&sa_manager->wq);
spin_unlock(&sa_manager->wq.lock);
#endif
*sa_bo = NULL;
}

View File

@ -114,7 +114,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
goto out_lclean_unpin;
}
for (gtt_start = gtt_map, gtt_end = gtt_map + size;
for (gtt_start = gtt_map, gtt_end = gtt_start + size;
gtt_start < gtt_end;
gtt_start++)
*gtt_start = gtt_start;
@ -144,8 +144,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
goto out_lclean_unpin;
}
for (gtt_start = gtt_map, gtt_end = gtt_map + size,
vram_start = vram_map, vram_end = vram_map + size;
for (gtt_start = gtt_map, gtt_end = gtt_start + size,
vram_start = vram_map, vram_end = vram_start + size;
vram_start < vram_end;
gtt_start++, vram_start++) {
if (*vram_start != gtt_start) {
@ -155,10 +155,10 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
i, *vram_start, gtt_start,
(unsigned long long)
(gtt_addr - rdev->mc.gtt_start +
(void*)gtt_start - gtt_map),
(u8*)gtt_start - (u8*)gtt_map),
(unsigned long long)
(vram_addr - rdev->mc.vram_start +
(void*)gtt_start - gtt_map));
(u8*)gtt_start - (u8*)gtt_map));
radeon_bo_kunmap(vram_obj);
goto out_lclean_unpin;
}
@ -190,8 +190,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
goto out_lclean_unpin;
}
for (gtt_start = gtt_map, gtt_end = gtt_map + size,
vram_start = vram_map, vram_end = vram_map + size;
for (gtt_start = gtt_map, gtt_end = gtt_start + size,
vram_start = vram_map, vram_end = vram_start + size;
gtt_start < gtt_end;
gtt_start++, vram_start++) {
if (*gtt_start != vram_start) {
@ -201,10 +201,10 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
i, *gtt_start, vram_start,
(unsigned long long)
(vram_addr - rdev->mc.vram_start +
(void*)vram_start - vram_map),
(u8*)vram_start - (u8*)vram_map),
(unsigned long long)
(gtt_addr - rdev->mc.gtt_start +
(void*)vram_start - vram_map));
(u8*)vram_start - (u8*)vram_map));
radeon_bo_kunmap(gtt_obj[i]);
goto out_lclean_unpin;
}
@ -212,7 +212,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
radeon_bo_kunmap(gtt_obj[i]);
DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%"PRIx64"\n",
gtt_addr - rdev->mc.gtt_start);
continue;

View File

@ -1,191 +0,0 @@
#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _RADEON_TRACE_H_
#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>
#include <drm/drmP.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM radeon
#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE radeon_trace
TRACE_EVENT(radeon_bo_create,
TP_PROTO(struct radeon_bo *bo),
TP_ARGS(bo),
TP_STRUCT__entry(
__field(struct radeon_bo *, bo)
__field(u32, pages)
),
TP_fast_assign(
__entry->bo = bo;
__entry->pages = bo->tbo.num_pages;
),
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);
TRACE_EVENT(radeon_cs,
TP_PROTO(struct radeon_cs_parser *p),
TP_ARGS(p),
TP_STRUCT__entry(
__field(u32, ring)
__field(u32, dw)
__field(u32, fences)
),
TP_fast_assign(
__entry->ring = p->ring;
__entry->dw = p->chunks[p->chunk_ib_idx].length_dw;
__entry->fences = radeon_fence_count_emitted(
p->rdev, p->ring);
),
TP_printk("ring=%u, dw=%u, fences=%u",
__entry->ring, __entry->dw,
__entry->fences)
);
TRACE_EVENT(radeon_vm_grab_id,
TP_PROTO(unsigned vmid, int ring),
TP_ARGS(vmid, ring),
TP_STRUCT__entry(
__field(u32, vmid)
__field(u32, ring)
),
TP_fast_assign(
__entry->vmid = vmid;
__entry->ring = ring;
),
TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
);
TRACE_EVENT(radeon_vm_bo_update,
TP_PROTO(struct radeon_bo_va *bo_va),
TP_ARGS(bo_va),
TP_STRUCT__entry(
__field(u64, soffset)
__field(u64, eoffset)
__field(u32, flags)
),
TP_fast_assign(
__entry->soffset = bo_va->soffset;
__entry->eoffset = bo_va->eoffset;
__entry->flags = bo_va->flags;
),
TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
__entry->soffset, __entry->eoffset, __entry->flags)
);
TRACE_EVENT(radeon_vm_set_page,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags),
TP_ARGS(pe, addr, count, incr, flags),
TP_STRUCT__entry(
__field(u64, pe)
__field(u64, addr)
__field(u32, count)
__field(u32, incr)
__field(u32, flags)
),
TP_fast_assign(
__entry->pe = pe;
__entry->addr = addr;
__entry->count = count;
__entry->incr = incr;
__entry->flags = flags;
),
TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
__entry->pe, __entry->addr, __entry->incr,
__entry->flags, __entry->count)
);
DECLARE_EVENT_CLASS(radeon_fence_request,
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
TP_ARGS(dev, ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
__field(int, ring)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
__entry->ring = ring;
__entry->seqno = seqno;
),
TP_printk("dev=%u, ring=%d, seqno=%u",
__entry->dev, __entry->ring, __entry->seqno)
);
DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
TP_ARGS(dev, ring, seqno)
);
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
TP_ARGS(dev, ring, seqno)
);
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
TP_ARGS(dev, ring, seqno)
);
DECLARE_EVENT_CLASS(radeon_semaphore_request,
TP_PROTO(int ring, struct radeon_semaphore *sem),
TP_ARGS(ring, sem),
TP_STRUCT__entry(
__field(int, ring)
__field(signed, waiters)
__field(uint64_t, gpu_addr)
),
TP_fast_assign(
__entry->ring = ring;
__entry->waiters = sem->waiters;
__entry->gpu_addr = sem->gpu_addr;
),
TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
__entry->waiters, __entry->gpu_addr)
);
DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_signale,
TP_PROTO(int ring, struct radeon_semaphore *sem),
TP_ARGS(ring, sem)
);
DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait,
TP_PROTO(int ring, struct radeon_semaphore *sem),
TP_ARGS(ring, sem)
);
#endif
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>

View File

@ -43,6 +43,12 @@
#include "radeon_reg.h"
#include "radeon.h"
#ifdef __NetBSD__
#include <uvm/uvm_extern.h>
#include <uvm/uvm_param.h>
#include <drm/bus_dma_hacks.h>
#endif
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
@ -281,7 +287,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
struct radeon_device *rdev __unused;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
u32 placements;
@ -328,7 +334,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
struct radeon_device *rdev __unused;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_placement placement;
@ -592,18 +598,24 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt = (void *)ttm;
#ifndef __NetBSD__
unsigned i;
int r;
#endif
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
return 0;
if (slave && ttm->sg) {
#ifdef __NetBSD__ /* XXX drm prime */
return -EINVAL;
#else
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
ttm->state = tt_unbound;
return 0;
#endif
}
rdev = radeon_get_rdev(ttm->bdev);
@ -613,6 +625,11 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
}
#endif
#ifdef __NetBSD__
/* XXX errno NetBSD->Linux */
return ttm_bus_dma_populate(&gtt->ttm);
#else
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
return ttm_dma_populate(&gtt->ttm, rdev->dev);
@ -639,13 +656,16 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
}
}
return 0;
#endif
}
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt = (void *)ttm;
#ifndef __NetBSD__
unsigned i;
#endif
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (slave)
@ -659,6 +679,11 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
#endif
#ifdef __NetBSD__
ttm_bus_dma_unpopulate(&gtt->ttm);
return;
#else
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
@ -674,6 +699,7 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
ttm_pool_unpopulate(ttm);
#endif
}
static struct ttm_bo_driver radeon_bo_driver = {
@ -708,7 +734,12 @@ int radeon_ttm_init(struct radeon_device *rdev)
r = ttm_bo_device_init(&rdev->mman.bdev,
rdev->mman.bo_global_ref.ref.object,
&radeon_bo_driver,
#ifdef __NetBSD__
rdev->ddev->bst,
rdev->ddev->dmat,
#else
rdev->ddev->anon_inode->i_mapping,
#endif
DRM_FILE_PAGE_OFFSET,
rdev->need_dma32);
if (r) {
@ -797,6 +828,52 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
man->size = size >> PAGE_SHIFT;
}
#ifdef __NetBSD__
#include <uvm/uvm_fault.h>
int
radeon_ttm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr,
struct vm_page **pps, int npages, int centeridx, vm_prot_t access_type,
int flags)
{
struct uvm_object *const uobj = ufi->entry->object.uvm_obj;
struct ttm_buffer_object *const bo = container_of(uobj,
struct ttm_buffer_object, uvmobj);
struct radeon_device *const rdev = radeon_get_rdev(bo->bdev);
int error;
KASSERT(rdev != NULL);
down_read(&rdev->pm.mclk_lock);
error = ttm_bo_uvm_fault(ufi, vaddr, pps, npages, centeridx,
access_type, flags);
up_read(&rdev->pm.mclk_lock);
return error;
}
int
radeon_mmap_object(struct drm_device *dev, off_t offset, size_t size,
vm_prot_t prot, struct uvm_object **uobjp, voff_t *uoffsetp,
struct file *file)
{
struct radeon_device *rdev = dev->dev_private;
KASSERT(0 == (offset & (PAGE_SIZE - 1)));
if (__predict_false(rdev == NULL)) /* XXX How?? */
return -EINVAL;
if (__predict_false((offset >> PAGE_SHIFT) < DRM_FILE_PAGE_OFFSET))
return drm_mmap_object(dev, offset, size, prot, uobjp,
uoffsetp /* , file */);
else
return ttm_bo_mmap_object(&rdev->mman.bdev, offset, size, prot,
uobjp, uoffsetp, file);
}
#else
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;
@ -845,6 +922,8 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
return 0;
}
#endif /* __NetBSD__ */
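
One number worth spelling out for radeon_mmap_object(): with 4 KB pages, DRM_FILE_PAGE_OFFSET is 0x100000000ULL >> 12 = 0x100000 pages, so the split lands at the 4 GiB mark:

	/*
	 * Assuming PAGE_SHIFT == 12 (offsets hypothetical):
	 *   offset 0x10000000  -> page 0x10000  (< 0x100000): drm_mmap_object
	 *   offset 0x100000000 -> page 0x100000 (>= 0x100000): ttm_bo_mmap_object
	 */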
#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_dump_table(struct seq_file *m, void *data)

View File

@ -180,7 +180,7 @@ void radeon_uvd_fini(struct radeon_device *rdev)
int radeon_uvd_suspend(struct radeon_device *rdev)
{
unsigned size;
void *ptr;
uint8_t *ptr;
int i;
if (rdev->uvd.vcpu_bo == NULL)
@ -208,7 +208,7 @@ int radeon_uvd_suspend(struct radeon_device *rdev)
int radeon_uvd_resume(struct radeon_device *rdev)
{
unsigned size;
void *ptr;
uint8_t *ptr;
if (rdev->uvd.vcpu_bo == NULL)
return -EINVAL;
@ -272,13 +272,21 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
unsigned pitch = msg[28];
unsigned width_in_mb = width / 16;
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
unsigned height_in_mb = round_up(height / 16, 2);
#else
unsigned height_in_mb = ALIGN(height / 16, 2);
#endif
unsigned image_size, tmp, min_dpb_size;
image_size = width * height;
image_size += image_size / 2;
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
image_size = round_up(image_size, 1024);
#else
image_size = ALIGN(image_size, 1024);
#endif
switch (stream_type) {
case 0: /* H264 */
@ -309,7 +317,11 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
/* BP */
tmp = max(width_in_mb, height_in_mb);
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
min_dpb_size += round_up(tmp * 7 * 16, 64);
#else
min_dpb_size += ALIGN(tmp * 7 * 16, 64);
#endif
break;
case 3: /* MPEG2 */
@ -327,7 +339,11 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
min_dpb_size += width_in_mb * height_in_mb * 64;
/* IT surface buffer */
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
min_dpb_size += round_up(width_in_mb * height_in_mb * 32, 64);
#else
min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
#endif
break;
default:
@ -379,7 +395,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return r;
}
msg = ptr + offset;
msg = (int32_t *)((uint8_t *)ptr + offset);
msg_type = msg[1];
handle = msg[2];
@ -482,7 +498,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
}
if ((start >> 28) != ((end - 1) >> 28)) {
DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
DRM_ERROR("reloc %"PRIX64"-%"PRIX64" crossing 256MB boundary!\n",
start, end);
return -EINVAL;
}
@ -490,7 +506,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
/* TODO: is this still necessary on NI+ ? */
if ((cmd == 0 || cmd == 0x3) &&
(start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
DRM_ERROR("msg/fb buffer %"PRIX64"-%"PRIX64" out of 256MB segment!\n",
start, end);
return -EINVAL;
}

View File

@ -44,6 +44,40 @@ MODULE_FIRMWARE(FIRMWARE_BONAIRE);
static void radeon_vce_idle_work_handler(struct work_struct *work);
#ifdef __NetBSD__ /* XXX Ugh! */
static bool
scan_2dec_u8(const char **sp, char delim, uint8_t *u8p)
{
char c0, c1;
if (!isdigit((unsigned char)(c0 = *(*sp)++)))
return false;
if (!isdigit((unsigned char)(c1 = *(*sp)++)))
return false;
if (*(*sp)++ != delim)
return false;
*u8p = ((c0 - '0') * 10) + (c1 - '0');
return true;
}
static bool
scan_2dec_uint(const char **sp, char delim, unsigned int *uintp)
{
char c0, c1;
if (!isdigit((unsigned char)(c0 = *(*sp)++)))
return false;
if (!isdigit((unsigned char)(c1 = *(*sp)++)))
return false;
if (*(*sp)++ != delim)
return false;
*uintp = ((c0 - '0') * 10) + (c1 - '0');
return true;
}
#endif
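
These helpers stand in for the sscanf() calls in the non-NetBSD branches below, with one behavioural difference worth noting: %2hhd accepts one or two digits, while scan_2dec_u8() insists on exactly two plus the delimiter, which assumes the firmware version fields are zero-padded. A usage sketch with a hypothetical input string:

	uint8_t start, mid, end;
	const char *c = "50.00.02]";	/* hypothetical */

	if (scan_2dec_u8(&c, '.', &start) &&
	    scan_2dec_u8(&c, '.', &mid) &&
	    scan_2dec_u8(&c, ']', &end)) {
		/* start == 50, mid == 0, end == 2 */
	}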
/**
* radeon_vce_init - allocate memory, load vce firmware
*
@ -93,8 +127,17 @@ int radeon_vce_init(struct radeon_device *rdev)
return -EINVAL;
c += strlen(fw_version);
#ifdef __NetBSD__
if (!scan_2dec_u8(&c, '.', &start))
return -EINVAL;
if (!scan_2dec_u8(&c, '.', &mid))
return -EINVAL;
if (!scan_2dec_u8(&c, ']', &end))
return -EINVAL;
#else
if (sscanf(c, "%2hhd.%2hhd.%2hhd]", &start, &mid, &end) != 3)
return -EINVAL;
#endif
/* search for feedback version */
@ -108,8 +151,13 @@ int radeon_vce_init(struct radeon_device *rdev)
return -EINVAL;
c += strlen(fb_version);
#ifdef __NetBSD__
if (!scan_2dec_uint(&c, ']', &rdev->vce.fb_version))
return -EINVAL;
#else
if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1)
return -EINVAL;
#endif
DRM_INFO("Found VCE firmware/feedback version %hhd.%hhd.%hhd / %d!\n",
start, mid, end, rdev->vce.fb_version);
@ -474,7 +522,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
p->ib.ptr[hi] = start >> 32;
if (end <= start) {
DRM_ERROR("invalid reloc offset %llX!\n", offset);
DRM_ERROR("invalid reloc offset %"PRIX64"!\n", offset);
return -EINVAL;
}
if ((end - start) < size) {

View File

@ -899,7 +899,11 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
vm->fence = NULL;
vm->last_flush = NULL;
vm->last_id_use = NULL;
#ifdef __NetBSD__
linux_mutex_init(&vm->mutex);
#else
mutex_init(&vm->mutex);
#endif
INIT_LIST_HEAD(&vm->va);
pd_size = radeon_vm_directory_size(rdev);
@ -967,5 +971,9 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
radeon_fence_unref(&vm->last_flush);
radeon_fence_unref(&vm->last_id_use);
#ifdef __NetBSD__
linux_mutex_destroy(&vm->mutex);
#else
mutex_destroy(&vm->mutex);
#endif
}

View File

@ -225,7 +225,15 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
((upper_32_bits(addr) & 0xff) << 4) |
RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
entry = cpu_to_le32(entry);
#ifdef __NetBSD__ /* XXX Batch syncs for batch GART updates. */
bus_dmamap_sync(rdev->ddev->dmat, rdev->gart.rg_table_map, i*4, 4,
BUS_DMASYNC_PREWRITE);
#endif
gtt[i] = entry;
#ifdef __NetBSD__ /* XXX Batch syncs for batch GART updates. */
bus_dmamap_sync(rdev->ddev->dmat, rdev->gart.rg_table_map, i*4, 4,
BUS_DMASYNC_POSTWRITE);
#endif
return 0;
}
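
The XXX comments above flag the cost: two bus_dmamap_sync() calls for every 4-byte entry, even when a caller updates a long run of entries. Batching would hoist the syncs out to the callers, around the whole run; a minimal sketch under that assumption (the helper name is hypothetical):

	static void
	rs400_gart_sync_range(struct radeon_device *rdev,
	    unsigned gpu_pgstart, unsigned gpu_npages, int ops)
	{
		bus_dmamap_sync(rdev->ddev->dmat, rdev->gart.rg_table_map,
		    4*gpu_pgstart, 4*gpu_npages, ops);
	}

called once with BUS_DMASYNC_PREWRITE before the run of rs400_gart_set_page() calls and once with BUS_DMASYNC_POSTWRITE after, replacing the per-entry pair.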

View File

@ -638,9 +638,22 @@ static void rs600_gart_fini(struct radeon_device *rdev)
#define R600_PTE_READABLE (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
#ifdef __NetBSD__
# define __iomem volatile
# define writeq fake_writeq
static inline void
fake_writeq(uint64_t v, void __iomem *ptr)
{
membar_producer();
*(uint64_t __iomem *)ptr = v;
}
#endif
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
void __iomem *ptr = (void *)rdev->gart.ptr;
void __iomem *ptr = rdev->gart.ptr;
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
@ -648,10 +661,15 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
addr = addr & 0xFFFFFFFFFFFFF000ULL;
addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
writeq(addr, ptr + (i * 8));
writeq(addr, (uint8_t __iomem *)ptr + (i * 8));
return 0;
}
#ifdef __NetBSD__
# undef writeq
# undef __iomem
#endif
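
fake_writeq() puts membar_producer() before the 64-bit store, mirroring the ordering Linux drivers expect of writeq(): stores issued earlier — notably the contents of the page being mapped — become visible before the PTE that publishes it. A sketch of the intended ordering (fill_page, pte, and gtt are hypothetical):

	fill_page(page);		/* page contents first... */
	fake_writeq(pte, &gtt[i]);	/* ...then publish the entry */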
int rs600_irq_set(struct radeon_device *rdev)
{
uint32_t tmp = 0;
@ -783,8 +801,15 @@ int rs600_irq_process(struct radeon_device *rdev)
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
if (rdev->irq.crtc_vblank_int[0]) {
drm_handle_vblank(rdev->ddev, 0);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
@ -792,8 +817,15 @@ int rs600_irq_process(struct radeon_device *rdev)
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
if (rdev->irq.crtc_vblank_int[1]) {
drm_handle_vblank(rdev->ddev, 1);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);

View File

@ -976,6 +976,7 @@ u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low)
return pi->bootup_uma_clk;
}
#ifdef CONFIG_DEBUG_FS
void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
@ -999,6 +1000,7 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n",
ps->sclk_high, ps->max_voltage);
}
#endif
int rs780_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level)

View File

@ -2024,6 +2024,7 @@ void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
r600_dpm_print_ps_status(rdev, rps);
}
#ifdef CONFIG_DEBUG_FS
void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
@ -2048,6 +2049,7 @@ void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
current_index, pl->sclk, pl->mclk, pl->vddc);
}
}
#endif
void rv6xx_dpm_fini(struct radeon_device *rdev)
{

View File

@ -1625,7 +1625,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
mc->vram_start = mc->gtt_end + 1;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
dev_info(rdev->dev, "VRAM: %"PRIu64"M 0x%08"PRIX64" - 0x%08"PRIX64" (%"PRIu64"M used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
} else {
@ -1971,6 +1971,7 @@ void rv770_fini(struct radeon_device *rdev)
static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
{
#ifndef __NetBSD__ /* XXX radeon pcie */
u32 link_width_cntl, lanes, speed_cntl, tmp;
u16 link_cntl2;
@ -2048,4 +2049,5 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
#endif
}

View File

@ -2463,6 +2463,7 @@ void rv770_dpm_print_power_state(struct radeon_device *rdev,
r600_dpm_print_ps_status(rdev, rps);
}
#ifdef CONFIG_DEBUG_FS
void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
@ -2492,6 +2493,7 @@ void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
}
}
}
#endif
void rv770_dpm_fini(struct radeon_device *rdev)
{

View File

@ -1570,7 +1570,11 @@ static int si_init_microcode(struct radeon_device *rdev)
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
mc2_req_size = TAHITI_MC_UCODE_SIZE * 4;
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(TAHITI_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_PITCAIRN:
chip_name = "PITCAIRN";
@ -1581,7 +1585,11 @@ static int si_init_microcode(struct radeon_device *rdev)
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4;
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(PITCAIRN_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_VERDE:
chip_name = "VERDE";
@ -1592,7 +1600,11 @@ static int si_init_microcode(struct radeon_device *rdev)
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
mc2_req_size = VERDE_MC_UCODE_SIZE * 4;
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(VERDE_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_OLAND:
chip_name = "OLAND";
@ -1602,7 +1614,11 @@ static int si_init_microcode(struct radeon_device *rdev)
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(OLAND_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
#endif
break;
case CHIP_HAINAN:
chip_name = "HAINAN";
@ -1612,7 +1628,11 @@ static int si_init_microcode(struct radeon_device *rdev)
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
#ifdef __NetBSD__ /* XXX ALIGN means something else. */
smc_req_size = round_up(HAINAN_SMC_UCODE_SIZE, 4);
#else
smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
#endif
break;
default: BUG();
}
@ -2896,7 +2916,7 @@ static void si_setup_rb(struct radeon_device *rdev,
static void si_gpu_init(struct radeon_device *rdev)
{
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 mc_shared_chmap __unused, mc_arb_ramcfg;
u32 sx_debug_1;
u32 hdp_host_path_cntl;
u32 tmp;
@ -4544,7 +4564,7 @@ static void si_vm_decode_fault(struct radeon_device *rdev,
u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
char *block;
const char *block;
if (rdev->family == CHIP_TAHITI) {
switch (mc_id) {
@ -6147,8 +6167,15 @@ restart_ih:
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[0]) {
drm_handle_vblank(rdev->ddev, 0);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
@ -6173,8 +6200,15 @@ restart_ih:
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[1]) {
drm_handle_vblank(rdev->ddev, 1);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
@ -6199,8 +6233,15 @@ restart_ih:
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[2]) {
drm_handle_vblank(rdev->ddev, 2);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[2]))
radeon_crtc_handle_flip(rdev, 2);
@ -6225,8 +6266,15 @@ restart_ih:
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[3]) {
drm_handle_vblank(rdev->ddev, 3);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[3]))
radeon_crtc_handle_flip(rdev, 3);
@ -6251,8 +6299,15 @@ restart_ih:
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[4]) {
drm_handle_vblank(rdev->ddev, 4);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[4]))
radeon_crtc_handle_flip(rdev, 4);
@ -6277,8 +6332,15 @@ restart_ih:
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[5]) {
drm_handle_vblank(rdev->ddev, 5);
#ifdef __NetBSD__
spin_lock(&rdev->irq.vblank_lock);
rdev->pm.vblank_sync = true;
DRM_SPIN_WAKEUP_ONE(&rdev->irq.vblank_queue, &rdev->irq.vblank_lock);
spin_unlock(&rdev->irq.vblank_lock);
#else
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
#endif
}
if (atomic_read(&rdev->irq.pflip[5]))
radeon_crtc_handle_flip(rdev, 5);
@ -6942,6 +7004,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
static void si_pcie_gen3_enable(struct radeon_device *rdev)
{
#ifndef __NetBSD__ /* XXX radeon pcie */
struct pci_dev *root = rdev->pdev->bus->self;
int bridge_pos, gpu_pos;
u32 speed_cntl, mask, current_data_rate;
@ -7095,6 +7158,7 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
break;
udelay(1);
}
#endif
}
static void si_program_aspm(struct radeon_device *rdev)
@ -7226,13 +7290,17 @@ static void si_program_aspm(struct radeon_device *rdev)
WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
if (!disable_clkreq) {
#ifndef __NetBSD__ /* XXX radeon pcie */
struct pci_dev *root = rdev->pdev->bus->self;
u32 lnkcap;
#endif
clk_req_support = false;
#ifndef __NetBSD__ /* XXX radeon pcie */
pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
clk_req_support = true;
#endif
} else {
clk_req_support = false;
}

View File

@ -29,6 +29,7 @@
#include "atom.h"
#include <linux/math64.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
@ -3511,7 +3512,7 @@ static int si_notify_smc_display_change(struct radeon_device *rdev,
static void si_program_response_times(struct radeon_device *rdev)
{
u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
u32 voltage_response_time, backbias_response_time __unused, acpi_delay_time, vbi_time_out;
u32 vddc_dly, acpi_dly, vbi_dly;
u32 reference_clock;
@ -6318,7 +6319,9 @@ int si_dpm_init(struct radeon_device *rdev)
struct si_power_info *si_pi;
struct atom_clock_dividers dividers;
int ret;
#ifndef __NetBSD__ /* XXX radeon pcie */
u32 mask;
#endif
si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
if (si_pi == NULL)
@ -6328,11 +6331,13 @@ int si_dpm_init(struct radeon_device *rdev)
eg_pi = &ni_pi->eg;
pi = &eg_pi->rv7xx;
#ifndef __NetBSD__ /* XXX radeon pcie */
ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
if (ret)
si_pi->sys_pcie_mask = 0;
else
si_pi->sys_pcie_mask = mask;
#endif
si_pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
si_pi->boot_pcie_gen = si_get_current_pcie_speed(rdev);
@ -6470,6 +6475,7 @@ void si_dpm_fini(struct radeon_device *rdev)
r600_free_extended_power_table(rdev);
}
#ifdef CONFIG_DEBUG_FS
void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
@ -6490,3 +6496,4 @@ void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
}
}
#endif

View File

@ -1804,6 +1804,7 @@ void sumo_dpm_print_power_state(struct radeon_device *rdev,
r600_dpm_print_ps_status(rdev, rps);
}
#ifdef CONFIG_DEBUG_FS
void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
@ -1831,6 +1832,7 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev
sumo_convert_voltage_index_to_value(rdev, pl->vddc_index));
}
}
#endif
void sumo_dpm_fini(struct radeon_device *rdev)
{

View File

@ -1924,6 +1924,7 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev,
r600_dpm_print_ps_status(rdev, rps);
}
#ifdef CONFIG_DEBUG_FS
void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
@ -1945,6 +1946,7 @@ void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *r
trinity_convert_voltage_index_to_value(rdev, pl->vddc_index));
}
}
#endif
void trinity_dpm_fini(struct radeon_device *rdev)
{

View File

@ -30,6 +30,11 @@
#define pr_fmt(fmt) "[TTM] " fmt
#ifdef __NetBSD__
#include <sys/types.h>
#include <uvm/uvm_extern.h>
#endif
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
@ -40,18 +45,24 @@
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/printk.h>
#include <linux/export.h>
#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_DEBUG(fmt, arg...) do {} while (0)
#define TTM_BO_HASH_ORDER 13
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
#ifndef __NetBSD__
static void ttm_bo_global_kobj_release(struct kobject *kobj);
#endif
#ifndef __NetBSD__ /* XXX sysfs */
static struct attribute ttm_bo_count = {
.name = "bo_count",
.mode = S_IRUGO
};
#endif
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
@ -73,7 +84,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
pr_err(" use_type: %d\n", man->use_type);
pr_err(" flags: 0x%08X\n", man->flags);
pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset);
pr_err(" size: %llu\n", man->size);
pr_err(" size: %"PRIu64"\n", man->size);
pr_err(" available_caching: 0x%08X\n", man->available_caching);
pr_err(" default_caching: 0x%08X\n", man->default_caching);
if (mem_type != TTM_PL_SYSTEM)
@ -99,6 +110,7 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
}
}
#ifndef __NetBSD__ /* XXX sysfs */
static ssize_t ttm_bo_global_show(struct kobject *kobj,
struct attribute *attr,
char *buffer)
@ -124,6 +136,7 @@ static struct kobj_type ttm_bo_glob_kobj_type = {
.sysfs_ops = &ttm_bo_global_ops,
.default_attrs = ttm_bo_global_attrs
};
#endif /* __NetBSD__ */
static inline uint32_t ttm_bo_type_flags(unsigned type)
@ -138,8 +151,8 @@ static void ttm_bo_release_list(struct kref *list_kref)
struct ttm_bo_device *bdev = bo->bdev;
size_t acc_size = bo->acc_size;
BUG_ON(atomic_read(&bo->list_kref.refcount));
BUG_ON(atomic_read(&bo->kref.refcount));
BUG_ON(kref_referenced_p(&bo->list_kref));
BUG_ON(kref_referenced_p(&bo->kref));
BUG_ON(atomic_read(&bo->cpu_writers));
BUG_ON(bo->sync_obj != NULL);
BUG_ON(bo->mem.mm_node != NULL);
@ -151,7 +164,11 @@ static void ttm_bo_release_list(struct kref *list_kref)
atomic_dec(&bo->glob->bo_count);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
#ifdef __NetBSD__
linux_mutex_destroy(&bo->wu_mutex);
#else
mutex_destroy(&bo->wu_mutex);
#endif
if (bo->destroy)
bo->destroy(bo);
else {
@ -455,7 +472,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
driver->sync_obj_unref(&sync_obj);
}
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
}
/**
@ -617,7 +634,7 @@ static void ttm_bo_delayed_workqueue(struct work_struct *work)
if (ttm_bo_delayed_delete(bdev, false)) {
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
}
}
@ -628,7 +645,13 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
#ifdef __NetBSD__
uvm_obj_destroy(&bo->uvmobj, true);
#endif
drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
#ifdef __NetBSD__
drm_vma_node_destroy(&bo->vma_node);
#endif
ttm_mem_io_lock(man, false);
ttm_mem_io_free_vm(bo);
ttm_mem_io_unlock(man);
@ -655,7 +678,7 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
if (resched)
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
@ -1125,7 +1148,11 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
INIT_LIST_HEAD(&bo->io_reserve_lru);
#ifdef __NetBSD__
linux_mutex_init(&bo->wu_mutex);
#else
mutex_init(&bo->wu_mutex);
#endif
bo->bdev = bdev;
bo->glob = bdev->glob;
bo->type = type;
@ -1145,7 +1172,12 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->resv = &bo->ttm_resv;
reservation_object_init(bo->resv);
atomic_inc(&bo->glob->bo_count);
#ifdef __NetBSD__
drm_vma_node_init(&bo->vma_node);
uvm_obj_init(&bo->uvmobj, bdev->driver->ttm_uvm_ops, true, 1);
#else
drm_vma_node_reset(&bo->vma_node);
#endif
ret = ttm_bo_check_placement(bo, placement);
@ -1286,6 +1318,12 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
ret = (*man->func->takedown)(man);
}
#ifdef __NetBSD__
linux_mutex_destroy(&man->io_reserve_mutex);
#else
mutex_destroy(&man->io_reserve_mutex);
#endif
return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);
@ -1319,7 +1357,11 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
BUG_ON(man->has_type);
man->io_reserve_fastpath = true;
man->use_io_reserve_lru = false;
#ifdef __NetBSD__
linux_mutex_init(&man->io_reserve_mutex);
#else
mutex_init(&man->io_reserve_mutex);
#endif
INIT_LIST_HEAD(&man->io_reserve_lru);
ret = bdev->driver->init_mem_type(bdev, type, man);
@ -1343,6 +1385,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
}
EXPORT_SYMBOL(ttm_bo_init_mm);
#ifndef __NetBSD__
static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
struct ttm_bo_global *glob =
@ -1350,15 +1393,25 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
__free_page(glob->dummy_read_page);
mutex_destroy(&glob->device_list_mutex);
kfree(glob);
}
#endif
void ttm_bo_global_release(struct drm_global_reference *ref)
{
struct ttm_bo_global *glob = ref->object;
#ifdef __NetBSD__
ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
BUG_ON(glob->dummy_read_page != NULL);
spin_lock_destroy(&glob->lru_lock);
linux_mutex_destroy(&glob->device_list_mutex);
kfree(glob);
#else
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
#endif
}
EXPORT_SYMBOL(ttm_bo_global_release);
@ -1369,15 +1422,25 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
struct ttm_bo_global *glob = ref->object;
int ret;
#ifdef __NetBSD__
linux_mutex_init(&glob->device_list_mutex);
#else
mutex_init(&glob->device_list_mutex);
#endif
spin_lock_init(&glob->lru_lock);
glob->mem_glob = bo_ref->mem_glob;
#ifdef __NetBSD__
/* Only used by agp back end, will fix there. */
/* XXX Fix agp back end to DTRT. */
glob->dummy_read_page = NULL;
#else
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
ret = -ENOMEM;
goto out_no_drp;
}
#endif
INIT_LIST_HEAD(&glob->swap_lru);
INIT_LIST_HEAD(&glob->device_list);
@ -1391,14 +1454,20 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
atomic_set(&glob->bo_count, 0);
#ifdef __NetBSD__
ret = 0;
#else
ret = kobject_init_and_add(
&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
if (unlikely(ret != 0))
kobject_put(&glob->kobj);
#endif
return ret;
out_no_shrink:
#ifndef __NetBSD__
__free_page(glob->dummy_read_page);
out_no_drp:
#endif
kfree(glob);
return ret;
}
@ -1451,7 +1520,12 @@ EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
#ifdef __NetBSD__
bus_space_tag_t memt,
bus_dma_tag_t dmat,
#else
struct address_space *mapping,
#endif
uint64_t file_page_offset,
bool need_dma32)
{
@ -1473,7 +1547,12 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
0x10000000);
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
#ifdef __NetBSD__
bdev->memt = memt;
bdev->dmat = dmat;
#else
bdev->dev_mapping = mapping;
#endif
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
bdev->val_seq = 0;
@ -1511,9 +1590,27 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
#ifndef __NetBSD__
struct ttm_bo_device *bdev = bo->bdev;
#endif
#ifdef __NetBSD__
if (bo->mem.bus.is_iomem) {
/*
* XXX OOPS! NetBSD doesn't have a way to enumerate
* and remove the virtual mappings for device addresses
* or for a uvm object.
*/
} else if (bo->ttm != NULL) {
unsigned i;
for (i = 0; i < bo->ttm->num_pages; i++)
pmap_page_protect(&bo->ttm->pages[i]->p_vmp,
VM_PROT_NONE);
}
#else
drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
#endif
ttm_mem_io_free_vm(bo);
}

View File

@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/export.h>
/**
* Currently we use a spinlock for the lock, but a mutex *may* be

View File

@ -37,6 +37,11 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/export.h>
#ifdef __NetBSD__ /* PMAP_* caching flags for ttm_io_prot */
#include <uvm/uvm_pmap.h>
#endif
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
@ -204,6 +209,23 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m
if (mem->bus.addr) {
addr = mem->bus.addr;
} else {
#ifdef __NetBSD__
const bus_addr_t bus_addr = (mem->bus.base + mem->bus.offset);
int flags = BUS_SPACE_MAP_LINEAR;
if (ISSET(mem->placement, TTM_PL_FLAG_WC))
flags |= BUS_SPACE_MAP_PREFETCHABLE;
/* XXX errno NetBSD->Linux */
ret = -bus_space_map(bdev->memt, bus_addr, mem->bus.size,
flags, &mem->bus.memh);
if (ret) {
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
ttm_mem_io_unlock(man);
return ret;
}
addr = bus_space_vaddr(bdev->memt, mem->bus.memh);
#else
if (mem->placement & TTM_PL_FLAG_WC)
addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
@ -214,6 +236,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m
ttm_mem_io_unlock(man);
return -ENOMEM;
}
#endif
}
*virtual = addr;
return 0;
@ -227,12 +250,40 @@ static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *
man = &bdev->man[mem->mem_type];
if (virtual && mem->bus.addr == NULL)
#ifdef __NetBSD__
bus_space_unmap(bdev->memt, mem->bus.memh, mem->bus.size);
#else
iounmap(virtual);
#endif
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
ttm_mem_io_unlock(man);
}
#ifdef __NetBSD__
# define ioread32 fake_ioread32
# define iowrite32 fake_iowrite32
static inline uint32_t
fake_ioread32(const volatile uint32_t *p)
{
uint32_t v;
v = *p;
__insn_barrier(); /* XXX */
return v;
}
static inline void
fake_iowrite32(uint32_t v, volatile uint32_t *p)
{
__insn_barrier(); /* XXX */
*p = v;
}
#endif
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
uint32_t *dstP =
@ -246,6 +297,11 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
return 0;
}
#ifdef __NetBSD__
# undef ioread32
# undef iowrite32
#endif
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
pgprot_t prot)
@ -275,7 +331,11 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
kunmap_atomic(dst);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
#ifdef __NetBSD__
vunmap(dst, 1);
#else
vunmap(dst);
#endif
else
kunmap(d);
#endif
@ -463,7 +523,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
INIT_LIST_HEAD(&fbo->io_reserve_lru);
#ifdef __NetBSD__
drm_vma_node_init(&fbo->vma_node);
#else
drm_vma_node_reset(&fbo->vma_node);
#endif
atomic_set(&fbo->cpu_writers, 0);
spin_lock(&bdev->fence_lock);
@ -487,6 +551,19 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#ifdef __NetBSD__
switch (caching_flags & TTM_PL_MASK_CACHING) {
case TTM_PL_FLAG_CACHED:
return (tmp | PMAP_WRITE_BACK);
case TTM_PL_FLAG_WC:
return (tmp | PMAP_WRITE_COMBINE);
case TTM_PL_FLAG_UNCACHED:
return (tmp | PMAP_NOCACHE);
default:
panic("invalid caching flags: %"PRIx32"\n",
(caching_flags & TTM_PL_MASK_CACHING));
}
#else
#if defined(__i386__) || defined(__x86_64__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
@ -511,6 +588,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
tmp = pgprot_noncached(tmp);
#endif
return tmp;
#endif
}
EXPORT_SYMBOL(ttm_io_prot);
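
On NetBSD the translation is thus a straight table from TTM caching flags to PMAP_* bits, and the result is handed to pmap_kenter_pa() as its flags argument — the shape the kmap path further down uses:

	pgprot_t prot;

	prot = ttm_io_prot(mem->placement, VM_PROT_READ | VM_PROT_WRITE);
	pmap_kenter_pa(vaddr, page_to_phys(page),
	    VM_PROT_READ | VM_PROT_WRITE, prot);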
@ -526,12 +604,31 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
} else {
map->bo_kmap_type = ttm_bo_map_iomap;
#ifdef __NetBSD__
{
bus_addr_t addr;
int flags = BUS_SPACE_MAP_LINEAR;
int ret;
addr = (bo->mem.bus.base + bo->mem.bus.offset + offset);
if (ISSET(mem->placement, TTM_PL_FLAG_WC))
flags |= BUS_SPACE_MAP_PREFETCHABLE;
/* XXX errno NetBSD->Linux */
ret = -bus_space_map(bo->bdev->memt, addr, size, flags,
&map->u.io.memh);
if (ret)
return ret;
map->u.io.size = size;
map->virtual = bus_space_vaddr(bo->bdev->memt, map->u.io.memh);
}
#else
if (mem->placement & TTM_PL_FLAG_WC)
map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
else
map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
#endif
}
return (!map->virtual) ? -ENOMEM : 0;
}
@ -541,8 +638,13 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
struct ttm_mem_reg *mem = &bo->mem;
pgprot_t prot;
struct ttm_tt *ttm = bo->ttm;
#ifdef __NetBSD__
unsigned i;
vaddr_t vaddr;
#endif
int ret;
BUG_ON(!ttm);
@ -553,6 +655,30 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
return ret;
}
#ifdef __NetBSD__
/*
* Can't use uvm_map here because it provides no way to pass
* along the cacheability flags. So we'll uvm_km_alloc
* ourselves some KVA and then pmap_kenter_pa directly.
*/
KASSERT(num_pages <= ttm->num_pages);
KASSERT(start_page <= (ttm->num_pages - num_pages));
prot = ttm_io_prot(mem->placement, (VM_PROT_READ | VM_PROT_WRITE));
vaddr = uvm_km_alloc(kernel_map, (num_pages << PAGE_SHIFT), PAGE_SIZE,
UVM_KMF_WIRED | UVM_KMF_VAONLY | UVM_KMF_CANFAIL | UVM_KMF_WAITVA);
if (vaddr == 0)
return -ENOMEM;
for (i = 0; i < num_pages; i++)
pmap_kenter_pa(vaddr + i*PAGE_SIZE,
page_to_phys(ttm->pages[start_page + i]),
(VM_PROT_READ | VM_PROT_WRITE), prot);
pmap_update(pmap_kernel());
map->bo_kmap_type = ttm_bo_map_vmap;
map->u.uvm.vsize = (num_pages << PAGE_SHIFT);
map->virtual = (void *)vaddr;
return 0;
#else
if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
/*
* We're mapping a single page, and the desired
@ -575,6 +701,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
0, prot);
}
return (!map->virtual) ? -ENOMEM : 0;
#endif
}
int ttm_bo_kmap(struct ttm_buffer_object *bo,
@ -622,13 +749,28 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
return;
switch (map->bo_kmap_type) {
case ttm_bo_map_iomap:
#ifdef __NetBSD__
bus_space_unmap(bo->bdev->memt, map->u.io.memh,
map->u.io.size);
#else
iounmap(map->virtual);
#endif
break;
case ttm_bo_map_vmap:
#ifdef __NetBSD__
pmap_kremove((vaddr_t)map->virtual, map->u.uvm.vsize);
uvm_km_free(kernel_map, (vaddr_t)map->virtual,
map->u.uvm.vsize, UVM_KMF_VAONLY);
#else
vunmap(map->virtual);
#endif
break;
case ttm_bo_map_kmap:
#ifdef __NetBSD__
panic("ttm_bo_map_kmap does not exist in NetBSD");
#else
kunmap(map->page);
#endif
break;
case ttm_bo_map_premapped:
break;
@ -639,7 +781,9 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
ttm_mem_io_unlock(man);
map->virtual = NULL;
#ifndef __NetBSD__
map->page = NULL;
#endif
}
EXPORT_SYMBOL(ttm_bo_kunmap);

View File

@ -31,6 +31,7 @@
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/export.h>
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{

View File

@ -27,6 +27,7 @@
#define pr_fmt(fmt) "[TTM] " fmt
#include <drm/drmP.h>
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
@ -36,11 +37,15 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/export.h>
#define TTM_MEMORY_ALLOC_RETRIES 4
struct ttm_mem_zone {
#ifndef __NetBSD__
struct kobject kobj;
#endif
struct ttm_mem_global *glob;
const char *name;
uint64_t zone_mem;
@ -50,6 +55,7 @@ struct ttm_mem_zone {
uint64_t used_mem;
};
#ifndef __NetBSD__
static struct attribute ttm_mem_sys = {
.name = "zone_memory",
.mode = S_IRUGO
@ -177,6 +183,7 @@ static void ttm_mem_global_kobj_release(struct kobject *kobj)
static struct kobj_type ttm_mem_glob_kobj_type = {
.release = &ttm_mem_global_kobj_release,
};
#endif
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
bool from_wq, uint64_t extra)
@ -190,7 +197,11 @@ static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
if (from_wq)
target = zone->swap_limit;
#ifdef __NetBSD__
else if (DRM_SUSER())
#else
else if (capable(CAP_SYS_ADMIN))
#endif
target = zone->emer_mem;
else
target = zone->max_mem;
@ -247,7 +258,9 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
{
struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
uint64_t mem;
#ifndef __NetBSD__
int ret;
#endif
if (unlikely(!zone))
return -ENOMEM;
@ -263,12 +276,14 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
zone->used_mem = 0;
zone->glob = glob;
glob->zone_kernel = zone;
#ifndef __NetBSD__
ret = kobject_init_and_add(
&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
}
#endif
glob->zones[glob->num_zones++] = zone;
return 0;
}
@ -279,7 +294,9 @@ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
{
struct ttm_mem_zone *zone;
uint64_t mem;
#ifndef __NetBSD__
int ret;
#endif
if (si->totalhigh == 0)
return 0;
@ -299,12 +316,14 @@ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
zone->used_mem = 0;
zone->glob = glob;
glob->zone_highmem = zone;
#ifndef __NetBSD__
ret = kobject_init_and_add(
&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
}
#endif
glob->zones[glob->num_zones++] = zone;
return 0;
}
@ -314,7 +333,9 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
{
struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
uint64_t mem;
#ifndef __NetBSD__
int ret;
#endif
if (unlikely(!zone))
return -ENOMEM;
@ -346,12 +367,14 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
zone->used_mem = 0;
zone->glob = glob;
glob->zone_dma32 = zone;
#ifndef __NetBSD__
ret = kobject_init_and_add(
&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
}
#endif
glob->zones[glob->num_zones++] = zone;
return 0;
}
@ -367,12 +390,14 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
spin_lock_init(&glob->lock);
glob->swap_queue = create_singlethread_workqueue("ttm_swap");
INIT_WORK(&glob->work, ttm_shrink_work);
#ifndef __NetBSD__
ret = kobject_init_and_add(
&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
if (unlikely(ret != 0)) {
kobject_put(&glob->kobj);
return ret;
}
#endif
si_meminfo(&si);
@ -416,11 +441,19 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
glob->swap_queue = NULL;
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
#ifdef __NetBSD__
kfree(zone);
#else
kobject_del(&zone->kobj);
kobject_put(&zone->kobj);
#endif
}
#ifdef __NetBSD__
kfree(glob);
#else
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
#endif
}
EXPORT_SYMBOL(ttm_mem_global_release);
@ -485,8 +518,13 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
if (single_zone && zone != single_zone)
continue;
#ifdef __NetBSD__
limit = DRM_SUSER() ?
zone->emer_mem : zone->max_mem;
#else
limit = (capable(CAP_SYS_ADMIN)) ?
zone->emer_mem : zone->max_mem;
#endif
if (zone->used_mem > limit)
goto out_unlock;

View File

@ -38,12 +38,14 @@
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/printk.h>
#include <drm/drm_cache.h>
#include <drm/drm_mem_util.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/bus_dma_hacks.h>
/**
* Allocates storage for pointers to the pages that back the ttm.
@ -56,8 +58,10 @@ static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
#ifndef __NetBSD__
ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
sizeof(*ttm->dma_address));
#endif
}
#ifdef CONFIG_X86
@ -65,6 +69,9 @@ static inline int ttm_tt_set_page_caching(struct page *p,
enum ttm_caching_state c_old,
enum ttm_caching_state c_new)
{
#ifdef __NetBSD__
return 0;
#else
int ret = 0;
if (PageHighMem(p))
@ -85,6 +92,7 @@ static inline int ttm_tt_set_page_caching(struct page *p,
ret = set_pages_uc(p, 1);
return ret;
#endif
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
@ -175,7 +183,11 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
ttm->swap_storage)
#ifdef __NetBSD__
uao_detach(ttm->swap_storage);
#else
fput(ttm->swap_storage);
#endif
ttm->swap_storage = NULL;
ttm->func->destroy(ttm);
@ -192,7 +204,13 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
ttm->page_flags = page_flags;
ttm->dummy_read_page = dummy_read_page;
ttm->state = tt_unpopulated;
#ifdef __NetBSD__
ttm->swap_storage = uao_create(roundup2(size, PAGE_SIZE), 0);
uao_set_pgfl(ttm->swap_storage, bus_dmamem_pgfl(bdev->dmat));
#else
ttm->swap_storage = NULL;
#endif
TAILQ_INIT(&ttm->pglist);
ttm_tt_alloc_page_directory(ttm);
if (!ttm->pages) {
@ -206,6 +224,8 @@ EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_fini(struct ttm_tt *ttm)
{
uao_detach(ttm->swap_storage);
ttm->swap_storage = NULL;
drm_free_large(ttm->pages);
ttm->pages = NULL;
}
@ -224,16 +244,52 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
ttm->page_flags = page_flags;
ttm->dummy_read_page = dummy_read_page;
ttm->state = tt_unpopulated;
#ifdef __NetBSD__
ttm->swap_storage = uao_create(roundup2(size, PAGE_SIZE), 0);
uao_set_pgfl(ttm->swap_storage, bus_dmamem_pgfl(bdev->dmat));
#else
ttm->swap_storage = NULL;
#endif
TAILQ_INIT(&ttm->pglist);
INIT_LIST_HEAD(&ttm_dma->pages_list);
ttm_dma_tt_alloc_page_directory(ttm_dma);
#ifdef __NetBSD__
{
int error;
if (ttm->num_pages > (SIZE_MAX /
MAX(sizeof(ttm_dma->dma_segs[0]), PAGE_SIZE))) {
error = ENOMEM;
goto fail0;
}
ttm_dma->dma_segs = kmem_alloc((ttm->num_pages *
sizeof(ttm_dma->dma_segs[0])), KM_SLEEP);
error = bus_dmamap_create(ttm->bdev->dmat,
(ttm->num_pages * PAGE_SIZE), ttm->num_pages, PAGE_SIZE, 0,
BUS_DMA_WAITOK, &ttm_dma->dma_address);
if (error)
goto fail1;
return 0;
fail2: __unused
bus_dmamap_destroy(ttm->bdev->dmat, ttm_dma->dma_address);
fail1: kmem_free(ttm_dma->dma_segs, (ttm->num_pages *
sizeof(ttm_dma->dma_segs[0])));
fail0: KASSERT(error);
ttm_tt_destroy(ttm);
/* XXX errno NetBSD->Linux */
return -error;
}
#else
if (!ttm->pages || !ttm_dma->dma_address) {
ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
return 0;
#endif
}
EXPORT_SYMBOL(ttm_dma_tt_init);
@ -241,10 +297,17 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
uao_detach(ttm->swap_storage);
drm_free_large(ttm->pages);
ttm->pages = NULL;
#ifdef __NetBSD__
bus_dmamap_destroy(ttm->bdev->dmat, ttm_dma->dma_address);
kmem_free(ttm_dma->dma_segs, (ttm->num_pages *
sizeof(ttm_dma->dma_segs[0])));
#else
drm_free_large(ttm_dma->dma_address);
ttm_dma->dma_address = NULL;
#endif
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
@ -285,6 +348,30 @@ EXPORT_SYMBOL(ttm_tt_bind);
int ttm_tt_swapin(struct ttm_tt *ttm)
{
#ifdef __NetBSD__
struct uvm_object *uobj = ttm->swap_storage;
struct vm_page *page;
unsigned i;
int error;
KASSERT(uobj != NULL);
error = uvm_obj_wirepages(uobj, 0, (ttm->num_pages << PAGE_SHIFT),
&ttm->pglist);
if (error)
/* XXX errno NetBSD->Linux */
return -error;
i = 0;
TAILQ_FOREACH(page, &ttm->pglist, pageq.queue) {
KASSERT(i < ttm->num_pages);
KASSERT(ttm->pages[i] == NULL);
ttm->pages[i] = container_of(page, struct page, p_vmp);
i++;
}
KASSERT(i == ttm->num_pages);
/* Success! */
return 0;
#else
struct address_space *swap_space;
struct file *swap_storage;
struct page *from_page;
@ -319,8 +406,33 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
return 0;
out_err:
return ret;
#endif
}
#ifdef __NetBSD__
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
struct uvm_object *uobj = ttm->swap_storage;
unsigned i;
KASSERT((ttm->state == tt_unbound) || (ttm->state == tt_unpopulated));
KASSERT(ttm->caching_state == tt_cached);
KASSERT(uobj != NULL);
/*
* XXX Dunno what this persistent swap storage business is all
* about, but I see nothing using it and it doesn't make sense.
*/
KASSERT(persistent_swap_storage == NULL);
uvm_obj_unwirepages(uobj, 0, (ttm->num_pages << PAGE_SHIFT));
for (i = 0; i < ttm->num_pages; i++)
ttm->pages[i] = NULL;
/* Success! */
return 0;
}
#else
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
struct address_space *swap_space;
@ -374,9 +486,11 @@ out_err:
return ret;
}
#endif
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
#ifndef __NetBSD__
pgoff_t i;
struct page **page = ttm->pages;
@ -387,6 +501,7 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
(*page)->mapping = NULL;
(*page++)->index = 0;
}
#endif
}
void ttm_tt_unpopulate(struct ttm_tt *ttm)

View File

@ -31,8 +31,18 @@
#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_
#ifdef __NetBSD__
#include <sys/types.h>
#include <sys/param.h>
#include <sys/mutex.h> /* XXX ugh include order botch */
#include <uvm/uvm_object.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_prot.h>
#endif
#include <drm/drm_hashtab.h>
#include <drm/drm_vma_manager.h>
#include <linux/atomic.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/wait.h>
@ -88,6 +98,9 @@ struct ttm_bus_placement {
bool is_iomem;
bool io_reserved_vm;
uint64_t io_reserved_count;
#ifdef __NetBSD__
bus_space_handle_t memh;
#endif
};
@ -194,6 +207,9 @@ struct ttm_buffer_object {
void (*destroy) (struct ttm_buffer_object *);
unsigned long num_pages;
size_t acc_size;
#ifdef __NetBSD__
struct uvm_object uvmobj;
#endif
/**
* Members not needing protection.
@ -270,7 +286,19 @@ struct ttm_buffer_object {
#define TTM_BO_MAP_IOMEM_MASK 0x80
struct ttm_bo_kmap_obj {
void *virtual;
#ifdef __NetBSD__
union {
struct {
bus_space_handle_t memh;
bus_size_t size;
} io;
struct {
vsize_t vsize;
} uvm;
} u;
#else
struct page *page;
#endif
enum {
ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
ttm_bo_map_vmap = 2,
@ -650,6 +678,19 @@ extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
#ifdef __NetBSD__
/* XXX ttm_fbdev_mmap? */
extern void ttm_bo_uvm_reference(struct uvm_object *);
extern void ttm_bo_uvm_detach(struct uvm_object *);
extern int ttm_bo_uvm_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
int, int, vm_prot_t, int);
extern int ttm_bo_mmap_object(struct ttm_bo_device *, off_t, size_t, vm_prot_t,
struct uvm_object **, voff_t *, struct file *);
#else
/**
* ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
*
@ -678,6 +719,8 @@ extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev);
#endif /* __NetBSD__ */
/**
* ttm_bo_io
*

View File

@ -34,6 +34,7 @@
#include <ttm/ttm_memory.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_mm.h>
#include <drm/drm_global.h>
#include <drm/drm_vma_manager.h>
@ -41,6 +42,7 @@
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/reservation.h>
#include <asm/page.h>
struct ttm_backend_func {
/**
@ -113,14 +115,19 @@ enum ttm_caching_state {
struct ttm_tt {
struct ttm_bo_device *bdev;
struct ttm_backend_func *func;
const struct ttm_backend_func *func;
struct page *dummy_read_page;
struct page **pages;
uint32_t page_flags;
unsigned long num_pages;
struct sg_table *sg; /* for SG objects via dma-buf */
struct ttm_bo_global *glob;
#ifdef __NetBSD__
struct uvm_object *swap_storage;
struct pglist pglist;
#else
struct file *swap_storage;
#endif
enum ttm_caching_state caching_state;
enum {
tt_bound,
@ -142,7 +149,12 @@ struct ttm_tt {
*/
struct ttm_dma_tt {
struct ttm_tt ttm;
#ifdef __NetBSD__
bus_dma_segment_t *dma_segs;
bus_dmamap_t dma_address;
#else
dma_addr_t *dma_address;
#endif
struct list_head pages_list;
};
@ -453,6 +465,10 @@ struct ttm_bo_driver {
*/
int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
#ifdef __NetBSD__
const struct uvm_pagerops *ttm_uvm_ops;
#endif
};
/**
@ -484,7 +500,9 @@ struct ttm_bo_global {
* Constant after init.
*/
#ifndef __NetBSD__
struct kobject kobj;
#endif
struct ttm_mem_global *mem_glob;
struct page *dummy_read_page;
struct ttm_mem_shrink shrink;
@ -556,7 +574,12 @@ struct ttm_bo_device {
* Protected by load / firstopen / lastclose /unload sync.
*/
#ifdef __NetBSD__
bus_space_tag_t memt;
bus_dma_tag_t dmat;
#else
struct address_space *dev_mapping;
#endif
/*
* Internal protection.
@ -759,7 +782,12 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
#ifdef __NetBSD__
bus_space_tag_t memt,
bus_dma_tag_t dmat,
#else
struct address_space *mapping,
#endif
uint64_t file_page_offset, bool need_dma32);
/**
@ -886,7 +914,7 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
{
int ret;
WARN_ON(!atomic_read(&bo->kref.refcount));
WARN_ON(!kref_referenced_p(&bo->kref));
ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket);
if (likely(ret == 0))
@ -911,7 +939,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
{
int ret = 0;
WARN_ON(!atomic_read(&bo->kref.refcount));
WARN_ON(!kref_referenced_p(&bo->kref));
if (interruptible)
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,

View File

@ -36,6 +36,8 @@
#include <linux/kobject.h>
#include <linux/mm.h>
struct page;
/**
* struct ttm_mem_shrink - callback to shrink TTM memory usage.
*
@ -75,7 +77,9 @@ struct ttm_mem_shrink {
#define TTM_MEM_MAX_ZONES 2
struct ttm_mem_zone;
struct ttm_mem_global {
#ifndef __NetBSD__
struct kobject kobj;
#endif
struct ttm_mem_shrink *shrink;
struct workqueue_struct *swap_queue;
struct work_struct work;

View File

@ -58,10 +58,12 @@ extern int ttm_pool_populate(struct ttm_tt *ttm);
*/
extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
#ifdef CONFIG_DEBUG_FS
/**
* Output the state of pools to debugfs file
*/
extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
#endif
#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
@ -75,10 +77,12 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
*/
void ttm_dma_page_alloc_fini(void);
#ifdef CONFIG_DEBUG_FS
/**
* Output the state of pools to debugfs file
*/
extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
#endif /* CONFIG_DEBUG_FS */
extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
@ -92,10 +96,12 @@ static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
static inline void ttm_dma_page_alloc_fini(void) { return; }
#ifdef CONFIG_DEBUG_FS
static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
return 0;
}
#endif
static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
struct device *dev)
{

View File

@ -1,4 +1,4 @@
# $NetBSD: files.drmkms,v 1.6 2014/07/16 20:56:25 riastradh Exp $
# $NetBSD: files.drmkms,v 1.7 2014/07/16 20:59:57 riastradh Exp $
include "external/bsd/drm2/linux/files.drmkms_linux"
@ -12,6 +12,7 @@ define drmkms: drmkms_linux, drmkms_i2c
defflag opt_drmkms.h DRMKMS_DEBUG
makeoptions drmkms CPPFLAGS+="-I$S/external/bsd/drm2/include"
makeoptions drmkms_ttm CPPFLAGS+="-I$S/external/bsd/drm2/include/drm"
makeoptions drmkms CPPFLAGS+="-I$S/external/bsd/drm2/dist"
makeoptions drmkms CPPFLAGS+="-I$S/external/bsd/drm2/dist/include"
makeoptions drmkms CPPFLAGS+="-I$S/external/bsd/drm2/dist/include/drm"
@ -64,3 +65,28 @@ file external/bsd/drm2/drm/drm_vma_manager.c drmkms
file external/bsd/drm2/drm/drm_gem_vm.c drmkms
file external/bsd/drm2/drm/drm_module.c drmkms
# TTM, the texture and tiling manager.
define drmkms_ttm: drmkms
file external/bsd/drm2/ttm/ttm_agp_backend.c drmkms_ttm
file external/bsd/drm2/dist/drm/ttm/ttm_memory.c drmkms_ttm
file external/bsd/drm2/dist/drm/ttm/ttm_tt.c drmkms_ttm
file external/bsd/drm2/dist/drm/ttm/ttm_bo.c drmkms_ttm
file external/bsd/drm2/dist/drm/ttm/ttm_bo_util.c drmkms_ttm
file external/bsd/drm2/ttm/ttm_bo_vm.c drmkms_ttm
# Linux module goo.
#file external/bsd/drm2/dist/drm/ttm/ttm_module.c drmkms_ttm
# Used only by vmwgfx. Needs porting for rcu -> pserialize.
#file external/bsd/drm2/dist/drm/ttm/ttm_object.c drmkms_ttm
# Used only by vmwgfx. Needs porting. Does silly things like SIGKILL.
#file external/bsd/drm2/dist/drm/ttm/ttm_lock.c drmkms_ttm
file external/bsd/drm2/dist/drm/ttm/ttm_execbuf_util.c drmkms_ttm
# Replaced locally by ttm_bus_dma.c.
#file external/bsd/drm2/dist/drm/ttm/ttm_page_alloc.c drmkms_ttm
file external/bsd/drm2/dist/drm/ttm/ttm_bo_manager.c drmkms_ttm
# Replaced locally by ttm_bus_dma.c.
#file external/bsd/drm2/dist/drm/ttm/ttm_page_alloc_dma.c drmkms_ttm
file external/bsd/drm2/ttm/ttm_bus_dma.c drmkms_ttm

View File

@ -1,4 +1,4 @@
/* $NetBSD: byteorder.h,v 1.2 2014/03/18 18:20:42 riastradh Exp $ */
/* $NetBSD: byteorder.h,v 1.3 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -48,4 +48,11 @@
#define be32_to_cpu be32toh
#define be64_to_cpu be64toh
#define be16_to_cpup be16dec
#define be32_to_cpup be32dec
#define be64_to_cpup be64dec
#define le16_to_cpup le16dec
#define le32_to_cpup le32dec
#define le64_to_cpup le64dec
#endif /* _ASM_BYTEORDER_H_ */
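A minimal sketch of the new *_to_cpup mappings (illustrative buffer, not driver code): the NetBSD leNNdec/beNNdec functions decode from a byte pointer, so these macros also tolerate unaligned input.

#include <sys/endian.h>

static const uint8_t example_buf[4] = { 0x78, 0x56, 0x34, 0x12 };

static uint32_t
example_le32_to_cpup(void)
{
	/* le32_to_cpup -> le32dec: decode four little-endian bytes. */
	return le32_to_cpup(example_buf);	/* 0x12345678 */
}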

View File

@ -1,4 +1,4 @@
/* $NetBSD: page.h,v 1.1 2014/07/16 20:56:25 riastradh Exp $ */
/* $NetBSD: page.h,v 1.2 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@ -50,4 +50,10 @@ page_to_phys(struct page *page)
return VM_PAGE_TO_PHYS(&page->p_vmp);
}
static inline unsigned long
page_to_pfn(struct page *page)
{
return (page_to_phys(page) >> PAGE_SHIFT);
}
#endif /* _ASM_PAGE_H_ */

View File

@ -0,0 +1,44 @@
/* $NetBSD: unaligned.h,v 1.1 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Taylor R. Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _ASM_UNALIGNED_H_
#define _ASM_UNALIGNED_H_
#include <sys/endian.h>
static inline uint32_t
get_unaligned_le32(const void *p)
{
return le32dec(p);
}
#endif /* _ASM_UNALIGNED_H_ */
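Because le32dec assembles the value byte by byte, get_unaligned_le32 is safe at any offset, unlike dereferencing a cast pointer. A sketch with a hypothetical packed table as input:

static uint32_t
example_read_le32(const uint8_t *table, size_t offset)
{
	/* Safe even when (table + offset) is not 4-byte aligned. */
	return get_unaligned_le32(table + offset);
}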

View File

@ -1,4 +1,4 @@
/* $NetBSD: bus_dma_hacks.h,v 1.4 2014/07/16 20:56:25 riastradh Exp $ */
/* $NetBSD: bus_dma_hacks.h,v 1.5 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -34,11 +34,19 @@
#include <sys/cdefs.h>
#include <sys/bus.h>
#include <sys/kmem.h>
#include <sys/queue.h>
#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
/* XXX This is x86-specific bollocks. */
#if !defined(__i386__) && !defined(__x86_64__)
#error DRM GEM/TTM need new MI bus_dma APIs! Halp!
#endif
#include <x86/bus_private.h>
#include <x86/machdep.h>
static inline int
bus_dmamem_wire_uvm_object(bus_dma_tag_t tag, struct uvm_object *uobj,
@ -105,4 +113,62 @@ bus_dmamem_unwire_uvm_object(bus_dma_tag_t tag __unused,
uvm_obj_unwirepages(uobj, start, (start + size));
}
static inline int
bus_dmamem_pgfl(bus_dma_tag_t tag)
{
return x86_select_freelist(tag->_bounce_alloc_hi - 1);
}
static inline int
bus_dmamap_load_pglist(bus_dma_tag_t tag, bus_dmamap_t map,
struct pglist *pglist, bus_size_t size, int flags)
{
km_flag_t kmflags;
bus_dma_segment_t *segs;
int nsegs, seg;
struct vm_page *page;
int error;
nsegs = 0;
TAILQ_FOREACH(page, pglist, pageq.queue) {
if (nsegs == INT_MAX)
return ENOMEM;
#if defined(__i386__)
if (nsegs == (SIZE_MAX / sizeof(segs[0])))
return ENOMEM;
#endif
nsegs++;
}
KASSERT(nsegs <= (SIZE_MAX / sizeof(segs[0])));
switch (flags & (BUS_DMA_WAITOK|BUS_DMA_NOWAIT)) {
case BUS_DMA_WAITOK: kmflags = KM_SLEEP; break;
case BUS_DMA_NOWAIT: kmflags = KM_NOSLEEP; break;
default: panic("invalid flags: %d", flags);
}
segs = kmem_alloc((nsegs * sizeof(segs[0])), kmflags);
if (segs == NULL)
return ENOMEM;
seg = 0;
TAILQ_FOREACH(page, pglist, pageq.queue) {
segs[seg].ds_addr = VM_PAGE_TO_PHYS(page);
segs[seg].ds_len = PAGE_SIZE;
seg++;
}
error = bus_dmamap_load_raw(tag, map, segs, nsegs, size, flags);
if (error)
goto fail0;
/* Success! */
return 0;
fail1: __unused
bus_dmamap_unload(tag, map);
fail0: KASSERT(error);
kmem_free(segs, (nsegs * sizeof(segs[0])));
return error;
}
#endif /* _DRM_BUS_DMA_HACKS_H_ */
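A sketch of the calling sequence bus_dmamap_load_pglist is meant for, pairing it with uvm_obj_wirepages the way the TTM code does; the function and variable names here are hypothetical, and the map is assumed to have been created with at least npages segments of PAGE_SIZE each.

static int
example_wire_and_load(bus_dma_tag_t dmat, struct uvm_object *uobj,
    size_t npages, bus_dmamap_t map)
{
	struct pglist pglist;
	int error;

	TAILQ_INIT(&pglist);
	error = uvm_obj_wirepages(uobj, 0, (npages << PAGE_SHIFT), &pglist);
	if (error)
		return error;
	error = bus_dmamap_load_pglist(dmat, map, &pglist,
	    (npages << PAGE_SHIFT), BUS_DMA_WAITOK);
	if (error)
		uvm_obj_unwirepages(uobj, 0, (npages << PAGE_SHIFT));
	return error;
}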

View File

@ -1,4 +1,4 @@
/* $NetBSD: drm_wait_netbsd.h,v 1.3 2014/07/16 20:56:25 riastradh Exp $ */
/* $NetBSD: drm_wait_netbsd.h,v 1.4 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -48,6 +48,8 @@ typedef kcondvar_t drm_waitqueue_t;
#define DRM_HZ hz /* XXX Hurk... */
#define DRM_UDELAY DELAY
static inline void
DRM_INIT_WAITQUEUE(drm_waitqueue_t *q, const char *name)
{

View File

@ -0,0 +1,65 @@
/* $NetBSD: ttm_page_alloc.h,v 1.1 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Taylor R. Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DRM_TTM_TTM_PAGE_ALLOC_H_
#define _DRM_TTM_TTM_PAGE_ALLOC_H_
struct ttm_dma_tt;
struct ttm_mem_global;
int ttm_bus_dma_populate(struct ttm_dma_tt *);
void ttm_bus_dma_unpopulate(struct ttm_dma_tt *);
static inline int
ttm_page_alloc_init(struct ttm_mem_global *glob __unused,
unsigned max_pages __unused)
{
return 0;
}
static inline void
ttm_page_alloc_fini(void)
{
}
static inline int
ttm_dma_page_alloc_init(struct ttm_mem_global *glob __unused,
unsigned max_pages __unused)
{
return 0;
}
static inline void
ttm_dma_page_alloc_fini(void)
{
}
#endif /* _DRM_TTM_TTM_PAGE_ALLOC_H_ */

View File

@ -1,4 +1,4 @@
/* $NetBSD: atomic.h,v 1.5 2014/07/16 20:56:25 riastradh Exp $ */
/* $NetBSD: atomic.h,v 1.6 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -153,6 +153,42 @@ atomic_cmpxchg(atomic_t *atomic, int old, int new)
(unsigned)new);
}
struct atomic64 {
volatile uint64_t a_v;
};
typedef struct atomic64 atomic64_t;
static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
return a->a_v;
}
static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
a->a_v = v;
}
static inline void
atomic64_add(long long d, struct atomic64 *a)
{
atomic_add_64(&a->a_v, d);
}
static inline void
atomic64_sub(long long d, struct atomic64 *a)
{
atomic_add_64(&a->a_v, -d);
}
static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t v)
{
return atomic_swap_64(&a->a_v, v);
}
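These operations rely on atomic_add_64 and atomic_swap_64 from <sys/atomic.h>; atomic64_read and atomic64_set additionally assume that aligned 64-bit loads and stores are atomic, which holds on the amd64 configurations this targets. A hypothetical use as a statistics counter:

static atomic64_t example_bytes_moved;	/* hypothetical counter */

static uint64_t
example_account(uint64_t n)
{
	atomic64_add(n, &example_bytes_moved);
	return atomic64_read(&example_bytes_moved);
}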
static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{

View File

@ -1,4 +1,4 @@
/* $NetBSD: bitops.h,v 1.3 2014/07/16 20:56:25 riastradh Exp $ */
/* $NetBSD: bitops.h,v 1.4 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -47,6 +47,12 @@ hweight16(uint16_t n)
return popcount32(n);
}
static inline unsigned int
hweight32(uint32_t n)
{
return popcount32(n);
}
/*
* XXX Don't define BITS_PER_LONG as sizeof(unsigned long)*CHAR_BIT
* because that won't work in preprocessor conditionals, where it often
@ -90,4 +96,20 @@ __change_bit(unsigned int n, volatile unsigned long *p)
p[n / units] ^= (1UL << (n % units));
}
static inline unsigned long
find_first_zero_bit(const unsigned long *ptr, unsigned long nbits)
{
const size_t bpl = (CHAR_BIT * sizeof(*ptr));
const unsigned long *p;
unsigned long result = 0;
unsigned long last;
for (p = ptr; bpl < nbits; nbits -= bpl, p++, result += bpl) {
if (~*p)
break;
}
/*
 * Mask off the bits at and above nbits in the last word so they do
 * not read as spurious zero bits; __builtin_ctzl then yields the
 * 0-based index of the first zero bit.  If every bit in range is
 * set, report nbits, as Linux's find_first_zero_bit does.
 */
last = ~*p & ((nbits < bpl) ? ((1UL << nbits) - 1) : ~0UL);
if (last == 0)
return (result + nbits);
return (result + __builtin_ctzl(last));
}
#endif /* _LINUX_BITOPS_H_ */
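A quick illustration of the boundary semantics, assuming KASSERT is in scope (hypothetical test code, not part of the driver): a word with every bit set reports nbits, meaning no zero bit was found, to match Linux.

static void
example_find_first_zero_bit(void)
{
	unsigned long all_ones = ~0UL;
	unsigned long low_two = 0x3UL;	/* bits 0 and 1 set */

	KASSERT(find_first_zero_bit(&all_ones, BITS_PER_LONG) ==
	    BITS_PER_LONG);
	KASSERT(find_first_zero_bit(&low_two, BITS_PER_LONG) == 2);
}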

View File

@ -1,4 +1,4 @@
/* $NetBSD: device.h,v 1.3 2014/07/16 20:56:25 riastradh Exp $ */
/* $NetBSD: device.h,v 1.4 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -36,13 +36,16 @@
#include <sys/systm.h>
#define dev_err(DEV, FMT, ...) \
device_printf((DEV), "error: " FMT, ##__VA_ARGS__)
#define dev_info(DEV, FMT, ...) \
device_printf((DEV), "info: " FMT, ##__VA_ARGS__)
aprint_error_dev((DEV), "error: " FMT, ##__VA_ARGS__)
#define dev_warn(DEV, FMT, ...) \
device_printf((DEV), "warning: " FMT, ##__VA_ARGS__)
aprint_error_dev((DEV), "warning: " FMT, ##__VA_ARGS__)
#define dev_info(DEV, FMT, ...) \
aprint_normal_dev((DEV), "info: " FMT, ##__VA_ARGS__)
#define dev_dbg(DEV, FMT, ...) \
aprint_debug_dev((DEV), "debug: " FMT, ##__VA_ARGS__)
#define dev_name device_xname

View File

@ -1,4 +1,4 @@
/* $NetBSD: firmware.h,v 1.2 2014/03/18 18:20:43 riastradh Exp $ */
/* $NetBSD: firmware.h,v 1.3 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -32,4 +32,59 @@
#ifndef _LINUX_FIRMWARE_H_
#define _LINUX_FIRMWARE_H_
#include <sys/types.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <dev/firmload.h>
struct device;
struct firmware {
firmware_handle_t fw_h;
void *data;
size_t size;
};
static inline int
request_firmware(const struct firmware **fwp, const char *image_name,
struct device *dev)
{
struct firmware *fw;
int ret;
fw = kmem_alloc(sizeof(*fw), KM_SLEEP);
/* XXX errno NetBSD->Linux */
ret = -firmware_open(device_cfdriver(dev)->cd_name, image_name,
&fw->fw_h);
if (ret)
goto fail0;
fw->size = firmware_get_size(fw->fw_h);
fw->data = firmware_malloc(fw->size);
/* XXX errno NetBSD->Linux */
ret = -firmware_read(fw->fw_h, 0, fw->data, fw->size);
if (ret)
goto fail1;
/* Success! */
*fwp = fw;
return 0;
fail1: firmware_free(fw->data, fw->size);
fail0: KASSERT(ret);
kmem_free(fw, sizeof(*fw));
return ret;
}
static inline void
release_firmware(const struct firmware *fw)
{
firmware_free(fw->data, fw->size);
kmem_free(__UNCONST(fw), sizeof(*fw));
}
#endif /* _LINUX_FIRMWARE_H_ */
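A sketch of the request/release sequence this header supports; the image name is illustrative, and the return values are already Linux-style negative errnos.

static int
example_load_microcode(device_t dev)
{
	const struct firmware *fw;
	int error;

	error = request_firmware(&fw, "example_me.bin", dev);
	if (error)
		return error;
	/* ... upload fw->data, which is fw->size bytes long ... */
	release_firmware(fw);
	return 0;
}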

View File

@ -0,0 +1,55 @@
/* $NetBSD: gcd.h,v 1.1 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Taylor R. Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _LINUX_GCD_H_
#define _LINUX_GCD_H_
static inline unsigned long
gcd(unsigned long a, unsigned long b)
{
unsigned long t;
if (a < b) {
t = a;
a = b;
b = t;
}
while (0 < b) {
t = (a % b);
a = b;
b = t;
}
return a;
}
#endif /* _LINUX_GCD_H_ */
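For example, gcd(54, 24) computes 54 mod 24 = 6, then 24 mod 6 = 0, leaving a = 6. The initial swap only saves one iteration: when a < b, the first a % b is just a, which swaps the pair anyway.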

View File

@ -1,4 +1,4 @@
/* $NetBSD: gfp.h,v 1.3 2014/07/16 20:56:25 riastradh Exp $ */
/* $NetBSD: gfp.h,v 1.4 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -68,7 +68,9 @@ typedef int gfp_t;
#define __free_page linux___free_page
struct page;
#if 0
struct page * alloc_page(gfp_t);
void __free_page(struct page *);
#endif
#endif /* _LINUX_GFP_H_ */

View File

@ -1,4 +1,4 @@
/* $NetBSD: hdmi.h,v 1.1 2014/07/16 20:56:25 riastradh Exp $ */
/* $NetBSD: hdmi.h,v 1.2 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@ -62,6 +62,49 @@ enum hdmi_active_aspect {
HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15,
};
enum hdmi_audio_coding_type {
HDMI_AUDIO_CODING_TYPE_STREAM = 0,
HDMI_AUDIO_CODING_TYPE_PCM = 1,
HDMI_AUDIO_CODING_TYPE_AC3 = 2,
HDMI_AUDIO_CODING_TYPE_MPEG1 = 3,
HDMI_AUDIO_CODING_TYPE_MP3 = 4,
HDMI_AUDIO_CODING_TYPE_MPEG2 = 5,
HDMI_AUDIO_CODING_TYPE_AAC_LC = 6,
HDMI_AUDIO_CODING_TYPE_DTS = 7,
HDMI_AUDIO_CODING_TYPE_ATRAC = 8,
HDMI_AUDIO_CODING_TYPE_DSD = 9,
HDMI_AUDIO_CODING_TYPE_EAC3 = 10,
HDMI_AUDIO_CODING_TYPE_DTS_HD = 11,
HDMI_AUDIO_CODING_TYPE_MLP = 12,
HDMI_AUDIO_CODING_TYPE_DST = 13,
HDMI_AUDIO_CODING_TYPE_WMA_PRO = 14,
};
enum hdmi_audio_coding_type_ext {
HDMI_AUDIO_CODING_TYPE_EXT_STREAM = 0,
HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC = 1,
HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2 = 2,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND = 3,
};
enum hdmi_audio_sample_frequency {
HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM = 0,
HDMI_AUDIO_SAMPLE_FREQUENCY_32000 = 1,
HDMI_AUDIO_SAMPLE_FREQUENCY_44100 = 2,
HDMI_AUDIO_SAMPLE_FREQUENCY_48000 = 3,
HDMI_AUDIO_SAMPLE_FREQUENCY_88200 = 4,
HDMI_AUDIO_SAMPLE_FREQUENCY_96000 = 5,
HDMI_AUDIO_SAMPLE_FREQUENCY_176400 = 6,
HDMI_AUDIO_SAMPLE_FREQUENCY_192000 = 7,
};
enum hdmi_audio_sample_size {
HDMI_AUDIO_SAMPLE_SIZE_STREAM = 0,
HDMI_AUDIO_SAMPLE_SIZE_16 = 1,
HDMI_AUDIO_SAMPLE_SIZE_20 = 2,
HDMI_AUDIO_SAMPLE_SIZE_24 = 3,
};
enum hdmi_colorimetry {
HDMI_COLORIMETRY_NONE = 0,
HDMI_COLORIMETRY_ITU_601 = 1,
@ -179,6 +222,73 @@ hdmi_infoframe_checksum(void *buf, size_t length)
p[3] = (256 - checksum);
}
#define HDMI_AUDIO_INFOFRAME_SIZE 10
struct hdmi_audio_infoframe {
struct hdmi_infoframe_header header;
uint8_t channels;
enum hdmi_audio_coding_type coding_type;
enum hdmi_audio_sample_size sample_size;
enum hdmi_audio_sample_frequency sample_frequency;
enum hdmi_audio_coding_type_ext coding_type_ext;
uint8_t channel_allocation;
uint8_t level_shift_value;
bool downmix_inhibit;
};
static inline int
hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
{
static const struct hdmi_audio_infoframe zero_frame;
*frame = zero_frame;
hdmi_infoframe_header_init(&frame->header, HDMI_INFOFRAME_TYPE_AUDIO,
1, HDMI_AUDIO_INFOFRAME_SIZE);
return 0;
}
static inline ssize_t
hdmi_audio_infoframe_pack(const struct hdmi_audio_infoframe *frame, void *buf,
size_t size)
{
const size_t length = HDMI_INFOFRAME_HEADER_SIZE +
HDMI_AUDIO_INFOFRAME_SIZE;
uint8_t channels = 0;
uint8_t *p = buf;
int ret;
KASSERT(frame->header.length == HDMI_AUDIO_INFOFRAME_SIZE);
ret = hdmi_infoframe_header_pack(&frame->header, length, p, size);
if (ret < 0)
return ret;
p += HDMI_INFOFRAME_HEADER_SIZE;
size -= HDMI_INFOFRAME_HEADER_SIZE;
if (frame->channels >= 2)
channels = frame->channels - 1;
p[0] = __SHIFTIN(frame->coding_type, __BITS(7,4));
p[0] |= __SHIFTIN(channels, __BITS(2,0));
p[1] = __SHIFTIN(frame->sample_frequency, __BITS(4,2));
p[1] |= __SHIFTIN(frame->sample_size, __BITS(1,0));
p[2] = __SHIFTIN(frame->coding_type_ext, __BITS(5,0));
p[3] = __SHIFTIN(frame->level_shift_value, __BITS(6, 3));
p[4] = __SHIFTIN(frame->downmix_inhibit? 1 : 0, __BIT(7));
/* XXX p[5], p[6], p[7], p[8], p[9]? */
CTASSERT(HDMI_AUDIO_INFOFRAME_SIZE == 10);
hdmi_infoframe_checksum(buf, length);
return length;
}
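A sketch of a caller, with illustrative values; the _STREAM codes tell the sink to take the corresponding parameters from the audio stream itself.

static ssize_t
example_pack_audio_infoframe(uint8_t *buf, size_t size)
{
	struct hdmi_audio_infoframe frame;

	hdmi_audio_infoframe_init(&frame);
	frame.channels = 2;
	frame.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
	frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
	frame.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
	/* On success, returns HDMI_INFOFRAME_HEADER_SIZE + 10 bytes. */
	return hdmi_audio_infoframe_pack(&frame, buf, size);
}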
#define HDMI_AVI_INFOFRAME_SIZE 13
struct hdmi_avi_infoframe {
struct hdmi_infoframe_header header;

View File

@ -1,4 +1,4 @@
/* $NetBSD: highmem.h,v 1.2 2014/03/18 18:20:43 riastradh Exp $ */
/* $NetBSD: highmem.h,v 1.3 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -46,6 +46,9 @@
#define kmap linux_kmap
#define kunmap linux_kunmap
/* XXX Kludge! */
#define kmap_atomic_prot(page, prot) kmap_atomic(page)
int linux_kmap_init(void);
void linux_kmap_fini(void);

View File

@ -0,0 +1,35 @@
/* $NetBSD: hwmon-sysfs.h,v 1.1 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Taylor R. Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _LINUX_HWMON_SYSFS_H_
#define _LINUX_HWMON_SYSFS_H_
#endif /* _LINUX_HWMON_SYSFS_H_ */

View File

@ -0,0 +1,35 @@
/* $NetBSD: hwmon.h,v 1.1 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Taylor R. Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _LINUX_HWMON_H_
#define _LINUX_HWMON_H_
#endif /* _LINUX_HWMON_H_ */

View File

@ -1,4 +1,4 @@
/* $NetBSD: i2c-algo-bit.h,v 1.2 2014/03/18 18:20:43 riastradh Exp $ */
/* $NetBSD: i2c-algo-bit.h,v 1.3 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -46,11 +46,14 @@ struct i2c_algo_bit_data {
int timeout;
};
/* XXX Make the nm output a little more greppable... */
#define i2c_bit_add_bus linux_i2c_bit_add_bus
#define i2c_bit_algo linux_i2c_bit_algo
static inline int
i2c_bit_add_bus(struct i2c_adapter *adapter __unused)
{
return 0;
}
int i2c_bit_add_bus(struct i2c_adapter *);
/* XXX Make the nm output a little more greppable... */
#define i2c_bit_algo linux_i2c_bit_algo
extern const struct i2c_algorithm i2c_bit_algo;

View File

@ -1,4 +1,4 @@
/* $NetBSD: i2c.h,v 1.2 2014/03/18 18:20:43 riastradh Exp $ */
/* $NetBSD: i2c.h,v 1.3 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -46,6 +46,17 @@ struct i2c_msg;
#define I2C_CLASS_DDC 0x01
struct i2c_board_info {
char type[I2C_NAME_SIZE];
uint16_t addr;
};
static inline void
i2c_new_device(struct i2c_adapter *adapter __unused,
struct i2c_board_info *board __unused)
{
}
struct i2c_adapter {
char name[I2C_NAME_SIZE];
const struct i2c_algorithm *algo;
@ -56,6 +67,7 @@ struct i2c_adapter {
struct {
device_t parent;
} dev; /* XXX Kludge for intel_dp. */
void *i2ca_adapdata;
};
static inline int
@ -69,6 +81,20 @@ i2c_del_adapter(struct i2c_adapter *adapter __unused)
{
}
static inline void *
i2c_get_adapdata(const struct i2c_adapter *adapter)
{
return adapter->i2ca_adapdata;
}
static inline void
i2c_set_adapdata(struct i2c_adapter *adapter, void *data)
{
adapter->i2ca_adapdata = data;
}
struct i2c_msg {
i2c_addr_t addr;
uint16_t flags;

View File

@ -1,4 +1,4 @@
/* $NetBSD: io.h,v 1.3 2014/07/16 20:56:25 riastradh Exp $ */
/* $NetBSD: io.h,v 1.4 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -32,6 +32,8 @@
#ifndef _LINUX_IO_H_
#define _LINUX_IO_H_
#include <asm/io.h>
#define arch_phys_wc_add linux_arch_phys_wc_add
#define arch_phys_wc_del linux_arch_phys_wc_del
#define phys_wc_to_mtrr_index linux_phys_wc_to_mtrr_index

View File

@ -1,4 +1,4 @@
/* $NetBSD: jiffies.h,v 1.3 2014/07/16 20:56:25 riastradh Exp $ */
/* $NetBSD: jiffies.h,v 1.4 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -35,7 +35,8 @@
#include <sys/param.h>
#include <sys/kernel.h>
#define jiffies hardclock_ticks
#define jiffies hardclock_ticks
#define jiffies_64 hardclock_ticks /* XXX */
/* XXX Er, what? */
#define MAX_JIFFY_OFFSET ((INT_MAX >> 1) - 1)

View File

@ -1,4 +1,4 @@
/* $NetBSD: kref.h,v 1.3 2014/07/16 20:56:25 riastradh Exp $ */
/* $NetBSD: kref.h,v 1.4 2014/07/16 20:59:58 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@ -57,6 +57,21 @@ kref_get(struct kref *kref)
KASSERTMSG((count > 1), "getting released kref");
}
static inline bool
kref_get_unless_zero(struct kref *kref)
{
unsigned count;
do {
count = kref->kr_count;
if ((count == 0) || (count == UINT_MAX))
return false;
} while (atomic_cas_uint(&kref->kr_count, count, (count + 1)) !=
count);
return true;
}
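The lookup pattern this enables, sketched with a hypothetical object type: under the lock protecting a lookup table, a zero count means the object is already being torn down, so the lookup must fail rather than resurrect it.

struct example_obj {
	struct kref refcount;
	/* ... */
};

static struct example_obj *
example_lookup_ref(struct example_obj *obj)
{
	if (obj == NULL || !kref_get_unless_zero(&obj->refcount))
		return NULL;	/* being destroyed concurrently */
	return obj;
}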
static inline int
kref_sub(struct kref *kref, unsigned int count, void (*release)(struct kref *))
{
@ -108,7 +123,17 @@ kref_put_mutex(struct kref *kref, void (*release)(struct kref *),
return 0;
}
/* Not native to Linux. */
/*
* Not native to Linux. Mostly used for assertions...
*/
static inline bool
kref_referenced_p(struct kref *kref)
{
return (0 < kref->kr_count);
}
static inline bool
kref_exclusive_p(struct kref *kref)
{

Some files were not shown because too many files have changed in this diff.