Intel-2D: usermode 2D driver
git-svn-id: svn://kolibrios.org@3254 a494cfbc-eb01-0410-851d-a64ba20cac60
This commit is contained in:
parent
4bceec50e4
commit
013e845fb3
|
@ -0,0 +1,17 @@
|
|||
#include "brw_eu.h"
|
||||
|
||||
bool brw_sf_kernel__nomask(struct brw_compile *p);
|
||||
bool brw_sf_kernel__mask(struct brw_compile *p);
|
||||
|
||||
bool brw_wm_kernel__affine(struct brw_compile *p, int dispatch_width);
|
||||
bool brw_wm_kernel__affine_mask(struct brw_compile *p, int dispatch_width);
|
||||
bool brw_wm_kernel__affine_mask_ca(struct brw_compile *p, int dispatch_width);
|
||||
bool brw_wm_kernel__affine_mask_sa(struct brw_compile *p, int dispatch_width);
|
||||
|
||||
bool brw_wm_kernel__projective(struct brw_compile *p, int dispatch_width);
|
||||
bool brw_wm_kernel__projective_mask(struct brw_compile *p, int dispatch_width);
|
||||
bool brw_wm_kernel__projective_mask_ca(struct brw_compile *p, int dispatch_width);
|
||||
bool brw_wm_kernel__projective_mask_sa(struct brw_compile *p, int dispatch_width);
|
||||
|
||||
bool brw_wm_kernel__affine_opacity(struct brw_compile *p, int dispatch_width);
|
||||
bool brw_wm_kernel__projective_opacity(struct brw_compile *p, int dispatch_width);
|
|
@ -0,0 +1,150 @@
|
|||
/*
|
||||
Copyright (C) Intel Corp. 2006. All Rights Reserved.
|
||||
Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
|
||||
develop this 3D driver.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice (including the
|
||||
next paragraph) shall be included in all copies or substantial
|
||||
portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
**********************************************************************/
|
||||
/*
|
||||
* Authors:
|
||||
* Keith Whitwell <keith@tungstengraphics.com>
|
||||
*/
|
||||
|
||||
#include "brw_eu.h"
|
||||
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
/* Returns the corresponding conditional mod for swapping src0 and
|
||||
* src1 in e.g. CMP.
|
||||
*/
|
||||
uint32_t
|
||||
brw_swap_cmod(uint32_t cmod)
|
||||
{
|
||||
switch (cmod) {
|
||||
case BRW_CONDITIONAL_Z:
|
||||
case BRW_CONDITIONAL_NZ:
|
||||
return cmod;
|
||||
case BRW_CONDITIONAL_G:
|
||||
return BRW_CONDITIONAL_LE;
|
||||
case BRW_CONDITIONAL_GE:
|
||||
return BRW_CONDITIONAL_L;
|
||||
case BRW_CONDITIONAL_L:
|
||||
return BRW_CONDITIONAL_GE;
|
||||
case BRW_CONDITIONAL_LE:
|
||||
return BRW_CONDITIONAL_G;
|
||||
default:
|
||||
return ~0;
|
||||
}
|
||||
}
|
||||
|
||||
/* How does predicate control work when execution_size != 8? Do I
|
||||
* need to test/set for 0xffff when execution_size is 16?
|
||||
*/
|
||||
void brw_set_predicate_control_flag_value( struct brw_compile *p, unsigned value )
|
||||
{
|
||||
p->current->header.predicate_control = BRW_PREDICATE_NONE;
|
||||
|
||||
if (value != 0xff) {
|
||||
if (value != p->flag_value) {
|
||||
brw_MOV(p, brw_flag_reg(), brw_imm_uw(value));
|
||||
p->flag_value = value;
|
||||
}
|
||||
|
||||
p->current->header.predicate_control = BRW_PREDICATE_NORMAL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Record the instruction-compression mode in the default instruction
 * state.  Gen6 re-encoded this field as quarter/half control, so the
 * pre-gen6 enum is translated there; earlier gens store it verbatim.
 * Also caches whether SIMD16 compression is active in p->compressed.
 */
void brw_set_compression_control(struct brw_compile *p,
				 enum brw_compression compression_control)
{
	p->compressed = (compression_control == BRW_COMPRESSION_COMPRESSED);

	if (p->gen >= 060) {
		/* Since we don't use the 32-wide support in gen6, we translate
		 * the pre-gen6 compression control here.
		 */
		switch (compression_control) {
		case BRW_COMPRESSION_NONE:
			/* This is the "use the first set of bits of dmask/vmask/arf
			 * according to execsize" option.
			 */
			p->current->header.compression_control = GEN6_COMPRESSION_1Q;
			break;
		case BRW_COMPRESSION_2NDHALF:
			/* For 8-wide, this is "use the second set of 8 bits." */
			p->current->header.compression_control = GEN6_COMPRESSION_2Q;
			break;
		case BRW_COMPRESSION_COMPRESSED:
			/* For 16-wide instruction compression, use the first set of 16 bits
			 * since we don't do 32-wide dispatch.
			 */
			p->current->header.compression_control = GEN6_COMPRESSION_1H;
			break;
		default:
			assert(!"not reached");
			p->current->header.compression_control = GEN6_COMPRESSION_1H;
			break;
		}
	} else {
		p->current->header.compression_control = compression_control;
	}
}
|
||||
|
||||
void brw_push_insn_state( struct brw_compile *p )
|
||||
{
|
||||
assert(p->current != &p->stack[BRW_EU_MAX_INSN_STACK-1]);
|
||||
memcpy(p->current+1, p->current, sizeof(struct brw_instruction));
|
||||
p->compressed_stack[p->current - p->stack] = p->compressed;
|
||||
p->current++;
|
||||
}
|
||||
|
||||
void brw_pop_insn_state( struct brw_compile *p )
|
||||
{
|
||||
assert(p->current != p->stack);
|
||||
p->current--;
|
||||
p->compressed = p->compressed_stack[p->current - p->stack];
|
||||
}
|
||||
|
||||
void brw_compile_init(struct brw_compile *p, int gen, void *store)
|
||||
{
|
||||
assert(gen);
|
||||
|
||||
p->gen = gen;
|
||||
p->store = store;
|
||||
|
||||
p->nr_insn = 0;
|
||||
p->current = p->stack;
|
||||
p->compressed = false;
|
||||
memset(p->current, 0, sizeof(p->current[0]));
|
||||
|
||||
/* Some defaults?
|
||||
*/
|
||||
brw_set_mask_control(p, BRW_MASK_ENABLE); /* what does this do? */
|
||||
brw_set_saturate(p, 0);
|
||||
brw_set_compression_control(p, BRW_COMPRESSION_NONE);
|
||||
brw_set_predicate_control_flag_value(p, 0xff);
|
||||
|
||||
p->if_stack_depth = 0;
|
||||
p->if_stack_array_size = 0;
|
||||
p->if_stack = NULL;
|
||||
}
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,681 @@
|
|||
#include "brw.h"
|
||||
|
||||
#define X16 8
|
||||
#define Y16 10
|
||||
|
||||
/* Compute per-pixel X/Y coordinates for the subspans being shaded
 * (pre-gen6 payload layout: r1 carries the subspan origin).
 *
 * The integer pixel positions are synthesized by adding an immediate
 * nibble-vector of per-pixel offsets to the origin held in r1.4/r1.5
 * (as uw), then converted into deltas in grf X16/Y16 for the LINE/MAC
 * interpolation done later.  Registers 28/30 are used as scratch.
 */
static void brw_wm_xy(struct brw_compile *p, int dw)
{
	struct brw_reg r1 = brw_vec1_grf(1, 0);
	struct brw_reg r1_uw = __retype_uw(r1);
	struct brw_reg x_uw, y_uw;

	brw_set_compression_control(p, BRW_COMPRESSION_NONE);

	if (dw == 16) {
		x_uw = brw_uw16_grf(30, 0);
		y_uw = brw_uw16_grf(28, 0);
	} else {
		x_uw = brw_uw8_grf(30, 0);
		y_uw = brw_uw8_grf(28, 0);
	}

	/* X: origin.x (r1.4:uw, replicated per subspan) + offset vector. */
	brw_ADD(p,
		x_uw,
		__stride(__suboffset(r1_uw, 4), 2, 4, 0),
		brw_imm_v(0x10101010));
	/* Y: origin.y (r1.5:uw, replicated per subspan) + offset vector. */
	brw_ADD(p,
		y_uw,
		__stride(__suboffset(r1_uw, 5), 2, 4, 0),
		brw_imm_v(0x11001100));

	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);

	/* Subtract the origin in r1 (floats) to get signed deltas X16/Y16. */
	brw_ADD(p, brw_vec8_grf(X16, 0), vec8(x_uw), brw_negate(r1));
	brw_ADD(p, brw_vec8_grf(Y16, 0), vec8(y_uw), brw_negate(__suboffset(r1, 1)));
}
|
||||
|
||||
/* Interpolate the affine texture coordinates (s, t) for sampler
 * 'channel' into consecutive message registers starting at msg+1.
 *
 * On gen6+ PLN interpolates directly from the plane-equation payload
 * (two GRFs per channel, starting at grf 'uv'); on older gens the
 * coordinates are accumulated with LINE/MAC from the X16/Y16 deltas
 * prepared by brw_wm_xy().
 */
static void brw_wm_affine_st(struct brw_compile *p, int dw,
			     int channel, int msg)
{
	int uv;

	if (dw == 16) {
		brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
		uv = p->gen >= 060 ? 6 : 3;
	} else {
		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
		uv = p->gen >= 060 ? 4 : 3;
	}
	uv += 2*channel;	/* two GRFs of coefficients per channel */

	msg++;			/* payload starts after the (implied) header slot */
	if (p->gen >= 060) {
		/* s from the first coefficient pair... */
		brw_PLN(p,
			brw_message_reg(msg),
			brw_vec1_grf(uv, 0),
			brw_vec8_grf(2, 0));
		msg += dw/8;

		/* ...t from the second (offset 4 within the same GRF). */
		brw_PLN(p,
			brw_message_reg(msg),
			brw_vec1_grf(uv, 4),
			brw_vec8_grf(2, 0));
	} else {
		struct brw_reg r = brw_vec1_grf(uv, 0);

		/* s: accumulate the X term with LINE, add the Y term with MAC. */
		brw_LINE(p, brw_null_reg(), __suboffset(r, 0), brw_vec8_grf(X16, 0));
		brw_MAC(p, brw_message_reg(msg), __suboffset(r, 1), brw_vec8_grf(Y16, 0));
		msg += dw/8;

		/* t: same with the second coefficient pair. */
		brw_LINE(p, brw_null_reg(), __suboffset(r, 4), brw_vec8_grf(X16, 0));
		brw_MAC(p, brw_message_reg(msg), __suboffset(r, 5), brw_vec8_grf(Y16, 0));
	}
}
|
||||
|
||||
static inline unsigned simd(int dw)
|
||||
{
|
||||
return dw == 16 ? BRW_SAMPLER_SIMD_MODE_SIMD16 : BRW_SAMPLER_SIMD_MODE_SIMD8;
|
||||
}
|
||||
|
||||
static inline struct brw_reg sample_result(int dw, int result)
|
||||
{
|
||||
return brw_reg(BRW_GENERAL_REGISTER_FILE, result, 0,
|
||||
BRW_REGISTER_TYPE_UW,
|
||||
dw == 16 ? BRW_VERTICAL_STRIDE_16 : BRW_VERTICAL_STRIDE_8,
|
||||
dw == 16 ? BRW_WIDTH_16 : BRW_WIDTH_8,
|
||||
BRW_HORIZONTAL_STRIDE_1,
|
||||
BRW_SWIZZLE_XYZW,
|
||||
WRITEMASK_XYZW);
|
||||
}
|
||||
|
||||
/* Emit a full RGBA sampler message for 'channel' using the s/t payload
 * already placed at message register msg(+1).  Returns the first GRF of
 * the sampled result ('result' unchanged).
 */
static int brw_wm_sample(struct brw_compile *p, int dw,
			 int channel, int msg, int result)
{
	struct brw_reg src0;
	bool header;
	int len;

	len = dw == 16 ? 4 : 2;	/* MRFs occupied by the s,t coordinates */
	if (p->gen >= 060) {
		/* gen6+: headerless send taken straight from the MRF */
		header = false;
		src0 = brw_message_reg(++msg);
	} else {
		/* pre-gen6 prepends the r0 header to the message */
		header = true;
		src0 = brw_vec8_grf(0, 0);
	}

	/* binding table index channel+1, sampler index channel;
	 * response is 2*len GRFs (four channels), message is coords+header.
	 */
	brw_SAMPLE(p, sample_result(dw, result), msg, src0,
		   channel+1, channel, WRITEMASK_XYZW, 0,
		   2*len, len+header, header, simd(dw));
	return result;
}
|
||||
|
||||
/* Emit a sampler message that fetches only the alpha (W) channel for
 * 'channel'.  Returns the GRF that actually holds the sampled alpha —
 * note the SIMD8 adjustment below.
 */
static int brw_wm_sample__alpha(struct brw_compile *p, int dw,
				int channel, int msg, int result)
{
	struct brw_reg src0;
	int mlen, rlen;

	if (dw == 8) {
		/* SIMD8 sample return is not masked */
		mlen = 3;
		rlen = 4;
	} else {
		mlen = 5;
		rlen = 2;
	}

	if (p->gen >= 060)
		src0 = brw_message_reg(msg);
	else
		src0 = brw_vec8_grf(0, 0);

	/* WRITEMASK_W: request only the alpha channel. */
	brw_SAMPLE(p, sample_result(dw, result), msg, src0,
		   channel+1, channel, WRITEMASK_W, 0,
		   rlen, mlen, true, simd(dw));

	if (dw == 8)
		result += 3;	/* unmasked SIMD8 return: W lands in result+3 */

	return result;
}
|
||||
|
||||
/* Interpolate affine s/t for 'channel' and sample all four channels;
 * returns the first GRF of the sampled RGBA.
 */
static int brw_wm_affine(struct brw_compile *p, int dw,
			 int channel, int msg, int result)
{
	brw_wm_affine_st(p, dw, channel, msg);
	return brw_wm_sample(p, dw, channel, msg, result);
}
|
||||
|
||||
/* Interpolate affine s/t for 'channel' and sample only the alpha
 * channel; returns the GRF holding the sampled alpha.
 */
static int brw_wm_affine__alpha(struct brw_compile *p, int dw,
				int channel, int msg, int result)
{
	brw_wm_affine_st(p, dw, channel, msg);
	return brw_wm_sample__alpha(p, dw, channel, msg, result);
}
|
||||
|
||||
static inline struct brw_reg null_result(int dw)
|
||||
{
|
||||
return brw_reg(BRW_ARCHITECTURE_REGISTER_FILE, BRW_ARF_NULL, 0,
|
||||
BRW_REGISTER_TYPE_UW,
|
||||
dw == 16 ? BRW_VERTICAL_STRIDE_16 : BRW_VERTICAL_STRIDE_8,
|
||||
dw == 16 ? BRW_WIDTH_16 : BRW_WIDTH_8,
|
||||
BRW_HORIZONTAL_STRIDE_1,
|
||||
BRW_SWIZZLE_XYZW,
|
||||
WRITEMASK_XYZW);
|
||||
}
|
||||
|
||||
/* Emit the render-target write SEND for the colour payload already
 * staged in the message registers (starting at m2).
 */
static void brw_fb_write(struct brw_compile *p, int dw)
{
	struct brw_instruction *insn;
	unsigned msg_control, msg_type, msg_len;
	struct brw_reg src0;
	bool header;

	if (dw == 16) {
		brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
		msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE;
		msg_len = 8;	/* four channels x two GRFs */
	} else {
		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
		msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;
		msg_len = 4;	/* four channels x one GRF */
	}

	if (p->gen < 060) {
		/* Pre-gen6 messages also carry a copy of r1 in m1
		 * (emitted uncompressed and unmasked).
		 */
		brw_push_insn_state(p);
		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
		brw_set_mask_control(p, BRW_MASK_DISABLE);
		brw_MOV(p, brw_message_reg(1), brw_vec8_grf(1, 0));
		brw_pop_insn_state(p);

		msg_len += 2;
	}

	/* The execution mask is ignored for render target writes. */
	insn = brw_next_insn(p, BRW_OPCODE_SEND);
	insn->header.predicate_control = 0;
	insn->header.compression_control = BRW_COMPRESSION_NONE;

	if (p->gen >= 060) {
		msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
		src0 = brw_message_reg(2);	/* headerless: payload starts at m2 */
		header = false;
	} else {
		insn->header.destreg__conditionalmod = 0;
		msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
		src0 = __retype_uw(brw_vec8_grf(0, 0));	/* implied r0 header */
		header = true;
	}

	brw_set_dest(p, insn, null_result(dw));
	brw_set_src0(p, insn, src0);
	brw_set_dp_write_message(p, insn, 0,
				 msg_control, msg_type, msg_len,
				 header, true, 0, true, false);
}
|
||||
|
||||
/* Copy the sampled colour (four channels starting at GRF 'src') into
 * the message registers and emit the render-target write.  The copy
 * strategy depends on gen and dispatch width (compressed MOVs, COMPR4,
 * or explicit per-half MOVs).
 */
static void brw_wm_write(struct brw_compile *p, int dw, int src)
{
	int n;

	if (dw == 8 && p->gen >= 060) {
		/* XXX pixel execution mask? */
		brw_set_compression_control(p, BRW_COMPRESSION_NONE);

		brw_MOV(p, brw_message_reg(2), brw_vec8_grf(src+0, 0));
		brw_MOV(p, brw_message_reg(3), brw_vec8_grf(src+1, 0));
		brw_MOV(p, brw_message_reg(4), brw_vec8_grf(src+2, 0));
		brw_MOV(p, brw_message_reg(5), brw_vec8_grf(src+3, 0));
		goto done;
	}

	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);

	for (n = 0; n < 4; n++) {
		if (p->gen >= 060) {
			/* One compressed MOV covers both halves. */
			brw_MOV(p,
				brw_message_reg(2 + 2*n),
				brw_vec8_grf(src + 2*n, 0));
		} else if (p->gen >= 045 && dw == 16) {
			/* g4x+: COMPR4 addressing writes both halves. */
			brw_MOV(p,
				brw_message_reg(2 + n + BRW_MRF_COMPR4),
				brw_vec8_grf(src + 2*n, 0));
		} else {
			/* Copy each half explicitly. */
			brw_set_compression_control(p, BRW_COMPRESSION_NONE);
			brw_MOV(p,
				brw_message_reg(2 + n),
				brw_vec8_grf(src + 2*n, 0));

			if (dw == 16) {
				brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
				brw_MOV(p,
					brw_message_reg(2 + n + 4),
					brw_vec8_grf(src + 2*n+1, 0));
			}
		}
	}

done:
	brw_fb_write(p, dw);
}
|
||||
|
||||
/* As brw_wm_write(), but multiply every source channel by the single
 * mask value in GRF 'mask' before the render-target write
 * (non-component-alpha compositing).
 */
static void brw_wm_write__mask(struct brw_compile *p, int dw,
			       int src, int mask)
{
	int n;

	if (dw == 8 && p->gen >= 060) {
		brw_set_compression_control(p, BRW_COMPRESSION_NONE);

		brw_MUL(p,
			brw_message_reg(2),
			brw_vec8_grf(src+0, 0),
			brw_vec8_grf(mask, 0));
		brw_MUL(p,
			brw_message_reg(3),
			brw_vec8_grf(src+1, 0),
			brw_vec8_grf(mask, 0));
		brw_MUL(p,
			brw_message_reg(4),
			brw_vec8_grf(src+2, 0),
			brw_vec8_grf(mask, 0));
		brw_MUL(p,
			brw_message_reg(5),
			brw_vec8_grf(src+3, 0),
			brw_vec8_grf(mask, 0));

		goto done;
	}

	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);

	for (n = 0; n < 4; n++) {
		if (p->gen >= 060) {
			brw_MUL(p,
				brw_message_reg(2 + 2*n),
				brw_vec8_grf(src + 2*n, 0),
				brw_vec8_grf(mask, 0));
		} else if (p->gen >= 045 && dw == 16) {
			brw_MUL(p,
				brw_message_reg(2 + n + BRW_MRF_COMPR4),
				brw_vec8_grf(src + 2*n, 0),
				brw_vec8_grf(mask, 0));
		} else {
			brw_set_compression_control(p, BRW_COMPRESSION_NONE);
			brw_MUL(p,
				brw_message_reg(2 + n),
				brw_vec8_grf(src + 2*n, 0),
				brw_vec8_grf(mask, 0));

			if (dw == 16) {
				/* Second half pairs with the second mask GRF. */
				brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
				brw_MUL(p,
					brw_message_reg(2 + n + 4),
					brw_vec8_grf(src + 2*n+1, 0),
					brw_vec8_grf(mask+1, 0));
			}
		}
	}

done:
	brw_fb_write(p, dw);
}
|
||||
|
||||
/* As brw_wm_write(), but scale every source channel by a constant
 * opacity scalar read from (GRF 'mask', element 3) — the same scalar
 * for both halves of a SIMD16 dispatch.
 */
static void brw_wm_write__opacity(struct brw_compile *p, int dw,
				  int src, int mask)
{
	int n;

	if (dw == 8 && p->gen >= 060) {
		brw_set_compression_control(p, BRW_COMPRESSION_NONE);

		brw_MUL(p,
			brw_message_reg(2),
			brw_vec8_grf(src+0, 0),
			brw_vec1_grf(mask, 3));
		brw_MUL(p,
			brw_message_reg(3),
			brw_vec8_grf(src+1, 0),
			brw_vec1_grf(mask, 3));
		brw_MUL(p,
			brw_message_reg(4),
			brw_vec8_grf(src+2, 0),
			brw_vec1_grf(mask, 3));
		brw_MUL(p,
			brw_message_reg(5),
			brw_vec8_grf(src+3, 0),
			brw_vec1_grf(mask, 3));

		goto done;
	}

	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);

	for (n = 0; n < 4; n++) {
		if (p->gen >= 060) {
			brw_MUL(p,
				brw_message_reg(2 + 2*n),
				brw_vec8_grf(src + 2*n, 0),
				brw_vec1_grf(mask, 3));
		} else if (p->gen >= 045 && dw == 16) {
			brw_MUL(p,
				brw_message_reg(2 + n + BRW_MRF_COMPR4),
				brw_vec8_grf(src + 2*n, 0),
				brw_vec1_grf(mask, 3));
		} else {
			brw_set_compression_control(p, BRW_COMPRESSION_NONE);
			brw_MUL(p,
				brw_message_reg(2 + n),
				brw_vec8_grf(src + 2*n, 0),
				brw_vec1_grf(mask, 3));

			if (dw == 16) {
				/* Scalar opacity: same source for the 2nd half. */
				brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
				brw_MUL(p,
					brw_message_reg(2 + n + 4),
					brw_vec8_grf(src + 2*n+1, 0),
					brw_vec1_grf(mask, 3));
			}
		}
	}

done:
	brw_fb_write(p, dw);
}
|
||||
|
||||
/* As brw_wm_write(), but multiply each source channel by the matching
 * channel of the mask (component-alpha compositing): src GRFs pair
 * with mask GRFs channel-by-channel.
 */
static void brw_wm_write__mask_ca(struct brw_compile *p, int dw,
				  int src, int mask)
{
	int n;

	if (dw == 8 && p->gen >= 060) {
		brw_set_compression_control(p, BRW_COMPRESSION_NONE);

		brw_MUL(p,
			brw_message_reg(2),
			brw_vec8_grf(src  + 0, 0),
			brw_vec8_grf(mask + 0, 0));
		brw_MUL(p,
			brw_message_reg(3),
			brw_vec8_grf(src  + 1, 0),
			brw_vec8_grf(mask + 1, 0));
		brw_MUL(p,
			brw_message_reg(4),
			brw_vec8_grf(src  + 2, 0),
			brw_vec8_grf(mask + 2, 0));
		brw_MUL(p,
			brw_message_reg(5),
			brw_vec8_grf(src  + 3, 0),
			brw_vec8_grf(mask + 3, 0));

		goto done;
	}

	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);

	for (n = 0; n < 4; n++) {
		if (p->gen >= 060) {
			brw_MUL(p,
				brw_message_reg(2 + 2*n),
				brw_vec8_grf(src  + 2*n, 0),
				brw_vec8_grf(mask + 2*n, 0));
		} else if (p->gen >= 045 && dw == 16) {
			brw_MUL(p,
				brw_message_reg(2 + n + BRW_MRF_COMPR4),
				brw_vec8_grf(src  + 2*n, 0),
				brw_vec8_grf(mask + 2*n, 0));
		} else {
			brw_set_compression_control(p, BRW_COMPRESSION_NONE);
			brw_MUL(p,
				brw_message_reg(2 + n),
				brw_vec8_grf(src  + 2*n, 0),
				brw_vec8_grf(mask + 2*n, 0));

			if (dw == 16) {
				brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
				brw_MUL(p,
					brw_message_reg(2 + n + 4),
					brw_vec8_grf(src  + 2*n + 1, 0),
					brw_vec8_grf(mask + 2*n + 1, 0));
			}
		}
	}

done:
	brw_fb_write(p, dw);
}
|
||||
|
||||
/* Pixel shader: sample an affine-transformed source and write it to
 * the render target.  'dispatch' is the SIMD width (8 or 16).
 */
bool
brw_wm_kernel__affine(struct brw_compile *p, int dispatch)
{
	if (p->gen < 060)
		brw_wm_xy(p, dispatch);	/* pre-gen6 needs explicit X/Y deltas */
	brw_wm_write(p, dispatch, brw_wm_affine(p, dispatch, 0, 1, 12));

	return true;
}
|
||||
|
||||
/* Pixel shader: affine source modulated by the alpha of a second
 * (mask) surface — non-component-alpha.
 */
bool
brw_wm_kernel__affine_mask(struct brw_compile *p, int dispatch)
{
	int src, mask;

	if (p->gen < 060)
		brw_wm_xy(p, dispatch);

	src = brw_wm_affine(p, dispatch, 0, 1, 12);
	mask = brw_wm_affine__alpha(p, dispatch, 1, 6, 20);
	brw_wm_write__mask(p, dispatch, src, mask);

	return true;
}
|
||||
|
||||
/* Pixel shader: affine source modulated channel-by-channel by a full
 * RGBA mask surface (component alpha).
 */
bool
brw_wm_kernel__affine_mask_ca(struct brw_compile *p, int dispatch)
{
	int src, mask;

	if (p->gen < 060)
		brw_wm_xy(p, dispatch);

	src = brw_wm_affine(p, dispatch, 0, 1, 12);
	mask = brw_wm_affine(p, dispatch, 1, 6, 20);
	brw_wm_write__mask_ca(p, dispatch, src, mask);

	return true;
}
|
||||
|
||||
/* Pixel shader: source-alpha variant — the mask surface's colour is
 * modulated by the source's alpha, so src/mask roles are swapped in
 * the final write (note the (mask, src) argument order).
 */
bool
brw_wm_kernel__affine_mask_sa(struct brw_compile *p, int dispatch)
{
	int src, mask;

	if (p->gen < 060)
		brw_wm_xy(p, dispatch);

	src = brw_wm_affine__alpha(p, dispatch, 0, 1, 12);
	mask = brw_wm_affine(p, dispatch, 1, 6, 16);
	brw_wm_write__mask(p, dispatch, mask, src);

	return true;
}
|
||||
|
||||
/* Projective variants */

/* Interpolate projective texture coordinates (s/w, t/w) for sampler
 * 'channel' into message registers starting at msg+1: interpolate w,
 * take its reciprocal with the math unit, then multiply the
 * interpolated s and t by 1/w.
 */
static void brw_wm_projective_st(struct brw_compile *p, int dw,
				 int channel, int msg)
{
	int uv;

	if (dw == 16) {
		brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
		uv = p->gen >= 060 ? 6 : 3;
	} else {
		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
		uv = p->gen >= 060 ? 4 : 3;
	}
	uv += 2*channel;

	msg++;
	if (p->gen >= 060) {
		/* First compute 1/z */
		/* NOTE(review): the interpolated w is written to
		 * brw_message_reg(msg) here, yet the reciprocals below
		 * operate on grf30/31 — this looks inconsistent with the
		 * pre-gen6 path (which accumulates w into grf30); verify
		 * against the upstream sna source before relying on it.
		 */
		brw_PLN(p,
			brw_message_reg(msg),
			brw_vec1_grf(uv+1, 0),
			brw_vec8_grf(2, 0));

		if (dw == 16) {
			brw_set_compression_control(p, BRW_COMPRESSION_NONE);
			brw_math_invert(p, brw_vec8_grf(30, 0), brw_vec8_grf(30, 0));
			brw_math_invert(p, brw_vec8_grf(31, 0), brw_vec8_grf(31, 0));
			brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
		} else
			brw_math_invert(p, brw_vec8_grf(30, 0), brw_vec8_grf(30, 0));
		/* s * (1/w) -> m(msg) */
		brw_PLN(p,
			brw_vec8_grf(28, 0),
			brw_vec1_grf(uv, 0),
			brw_vec8_grf(2, 0));
		brw_MUL(p,
			brw_message_reg(msg),
			brw_vec8_grf(28, 0),
			brw_vec8_grf(30, 0));
		msg += dw/8;

		/* t * (1/w) -> m(msg) */
		/* NOTE(review): this PLN reuses (uv, 0) with grf4, while
		 * the affine path uses (uv, 4) with grf2 for the second
		 * coordinate — confirm this is intentional.
		 */
		brw_PLN(p,
			brw_vec8_grf(28, 0),
			brw_vec1_grf(uv, 0),
			brw_vec8_grf(4, 0));
		brw_MUL(p,
			brw_message_reg(msg),
			brw_vec8_grf(28, 0),
			brw_vec8_grf(30, 0));
	} else {
		struct brw_reg r = brw_vec1_grf(uv, 0);

		/* First compute 1/z */
		brw_LINE(p, brw_null_reg(), brw_vec1_grf(uv+1, 0), brw_vec8_grf(X16, 0));
		brw_MAC(p, brw_vec8_grf(30, 0), brw_vec1_grf(uv+1, 1), brw_vec8_grf(Y16, 0));

		if (dw == 16) {
			/* The math unit is uncompressed: invert each half. */
			brw_set_compression_control(p, BRW_COMPRESSION_NONE);
			brw_math_invert(p, brw_vec8_grf(30, 0), brw_vec8_grf(30, 0));
			brw_math_invert(p, brw_vec8_grf(31, 0), brw_vec8_grf(31, 0));
			brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
		} else
			brw_math_invert(p, brw_vec8_grf(30, 0), brw_vec8_grf(30, 0));

		/* Now compute the output s,t values */
		brw_LINE(p, brw_null_reg(), __suboffset(r, 0), brw_vec8_grf(X16, 0));
		brw_MAC(p, brw_vec8_grf(28, 0), __suboffset(r, 1), brw_vec8_grf(Y16, 0));
		brw_MUL(p, brw_message_reg(msg), brw_vec8_grf(28, 0), brw_vec8_grf(30, 0));
		msg += dw/8;

		brw_LINE(p, brw_null_reg(), __suboffset(r, 4), brw_vec8_grf(X16, 0));
		brw_MAC(p, brw_vec8_grf(28, 0), __suboffset(r, 5), brw_vec8_grf(Y16, 0));
		brw_MUL(p, brw_message_reg(msg), brw_vec8_grf(28, 0), brw_vec8_grf(30, 0));
	}
}
|
||||
|
||||
/* Interpolate projective s/t for 'channel' and sample all four
 * channels; returns the first GRF of the sampled RGBA.
 */
static int brw_wm_projective(struct brw_compile *p, int dw,
			     int channel, int msg, int result)
{
	brw_wm_projective_st(p, dw, channel, msg);
	return brw_wm_sample(p, dw, channel, msg, result);
}
|
||||
|
||||
/* Interpolate projective s/t for 'channel' and sample only the alpha
 * channel; returns the GRF holding the sampled alpha.
 */
static int brw_wm_projective__alpha(struct brw_compile *p, int dw,
				    int channel, int msg, int result)
{
	brw_wm_projective_st(p, dw, channel, msg);
	return brw_wm_sample__alpha(p, dw, channel, msg, result);
}
|
||||
|
||||
/* Pixel shader: sample a projective-transformed source and write it
 * to the render target.
 */
bool
brw_wm_kernel__projective(struct brw_compile *p, int dispatch)
{
	if (p->gen < 060)
		brw_wm_xy(p, dispatch);
	brw_wm_write(p, dispatch, brw_wm_projective(p, dispatch, 0, 1, 12));

	return true;
}
|
||||
|
||||
/* Pixel shader: projective source modulated by the alpha of a second
 * (mask) surface — non-component-alpha.
 */
bool
brw_wm_kernel__projective_mask(struct brw_compile *p, int dispatch)
{
	int src, mask;

	if (p->gen < 060)
		brw_wm_xy(p, dispatch);

	src = brw_wm_projective(p, dispatch, 0, 1, 12);
	mask = brw_wm_projective__alpha(p, dispatch, 1, 6, 20);
	brw_wm_write__mask(p, dispatch, src, mask);

	return true;
}
|
||||
|
||||
/* Pixel shader: projective source modulated channel-by-channel by a
 * full RGBA mask surface (component alpha).
 */
bool
brw_wm_kernel__projective_mask_ca(struct brw_compile *p, int dispatch)
{
	int src, mask;

	if (p->gen < 060)
		brw_wm_xy(p, dispatch);

	src = brw_wm_projective(p, dispatch, 0, 1, 12);
	mask = brw_wm_projective(p, dispatch, 1, 6, 20);
	brw_wm_write__mask_ca(p, dispatch, src, mask);

	return true;
}
|
||||
|
||||
/* Pixel shader: source-alpha variant of the projective mask kernel —
 * note the swapped (mask, src) order in the final write.
 */
bool
brw_wm_kernel__projective_mask_sa(struct brw_compile *p, int dispatch)
{
	int src, mask;

	if (p->gen < 060)
		brw_wm_xy(p, dispatch);

	src = brw_wm_projective__alpha(p, dispatch, 0, 1, 12);
	mask = brw_wm_projective(p, dispatch, 1, 6, 16);
	brw_wm_write__mask(p, dispatch, mask, src);

	return true;
}
|
||||
|
||||
/* Pixel shader: affine source scaled by a constant opacity value.
 * The GRF holding the opacity depends on gen and dispatch width
 * (pre-gen6: grf5; gen6+: grf8 for SIMD16, grf6 for SIMD8).
 */
bool
brw_wm_kernel__affine_opacity(struct brw_compile *p, int dispatch)
{
	int src, mask;

	if (p->gen < 060) {
		brw_wm_xy(p, dispatch);
		mask = 5;
	} else
		mask = dispatch == 16 ? 8 : 6;

	src = brw_wm_affine(p, dispatch, 0, 1, 12);
	brw_wm_write__opacity(p, dispatch, src, mask);

	return true;
}
|
||||
|
||||
/* Pixel shader: projective source scaled by a constant opacity value.
 * Opacity GRF selection mirrors brw_wm_kernel__affine_opacity.
 */
bool
brw_wm_kernel__projective_opacity(struct brw_compile *p, int dispatch)
{
	int src, mask;

	if (p->gen < 060) {
		brw_wm_xy(p, dispatch);
		mask = 5;
	} else
		mask = dispatch == 16 ? 8 : 6;

	src = brw_wm_projective(p, dispatch, 0, 1, 12);
	brw_wm_write__opacity(p, dispatch, src, mask);

	return true;
}
|
|
@ -0,0 +1,63 @@
|
|||
/*
|
||||
* Copyright (c) 2011 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Chris Wilson <chris@chris-wilson.co.uk>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _SNA_COMPILER_H_
|
||||
#define _SNA_COMPILER_H_
|
||||
|
||||
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
|
||||
#define likely(expr) (__builtin_expect (!!(expr), 1))
|
||||
#define unlikely(expr) (__builtin_expect (!!(expr), 0))
|
||||
#define noinline __attribute__((noinline))
|
||||
#define force_inline inline __attribute__((always_inline))
|
||||
#define fastcall __attribute__((regparm(3)))
|
||||
#define must_check __attribute__((warn_unused_result))
|
||||
#define constant __attribute__((const))
|
||||
#define pure __attribute__((pure))
|
||||
#define __packed__ __attribute__((__packed__))
|
||||
#else
|
||||
#define likely(expr) (expr)
|
||||
#define unlikely(expr) (expr)
|
||||
#define noinline
|
||||
#define force_inline
|
||||
#define fastcall
|
||||
#define must_check
|
||||
#define constant
|
||||
#define pure
|
||||
#define __packed__
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_VALGRIND
|
||||
#define VG(x) x
|
||||
#else
|
||||
#define VG(x)
|
||||
#endif
|
||||
|
||||
#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
|
||||
|
||||
#define COMPILE_TIME_ASSERT(E) ((void)sizeof(char[1 - 2*!(E)]))
|
||||
|
||||
#endif /* _SNA_COMPILER_H_ */
|
|
@ -0,0 +1,825 @@
|
|||
/**
|
||||
* \file drm.h
|
||||
* Header for the Direct Rendering Manager
|
||||
*
|
||||
* \author Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
*
|
||||
* \par Acknowledgments:
|
||||
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _DRM_H_
|
||||
#define _DRM_H_
|
||||
|
||||
#include <stddef.h>
|
||||
//#include <asm/ioctl.h>
|
||||
|
||||
typedef int8_t __s8;
|
||||
typedef uint8_t __u8;
|
||||
typedef int16_t __s16;
|
||||
typedef uint16_t __u16;
|
||||
typedef int32_t __s32;
|
||||
typedef uint32_t __u32;
|
||||
typedef int64_t __s64;
|
||||
typedef uint64_t __u64;
|
||||
typedef unsigned int drm_handle_t;
|
||||
|
||||
|
||||
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
|
||||
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
|
||||
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
|
||||
#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
|
||||
|
||||
#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
|
||||
#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
|
||||
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
|
||||
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
|
||||
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
|
||||
|
||||
typedef unsigned int drm_context_t;
|
||||
typedef unsigned int drm_drawable_t;
|
||||
typedef unsigned int drm_magic_t;
|
||||
|
||||
/**
 * Cliprect.
 *
 * \warning: If you change this structure, make sure you change
 * XF86DRIClipRectRec in the server as well
 *
 * \note KW: Actually it's illegal to change either for
 * backwards-compatibility reasons.
 */
struct drm_clip_rect {
	unsigned short x1;
	unsigned short y1;
	unsigned short x2;
	unsigned short y2;
};

/**
 * Drawable information.
 */
struct drm_drawable_info {
	unsigned int num_rects;
	struct drm_clip_rect *rects;
};

/**
 * Texture region,
 */
struct drm_tex_region {
	unsigned char next;    /* index of next region in the LRU list */
	unsigned char prev;    /* index of previous region in the LRU list */
	unsigned char in_use;
	unsigned char padding;
	unsigned int age;
};

/**
 * Hardware lock.
 *
 * The lock structure is a simple cache-line aligned integer. To avoid
 * processor bus contention on a multiprocessor system, there should not be any
 * other data stored in the same cache line.
 */
struct drm_hw_lock {
	__volatile__ unsigned int lock;		/**< lock variable */
	char padding[60];			/**< Pad to cache line */
};
|
||||
|
||||
/**
 * DRM_IOCTL_VERSION ioctl argument type.
 *
 * Caller supplies the three buffers and their capacities; the driver fills
 * them in and writes back the actual lengths.
 *
 * \sa drmGetVersion().
 */
struct drm_version {
	int version_major;	  /**< Major version */
	int version_minor;	  /**< Minor version */
	int version_patchlevel;	  /**< Patch level */
	size_t name_len;	  /**< Length of name buffer */
	char *name;		  /**< Name of driver */
	size_t date_len;	  /**< Length of date buffer */
	char *date;		  /**< User-space buffer to hold date */
	size_t desc_len;	  /**< Length of desc buffer */
	char *desc;		  /**< User-space buffer to hold desc */
};

/**
 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
 *
 * \sa drmGetBusid() and drmSetBusId().
 */
struct drm_unique {
	size_t unique_len;	  /**< Length of unique */
	char *unique;		  /**< Unique name for driver instantiation */
};

struct drm_list {
	int count;		  /**< Length of user-space structures */
	struct drm_version *version;
};

/* Placeholder argument for the legacy BLOCK/UNBLOCK ioctls. */
struct drm_block {
	int unused;
};
|
||||
|
||||
/**
 * DRM_IOCTL_CONTROL ioctl argument type.
 *
 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
 */
struct drm_control {
	enum {
		DRM_ADD_COMMAND,
		DRM_RM_COMMAND,
		DRM_INST_HANDLER,
		DRM_UNINST_HANDLER
	} func;
	int irq;
};

/**
 * Type of memory to map.
 */
enum drm_map_type {
	_DRM_FRAME_BUFFER = 0,	  /**< WC (no caching), no core dump */
	_DRM_REGISTERS = 1,	  /**< no caching, no core dump */
	_DRM_SHM = 2,		  /**< shared, cached */
	_DRM_AGP = 3,		  /**< AGP/GART */
	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
	_DRM_GEM = 6,		  /**< GEM object */
};

/**
 * Memory mapping flags.
 */
enum drm_map_flags {
	_DRM_RESTRICTED = 0x01,	     /**< Cannot be mapped to user-virtual */
	_DRM_READ_ONLY = 0x02,
	_DRM_LOCKED = 0x04,	     /**< shared, cached, locked */
	_DRM_KERNEL = 0x08,	     /**< kernel requires access */
	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
	_DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
	_DRM_REMOVABLE = 0x40,	     /**< Removable mapping */
	_DRM_DRIVER = 0x80	     /**< Managed by driver */
};

/* Associates a private mapping with a rendering context. */
struct drm_ctx_priv_map {
	unsigned int ctx_id;	 /**< Context requesting private mapping */
	void *handle;		 /**< Handle of map */
};
|
||||
|
||||
/**
 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 * argument type.
 *
 * \sa drmAddMap().
 */
struct drm_map {
	unsigned long offset;	 /**< Requested physical address (0 for SAREA)*/
	unsigned long size;	 /**< Requested physical size (bytes) */
	enum drm_map_type type;	 /**< Type of memory to map */
	enum drm_map_flags flags;	 /**< Flags */
	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
				 /**< Kernel-space: kernel-virtual address */
	int mtrr;		 /**< MTRR slot used */
	/* Private data */
};

/**
 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 */
struct drm_client {
	int idx;		/**< Which client desired? */
	int auth;		/**< Is client authenticated? */
	unsigned long pid;	/**< Process ID */
	unsigned long uid;	/**< User ID */
	unsigned long magic;	/**< Magic */
	unsigned long iocs;	/**< Ioctl count */
};
|
||||
|
||||
/* Kinds of counters reported through DRM_IOCTL_GET_STATS. */
enum drm_stat_type {
	_DRM_STAT_LOCK,
	_DRM_STAT_OPENS,
	_DRM_STAT_CLOSES,
	_DRM_STAT_IOCTLS,
	_DRM_STAT_LOCKS,
	_DRM_STAT_UNLOCKS,
	_DRM_STAT_VALUE,	/**< Generic value */
	_DRM_STAT_BYTE,		/**< Generic byte counter (1024bytes/K) */
	_DRM_STAT_COUNT,	/**< Generic non-byte counter (1000/k) */

	_DRM_STAT_IRQ,		/**< IRQ */
	_DRM_STAT_PRIMARY,	/**< Primary DMA bytes */
	_DRM_STAT_SECONDARY,	/**< Secondary DMA bytes */
	_DRM_STAT_DMA,		/**< DMA */
	_DRM_STAT_SPECIAL,	/**< Special DMA (e.g., priority or polled) */
	_DRM_STAT_MISSED	/**< Missed DMA opportunity */
	    /* Add to the *END* of the list */
};

/**
 * DRM_IOCTL_GET_STATS ioctl argument type.
 */
struct drm_stats {
	unsigned long count;	/* number of valid entries in data[] */
	struct {
		unsigned long value;
		enum drm_stat_type type;
	} data[15];
};
|
||||
|
||||
/**
 * Hardware locking flags.
 */
enum drm_lock_flags {
	_DRM_LOCK_READY = 0x01,	     /**< Wait until hardware is ready for DMA */
	_DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
	_DRM_LOCK_FLUSH = 0x04,	     /**< Flush this context's DMA queue first */
	_DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
	/* These *HALT* flags aren't supported yet
	   -- they will be used to support the
	   full-screen DGA-like mode. */
	_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
	_DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
};

/**
 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 *
 * \sa drmGetLock() and drmUnlock().
 */
struct drm_lock {
	int context;
	enum drm_lock_flags flags;
};
|
||||
|
||||
/**
 * DMA flags
 *
 * \warning
 * These values \e must match xf86drm.h.
 *
 * \sa drm_dma.
 */
enum drm_dma_flags {
	/* Flags for DMA buffer dispatch */
	_DRM_DMA_BLOCK = 0x01,	      /**<
				       * Block until buffer dispatched.
				       *
				       * \note The buffer may not yet have
				       * been processed by the hardware --
				       * getting a hardware lock with the
				       * hardware quiescent will ensure
				       * that the buffer has been
				       * processed.
				       */
	_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
	_DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */

	/* Flags for DMA buffer request */
	_DRM_DMA_WAIT = 0x10,	      /**< Wait for free buffers */
	_DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
	_DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
};
|
||||
|
||||
/**
 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
 *
 * \sa drmAddBufs().
 */
struct drm_buf_desc {
	int count;		 /**< Number of buffers of this size */
	int size;		 /**< Size in bytes */
	int low_mark;		 /**< Low water mark */
	int high_mark;		 /**< High water mark */
	enum {
		_DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
		_DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
		_DRM_SG_BUFFER = 0x04,	/**< Scatter/gather memory buffer */
		_DRM_FB_BUFFER = 0x08,	/**< Buffer is in frame buffer */
		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
	} flags;
	unsigned long agp_start; /**<
				  * Start address of where the AGP buffers are
				  * in the AGP aperture
				  */
};

/**
 * DRM_IOCTL_INFO_BUFS ioctl argument type.
 */
struct drm_buf_info {
	int count;		/**< Entries in list */
	struct drm_buf_desc *list;
};

/**
 * DRM_IOCTL_FREE_BUFS ioctl argument type.
 */
struct drm_buf_free {
	int count;		/* number of buffer indices in list */
	int *list;		/* indices of the buffers to free */
};
|
||||
|
||||
/**
 * Buffer information
 *
 * \sa drm_buf_map.
 */
struct drm_buf_pub {
	int idx;		       /**< Index into the master buffer list */
	int total;		       /**< Buffer size */
	int used;		       /**< Amount of buffer in use (for DMA) */
	void *address;		       /**< Address of buffer */
};

/**
 * DRM_IOCTL_MAP_BUFS ioctl argument type.
 */
struct drm_buf_map {
	int count;		/**< Length of the buffer list */
#ifdef __cplusplus
	void *virt;		/* "virtual" is a keyword in C++ */
#else
	void *virtual;		/**< Mmap'd area in user-virtual */
#endif
	struct drm_buf_pub *list;	/**< Buffer information */
};

/**
 * DRM_IOCTL_DMA ioctl argument type.
 *
 * Indices here refer to the offset into the buffer list in drm_buf_get.
 *
 * \sa drmDMA().
 */
struct drm_dma {
	int context;			  /**< Context handle */
	int send_count;			  /**< Number of buffers to send */
	int *send_indices;		  /**< List of handles to buffers */
	int *send_sizes;		  /**< Lengths of data to send */
	enum drm_dma_flags flags;	  /**< Flags */
	int request_count;		  /**< Number of buffers requested */
	int request_size;		  /**< Desired size for buffers */
	int *request_indices;		  /**< Buffer information */
	int *request_sizes;
	int granted_count;		  /**< Number of buffers granted */
};
|
||||
|
||||
/* Per-context behaviour flags. */
enum drm_ctx_flags {
	_DRM_CONTEXT_PRESERVED = 0x01,
	_DRM_CONTEXT_2DONLY = 0x02
};

/**
 * DRM_IOCTL_ADD_CTX ioctl argument type.
 *
 * \sa drmCreateContext() and drmDestroyContext().
 */
struct drm_ctx {
	drm_context_t handle;
	enum drm_ctx_flags flags;
};

/**
 * DRM_IOCTL_RES_CTX ioctl argument type.
 */
struct drm_ctx_res {
	int count;
	struct drm_ctx *contexts;
};

/**
 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
 */
struct drm_draw {
	drm_drawable_t handle;
};

/**
 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 */
typedef enum {
	DRM_DRAWABLE_CLIPRECTS,
} drm_drawable_info_type_t;

struct drm_update_draw {
	drm_drawable_t handle;
	unsigned int type;	/* a drm_drawable_info_type_t value */
	unsigned int num;
	unsigned long long data;
};

/**
 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
 */
struct drm_auth {
	drm_magic_t magic;
};
|
||||
|
||||
/**
 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
 *
 * \sa drmGetInterruptFromBusID().
 */
struct drm_irq_busid {
	int irq;	/**< IRQ number */
	int busnum;	/**< bus number */
	int devnum;	/**< device number */
	int funcnum;	/**< function number */
};

enum drm_vblank_seq_type {
	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
	_DRM_VBLANK_EVENT = 0x4000000,	/**< Send event instead of blocking */
	_DRM_VBLANK_FLIP = 0x8000000,	/**< Scheduled buffer swap should flip */
	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking, unsupported */
};

/* Split a seq_type value into its wait-type and modifier-flag parts. */
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
				_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
|
||||
|
||||
/* Input half of DRM_IOCTL_WAIT_VBLANK. */
struct drm_wait_vblank_request {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	unsigned long signal;
};

/* Output half of DRM_IOCTL_WAIT_VBLANK. */
struct drm_wait_vblank_reply {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	long tval_sec;		/* time of the vblank, seconds part */
	long tval_usec;		/* time of the vblank, microseconds part */
};

/**
 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
 *
 * \sa drmWaitVBlank().
 */
union drm_wait_vblank {
	struct drm_wait_vblank_request request;
	struct drm_wait_vblank_reply reply;
};

/* Values for drm_modeset_ctl.cmd */
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2

/**
 * DRM_IOCTL_MODESET_CTL ioctl argument type
 *
 * \sa drmModesetCtl().
 */
struct drm_modeset_ctl {
	__u32 crtc;
	__u32 cmd;
};
|
||||
|
||||
/**
 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
 *
 * \sa drmAgpEnable().
 */
struct drm_agp_mode {
	unsigned long mode;	/**< AGP mode */
};

/**
 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
 *
 * \sa drmAgpAlloc() and drmAgpFree().
 */
struct drm_agp_buffer {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for binding / unbinding */
	unsigned long type;	/**< Type of memory to allocate */
	unsigned long physical;	/**< Physical used by i810 */
};

/**
 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
 *
 * \sa drmAgpBind() and drmAgpUnbind().
 */
struct drm_agp_binding {
	unsigned long handle;	/**< From drm_agp_buffer */
	unsigned long offset;	/**< In bytes -- will round to page boundary */
};

/**
 * DRM_IOCTL_AGP_INFO ioctl argument type.
 *
 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
 * drmAgpVendorId() and drmAgpDeviceId().
 */
struct drm_agp_info {
	int agp_version_major;
	int agp_version_minor;
	unsigned long mode;
	unsigned long aperture_base;	/* physical address */
	unsigned long aperture_size;	/* bytes */
	unsigned long memory_allowed;	/* bytes */
	unsigned long memory_used;

	/* PCI information */
	unsigned short id_vendor;
	unsigned short id_device;
};
|
||||
|
||||
/**
 * DRM_IOCTL_SG_ALLOC ioctl argument type.
 */
struct drm_scatter_gather {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for mapping / unmapping */
};

/**
 * DRM_IOCTL_SET_VERSION ioctl argument type.
 */
struct drm_set_version {
	int drm_di_major;	/* driver-independent interface version */
	int drm_di_minor;
	int drm_dd_major;	/* driver-dependent interface version */
	int drm_dd_minor;
};

/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
	/** Handle of the object to be closed. */
	__u32 handle;
	__u32 pad;
};

/** DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
	/** Handle for the object being named */
	__u32 handle;

	/** Returned global name */
	__u32 name;
};

/** DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
	/** Name of object being opened */
	__u32 name;

	/** Returned handle for the object */
	__u32 handle;

	/** Returned size of the object */
	__u64 size;
};

/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
	__u64 capability;	/* one of the DRM_CAP_* values */
	__u64 value;		/* returned capability value */
};
|
||||
|
||||
/* Flag accepted by PRIME handle-to-fd: make the dmabuf fd close-on-exec. */
#define DRM_CLOEXEC O_CLOEXEC
struct drm_prime_handle {
	__u32 handle;

	/** Flags.. only applicable for handle->fd */
	__u32 flags;

	/** Returned dmabuf file descriptor */
	__s32 fd;
};
|
||||
|
||||
//#include "drm_mode.h"

/* The ioctl-number table below is compiled out in this port: the _IO* macros
 * come from <asm/ioctl.h>, which is not available here (see the commented
 * include at the top of the file).  Kept for reference. */
#if 0

#define DRM_IOCTL_BASE			'd'
#define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type)		_IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type)		_IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type)		_IOWR(DRM_IOCTL_BASE,nr,type)

#define DRM_IOCTL_VERSION		DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE		DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC		DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID		DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP		DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT		DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS		DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL		DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_GEM_CLOSE		DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_GET_CAP		DRM_IOWR(0x0c, struct drm_get_cap)

#define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK			DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK		DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL		DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP		DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS		DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS		DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS		DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS		DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS		DRM_IOW( 0x1a, struct drm_buf_free)

#define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, struct drm_map)

#define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX 	DRM_IOWR(0x1d, struct drm_ctx_priv_map)

#define DRM_IOCTL_SET_MASTER            DRM_IO(0x1e)
#define DRM_IOCTL_DROP_MASTER           DRM_IO(0x1f)

#define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX		DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX		DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX		DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX		DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW		DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW		DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA			DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK			DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)

#define DRM_IOCTL_PRIME_HANDLE_TO_FD    DRM_IOWR(0x2d, struct drm_prime_handle)
#define DRM_IOCTL_PRIME_FD_TO_HANDLE    DRM_IOWR(0x2e, struct drm_prime_handle)

#define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
#define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
#define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO		DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC		DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE		DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND		DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND		DRM_IOW( 0x37, struct drm_agp_binding)

#define DRM_IOCTL_SG_ALLOC		DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE		DRM_IOW( 0x39, struct drm_scatter_gather)

#define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)

#define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, struct drm_update_draw)

#define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC		DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC		DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR		DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA		DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA		DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_GETENCODER	DRM_IOWR(0xA6, struct drm_mode_get_encoder)
#define DRM_IOCTL_MODE_GETCONNECTOR	DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE	DRM_IOWR(0xA8, struct drm_mode_mode_cmd)
#define DRM_IOCTL_MODE_DETACHMODE	DRM_IOWR(0xA9, struct drm_mode_mode_cmd)

#define DRM_IOCTL_MODE_GETPROPERTY	DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY	DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB	DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB		DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB		DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_RMFB		DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP	DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB		DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)

#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB    DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB    DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
#define DRM_IOCTL_MODE_GETPLANE	DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE	DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2		DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES	DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY	DRM_IOWR(0xBA, struct drm_mode_obj_set_property)

#endif
|
||||
|
||||
/**
 * Device specific ioctls should only be in their respective headers
 * The device specific ioctl range is from 0x40 to 0x99.
 * Generic IOCTLS restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE                0x40
#define DRM_COMMAND_END			0xA0

/**
 * Header for events written back to userspace on the drm fd. The
 * type defines the type of event, the length specifies the total
 * length of the event (including the header), and user_data is
 * typically a 64 bit value passed with the ioctl that triggered the
 * event. A read on the drm fd will always only return complete
 * events, that is, if for example the read buffer is 100 bytes, and
 * there are two 64 byte events pending, only one will be returned.
 *
 * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
 * up are chipset specific.
 */
struct drm_event {
	__u32 type;
	__u32 length;
};

/* Generic event type codes (drm_event.type). */
#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02

/* Payload for DRM_EVENT_VBLANK and DRM_EVENT_FLIP_COMPLETE. */
struct drm_event_vblank {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
	__u32 sequence;
	__u32 reserved;
};

/* Capability ids queried through DRM_IOCTL_GET_CAP. */
#define DRM_CAP_DUMB_BUFFER 0x1
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
#define DRM_CAP_PRIME 0x5

/* Bits returned for DRM_CAP_PRIME. */
#define DRM_PRIME_CAP_IMPORT 0x1
#define DRM_PRIME_CAP_EXPORT 0x2
|
||||
|
||||
/* typedef area: legacy *_t aliases kept for source compatibility with old
 * userspace that predates the bare struct/enum tag names. */
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;

typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
||||
|
||||
#endif
|
|
@ -0,0 +1,16 @@
|
|||
#ifndef GEN4_VERTEX_H
#define GEN4_VERTEX_H

#include "compiler.h"

#include "sna.h"
#include "sna_render.h"

/* Vertex-buffer management for the gen4+ render backends:
 * flush emits pending vertices, finish retires the current VBO and
 * returns remaining space, close releases the vertex state. */
void gen4_vertex_flush(struct sna *sna);
int gen4_vertex_finish(struct sna *sna);
void gen4_vertex_close(struct sna *sna);

/* Picks a composite vertex-emitter variant for the given operation. */
unsigned gen4_choose_composite_emitter(struct sna_composite_op *tmp);
//unsigned gen4_choose_spans_emitter(struct sna_composite_spans_op *tmp);

#endif /* GEN4_VERTEX_H */
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,944 @@
|
|||
/*
|
||||
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
|
||||
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
|
||||
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _I915_DRM_H_
|
||||
#define _I915_DRM_H_
|
||||
|
||||
#include "drm.h"
|
||||
|
||||
/* Please note that modifications to all structs defined here are
|
||||
* subject to backwards-compatibility constraints.
|
||||
*/
|
||||
|
||||
|
||||
/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

/* Argument for the legacy DRM_I915_INIT ioctl: describes ring buffer,
 * framebuffer layout and chipset for DMA setup/teardown/resume. */
typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;
|
||||
|
||||
/* Shared memory area between the i915 kernel driver and legacy userspace.
 * Layout is ABI; fields may only be appended, never reordered or removed. */
typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;      /* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;           /* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;
|
||||
|
||||
/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY    0x1
#define I915_BOX_FLIP          0x2
#define I915_BOX_WAIT          0x4
#define I915_BOX_TEXTURE_LOAD  0x8
#define I915_BOX_LOST_CONTEXT  0x10
|
||||
|
||||
/* I915 specific ioctls
 * The device specific ioctl range is 0x40 to 0x79.
 *
 * These are the driver-private ioctl function numbers, added to
 * DRM_COMMAND_BASE when building the DRM_IOCTL_I915_* request codes below.
 * Note the gaps at 0x10 and 0x12 — presumably retired/reserved numbers;
 * do not reuse them.
 */
#define DRM_I915_INIT 0x00
#define DRM_I915_FLUSH 0x01
#define DRM_I915_FLIP 0x02
#define DRM_I915_BATCHBUFFER 0x03
#define DRM_I915_IRQ_EMIT 0x04
#define DRM_I915_IRQ_WAIT 0x05
#define DRM_I915_GETPARAM 0x06
#define DRM_I915_SETPARAM 0x07
#define DRM_I915_ALLOC 0x08
#define DRM_I915_FREE 0x09
#define DRM_I915_INIT_HEAP 0x0a
#define DRM_I915_CMDBUFFER 0x0b
#define DRM_I915_DESTROY_HEAP 0x0c
#define DRM_I915_SET_VBLANK_PIPE 0x0d
#define DRM_I915_GET_VBLANK_PIPE 0x0e
#define DRM_I915_VBLANK_SWAP 0x0f
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_GEM_INIT 0x13
#define DRM_I915_GEM_EXECBUFFER 0x14
#define DRM_I915_GEM_PIN 0x15
#define DRM_I915_GEM_UNPIN 0x16
#define DRM_I915_GEM_BUSY 0x17
#define DRM_I915_GEM_THROTTLE 0x18
#define DRM_I915_GEM_ENTERVT 0x19
#define DRM_I915_GEM_LEAVEVT 0x1a
#define DRM_I915_GEM_CREATE 0x1b
#define DRM_I915_GEM_PREAD 0x1c
#define DRM_I915_GEM_PWRITE 0x1d
#define DRM_I915_GEM_MMAP 0x1e
#define DRM_I915_GEM_SET_DOMAIN 0x1f
#define DRM_I915_GEM_SW_FINISH 0x20
#define DRM_I915_GEM_SET_TILING 0x21
#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
#define DRM_I915_GEM_MADVISE 0x26
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
#define DRM_I915_OVERLAY_ATTRS 0x28
#define DRM_I915_GEM_EXECBUFFER2 0x29
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
#define DRM_I915_GEM_WAIT 0x2c
#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
#define DRM_I915_GEM_SET_CACHEING 0x2f
#define DRM_I915_GEM_GET_CACHEING 0x30
#define DRM_I915_REG_READ 0x31
|
||||
|
||||
/* Full ioctl request codes: direction/size-encoded via the DRM_IO* macros,
 * pairing each DRM_I915_* function number with its argument struct.
 */
#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
/* NOTE(review): HWS_ADDR is encoded with struct drm_i915_gem_init rather than
 * drm_i915_hws_addr_t; this matches the upstream kernel header, so it is kept
 * as-is for ABI compatibility even though it looks like a mismatch. */
#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHEING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHEING, struct drm_i915_gem_cacheing)
#define DRM_IOCTL_I915_GEM_GET_CACHEING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHEING, struct drm_i915_gem_cacheing)
#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
/* Fixed: was built from DRM_I915_SET_SPRITE_COLORKEY (copy-paste error),
 * which made GET and SET produce the identical request code. */
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
|
||||
|
||||
/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;	/* agp offset */
	int used;	/* nr bytes in use */
	int DR1;	/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;	/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char *buf;	/* pointer to userspace command buffer */
	int sz;		/* nr bytes in buf */
	int DR1;	/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;	/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
	int *irq_seq;	/* out: sequence number of the emitted irq */
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;	/* in: sequence number to wait for */
} drm_i915_irq_wait_t;
|
||||
|
||||
/* Ioctl to query kernel params:
 *
 * Parameter identifiers for DRM_IOCTL_I915_GETPARAM; mostly feature
 * capability probes ("HAS_*") plus a few runtime values.
 */
#define I915_PARAM_IRQ_ACTIVE 1
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
#define I915_PARAM_NUM_FENCES_AVAIL 6
#define I915_PARAM_HAS_OVERLAY 7
#define I915_PARAM_HAS_PAGEFLIPPING 8
#define I915_PARAM_HAS_EXECBUF2 9
#define I915_PARAM_HAS_BSD 10
#define I915_PARAM_HAS_BLT 11
#define I915_PARAM_HAS_RELAXED_FENCING 12
#define I915_PARAM_HAS_COHERENT_RINGS 13
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
#define I915_PARAM_HAS_RELAXED_DELTA 15
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
#define I915_PARAM_HAS_LLC 17
#define I915_PARAM_HAS_ALIASING_PPGTT 18
#define I915_PARAM_HAS_WAIT_TIMEOUT 19

typedef struct drm_i915_getparam {
	int param;	/* in: one of I915_PARAM_* */
	int *value;	/* out: result written through this pointer */
} drm_i915_getparam_t;

/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
#define I915_SETPARAM_NUM_USED_FENCES 4

typedef struct drm_i915_setparam {
	int param;	/* in: one of I915_SETPARAM_* */
	int value;	/* in: value to set */
} drm_i915_setparam_t;
|
||||
|
||||
/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;	/* in: which region (I915_MEM_REGION_*) */
	int alignment;	/* in: required alignment of the allocation */
	int size;	/* in: number of bytes requested */
	int *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;	/* in: which region the block came from */
	int region_offset;	/* in: offset previously returned by alloc */
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;	/* in: which region to create the heap in */
	int size;	/* in: heap size in bytes */
	int start;	/* in: start offset of the heap in the region */
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (eg on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;	/* in: region whose heap should be destroyed */
} drm_i915_mem_destroy_heap_t;
|
||||
|
||||
/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define DRM_I915_VBLANK_PIPE_A 1
#define DRM_I915_VBLANK_PIPE_B 2

typedef struct drm_i915_vblank_pipe {
	int pipe;	/* bitmask of DRM_I915_VBLANK_PIPE_* */
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;	/* drawable to swap */
	enum drm_vblank_seq_type seqtype;	/* absolute or relative sequence */
	unsigned int sequence;	/* vblank count to swap at */
} drm_i915_vblank_swap_t;

/* Argument for DRM_I915_HWS_ADDR: GTT address of the hardware status page. */
typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;
|
||||
|
||||
/* Argument for DRM_IOCTL_I915_GEM_INIT: the GTT range GEM may manage. */
struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};

/* Argument for DRM_IOCTL_I915_GEM_CREATE: allocate a new GEM object. */
struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;	/* explicit padding to keep the struct 8-byte aligned */
};
|
||||
|
||||
/* Argument for DRM_IOCTL_I915_GEM_PREAD: copy object contents to userspace. */
struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

/* Argument for DRM_IOCTL_I915_GEM_PWRITE: copy userspace data into an object. */
struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};
|
||||
|
||||
/* Argument for DRM_IOCTL_I915_GEM_MMAP: CPU-map (part of) an object. */
struct drm_i915_gem_mmap {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/** Offset in the object to map. */
	__u64 offset;
	/**
	 * Length of data to map.
	 *
	 * The value will be page-aligned.
	 */
	__u64 size;
	/**
	 * Returned pointer the data was mapped at.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 addr_ptr;
};

/* Argument for DRM_IOCTL_I915_GEM_MMAP_GTT: get a fake mmap offset for
 * mapping the object through the GTT aperture. */
struct drm_i915_gem_mmap_gtt {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;
};
|
||||
|
||||
/* Argument for DRM_IOCTL_I915_GEM_SET_DOMAIN: move an object into the
 * given read/write memory domains (I915_GEM_DOMAIN_* below). */
struct drm_i915_gem_set_domain {
	/** Handle for the object */
	__u32 handle;

	/** New read domains */
	__u32 read_domains;

	/** New write domain */
	__u32 write_domain;
};

/* Argument for DRM_IOCTL_I915_GEM_SW_FINISH: notify the kernel that CPU
 * writes through a CPU mapping of the object are complete. */
struct drm_i915_gem_sw_finish {
	/** Handle for the object */
	__u32 handle;
};
|
||||
|
||||
/* One relocation to be applied by the execbuffer ioctl: patch a location
 * in a buffer with the final GTT offset of a target buffer. */
struct drm_i915_gem_relocation_entry {
	/**
	 * Handle of the buffer being pointed to by this relocation entry.
	 *
	 * It's appealing to make this be an index into the mm_validate_entry
	 * list to refer to the buffer, but this allows the driver to create
	 * a relocation list for state buffers and not re-write it per
	 * exec using the buffer.
	 */
	__u32 target_handle;

	/**
	 * Value to be added to the offset of the target buffer to make up
	 * the relocation entry.
	 */
	__u32 delta;

	/** Offset in the buffer the relocation entry will be written into */
	__u64 offset;

	/**
	 * Offset value of the target buffer that the relocation entry was last
	 * written as.
	 *
	 * If the buffer has the same offset as last time, we can skip syncing
	 * and writing the relocation.  This value is written back out by
	 * the execbuffer ioctl when the relocation is written.
	 */
	__u64 presumed_offset;

	/**
	 * Target memory domains read by this operation.
	 */
	__u32 read_domains;

	/**
	 * Target memory domains written by this operation.
	 *
	 * Note that only one domain may be written by the whole
	 * execbuffer operation, so that where there are conflicts,
	 * the application will get -EINVAL back.
	 */
	__u32 write_domain;
};

/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU		0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER		0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER		0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND		0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX		0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT		0x00000040
/** @} */
|
||||
|
||||
/* One buffer entry in the legacy execbuffer validate list. */
struct drm_i915_gem_exec_object {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;
};

/* Argument for DRM_IOCTL_I915_GEM_EXECBUFFER (legacy; superseded by
 * execbuffer2 below). */
struct drm_i915_gem_execbuffer {
	/**
	 * List of buffers to be validated with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
	 *
	 * These buffers must be listed in an order such that all relocations
	 * a buffer is performing refer to buffers that have already appeared
	 * in the validate list.
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;	/* hw flags for GFX_OP_DRAWRECT_INFO */
	__u32 DR4;	/* window origin for GFX_OP_DRAWRECT_INFO */
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
};
|
||||
|
||||
/* One buffer entry in the execbuffer2 validate list; like exec_object but
 * with a flags word and reserved fields. */
struct drm_i915_gem_exec_object2 {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;

#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
	__u64 flags;
	__u64 rsvd1;
	__u64 rsvd2;
};

/* Argument for DRM_IOCTL_I915_GEM_EXECBUFFER2. */
struct drm_i915_gem_execbuffer2 {
	/**
	 * List of gem_exec_object2 structs
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;	/* hw flags for GFX_OP_DRAWRECT_INFO */
	__u32 DR4;	/* window origin for GFX_OP_DRAWRECT_INFO */
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
/* Ring selection, encoded in the low bits of "flags" below. */
#define I915_EXEC_RING_MASK              (7<<0)
#define I915_EXEC_DEFAULT                (0<<0)
#define I915_EXEC_RENDER                 (1<<0)
#define I915_EXEC_BSD                    (2<<0)
#define I915_EXEC_BLT                    (3<<0)

/* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK 	(3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
	__u64 flags;
	__u64 rsvd1; /* now used for context info */
	__u64 rsvd2;
};

/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET	(1<<8)

#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
/* Helpers to stash/retrieve the context id in execbuffer2's rsvd1 field.
 * NOTE(review): set_context_id expands to a bare assignment (no enclosing
 * parentheses); this mirrors the upstream header, so it is left unchanged. */
#define i915_execbuffer2_set_context_id(eb2, context) \
	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
|
||||
|
||||
/* Argument for DRM_IOCTL_I915_GEM_PIN: pin an object into the GTT. */
struct drm_i915_gem_pin {
	/** Handle of the buffer to be pinned. */
	__u32 handle;
	__u32 pad;

	/** alignment required within the aperture */
	__u64 alignment;

	/** Returned GTT offset of the buffer. */
	__u64 offset;
};

/* Argument for DRM_IOCTL_I915_GEM_UNPIN. */
struct drm_i915_gem_unpin {
	/** Handle of the buffer to be unpinned. */
	__u32 handle;
	__u32 pad;
};

/* Argument for DRM_IOCTL_I915_GEM_BUSY: query whether the GPU is still
 * using an object. */
struct drm_i915_gem_busy {
	/** Handle of the buffer to check for busy */
	__u32 handle;

	/** Return busy status (1 if busy, 0 if idle).
	 * The high word is used to indicate on which rings the object
	 * currently resides:
	 *  16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
	 */
	__u32 busy;
};
|
||||
|
||||
/* Cache-level values for the set/get "cacheing" ioctls.
 * NOTE(review): "CACHEING" is the historical (misspelled) name used by the
 * kernel ABI of this era; it must not be renamed here. */
#define I915_CACHEING_NONE		0
#define I915_CACHEING_CACHED		1

struct drm_i915_gem_cacheing {
	/**
	 * Handle of the buffer to set/get the cacheing level of. */
	__u32 handle;

	/**
	 * Cacheing level to apply or return value
	 *
	 * bits0-15 are for generic cacheing control (i.e. the above defined
	 * values). bits16-31 are reserved for platform-specific variations
	 * (e.g. l3$ caching on gen7). */
	__u32 cacheing;
};
|
||||
|
||||
/* Tiling layouts for GEM objects. */
#define I915_TILING_NONE	0
#define I915_TILING_X		1
#define I915_TILING_Y		2

/* Bit-6 address swizzling modes reported for CPU access to tiled buffers. */
#define I915_BIT_6_SWIZZLE_NONE		0
#define I915_BIT_6_SWIZZLE_9		1
#define I915_BIT_6_SWIZZLE_9_10		2
#define I915_BIT_6_SWIZZLE_9_11		3
#define I915_BIT_6_SWIZZLE_9_10_11	4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN	5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17		6
#define I915_BIT_6_SWIZZLE_9_10_17	7

/* Argument for DRM_IOCTL_I915_GEM_SET_TILING. */
struct drm_i915_gem_set_tiling {
	/** Handle of the buffer to have its tiling state updated */
	__u32 handle;

	/**
	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 *
	 * This value is to be set on request, and will be updated by the
	 * kernel on successful return with the actual chosen tiling layout.
	 *
	 * The tiling mode may be demoted to I915_TILING_NONE when the system
	 * has bit 6 swizzling that can't be managed correctly by GEM.
	 *
	 * Buffer contents become undefined when changing tiling_mode.
	 */
	__u32 tiling_mode;

	/**
	 * Stride in bytes for the object when in I915_TILING_X or
	 * I915_TILING_Y.
	 */
	__u32 stride;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;
};

/* Argument for DRM_IOCTL_I915_GEM_GET_TILING. */
struct drm_i915_gem_get_tiling {
	/** Handle of the buffer to get tiling state for. */
	__u32 handle;

	/**
	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 */
	__u32 tiling_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;
};

/* Argument for DRM_IOCTL_I915_GEM_GET_APERTURE. */
struct drm_i915_gem_get_aperture {
	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
	__u64 aper_size;

	/**
	 * Available space in the aperture used by i915_gem_execbuffer, in
	 * bytes
	 */
	__u64 aper_available_size;
};
|
||||
|
||||
/* Argument for DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID. */
struct drm_i915_get_pipe_from_crtc_id {
	/** ID of CRTC being requested **/
	__u32 crtc_id;

	/** pipe of requested CRTC **/
	__u32 pipe;
};

/* Values for drm_i915_gem_madvise.madv */
#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */

/* Argument for DRM_IOCTL_I915_GEM_MADVISE: advise the kernel about the
 * expected future use of an object's backing store. */
struct drm_i915_gem_madvise {
	/** Handle of the buffer to change the backing store advice */
	__u32 handle;

	/* Advice: either the buffer will be needed again in the near future,
	 * or won't be and could be discarded under memory pressure.
	 */
	__u32 madv;

	/** Whether the backing store still exists. */
	__u32 retained;
};
|
||||
|
||||
/* flags for drm_intel_overlay_put_image.flags: source pixel format (low
 * byte), color depth, channel swap, and enable bit. */
#define I915_OVERLAY_TYPE_MASK 		0xff
#define I915_OVERLAY_YUV_PLANAR 	0x01
#define I915_OVERLAY_YUV_PACKED 	0x02
#define I915_OVERLAY_RGB		0x03

#define I915_OVERLAY_DEPTH_MASK		0xff00
#define I915_OVERLAY_RGB24		0x1000
#define I915_OVERLAY_RGB16		0x2000
#define I915_OVERLAY_RGB15		0x3000
#define I915_OVERLAY_YUV422		0x0100
#define I915_OVERLAY_YUV411		0x0200
#define I915_OVERLAY_YUV420		0x0300
#define I915_OVERLAY_YUV410		0x0400

#define I915_OVERLAY_SWAP_MASK		0xff0000
#define I915_OVERLAY_NO_SWAP		0x000000
#define I915_OVERLAY_UV_SWAP		0x010000
#define I915_OVERLAY_Y_SWAP		0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000

#define I915_OVERLAY_FLAGS_MASK		0xff000000
#define I915_OVERLAY_ENABLE		0x01000000

/* Argument for DRM_IOCTL_I915_OVERLAY_PUT_IMAGE: display a frame on the
 * hardware video overlay. */
struct drm_intel_overlay_put_image {
	/* various flags and src format description */
	__u32 flags;
	/* source picture description */
	__u32 bo_handle;
	/* stride values and offsets are in bytes, buffer relative */
	__u16 stride_Y; /* stride for packed formats */
	__u16 stride_UV;
	__u32 offset_Y; /* offset for packed formats */
	__u32 offset_U;
	__u32 offset_V;
	/* in pixels */
	__u16 src_width;
	__u16 src_height;
	/* to compensate the scaling factors for partially covered surfaces */
	__u16 src_scan_width;
	__u16 src_scan_height;
	/* output crtc description */
	__u32 crtc_id;
	__u16 dst_x;
	__u16 dst_y;
	__u16 dst_width;
	__u16 dst_height;
};

/* flags for drm_intel_overlay_attrs.flags */
#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
/* Argument for DRM_IOCTL_I915_OVERLAY_ATTRS: get/set overlay color
 * attributes and gamma ramp. */
struct drm_intel_overlay_attrs {
	__u32 flags;
	__u32 color_key;
	__s32 brightness;
	__u32 contrast;
	__u32 saturation;
	__u32 gamma0;
	__u32 gamma1;
	__u32 gamma2;
	__u32 gamma3;
	__u32 gamma4;
	__u32 gamma5;
};
|
||||
|
||||
/*
 * Intel sprite handling
 *
 * Color keying works with a min/mask/max tuple.  Both source and destination
 * color keying is allowed.
 *
 * Source keying:
 * Sprite pixels within the min & max values, masked against the color channels
 * specified in the mask field, will be transparent.  All other pixels will
 * be displayed on top of the primary plane.  For RGB surfaces, only the min
 * and mask fields will be used; ranged compares are not allowed.
 *
 * Destination keying:
 * Primary plane pixels that match the min value, masked against the color
 * channels specified in the mask field, will be replaced by corresponding
 * pixels from the sprite plane.
 *
 * Note that source & destination keying are exclusive; only one can be
 * active on a given plane.
 */

/* Values for drm_intel_sprite_colorkey.flags; exactly one should be set. */
#define I915_SET_COLORKEY_NONE		(1<<0) /* disable color key matching */
#define I915_SET_COLORKEY_DESTINATION	(1<<1)
#define I915_SET_COLORKEY_SOURCE	(1<<2)
/* Argument for the sprite colorkey get/set ioctls. */
struct drm_intel_sprite_colorkey {
	__u32 plane_id;		/* sprite plane to operate on */
	__u32 min_value;	/* low end of the key range */
	__u32 channel_mask;	/* which color channels participate in the compare */
	__u32 max_value;	/* high end of the key range (source keying only) */
	__u32 flags;		/* I915_SET_COLORKEY_* */
};
|
||||
|
||||
/* Argument for DRM_IOCTL_I915_GEM_WAIT: wait for an object to become idle. */
struct drm_i915_gem_wait {
	/** Handle of BO we shall wait on */
	__u32 bo_handle;
	__u32 flags;
	/** Number of nanoseconds to wait, Returns time remaining. */
	__s64 timeout_ns;
};

/* Argument for DRM_IOCTL_I915_GEM_CONTEXT_CREATE. */
struct drm_i915_gem_context_create {
	/*  output: id of new context*/
	__u32 ctx_id;
	__u32 pad;
};

/* Argument for DRM_IOCTL_I915_GEM_CONTEXT_DESTROY. */
struct drm_i915_gem_context_destroy {
	__u32 ctx_id;	/* in: context to destroy */
	__u32 pad;
};

/* Argument for DRM_IOCTL_I915_REG_READ: read a whitelisted hw register. */
struct drm_i915_reg_read {
	__u64 offset;	/* in: register offset */
	__u64 val; /* Return value */
};
#endif /* _I915_DRM_H_ */
|
|
@ -0,0 +1,152 @@
|
|||
|
||||
|
||||
/** enumeration of 3d consumers so some can maintain invariant state. */
enum last_3d {
	LAST_3D_OTHER,		/* unknown/external consumer; assume nothing */
	LAST_3D_VIDEO,		/* video (overlay/textured video) path */
	LAST_3D_RENDER,		/* render (composite) acceleration path */
	LAST_3D_ROTATION	/* rotation blit path */
};
|
||||
|
||||
|
||||
|
||||
/* Per-screen private state for the Intel 2D acceleration driver:
 * batch-buffer bookkeeping, per-generation render state, and the
 * acceleration limits discovered at initialisation time. */
typedef struct intel_screen_private {
	int cpp;

	/* Which execbuffer ring the open batch targets. */
#define RENDER_BATCH I915_EXEC_RENDER
#define BLT_BATCH I915_EXEC_BLT
	unsigned int current_batch;   /* RENDER_BATCH or BLT_BATCH */

	dri_bufmgr *bufmgr;

	/* Staging buffer for batch commands (dwords). */
	uint32_t batch_ptr[4096];
	/** Byte offset in batch_ptr for the next dword to be emitted. */
	unsigned int batch_used;
	/** Position in batch_ptr at the start of the current BEGIN_BATCH */
	unsigned int batch_emit_start;
	/** Number of bytes to be emitted in the current BEGIN_BATCH. */
	uint32_t batch_emitting;
	dri_bo *batch_bo, *last_batch_bo[2];
	/** Whether we're in a section of code that can't tolerate flushing */
	Bool in_batch_atomic;
	/** Ending batch_used that was verified by intel_start_batch_atomic() */
	int batch_atomic_limit;
	struct list batch_pixmaps;
	drm_intel_bo *wa_scratch_bo;   /* workaround scratch buffer */

	/* Bitmask of INTEL_TILING_* flags enabled for this screen. */
	unsigned int tiling;

#define INTEL_TILING_FB 0x1
#define INTEL_TILING_2D 0x2
#define INTEL_TILING_3D 0x4
#define INTEL_TILING_ALL (~0)

	Bool has_relaxed_fencing;

	int Chipset;

	unsigned int BR[20];

	/* Generation-specific hooks installed at init time. */
	void (*vertex_flush) (struct intel_screen_private *intel);
	void (*batch_flush) (struct intel_screen_private *intel);
	void (*batch_commit_notify) (struct intel_screen_private *intel);

	Bool need_sync;

	/* Hardware limits for the acceleration paths. */
	int accel_pixmap_offset_alignment;
	int accel_max_x;
	int accel_max_y;
	int max_bo_size;
	int max_gtt_map_size;
	int max_tiling_size;

	/* Cached shader/state buffer objects for the video paths. */
	struct {
		drm_intel_bo *gen4_vs_bo;
		drm_intel_bo *gen4_sf_bo;
		drm_intel_bo *gen4_wm_packed_bo;
		drm_intel_bo *gen4_wm_planar_bo;
		drm_intel_bo *gen4_cc_bo;
		drm_intel_bo *gen4_cc_vp_bo;
		drm_intel_bo *gen4_sampler_bo;
		drm_intel_bo *gen4_sip_kernel_bo;
		drm_intel_bo *wm_prog_packed_bo;
		drm_intel_bo *wm_prog_planar_bo;
		drm_intel_bo *gen6_blend_bo;
		drm_intel_bo *gen6_depth_stencil_bo;
	} video;

	/* Render accel state */
	float scale_units[2][2];
	/** Transform pointers for src/mask, or NULL if identity */
	PictTransform *transform[2];

	PixmapPtr render_source, render_mask, render_dest;
	PicturePtr render_source_picture, render_mask_picture, render_dest_picture;
	Bool needs_3d_invariant;
	Bool needs_render_state_emit;
	Bool needs_render_vertex_emit;

	/* i830 render accel state */
	uint32_t render_dest_format;
	uint32_t cblend, ablend, s8_blendctl;

	/* i915 render accel state */
	PixmapPtr texture[2];
	uint32_t mapstate[6];
	uint32_t samplerstate[6];

	struct {
		int op;
		uint32_t dst_format;
	} i915_render_state;

	struct {
		int num_sf_outputs;
		int drawrect;
		uint32_t blend;
		dri_bo *samplers;
		dri_bo *kernel;
	} gen6_render_state;

	/* Vertex emission state shared by the render paths. */
	uint32_t prim_offset;
	void (*prim_emit)(struct intel_screen_private *intel,
			  int srcX, int srcY,
			  int maskX, int maskY,
			  int dstX, int dstY,
			  int w, int h);
	int floats_per_vertex;
	int last_floats_per_vertex;
	uint16_t vertex_offset;
	uint16_t vertex_count;
	uint16_t vertex_index;
	uint16_t vertex_used;
	uint32_t vertex_id;
	float vertex_ptr[4*1024];
	dri_bo *vertex_bo;

	/* Staging area for binding/surface-state uploads. */
	uint8_t surface_data[16*1024];
	uint16_t surface_used;
	uint16_t surface_table;
	uint32_t surface_reloc;
	dri_bo *surface_bo;

	/* 965 render acceleration state */
	struct gen4_render_state *gen4_render_state;

	Bool use_pageflipping;
	Bool use_triple_buffer;
	Bool force_fallback;
	Bool has_kernel_flush;
	Bool needs_flush;

	enum last_3d last_3d;

	/**
	 * User option to print acceleration fallback info to the server log.
	 */
	Bool fallback_debug;
	unsigned debug_flush;
	Bool has_prime_vmap_flush;
} intel_screen_private;
|
||||
|
|
@ -0,0 +1,288 @@
|
|||
#ifndef INTEL_DRIVER_H
|
||||
#define INTEL_DRIVER_H
|
||||
|
||||
#define INTEL_VERSION 4000
|
||||
#define INTEL_NAME "intel"
|
||||
#define INTEL_DRIVER_NAME "intel"
|
||||
|
||||
#define INTEL_VERSION_MAJOR PACKAGE_VERSION_MAJOR
|
||||
#define INTEL_VERSION_MINOR PACKAGE_VERSION_MINOR
|
||||
#define INTEL_VERSION_PATCH PACKAGE_VERSION_PATCHLEVEL
|
||||
|
||||
#ifndef PCI_CHIP_I810
|
||||
#define PCI_CHIP_I810 0x7121
|
||||
#define PCI_CHIP_I810_DC100 0x7123
|
||||
#define PCI_CHIP_I810_E 0x7125
|
||||
#define PCI_CHIP_I815 0x1132
|
||||
#define PCI_CHIP_I810_BRIDGE 0x7120
|
||||
#define PCI_CHIP_I810_DC100_BRIDGE 0x7122
|
||||
#define PCI_CHIP_I810_E_BRIDGE 0x7124
|
||||
#define PCI_CHIP_I815_BRIDGE 0x1130
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_I830_M
|
||||
#define PCI_CHIP_I830_M 0x3577
|
||||
#define PCI_CHIP_I830_M_BRIDGE 0x3575
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_845_G
|
||||
#define PCI_CHIP_845_G 0x2562
|
||||
#define PCI_CHIP_845_G_BRIDGE 0x2560
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_I854
|
||||
#define PCI_CHIP_I854 0x358E
|
||||
#define PCI_CHIP_I854_BRIDGE 0x358C
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_I855_GM
|
||||
#define PCI_CHIP_I855_GM 0x3582
|
||||
#define PCI_CHIP_I855_GM_BRIDGE 0x3580
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_I865_G
|
||||
#define PCI_CHIP_I865_G 0x2572
|
||||
#define PCI_CHIP_I865_G_BRIDGE 0x2570
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_I915_G
|
||||
#define PCI_CHIP_I915_G 0x2582
|
||||
#define PCI_CHIP_I915_G_BRIDGE 0x2580
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_I915_GM
|
||||
#define PCI_CHIP_I915_GM 0x2592
|
||||
#define PCI_CHIP_I915_GM_BRIDGE 0x2590
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_E7221_G
|
||||
#define PCI_CHIP_E7221_G 0x258A
|
||||
/* Same as I915_G_BRIDGE */
|
||||
#define PCI_CHIP_E7221_G_BRIDGE 0x2580
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_I945_G
|
||||
#define PCI_CHIP_I945_G 0x2772
|
||||
#define PCI_CHIP_I945_G_BRIDGE 0x2770
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_I945_GM
|
||||
#define PCI_CHIP_I945_GM 0x27A2
|
||||
#define PCI_CHIP_I945_GM_BRIDGE 0x27A0
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_I945_GME
|
||||
#define PCI_CHIP_I945_GME 0x27AE
|
||||
#define PCI_CHIP_I945_GME_BRIDGE 0x27AC
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_PINEVIEW_M
|
||||
#define PCI_CHIP_PINEVIEW_M 0xA011
|
||||
#define PCI_CHIP_PINEVIEW_M_BRIDGE 0xA010
|
||||
#define PCI_CHIP_PINEVIEW_G 0xA001
|
||||
#define PCI_CHIP_PINEVIEW_G_BRIDGE 0xA000
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_G35_G
|
||||
#define PCI_CHIP_G35_G 0x2982
|
||||
#define PCI_CHIP_G35_G_BRIDGE 0x2980
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_I965_Q
|
||||
#define PCI_CHIP_I965_Q 0x2992
|
||||
#define PCI_CHIP_I965_Q_BRIDGE 0x2990
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_I965_G
|
||||
#define PCI_CHIP_I965_G 0x29A2
|
||||
#define PCI_CHIP_I965_G_BRIDGE 0x29A0
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_I946_GZ
|
||||
#define PCI_CHIP_I946_GZ 0x2972
|
||||
#define PCI_CHIP_I946_GZ_BRIDGE 0x2970
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_I965_GM
|
||||
#define PCI_CHIP_I965_GM 0x2A02
|
||||
#define PCI_CHIP_I965_GM_BRIDGE 0x2A00
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_I965_GME
|
||||
#define PCI_CHIP_I965_GME 0x2A12
|
||||
#define PCI_CHIP_I965_GME_BRIDGE 0x2A10
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_G33_G
|
||||
#define PCI_CHIP_G33_G 0x29C2
|
||||
#define PCI_CHIP_G33_G_BRIDGE 0x29C0
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_Q35_G
|
||||
#define PCI_CHIP_Q35_G 0x29B2
|
||||
#define PCI_CHIP_Q35_G_BRIDGE 0x29B0
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_Q33_G
|
||||
#define PCI_CHIP_Q33_G 0x29D2
|
||||
#define PCI_CHIP_Q33_G_BRIDGE 0x29D0
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_GM45_GM
|
||||
#define PCI_CHIP_GM45_GM 0x2A42
|
||||
#define PCI_CHIP_GM45_BRIDGE 0x2A40
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_G45_E_G
|
||||
#define PCI_CHIP_G45_E_G 0x2E02
|
||||
#define PCI_CHIP_G45_E_G_BRIDGE 0x2E00
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_G45_G
|
||||
#define PCI_CHIP_G45_G 0x2E22
|
||||
#define PCI_CHIP_G45_G_BRIDGE 0x2E20
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_Q45_G
|
||||
#define PCI_CHIP_Q45_G 0x2E12
|
||||
#define PCI_CHIP_Q45_G_BRIDGE 0x2E10
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_G41_G
|
||||
#define PCI_CHIP_G41_G 0x2E32
|
||||
#define PCI_CHIP_G41_G_BRIDGE 0x2E30
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_B43_G
|
||||
#define PCI_CHIP_B43_G 0x2E42
|
||||
#define PCI_CHIP_B43_G_BRIDGE 0x2E40
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_B43_G1
|
||||
#define PCI_CHIP_B43_G1 0x2E92
|
||||
#define PCI_CHIP_B43_G1_BRIDGE 0x2E90
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_IRONLAKE_D_G
|
||||
#define PCI_CHIP_IRONLAKE_D_G 0x0042
|
||||
#define PCI_CHIP_IRONLAKE_D_G_BRIDGE 0x0040
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_IRONLAKE_M_G
|
||||
#define PCI_CHIP_IRONLAKE_M_G 0x0046
|
||||
#define PCI_CHIP_IRONLAKE_M_G_BRIDGE 0x0044
|
||||
#endif
|
||||
|
||||
#ifndef PCI_CHIP_SANDYBRIDGE_BRIDGE
|
||||
#define PCI_CHIP_SANDYBRIDGE_BRIDGE 0x0100 /* Desktop */
|
||||
#define PCI_CHIP_SANDYBRIDGE_GT1 0x0102
|
||||
#define PCI_CHIP_SANDYBRIDGE_GT2 0x0112
|
||||
#define PCI_CHIP_SANDYBRIDGE_GT2_PLUS 0x0122
|
||||
#define PCI_CHIP_SANDYBRIDGE_BRIDGE_M 0x0104 /* Mobile */
|
||||
#define PCI_CHIP_SANDYBRIDGE_M_GT1 0x0106
|
||||
#define PCI_CHIP_SANDYBRIDGE_M_GT2 0x0116
|
||||
#define PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS 0x0126
|
||||
#define PCI_CHIP_SANDYBRIDGE_BRIDGE_S 0x0108 /* Server */
|
||||
#define PCI_CHIP_SANDYBRIDGE_S_GT 0x010A
|
||||
|
||||
#define PCI_CHIP_IVYBRIDGE_M_GT1 0x0156
|
||||
#define PCI_CHIP_IVYBRIDGE_M_GT2 0x0166
|
||||
#define PCI_CHIP_IVYBRIDGE_D_GT1 0x0152
|
||||
#define PCI_CHIP_IVYBRIDGE_D_GT2 0x0162
|
||||
#define PCI_CHIP_IVYBRIDGE_S_GT1 0x015a
|
||||
#define PCI_CHIP_IVYBRIDGE_S_GT2 0x016a
|
||||
|
||||
#define PCI_CHIP_HASWELL_D_GT1 0x0402
|
||||
#define PCI_CHIP_HASWELL_D_GT2 0x0412
|
||||
#define PCI_CHIP_HASWELL_D_GT2_PLUS 0x0422
|
||||
#define PCI_CHIP_HASWELL_M_GT1 0x0406
|
||||
#define PCI_CHIP_HASWELL_M_GT2 0x0416
|
||||
#define PCI_CHIP_HASWELL_M_GT2_PLUS 0x0426
|
||||
#define PCI_CHIP_HASWELL_S_GT1 0x040A
|
||||
#define PCI_CHIP_HASWELL_S_GT2 0x041A
|
||||
#define PCI_CHIP_HASWELL_S_GT2_PLUS 0x042A
|
||||
#define PCI_CHIP_HASWELL_SDV_D_GT1 0x0C02
|
||||
#define PCI_CHIP_HASWELL_SDV_D_GT2 0x0C12
|
||||
#define PCI_CHIP_HASWELL_SDV_D_GT2_PLUS 0x0C22
|
||||
#define PCI_CHIP_HASWELL_SDV_M_GT1 0x0C06
|
||||
#define PCI_CHIP_HASWELL_SDV_M_GT2 0x0C16
|
||||
#define PCI_CHIP_HASWELL_SDV_M_GT2_PLUS 0x0C26
|
||||
#define PCI_CHIP_HASWELL_SDV_S_GT1 0x0C0A
|
||||
#define PCI_CHIP_HASWELL_SDV_S_GT2 0x0C1A
|
||||
#define PCI_CHIP_HASWELL_SDV_S_GT2_PLUS 0x0C2A
|
||||
#define PCI_CHIP_HASWELL_ULT_D_GT1 0x0A02
|
||||
#define PCI_CHIP_HASWELL_ULT_D_GT2 0x0A12
|
||||
#define PCI_CHIP_HASWELL_ULT_D_GT2_PLUS 0x0A22
|
||||
#define PCI_CHIP_HASWELL_ULT_M_GT1 0x0A06
|
||||
#define PCI_CHIP_HASWELL_ULT_M_GT2 0x0A16
|
||||
#define PCI_CHIP_HASWELL_ULT_M_GT2_PLUS 0x0A26
|
||||
#define PCI_CHIP_HASWELL_ULT_S_GT1 0x0A0A
|
||||
#define PCI_CHIP_HASWELL_ULT_S_GT2 0x0A1A
|
||||
#define PCI_CHIP_HASWELL_ULT_S_GT2_PLUS 0x0A2A
|
||||
#define PCI_CHIP_HASWELL_CRW_D_GT1 0x0D12
|
||||
#define PCI_CHIP_HASWELL_CRW_D_GT2 0x0D22
|
||||
#define PCI_CHIP_HASWELL_CRW_D_GT2_PLUS 0x0D32
|
||||
#define PCI_CHIP_HASWELL_CRW_M_GT1 0x0D16
|
||||
#define PCI_CHIP_HASWELL_CRW_M_GT2 0x0D26
|
||||
#define PCI_CHIP_HASWELL_CRW_M_GT2_PLUS 0x0D36
|
||||
#define PCI_CHIP_HASWELL_CRW_S_GT1 0x0D1A
|
||||
#define PCI_CHIP_HASWELL_CRW_S_GT2 0x0D2A
|
||||
#define PCI_CHIP_HASWELL_CRW_S_GT2_PLUS 0x0D3A
|
||||
|
||||
#define PCI_CHIP_VALLEYVIEW_PO 0x0f30
|
||||
#define PCI_CHIP_VALLEYVIEW_1 0x0f31
|
||||
#define PCI_CHIP_VALLEYVIEW_2 0x0f32
|
||||
#define PCI_CHIP_VALLEYVIEW_3 0x0f33
|
||||
|
||||
#endif
|
||||
|
||||
#define I85X_CAPID 0x44
|
||||
#define I85X_VARIANT_MASK 0x7
|
||||
#define I85X_VARIANT_SHIFT 5
|
||||
#define I855_GME 0x0
|
||||
#define I855_GM 0x4
|
||||
#define I852_GME 0x2
|
||||
#define I852_GM 0x5
|
||||
|
||||
#define I810_MEMBASE(p,n) (p)->regions[(n)].base_addr
|
||||
#define VENDOR_ID(p) (p)->vendor_id
|
||||
#define DEVICE_ID(p) (p)->device_id
|
||||
#define SUBVENDOR_ID(p) (p)->subvendor_id
|
||||
#define SUBSYS_ID(p) (p)->subdevice_id
|
||||
#define CHIP_REVISION(p) (p)->revision
|
||||
|
||||
#define INTEL_INFO(intel) ((intel)->info)
|
||||
#define IS_GENx(intel, X) (INTEL_INFO(intel)->gen >= 8*(X) && INTEL_INFO(intel)->gen < 8*((X)+1))
|
||||
#define IS_GEN1(intel) IS_GENx(intel, 1)
|
||||
#define IS_GEN2(intel) IS_GENx(intel, 2)
|
||||
#define IS_GEN3(intel) IS_GENx(intel, 3)
|
||||
#define IS_GEN4(intel) IS_GENx(intel, 4)
|
||||
#define IS_GEN5(intel) IS_GENx(intel, 5)
|
||||
#define IS_GEN6(intel) IS_GENx(intel, 6)
|
||||
#define IS_GEN7(intel) IS_GENx(intel, 7)
|
||||
#define IS_HSW(intel) (INTEL_INFO(intel)->gen == 075)
|
||||
|
||||
/* Some chips have specific errata (or limits) that we need to workaround. */
|
||||
#define IS_I830(intel) (DEVICE_ID((intel)->PciInfo) == PCI_CHIP_I830_M)
|
||||
#define IS_845G(intel) (DEVICE_ID((intel)->PciInfo) == PCI_CHIP_845_G)
|
||||
#define IS_I865G(intel) (DEVICE_ID((intel)->PciInfo) == PCI_CHIP_I865_G)
|
||||
|
||||
#define IS_I915G(pI810) (DEVICE_ID(pI810->PciInfo) == PCI_CHIP_I915_G || DEVICE_ID(pI810->PciInfo) == PCI_CHIP_E7221_G)
|
||||
#define IS_I915GM(pI810) (DEVICE_ID(pI810->PciInfo) == PCI_CHIP_I915_GM)
|
||||
|
||||
#define IS_965_Q(pI810) (DEVICE_ID(pI810->PciInfo) == PCI_CHIP_I965_Q)
|
||||
|
||||
/* supports Y tiled surfaces (pre-965 Mesa isn't ready yet) */
/* Fixed: both macros previously ignored their argument and referenced a
 * variable literally named `intel` captured from the caller's scope,
 * silently breaking any call site whose pointer has a different name. */
#define SUPPORTS_YTILING(pI810) (INTEL_INFO(pI810)->gen >= 040)
#define HAS_BLT(pI810) (INTEL_INFO(pI810)->gen >= 060)
|
||||
|
||||
/* Per-chipset invariant data.
 *
 * `gen` encodes the GPU generation as 8*major + minor, conventionally
 * written in octal: 040 == gen4.0, 060 == gen6.0, 075 == gen7.5
 * (Haswell).  See the IS_GENx()/IS_HSW() macros above. */
struct intel_device_info {
	int gen;
};
|
||||
|
||||
//void intel_detect_chipset(ScrnInfoPtr scrn,
|
||||
// EntityInfoPtr ent,
|
||||
// struct pci_device *pci);
|
||||
|
||||
|
||||
#endif /* INTEL_DRIVER_H */
|
|
@ -0,0 +1,353 @@
|
|||
/*
|
||||
* Copyright © 2010-2012 Intel Corporation
|
||||
* Copyright © 2010 Francisco Jerez <currojerez@riseup.net>
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _INTEL_LIST_H_
|
||||
#define _INTEL_LIST_H_
|
||||
|
||||
#include <assert.h>	/* __list_del()/_list_del() use assert() */
#include <stdbool.h>
#include <stddef.h>	/* offsetof() for container_of() */
|
||||
|
||||
/**
|
||||
* @file Classic doubly-link circular list implementation.
|
||||
* For real usage examples of the linked list, see the file test/list.c
|
||||
*
|
||||
* Example:
|
||||
* We need to keep a list of struct foo in the parent struct bar, i.e. what
|
||||
* we want is something like this.
|
||||
*
|
||||
* struct bar {
|
||||
* ...
|
||||
* struct foo *list_of_foos; -----> struct foo {}, struct foo {}, struct foo{}
|
||||
* ...
|
||||
* }
|
||||
*
|
||||
* We need one list head in bar and a list element in all list_of_foos (both are of
|
||||
* data type 'struct list').
|
||||
*
|
||||
* struct bar {
|
||||
* ...
|
||||
* struct list list_of_foos;
|
||||
* ...
|
||||
* }
|
||||
*
|
||||
* struct foo {
|
||||
* ...
|
||||
* struct list entry;
|
||||
* ...
|
||||
* }
|
||||
*
|
||||
* Now we initialize the list head:
|
||||
*
|
||||
* struct bar bar;
|
||||
* ...
|
||||
* list_init(&bar.list_of_foos);
|
||||
*
|
||||
* Then we create the first element and add it to this list:
|
||||
*
|
||||
* struct foo *foo = malloc(...);
|
||||
* ....
|
||||
* list_add(&foo->entry, &bar.list_of_foos);
|
||||
*
|
||||
* Repeat the above for each element you want to add to the list. Deleting
|
||||
* works with the element itself.
|
||||
* list_del(&foo->entry);
|
||||
* free(foo);
|
||||
*
|
||||
* Note: calling list_del(&bar.list_of_foos) will set bar.list_of_foos to an empty
|
||||
* list again.
|
||||
*
|
||||
* Looping through the list requires a 'struct foo' as iterator and the
|
||||
* name of the field the subnodes use.
|
||||
*
|
||||
* struct foo *iterator;
|
||||
* list_for_each_entry(iterator, &bar.list_of_foos, entry) {
|
||||
* if (iterator->something == ...)
|
||||
* ...
|
||||
* }
|
||||
*
|
||||
* Note: You must not call list_del() on the iterator if you continue the
|
||||
* loop. You need to run the safe for-each loop instead:
|
||||
*
|
||||
* struct foo *iterator, *next;
|
||||
* list_for_each_entry_safe(iterator, next, &bar.list_of_foos, entry) {
|
||||
* if (...)
|
||||
* list_del(&iterator->entry);
|
||||
* }
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* The linkage struct for list nodes. This struct must be part of your
|
||||
* to-be-linked struct. struct list is required for both the head of the
|
||||
* list and for each list node.
|
||||
*
|
||||
* Position and name of the struct list field is irrelevant.
|
||||
* There are no requirements that elements of a list are of the same type.
|
||||
* There are no requirements for a list head, any struct list can be a list
|
||||
* head.
|
||||
*/
|
||||
struct list {
	/* Circular links: an empty list's head points back at itself
	 * (see list_init()). */
	struct list *next, *prev;
};
|
||||
|
||||
/**
|
||||
* Initialize the list as an empty list.
|
||||
*
|
||||
* Example:
|
||||
* list_init(&bar->list_of_foos);
|
||||
*
|
||||
* @param The list to initialized.
|
||||
*/
|
||||
static void
|
||||
list_init(struct list *list)
|
||||
{
|
||||
list->next = list->prev = list;
|
||||
}
|
||||
|
||||
/* Splice @entry between the two adjacent nodes @prev and @next,
 * updating all four affected links. */
static inline void
__list_add(struct list *entry,
	   struct list *prev,
	   struct list *next)
{
	next->prev = entry;
	entry->next = next;
	entry->prev = prev;
	prev->next = entry;
}
|
||||
|
||||
/**
|
||||
* Insert a new element after the given list head. The new element does not
|
||||
* need to be initialised as empty list.
|
||||
* The list changes from:
|
||||
* head → some element → ...
|
||||
* to
|
||||
* head → new element → older element → ...
|
||||
*
|
||||
* Example:
|
||||
* struct foo *newfoo = malloc(...);
|
||||
* list_add(&newfoo->entry, &bar->list_of_foos);
|
||||
*
|
||||
* @param entry The new element to prepend to the list.
|
||||
* @param head The existing list.
|
||||
*/
|
||||
static inline void
|
||||
list_add(struct list *entry, struct list *head)
|
||||
{
|
||||
__list_add(entry, head, head->next);
|
||||
}
|
||||
|
||||
static inline void
|
||||
list_add_tail(struct list *entry, struct list *head)
|
||||
{
|
||||
__list_add(entry, head->prev, head);
|
||||
}
|
||||
|
||||
/* Substitute @new for @old in the list @old currently belongs to.
 * @old's own next/prev are left untouched (still pointing into the
 * list).  NOTE(review): the statement order is significant when @old is
 * a self-linked (empty) head — do not reorder. */
static inline void list_replace(struct list *old,
				struct list *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}
|
||||
|
||||
/* NOTE: a duplicate #define of list_last_entry used to live here; the
 * macro is defined once, later in this header, next to list_first_entry. */

/* Iterate over the raw struct list nodes of @head (not the containing
 * objects — see list_for_each_entry for that). */
#define list_for_each(pos, head)				    \
    for (pos = (head)->next; pos != (head); pos = pos->next)
|
||||
|
||||
/**
|
||||
* Append a new element to the end of the list given with this list head.
|
||||
*
|
||||
* The list changes from:
|
||||
* head → some element → ... → lastelement
|
||||
* to
|
||||
* head → some element → ... → lastelement → new element
|
||||
*
|
||||
* Example:
|
||||
* struct foo *newfoo = malloc(...);
|
||||
* list_append(&newfoo->entry, &bar->list_of_foos);
|
||||
*
|
||||
 * @param entry The new element to append to the list.
|
||||
* @param head The existing list.
|
||||
*/
|
||||
/* Append @entry at the tail of @head.  Semantically identical to
 * list_add_tail(); kept as an alias for API compatibility, but now
 * delegating to it instead of duplicating the splice logic. */
static inline void
list_append(struct list *entry, struct list *head)
{
	list_add_tail(entry, head);
}
|
||||
|
||||
|
||||
/* Unlink whatever lies between @prev and @next by pointing them at each
 * other.  The assert checks the neighbours were consistently linked. */
static inline void
__list_del(struct list *prev, struct list *next)
{
	assert(next->prev == prev->next);
	next->prev = prev;
	prev->next = next;
}
|
||||
|
||||
/* Unlink @entry from its list WITHOUT reinitialising @entry itself
 * (its next/prev keep stale values).  Use list_del() when the node may
 * be inspected or deleted again afterwards. */
static inline void
_list_del(struct list *entry)
{
	assert(entry->prev->next == entry);
	assert(entry->next->prev == entry);
	__list_del(entry->prev, entry->next);
}
|
||||
|
||||
/**
|
||||
* Remove the element from the list it is in. Using this function will reset
|
||||
* the pointers to/from this element so it is removed from the list. It does
|
||||
* NOT free the element itself or manipulate it otherwise.
|
||||
*
|
||||
* Using list_del on a pure list head (like in the example at the top of
|
||||
* this file) will NOT remove the first element from
|
||||
* the list but rather reset the list as empty list.
|
||||
*
|
||||
* Example:
|
||||
* list_del(&foo->entry);
|
||||
*
|
||||
* @param entry The element to remove.
|
||||
*/
|
||||
static inline void
list_del(struct list *entry)
{
	_list_del(entry);
	list_init(entry);	/* leave the node as a valid empty list */
}
|
||||
|
||||
/* Move @list to the front of @head.  No-op when it is already the
 * first element (list->prev == head). */
static inline void list_move(struct list *list, struct list *head)
{
	if (list->prev != head) {
		_list_del(list);
		list_add(list, head);
	}
}
|
||||
|
||||
/* Move @list to the tail of @head (relinks unconditionally, unlike
 * list_move()). */
static inline void list_move_tail(struct list *list, struct list *head)
{
	_list_del(list);
	list_add_tail(list, head);
}
|
||||
|
||||
/**
|
||||
* Check if the list is empty.
|
||||
*
|
||||
* Example:
|
||||
* list_is_empty(&bar->list_of_foos);
|
||||
*
|
||||
 * @return True if the list is empty, False if it contains one or more elements.
|
||||
*/
|
||||
static inline bool
|
||||
list_is_empty(struct list *head)
|
||||
{
|
||||
return head->next == head;
|
||||
}
|
||||
|
||||
/**
|
||||
* Alias of container_of
|
||||
*/
|
||||
#define list_entry(ptr, type, member) \
|
||||
container_of(ptr, type, member)
|
||||
|
||||
/**
|
||||
* Retrieve the first list entry for the given list pointer.
|
||||
*
|
||||
* Example:
|
||||
* struct foo *first;
|
||||
* first = list_first_entry(&bar->list_of_foos, struct foo, list_of_foos);
|
||||
*
|
||||
* @param ptr The list head
|
||||
* @param type Data type of the list element to retrieve
|
||||
* @param member Member name of the struct list field in the list element.
|
||||
* @return A pointer to the first list element.
|
||||
*/
|
||||
#define list_first_entry(ptr, type, member) \
|
||||
list_entry((ptr)->next, type, member)
|
||||
|
||||
/**
|
||||
 * Retrieve the last list entry for the given list pointer.
|
||||
*
|
||||
* Example:
|
||||
* struct foo *first;
|
||||
* first = list_last_entry(&bar->list_of_foos, struct foo, list_of_foos);
|
||||
*
|
||||
* @param ptr The list head
|
||||
* @param type Data type of the list element to retrieve
|
||||
* @param member Member name of the struct list field in the list element.
|
||||
* @return A pointer to the last list element.
|
||||
*/
|
||||
#define list_last_entry(ptr, type, member) \
|
||||
list_entry((ptr)->prev, type, member)
|
||||
|
||||
#define __container_of(ptr, sample, member) \
|
||||
(void *)((char *)(ptr) \
|
||||
- ((char *)&(sample)->member - (char *)(sample)))
|
||||
/**
|
||||
* Loop through the list given by head and set pos to struct in the list.
|
||||
*
|
||||
* Example:
|
||||
* struct foo *iterator;
|
||||
* list_for_each_entry(iterator, &bar->list_of_foos, entry) {
|
||||
* [modify iterator]
|
||||
* }
|
||||
*
|
||||
* This macro is not safe for node deletion. Use list_for_each_entry_safe
|
||||
* instead.
|
||||
*
|
||||
* @param pos Iterator variable of the type of the list elements.
|
||||
* @param head List head
|
||||
* @param member Member name of the struct list in the list elements.
|
||||
*
|
||||
*/
|
||||
#define list_for_each_entry(pos, head, member) \
|
||||
for (pos = __container_of((head)->next, pos, member); \
|
||||
&pos->member != (head); \
|
||||
pos = __container_of(pos->member.next, pos, member))
|
||||
|
||||
#define list_for_each_entry_reverse(pos, head, member) \
|
||||
for (pos = __container_of((head)->prev, pos, member); \
|
||||
&pos->member != (head); \
|
||||
pos = __container_of(pos->member.prev, pos, member))
|
||||
|
||||
/**
|
||||
* Loop through the list, keeping a backup pointer to the element. This
|
||||
* macro allows for the deletion of a list element while looping through the
|
||||
* list.
|
||||
*
|
||||
* See list_for_each_entry for more details.
|
||||
*/
|
||||
#define list_for_each_entry_safe(pos, tmp, head, member) \
|
||||
for (pos = __container_of((head)->next, pos, member), \
|
||||
tmp = __container_of(pos->member.next, pos, member); \
|
||||
&pos->member != (head); \
|
||||
pos = tmp, tmp = __container_of(pos->member.next, tmp, member))
|
||||
|
||||
|
||||
#undef container_of
/* Recover a pointer to the enclosing object of @type from a pointer to
 * its @member.  Uses offsetof() rather than the original
 * dereference-of-a-null-pointer idiom, which is formally undefined
 * behaviour in C. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
|
||||
|
||||
#endif /* _INTEL_LIST_H_ */
|
||||
|
|
@ -0,0 +1,559 @@
|
|||
/*
|
||||
* Copyright (c) 2011 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Chris Wilson <chris@chris-wilson.co.uk>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
#include "config.h"
|
||||
#endif
|
||||
|
||||
#include "sna.h"
|
||||
#include "sna_reg.h"
|
||||
|
||||
#define DBG_NO_HW 0
|
||||
#define DBG_NO_TILING 1
|
||||
#define DBG_NO_CACHE 0
|
||||
#define DBG_NO_CACHE_LEVEL 0
|
||||
#define DBG_NO_CPU 0
|
||||
#define DBG_NO_USERPTR 0
|
||||
#define DBG_NO_LLC 0
|
||||
#define DBG_NO_SEMAPHORES 0
|
||||
#define DBG_NO_MADV 0
|
||||
#define DBG_NO_UPLOAD_CACHE 0
|
||||
#define DBG_NO_UPLOAD_ACTIVE 0
|
||||
#define DBG_NO_MAP_UPLOAD 0
|
||||
#define DBG_NO_RELAXED_FENCING 0
|
||||
#define DBG_NO_SECURE_BATCHES 0
|
||||
#define DBG_NO_PINNED_BATCHES 0
|
||||
#define DBG_NO_FAST_RELOC 0
|
||||
#define DBG_NO_HANDLE_LUT 0
|
||||
#define DBG_DUMP 0
|
||||
|
||||
#define MAX_GTT_VMA_CACHE 512
|
||||
#define MAX_CPU_VMA_CACHE INT16_MAX
|
||||
#define MAP_PRESERVE_TIME 10
|
||||
|
||||
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
|
||||
#define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
|
||||
#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
|
||||
#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2)
|
||||
#define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)
|
||||
|
||||
#define MAKE_REQUEST(rq, ring) ((struct kgem_request *)((uintptr_t)(rq) | (ring)))
|
||||
|
||||
#define LOCAL_I915_PARAM_HAS_BLT 11
|
||||
#define LOCAL_I915_PARAM_HAS_RELAXED_FENCING 12
|
||||
#define LOCAL_I915_PARAM_HAS_RELAXED_DELTA 15
|
||||
#define LOCAL_I915_PARAM_HAS_SEMAPHORES 20
|
||||
#define LOCAL_I915_PARAM_HAS_SECURE_BATCHES 23
|
||||
#define LOCAL_I915_PARAM_HAS_PINNED_BATCHES 24
|
||||
#define LOCAL_I915_PARAM_HAS_NO_RELOC 25
|
||||
#define LOCAL_I915_PARAM_HAS_HANDLE_LUT 26
|
||||
|
||||
|
||||
|
||||
static int gem_param(struct kgem *kgem, int name)
|
||||
{
|
||||
ioctl_t io;
|
||||
|
||||
drm_i915_getparam_t gp;
|
||||
int v = -1; /* No param uses the sign bit, reserve it for errors */
|
||||
|
||||
VG_CLEAR(gp);
|
||||
gp.param = name;
|
||||
gp.value = &v;
|
||||
|
||||
io.handle = kgem->fd;
|
||||
io.io_code = SRV_GET_PARAM;
|
||||
io.input = &gp;
|
||||
io.inp_size = sizeof(gp);
|
||||
io.output = NULL;
|
||||
io.out_size = 0;
|
||||
|
||||
if (call_service(&io)!=0)
|
||||
return -1;
|
||||
|
||||
VG(VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)));
|
||||
return v;
|
||||
}
|
||||
|
||||
static bool test_has_no_reloc(struct kgem *kgem)
|
||||
{
|
||||
if (DBG_NO_FAST_RELOC)
|
||||
return false;
|
||||
|
||||
return gem_param(kgem, LOCAL_I915_PARAM_HAS_NO_RELOC) > 0;
|
||||
}
|
||||
|
||||
static bool test_has_handle_lut(struct kgem *kgem)
|
||||
{
|
||||
if (DBG_NO_HANDLE_LUT)
|
||||
return false;
|
||||
|
||||
return gem_param(kgem, LOCAL_I915_PARAM_HAS_HANDLE_LUT) > 0;
|
||||
}
|
||||
|
||||
static bool test_has_semaphores_enabled(struct kgem *kgem)
|
||||
{
|
||||
FILE *file;
|
||||
bool detected = false;
|
||||
int ret;
|
||||
|
||||
if (DBG_NO_SEMAPHORES)
|
||||
return false;
|
||||
|
||||
ret = gem_param(kgem, LOCAL_I915_PARAM_HAS_SEMAPHORES);
|
||||
if (ret != -1)
|
||||
return ret > 0;
|
||||
|
||||
return detected;
|
||||
}
|
||||
|
||||
|
||||
static bool test_has_relaxed_fencing(struct kgem *kgem)
|
||||
{
|
||||
if (kgem->gen < 040) {
|
||||
if (DBG_NO_RELAXED_FENCING)
|
||||
return false;
|
||||
|
||||
return gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_FENCING) > 0;
|
||||
} else
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Does the GPU share the last-level cache with the CPU?
 * Prefers the kernel's I915_PARAM_HAS_LLC answer when libdrm is new
 * enough to define the parameter; otherwise guesses from the GPU
 * generation (gen6+ assumed to share LLC). */
static bool test_has_llc(struct kgem *kgem)
{
	int has_llc = -1;	/* -1 == kernel/libdrm cannot tell us */

	if (DBG_NO_LLC)		/* compile-time debug override */
		return false;

#if defined(I915_PARAM_HAS_LLC) /* Expected in libdrm-2.4.31 */
	has_llc = gem_param(kgem, I915_PARAM_HAS_LLC);
#endif
	if (has_llc == -1) {
		DBG(("%s: no kernel/drm support for HAS_LLC, assuming support for LLC based on GPU generation\n", __FUNCTION__));
		has_llc = kgem->gen >= 060;
	}

	return has_llc;
}
|
||||
|
||||
/* Probe for kernel set-cache-level support.
 * NOTE(review): the actual probe (gem_create/gem_set_cacheing) is
 * commented out in this port, so the function currently always returns
 * false and `handle` is never used — confirm this is intentional. */
static bool test_has_cacheing(struct kgem *kgem)
{
	uint32_t handle;	/* unused while the probe below is disabled */
	bool ret = false;

	if (DBG_NO_CACHE_LEVEL)
		return false;

	/* Incoherent blt and sampler hangs the GPU */
	if (kgem->gen == 040)
		return false;

//	handle = gem_create(kgem->fd, 1);
//	if (handle == 0)
//		return false;

//	ret = gem_set_cacheing(kgem->fd, handle, UNCACHED);
//	gem_close(kgem->fd, handle);
	return ret;
}
|
||||
|
||||
static bool test_has_userptr(struct kgem *kgem)
|
||||
{
|
||||
#if defined(USE_USERPTR)
|
||||
uint32_t handle;
|
||||
void *ptr;
|
||||
|
||||
if (DBG_NO_USERPTR)
|
||||
return false;
|
||||
|
||||
/* Incoherent blt and sampler hangs the GPU */
|
||||
if (kgem->gen == 040)
|
||||
return false;
|
||||
|
||||
ptr = malloc(PAGE_SIZE);
|
||||
handle = gem_userptr(kgem->fd, ptr, PAGE_SIZE, false);
|
||||
gem_close(kgem->fd, handle);
|
||||
free(ptr);
|
||||
|
||||
return handle != 0;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
static bool test_has_secure_batches(struct kgem *kgem)
|
||||
{
|
||||
if (DBG_NO_SECURE_BATCHES)
|
||||
return false;
|
||||
|
||||
return gem_param(kgem, LOCAL_I915_PARAM_HAS_SECURE_BATCHES) > 0;
|
||||
}
|
||||
|
||||
static bool test_has_pinned_batches(struct kgem *kgem)
|
||||
{
|
||||
if (DBG_NO_PINNED_BATCHES)
|
||||
return false;
|
||||
|
||||
return gem_param(kgem, LOCAL_I915_PARAM_HAS_PINNED_BATCHES) > 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* One-time construction of the kgem manager for a DRM fd.
 *
 * Zeroes all state, initialises every cache/request list, then probes
 * the kernel and hardware for optional features.  The aperture sizing
 * and batch configuration that follows the probes is compiled out
 * (#if 0) in this port, so `aperture`, `totalram` and `half_gpu_max`
 * are currently only used inside the disabled region.
 */
void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
{
	struct drm_i915_gem_get_aperture aperture;
	size_t totalram;
	unsigned half_gpu_max;
	unsigned int i, j;

	DBG(("%s: fd=%d, gen=%d\n", __FUNCTION__, fd, gen));

	memset(kgem, 0, sizeof(*kgem));

	kgem->fd = fd;
	kgem->gen = gen;

	/* Every bo cache list must be a valid empty list before use. */
	list_init(&kgem->requests[0]);
	list_init(&kgem->requests[1]);
	list_init(&kgem->batch_buffers);
	list_init(&kgem->active_buffers);
	list_init(&kgem->flushing);
	list_init(&kgem->large);
	list_init(&kgem->large_inactive);
	list_init(&kgem->snoop);
	list_init(&kgem->scanout);
	for (i = 0; i < ARRAY_SIZE(kgem->pinned_batches); i++)
		list_init(&kgem->pinned_batches[i]);
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
		list_init(&kgem->inactive[i]);
	for (i = 0; i < ARRAY_SIZE(kgem->active); i++) {
		for (j = 0; j < ARRAY_SIZE(kgem->active[i]); j++)
			list_init(&kgem->active[i][j]);
	}
	for (i = 0; i < ARRAY_SIZE(kgem->vma); i++) {
		for (j = 0; j < ARRAY_SIZE(kgem->vma[i].inactive); j++)
			list_init(&kgem->vma[i].inactive[j]);
	}

	/* Counts start negative: presumably they count up towards zero
	 * before the VMA caches begin evicting — TODO confirm against
	 * the vma cache users.
	 */
	kgem->vma[MAP_GTT].count = -MAX_GTT_VMA_CACHE;
	kgem->vma[MAP_CPU].count = -MAX_CPU_VMA_CACHE;

	/* --- kernel/hardware feature probes --- */
	kgem->has_blt = gem_param(kgem, LOCAL_I915_PARAM_HAS_BLT) > 0;
	DBG(("%s: has BLT ring? %d\n", __FUNCTION__,
	     kgem->has_blt));

	kgem->has_relaxed_delta =
		gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_DELTA) > 0;
	DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
	     kgem->has_relaxed_delta));

	kgem->has_relaxed_fencing = test_has_relaxed_fencing(kgem);
	DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
	     kgem->has_relaxed_fencing));

	kgem->has_llc = test_has_llc(kgem);
	DBG(("%s: has shared last-level-cache? %d\n", __FUNCTION__,
	     kgem->has_llc));

	kgem->has_cacheing = test_has_cacheing(kgem);
	DBG(("%s: has set-cache-level? %d\n", __FUNCTION__,
	     kgem->has_cacheing));

	kgem->has_userptr = test_has_userptr(kgem);
	DBG(("%s: has userptr? %d\n", __FUNCTION__,
	     kgem->has_userptr));

	kgem->has_no_reloc = test_has_no_reloc(kgem);
	DBG(("%s: has no-reloc? %d\n", __FUNCTION__,
	     kgem->has_no_reloc));

	kgem->has_handle_lut = test_has_handle_lut(kgem);
	DBG(("%s: has handle-lut? %d\n", __FUNCTION__,
	     kgem->has_handle_lut));

	/* Semaphores are only meaningful with a second (BLT) ring. */
	kgem->has_semaphores = false;
	if (kgem->has_blt && test_has_semaphores_enabled(kgem))
		kgem->has_semaphores = true;
	DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
	     kgem->has_semaphores));

	kgem->can_blt_cpu = gen >= 030;
	DBG(("%s: can blt to cpu? %d\n", __FUNCTION__,
	     kgem->can_blt_cpu));

	kgem->has_secure_batches = test_has_secure_batches(kgem);
	DBG(("%s: can use privileged batchbuffers? %d\n", __FUNCTION__,
	     kgem->has_secure_batches));

	kgem->has_pinned_batches = test_has_pinned_batches(kgem);
	DBG(("%s: can use pinned batchbuffers (to avoid CS w/a)? %d\n", __FUNCTION__,
	     kgem->has_pinned_batches));

#if 0
	/* Disabled in this port: hardware sanity check, batch sizing and
	 * aperture budgeting still to be brought up.
	 */
	if (!is_hw_supported(kgem, dev)) {
		xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
			   "Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
		kgem->wedged = 1;
	} else if (__kgem_throttle(kgem)) {
		xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
			   "Detected a hung GPU, disabling acceleration.\n");
		kgem->wedged = 1;
	}

	kgem->batch_size = ARRAY_SIZE(kgem->batch);
	if (gen == 020 && !kgem->has_pinned_batches)
		/* Limited to what we can pin */
		kgem->batch_size = 4*1024;
	if (gen == 022)
		/* 865g cannot handle a batch spanning multiple pages */
		kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
	if ((gen >> 3) == 7)
		kgem->batch_size = 16*1024;
	if (!kgem->has_relaxed_delta && kgem->batch_size > 4*1024)
		kgem->batch_size = 4*1024;

	if (!kgem_init_pinned_batches(kgem) && gen == 020) {
		xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
			   "Unable to reserve memory for GPU, disabling acceleration.\n");
		kgem->wedged = 1;
	}

	DBG(("%s: maximum batch size? %d\n", __FUNCTION__,
	     kgem->batch_size));

	kgem->min_alignment = 4;
	if (gen < 040)
		kgem->min_alignment = 64;

	/* >> 13 = half the cache size expressed in 4KiB pages. */
	kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
	DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
	     kgem->half_cpu_cache_pages));

	kgem->next_request = __kgem_request_alloc(kgem);

	DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, userptr? %d\n", __FUNCTION__,
	     !DBG_NO_CPU && (kgem->has_llc | kgem->has_userptr | kgem->has_cacheing),
	     kgem->has_llc, kgem->has_cacheing, kgem->has_userptr));

	VG_CLEAR(aperture);
	aperture.aper_size = 0;
	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
	if (aperture.aper_size == 0)
		aperture.aper_size = 64*1024*1024;

	DBG(("%s: aperture size %lld, available now %lld\n",
	     __FUNCTION__,
	     (long long)aperture.aper_size,
	     (long long)aperture.aper_available_size));

	kgem->aperture_total = aperture.aper_size;
	kgem->aperture_high = aperture.aper_size * 3/4;
	kgem->aperture_low = aperture.aper_size * 1/3;
	if (gen < 033) {
		/* Severe alignment penalties */
		kgem->aperture_high /= 2;
		kgem->aperture_low /= 2;
	}
	DBG(("%s: aperture low=%d [%d], high=%d [%d]\n", __FUNCTION__,
	     kgem->aperture_low, kgem->aperture_low / (1024*1024),
	     kgem->aperture_high, kgem->aperture_high / (1024*1024)));

	kgem->aperture_mappable = agp_aperture_size(dev, gen);
	if (kgem->aperture_mappable == 0 ||
	    kgem->aperture_mappable > aperture.aper_size)
		kgem->aperture_mappable = aperture.aper_size;
	DBG(("%s: aperture mappable=%d [%d MiB]\n", __FUNCTION__,
	     kgem->aperture_mappable, kgem->aperture_mappable / (1024*1024)));

	/* Buffer size: smallest power of two >= mappable/1024,
	 * capped at half the CPU cache.
	 */
	kgem->buffer_size = 64 * 1024;
	while (kgem->buffer_size < kgem->aperture_mappable >> 10)
		kgem->buffer_size *= 2;
	if (kgem->buffer_size >> 12 > kgem->half_cpu_cache_pages)
		kgem->buffer_size = kgem->half_cpu_cache_pages << 12;
	DBG(("%s: buffer size=%d [%d KiB]\n", __FUNCTION__,
	     kgem->buffer_size, kgem->buffer_size / 1024));

	kgem->max_object_size = 3 * (kgem->aperture_high >> 12) << 10;
	kgem->max_gpu_size = kgem->max_object_size;
	if (!kgem->has_llc)
		kgem->max_gpu_size = MAX_CACHE_SIZE;

	totalram = total_ram_size();
	if (totalram == 0) {
		DBG(("%s: total ram size unknown, assuming maximum of total aperture\n",
		     __FUNCTION__));
		totalram = kgem->aperture_total;
	}
	DBG(("%s: total ram=%ld\n", __FUNCTION__, (long)totalram));
	if (kgem->max_object_size > totalram / 2)
		kgem->max_object_size = totalram / 2;
	if (kgem->max_gpu_size > totalram / 4)
		kgem->max_gpu_size = totalram / 4;

	kgem->max_cpu_size = kgem->max_object_size;

	half_gpu_max = kgem->max_gpu_size / 2;
	kgem->max_copy_tile_size = (MAX_CACHE_SIZE + 1)/2;
	if (kgem->max_copy_tile_size > half_gpu_max)
		kgem->max_copy_tile_size = half_gpu_max;

	if (kgem->has_llc)
		kgem->max_upload_tile_size = kgem->max_copy_tile_size;
	else
		kgem->max_upload_tile_size = kgem->aperture_mappable / 4;
	if (kgem->max_upload_tile_size > half_gpu_max)
		kgem->max_upload_tile_size = half_gpu_max;

	kgem->large_object_size = MAX_CACHE_SIZE;
	if (kgem->large_object_size > kgem->max_gpu_size)
		kgem->large_object_size = kgem->max_gpu_size;

	/* CPU bos are only usable with one of llc/cacheing/userptr. */
	if (kgem->has_llc | kgem->has_cacheing | kgem->has_userptr) {
		if (kgem->large_object_size > kgem->max_cpu_size)
			kgem->large_object_size = kgem->max_cpu_size;
	} else
		kgem->max_cpu_size = 0;
	if (DBG_NO_CPU)
		kgem->max_cpu_size = 0;

	DBG(("%s: maximum object size=%d\n",
	     __FUNCTION__, kgem->max_object_size));
	DBG(("%s: large object thresold=%d\n",
	     __FUNCTION__, kgem->large_object_size));
	DBG(("%s: max object sizes (gpu=%d, cpu=%d, tile upload=%d, copy=%d)\n",
	     __FUNCTION__,
	     kgem->max_gpu_size, kgem->max_cpu_size,
	     kgem->max_upload_tile_size, kgem->max_copy_tile_size));

	/* Convert the aperture thresholds to pages */
	kgem->aperture_low /= PAGE_SIZE;
	kgem->aperture_high /= PAGE_SIZE;

	kgem->fence_max = gem_param(kgem, I915_PARAM_NUM_FENCES_AVAIL) - 2;
	if ((int)kgem->fence_max < 0)
		kgem->fence_max = 5; /* minimum safe value for all hw */
	DBG(("%s: max fences=%d\n", __FUNCTION__, kgem->fence_max));

	kgem->batch_flags_base = 0;
	if (kgem->has_no_reloc)
		kgem->batch_flags_base |= LOCAL_I915_EXEC_NO_RELOC;
	if (kgem->has_handle_lut)
		kgem->batch_flags_base |= LOCAL_I915_EXEC_HANDLE_LUT;
	if (kgem->has_pinned_batches)
		kgem->batch_flags_base |= LOCAL_I915_EXEC_IS_PINNED;

#endif

}
|
||||
|
||||
|
||||
|
||||
/* Clear the dirty flag on every bo attached to the next request.
 *
 * The loop stops at the first clean bo: __kgem_bo_mark_dirty() moves a
 * bo to the head of the request's buffer list when it is dirtied, so
 * dirty bos are expected to be clustered at the front — everything
 * after the first clean entry is presumed already clean.
 */
void kgem_clear_dirty(struct kgem *kgem)
{
	struct list * const buffers = &kgem->next_request->buffers;
	struct kgem_bo *bo;

	list_for_each_entry(bo, buffers, request) {
		if (!bo->dirty)
			break;

		bo->dirty = false;
	}
}
|
||||
|
||||
|
||||
|
||||
uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
|
||||
{
|
||||
struct kgem_bo_binding *b;
|
||||
|
||||
for (b = &bo->binding; b && b->offset; b = b->next)
|
||||
if (format == b->format)
|
||||
return b->offset;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Record a (format -> surface-state offset) binding on the bo.
 *
 * The first entry with offset == 0 is a free slot: it is overwritten,
 * and the following entry (if any) has its offset zeroed so that the
 * list still terminates at the first zero offset (the invariant that
 * kgem_bo_get_binding relies on).  If no free slot exists, a new node
 * is inserted after the embedded head; allocation failure is silently
 * ignored — the binding is simply not cached.
 */
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset)
{
	struct kgem_bo_binding *b;

	for (b = &bo->binding; b; b = b->next) {
		if (b->offset)
			continue;

		/* Reuse this free slot. */
		b->offset = offset;
		b->format = format;

		/* Re-terminate the list after the slot just filled. */
		if (b->next)
			b->next->offset = 0;

		return;
	}

	/* No free slot: prepend a new node after the embedded head. */
	b = malloc(sizeof(*b));
	if (b) {
		b->next = bo->binding.next;
		b->format = format;
		b->offset = offset;
		bo->binding.next = b;
	}
}
|
||||
|
||||
uint32_t kgem_add_reloc(struct kgem *kgem,
|
||||
uint32_t pos,
|
||||
struct kgem_bo *bo,
|
||||
uint32_t read_write_domain,
|
||||
uint32_t delta)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Stub: resetting the batch state is not yet implemented in this port.
 * (Fix: removed the stray ';' after the function body — an empty
 * file-scope declaration, invalid in ISO C before C23.)
 */
void kgem_reset(struct kgem *kgem)
{
	(void)kgem;
}
|
||||
|
||||
/* Stub: batch submission is not yet implemented in this port.
 * (Fix: removed the stray ';' after the function body.)
 */
void _kgem_submit(struct kgem *kgem)
{
	(void)kgem;
}
|
||||
|
||||
/* Stub: linear bo allocation is not yet implemented in this port.
 * Always returns NULL; callers must handle allocation failure.
 * (Fix: removed the stray ';' after the function body and the
 * redundant intermediate local.)
 */
struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags)
{
	(void)kgem;
	(void)size;
	(void)flags;
	return NULL;
}
|
||||
|
||||
/* Stub: bo destruction is not yet implemented in this port; the
 * refcount bookkeeping in kgem_bo_destroy() still calls here.
 * (Fix: removed the stray ';' after the function body.)
 */
void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	(void)kgem;
	(void)bo;
}
|
|
@ -0,0 +1,648 @@
|
|||
/*
|
||||
* Copyright (c) 2011 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Chris Wilson <chris@chris-wilson.co.uk>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef KGEM_H
|
||||
#define KGEM_H
|
||||
|
||||
#define HAS_DEBUG_FULL 1
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#include "i915_drm.h"
|
||||
|
||||
#include "compiler.h"
|
||||
#include "intel_list.h"
|
||||
|
||||
|
||||
|
||||
#if HAS_DEBUG_FULL
|
||||
#define DBG(x) printf x
|
||||
#else
|
||||
#define DBG(x)
|
||||
#endif
|
||||
|
||||
/* A buffer object (bo) as tracked by kgem.
 *
 * bo->rq is a tagged pointer: the low 2 bits encode the ring the bo
 * was last queued on (see RQ/RQ_RING below); a non-NULL rq means the
 * bo is busy on the GPU.  bo->map is also tagged: bit 0 set means a
 * CPU mapping, clear means a GTT mapping (IS_CPU_MAP/IS_GTT_MAP).
 */
struct kgem_bo {
	struct kgem_request *rq;
#define RQ(rq) ((struct kgem_request *)((uintptr_t)(rq) & ~3))
#define RQ_RING(rq) ((uintptr_t)(rq) & 3)
#define RQ_IS_BLT(rq) (RQ_RING(rq) == KGEM_BLT)
	struct drm_i915_gem_exec_object2 *exec;

	/* Non-NULL when this bo is a view into another bo's storage. */
	struct kgem_bo *proxy;

	struct list list;	/* position in a kgem cache list */
	struct list request;	/* position in the owning request's buffers */
	struct list vma;	/* position in the vma (mapping) cache */

	void *map;
#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)

	/* Per-format surface-state offset cache; list terminates at the
	 * first entry with offset == 0.
	 */
	struct kgem_bo_binding {
		struct kgem_bo_binding *next;
		uint32_t format;
		uint16_t offset;
	} binding;

	uint32_t unique_id;
	uint32_t refcnt;
	uint32_t handle;		/* GEM handle */
	uint32_t target_handle;
	uint32_t presumed_offset;	/* last known GTT offset */
	uint32_t delta;
	/* Size is either a page count (regular bos) or a byte count
	 * (proxy buffers) — see __kgem_bo_size/__kgem_buffer_size.
	 */
	union {
		struct {
			uint32_t count:27;
#define PAGE_SIZE 4096
			uint32_t bucket:5;
#define NUM_CACHE_BUCKETS 16
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
		} pages;
		uint32_t bytes;
	} size;
	uint32_t pitch : 18; /* max 128k */
	uint32_t tiling : 2;
	uint32_t reusable : 1;
	uint32_t dirty : 1;		/* written by the GPU in this batch */
	uint32_t domain : 2;		/* DOMAIN_* below */
	uint32_t needs_flush : 1;
	uint32_t snoop : 1;
	uint32_t io : 1;
	uint32_t flush : 1;
	uint32_t scanout : 1;
	uint32_t purged : 1;
};
|
||||
#define DOMAIN_NONE 0
|
||||
#define DOMAIN_CPU 1
|
||||
#define DOMAIN_GTT 2
|
||||
#define DOMAIN_GPU 3
|
||||
|
||||
/* A batch submission in flight: the batch bo itself, the bos it
 * references (buffers), and the ring it was submitted on.
 */
struct kgem_request {
	struct list list;	/* position in kgem->requests[ring] */
	struct kgem_bo *bo;	/* the batch buffer */
	struct list buffers;	/* bos referenced by this batch */
	int ring;
};
|
||||
|
||||
/* Indices into kgem->vma[] — one mapping cache per mapping type. */
enum {
	MAP_GTT = 0,
	MAP_CPU,
	NUM_MAP_TYPES,
};
|
||||
|
||||
/* Central state for the kernel GEM interface: bo caches, the batch
 * under construction, probed feature flags and memory budgets.
 * Initialised by kgem_init().
 */
struct kgem {
	int fd;			/* DRM device fd */
	int wedged;		/* non-zero: GPU unusable, acceleration off */
	unsigned gen;		/* GPU generation (octal-style, e.g. 060) */

	uint32_t unique_id;

	enum kgem_mode {
		/* order matches I915_EXEC_RING ordering */
		KGEM_NONE = 0,
		KGEM_RENDER,
		KGEM_BSD,
		KGEM_BLT,
	} mode, ring;

	/* bo cache lists, bucketed by size where applicable. */
	struct list flushing;
	struct list large;
	struct list large_inactive;
	struct list active[NUM_CACHE_BUCKETS][3];
	struct list inactive[NUM_CACHE_BUCKETS];
	struct list pinned_batches[2];
	struct list snoop;
	struct list scanout;
	struct list batch_buffers, active_buffers;

	struct list requests[2];	/* in-flight requests, per ring */
	struct kgem_request *next_request;
	struct kgem_request static_request;

	/* Cached mappings, per mapping type (MAP_GTT/MAP_CPU). */
	struct {
		struct list inactive[NUM_CACHE_BUCKETS];
		int16_t count;
	} vma[NUM_MAP_TYPES];

	uint32_t batch_flags;
	uint32_t batch_flags_base;
#define I915_EXEC_SECURE (1<<9)
#define LOCAL_EXEC_OBJECT_WRITE (1<<2)

	/* Cursors into the batch/exec/reloc arrays below. */
	uint16_t nbatch;
	uint16_t surface;	/* surface state grows down from batch end */
	uint16_t nexec;
	uint16_t nreloc;
	uint16_t nreloc__self;
	uint16_t nfence;
	uint16_t batch_size;
	uint16_t min_alignment;

	uint32_t flush:1;
	uint32_t need_expire:1;
	uint32_t need_purge:1;
	uint32_t need_retire:1;
	uint32_t need_throttle:1;
	uint32_t scanout_busy:1;
	uint32_t busy:1;

	/* Feature flags probed in kgem_init(). */
	uint32_t has_userptr :1;
	uint32_t has_blt :1;
	uint32_t has_relaxed_fencing :1;
	uint32_t has_relaxed_delta :1;
	uint32_t has_semaphores :1;
	uint32_t has_secure_batches :1;
	uint32_t has_pinned_batches :1;
	uint32_t has_cacheing :1;
	uint32_t has_llc :1;
	uint32_t has_no_reloc :1;
	uint32_t has_handle_lut :1;

	uint32_t can_blt_cpu :1;

	/* Memory budgets, mostly in bytes (aperture_low/high are
	 * converted to pages at the end of kgem_init()).
	 */
	uint16_t fence_max;
	uint16_t half_cpu_cache_pages;
	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
	uint32_t aperture, aperture_fenced;
	uint32_t max_upload_tile_size, max_copy_tile_size;
	uint32_t max_gpu_size, max_cpu_size;
	uint32_t large_object_size, max_object_size;
	uint32_t buffer_size;

	void (*context_switch)(struct kgem *kgem, int new_mode);
	void (*retire)(struct kgem *kgem);
	void (*expire)(struct kgem *kgem);

	/* The batch under construction and its relocation tables. */
	uint32_t batch[64*1024-8];
	struct drm_i915_gem_exec_object2 exec[256];
	struct drm_i915_gem_relocation_entry reloc[4096];
	uint16_t reloc__self[256];

#ifdef DEBUG_MEMORY
	struct {
		int bo_allocs;
		size_t bo_bytes;
	} debug_memory;
#endif
};
|
||||
|
||||
#define KGEM_BATCH_RESERVED 1
|
||||
#define KGEM_RELOC_RESERVED 4
|
||||
#define KGEM_EXEC_RESERVED 1
|
||||
|
||||
#ifndef ARRAY_SIZE
|
||||
#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
|
||||
#endif
|
||||
|
||||
#define KGEM_BATCH_SIZE(K) ((K)->batch_size-KGEM_BATCH_RESERVED)
|
||||
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
|
||||
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)
|
||||
|
||||
void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen);
|
||||
void kgem_reset(struct kgem *kgem);
|
||||
|
||||
struct kgem_bo *kgem_create_map(struct kgem *kgem,
|
||||
void *ptr, uint32_t size,
|
||||
bool read_only);
|
||||
|
||||
struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);
|
||||
|
||||
struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags);
|
||||
struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
|
||||
struct kgem_bo *target,
|
||||
int offset, int length);
|
||||
|
||||
|
||||
int kgem_choose_tiling(struct kgem *kgem,
|
||||
int tiling, int width, int height, int bpp);
|
||||
unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
|
||||
#define KGEM_CAN_CREATE_GPU 0x1
|
||||
#define KGEM_CAN_CREATE_CPU 0x2
|
||||
#define KGEM_CAN_CREATE_LARGE 0x4
|
||||
#define KGEM_CAN_CREATE_GTT 0x8
|
||||
|
||||
struct kgem_bo *
|
||||
kgem_replace_bo(struct kgem *kgem,
|
||||
struct kgem_bo *src,
|
||||
uint32_t width,
|
||||
uint32_t height,
|
||||
uint32_t pitch,
|
||||
uint32_t bpp);
|
||||
enum {
|
||||
CREATE_EXACT = 0x1,
|
||||
CREATE_INACTIVE = 0x2,
|
||||
CREATE_CPU_MAP = 0x4,
|
||||
CREATE_GTT_MAP = 0x8,
|
||||
CREATE_SCANOUT = 0x10,
|
||||
CREATE_PRIME = 0x20,
|
||||
CREATE_TEMPORARY = 0x40,
|
||||
CREATE_CACHED = 0x80,
|
||||
CREATE_NO_RETIRE = 0x100,
|
||||
CREATE_NO_THROTTLE = 0x200,
|
||||
};
|
||||
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
|
||||
int width,
|
||||
int height,
|
||||
int bpp,
|
||||
int tiling,
|
||||
uint32_t flags);
|
||||
|
||||
uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
|
||||
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
|
||||
int kgem_bo_get_swizzling(struct kgem *kgem, struct kgem_bo *bo);
|
||||
|
||||
bool kgem_retire(struct kgem *kgem);
|
||||
|
||||
bool __kgem_ring_is_idle(struct kgem *kgem, int ring);
|
||||
static inline bool kgem_ring_is_idle(struct kgem *kgem, int ring)
|
||||
{
|
||||
ring = ring == KGEM_BLT;
|
||||
|
||||
if (list_is_empty(&kgem->requests[ring]))
|
||||
return true;
|
||||
|
||||
return __kgem_ring_is_idle(kgem, ring);
|
||||
}
|
||||
|
||||
static inline bool kgem_is_idle(struct kgem *kgem)
|
||||
{
|
||||
if (!kgem->need_retire)
|
||||
return true;
|
||||
|
||||
return kgem_ring_is_idle(kgem, kgem->ring);
|
||||
}
|
||||
|
||||
void _kgem_submit(struct kgem *kgem);
|
||||
static inline void kgem_submit(struct kgem *kgem)
|
||||
{
|
||||
if (kgem->nbatch)
|
||||
_kgem_submit(kgem);
|
||||
}
|
||||
|
||||
/* Decide whether the current batch should be flushed on behalf of a
 * bo whose flush requirement differs from the batch's.  Returns true
 * only when relocations exist, the bo's flush flag disagrees with the
 * batch's (the XOR), and the ring is otherwise idle.
 */
static inline bool kgem_flush(struct kgem *kgem, bool flush)
{
	if (kgem->nreloc == 0)
		return false;

	return (kgem->flush ^ flush) && kgem_ring_is_idle(kgem, kgem->ring);
}
|
||||
|
||||
#if 0
|
||||
|
||||
/* (Currently compiled out.)  Submit the batch if this bo is referenced
 * by it, so the bo's commands reach the GPU.
 */
static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
{
	if (bo->exec)
		_kgem_submit(kgem);
}
|
||||
|
||||
void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo);
|
||||
/* (Currently compiled out.)  Ensure any pending writes to the bo are
 * flushed: submit the batch it is part of, then ask the kernel to
 * flush if the bo still needs it.
 */
static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo)
{
	kgem_bo_submit(kgem, bo);

	if (!bo->needs_flush)
		return;

	/* If the kernel fails to emit the flush, then it will be forced when
	 * we assume direct access. And as the usual failure is EIO, we do
	 * not actually care.
	 */
	__kgem_flush(kgem, bo);
}
|
||||
|
||||
#endif
|
||||
|
||||
static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
|
||||
{
|
||||
assert(bo->refcnt);
|
||||
bo->refcnt++;
|
||||
return bo;
|
||||
}
|
||||
|
||||
void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
|
||||
static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
|
||||
{
|
||||
assert(bo->refcnt);
|
||||
if (--bo->refcnt == 0)
|
||||
_kgem_bo_destroy(kgem, bo);
|
||||
}
|
||||
|
||||
void kgem_clear_dirty(struct kgem *kgem);
|
||||
|
||||
/* Switch the batch to a new ring/mode, possibly on behalf of bo.
 * No-op when the mode is unchanged.  The context_switch callback is
 * disabled in this port (commented out below).
 */
static inline void kgem_set_mode(struct kgem *kgem,
				 enum kgem_mode mode,
				 struct kgem_bo *bo)
{
	assert(!kgem->wedged);

#if DEBUG_FLUSH_BATCH
	kgem_submit(kgem);
#endif

	if (kgem->mode == mode)
		return;

//	kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}
|
||||
|
||||
/* Set the initial mode for a fresh batch.  Only valid when no mode is
 * active and the batch is empty (asserted); the context_switch
 * callback is disabled in this port.
 */
static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
	assert(kgem->mode == KGEM_NONE);
	assert(kgem->nbatch == 0);
	assert(!kgem->wedged);
//	kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}
|
||||
|
||||
static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
|
||||
{
|
||||
assert(num_dwords > 0);
|
||||
assert(kgem->nbatch < kgem->surface);
|
||||
assert(kgem->surface <= kgem->batch_size);
|
||||
return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
|
||||
}
|
||||
|
||||
static inline bool kgem_check_reloc(struct kgem *kgem, int n)
|
||||
{
|
||||
assert(kgem->nreloc <= KGEM_RELOC_SIZE(kgem));
|
||||
return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
|
||||
}
|
||||
|
||||
static inline bool kgem_check_exec(struct kgem *kgem, int n)
|
||||
{
|
||||
assert(kgem->nexec <= KGEM_EXEC_SIZE(kgem));
|
||||
return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
|
||||
}
|
||||
|
||||
/* True when both the reloc and exec tables have room for n entries. */
static inline bool kgem_check_reloc_and_exec(struct kgem *kgem, int n)
{
	if (!kgem_check_reloc(kgem, n))
		return false;

	return kgem_check_exec(kgem, n);
}
|
||||
|
||||
static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
|
||||
int num_dwords,
|
||||
int num_surfaces)
|
||||
{
|
||||
return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
|
||||
kgem_check_reloc(kgem, num_surfaces) &&
|
||||
kgem_check_exec(kgem, num_surfaces);
|
||||
}
|
||||
|
||||
static inline uint32_t *kgem_get_batch(struct kgem *kgem)
|
||||
{
|
||||
|
||||
return kgem->batch + kgem->nbatch;
|
||||
}
|
||||
|
||||
bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
|
||||
bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo);
|
||||
bool kgem_check_many_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));
|
||||
|
||||
#define KGEM_RELOC_FENCED 0x8000
|
||||
uint32_t kgem_add_reloc(struct kgem *kgem,
|
||||
uint32_t pos,
|
||||
struct kgem_bo *bo,
|
||||
uint32_t read_write_domains,
|
||||
uint32_t delta);
|
||||
|
||||
void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
|
||||
void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo);
|
||||
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
|
||||
void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
|
||||
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
|
||||
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
|
||||
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
|
||||
void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write);
|
||||
void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
|
||||
void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr);
|
||||
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);
|
||||
|
||||
bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
|
||||
const void *data, int length);
|
||||
|
||||
int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
|
||||
void kgem_get_tile_size(struct kgem *kgem, int tiling,
|
||||
int *tile_width, int *tile_height, int *tile_size);
|
||||
|
||||
static inline int __kgem_buffer_size(struct kgem_bo *bo)
|
||||
{
|
||||
assert(bo->proxy != NULL);
|
||||
return bo->size.bytes;
|
||||
}
|
||||
|
||||
static inline int __kgem_bo_size(struct kgem_bo *bo)
|
||||
{
|
||||
assert(bo->proxy == NULL);
|
||||
return PAGE_SIZE * bo->size.pages.count;
|
||||
}
|
||||
|
||||
static inline int kgem_bo_size(struct kgem_bo *bo)
|
||||
{
|
||||
if (bo->proxy)
|
||||
return __kgem_buffer_size(bo);
|
||||
else
|
||||
return __kgem_bo_size(bo);
|
||||
}
|
||||
|
||||
/*
|
||||
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
|
||||
struct kgem_bo *bo)
|
||||
{
|
||||
int pitch = bo->pitch;
|
||||
if (kgem->gen >= 040 && bo->tiling)
|
||||
pitch /= 4;
|
||||
if (pitch > MAXSHORT) {
|
||||
DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
|
||||
__FUNCTION__, bo->handle, pitch));
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool kgem_bo_can_blt(struct kgem *kgem,
|
||||
struct kgem_bo *bo)
|
||||
{
|
||||
if (bo->tiling == I915_TILING_Y) {
|
||||
DBG(("%s: can not blt to handle=%d, tiling=Y\n",
|
||||
__FUNCTION__, bo->handle));
|
||||
return false;
|
||||
}
|
||||
|
||||
return kgem_bo_blt_pitch_is_ok(kgem, bo);
|
||||
}
|
||||
*/
|
||||
|
||||
/* Estimate whether the bo can be mapped through the mappable GTT
 * aperture without eviction.
 */
static inline bool __kgem_bo_is_mappable(struct kgem *kgem,
					 struct kgem_bo *bo)
{
	/* Already resident in the GTT domain: certainly mappable. */
	if (bo->domain == DOMAIN_GTT)
		return true;

	/* Pre-gen4 tiled bos must sit at a fence-size-aligned offset;
	 * an unaligned presumed offset means a fenced map would fault.
	 */
	if (kgem->gen < 040 && bo->tiling &&
	    bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
		return false;

	/* No known offset: be conservative, accept only bos small
	 * enough (<= 1/4 of the mappable aperture) to place anywhere.
	 */
	if (!bo->presumed_offset)
		return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;

	/* Known offset: mappable iff it ends inside the mappable zone. */
	return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
}
|
||||
|
||||
/* Does the bo currently have a usable mapping for direct access?
 * bo->map is a tagged pointer (bit 0 set = CPU map, clear = GTT map):
 * a CPU map is only usable for untiled bos, a GTT map only for tiled
 * ones.  With no mapping at all, only an untiled bo already in the
 * CPU domain counts as accessible.
 */
static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: map=%p, tiling=%d, domain=%d\n",
	     __FUNCTION__, bo->map, bo->tiling, bo->domain));
	assert(bo->refcnt);

	if (bo->map == NULL)
		return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU;

	return IS_CPU_MAP(bo->map) == !bo->tiling;
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
|
||||
{
|
||||
DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
|
||||
bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
|
||||
assert(bo->refcnt);
|
||||
return bo->rq;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
|
||||
{
|
||||
DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
|
||||
bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
|
||||
assert(bo->refcnt);
|
||||
|
||||
if (bo->exec)
|
||||
return true;
|
||||
|
||||
if (kgem_flush(kgem, bo->flush))
|
||||
kgem_submit(kgem);
|
||||
|
||||
if (bo->rq && !__kgem_busy(kgem, bo->handle))
|
||||
__kgem_bo_clear_busy(bo);
|
||||
|
||||
return kgem_bo_is_busy(bo);
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
|
||||
{
|
||||
if (bo == NULL)
|
||||
return false;
|
||||
|
||||
assert(bo->refcnt);
|
||||
return bo->dirty;
|
||||
}
|
||||
|
||||
/* Mark a bo whose contents may have been changed behind our back
 * (e.g. shared with another process) as needing re-synchronisation.
 */
static inline void kgem_bo_unclean(struct kgem *kgem, struct kgem_bo *bo)
{
	/* The bo is outside of our control, so presume it is written to */
	bo->needs_flush = true;
	/* Pretend it is busy so retirement re-examines it; the kgem
	 * pointer serves as a non-NULL sentinel rq.
	 */
	if (bo->rq == NULL)
		bo->rq = (void *)kgem;

	if (bo->domain != DOMAIN_GPU)
		bo->domain = DOMAIN_NONE;
}
|
||||
|
||||
/* Record that the current batch writes this bo: flag the write on its
 * exec entry, set needs_flush/dirty, and move it to the head of its
 * request's buffer list (kgem_clear_dirty relies on dirty bos being
 * clustered at the front).
 */
static inline void __kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d (proxy? %d)\n", __FUNCTION__,
	     bo->handle, bo->proxy != NULL));

	bo->exec->flags |= LOCAL_EXEC_OBJECT_WRITE;
	bo->needs_flush = bo->dirty = true;
	list_move(&bo->request, &RQ(bo->rq)->buffers);
}
|
||||
|
||||
/* Mark a bo (and, for proxies, the whole chain of backing bos) as
 * written by the current batch.  Stops early once an already-dirty
 * bo is found, since its backing chain was dirtied with it.
 */
static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	do {
		assert(bo->exec);
		assert(bo->rq);

		if (bo->dirty)
			return;

		__kgem_bo_mark_dirty(bo);
	} while ((bo = bo->proxy));
}
|
||||
|
||||
#define KGEM_BUFFER_WRITE 0x1
|
||||
#define KGEM_BUFFER_INPLACE 0x2
|
||||
#define KGEM_BUFFER_LAST 0x4
|
||||
|
||||
#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)
|
||||
|
||||
struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
|
||||
uint32_t size, uint32_t flags,
|
||||
void **ret);
|
||||
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
|
||||
int width, int height, int bpp,
|
||||
uint32_t flags,
|
||||
void **ret);
|
||||
bool kgem_buffer_is_inplace(struct kgem_bo *bo);
|
||||
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);
|
||||
|
||||
void kgem_throttle(struct kgem *kgem);
|
||||
#define MAX_INACTIVE_TIME 10
|
||||
bool kgem_expire_cache(struct kgem *kgem);
|
||||
void kgem_purge_cache(struct kgem *kgem);
|
||||
void kgem_cleanup_cache(struct kgem *kgem);
|
||||
|
||||
#if HAS_DEBUG_FULL
|
||||
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
|
||||
#else
|
||||
/* No-op batch decoder used when full debugging is disabled. */
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
{
	(void)kgem;
	(void)nbatch;
}
|
||||
#endif
|
||||
|
||||
#endif /* KGEM_H */
|
|
@ -0,0 +1,537 @@
|
|||
/*
|
||||
* (C) Copyright IBM Corporation 2006
|
||||
* Copyright 2009 Red Hat, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* on the rights to use, copy, modify, merge, publish, distribute, sub
|
||||
* license, and/or sell copies of the Software, and to permit persons to whom
|
||||
* the Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* IBM AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
/*
|
||||
* Copyright (c) 2007 Paulo R. Zanoni, Tiago Vignatti
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person
|
||||
* obtaining a copy of this software and associated documentation
|
||||
* files (the "Software"), to deal in the Software without
|
||||
* restriction, including without limitation the rights to use,
|
||||
* copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following
|
||||
* conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be
|
||||
* included in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* \file pciaccess.h
|
||||
*
|
||||
* \author Ian Romanick <idr@us.ibm.com>
|
||||
*/
|
||||
|
||||
#ifndef PCIACCESS_H
|
||||
#define PCIACCESS_H
|
||||
|
||||
#include <inttypes.h>
|
||||
|
||||
#if __GNUC__ >= 3
|
||||
#define __deprecated __attribute__((deprecated))
|
||||
#else
|
||||
#define __deprecated
|
||||
#endif
|
||||
|
||||
typedef uint64_t pciaddr_t;
|
||||
|
||||
struct pci_device;
|
||||
struct pci_device_iterator;
|
||||
struct pci_id_match;
|
||||
struct pci_slot_match;
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
int pci_device_has_kernel_driver(struct pci_device *dev);
|
||||
|
||||
int pci_device_is_boot_vga(struct pci_device *dev);
|
||||
|
||||
int pci_device_read_rom(struct pci_device *dev, void *buffer);
|
||||
|
||||
int __deprecated pci_device_map_region(struct pci_device *dev,
|
||||
unsigned region, int write_enable);
|
||||
|
||||
int __deprecated pci_device_unmap_region(struct pci_device *dev,
|
||||
unsigned region);
|
||||
|
||||
int pci_device_map_range(struct pci_device *dev, pciaddr_t base,
|
||||
pciaddr_t size, unsigned map_flags, void **addr);
|
||||
|
||||
int pci_device_unmap_range(struct pci_device *dev, void *memory,
|
||||
pciaddr_t size);
|
||||
|
||||
int __deprecated pci_device_map_memory_range(struct pci_device *dev,
|
||||
pciaddr_t base, pciaddr_t size, int write_enable, void **addr);
|
||||
|
||||
int __deprecated pci_device_unmap_memory_range(struct pci_device *dev,
|
||||
void *memory, pciaddr_t size);
|
||||
|
||||
int pci_device_probe(struct pci_device *dev);
|
||||
|
||||
const struct pci_agp_info *pci_device_get_agp_info(struct pci_device *dev);
|
||||
|
||||
const struct pci_bridge_info *pci_device_get_bridge_info(
|
||||
struct pci_device *dev);
|
||||
|
||||
const struct pci_pcmcia_bridge_info *pci_device_get_pcmcia_bridge_info(
|
||||
struct pci_device *dev);
|
||||
|
||||
int pci_device_get_bridge_buses(struct pci_device *dev, int *primary_bus,
|
||||
int *secondary_bus, int *subordinate_bus);
|
||||
|
||||
int pci_system_init(void);
|
||||
|
||||
void pci_system_init_dev_mem(int fd);
|
||||
|
||||
void pci_system_cleanup(void);
|
||||
|
||||
struct pci_device_iterator *pci_slot_match_iterator_create(
|
||||
const struct pci_slot_match *match);
|
||||
|
||||
struct pci_device_iterator *pci_id_match_iterator_create(
|
||||
const struct pci_id_match *match);
|
||||
|
||||
void pci_iterator_destroy(struct pci_device_iterator *iter);
|
||||
|
||||
struct pci_device *pci_device_next(struct pci_device_iterator *iter);
|
||||
|
||||
struct pci_device *pci_device_find_by_slot(uint32_t domain, uint32_t bus,
|
||||
uint32_t dev, uint32_t func);
|
||||
|
||||
struct pci_device *pci_device_get_parent_bridge(struct pci_device *dev);
|
||||
|
||||
void pci_get_strings(const struct pci_id_match *m,
|
||||
const char **device_name, const char **vendor_name,
|
||||
const char **subdevice_name, const char **subvendor_name);
|
||||
const char *pci_device_get_device_name(const struct pci_device *dev);
|
||||
const char *pci_device_get_subdevice_name(const struct pci_device *dev);
|
||||
const char *pci_device_get_vendor_name(const struct pci_device *dev);
|
||||
const char *pci_device_get_subvendor_name(const struct pci_device *dev);
|
||||
|
||||
void pci_device_enable(struct pci_device *dev);
|
||||
|
||||
int pci_device_cfg_read (struct pci_device *dev, void *data,
|
||||
pciaddr_t offset, pciaddr_t size, pciaddr_t *bytes_read);
|
||||
int pci_device_cfg_read_u8 (struct pci_device *dev, uint8_t *data,
|
||||
pciaddr_t offset);
|
||||
int pci_device_cfg_read_u16(struct pci_device *dev, uint16_t *data,
|
||||
pciaddr_t offset);
|
||||
int pci_device_cfg_read_u32(struct pci_device *dev, uint32_t *data,
|
||||
pciaddr_t offset);
|
||||
|
||||
int pci_device_cfg_write (struct pci_device *dev, const void *data,
|
||||
pciaddr_t offset, pciaddr_t size, pciaddr_t *bytes_written);
|
||||
int pci_device_cfg_write_u8 (struct pci_device *dev, uint8_t data,
|
||||
pciaddr_t offset);
|
||||
int pci_device_cfg_write_u16(struct pci_device *dev, uint16_t data,
|
||||
pciaddr_t offset);
|
||||
int pci_device_cfg_write_u32(struct pci_device *dev, uint32_t data,
|
||||
pciaddr_t offset);
|
||||
int pci_device_cfg_write_bits(struct pci_device *dev, uint32_t mask,
|
||||
uint32_t data, pciaddr_t offset);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* \name Mapping flags passed to \c pci_device_map_range
|
||||
*/
|
||||
/*@{*/
|
||||
#define PCI_DEV_MAP_FLAG_WRITABLE (1U<<0)
|
||||
#define PCI_DEV_MAP_FLAG_WRITE_COMBINE (1U<<1)
|
||||
#define PCI_DEV_MAP_FLAG_CACHABLE (1U<<2)
|
||||
/*@}*/
|
||||
|
||||
|
||||
#define PCI_MATCH_ANY (~0)
|
||||
|
||||
/**
|
||||
* Compare two PCI ID values (either vendor or device). This is used
|
||||
* internally to compare the fields of \c pci_id_match to the fields of
|
||||
* \c pci_device.
|
||||
*/
|
||||
#define PCI_ID_COMPARE(a, b) \
|
||||
(((a) == PCI_MATCH_ANY) || ((a) == (b)))
|
||||
|
||||
/**
|
||||
*/
|
||||
struct pci_id_match {
|
||||
/**
|
||||
* \name Device / vendor matching controls
|
||||
*
|
||||
* Control the search based on the device, vendor, subdevice, or subvendor
|
||||
* IDs. Setting any of these fields to \c PCI_MATCH_ANY will cause the
|
||||
* field to not be used in the comparison.
|
||||
*/
|
||||
/*@{*/
|
||||
uint32_t vendor_id;
|
||||
uint32_t device_id;
|
||||
uint32_t subvendor_id;
|
||||
uint32_t subdevice_id;
|
||||
/*@}*/
|
||||
|
||||
|
||||
/**
|
||||
* \name Device class matching controls
|
||||
*
|
||||
*/
|
||||
/*@{*/
|
||||
uint32_t device_class;
|
||||
uint32_t device_class_mask;
|
||||
/*@}*/
|
||||
|
||||
intptr_t match_data;
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
*/
|
||||
struct pci_slot_match {
|
||||
/**
|
||||
* \name Device slot matching controls
|
||||
*
|
||||
* Control the search based on the domain, bus, slot, and function of
|
||||
* the device. Setting any of these fields to \c PCI_MATCH_ANY will cause
|
||||
* the field to not be used in the comparison.
|
||||
*/
|
||||
/*@{*/
|
||||
uint32_t domain;
|
||||
uint32_t bus;
|
||||
uint32_t dev;
|
||||
uint32_t func;
|
||||
/*@}*/
|
||||
|
||||
intptr_t match_data;
|
||||
};
|
||||
|
||||
/**
|
||||
* BAR descriptor for a PCI device.
|
||||
*/
|
||||
struct pci_mem_region {
|
||||
/**
|
||||
* When the region is mapped, this is the pointer to the memory.
|
||||
*
|
||||
* This field is \b only set when the deprecated \c pci_device_map_region
|
||||
* interface is used. Use \c pci_device_map_range instead.
|
||||
*
|
||||
* \deprecated
|
||||
*/
|
||||
void *memory;
|
||||
|
||||
|
||||
/**
|
||||
* Base physical address of the region within its bus / domain.
|
||||
*
|
||||
* \warning
|
||||
* This address is really only useful to other devices in the same
|
||||
* domain. It's probably \b not the address applications will ever
|
||||
* use.
|
||||
*
|
||||
* \warning
|
||||
* Most (all?) platform back-ends leave this field unset.
|
||||
*/
|
||||
pciaddr_t bus_addr;
|
||||
|
||||
|
||||
/**
|
||||
* Base physical address of the region from the CPU's point of view.
|
||||
*
|
||||
* This address is typically passed to \c pci_device_map_range to create
|
||||
* a mapping of the region to the CPU's virtual address space.
|
||||
*/
|
||||
pciaddr_t base_addr;
|
||||
|
||||
|
||||
/**
|
||||
* Size, in bytes, of the region.
|
||||
*/
|
||||
pciaddr_t size;
|
||||
|
||||
|
||||
/**
|
||||
* Is the region I/O ports or memory?
|
||||
*/
|
||||
unsigned is_IO:1;
|
||||
|
||||
/**
|
||||
* Is the memory region prefetchable?
|
||||
*
|
||||
* \note
|
||||
* This can only be set if \c is_IO is not set.
|
||||
*/
|
||||
unsigned is_prefetchable:1;
|
||||
|
||||
|
||||
/**
|
||||
* Is the memory at a 64-bit address?
|
||||
*
|
||||
* \note
|
||||
* This can only be set if \c is_IO is not set.
|
||||
*/
|
||||
unsigned is_64:1;
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* PCI device.
|
||||
*
|
||||
* Contains all of the information about a particular PCI device.
|
||||
*/
|
||||
struct pci_device {
|
||||
/**
|
||||
* \name Device bus identification.
|
||||
*
|
||||
* Complete bus identification, including domain, of the device. On
|
||||
* platforms that do not support PCI domains (e.g., 32-bit x86 hardware),
|
||||
* the domain will always be zero.
|
||||
*/
|
||||
/*@{*/
|
||||
uint16_t domain;
|
||||
uint8_t bus;
|
||||
uint8_t dev;
|
||||
uint8_t func;
|
||||
/*@}*/
|
||||
|
||||
|
||||
/**
|
||||
* \name Vendor / device ID
|
||||
*
|
||||
* The vendor ID, device ID, and sub-IDs for the device.
|
||||
*/
|
||||
/*@{*/
|
||||
uint16_t vendor_id;
|
||||
uint16_t device_id;
|
||||
uint16_t subvendor_id;
|
||||
uint16_t subdevice_id;
|
||||
/*@}*/
|
||||
|
||||
/**
|
||||
* Device's class, subclass, and programming interface packed into a
|
||||
* single 32-bit value. The class is at bits [23:16], subclass is at
|
||||
* bits [15:8], and programming interface is at [7:0].
|
||||
*/
|
||||
uint32_t device_class;
|
||||
|
||||
|
||||
/**
|
||||
* Device revision number, as read from the configuration header.
|
||||
*/
|
||||
uint8_t revision;
|
||||
|
||||
|
||||
/**
|
||||
* BAR descriptors for the device.
|
||||
*/
|
||||
struct pci_mem_region regions[6];
|
||||
|
||||
|
||||
/**
|
||||
* Size, in bytes, of the device's expansion ROM.
|
||||
*/
|
||||
pciaddr_t rom_size;
|
||||
|
||||
|
||||
/**
|
||||
* IRQ associated with the device. If there is no IRQ, this value will
|
||||
* be -1.
|
||||
*/
|
||||
int irq;
|
||||
|
||||
|
||||
/**
|
||||
* Storage for user data. Users of the library can store arbitrary
|
||||
* data in this pointer. The library will not use it for any purpose.
|
||||
* It is the user's responsability to free this memory before destroying
|
||||
* the \c pci_device structure.
|
||||
*/
|
||||
intptr_t user_data;
|
||||
|
||||
/**
|
||||
* Used by the VGA arbiter. Type of resource decoded by the device and
|
||||
* the file descriptor (/dev/vga_arbiter). */
|
||||
int vgaarb_rsrc;
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* Description of the AGP capability of the device.
|
||||
*
|
||||
* \sa pci_device_get_agp_info
|
||||
*/
|
||||
struct pci_agp_info {
|
||||
/**
|
||||
* Offset of the AGP registers in the devices configuration register
|
||||
* space. This is generally used so that the offset of the AGP command
|
||||
* register can be determined.
|
||||
*/
|
||||
unsigned config_offset;
|
||||
|
||||
|
||||
/**
|
||||
* \name AGP major / minor version.
|
||||
*/
|
||||
/*@{*/
|
||||
uint8_t major_version;
|
||||
uint8_t minor_version;
|
||||
/*@}*/
|
||||
|
||||
/**
|
||||
* Logical OR of the supported AGP rates. For example, a value of 0x07
|
||||
* means that the device can support 1x, 2x, and 4x. A value of 0x0c
|
||||
* means that the device can support 8x and 4x.
|
||||
*/
|
||||
uint8_t rates;
|
||||
|
||||
unsigned int fast_writes:1; /**< Are fast-writes supported? */
|
||||
unsigned int addr64:1;
|
||||
unsigned int htrans:1;
|
||||
unsigned int gart64:1;
|
||||
unsigned int coherent:1;
|
||||
unsigned int sideband:1; /**< Is side-band addressing supported? */
|
||||
unsigned int isochronus:1;
|
||||
|
||||
uint8_t async_req_size;
|
||||
uint8_t calibration_cycle_timing;
|
||||
uint8_t max_requests;
|
||||
};
|
||||
|
||||
/**
|
||||
* Description of a PCI-to-PCI bridge device.
|
||||
*
|
||||
* \sa pci_device_get_bridge_info
|
||||
*/
|
||||
struct pci_bridge_info {
|
||||
uint8_t primary_bus;
|
||||
uint8_t secondary_bus;
|
||||
uint8_t subordinate_bus;
|
||||
uint8_t secondary_latency_timer;
|
||||
|
||||
uint8_t io_type;
|
||||
uint8_t mem_type;
|
||||
uint8_t prefetch_mem_type;
|
||||
|
||||
uint16_t secondary_status;
|
||||
uint16_t bridge_control;
|
||||
|
||||
uint32_t io_base;
|
||||
uint32_t io_limit;
|
||||
|
||||
uint32_t mem_base;
|
||||
uint32_t mem_limit;
|
||||
|
||||
uint64_t prefetch_mem_base;
|
||||
uint64_t prefetch_mem_limit;
|
||||
};
|
||||
|
||||
/**
|
||||
* Description of a PCI-to-PCMCIA bridge device.
|
||||
*
|
||||
* \sa pci_device_get_pcmcia_bridge_info
|
||||
*/
|
||||
struct pci_pcmcia_bridge_info {
|
||||
uint8_t primary_bus;
|
||||
uint8_t card_bus;
|
||||
uint8_t subordinate_bus;
|
||||
uint8_t cardbus_latency_timer;
|
||||
|
||||
uint16_t secondary_status;
|
||||
uint16_t bridge_control;
|
||||
|
||||
struct {
|
||||
uint32_t base;
|
||||
uint32_t limit;
|
||||
} io[2];
|
||||
|
||||
struct {
|
||||
uint32_t base;
|
||||
uint32_t limit;
|
||||
} mem[2];
|
||||
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* VGA Arbiter definitions, functions and related.
|
||||
*/
|
||||
|
||||
/* Legacy VGA regions */
|
||||
#define VGA_ARB_RSRC_NONE 0x00
|
||||
#define VGA_ARB_RSRC_LEGACY_IO 0x01
|
||||
#define VGA_ARB_RSRC_LEGACY_MEM 0x02
|
||||
/* Non-legacy access */
|
||||
#define VGA_ARB_RSRC_NORMAL_IO 0x04
|
||||
#define VGA_ARB_RSRC_NORMAL_MEM 0x08
|
||||
|
||||
int pci_device_vgaarb_init (void);
|
||||
void pci_device_vgaarb_fini (void);
|
||||
int pci_device_vgaarb_set_target (struct pci_device *dev);
|
||||
/* use the targetted device */
|
||||
int pci_device_vgaarb_decodes (int new_vga_rsrc);
|
||||
int pci_device_vgaarb_lock (void);
|
||||
int pci_device_vgaarb_trylock (void);
|
||||
int pci_device_vgaarb_unlock (void);
|
||||
/* return the current device count + resource decodes for the device */
|
||||
int pci_device_vgaarb_get_info (struct pci_device *dev, int *vga_count, int *rsrc_decodes);
|
||||
|
||||
/*
|
||||
* I/O space access.
|
||||
*/
|
||||
|
||||
struct pci_io_handle;
|
||||
|
||||
struct pci_io_handle *pci_device_open_io(struct pci_device *dev, pciaddr_t base,
|
||||
pciaddr_t size);
|
||||
struct pci_io_handle *pci_legacy_open_io(struct pci_device *dev, pciaddr_t base,
|
||||
pciaddr_t size);
|
||||
void pci_device_close_io(struct pci_device *dev, struct pci_io_handle *handle);
|
||||
uint32_t pci_io_read32(struct pci_io_handle *handle, uint32_t reg);
|
||||
uint16_t pci_io_read16(struct pci_io_handle *handle, uint32_t reg);
|
||||
uint8_t pci_io_read8(struct pci_io_handle *handle, uint32_t reg);
|
||||
void pci_io_write32(struct pci_io_handle *handle, uint32_t reg, uint32_t data);
|
||||
void pci_io_write16(struct pci_io_handle *handle, uint32_t reg, uint16_t data);
|
||||
void pci_io_write8(struct pci_io_handle *handle, uint32_t reg, uint8_t data);
|
||||
|
||||
/*
|
||||
* Legacy memory access
|
||||
*/
|
||||
|
||||
int pci_device_map_legacy(struct pci_device *dev, pciaddr_t base,
|
||||
pciaddr_t size, unsigned map_flags, void **addr);
|
||||
int pci_device_unmap_legacy(struct pci_device *dev, void *addr, pciaddr_t size);
|
||||
|
||||
#endif /* PCIACCESS_H */
|
|
@ -0,0 +1,15 @@
|
|||
{ 0x00400031, 0x20c01fbd, 0x0069002c, 0x01110001 },
|
||||
{ 0x00400001, 0x206003be, 0x00690060, 0x00000000 },
|
||||
{ 0x00400040, 0x20e077bd, 0x00690080, 0x006940a0 },
|
||||
{ 0x00400041, 0x202077be, 0x006900e0, 0x000000c0 },
|
||||
{ 0x00400040, 0x20e077bd, 0x006900a0, 0x00694060 },
|
||||
{ 0x00400041, 0x204077be, 0x006900e0, 0x000000c8 },
|
||||
{ 0x00600031, 0x20001fbc, 0x008d0000, 0x8640c800 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
|
@ -0,0 +1,15 @@
|
|||
{ 0x00400031, 0x20c01fbd, 0x0069002c, 0x01110001 },
|
||||
{ 0x00600001, 0x206003be, 0x008d0060, 0x00000000 },
|
||||
{ 0x00600040, 0x20e077bd, 0x008d0080, 0x008d40a0 },
|
||||
{ 0x00600041, 0x202077be, 0x008d00e0, 0x000000c0 },
|
||||
{ 0x00600040, 0x20e077bd, 0x008d00a0, 0x008d4060 },
|
||||
{ 0x00600041, 0x204077be, 0x008d00e0, 0x000000c8 },
|
||||
{ 0x00600031, 0x20001fbc, 0x008d0000, 0x8640c800 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
|
@ -0,0 +1,4 @@
|
|||
{ 0x00802041, 0x21c077bd, 0x008d01c0, 0x008d02c0 },
|
||||
{ 0x00802041, 0x220077bd, 0x008d0200, 0x008d0300 },
|
||||
{ 0x00802041, 0x224077bd, 0x008d0240, 0x008d0340 },
|
||||
{ 0x00802041, 0x228077bd, 0x008d0280, 0x008d0380 },
|
|
@ -0,0 +1,4 @@
|
|||
{ 0x00800041, 0x21c077bd, 0x008d01c0, 0x008d02c0 },
|
||||
{ 0x00800041, 0x220077bd, 0x008d0200, 0x008d0300 },
|
||||
{ 0x00800041, 0x224077bd, 0x008d0240, 0x008d0340 },
|
||||
{ 0x00800041, 0x228077bd, 0x008d0280, 0x008d0380 },
|
|
@ -0,0 +1,4 @@
|
|||
{ 0x00802041, 0x21c077bd, 0x008d02c0, 0x008d0280 },
|
||||
{ 0x00802041, 0x220077bd, 0x008d0300, 0x008d0280 },
|
||||
{ 0x00802041, 0x224077bd, 0x008d0340, 0x008d0280 },
|
||||
{ 0x00802041, 0x228077bd, 0x008d0380, 0x008d0280 },
|
|
@ -0,0 +1,4 @@
|
|||
{ 0x00800041, 0x21c077bd, 0x008d02c0, 0x008d0280 },
|
||||
{ 0x00800041, 0x220077bd, 0x008d0300, 0x008d0280 },
|
||||
{ 0x00800041, 0x224077bd, 0x008d0340, 0x008d0280 },
|
||||
{ 0x00800041, 0x228077bd, 0x008d0380, 0x008d0280 },
|
|
@ -0,0 +1,8 @@
|
|||
{ 0x00802041, 0x23c077bd, 0x008d0100, 0x000000a0 },
|
||||
{ 0x00802041, 0x238077bd, 0x008d0140, 0x000000a4 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x008d0380 },
|
||||
{ 0x00802040, 0x210077be, 0x008d03c0, 0x000000ac },
|
||||
{ 0x00802041, 0x23c077bd, 0x008d0100, 0x000000b0 },
|
||||
{ 0x00802041, 0x238077bd, 0x008d0140, 0x000000b4 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x008d0380 },
|
||||
{ 0x00802040, 0x214077be, 0x008d03c0, 0x000000bc },
|
|
@ -0,0 +1,4 @@
|
|||
{ 0x0060005a, 0x210077be, 0x00000100, 0x008d0040 },
|
||||
{ 0x0060005a, 0x212077be, 0x00000100, 0x008d0080 },
|
||||
{ 0x0060005a, 0x214077be, 0x00000110, 0x008d0040 },
|
||||
{ 0x0060005a, 0x216077be, 0x00000110, 0x008d0080 },
|
|
@ -0,0 +1,16 @@
|
|||
{ 0x00802041, 0x23c077bd, 0x008d0100, 0x000000c0 },
|
||||
{ 0x00802041, 0x238077bd, 0x008d0140, 0x000000c4 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x008d0380 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x000000cc },
|
||||
{ 0x00600031, 0x21801fbd, 0x008d03c0, 0x01110001 },
|
||||
{ 0x00600031, 0x21a01fbd, 0x008d03e0, 0x01110001 },
|
||||
{ 0x00802041, 0x23c077bd, 0x008d0100, 0x000000a0 },
|
||||
{ 0x00802041, 0x238077bd, 0x008d0140, 0x000000a4 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x008d0380 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x000000ac },
|
||||
{ 0x00802041, 0x210077be, 0x008d03c0, 0x008d0180 },
|
||||
{ 0x00802041, 0x23c077bd, 0x008d0100, 0x000000b0 },
|
||||
{ 0x00802041, 0x238077bd, 0x008d0140, 0x000000b4 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x008d0380 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x000000bc },
|
||||
{ 0x00802041, 0x214077be, 0x008d03c0, 0x008d0180 },
|
|
@ -0,0 +1,12 @@
|
|||
{ 0x0060005a, 0x23c077bd, 0x00000120, 0x008d0040 },
|
||||
{ 0x0060005a, 0x23e077bd, 0x00000120, 0x008d0080 },
|
||||
{ 0x01600038, 0x218003bd, 0x008d03c0, 0x00000000 },
|
||||
{ 0x01600038, 0x21a003bd, 0x008d03e0, 0x00000000 },
|
||||
{ 0x0060005a, 0x23c077bd, 0x00000100, 0x008d0040 },
|
||||
{ 0x0060005a, 0x23e077bd, 0x00000100, 0x008d0080 },
|
||||
{ 0x00600041, 0x210077be, 0x008d03c0, 0x008d0180 },
|
||||
{ 0x00600041, 0x212077be, 0x008d03e0, 0x008d01a0 },
|
||||
{ 0x0060005a, 0x23c077bd, 0x00000110, 0x008d0040 },
|
||||
{ 0x0060005a, 0x23e077bd, 0x00000110, 0x008d0080 },
|
||||
{ 0x00600041, 0x214077be, 0x008d03c0, 0x008d0180 },
|
||||
{ 0x00600041, 0x216077be, 0x008d03e0, 0x008d01a0 },
|
|
@ -0,0 +1,3 @@
|
|||
{ 0x00000201, 0x20080061, 0x00000000, 0x00007000 },
|
||||
{ 0x00600001, 0x20e00022, 0x008d0000, 0x00000000 },
|
||||
{ 0x07800031, 0x23801c09, 0x00000000, 0x02520102 },
|
|
@ -0,0 +1,3 @@
|
|||
{ 0x00000201, 0x20080061, 0x00000000, 0x00007000 },
|
||||
{ 0x00600001, 0x20e00022, 0x008d0000, 0x00000000 },
|
||||
{ 0x02800031, 0x23801cc9, 0x000000e0, 0x0a2a0102 },
|
|
@ -0,0 +1,3 @@
|
|||
{ 0x00000201, 0x20080061, 0x00000000, 0x00000000 },
|
||||
{ 0x00600001, 0x20e00022, 0x008d0000, 0x00000000 },
|
||||
{ 0x07800031, 0x22c01c09, 0x00000000, 0x02580102 },
|
|
@ -0,0 +1,3 @@
|
|||
{ 0x00000201, 0x20080061, 0x00000000, 0x00000000 },
|
||||
{ 0x00600001, 0x20e00022, 0x008d0000, 0x00000000 },
|
||||
{ 0x02800031, 0x22c01cc9, 0x000000e0, 0x0a8a0102 },
|
|
@ -0,0 +1,4 @@
|
|||
{ 0x00802041, 0x21c077bd, 0x008d01c0, 0x008d0380 },
|
||||
{ 0x00802041, 0x220077bd, 0x008d0200, 0x008d0380 },
|
||||
{ 0x00802041, 0x224077bd, 0x008d0240, 0x008d0380 },
|
||||
{ 0x00802041, 0x228077bd, 0x008d0280, 0x008d0380 },
|
|
@ -0,0 +1,4 @@
|
|||
{ 0x00800041, 0x21c077bd, 0x008d01c0, 0x008d0380 },
|
||||
{ 0x00800041, 0x220077bd, 0x008d0200, 0x008d0380 },
|
||||
{ 0x00800041, 0x224077bd, 0x008d0240, 0x008d0380 },
|
||||
{ 0x00800041, 0x228077bd, 0x008d0280, 0x008d0380 },
|
|
@ -0,0 +1,8 @@
|
|||
{ 0x00802041, 0x23c077bd, 0x008d0100, 0x00000060 },
|
||||
{ 0x00802041, 0x238077bd, 0x008d0140, 0x00000064 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x008d0380 },
|
||||
{ 0x00802040, 0x204077be, 0x008d03c0, 0x0000006c },
|
||||
{ 0x00802041, 0x23c077bd, 0x008d0100, 0x00000070 },
|
||||
{ 0x00802041, 0x238077bd, 0x008d0140, 0x00000074 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x008d0380 },
|
||||
{ 0x00802040, 0x208077be, 0x008d03c0, 0x0000007c },
|
|
@ -0,0 +1,4 @@
|
|||
{ 0x0060005a, 0x204077be, 0x000000c0, 0x008d0040 },
|
||||
{ 0x0060005a, 0x206077be, 0x000000c0, 0x008d0080 },
|
||||
{ 0x0060005a, 0x208077be, 0x000000d0, 0x008d0040 },
|
||||
{ 0x0060005a, 0x20a077be, 0x000000d0, 0x008d0080 },
|
|
@ -0,0 +1,16 @@
|
|||
{ 0x00802041, 0x23c077bd, 0x008d0100, 0x00000080 },
|
||||
{ 0x00802041, 0x238077bd, 0x008d0140, 0x00000084 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x008d0380 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x0000008c },
|
||||
{ 0x00600031, 0x21801fbd, 0x008d03c0, 0x01110001 },
|
||||
{ 0x00600031, 0x21a01fbd, 0x008d03e0, 0x01110001 },
|
||||
{ 0x00802041, 0x23c077bd, 0x008d0100, 0x00000060 },
|
||||
{ 0x00802041, 0x238077bd, 0x008d0140, 0x00000064 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x008d0380 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x0000006c },
|
||||
{ 0x00802041, 0x204077be, 0x008d03c0, 0x008d0180 },
|
||||
{ 0x00802041, 0x23c077bd, 0x008d0100, 0x00000070 },
|
||||
{ 0x00802041, 0x238077bd, 0x008d0140, 0x00000074 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x008d0380 },
|
||||
{ 0x00802040, 0x23c077bd, 0x008d03c0, 0x0000007c },
|
||||
{ 0x00802041, 0x208077be, 0x008d03c0, 0x008d0180 },
|
|
@ -0,0 +1,12 @@
|
|||
{ 0x0060005a, 0x23c077bd, 0x000000e0, 0x008d0040 },
|
||||
{ 0x0060005a, 0x23e077bd, 0x000000e0, 0x008d0080 },
|
||||
{ 0x01600038, 0x218003bd, 0x008d03c0, 0x00000000 },
|
||||
{ 0x01600038, 0x21a003bd, 0x008d03e0, 0x00000000 },
|
||||
{ 0x0060005a, 0x23c077bd, 0x000000c0, 0x008d0040 },
|
||||
{ 0x0060005a, 0x23e077bd, 0x000000c0, 0x008d0080 },
|
||||
{ 0x00600041, 0x204077be, 0x008d03c0, 0x008d0180 },
|
||||
{ 0x00600041, 0x206077be, 0x008d03e0, 0x008d01a0 },
|
||||
{ 0x0060005a, 0x23c077bd, 0x000000d0, 0x008d0040 },
|
||||
{ 0x0060005a, 0x23e077bd, 0x000000d0, 0x008d0080 },
|
||||
{ 0x00600041, 0x208077be, 0x008d03c0, 0x008d0180 },
|
||||
{ 0x00600041, 0x20a077be, 0x008d03e0, 0x008d01a0 },
|
|
@ -0,0 +1,3 @@
|
|||
{ 0x00000201, 0x20080061, 0x00000000, 0x00007000 },
|
||||
{ 0x00600001, 0x20200022, 0x008d0000, 0x00000000 },
|
||||
{ 0x01800031, 0x22801c09, 0x00000000, 0x02520001 },
|
|
@ -0,0 +1,3 @@
|
|||
{ 0x00000201, 0x20080061, 0x00000000, 0x00007000 },
|
||||
{ 0x00600001, 0x20200022, 0x008d0000, 0x00000000 },
|
||||
{ 0x02800031, 0x22801cc9, 0x00000020, 0x0a2a0001 },
|
|
@ -0,0 +1,3 @@
|
|||
{ 0x00000201, 0x20080061, 0x00000000, 0x00000000 },
|
||||
{ 0x00600001, 0x20200022, 0x008d0000, 0x00000000 },
|
||||
{ 0x01800031, 0x21c01c09, 0x00000000, 0x02580001 },
|
|
@ -0,0 +1,3 @@
|
|||
{ 0x00000201, 0x20080061, 0x00000000, 0x00000000 },
|
||||
{ 0x00600001, 0x20200022, 0x008d0000, 0x00000000 },
|
||||
{ 0x02800031, 0x21c01cc9, 0x00000020, 0x0a8a0001 },
|
|
@ -0,0 +1,5 @@
|
|||
{ 0x00000201, 0x20080061, 0x00000000, 0x0000e000 },
|
||||
{ 0x00600001, 0x20200022, 0x008d0000, 0x00000000 },
|
||||
{ 0x01800031, 0x22001c09, 0x00000000, 0x02520001 },
|
||||
{ 0x01800031, 0x21c01c09, 0x00000000, 0x02520003 },
|
||||
{ 0x01800031, 0x22401c09, 0x00000000, 0x02520005 },
|
|
@ -0,0 +1,5 @@
|
|||
{ 0x00000201, 0x20080061, 0x00000000, 0x0000e000 },
|
||||
{ 0x00600001, 0x20200022, 0x008d0000, 0x00000000 },
|
||||
{ 0x02800031, 0x22001cc9, 0x00000020, 0x0a2a0001 },
|
||||
{ 0x02800031, 0x21c01cc9, 0x00000020, 0x0a2a0003 },
|
||||
{ 0x02800031, 0x22401cc9, 0x00000020, 0x0a2a0005 },
|
|
@ -0,0 +1,18 @@
|
|||
{ 0x00600001, 0x204003be, 0x008d01c0, 0x00000000 },
|
||||
{ 0x00600001, 0x206003be, 0x008d0200, 0x00000000 },
|
||||
{ 0x00600001, 0x208003be, 0x008d0240, 0x00000000 },
|
||||
{ 0x00600001, 0x20a003be, 0x008d0280, 0x00000000 },
|
||||
{ 0x00601001, 0x20c003be, 0x008d01e0, 0x00000000 },
|
||||
{ 0x00601001, 0x20e003be, 0x008d0220, 0x00000000 },
|
||||
{ 0x00601001, 0x210003be, 0x008d0260, 0x00000000 },
|
||||
{ 0x00601001, 0x212003be, 0x008d02a0, 0x00000000 },
|
||||
{ 0x00600201, 0x20200022, 0x008d0020, 0x00000000 },
|
||||
{ 0x00800031, 0x24001d28, 0x008d0000, 0x85a04800 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
|
@ -0,0 +1,17 @@
|
|||
{ 0x00600001, 0x204003be, 0x008d01c0, 0x00000000 },
|
||||
{ 0x00600001, 0x206003be, 0x008d01e0, 0x00000000 },
|
||||
{ 0x00600001, 0x208003be, 0x008d0200, 0x00000000 },
|
||||
{ 0x00600001, 0x20a003be, 0x008d0220, 0x00000000 },
|
||||
{ 0x00600001, 0x20c003be, 0x008d0240, 0x00000000 },
|
||||
{ 0x00600001, 0x20e003be, 0x008d0260, 0x00000000 },
|
||||
{ 0x00600001, 0x210003be, 0x008d0280, 0x00000000 },
|
||||
{ 0x00600001, 0x212003be, 0x008d02a0, 0x00000000 },
|
||||
{ 0x05800031, 0x24001cc8, 0x00000040, 0x90019000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
||||
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
|
|
@ -0,0 +1,4 @@
|
|||
{ 0x00800040, 0x23c06d29, 0x00480028, 0x10101010 },
|
||||
{ 0x00800040, 0x23806d29, 0x0048002a, 0x11001100 },
|
||||
{ 0x00802040, 0x2100753d, 0x008d03c0, 0x00004020 },
|
||||
{ 0x00802040, 0x2140753d, 0x008d0380, 0x00004024 },
|
|
@ -0,0 +1,12 @@
|
|||
{ 0x00802040, 0x23007fbd, 0x008d0200, 0xbd808081 },
|
||||
{ 0x00802041, 0x23007fbd, 0x008d0300, 0x3f94fdf4 },
|
||||
{ 0x00802040, 0x22c07fbd, 0x008d01c0, 0xbf008084 },
|
||||
{ 0x00802040, 0x23407fbd, 0x008d0240, 0xbf008084 },
|
||||
{ 0x00802001, 0x240003bc, 0x008d0300, 0x00000000 },
|
||||
{ 0x80802048, 0x21c07fbd, 0x008d02c0, 0x3fcc49ba },
|
||||
{ 0x00802001, 0x240003bc, 0x008d0300, 0x00000000 },
|
||||
{ 0x00802048, 0x24007fbc, 0x008d02c0, 0xbf5020c5 },
|
||||
{ 0x80802048, 0x22007fbd, 0x008d0340, 0xbec8b439 },
|
||||
{ 0x00802001, 0x240003bc, 0x008d0300, 0x00000000 },
|
||||
{ 0x80802048, 0x22407fbd, 0x008d0340, 0x40011687 },
|
||||
{ 0x00802001, 0x228003fd, 0x00000000, 0x3f800000 },
|
|
@ -0,0 +1,12 @@
|
|||
{ 0x00800040, 0x23007fbd, 0x008d0200, 0xbd808081 },
|
||||
{ 0x00800041, 0x23007fbd, 0x008d0300, 0x3f94fdf4 },
|
||||
{ 0x00800040, 0x22c07fbd, 0x008d01c0, 0xbf008084 },
|
||||
{ 0x00800040, 0x23407fbd, 0x008d0240, 0xbf008084 },
|
||||
{ 0x00800001, 0x240003bc, 0x008d0300, 0x00000000 },
|
||||
{ 0x80800048, 0x21c07fbd, 0x008d02c0, 0x3fcc49ba },
|
||||
{ 0x00800001, 0x240003bc, 0x008d0300, 0x00000000 },
|
||||
{ 0x00800048, 0x24007fbc, 0x008d02c0, 0xbf5020c5 },
|
||||
{ 0x80800048, 0x22007fbd, 0x008d0340, 0xbec8b439 },
|
||||
{ 0x00800001, 0x240003bc, 0x008d0300, 0x00000000 },
|
||||
{ 0x80800048, 0x22407fbd, 0x008d0340, 0x40011687 },
|
||||
{ 0x00800001, 0x228003fd, 0x00000000, 0x3f800000 },
|
|
@ -0,0 +1,598 @@
|
|||
//#include "../bitmap.h"
|
||||
|
||||
#include <memory.h>
|
||||
#include <malloc.h>
|
||||
|
||||
#include "sna.h"
|
||||
|
||||
|
||||
const struct intel_device_info *
|
||||
intel_detect_chipset(struct pci_device *pci);
|
||||
|
||||
//struct kgem_bo *create_bo(bitmap_t *bitmap);
|
||||
|
||||
static bool sna_solid_cache_init(struct sna *sna);
|
||||
|
||||
struct sna *sna_device;
|
||||
|
||||
void no_render_init(struct sna *sna)
|
||||
{
|
||||
struct sna_render *render = &sna->render;
|
||||
|
||||
memset (render,0, sizeof (*render));
|
||||
|
||||
render->prefer_gpu = PREFER_GPU_BLT;
|
||||
|
||||
render->vertices = render->vertex_data;
|
||||
render->vertex_size = ARRAY_SIZE(render->vertex_data);
|
||||
|
||||
// render->composite = no_render_composite;
|
||||
|
||||
// render->copy_boxes = no_render_copy_boxes;
|
||||
// render->copy = no_render_copy;
|
||||
|
||||
// render->fill_boxes = no_render_fill_boxes;
|
||||
// render->fill = no_render_fill;
|
||||
// render->fill_one = no_render_fill_one;
|
||||
// render->clear = no_render_clear;
|
||||
|
||||
// render->reset = no_render_reset;
|
||||
// render->flush = no_render_flush;
|
||||
// render->fini = no_render_fini;
|
||||
|
||||
// sna->kgem.context_switch = no_render_context_switch;
|
||||
// sna->kgem.retire = no_render_retire;
|
||||
|
||||
// if (sna->kgem.gen >= 60)
|
||||
sna->kgem.ring = KGEM_RENDER;
|
||||
|
||||
sna_vertex_init(sna);
|
||||
}
|
||||
|
||||
void sna_vertex_init(struct sna *sna)
|
||||
{
|
||||
// pthread_mutex_init(&sna->render.lock, NULL);
|
||||
// pthread_cond_init(&sna->render.wait, NULL);
|
||||
sna->render.active = 0;
|
||||
}
|
||||
|
||||
/*
 * Bring up acceleration for the device: install the fallback "no render"
 * hooks first, then try to replace them with a generation-specific
 * backend.  Returns true even when only the fallback stays active.
 */
bool sna_accel_init(struct sna *sna)
{
    const char *backend;

//    list_init(&sna->deferred_free);
//    list_init(&sna->dirty_pixmaps);
//    list_init(&sna->active_pixmaps);
//    list_init(&sna->inactive_clock[0]);
//    list_init(&sna->inactive_clock[1]);

//    sna_accel_install_timers(sna);


    backend = "no";
    no_render_init(sna);

    /*
     * Gen numbers are octal literals: 060 = gen6 (Sandy Bridge),
     * 070 = gen7 (Ivy Bridge), 075 = Haswell — see the
     * intel_*_info tables below.  Only the gen6 backend is wired
     * up in this port; the other generations stay commented out.
     */
    if (sna->info->gen >= 0100) {
/*    } else if (sna->info->gen >= 070) {
        if (gen7_render_init(sna))
            backend = "IvyBridge"; */
    } else if (sna->info->gen >= 060) {
        if (gen6_render_init(sna))
            backend = "SandyBridge";
/*    } else if (sna->info->gen >= 050) {
        if (gen5_render_init(sna))
            backend = "Ironlake";
    } else if (sna->info->gen >= 040) {
        if (gen4_render_init(sna))
            backend = "Broadwater/Crestline";
    } else if (sna->info->gen >= 030) {
        if (gen3_render_init(sna))
            backend = "gen3";
    } else if (sna->info->gen >= 020) {
        if (gen2_render_init(sna))
            backend = "gen2"; */
    }

    DBG(("%s(backend=%s, prefer_gpu=%x)\n",
         __FUNCTION__, backend, sna->render.prefer_gpu));

    /* Start from a clean batch/command-submission state. */
    kgem_reset(&sna->kgem);

//    if (!sna_solid_cache_init(sna))
//        return false;

    /* Publish the instance used by the sna_blit_* entry points. */
    sna_device = sna;
#if 0
    /* Disabled smoke test: clear a dummy 1024x768 framebuffer. */
    {
        struct kgem_bo *screen_bo;
        bitmap_t screen;

        screen.pitch  = 1024*4;
        screen.gaddr  = 0;
        screen.width  = 1024;
        screen.height = 768;
        screen.obj    = (void*)-1;

        screen_bo = create_bo(&screen);

        sna->render.clear(sna, &screen, screen_bo);
    }
#endif

    return true;
}
|
||||
|
||||
int sna_init(uint32_t service)
|
||||
{
|
||||
ioctl_t io;
|
||||
|
||||
static struct pci_device device;
|
||||
struct sna *sna;
|
||||
|
||||
DBG(("%s\n", __FUNCTION__));
|
||||
|
||||
sna = malloc(sizeof(struct sna));
|
||||
if (sna == NULL)
|
||||
return false;
|
||||
|
||||
io.handle = service;
|
||||
io.io_code = SRV_GET_INFO;
|
||||
io.input = &device;
|
||||
io.inp_size = sizeof(device);
|
||||
io.output = NULL;
|
||||
io.out_size = 0;
|
||||
|
||||
if (call_service(&io)!=0)
|
||||
return false;
|
||||
|
||||
sna->PciInfo = &device;
|
||||
|
||||
sna->info = intel_detect_chipset(sna->PciInfo);
|
||||
|
||||
kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);
|
||||
/*
|
||||
if (!xf86ReturnOptValBool(sna->Options,
|
||||
OPTION_RELAXED_FENCING,
|
||||
sna->kgem.has_relaxed_fencing)) {
|
||||
xf86DrvMsg(scrn->scrnIndex,
|
||||
sna->kgem.has_relaxed_fencing ? X_CONFIG : X_PROBED,
|
||||
"Disabling use of relaxed fencing\n");
|
||||
sna->kgem.has_relaxed_fencing = 0;
|
||||
}
|
||||
if (!xf86ReturnOptValBool(sna->Options,
|
||||
OPTION_VMAP,
|
||||
sna->kgem.has_vmap)) {
|
||||
xf86DrvMsg(scrn->scrnIndex,
|
||||
sna->kgem.has_vmap ? X_CONFIG : X_PROBED,
|
||||
"Disabling use of vmap\n");
|
||||
sna->kgem.has_vmap = 0;
|
||||
}
|
||||
*/
|
||||
|
||||
/* Disable tiling by default */
|
||||
sna->tiling = SNA_TILING_DISABLE;
|
||||
|
||||
/* Default fail-safe value of 75 Hz */
|
||||
// sna->vblank_interval = 1000 * 1000 * 1000 / 75;
|
||||
|
||||
sna->flags = 0;
|
||||
|
||||
return sna_accel_init(sna);
|
||||
}
|
||||
|
||||
#if 0
|
||||
|
||||
static bool sna_solid_cache_init(struct sna *sna)
|
||||
{
|
||||
struct sna_solid_cache *cache = &sna->render.solid_cache;
|
||||
|
||||
DBG(("%s\n", __FUNCTION__));
|
||||
|
||||
cache->cache_bo =
|
||||
kgem_create_linear(&sna->kgem, sizeof(cache->color));
|
||||
if (!cache->cache_bo)
|
||||
return FALSE;
|
||||
|
||||
/*
|
||||
* Initialise [0] with white since it is very common and filling the
|
||||
* zeroth slot simplifies some of the checks.
|
||||
*/
|
||||
cache->color[0] = 0xffffffff;
|
||||
cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
|
||||
cache->bo[0]->pitch = 4;
|
||||
cache->dirty = 1;
|
||||
cache->size = 1;
|
||||
cache->last = 0;
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
void
|
||||
sna_render_flush_solid(struct sna *sna)
|
||||
{
|
||||
struct sna_solid_cache *cache = &sna->render.solid_cache;
|
||||
|
||||
DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
|
||||
assert(cache->dirty);
|
||||
assert(cache->size);
|
||||
|
||||
kgem_bo_write(&sna->kgem, cache->cache_bo,
|
||||
cache->color, cache->size*sizeof(uint32_t));
|
||||
cache->dirty = 0;
|
||||
cache->last = 0;
|
||||
}
|
||||
|
||||
static void
|
||||
sna_render_finish_solid(struct sna *sna, bool force)
|
||||
{
|
||||
struct sna_solid_cache *cache = &sna->render.solid_cache;
|
||||
int i;
|
||||
|
||||
DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
|
||||
force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));
|
||||
|
||||
if (!force && cache->cache_bo->domain != DOMAIN_GPU)
|
||||
return;
|
||||
|
||||
if (cache->dirty)
|
||||
sna_render_flush_solid(sna);
|
||||
|
||||
for (i = 0; i < cache->size; i++) {
|
||||
if (cache->bo[i] == NULL)
|
||||
continue;
|
||||
|
||||
kgem_bo_destroy(&sna->kgem, cache->bo[i]);
|
||||
cache->bo[i] = NULL;
|
||||
}
|
||||
kgem_bo_destroy(&sna->kgem, cache->cache_bo);
|
||||
|
||||
DBG(("sna_render_finish_solid reset\n"));
|
||||
|
||||
cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
|
||||
cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
|
||||
cache->bo[0]->pitch = 4;
|
||||
if (force)
|
||||
cache->size = 1;
|
||||
}
|
||||
|
||||
|
||||
struct kgem_bo *
|
||||
sna_render_get_solid(struct sna *sna, uint32_t color)
|
||||
{
|
||||
struct sna_solid_cache *cache = &sna->render.solid_cache;
|
||||
int i;
|
||||
|
||||
DBG(("%s: %08x\n", __FUNCTION__, color));
|
||||
|
||||
// if ((color & 0xffffff) == 0) /* alpha only */
|
||||
// return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);
|
||||
|
||||
if (color == 0xffffffff) {
|
||||
DBG(("%s(white)\n", __FUNCTION__));
|
||||
return kgem_bo_reference(cache->bo[0]);
|
||||
}
|
||||
|
||||
if (cache->color[cache->last] == color) {
|
||||
DBG(("sna_render_get_solid(%d) = %x (last)\n",
|
||||
cache->last, color));
|
||||
return kgem_bo_reference(cache->bo[cache->last]);
|
||||
}
|
||||
|
||||
for (i = 1; i < cache->size; i++) {
|
||||
if (cache->color[i] == color) {
|
||||
if (cache->bo[i] == NULL) {
|
||||
DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
|
||||
i, color));
|
||||
goto create;
|
||||
} else {
|
||||
DBG(("sna_render_get_solid(%d) = %x (old)\n",
|
||||
i, color));
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));
|
||||
|
||||
i = cache->size++;
|
||||
cache->color[i] = color;
|
||||
cache->dirty = 1;
|
||||
DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));
|
||||
|
||||
create:
|
||||
cache->bo[i] = kgem_create_proxy(cache->cache_bo,
|
||||
i*sizeof(uint32_t), sizeof(uint32_t));
|
||||
cache->bo[i]->pitch = 4;
|
||||
|
||||
done:
|
||||
cache->last = i;
|
||||
return kgem_bo_reference(cache->bo[i]);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
int sna_blit_copy(uint32_t dst_bitmap, int dst_x, int dst_y,
|
||||
int w, int h, uint32_t src_bitmap, int src_x, int src_y)
|
||||
|
||||
{
|
||||
struct sna_copy_op copy;
|
||||
struct kgem_bo src_bo, dst_bo;
|
||||
|
||||
memset(&src_bo, 0, sizeof(src_bo));
|
||||
memset(&dst_bo, 0, sizeof(dst_bo));
|
||||
|
||||
// src_bo.gaddr = src_bitmap->gaddr;
|
||||
// src_bo.pitch = src_bitmap->pitch;
|
||||
// src_bo.tiling = 0;
|
||||
|
||||
// dst_bo.gaddr = dst_bitmap->gaddr;
|
||||
// dst_bo.pitch = dst_bitmap->pitch;
|
||||
// dst_bo.tiling = 0;
|
||||
|
||||
memset(©, 0, sizeof(copy));
|
||||
|
||||
sna_device->render.copy(sna_device, GXcopy, NULL, &src_bo, NULL, &dst_bo, ©);
|
||||
copy.blt(sna_device, ©, src_x, src_y, w, h, dst_x, dst_y);
|
||||
copy.done(sna_device, ©);
|
||||
|
||||
|
||||
|
||||
// _kgem_submit(&sna_device->kgem, &execbuffer);
|
||||
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
|
||||
int sna_blit_tex(bitmap_t *dst_bitmap, int dst_x, int dst_y,
|
||||
int w, int h, bitmap_t *src_bitmap, int src_x, int src_y,
|
||||
bitmap_t *mask_bitmap)
|
||||
|
||||
{
|
||||
struct sna_composite_op cop;
|
||||
batchbuffer_t execbuffer;
|
||||
BoxRec box;
|
||||
|
||||
struct kgem_bo src_bo, mask_bo, dst_bo;
|
||||
|
||||
memset(&cop, 0, sizeof(cop));
|
||||
memset(&execbuffer, 0, sizeof(execbuffer));
|
||||
memset(&src_bo, 0, sizeof(src_bo));
|
||||
memset(&dst_bo, 0, sizeof(dst_bo));
|
||||
memset(&mask_bo, 0, sizeof(mask_bo));
|
||||
|
||||
src_bo.gaddr = src_bitmap->gaddr;
|
||||
src_bo.pitch = src_bitmap->pitch;
|
||||
src_bo.tiling = 0;
|
||||
|
||||
dst_bo.gaddr = dst_bitmap->gaddr;
|
||||
dst_bo.pitch = dst_bitmap->pitch;
|
||||
dst_bo.tiling = 0;
|
||||
|
||||
mask_bo.gaddr = mask_bitmap->gaddr;
|
||||
mask_bo.pitch = mask_bitmap->pitch;
|
||||
mask_bo.tiling = 0;
|
||||
|
||||
box.x1 = dst_x;
|
||||
box.y1 = dst_y;
|
||||
box.x2 = dst_x+w;
|
||||
box.y2 = dst_y+h;
|
||||
|
||||
sna_device->render.composite(sna_device, 0,
|
||||
src_bitmap, &src_bo,
|
||||
mask_bitmap, &mask_bo,
|
||||
dst_bitmap, &dst_bo,
|
||||
src_x, src_y,
|
||||
src_x, src_y,
|
||||
dst_x, dst_y,
|
||||
w, h, &cop);
|
||||
|
||||
cop.box(sna_device, &cop, &box);
|
||||
cop.done(sna_device, &cop);
|
||||
|
||||
INIT_LIST_HEAD(&execbuffer.objects);
|
||||
list_add_tail(&src_bitmap->obj->exec_list, &execbuffer.objects);
|
||||
list_add_tail(&mask_bitmap->obj->exec_list, &execbuffer.objects);
|
||||
|
||||
_kgem_submit(&sna_device->kgem, &execbuffer);
|
||||
|
||||
};
|
||||
|
||||
*/
|
||||
|
||||
static const struct intel_device_info intel_generic_info = {
|
||||
.gen = -1,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_i915_info = {
|
||||
.gen = 030,
|
||||
};
|
||||
static const struct intel_device_info intel_i945_info = {
|
||||
.gen = 031,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_g33_info = {
|
||||
.gen = 033,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_i965_info = {
|
||||
.gen = 040,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_g4x_info = {
|
||||
.gen = 045,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_ironlake_info = {
|
||||
.gen = 050,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_sandybridge_info = {
|
||||
.gen = 060,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_ivybridge_info = {
|
||||
.gen = 070,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_valleyview_info = {
|
||||
.gen = 071,
|
||||
};
|
||||
|
||||
static const struct intel_device_info intel_haswell_info = {
|
||||
.gen = 075,
|
||||
};
|
||||
|
||||
#define INTEL_DEVICE_MATCH(d,i) \
|
||||
{ 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }
|
||||
|
||||
|
||||
static const struct pci_id_match intel_device_match[] = {
|
||||
|
||||
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_I915_G, &intel_i915_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_E7221_G, &intel_i915_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_I915_GM, &intel_i915_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_I945_G, &intel_i945_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_I945_GM, &intel_i945_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_I945_GME, &intel_i945_info ),
|
||||
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_M, &intel_g33_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_G, &intel_g33_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_G33_G, &intel_g33_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_Q33_G, &intel_g33_info ),
|
||||
/* Another marketing win: Q35 is another g33 device not a gen4 part
|
||||
* like its G35 brethren.
|
||||
*/
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_Q35_G, &intel_g33_info ),
|
||||
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_I965_G, &intel_i965_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_G35_G, &intel_i965_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_I965_Q, &intel_i965_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_I946_GZ, &intel_i965_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_I965_GM, &intel_i965_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_I965_GME, &intel_i965_info ),
|
||||
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_GM45_GM, &intel_g4x_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_G45_E_G, &intel_g4x_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_G45_G, &intel_g4x_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_Q45_G, &intel_g4x_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_G41_G, &intel_g4x_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_B43_G, &intel_g4x_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_B43_G1, &intel_g4x_info ),
|
||||
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_D_G, &intel_ironlake_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_M_G, &intel_ironlake_info ),
|
||||
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT1, &intel_sandybridge_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2, &intel_sandybridge_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2_PLUS, &intel_sandybridge_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT1, &intel_sandybridge_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2, &intel_sandybridge_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS, &intel_sandybridge_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_S_GT, &intel_sandybridge_info ),
|
||||
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT1, &intel_ivybridge_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT2, &intel_ivybridge_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT1, &intel_ivybridge_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT2, &intel_ivybridge_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT1, &intel_ivybridge_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT2, &intel_ivybridge_info ),
|
||||
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT1, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2_PLUS, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT1, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2_PLUS, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT1, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2_PLUS, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT1, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2_PLUS, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT1, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2_PLUS, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT1, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2_PLUS, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT1, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2_PLUS, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT1, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2_PLUS, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT1, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2_PLUS, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT1, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2_PLUS, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT1, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2_PLUS, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT1, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2, &intel_haswell_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2_PLUS, &intel_haswell_info ),
|
||||
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_PO, &intel_valleyview_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_1, &intel_valleyview_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_2, &intel_valleyview_info ),
|
||||
INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_3, &intel_valleyview_info ),
|
||||
|
||||
INTEL_DEVICE_MATCH (PCI_MATCH_ANY, &intel_generic_info ),
|
||||
|
||||
{ 0, 0, 0 },
|
||||
};
|
||||
|
||||
const struct pci_id_match *PciDevMatch(uint16_t dev,const struct pci_id_match *list)
|
||||
{
|
||||
while(list->device_id)
|
||||
{
|
||||
if(dev==list->device_id)
|
||||
return list;
|
||||
list++;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
const struct intel_device_info *
|
||||
intel_detect_chipset(struct pci_device *pci)
|
||||
{
|
||||
const struct pci_id_match *ent = NULL;
|
||||
const char *name = NULL;
|
||||
int i;
|
||||
|
||||
ent = PciDevMatch(pci->device_id, intel_device_match);
|
||||
|
||||
if(ent != NULL)
|
||||
return (const struct intel_device_info*)ent->match_data;
|
||||
else
|
||||
return &intel_generic_info;
|
||||
|
||||
#if 0
|
||||
for (i = 0; intel_chipsets[i].name != NULL; i++) {
|
||||
if (DEVICE_ID(pci) == intel_chipsets[i].token) {
|
||||
name = intel_chipsets[i].name;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (name == NULL) {
|
||||
xf86DrvMsg(scrn->scrnIndex, X_WARNING, "unknown chipset\n");
|
||||
name = "unknown";
|
||||
} else {
|
||||
xf86DrvMsg(scrn->scrnIndex, from,
|
||||
"Integrated Graphics Chipset: Intel(R) %s\n",
|
||||
name);
|
||||
}
|
||||
|
||||
scrn->chipset = name;
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,300 @@
|
|||
/**************************************************************************
|
||||
|
||||
Copyright 1998-1999 Precision Insight, Inc., Cedar Park, Texas.
|
||||
Copyright © 2002 David Dawes
|
||||
|
||||
All Rights Reserved.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a
|
||||
copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sub license, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice (including the
|
||||
next paragraph) shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
|
||||
IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
|
||||
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
**************************************************************************/
|
||||
|
||||
/*
|
||||
* Authors:
|
||||
* Keith Whitwell <keith@tungstengraphics.com>
|
||||
* David Dawes <dawes@xfree86.org>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _SNA_H_
|
||||
#define _SNA_H_
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
#include "config.h"
|
||||
#endif
|
||||
|
||||
#include <stdint.h>
|
||||
#include <memory.h>
|
||||
#include <malloc.h>
|
||||
|
||||
|
||||
#include "intel_driver.h"
|
||||
#include "pciaccess.h"
|
||||
|
||||
#include "compiler.h"
|
||||
|
||||
//#define DBG(x)
|
||||
//#define DBG(x) ErrorF x
|
||||
|
||||
#define assert(x)
|
||||
|
||||
|
||||
/* Argument block for a KolibriOS kernel-service call (see call_service). */
typedef struct
{
    unsigned  handle;    /* service handle obtained from the kernel      */
    unsigned  io_code;   /* request code, e.g. SRV_GET_INFO              */
    void     *input;     /* request payload handed to the service        */
    int       inp_size;  /* size of *input in bytes                      */
    void     *output;    /* buffer the service fills, may be NULL        */
    int       out_size;  /* size of *output in bytes                     */
}ioctl_t;
|
||||
|
||||
#define SRV_GET_INFO 20
|
||||
#define SRV_GET_PARAM 21
|
||||
|
||||
/*
 * Invoke a kernel service through the KolibriOS syscall gate:
 * int 0x40 with eax = 68 (system resource functions), ebx = 17
 * (call driver), ecx -> the ioctl_t argument block.
 * Returns the value the service leaves in eax (0 = success by
 * the convention used at the call sites above).
 */
static int call_service(ioctl_t *io)
{
    int retval;

    asm volatile("int $0x40"
        :"=a"(retval)
        :"a"(68),"b"(17),"c"(io)
        :"memory","cc");

    return retval;
};
|
||||
|
||||
|
||||
#define PIXMAN_FORMAT(bpp,type,a,r,g,b) (((bpp) << 24) | \
|
||||
((type) << 16) | \
|
||||
((a) << 12) | \
|
||||
((r) << 8) | \
|
||||
((g) << 4) | \
|
||||
((b)))
|
||||
#define PIXMAN_TYPE_OTHER 0
|
||||
#define PIXMAN_TYPE_A 1
|
||||
#define PIXMAN_TYPE_ARGB 2
|
||||
#define PIXMAN_TYPE_ABGR 3
|
||||
#define PIXMAN_TYPE_COLOR 4
|
||||
#define PIXMAN_TYPE_GRAY 5
|
||||
#define PIXMAN_TYPE_YUY2 6
|
||||
#define PIXMAN_TYPE_YV12 7
|
||||
#define PIXMAN_TYPE_BGRA 8
|
||||
#define PIXMAN_TYPE_RGBA 9
|
||||
#define PIXMAN_TYPE_ARGB_SRGB 10
|
||||
|
||||
/* 32bpp formats */
|
||||
typedef enum {
|
||||
PIXMAN_a8r8g8b8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,8,8,8,8),
|
||||
PIXMAN_x8r8g8b8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,0,8,8,8),
|
||||
PIXMAN_a8b8g8r8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,8,8,8,8),
|
||||
PIXMAN_x8b8g8r8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,0,8,8,8),
|
||||
PIXMAN_b8g8r8a8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_BGRA,8,8,8,8),
|
||||
PIXMAN_b8g8r8x8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_BGRA,0,8,8,8),
|
||||
PIXMAN_r8g8b8a8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_RGBA,8,8,8,8),
|
||||
PIXMAN_r8g8b8x8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_RGBA,0,8,8,8),
|
||||
PIXMAN_x14r6g6b6 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,0,6,6,6),
|
||||
PIXMAN_x2r10g10b10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,0,10,10,10),
|
||||
PIXMAN_a2r10g10b10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,2,10,10,10),
|
||||
PIXMAN_x2b10g10r10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,0,10,10,10),
|
||||
PIXMAN_a2b10g10r10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,2,10,10,10)
|
||||
|
||||
} pixman_format_code_t;
|
||||
|
||||
|
||||
typedef unsigned long Picture;
|
||||
typedef unsigned long PictFormat;
|
||||
|
||||
typedef struct _Pixmap *PixmapPtr;
|
||||
typedef struct _Picture *PicturePtr;
|
||||
|
||||
typedef struct _Drawable {
|
||||
unsigned char type; /* DRAWABLE_<type> */
|
||||
unsigned char class; /* specific to type */
|
||||
unsigned char depth;
|
||||
unsigned char bitsPerPixel;
|
||||
unsigned int id; /* resource id */
|
||||
short x; /* window: screen absolute, pixmap: 0 */
|
||||
short y; /* window: screen absolute, pixmap: 0 */
|
||||
unsigned short width;
|
||||
unsigned short height;
|
||||
} DrawableRec;
|
||||
|
||||
/*
|
||||
* PIXMAP -- device dependent
|
||||
*/
|
||||
|
||||
typedef struct _Pixmap {
|
||||
DrawableRec drawable;
|
||||
// PrivateRec *devPrivates;
|
||||
int refcnt;
|
||||
int devKind; /* This is the pitch of the pixmap, typically width*bpp/8. */
|
||||
// DevUnion devPrivate; /* When !NULL, devPrivate.ptr points to the raw pixel data. */
|
||||
#ifdef COMPOSITE
|
||||
short screen_x;
|
||||
short screen_y;
|
||||
#endif
|
||||
unsigned usage_hint; /* see CREATE_PIXMAP_USAGE_* */
|
||||
|
||||
PixmapPtr master_pixmap; /* pointer to master copy of pixmap for pixmap sharing */
|
||||
} PixmapRec;
|
||||
|
||||
|
||||
|
||||
struct pixman_box16
|
||||
{
|
||||
int16_t x1, y1, x2, y2;
|
||||
};
|
||||
|
||||
typedef struct pixman_box16 BoxRec;
|
||||
typedef unsigned int CARD32;
|
||||
typedef unsigned short CARD16;
|
||||
|
||||
#include "sna_render.h"
|
||||
#include "kgem.h"
|
||||
|
||||
#define GXclear 0x0
|
||||
#define GXcopy 0x3
|
||||
|
||||
#define PictOpClear 0
|
||||
#define PictOpSrc 1
|
||||
#define PictOpDst 2
|
||||
#define PictOpOver 3
|
||||
#define PictOpOverReverse 4
|
||||
#define PictOpIn 5
|
||||
#define PictOpInReverse 6
|
||||
#define PictOpOut 7
|
||||
#define PictOpOutReverse 8
|
||||
#define PictOpAtop 9
|
||||
#define PictOpAtopReverse 10
|
||||
#define PictOpXor 11
|
||||
#define PictOpAdd 12
|
||||
#define PictOpSaturate 13
|
||||
#define PictOpMaximum 13
|
||||
|
||||
|
||||
|
||||
struct sna {
|
||||
unsigned flags;
|
||||
#define SNA_NO_WAIT 0x1
|
||||
#define SNA_NO_FLIP 0x2
|
||||
#define SNA_TRIPLE_BUFFER 0x4
|
||||
#define SNA_TEAR_FREE 0x10
|
||||
#define SNA_FORCE_SHADOW 0x20
|
||||
|
||||
struct list flush_pixmaps;
|
||||
struct list active_pixmaps;
|
||||
|
||||
|
||||
|
||||
// int vblank_interval;
|
||||
|
||||
// struct list deferred_free;
|
||||
// struct list dirty_pixmaps;
|
||||
// struct list active_pixmaps;
|
||||
// struct list inactive_clock[2];
|
||||
|
||||
unsigned int tiling;
|
||||
#define SNA_TILING_DISABLE 0x0
|
||||
#define SNA_TILING_FB 0x1
|
||||
#define SNA_TILING_2D 0x2
|
||||
#define SNA_TILING_ALL (~0)
|
||||
|
||||
struct pci_device *PciInfo;
|
||||
const struct intel_device_info *info;
|
||||
|
||||
// PicturePtr clear;
|
||||
struct {
|
||||
uint32_t fill_bo;
|
||||
uint32_t fill_pixel;
|
||||
uint32_t fill_alu;
|
||||
} blt_state;
|
||||
union {
|
||||
// struct gen2_render_state gen2;
|
||||
// struct gen3_render_state gen3;
|
||||
// struct gen4_render_state gen4;
|
||||
// struct gen5_render_state gen5;
|
||||
struct gen6_render_state gen6;
|
||||
struct gen7_render_state gen7;
|
||||
} render_state;
|
||||
|
||||
|
||||
/* Broken-out options. */
|
||||
// OptionInfoPtr Options;
|
||||
|
||||
/* Driver phase/state information */
|
||||
// Bool suspended;
|
||||
|
||||
struct kgem kgem;
|
||||
struct sna_render render;
|
||||
|
||||
#if DEBUG_MEMORY
|
||||
struct {
|
||||
int shadow_pixels_allocs;
|
||||
int cpu_bo_allocs;
|
||||
size_t shadow_pixels_bytes;
|
||||
size_t cpu_bo_bytes;
|
||||
} debug_memory;
|
||||
#endif
|
||||
};
|
||||
|
||||
static inline int vertex_space(struct sna *sna)
|
||||
{
|
||||
return sna->render.vertex_size - sna->render.vertex_used;
|
||||
}
|
||||
|
||||
static inline void vertex_emit(struct sna *sna, float v)
|
||||
{
|
||||
assert(sna->render.vertex_used < sna->render.vertex_size);
|
||||
sna->render.vertices[sna->render.vertex_used++] = v;
|
||||
}
|
||||
|
||||
/*
 * Pack two signed 16-bit coordinates into a single vertex slot: both
 * values are written through an int16_t view of one float entry, so
 * only one vertex_used slot is consumed for the pair.
 * NOTE(review): this type-puns float storage via an int16_t pointer,
 * which relies on the build disabling strict-aliasing (or on compiler
 * leniency) — confirm the project's compile flags.
 */
static inline void vertex_emit_2s(struct sna *sna, int16_t x, int16_t y)
{
    int16_t *v = (int16_t *)&sna->render.vertices[sna->render.vertex_used++];
    assert(sna->render.vertex_used <= sna->render.vertex_size);
    v[0] = x;
    v[1] = y;
}
|
||||
|
||||
/*
 * Append one dword to the active batch buffer.  Requires a batch mode to
 * be selected and enough headroom: nbatch plus the reserved tail must
 * stay below kgem.surface (presumably surface state is allocated
 * downward from the end of the batch — confirm against kgem.c).
 */
static inline void batch_emit(struct sna *sna, uint32_t dword)
{
    assert(sna->kgem.mode != KGEM_NONE);
    assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
    sna->kgem.batch[sna->kgem.nbatch++] = dword;
}
|
||||
|
||||
#ifndef ARRAY_SIZE
|
||||
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
|
||||
#endif
|
||||
|
||||
#ifndef ALIGN
|
||||
#define ALIGN(i,m) (((i) + (m) - 1) & ~((m) - 1))
|
||||
#endif
|
||||
|
||||
#ifndef MIN
|
||||
#define MIN(a,b) ((a) <= (b) ? (a) : (b))
|
||||
#endif
|
||||
|
||||
#ifndef MAX
|
||||
#define MAX(a,b) ((a) >= (b) ? (a) : (b))
|
||||
#endif
|
||||
#endif /* _SNA_H */
|
|
@ -0,0 +1,81 @@
|
|||
#ifndef SNA_REG_H
|
||||
#define SNA_REG_H
|
||||
|
||||
/* Flush */
|
||||
#define MI_FLUSH (0x04<<23)
|
||||
#define MI_FLUSH_DW (0x26<<23)
|
||||
|
||||
#define MI_WRITE_DIRTY_STATE (1<<4)
|
||||
#define MI_END_SCENE (1<<3)
|
||||
#define MI_GLOBAL_SNAPSHOT_COUNT_RESET (1<<3)
|
||||
#define MI_INHIBIT_RENDER_CACHE_FLUSH (1<<2)
|
||||
#define MI_STATE_INSTRUCTION_CACHE_FLUSH (1<<1)
|
||||
#define MI_INVALIDATE_MAP_CACHE (1<<0)
|
||||
/* broadwater flush bits */
|
||||
#define BRW_MI_GLOBAL_SNAPSHOT_RESET (1 << 3)
|
||||
|
||||
#define MI_BATCH_BUFFER_END (0xA << 23)
|
||||
|
||||
/* Noop */
|
||||
/* MI_NOOP — may carry a 22-bit identification number for debugging. */
#define MI_NOOP                 0x00
#define MI_NOOP_WRITE_ID        (1<<22)
/* Fix: mask for the id payload, bits 21:0.  The previous "(1<<22 - 1)"
 * parsed as 1<<21 because '-' binds tighter than '<<'. */
#define MI_NOOP_ID_MASK         ((1<<22) - 1)
|
||||
|
||||
/* Wait for Events */
|
||||
#define MI_WAIT_FOR_EVENT (0x03<<23)
|
||||
#define MI_WAIT_FOR_PIPEB_SVBLANK (1<<18)
|
||||
#define MI_WAIT_FOR_PIPEA_SVBLANK (1<<17)
|
||||
#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16)
|
||||
#define MI_WAIT_FOR_PIPEB_VBLANK (1<<7)
|
||||
#define MI_WAIT_FOR_PIPEB_SCAN_LINE_WINDOW (1<<5)
|
||||
#define MI_WAIT_FOR_PIPEA_VBLANK (1<<3)
|
||||
#define MI_WAIT_FOR_PIPEA_SCAN_LINE_WINDOW (1<<1)
|
||||
|
||||
/* Set the scan line for MI_WAIT_FOR_PIPE?_SCAN_LINE_WINDOW */
|
||||
#define MI_LOAD_SCAN_LINES_INCL (0x12<<23)
|
||||
#define MI_LOAD_SCAN_LINES_DISPLAY_PIPEA (0)
|
||||
#define MI_LOAD_SCAN_LINES_DISPLAY_PIPEB (0x1<<20)
|
||||
|
||||
/* BLT commands */
|
||||
#define BLT_WRITE_ALPHA (1<<21)
|
||||
#define BLT_WRITE_RGB (1<<20)
|
||||
#define BLT_SRC_TILED (1<<15)
|
||||
#define BLT_DST_TILED (1<<11)
|
||||
|
||||
#define COLOR_BLT_CMD ((2<<29)|(0x40<<22)|(0x3))
|
||||
#define XY_COLOR_BLT ((2<<29)|(0x50<<22)|(0x4))
|
||||
#define XY_SETUP_BLT ((2<<29)|(1<<22)|6)
|
||||
#define XY_SETUP_MONO_PATTERN_SL_BLT ((2<<29)|(0x11<<22)|7)
|
||||
#define XY_SETUP_CLIP ((2<<29)|(3<<22)|1)
|
||||
#define XY_SCANLINE_BLT ((2<<29)|(0x25<<22)|1)
|
||||
#define XY_TEXT_IMMEDIATE_BLT ((2<<29)|(0x31<<22)|(1<<16))
|
||||
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
|
||||
#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|0x4)
|
||||
#define XY_PAT_BLT_IMMEDIATE ((2<<29)|(0x72<<22))
|
||||
#define XY_MONO_PAT ((0x2<<29)|(0x52<<22)|0x7)
|
||||
#define XY_MONO_SRC_COPY ((0x2<<29)|(0x54<<22)|(0x6))
|
||||
#define XY_MONO_SRC_COPY_IMM ((0x2<<29)|(0x71<<22))
|
||||
#define XY_FULL_MONO_PATTERN_BLT ((0x2<<29)|(0x57<<22)|0xa)
|
||||
#define XY_FULL_MONO_PATTERN_MONO_SRC_BLT ((0x2<<29)|(0x58<<22)|0xa)
|
||||
|
||||
/* FLUSH commands */
|
||||
#define BRW_3D(Pipeline,Opcode,Subopcode) \
|
||||
((3 << 29) | \
|
||||
((Pipeline) << 27) | \
|
||||
((Opcode) << 24) | \
|
||||
((Subopcode) << 16))
|
||||
#define PIPE_CONTROL BRW_3D(3, 2, 0)
|
||||
#define PIPE_CONTROL_NOWRITE (0 << 14)
|
||||
#define PIPE_CONTROL_WRITE_QWORD (1 << 14)
|
||||
#define PIPE_CONTROL_WRITE_DEPTH (2 << 14)
|
||||
#define PIPE_CONTROL_WRITE_TIME (3 << 14)
|
||||
#define PIPE_CONTROL_DEPTH_STALL (1 << 13)
|
||||
#define PIPE_CONTROL_WC_FLUSH (1 << 12)
|
||||
#define PIPE_CONTROL_IS_FLUSH (1 << 11)
|
||||
#define PIPE_CONTROL_TC_FLUSH (1 << 10)
|
||||
#define PIPE_CONTROL_NOTIFY_ENABLE (1 << 8)
|
||||
#define PIPE_CONTROL_GLOBAL_GTT (1 << 2)
|
||||
#define PIPE_CONTROL_LOCAL_PGTT (0 << 2)
|
||||
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1 << 0)
|
||||
|
||||
#endif
|
|
@ -0,0 +1,690 @@
|
|||
#ifndef SNA_RENDER_H
|
||||
#define SNA_RENDER_H
|
||||
|
||||
#include "compiler.h"
|
||||
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#define GRADIENT_CACHE_SIZE 16
|
||||
|
||||
#define GXinvalid 0xff
|
||||
|
||||
struct sna;
|
||||
struct sna_glyph;
|
||||
struct sna_video;
|
||||
struct sna_video_frame;
|
||||
struct brw_compile;
|
||||
|
||||
struct sna_composite_rectangles {
|
||||
struct sna_coordinate {
|
||||
int16_t x, y;
|
||||
} src, mask, dst;
|
||||
int16_t width, height;
|
||||
};
|
||||
|
||||
struct sna_composite_op {
|
||||
fastcall void (*blt)(struct sna *sna, const struct sna_composite_op *op,
|
||||
const struct sna_composite_rectangles *r);
|
||||
fastcall void (*box)(struct sna *sna,
|
||||
const struct sna_composite_op *op,
|
||||
const BoxRec *box);
|
||||
void (*boxes)(struct sna *sna, const struct sna_composite_op *op,
|
||||
const BoxRec *box, int nbox);
|
||||
void (*done)(struct sna *sna, const struct sna_composite_op *op);
|
||||
|
||||
struct sna_damage **damage;
|
||||
|
||||
uint32_t op;
|
||||
|
||||
struct {
|
||||
PixmapPtr pixmap;
|
||||
CARD32 format;
|
||||
struct kgem_bo *bo;
|
||||
int16_t x, y;
|
||||
uint16_t width, height;
|
||||
} dst;
|
||||
|
||||
struct sna_composite_channel {
|
||||
struct kgem_bo *bo;
|
||||
// PictTransform *transform;
|
||||
uint16_t width;
|
||||
uint16_t height;
|
||||
uint32_t pict_format;
|
||||
uint32_t card_format;
|
||||
uint32_t filter;
|
||||
uint32_t repeat;
|
||||
uint32_t is_affine : 1;
|
||||
uint32_t is_solid : 1;
|
||||
uint32_t is_linear : 1;
|
||||
uint32_t is_opaque : 1;
|
||||
uint32_t alpha_fixup : 1;
|
||||
uint32_t rb_reversed : 1;
|
||||
int16_t offset[2];
|
||||
float scale[2];
|
||||
|
||||
// pixman_transform_t embedded_transform;
|
||||
|
||||
union {
|
||||
struct {
|
||||
float dx, dy, offset;
|
||||
} linear;
|
||||
struct {
|
||||
uint32_t pixel;
|
||||
} gen2;
|
||||
struct gen3_shader_channel {
|
||||
int type;
|
||||
uint32_t mode;
|
||||
uint32_t constants;
|
||||
} gen3;
|
||||
} u;
|
||||
} src, mask;
|
||||
uint32_t is_affine : 1;
|
||||
uint32_t has_component_alpha : 1;
|
||||
uint32_t need_magic_ca_pass : 1;
|
||||
uint32_t rb_reversed : 1;
|
||||
|
||||
int16_t floats_per_vertex;
|
||||
int16_t floats_per_rect;
|
||||
fastcall void (*prim_emit)(struct sna *sna,
|
||||
const struct sna_composite_op *op,
|
||||
const struct sna_composite_rectangles *r);
|
||||
|
||||
struct sna_composite_redirect {
|
||||
struct kgem_bo *real_bo;
|
||||
struct sna_damage **real_damage, *damage;
|
||||
BoxRec box;
|
||||
} redirect;
|
||||
|
||||
union {
|
||||
struct sna_blt_state {
|
||||
PixmapPtr src_pixmap;
|
||||
int16_t sx, sy;
|
||||
|
||||
uint32_t inplace :1;
|
||||
uint32_t overwrites:1;
|
||||
uint32_t bpp : 6;
|
||||
|
||||
uint32_t cmd;
|
||||
uint32_t br13;
|
||||
uint32_t pitch[2];
|
||||
uint32_t pixel;
|
||||
struct kgem_bo *bo[2];
|
||||
} blt;
|
||||
|
||||
struct {
|
||||
float constants[8];
|
||||
uint32_t num_constants;
|
||||
} gen3;
|
||||
|
||||
struct {
|
||||
int wm_kernel;
|
||||
int ve_id;
|
||||
} gen4;
|
||||
|
||||
struct {
|
||||
int16_t wm_kernel;
|
||||
int16_t ve_id;
|
||||
} gen5;
|
||||
|
||||
struct {
|
||||
uint32_t flags;
|
||||
} gen6;
|
||||
|
||||
struct {
|
||||
uint32_t flags;
|
||||
} gen7;
|
||||
} u;
|
||||
|
||||
void *priv;
|
||||
};
|
||||
|
||||
struct sna_copy_op {
|
||||
struct sna_composite_op base;
|
||||
|
||||
void (*blt)(struct sna *sna, const struct sna_copy_op *op,
|
||||
int16_t sx, int16_t sy,
|
||||
int16_t w, int16_t h,
|
||||
int16_t dx, int16_t dy);
|
||||
void (*done)(struct sna *sna, const struct sna_copy_op *op);
|
||||
};
|
||||
|
||||
struct sna_render {
|
||||
int active;
|
||||
|
||||
int max_3d_size;
|
||||
int max_3d_pitch;
|
||||
|
||||
unsigned prefer_gpu;
|
||||
#define PREFER_GPU_BLT 0x1
|
||||
#define PREFER_GPU_RENDER 0x2
|
||||
#define PREFER_GPU_SPANS 0x4
|
||||
|
||||
#if 0
|
||||
|
||||
bool (*composite)(struct sna *sna, uint8_t op,
|
||||
PicturePtr dst, PicturePtr src, PicturePtr mask,
|
||||
int16_t src_x, int16_t src_y,
|
||||
int16_t msk_x, int16_t msk_y,
|
||||
int16_t dst_x, int16_t dst_y,
|
||||
int16_t w, int16_t h,
|
||||
struct sna_composite_op *tmp);
|
||||
|
||||
bool (*check_composite_spans)(struct sna *sna, uint8_t op,
|
||||
PicturePtr dst, PicturePtr src,
|
||||
int16_t w, int16_t h, unsigned flags);
|
||||
bool (*composite_spans)(struct sna *sna, uint8_t op,
|
||||
PicturePtr dst, PicturePtr src,
|
||||
int16_t src_x, int16_t src_y,
|
||||
int16_t dst_x, int16_t dst_y,
|
||||
int16_t w, int16_t h,
|
||||
unsigned flags,
|
||||
struct sna_composite_spans_op *tmp);
|
||||
#define COMPOSITE_SPANS_RECTILINEAR 0x1
|
||||
#define COMPOSITE_SPANS_INPLACE_HINT 0x2
|
||||
|
||||
bool (*video)(struct sna *sna,
|
||||
struct sna_video *video,
|
||||
struct sna_video_frame *frame,
|
||||
RegionPtr dstRegion,
|
||||
short src_w, short src_h,
|
||||
short drw_w, short drw_h,
|
||||
short dx, short dy,
|
||||
PixmapPtr pixmap);
|
||||
|
||||
bool (*fill_boxes)(struct sna *sna,
|
||||
CARD8 op,
|
||||
PictFormat format,
|
||||
const xRenderColor *color,
|
||||
PixmapPtr dst, struct kgem_bo *dst_bo,
|
||||
const BoxRec *box, int n);
|
||||
bool (*fill)(struct sna *sna, uint8_t alu,
|
||||
PixmapPtr dst, struct kgem_bo *dst_bo,
|
||||
uint32_t color,
|
||||
struct sna_fill_op *tmp);
|
||||
bool (*fill_one)(struct sna *sna, PixmapPtr dst, struct kgem_bo *dst_bo,
|
||||
uint32_t color,
|
||||
int16_t x1, int16_t y1, int16_t x2, int16_t y2,
|
||||
uint8_t alu);
|
||||
bool (*clear)(struct sna *sna, PixmapPtr dst, struct kgem_bo *dst_bo);
|
||||
|
||||
bool (*copy_boxes)(struct sna *sna, uint8_t alu,
|
||||
PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
|
||||
PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
|
||||
const BoxRec *box, int n, unsigned flags);
|
||||
#define COPY_LAST 0x1
|
||||
#define COPY_SYNC 0x2
|
||||
|
||||
#endif
|
||||
|
||||
bool (*copy)(struct sna *sna, uint8_t alu,
|
||||
PixmapPtr src, struct kgem_bo *src_bo,
|
||||
PixmapPtr dst, struct kgem_bo *dst_bo,
|
||||
struct sna_copy_op *op);
|
||||
|
||||
void (*flush)(struct sna *sna);
|
||||
void (*reset)(struct sna *sna);
|
||||
void (*fini)(struct sna *sna);
|
||||
|
||||
#if 0
|
||||
|
||||
struct sna_alpha_cache {
|
||||
struct kgem_bo *cache_bo;
|
||||
struct kgem_bo *bo[256+7];
|
||||
} alpha_cache;
|
||||
|
||||
struct sna_solid_cache {
|
||||
struct kgem_bo *cache_bo;
|
||||
struct kgem_bo *bo[1024];
|
||||
uint32_t color[1025];
|
||||
int last;
|
||||
int size;
|
||||
int dirty;
|
||||
} solid_cache;
|
||||
|
||||
struct {
|
||||
struct sna_gradient_cache {
|
||||
struct kgem_bo *bo;
|
||||
int nstops;
|
||||
PictGradientStop *stops;
|
||||
} cache[GRADIENT_CACHE_SIZE];
|
||||
int size;
|
||||
} gradient_cache;
|
||||
|
||||
struct sna_glyph_cache{
|
||||
PicturePtr picture;
|
||||
struct sna_glyph **glyphs;
|
||||
uint16_t count;
|
||||
uint16_t evict;
|
||||
} glyph[2];
|
||||
pixman_image_t *white_image;
|
||||
PicturePtr white_picture;
|
||||
#if HAS_PIXMAN_GLYPHS
|
||||
pixman_glyph_cache_t *glyph_cache;
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
uint16_t vb_id;
|
||||
uint16_t vertex_offset;
|
||||
uint16_t vertex_start;
|
||||
uint16_t vertex_index;
|
||||
uint16_t vertex_used;
|
||||
uint16_t vertex_size;
|
||||
uint16_t vertex_reloc[16];
|
||||
int nvertex_reloc;
|
||||
|
||||
struct kgem_bo *vbo;
|
||||
float *vertices;
|
||||
|
||||
float vertex_data[1024];
|
||||
};
|
||||
|
||||
struct gen2_render_state {
|
||||
uint32_t target;
|
||||
bool need_invariant;
|
||||
uint32_t logic_op_enabled;
|
||||
uint32_t ls1, ls2, vft;
|
||||
uint32_t diffuse;
|
||||
uint32_t specular;
|
||||
};
|
||||
|
||||
struct gen3_render_state {
|
||||
uint32_t current_dst;
|
||||
bool need_invariant;
|
||||
uint32_t tex_count;
|
||||
uint32_t last_drawrect_limit;
|
||||
uint32_t last_target;
|
||||
uint32_t last_blend;
|
||||
uint32_t last_constants;
|
||||
uint32_t last_sampler;
|
||||
uint32_t last_shader;
|
||||
uint32_t last_diffuse;
|
||||
uint32_t last_specular;
|
||||
|
||||
uint16_t last_vertex_offset;
|
||||
uint16_t floats_per_vertex;
|
||||
uint16_t last_floats_per_vertex;
|
||||
|
||||
uint32_t tex_map[4];
|
||||
uint32_t tex_handle[2];
|
||||
uint32_t tex_delta[2];
|
||||
};
|
||||
|
||||
struct gen4_render_state {
|
||||
struct kgem_bo *general_bo;
|
||||
|
||||
uint32_t vs;
|
||||
uint32_t sf;
|
||||
uint32_t wm;
|
||||
uint32_t cc;
|
||||
|
||||
int ve_id;
|
||||
uint32_t drawrect_offset;
|
||||
uint32_t drawrect_limit;
|
||||
uint32_t last_pipelined_pointers;
|
||||
uint16_t last_primitive;
|
||||
int16_t floats_per_vertex;
|
||||
uint16_t surface_table;
|
||||
|
||||
bool needs_invariant;
|
||||
bool needs_urb;
|
||||
};
|
||||
|
||||
struct gen5_render_state {
|
||||
struct kgem_bo *general_bo;
|
||||
|
||||
uint32_t vs;
|
||||
uint32_t sf[2];
|
||||
uint32_t wm;
|
||||
uint32_t cc;
|
||||
|
||||
int ve_id;
|
||||
uint32_t drawrect_offset;
|
||||
uint32_t drawrect_limit;
|
||||
uint16_t last_primitive;
|
||||
int16_t floats_per_vertex;
|
||||
uint16_t surface_table;
|
||||
uint16_t last_pipelined_pointers;
|
||||
|
||||
bool needs_invariant;
|
||||
};
|
||||
|
||||
enum {
|
||||
GEN6_WM_KERNEL_NOMASK = 0,
|
||||
GEN6_WM_KERNEL_NOMASK_P,
|
||||
|
||||
GEN6_WM_KERNEL_MASK,
|
||||
GEN6_WM_KERNEL_MASK_P,
|
||||
|
||||
GEN6_WM_KERNEL_MASKCA,
|
||||
GEN6_WM_KERNEL_MASKCA_P,
|
||||
|
||||
GEN6_WM_KERNEL_MASKSA,
|
||||
GEN6_WM_KERNEL_MASKSA_P,
|
||||
|
||||
GEN6_WM_KERNEL_OPACITY,
|
||||
GEN6_WM_KERNEL_OPACITY_P,
|
||||
|
||||
GEN6_WM_KERNEL_VIDEO_PLANAR,
|
||||
GEN6_WM_KERNEL_VIDEO_PACKED,
|
||||
GEN6_KERNEL_COUNT
|
||||
};
|
||||
|
||||
struct gen6_render_state {
|
||||
const struct gt_info *info;
|
||||
struct kgem_bo *general_bo;
|
||||
|
||||
uint32_t vs_state;
|
||||
uint32_t sf_state;
|
||||
uint32_t sf_mask_state;
|
||||
uint32_t wm_state;
|
||||
uint32_t wm_kernel[GEN6_KERNEL_COUNT][3];
|
||||
|
||||
uint32_t cc_blend;
|
||||
|
||||
uint32_t drawrect_offset;
|
||||
uint32_t drawrect_limit;
|
||||
uint32_t blend;
|
||||
uint32_t samplers;
|
||||
uint32_t kernel;
|
||||
|
||||
uint16_t num_sf_outputs;
|
||||
uint16_t ve_id;
|
||||
uint16_t last_primitive;
|
||||
int16_t floats_per_vertex;
|
||||
uint16_t surface_table;
|
||||
|
||||
bool needs_invariant;
|
||||
bool first_state_packet;
|
||||
};
|
||||
|
||||
enum {
|
||||
GEN7_WM_KERNEL_NOMASK = 0,
|
||||
GEN7_WM_KERNEL_NOMASK_P,
|
||||
|
||||
GEN7_WM_KERNEL_MASK,
|
||||
GEN7_WM_KERNEL_MASK_P,
|
||||
|
||||
GEN7_WM_KERNEL_MASKCA,
|
||||
GEN7_WM_KERNEL_MASKCA_P,
|
||||
|
||||
GEN7_WM_KERNEL_MASKSA,
|
||||
GEN7_WM_KERNEL_MASKSA_P,
|
||||
|
||||
GEN7_WM_KERNEL_OPACITY,
|
||||
GEN7_WM_KERNEL_OPACITY_P,
|
||||
|
||||
GEN7_WM_KERNEL_VIDEO_PLANAR,
|
||||
GEN7_WM_KERNEL_VIDEO_PACKED,
|
||||
GEN7_WM_KERNEL_COUNT
|
||||
};
|
||||
|
||||
struct gen7_render_state {
|
||||
const struct gt_info *info;
|
||||
struct kgem_bo *general_bo;
|
||||
|
||||
uint32_t vs_state;
|
||||
uint32_t sf_state;
|
||||
uint32_t sf_mask_state;
|
||||
uint32_t wm_state;
|
||||
uint32_t wm_kernel[GEN7_WM_KERNEL_COUNT][3];
|
||||
|
||||
uint32_t cc_blend;
|
||||
|
||||
uint32_t drawrect_offset;
|
||||
uint32_t drawrect_limit;
|
||||
uint32_t blend;
|
||||
uint32_t samplers;
|
||||
uint32_t kernel;
|
||||
|
||||
uint16_t num_sf_outputs;
|
||||
uint16_t ve_id;
|
||||
uint16_t last_primitive;
|
||||
int16_t floats_per_vertex;
|
||||
uint16_t surface_table;
|
||||
|
||||
bool needs_invariant;
|
||||
bool emit_flush;
|
||||
};
|
||||
|
||||
struct sna_static_stream {
|
||||
uint32_t size, used;
|
||||
uint8_t *data;
|
||||
};
|
||||
|
||||
int sna_static_stream_init(struct sna_static_stream *stream);
|
||||
uint32_t sna_static_stream_add(struct sna_static_stream *stream,
|
||||
const void *data, uint32_t len, uint32_t align);
|
||||
void *sna_static_stream_map(struct sna_static_stream *stream,
|
||||
uint32_t len, uint32_t align);
|
||||
uint32_t sna_static_stream_offsetof(struct sna_static_stream *stream,
|
||||
void *ptr);
|
||||
unsigned sna_static_stream_compile_sf(struct sna *sna,
|
||||
struct sna_static_stream *stream,
|
||||
bool (*compile)(struct brw_compile *));
|
||||
|
||||
unsigned sna_static_stream_compile_wm(struct sna *sna,
|
||||
struct sna_static_stream *stream,
|
||||
bool (*compile)(struct brw_compile *, int),
|
||||
int width);
|
||||
struct kgem_bo *sna_static_stream_fini(struct sna *sna,
|
||||
struct sna_static_stream *stream);
|
||||
|
||||
/*
|
||||
struct kgem_bo *
|
||||
sna_render_get_solid(struct sna *sna,
|
||||
uint32_t color);
|
||||
|
||||
void
|
||||
sna_render_flush_solid(struct sna *sna);
|
||||
|
||||
struct kgem_bo *
|
||||
sna_render_get_gradient(struct sna *sna,
|
||||
PictGradient *pattern);
|
||||
|
||||
uint32_t sna_rgba_for_color(uint32_t color, int depth);
|
||||
uint32_t sna_rgba_to_color(uint32_t rgba, uint32_t format);
|
||||
bool sna_get_rgba_from_pixel(uint32_t pixel,
|
||||
uint16_t *red,
|
||||
uint16_t *green,
|
||||
uint16_t *blue,
|
||||
uint16_t *alpha,
|
||||
uint32_t format);
|
||||
bool sna_picture_is_solid(PicturePtr picture, uint32_t *color);
|
||||
|
||||
*/
|
||||
|
||||
void no_render_init(struct sna *sna);
|
||||
|
||||
bool gen2_render_init(struct sna *sna);
|
||||
bool gen3_render_init(struct sna *sna);
|
||||
bool gen4_render_init(struct sna *sna);
|
||||
bool gen5_render_init(struct sna *sna);
|
||||
bool gen6_render_init(struct sna *sna);
|
||||
bool gen7_render_init(struct sna *sna);
|
||||
|
||||
#if 0
|
||||
|
||||
bool sna_tiling_composite(uint32_t op,
|
||||
PicturePtr src,
|
||||
PicturePtr mask,
|
||||
PicturePtr dst,
|
||||
int16_t src_x, int16_t src_y,
|
||||
int16_t mask_x, int16_t mask_y,
|
||||
int16_t dst_x, int16_t dst_y,
|
||||
int16_t width, int16_t height,
|
||||
struct sna_composite_op *tmp);
|
||||
bool sna_tiling_fill_boxes(struct sna *sna,
|
||||
CARD8 op,
|
||||
PictFormat format,
|
||||
const xRenderColor *color,
|
||||
PixmapPtr dst, struct kgem_bo *dst_bo,
|
||||
const BoxRec *box, int n);
|
||||
|
||||
bool sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
|
||||
PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
|
||||
PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
|
||||
const BoxRec *box, int n);
|
||||
|
||||
bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
|
||||
struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
|
||||
struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
|
||||
int bpp, const BoxRec *box, int nbox);
|
||||
|
||||
bool sna_blt_composite(struct sna *sna,
|
||||
uint32_t op,
|
||||
PicturePtr src,
|
||||
PicturePtr dst,
|
||||
int16_t src_x, int16_t src_y,
|
||||
int16_t dst_x, int16_t dst_y,
|
||||
int16_t width, int16_t height,
|
||||
struct sna_composite_op *tmp,
|
||||
bool fallback);
|
||||
bool sna_blt_composite__convert(struct sna *sna,
|
||||
int x, int y,
|
||||
int width, int height,
|
||||
struct sna_composite_op *tmp);
|
||||
|
||||
bool sna_blt_fill(struct sna *sna, uint8_t alu,
|
||||
struct kgem_bo *bo,
|
||||
int bpp,
|
||||
uint32_t pixel,
|
||||
struct sna_fill_op *fill);
|
||||
|
||||
bool sna_blt_copy(struct sna *sna, uint8_t alu,
|
||||
struct kgem_bo *src,
|
||||
struct kgem_bo *dst,
|
||||
int bpp,
|
||||
struct sna_copy_op *copy);
|
||||
|
||||
bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
|
||||
struct kgem_bo *bo,
|
||||
int bpp,
|
||||
uint32_t pixel,
|
||||
const BoxRec *box, int n);
|
||||
|
||||
bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
|
||||
struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
|
||||
struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
|
||||
int bpp,
|
||||
const BoxRec *box, int n);
|
||||
bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
|
||||
PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
|
||||
PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
|
||||
const BoxRec *box, int nbox);
|
||||
|
||||
bool _sna_get_pixel_from_rgba(uint32_t *pixel,
|
||||
uint16_t red,
|
||||
uint16_t green,
|
||||
uint16_t blue,
|
||||
uint16_t alpha,
|
||||
uint32_t format);
|
||||
|
||||
static inline bool
|
||||
sna_get_pixel_from_rgba(uint32_t * pixel,
|
||||
uint16_t red,
|
||||
uint16_t green,
|
||||
uint16_t blue,
|
||||
uint16_t alpha,
|
||||
uint32_t format)
|
||||
{
|
||||
switch (format) {
|
||||
case PICT_x8r8g8b8:
|
||||
alpha = 0xffff;
|
||||
/* fall through to re-use a8r8g8b8 expansion */
|
||||
case PICT_a8r8g8b8:
|
||||
*pixel = ((alpha >> 8 << 24) |
|
||||
(red >> 8 << 16) |
|
||||
(green & 0xff00) |
|
||||
(blue >> 8));
|
||||
return TRUE;
|
||||
case PICT_a8:
|
||||
*pixel = alpha >> 8;
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
return _sna_get_pixel_from_rgba(pixel, red, green, blue, alpha, format);
|
||||
}
|
||||
|
||||
struct kgem_bo *
|
||||
__sna_render_pixmap_bo(struct sna *sna,
|
||||
PixmapPtr pixmap,
|
||||
const BoxRec *box,
|
||||
bool blt);
|
||||
|
||||
int
|
||||
sna_render_pixmap_bo(struct sna *sna,
|
||||
struct sna_composite_channel *channel,
|
||||
PixmapPtr pixmap,
|
||||
int16_t x, int16_t y,
|
||||
int16_t w, int16_t h,
|
||||
int16_t dst_x, int16_t dst_y);
|
||||
|
||||
bool
|
||||
sna_render_pixmap_partial(struct sna *sna,
|
||||
PixmapPtr pixmap,
|
||||
struct kgem_bo *bo,
|
||||
struct sna_composite_channel *channel,
|
||||
int16_t x, int16_t y,
|
||||
int16_t w, int16_t h);
|
||||
|
||||
int
|
||||
sna_render_picture_extract(struct sna *sna,
|
||||
PicturePtr picture,
|
||||
struct sna_composite_channel *channel,
|
||||
int16_t x, int16_t y,
|
||||
int16_t w, int16_t h,
|
||||
int16_t dst_x, int16_t dst_y);
|
||||
|
||||
int
|
||||
sna_render_picture_approximate_gradient(struct sna *sna,
|
||||
PicturePtr picture,
|
||||
struct sna_composite_channel *channel,
|
||||
int16_t x, int16_t y,
|
||||
int16_t w, int16_t h,
|
||||
int16_t dst_x, int16_t dst_y);
|
||||
|
||||
int
|
||||
sna_render_picture_fixup(struct sna *sna,
|
||||
PicturePtr picture,
|
||||
struct sna_composite_channel *channel,
|
||||
int16_t x, int16_t y,
|
||||
int16_t w, int16_t h,
|
||||
int16_t dst_x, int16_t dst_y);
|
||||
|
||||
int
|
||||
sna_render_picture_convert(struct sna *sna,
|
||||
PicturePtr picture,
|
||||
struct sna_composite_channel *channel,
|
||||
PixmapPtr pixmap,
|
||||
int16_t x, int16_t y,
|
||||
int16_t w, int16_t h,
|
||||
int16_t dst_x, int16_t dst_y,
|
||||
bool fixup_alpha);
|
||||
|
||||
inline static void sna_render_composite_redirect_init(struct sna_composite_op *op)
|
||||
{
|
||||
struct sna_composite_redirect *t = &op->redirect;
|
||||
t->real_bo = NULL;
|
||||
t->damage = NULL;
|
||||
}
|
||||
|
||||
bool
|
||||
sna_render_composite_redirect(struct sna *sna,
|
||||
struct sna_composite_op *op,
|
||||
int x, int y, int width, int height);
|
||||
|
||||
void
|
||||
sna_render_composite_redirect_done(struct sna *sna,
|
||||
const struct sna_composite_op *op);
|
||||
|
||||
bool
|
||||
sna_composite_mask_is_opaque(PicturePtr mask);
|
||||
|
||||
#endif
|
||||
|
||||
void sna_vertex_init(struct sna *sna);
|
||||
|
||||
|
||||
#endif /* SNA_RENDER_H */
|
|
@ -0,0 +1,117 @@
|
|||
/*
|
||||
* Copyright © 2011 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Chris Wilson <chris@chris-wilson.co.uk>
|
||||
*
|
||||
*/
|
||||
|
||||
#include "sna.h"
|
||||
#include "sna_render.h"
|
||||
#include "brw/brw.h"
|
||||
|
||||
int sna_static_stream_init(struct sna_static_stream *stream)
|
||||
{
|
||||
stream->used = 0;
|
||||
stream->size = 64*1024;
|
||||
|
||||
stream->data = malloc(stream->size);
|
||||
return stream->data != NULL;
|
||||
}
|
||||
|
||||
static uint32_t sna_static_stream_alloc(struct sna_static_stream *stream,
|
||||
uint32_t len, uint32_t align)
|
||||
{
|
||||
uint32_t offset = ALIGN(stream->used, align);
|
||||
uint32_t size = offset + len;
|
||||
|
||||
if (size > stream->size) {
|
||||
do
|
||||
stream->size *= 2;
|
||||
while (stream->size < size);
|
||||
|
||||
stream->data = realloc(stream->data, stream->size);
|
||||
}
|
||||
|
||||
stream->used = size;
|
||||
return offset;
|
||||
}
|
||||
|
||||
uint32_t sna_static_stream_add(struct sna_static_stream *stream,
|
||||
const void *data, uint32_t len, uint32_t align)
|
||||
{
|
||||
uint32_t offset = sna_static_stream_alloc(stream, len, align);
|
||||
memcpy(stream->data + offset, data, len);
|
||||
return offset;
|
||||
}
|
||||
|
||||
void *sna_static_stream_map(struct sna_static_stream *stream,
|
||||
uint32_t len, uint32_t align)
|
||||
{
|
||||
uint32_t offset = sna_static_stream_alloc(stream, len, align);
|
||||
return memset(stream->data + offset, 0, len);
|
||||
}
|
||||
|
||||
uint32_t sna_static_stream_offsetof(struct sna_static_stream *stream, void *ptr)
|
||||
{
|
||||
return (uint8_t *)ptr - stream->data;
|
||||
}
|
||||
|
||||
struct kgem_bo *sna_static_stream_fini(struct sna *sna,
|
||||
struct sna_static_stream *stream)
|
||||
{
|
||||
struct kgem_bo *bo;
|
||||
|
||||
DBG(("uploaded %d bytes of static state\n", stream->used));
|
||||
|
||||
bo = kgem_create_linear(&sna->kgem, stream->used, 0);
|
||||
if (bo && !kgem_bo_write(&sna->kgem, bo, stream->data, stream->used)) {
|
||||
// kgem_bo_destroy(&sna->kgem, bo);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
free(stream->data);
|
||||
|
||||
return bo;
|
||||
}
|
||||
unsigned
|
||||
sna_static_stream_compile_wm(struct sna *sna,
|
||||
struct sna_static_stream *stream,
|
||||
bool (*compile)(struct brw_compile *, int),
|
||||
int dispatch_width)
|
||||
{
|
||||
struct brw_compile p;
|
||||
|
||||
brw_compile_init(&p, sna->kgem.gen,
|
||||
sna_static_stream_map(stream,
|
||||
256*sizeof(uint32_t), 64));
|
||||
|
||||
if (!compile(&p, dispatch_width)) {
|
||||
stream->used -= 256*sizeof(uint32_t);
|
||||
return 0;
|
||||
}
|
||||
|
||||
assert(p.nr_insn*sizeof(struct brw_instruction) <= 256*sizeof(uint32_t));
|
||||
|
||||
stream->used -= 256*sizeof(uint32_t) - p.nr_insn*sizeof(struct brw_instruction);
|
||||
return sna_static_stream_offsetof(stream, p.store);
|
||||
}
|
Loading…
Reference in New Issue