ddk: tiny libc and kernel imports library

git-svn-id: svn://kolibrios.org@1408 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2010-02-12 20:11:35 +00:00
parent 7c0a5de1e7
commit 1402c59305
115 changed files with 13459 additions and 655 deletions

68
drivers/ddk/Makefile Normal file

@@ -0,0 +1,68 @@
CC = gcc
AS = as
DRV_TOPDIR = $(CURDIR)/..
DRV_INCLUDES = $(DRV_TOPDIR)/include
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/asm
DEFINES = -DKOLIBRI -D__KERNEL__ -DCONFIG_X86_32
CFLAGS = -c -O2 $(INCLUDES) $(DEFINES) -fomit-frame-pointer -fno-builtin-printf
NAME:= libdrv
CORE_SRC= core.S
NAME_SRCS:= \
debug/dbglog.c \
io/create.c \
io/finfo.c \
io/ssize.c \
io/write.c \
malloc/malloc.c \
stdio/icompute.c \
stdio/vsprintf.c \
stdio/doprnt.c \
stdio/chartab.c \
string/_memmove.S \
string/_strncat.S \
string/_strncmp.S \
string/_strncpy.S \
string/_strnlen.S \
string/memcpy.S \
string/memcmp.S \
string/memset.S \
string/strcat.S \
string/strchr.S \
string/strcpy.S \
string/strncpy.S \
string/strncmp.S \
string/strlen.S
NAME_OBJS = $(patsubst %.S, %.o, $(patsubst %.asm, %.o,\
$(patsubst %.c, %.o, $(NAME_SRCS))))
TARGET = $(NAME).a
all: $(TARGET) libcore.a
$(TARGET): $(NAME_OBJS) $(NAME_SRCS) Makefile
$(AR) cvrs $@ $(NAME_OBJS)
libcore.a: core.S Makefile
$(AS) -o core.o $<
$(LD) -shared -s --out-implib $@ --output-def core.def -o core.dll core.o
%.o: %.S Makefile
$(AS) -o $@ $<
%.o: %.c Makefile
$(CC) $(CFLAGS) -o $@ $<

141
drivers/ddk/core.S Normal file

@@ -0,0 +1,141 @@
.file "export.s"
.intel_syntax
.text
.global _AllocPage
.global _AllocPages
.global _CreateRingBuffer
.global _Delay
.global _DestroyObject
.global _FreeKernelSpace
.global _GetDisplay
.global _GetPgAddr
.global _GetService
.global _KernelAlloc
.global _KernelFree
.global _MapIoMem
.global _PciApi
.global _PciRead16
.global _PciRead32
.global _PciRead8
.global _PciWrite16
.global _PciWrite32
.global _PciWrite8
.global _RegService
.global _SetScreen
.global _SysMsgBoardStr
.def _AllocPage; .scl 2; .type 32; .endef
.def _AllocPages; .scl 2; .type 32; .endef
.def _CreateRingBuffer; .scl 2; .type 32; .endef
.def _Delay; .scl 2; .type 32; .endef
.def _DestroyObject; .scl 2; .type 32; .endef
.def _FreeKernelSpace; .scl 2; .type 32; .endef
.def _GetDisplay; .scl 2; .type 32; .endef
.def _GetPgAddr; .scl 2; .type 32; .endef
.def _GetService; .scl 2; .type 32; .endef
.def _KernelAlloc; .scl 2; .type 32; .endef
.def _KernelFree; .scl 2; .type 32; .endef
.def _MapIoMem; .scl 2; .type 32; .endef
.def _PciApi; .scl 2; .type 32; .endef
.def _PciRead16; .scl 2; .type 32; .endef
.def _PciRead32; .scl 2; .type 32; .endef
.def _PciRead8; .scl 2; .type 32; .endef
.def _PciWrite16; .scl 2; .type 32; .endef
.def _PciWrite32; .scl 2; .type 32; .endef
.def _PciWrite8; .scl 2; .type 32; .endef
.def _RegService; .scl 2; .type 32; .endef
.def _SetScreen; .scl 2; .type 32; .endef
.def _SysMsgBoardStr; .scl 2; .type 32; .endef
_AllocPage:
_AllocPages:
_CreateRingBuffer:
_Delay:
_DestroyObject:
_FreeKernelSpace:
_GetDisplay:
_GetPgAddr:
_GetService:
_KernelAlloc:
_KernelFree:
_MapIoMem:
_PciApi:
_PciRead16:
_PciRead32:
_PciRead8:
_PciWrite16:
_PciWrite32:
_PciWrite8:
_RegService:
_SetScreen:
_SysMsgBoardStr:
ret
.section .drectve
.ascii " -export:AllocPage" # gcc ABI
.ascii " -export:AllocPages" # gcc ABI
.ascii " -export:CreateRingBuffer" # stdcall
.ascii " -export:Delay" # stdcall
.ascii " -export:DestroyObject"
.ascii " -export:FreeKernelSpace" # stdcall
.ascii " -export:GetDisplay" # stdcall
.ascii " -export:GetPgAddr" # stdcall
.ascii " -export:GetService" # stdcall
.ascii " -export:KernelAlloc" # stdcall
.ascii " -export:KernelFree" # stdcall
.ascii " -export:MapIoMem" # stdcall
.ascii " -export:PciApi" #
.ascii " -export:PciRead16" # stdcall
.ascii " -export:PciRead32" # stdcall
.ascii " -export:PciRead8" # stdcall
.ascii " -export:PciWrite16" # stdcall
.ascii " -export:PciWrite32" # stdcall
.ascii " -export:PciWrite8" # stdcall
.ascii " -export:RegService" # stdcall
.ascii " -export:SetScreen" # stdcall
.ascii " -export:SysMsgBoardStr" # stdcall
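
core.S only has to satisfy the linker: every stub falls through to a single ret, while the .drectve strings make ld emit an import library (libcore.a) whose entries are resolved against the kernel's export table when the driver is loaded. Below is a minimal sketch of how a driver might call these imports once it links against libcore.a; the prototypes are illustrative assumptions, the real ones live in the DDK headers.

#include <stddef.h>

void* __attribute__((stdcall)) KernelAlloc(size_t size);    /* assumed prototype */
void  __attribute__((stdcall)) KernelFree(void *mem);       /* assumed prototype */
void  __attribute__((stdcall)) SysMsgBoardStr(char *text);  /* assumed prototype */

static int probe(void)
{
    char *buf = KernelAlloc(4096);            /* resolved through the import table at load time */
    if (!buf)
        return 0;
    SysMsgBoardStr("ddk: hello from a driver\n");
    KernelFree(buf);
    return 1;
}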

186
drivers/ddk/debug/dbglog.c Normal file

@@ -0,0 +1,186 @@
#include <types.h>
#include <syscall.h>
#pragma pack(push, 1)
typedef struct
{
char sec;
char min;
char hour;
char rsv;
}detime_t;
typedef struct
{
char day;
char month;
short year;
}dedate_t;
typedef struct
{
unsigned attr;
unsigned flags;
union
{
detime_t ctime;
unsigned cr_time;
};
union
{
dedate_t cdate;
unsigned cr_date;
};
union
{
detime_t atime;
unsigned acc_time;
};
union
{
dedate_t adate;
unsigned acc_date;
};
union
{
detime_t mtime;
unsigned mod_time;
};
union
{
dedate_t mdate;
unsigned mod_date;
};
unsigned size;
unsigned size_high;
} FILEINFO;
#pragma pack(pop)
typedef struct
{
char *path;
int offset;
} dbgfile_t;
static dbgfile_t dbgfile;
#define va_start(v,l) __builtin_va_start(v,l)
#define va_end(v) __builtin_va_end(v)
#define va_arg(v,l) __builtin_va_arg(v,l)
#define __va_copy(d,s) __builtin_va_copy(d,s)
typedef __builtin_va_list __gnuc_va_list;
typedef __gnuc_va_list va_list;
#define arg(x) va_arg (ap, u32_t)
int dbg_open(char *path)
{
FILEINFO info;
dbgfile.offset = 0;
if(get_fileinfo(path,&info))
{
if(!create_file(path))
{
dbgfile.path = path;
return true;
}
else return false;
};
set_file_size(path, 0);
dbgfile.path = path;
dbgfile.offset = 0;
return true;
};
int vsnprintf(char *s, size_t n, const char *format, va_list arg);
int printf(const char* format, ...)
{
char txtbuf[256];
int len = 0;
va_list ap;
va_start(ap, format);
if (format)
len = vsnprintf(txtbuf, 256, format, ap);
va_end(ap);
if( len )
SysMsgBoardStr(txtbuf);
return len;
}
int dbgprintf(const char* format, ...)
{
char txtbuf[256];
unsigned writes;
int len = 0;
va_list ap;
va_start(ap, format);
if (format)
len = vsnprintf(txtbuf, 256, format, ap);
va_end(ap);
if( len )
{
SysMsgBoardStr(txtbuf);
if(dbgfile.path)
{
write_file(dbgfile.path,txtbuf,dbgfile.offset,len,&writes);
dbgfile.offset+=writes;
};
};
return len;
}
int xf86DrvMsg(int skip, int code, const char* format, ...)
{
char txtbuf[256];
unsigned writes;
va_list ap;
int len = 0;
va_start(ap, format);
if (format)
len = vsnprintf(txtbuf, 256, format, ap);
va_end(ap);
if( len )
{
SysMsgBoardStr(txtbuf);
if(dbgfile.path)
{
write_file(dbgfile.path,txtbuf,dbgfile.offset,len,&writes);
dbgfile.offset+=writes;
};
};
return len;
}
int snprintf(char *s, size_t n, const char *format, ...)
{
va_list ap;
int retval;
va_start(ap, format);
retval = vsnprintf(s, n, format, ap);
va_end(ap);
return retval;
}
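
dbg_open() creates the log file if it does not exist (and truncates it otherwise) and records its path; after that, every dbgprintf() both prints to the system debug board and appends to the file, advancing dbgfile.offset by the number of bytes actually written. A hedged usage sketch — the log path is only an example:

int dbg_open(char *path);
int dbgprintf(const char* format, ...);

static void start_logging(void)
{
    if (!dbg_open("/hd0/1/drv.log"))                      /* hypothetical location */
        dbgprintf("dbglog: board output only\n");         /* no file, board still works */
    dbgprintf("driver loaded\n");
}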

22
drivers/ddk/io/create.c Normal file

@@ -0,0 +1,22 @@
int create_file(const char *path)
{
int retval;
__asm__ __volatile__ (
"pushl $0 \n\t"
"pushl $0 \n\t"
"movl %0, 1(%%esp) \n\t"
"pushl $0 \n\t"
"pushl $0 \n\t"
"pushl $0 \n\t"
"pushl $0 \n\t"
"pushl $2 \n\t"
"movl %%esp, %%ebx \n\t"
"movl $70, %%eax \n\t"
"int $0x40 \n\t"
"addl $28, %%esp \n\t"
:"=a" (retval)
:"r" (path)
:"ebx");
return retval;
};

81
drivers/ddk/io/finfo.c Normal file

@@ -0,0 +1,81 @@
#pragma pack(push, 1)
typedef struct
{
char sec;
char min;
char hour;
char rsv;
}detime_t;
typedef struct
{
char day;
char month;
short year;
}dedate_t;
typedef struct
{
unsigned attr;
unsigned flags;
union
{
detime_t ctime;
unsigned cr_time;
};
union
{
dedate_t cdate;
unsigned cr_date;
};
union
{
detime_t atime;
unsigned acc_time;
};
union
{
dedate_t adate;
unsigned acc_date;
};
union
{
detime_t mtime;
unsigned mod_time;
};
union
{
dedate_t mdate;
unsigned mod_date;
};
unsigned size;
unsigned size_high;
} FILEINFO;
#pragma pack(pop)
int get_fileinfo(const char *path,FILEINFO *info)
{
int retval;
asm __volatile__
(
"pushl $0 \n\t"
"pushl $0 \n\t"
"movl %0, 1(%%esp) \n\t"
"pushl %%ebx \n\t"
"pushl $0 \n\t"
"pushl $0 \n\t"
"pushl $0 \n\t"
"pushl $5 \n\t"
"movl %%esp, %%ebx \n\t"
"movl $70, %%eax \n\t"
"int $0x40 \n\t"
"addl $28, %%esp \n\t"
:"=a" (retval)
:"r" (path), "b" (info)
);
return retval;
};

21
drivers/ddk/io/ssize.c Normal file

@@ -0,0 +1,21 @@
int set_file_size(const char *path, unsigned size)
{
int retval;
__asm__ __volatile__(
"pushl $0 \n\t"
"pushl $0 \n\t"
"movl %0, 1(%%esp) \n\t"
"pushl $0 \n\t"
"pushl $0 \n\t"
"pushl $0 \n\t"
"pushl %%ebx \n\t"
"push $4 \n\t"
"movl %%esp, %%ebx \n\t"
"movl $70, %%eax \n\t"
"int $0x40 \n\t"
"addl $28, %%esp \n\t"
:"=a" (retval)
:"a" (path), "b" (size));
return retval;
};

26
drivers/ddk/io/write.c Normal file

@@ -0,0 +1,26 @@
int write_file(const char *path,const void *buff,
unsigned offset,unsigned count,unsigned *writes)
{
int retval;
__asm__ __volatile__(
"pushl $0 \n\t"
"pushl $0 \n\t"
"movl %%eax, 1(%%esp) \n\t"
"pushl %%ebx \n\t"
"pushl %%edx \n\t"
"pushl $0 \n\t"
"pushl %%ecx \n\t"
"pushl $3 \n\t"
"movl %%esp, %%ebx \n\t"
"mov $70, %%eax \n\t"
"int $0x40 \n\t"
"testl %%esi, %%esi \n\t"
"jz 1f \n\t"
"movl %%ebx, (%%esi) \n\t"
"1:"
"addl $28, %%esp \n\t"
:"=a" (retval)
:"a"(path),"b"(buff),"c"(offset),"d"(count),"S"(writes));
return retval;
};
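
create_file(), get_fileinfo(), set_file_size() and write_file() are all thin wrappers around KolibriOS system function 70: each one builds a small parameter block on the stack (seven pushes), points ebx at it and issues int 0x40 with eax = 70. A hedged C picture of the block those asm statements assemble — the field names are mine, the layout follows the pushes above:

#pragma pack(push, 1)
typedef struct
{
    unsigned subfn;       /* +0: 2 = create, 3 = write, 4 = set size, 5 = get info */
    unsigned offset;      /* +4: file offset (or new size for subfn 4) */
    unsigned offset_high; /* +8: always 0 here */
    unsigned count;       /* +12: byte count to transfer */
    void *data;           /* +16: data buffer, or FILEINFO* for subfn 5 */
    char zero;            /* +20: 0 marker meaning "the path follows as a pointer" */
    const char *path;     /* +21: pointer to the ASCIIZ path */
} fn70_block_t;
#pragma pack(pop)

The call returns a status code in eax; for writes the kernel also reports the number of bytes written in ebx, which is what write_file() stores through its writes pointer.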

3992
drivers/ddk/malloc/malloc.c Normal file

File diff suppressed because it is too large.

261
drivers/ddk/stdio/chartab.c Normal file

@@ -0,0 +1,261 @@
#include "ctype.h"
char __ctype[] = {
0,
_C,
_C,
_C,
_C,
_C,
_C,
_C,
_C,
_C,
_C|_S,
_C|_S,
_C|_S,
_C|_S,
_C|_S,
_C,
_C,
_C,
_C,
_C,
_C,
_C,
_C,
_C,
_C,
_C,
_C,
_C,
_C,
_C,
_C,
_C,
_C,
_S,
_P,
_P,
_P,
_P,
_P,
_P,
_P,
_P,
_P,
_P,
_P,
_P,
_P,
_P,
_P,
_N,
_N,
_N,
_N,
_N,
_N,
_N,
_N,
_N,
_N,
_P,
_P,
_P,
_P,
_P,
_P,
_P,
_U|_X,
_U|_X,
_U|_X,
_U|_X,
_U|_X,
_U|_X,
_U,
_U,
_U,
_U,
_U,
_U,
_U,
_U,
_U,
_U,
_U,
_U,
_U,
_U,
_U,
_U,
_U,
_U,
_U,
_U,
_P,
_P,
_P,
_P,
_P,
_P,
_L|_X,
_L|_X,
_L|_X,
_L|_X,
_L|_X,
_L|_X,
_L,
_L,
_L,
_L,
_L,
_L,
_L,
_L,
_L,
_L,
_L,
_L,
_L,
_L,
_L,
_L,
_L,
_L,
_L,
_L,
_P,
_P,
_P,
_P,
_C,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
};

44
drivers/ddk/stdio/ctype.h Normal file

@@ -0,0 +1,44 @@
/* The <ctype.h> header file defines some macros used to identify characters.
* It works by using a table stored in chartab.c. When a character is presented
* to one of these macros, the character is used as an index into the table
* (__ctype) to retrieve a byte. The relevant bit is then extracted.
*/
#ifndef _CTYPE_H
#define _CTYPE_H
extern char __ctype[]; /* property array defined in chartab.c */
#define _U 0x01 /* this bit is for upper-case letters [A-Z] */
#define _L 0x02 /* this bit is for lower-case letters [a-z] */
#define _N 0x04 /* this bit is for numbers [0-9] */
#define _S 0x08 /* this bit is for white space \t \n \f etc */
#define _P 0x10 /* this bit is for punctuation characters */
#define _C 0x20 /* this bit is for control characters */
#define _X 0x40 /* this bit is for hex digits [a-f] and [A-F]*/
/* Macros for identifying character classes. */
#define isalnum(c) ((__ctype+1)[c]&(_U|_L|_N))
#define isalpha(c) ((__ctype+1)[c]&(_U|_L))
#define iscntrl(c) ((__ctype+1)[c]&_C)
#define isgraph(c) ((__ctype+1)[c]&(_P|_U|_L|_N))
#define ispunct(c) ((__ctype+1)[c]&_P)
#define isspace(c) ((__ctype+1)[c]&_S)
#define isxdigit(c) ((__ctype+1)[c]&(_N|_X))
#define isdigit(c) ((unsigned) ((c)-'0') < 10)
#define islower(c) ((unsigned) ((c)-'a') < 26)
#define isupper(c) ((unsigned) ((c)-'A') < 26)
#define isprint(c) ((unsigned) ((c)-' ') < 95)
#define isascii(c) ((unsigned) (c) < 128)
#define toascii(c) ((c) & 0x7f)
static inline int toupper(int c)
{
return islower(c) ? c - 'a' + 'A' : c ;
}
#endif /* _CTYPE_H */
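
The table in chartab.c has one leading 0 entry, so the (__ctype+1)[c] indexing above stays in bounds when c is EOF (-1). A small, purely illustrative use of the classification macros:

#include "ctype.h"

static int count_hex_digits(const char *s)
{
    int n = 0;
    while (*s) {
        if (isxdigit((unsigned char)*s))   /* table lookup: _N or _X bit set */
            n++;
        s++;
    }
    return n;
}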

315
drivers/ddk/stdio/doprnt.c Normal file

@@ -0,0 +1,315 @@
/*
* doprnt.c - print formatted output
*/
/* $Header$ */
#include "ctype.h"
#include "stdio.h"
#include <stdarg.h>
#include <string.h>
#include "loc_incl.h"
#define NOFLOAT
#define putc(c, p) (--(p)->_count >= 0 ? \
(int) (*(p)->_ptr++ = (c)) : EOF)
/* gnum() is used to get the width and precision fields of a format. */
static const char *
gnum(register const char *f, int *ip, va_list *app)
{
register int i, c;
if (*f == '*') {
*ip = va_arg((*app), int);
f++;
} else {
i = 0;
while ((c = *f - '0') >= 0 && c <= 9) {
i = i*10 + c;
f++;
}
*ip = i;
}
return f;
}
#if _EM_WSIZE == _EM_PSIZE
#define set_pointer(flags) /* nothing */
#elif _EM_LSIZE == _EM_PSIZE
#define set_pointer(flags) (flags |= FL_LONG)
#else
#error garbage pointer size
#define set_pointer(flags) /* compilation might continue */
#endif
/* print an ordinal number */
static char *
o_print(va_list *ap, int flags, char *s, char c, int precision, int is_signed)
{
long signed_val;
unsigned long unsigned_val;
char *old_s = s;
int base;
switch (flags & (FL_SHORT | FL_LONG)) {
case FL_SHORT:
if (is_signed) {
signed_val = (short) va_arg(*ap, int);
} else {
unsigned_val = (unsigned short) va_arg(*ap, unsigned);
}
break;
case FL_LONG:
if (is_signed) {
signed_val = va_arg(*ap, long);
} else {
unsigned_val = va_arg(*ap, unsigned long);
}
break;
default:
if (is_signed) {
signed_val = va_arg(*ap, int);
} else {
unsigned_val = va_arg(*ap, unsigned int);
}
break;
}
if (is_signed) {
if (signed_val < 0) {
*s++ = '-';
signed_val = -signed_val;
} else if (flags & FL_SIGN) *s++ = '+';
else if (flags & FL_SPACE) *s++ = ' ';
unsigned_val = signed_val;
}
if ((flags & FL_ALT) && (c == 'o')) *s++ = '0';
if (!unsigned_val && c != 'p') {
if (!precision)
return s;
} else if (((flags & FL_ALT) && (c == 'x' || c == 'X'))
|| c == 'p') {
*s++ = '0';
*s++ = (c == 'X' ? 'X' : 'x');
}
switch (c) {
case 'b': base = 2; break;
case 'o': base = 8; break;
case 'd':
case 'i':
case 'u': base = 10; break;
case 'x':
case 'X':
case 'p': base = 16; break;
}
s = _i_compute(unsigned_val, base, s, precision);
if (c == 'X')
while (old_s != s) {
*old_s = toupper(*old_s);
old_s++;
}
return s;
}
int
_doprnt(register const char *fmt, va_list ap, FILE *stream)
{
register char *s;
register int j;
int i, c, width, precision, zfill, flags, between_fill;
int nrchars=0;
const char *oldfmt;
char *s1, buf[1025];
while (c = *fmt++) {
if (c != '%') {
#ifdef CPM
if (c == '\n') {
if (putc('\r', stream) == EOF)
return nrchars ? -nrchars : -1;
nrchars++;
}
#endif
if (putc(c, stream) == EOF)
return nrchars ? -nrchars : -1;
nrchars++;
continue;
}
flags = 0;
do {
switch(*fmt) {
case '-': flags |= FL_LJUST; break;
case '+': flags |= FL_SIGN; break;
case ' ': flags |= FL_SPACE; break;
case '#': flags |= FL_ALT; break;
case '0': flags |= FL_ZEROFILL; break;
default: flags |= FL_NOMORE; continue;
}
fmt++;
} while(!(flags & FL_NOMORE));
oldfmt = fmt;
fmt = gnum(fmt, &width, &ap);
if (fmt != oldfmt) flags |= FL_WIDTHSPEC;
if (*fmt == '.') {
fmt++; oldfmt = fmt;
fmt = gnum(fmt, &precision, &ap);
if (precision >= 0) flags |= FL_PRECSPEC;
}
if ((flags & FL_WIDTHSPEC) && width < 0) {
width = -width;
flags |= FL_LJUST;
}
if (!(flags & FL_WIDTHSPEC)) width = 0;
if (flags & FL_SIGN) flags &= ~FL_SPACE;
if (flags & FL_LJUST) flags &= ~FL_ZEROFILL;
s = s1 = buf;
switch (*fmt) {
case 'h': flags |= FL_SHORT; fmt++; break;
case 'l': flags |= FL_LONG; fmt++; break;
case 'L': flags |= FL_LONGDOUBLE; fmt++; break;
}
switch (c = *fmt++) {
default:
#ifdef CPM
if (c == '\n') {
if (putc('\r', stream) == EOF)
return nrchars ? -nrchars : -1;
nrchars++;
}
#endif
if (putc(c, stream) == EOF)
return nrchars ? -nrchars : -1;
nrchars++;
continue;
case 'n':
if (flags & FL_SHORT)
*va_arg(ap, short *) = (short) nrchars;
else if (flags & FL_LONG)
*va_arg(ap, long *) = (long) nrchars;
else
*va_arg(ap, int *) = (int) nrchars;
continue;
case 's':
s1 = va_arg(ap, char *);
if (s1 == NULL)
s1 = "(null)";
s = s1;
while (precision || !(flags & FL_PRECSPEC)) {
if (*s == '\0')
break;
s++;
precision--;
}
break;
case 'p':
set_pointer(flags);
/* fallthrough */
case 'b':
case 'o':
case 'u':
case 'x':
case 'X':
if (!(flags & FL_PRECSPEC)) precision = 1;
else if (c != 'p') flags &= ~FL_ZEROFILL;
s = o_print(&ap, flags, s, c, precision, 0);
break;
case 'd':
case 'i':
flags |= FL_SIGNEDCONV;
if (!(flags & FL_PRECSPEC)) precision = 1;
else flags &= ~FL_ZEROFILL;
s = o_print(&ap, flags, s, c, precision, 1);
break;
case 'c':
*s++ = va_arg(ap, int);
break;
#ifndef NOFLOAT
case 'G':
case 'g':
if ((flags & FL_PRECSPEC) && (precision == 0))
precision = 1;
case 'f':
case 'E':
case 'e':
if (!(flags & FL_PRECSPEC))
precision = 6;
if (precision >= sizeof(buf))
precision = sizeof(buf) - 1;
flags |= FL_SIGNEDCONV;
s = _f_print(&ap, flags, s, c, precision);
break;
#endif /* NOFLOAT */
case 'r':
ap = va_arg(ap, va_list);
fmt = va_arg(ap, char *);
continue;
}
zfill = ' ';
if (flags & FL_ZEROFILL) zfill = '0';
j = s - s1;
/* between_fill is true under the following conditions:
* 1- the fill character is '0'
* and
* 2a- the number is of the form 0x... or 0X...
* or
* 2b- the number contains a sign or space
*/
between_fill = 0;
if ((flags & FL_ZEROFILL)
&& (((c == 'x' || c == 'X') && (flags & FL_ALT) && j > 1)
|| (c == 'p')
|| ((flags & FL_SIGNEDCONV)
&& ( *s1 == '+' || *s1 == '-' || *s1 == ' '))))
between_fill++;
if ((i = width - j) > 0)
if (!(flags & FL_LJUST)) { /* right justify */
nrchars += i;
if (between_fill) {
if (flags & FL_SIGNEDCONV) {
j--; nrchars++;
if (putc(*s1++, stream) == EOF)
return nrchars ? -nrchars : -1;
} else {
j -= 2; nrchars += 2;
if ((putc(*s1++, stream) == EOF)
|| (putc(*s1++, stream) == EOF))
return nrchars ? -nrchars : -1;
}
}
do {
if (putc(zfill, stream) == EOF)
return nrchars ? -nrchars : -1;
} while (--i);
}
nrchars += j;
while (--j >= 0) {
if (putc(*s1++, stream) == EOF)
return nrchars ? -nrchars : -1;
}
if (i > 0) nrchars += i;
while (--i >= 0)
if (putc(zfill, stream) == EOF)
return nrchars ? -nrchars : -1;
}
return nrchars;
}

21
drivers/ddk/stdio/icompute.c Normal file

@@ -0,0 +1,21 @@
/*
* icompute.c - compute an integer
*/
/* $Header$ */
#include "loc_incl.h"
/* This routine is used in doprnt.c as well as in tmpfile.c and tmpnam.c. */
char *
_i_compute(unsigned long val, int base, char *s, int nrdigits)
{
int c;
c= val % base ;
val /= base ;
if (val || nrdigits > 1)
s = _i_compute(val, base, s, nrdigits - 1);
*s++ = (c>9 ? c-10+'a' : c+'0');
return s;
}
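
_i_compute() emits the digits most-significant first, zero-pads to at least nrdigits characters and returns a pointer just past the last digit without terminating the string — the caller (doprnt.c) tracks the length itself. An illustrative use:

#include "loc_incl.h"   /* declares _i_compute() */

static void demo(void)
{
    char buf[16];
    char *end = _i_compute(255, 16, buf, 1);   /* buf now begins with "ff" */
    *end = '\0';                               /* NUL-terminate it manually */
}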

37
drivers/ddk/stdio/loc_incl.h Normal file

@@ -0,0 +1,37 @@
/*
* loc_incl.h - local include file for stdio library
*/
/* $Header$ */
#include "stdio.h"
#define io_testflag(p,x) ((p)->_flags & (x))
#include <stdarg.h>
#ifdef _ANSI
int _doprnt(const char *format, va_list ap, FILE *stream);
int _doscan(FILE * stream, const char *format, va_list ap);
char *_i_compute(unsigned long val, int base, char *s, int nrdigits);
char *_f_print(va_list *ap, int flags, char *s, char c, int precision);
void __cleanup(void);
#ifndef NOFLOAT
char *_ecvt(long double value, int ndigit, int *decpt, int *sign);
char *_fcvt(long double value, int ndigit, int *decpt, int *sign);
#endif /* NOFLOAT */
#endif
#define FL_LJUST 0x0001 /* left-justify field */
#define FL_SIGN 0x0002 /* sign in signed conversions */
#define FL_SPACE 0x0004 /* space in signed conversions */
#define FL_ALT 0x0008 /* alternate form */
#define FL_ZEROFILL 0x0010 /* fill with zero's */
#define FL_SHORT 0x0020 /* optional h */
#define FL_LONG 0x0040 /* optional l */
#define FL_LONGDOUBLE 0x0080 /* optional L */
#define FL_WIDTHSPEC 0x0100 /* field width is specified */
#define FL_PRECSPEC 0x0200 /* precision is specified */
#define FL_SIGNEDCONV 0x0400 /* may contain a sign */
#define FL_NOASSIGN 0x0800 /* do not assign (in scanf) */
#define FL_NOMORE 0x1000 /* all flags collected */

46
drivers/ddk/stdio/stdio.h Normal file

@@ -0,0 +1,46 @@
/*
* stdio.h - input/output definitions
*
* (c) copyright 1987 by the Vrije Universiteit, Amsterdam, The Netherlands.
* See the copyright notice in the ACK home directory, in the file "Copyright".
*/
/* $Header$ */
#ifndef _STDIO_H
#define _STDIO_H
/*
* Focus point of all stdio activity.
*/
typedef struct __iobuf {
int _count;
int _fd;
int _flags;
int _bufsiz;
unsigned char *_buf;
unsigned char *_ptr;
} FILE;
#define _IOFBF 0x000
#define _IOREAD 0x001
#define _IOWRITE 0x002
#define _IONBF 0x004
#define _IOMYBUF 0x008
#define _IOEOF 0x010
#define _IOERR 0x020
#define _IOLBF 0x040
#define _IOREADING 0x080
#define _IOWRITING 0x100
#define _IOAPPEND 0x200
#define _IOFIFO 0x400
/* The following definitions are also in <unistd.h>. They should not
* conflict.
*/
#define SEEK_SET 0
#define SEEK_CUR 1
#define SEEK_END 2
#define EOF (-1)
#endif /* _STDIO_H */

37
drivers/ddk/stdio/vsprintf.c Normal file

@@ -0,0 +1,37 @@
/*
* vsprintf - print formatted output without ellipsis on an array
*/
/* $Header$ */
#include "stdio.h"
#include <stdarg.h>
#include <limits.h>
#include "loc_incl.h"
#define putc(c, p) (--(p)->_count >= 0 ? \
(int) (*(p)->_ptr++ = (c)) : EOF)
int
vsnprintf(char *s, unsigned n, const char *format, va_list arg)
{
int retval;
FILE tmp_stream;
tmp_stream._fd = -1;
tmp_stream._flags = _IOWRITE + _IONBF + _IOWRITING;
tmp_stream._buf = (unsigned char *) s;
tmp_stream._ptr = (unsigned char *) s;
tmp_stream._count = n-1;
retval = _doprnt(format, arg, &tmp_stream);
tmp_stream._count = 1;
putc('\0',&tmp_stream);
return retval;
}
int
vsprintf(char *s, const char *format, va_list arg)
{
return vsnprintf(s, INT_MAX, format, arg);
}
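
vsnprintf() reuses _doprnt() by faking a write-only FILE whose buffer is the destination array: _count is set to n-1 so the putc() macro silently drops anything past that, and the final putc() appends the terminating NUL. Note that on truncation _doprnt() reports failure, so unlike C99 the return value is negative rather than the would-be length. A short usage sketch:

#include <stddef.h>
int snprintf(char *s, size_t n, const char *format, ...);   /* from dbglog.c */

static void demo(void)
{
    char buf[32];
    int len = snprintf(buf, sizeof(buf), "irq=%d name=%s", 11, "uart");
    /* len is the character count, or a negative value if buf filled up */
    (void)len;
}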

67
drivers/ddk/string/_memmove.S Normal file

@@ -0,0 +1,67 @@
# _memmove() Author: Kees J. Bot 2 Jan 1994
# void *_memmove(void *s1, const void *s2, size_t n)
# Copy a chunk of memory. Handle overlap.
.intel_syntax
.globl __memmove, __memcpy
.text
.align 16
__memmove:
push ebp
mov ebp, esp
push esi
push edi
mov edi, [ebp+8] # String s1
mov esi, [ebp+12] # String s2
mov ecx, [ebp+16] # Length
mov eax, edi
sub eax, esi
cmp eax, ecx
jb downwards # if (s2 - s1) < n then copy downwards
__memcpy:
cld # Clear direction bit: upwards
cmp ecx, 16
jb upbyte # Don't bother being smart with short arrays
mov eax, esi
or eax, edi
testb al, 1
jnz upbyte # Bit 0 set, use byte copy
testb al, 2
jnz upword # Bit 1 set, use word copy
uplword:
shrd eax, ecx, 2 # Save low 2 bits of ecx in eax
shr ecx, 2
rep movsd # Copy longwords.
shld ecx, eax, 2 # Restore excess count
upword:
shr ecx, 1
rep movsw # Copy words
adc ecx, ecx # One more byte?
upbyte:
rep movsb # Copy bytes
done:
mov eax, [ebp+8] # Absolutely no one cares about this value
pop edi
pop esi
pop ebp
ret
# Handle bad overlap by copying downwards, don't bother to do word copies.
downwards:
std # Set direction bit: downwards
lea esi, [esi+ecx-1]
lea edi, [edi+ecx-1]
rep movsb # Copy bytes
cld
jmp done
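
The overlap test above relies on unsigned wrap-around: (dest - src) is below n exactly when dest starts inside the source range, and only then does the copy have to run backwards. A C equivalent of that decision, for illustration only:

#include <stddef.h>

static void *memmove_sketch(void *dst, const void *src, size_t n)
{
    unsigned char *d = dst;
    const unsigned char *s = src;
    if ((size_t)(d - s) < n) {        /* dest overlaps the tail of src */
        while (n--)
            d[n] = s[n];              /* copy downwards */
    } else {
        size_t i;
        for (i = 0; i < n; i++)       /* copy upwards */
            d[i] = s[i];
    }
    return dst;
}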

43
drivers/ddk/string/_strncat.S Normal file

@@ -0,0 +1,43 @@
# _strncat() Author: Kees J. Bot
# 1 Jan 1994
# char *_strncat(char *s1, const char *s2, size_t edx)
# Append string s2 to s1.
#
.intel_syntax
.global __strncat
.text
.align 16
__strncat:
push ebp
mov ebp, esp
push esi
push edi
mov edi, [ebp+8] # String s1
mov ecx, -1
xorb al, al # Null byte
cld
repne
scasb # Look for the zero byte in s1
dec edi # Back one up (and clear 'Z' flag)
push edi # Save end of s1
mov edi, [12+ebp] # edi = string s2
mov ecx, edx # Maximum count
repne
scasb # Look for the end of s2
jne no0
inc ecx # Exclude null byte
no0: sub edx, ecx # Number of bytes in s2
mov ecx, edx
mov esi, [12+ebp] # esi = string s2
pop edi # edi = end of string s1
rep
movsb # Copy bytes
stosb # Add a terminating null
mov eax, [8+ebp] # Return s1
pop edi
pop esi
pop ebp
ret

44
drivers/ddk/string/_strncmp.S Normal file

@@ -0,0 +1,44 @@
# strncmp() Author: Kees J. Bot 1 Jan 1994
# int strncmp(const char *s1, const char *s2, size_t ecx)
# Compare two strings.
#
.intel_syntax
.globl __strncmp
.text
.align 16
__strncmp:
push ebp
mov ebp, esp
push esi
push edi
test ecx, ecx # Max length is zero?
je done
mov esi, [ebp+8] # esi = string s1
mov edi, [ebp+12] # edi = string s2
cld
compare:
cmpsb # Compare two bytes
jne done
cmpb [esi-1], 0 # End of string?
je done
dec ecx # Length limit reached?
jne compare
done:
seta al # al = (s1 > s2)
setb ah # ah = (s1 < s2)
subb al, ah
movsx eax, al # eax = (s1 > s2) - (s1 < s2), i.e. -1, 0, 1
pop edi
pop esi
pop ebp
ret

27
drivers/ddk/string/_strncpy.S Normal file

@@ -0,0 +1,27 @@
# _strncpy() Author: Kees J. Bot
# 1 Jan 1994
# char *_strncpy(char *s1, const char *s2, size_t ecx)
# Copy string s2 to s1.
#
.intel_syntax
.text
.globl __strncpy
.align 16
__strncpy:
mov edi, [ebp+12] # edi = string s2
xorb al, al # Look for a zero byte
mov edx, ecx # Save maximum count
cld
repne
scasb # Look for end of s2
sub edx, ecx # Number of bytes in s2 including null
xchg ecx, edx
mov esi, [ebp+12] # esi = string s2
mov edi, [ebp+8] # edi = string s1
rep
movsb # Copy bytes
ret

30
drivers/ddk/string/_strnlen.S Normal file

@@ -0,0 +1,30 @@
# _strnlen() Author: Kees J. Bot 1 Jan 1994
# size_t _strnlen(const char *s, size_t ecx)
# Return the length of a string.
.intel_syntax
.globl __strnlen
.text
.align 16
__strnlen:
push ebp
mov ebp, esp
push edi
mov edi, [ebp+8] # edi = string
xorb al, al # Look for a zero byte
mov edx, ecx # Save maximum count
cmpb cl, 1 # 'Z' bit must be clear if ecx = 0
cld
repne
scasb # Look for zero
jne no0
inc ecx # Don't count zero byte
no0:
mov eax, edx
sub eax, ecx # Compute bytes scanned
pop edi
pop ebp
ret

59
drivers/ddk/string/memcmp.S Normal file

@@ -0,0 +1,59 @@
# memcmp() Author: Kees J. Bot
# 2 Jan 1994
# int memcmp(const void *s1, const void *s2, size_t n)
# Compare two chunks of memory.
#
.intel_syntax
.globl _memcmp
.text
.align 16
_memcmp:
cld
push ebp
mov ebp, esp
push esi
push edi
mov esi, [8+ebp] # String s1
mov edi, [12+ebp] # String s2
mov ecx, [16+ebp] # Length
cmp ecx, 16
jb cbyte # Don't bother being smart with short arrays
mov eax, esi
or eax, edi
testb al, 1
jnz cbyte # Bit 0 set, use byte compare
testb al, 2
jnz cword # Bit 1 set, use word compare
clword: shrd eax, ecx, 2 # Save low two bits of ecx in eax
shr ecx, 2
repe
cmpsd # Compare longwords
sub esi, 4
sub edi, 4
inc ecx # Recompare the last longword
shld ecx, eax, 2 # And any excess bytes
jmp last
cword: shrd eax, ecx, 1 # Save low bit of ecx in eax
shr ecx, 1
repe
cmpsw # Compare words
sub esi, 2
sub edi, 2
inc ecx # Recompare the last word
shld ecx, eax, 1 # And one more byte?
cbyte: test ecx, ecx # Set 'Z' flag if ecx = 0
last: repe
cmpsb # Look for the first differing byte
seta al # al = (s1 > s2)
setb ah # ah = (s1 < s2)
subb al, ah
movsxb eax, al # eax = (s1 > s2) - (s1 < s2), i.e. -1, 0, 1
mov edx, esi # For bcmp() to play with
pop edi
pop esi
pop ebp
ret

26
drivers/ddk/string/memcpy.S Normal file

@@ -0,0 +1,26 @@
# memcpy() Author: Kees J. Bot 2 Jan 1994
# void *memcpy(void *s1, const void *s2, size_t n)
# Copy a chunk of memory.
# This routine need not handle overlap, so it does not handle overlap.
# One could simply call __memmove, the cost of the overlap check is
# negligible, but you are dealing with a programmer who believes that
# if anything can go wrong, it should go wrong.
.intel_syntax
.globl _memcpy
.text
.align 16
_memcpy:
push ebp
mov ebp, esp
push esi
push edi
mov edi, [ebp+8] # String s1
mov esi, [ebp+12] # String s2
mov ecx, [ebp+16] # Length
# No overlap check here
jmp __memcpy # Call the part of __memmove that copies up

47
drivers/ddk/string/memset.S Normal file

@@ -0,0 +1,47 @@
# memset() Author: Kees J. Bot
# 2 Jan 1994
# void *memset(void *s, int c, size_t n)
# Set a chunk of memory to the same byte value.
#
.intel_syntax
.global _memset
.text
.align 16
_memset:
push ebp
mov ebp, esp
push edi
mov edi, [8+ebp] # The string
movzx eax, byte ptr [12+ebp] # The fill byte
mov ecx, [16+ebp] # Length
cld
cmp ecx, 16
jb sbyte # Don't bother being smart with short arrays
test edi, 1
jnz sbyte # Bit 0 set, use byte store
test edi, 2
jnz sword # Bit 1 set, use word store
slword:
movb ah, al
mov edx, eax
sal edx, 16
or eax, edx # One byte to four bytes
shrd edx, ecx, 2 # Save low two bits of ecx in edx
shr ecx, 2
rep stosd # Store longwords.
shld ecx, edx, 2 # Restore low two bits
sword:
movb ah, al # One byte to two bytes
shr ecx, 1
rep stosw # Store words
adc ecx, ecx # One more byte?
sbyte:
rep stosb # Store bytes
done:
mov eax, [8+ebp] # Return some value you have no need for
pop edi
pop ebp
ret

15
drivers/ddk/string/strcat.S Normal file

@@ -0,0 +1,15 @@
# strcat() Author: Kees J. Bot
# 1 Jan 1994
# char *strcat(char *s1, const char *s2)
# Append string s2 to s1.
#
.intel_syntax
.global _strcat
.text
.align 16
_strcat:
mov edx, -1 # Unlimited length
jmp __strncat # Common code

46
drivers/ddk/string/strchr.S Normal file

@@ -0,0 +1,46 @@
# strchr() Author: Kees J. Bot 1 Jan 1994
# char *strchr(const char *s, int c)
# Look for a character in a string.
.intel_syntax
.globl _strchr
.text
.align 16
_strchr:
push ebp
mov ebp, esp
push edi
cld
mov edi, [ebp+8] # edi = string
mov edx, 16 # Look at small chunks of the string
next:
shl edx, 1 # Chunks become bigger each time
mov ecx, edx
xorb al, al # Look for the zero at the end
repne scasb
pushf # Remember the flags
sub ecx, edx
neg ecx # Some or all of the chunk
sub edi, ecx # Step back
movb al, [ebp+12] # The character to look for
repne scasb
je found
popf # Did we find the end of string earlier?
jne next # No, try again
xor eax, eax # Return NULL
pop edi
pop ebp
ret
found:
pop eax # Get rid of those flags
lea eax, [edi-1] # Address of byte found
pop edi
pop ebp
ret

24
drivers/ddk/string/strcpy.S Normal file

@@ -0,0 +1,24 @@
# strcpy() Author: Kees J. Bot
# 1 Jan 1994
# char *strcpy(char *s1, const char *s2)
# Copy string s2 to s1.
#
.intel_syntax
.global _strcpy
.text
.align 16
_strcpy:
push ebp
mov ebp, esp
push esi
push edi
mov ecx, -1 # Unlimited length
call __strncpy # Common code
mov eax, [8+ebp] # Return s1
pop edi
pop esi
pop ebp
ret

15
drivers/ddk/string/strlen.S Normal file

@@ -0,0 +1,15 @@
# strlen() Author: Kees J. Bot 1 Jan 1994
# size_t strlen(const char *s)
# Return the length of a string.
.intel_syntax
.globl _strlen
.text
.align 16
_strlen:
mov ecx, -1 # Unlimited length
jmp __strnlen # Common code

15
drivers/ddk/string/strncmp.S Normal file

@@ -0,0 +1,15 @@
# strncmp() Author: Kees J. Bot 1 Jan 1994
# int strncmp(const char *s1, const char *s2, size_t n)
# Compare two strings.
#
.intel_syntax
.globl _strncmp
.text
.align 16
_strncmp:
mov ecx, [esp+12] # Maximum length
jmp __strncmp # Common code

28
drivers/ddk/string/strncpy.S Normal file

@@ -0,0 +1,28 @@
# strncpy() Author: Kees J. Bot
# 1 Jan 1994
# char *strncpy(char *s1, const char *s2, size_t n)
# Copy string s2 to s1.
#
.intel_syntax
.text
.globl _strncpy
.align 16
_strncpy:
push ebp
mov ebp, esp
push esi
push edi
mov ecx, [ebp+16] # Maximum length
call __strncpy # Common code
mov ecx, edx # Number of bytes not copied
rep
stosb # strncpy always copies n bytes by null padding
mov eax, [ebp+8] # Return s1
pop edi
pop esi
pop ebp
ret

778
drivers/include/drm/drm.h Normal file

@@ -0,0 +1,778 @@
/**
* \file drm.h
* Header for the Direct Rendering Manager
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
*
* \par Acknowledgments:
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
*/
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _DRM_H_
#define _DRM_H_
#include <linux/types.h>
#include <errno-base.h>
typedef unsigned int drm_handle_t;
//#include <asm/ioctl.h> /* For _IO* macros */
#define DRM_MAJOR 226
#define DRM_MAX_MINOR 15
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
/**
* Cliprect.
*
* \warning: If you change this structure, make sure you change
* XF86DRIClipRectRec in the server as well
*
* \note KW: Actually it's illegal to change either for
* backwards-compatibility reasons.
*/
struct drm_clip_rect {
unsigned short x1;
unsigned short y1;
unsigned short x2;
unsigned short y2;
};
/**
* Drawable information.
*/
struct drm_drawable_info {
unsigned int num_rects;
struct drm_clip_rect *rects;
};
/**
* Texture region,
*/
struct drm_tex_region {
unsigned char next;
unsigned char prev;
unsigned char in_use;
unsigned char padding;
unsigned int age;
};
/**
* Hardware lock.
*
* The lock structure is a simple cache-line aligned integer. To avoid
* processor bus contention on a multiprocessor system, there should not be any
* other data stored in the same cache line.
*/
struct drm_hw_lock {
__volatile__ unsigned int lock; /**< lock variable */
char padding[60]; /**< Pad to cache line */
};
/**
* DRM_IOCTL_VERSION ioctl argument type.
*
* \sa drmGetVersion().
*/
struct drm_version {
int version_major; /**< Major version */
int version_minor; /**< Minor version */
int version_patchlevel; /**< Patch level */
size_t name_len; /**< Length of name buffer */
char __user *name; /**< Name of driver */
size_t date_len; /**< Length of date buffer */
char __user *date; /**< User-space buffer to hold date */
size_t desc_len; /**< Length of desc buffer */
char __user *desc; /**< User-space buffer to hold desc */
};
/**
* DRM_IOCTL_GET_UNIQUE ioctl argument type.
*
* \sa drmGetBusid() and drmSetBusId().
*/
struct drm_unique {
size_t unique_len; /**< Length of unique */
char __user *unique; /**< Unique name for driver instantiation */
};
struct drm_list {
int count; /**< Length of user-space structures */
struct drm_version __user *version;
};
struct drm_block {
int unused;
};
/**
* DRM_IOCTL_CONTROL ioctl argument type.
*
* \sa drmCtlInstHandler() and drmCtlUninstHandler().
*/
struct drm_control {
enum {
DRM_ADD_COMMAND,
DRM_RM_COMMAND,
DRM_INST_HANDLER,
DRM_UNINST_HANDLER
} func;
int irq;
};
/**
* Type of memory to map.
*/
enum drm_map_type {
_DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
_DRM_REGISTERS = 1, /**< no caching, no core dump */
_DRM_SHM = 2, /**< shared, cached */
_DRM_AGP = 3, /**< AGP/GART */
_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
_DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
_DRM_GEM = 6, /**< GEM object */
};
/**
* Memory mapping flags.
*/
enum drm_map_flags {
_DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
_DRM_READ_ONLY = 0x02,
_DRM_LOCKED = 0x04, /**< shared, cached, locked */
_DRM_KERNEL = 0x08, /**< kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
_DRM_REMOVABLE = 0x40, /**< Removable mapping */
_DRM_DRIVER = 0x80 /**< Managed by driver */
};
struct drm_ctx_priv_map {
unsigned int ctx_id; /**< Context requesting private mapping */
void *handle; /**< Handle of map */
};
/**
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
* argument type.
*
* \sa drmAddMap().
*/
struct drm_map {
unsigned long offset; /**< Requested physical address (0 for SAREA)*/
unsigned long size; /**< Requested physical size (bytes) */
enum drm_map_type type; /**< Type of memory to map */
enum drm_map_flags flags; /**< Flags */
void *handle; /**< User-space: "Handle" to pass to mmap() */
/**< Kernel-space: kernel-virtual address */
int mtrr; /**< MTRR slot used */
/* Private data */
};
/**
* DRM_IOCTL_GET_CLIENT ioctl argument type.
*/
struct drm_client {
int idx; /**< Which client desired? */
int auth; /**< Is client authenticated? */
unsigned long pid; /**< Process ID */
unsigned long uid; /**< User ID */
unsigned long magic; /**< Magic */
unsigned long iocs; /**< Ioctl count */
};
enum drm_stat_type {
_DRM_STAT_LOCK,
_DRM_STAT_OPENS,
_DRM_STAT_CLOSES,
_DRM_STAT_IOCTLS,
_DRM_STAT_LOCKS,
_DRM_STAT_UNLOCKS,
_DRM_STAT_VALUE, /**< Generic value */
_DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
_DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
_DRM_STAT_IRQ, /**< IRQ */
_DRM_STAT_PRIMARY, /**< Primary DMA bytes */
_DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
_DRM_STAT_DMA, /**< DMA */
_DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
_DRM_STAT_MISSED /**< Missed DMA opportunity */
/* Add to the *END* of the list */
};
/**
* DRM_IOCTL_GET_STATS ioctl argument type.
*/
struct drm_stats {
unsigned long count;
struct {
unsigned long value;
enum drm_stat_type type;
} data[15];
};
/**
* Hardware locking flags.
*/
enum drm_lock_flags {
_DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
_DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
_DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
_DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
/* These *HALT* flags aren't supported yet
-- they will be used to support the
full-screen DGA-like mode. */
_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
};
/**
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
*
* \sa drmGetLock() and drmUnlock().
*/
struct drm_lock {
int context;
enum drm_lock_flags flags;
};
/**
* DMA flags
*
* \warning
* These values \e must match xf86drm.h.
*
* \sa drm_dma.
*/
enum drm_dma_flags {
/* Flags for DMA buffer dispatch */
_DRM_DMA_BLOCK = 0x01, /**<
* Block until buffer dispatched.
*
* \note The buffer may not yet have
* been processed by the hardware --
* getting a hardware lock with the
* hardware quiescent will ensure
* that the buffer has been
* processed.
*/
_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
_DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
/* Flags for DMA buffer request */
_DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
_DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
};
/**
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
*
* \sa drmAddBufs().
*/
struct drm_buf_desc {
int count; /**< Number of buffers of this size */
int size; /**< Size in bytes */
int low_mark; /**< Low water mark */
int high_mark; /**< High water mark */
enum {
_DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
_DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
_DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
_DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
} flags;
unsigned long agp_start; /**<
* Start address of where the AGP buffers are
* in the AGP aperture
*/
};
/**
* DRM_IOCTL_INFO_BUFS ioctl argument type.
*/
struct drm_buf_info {
int count; /**< Entries in list */
struct drm_buf_desc __user *list;
};
/**
* DRM_IOCTL_FREE_BUFS ioctl argument type.
*/
struct drm_buf_free {
int count;
int __user *list;
};
/**
* Buffer information
*
* \sa drm_buf_map.
*/
struct drm_buf_pub {
int idx; /**< Index into the master buffer list */
int total; /**< Buffer size */
int used; /**< Amount of buffer in use (for DMA) */
void __user *address; /**< Address of buffer */
};
/**
* DRM_IOCTL_MAP_BUFS ioctl argument type.
*/
struct drm_buf_map {
int count; /**< Length of the buffer list */
void __user *virtual; /**< Mmap'd area in user-virtual */
struct drm_buf_pub __user *list; /**< Buffer information */
};
/**
* DRM_IOCTL_DMA ioctl argument type.
*
* Indices here refer to the offset into the buffer list in drm_buf_get.
*
* \sa drmDMA().
*/
struct drm_dma {
int context; /**< Context handle */
int send_count; /**< Number of buffers to send */
int __user *send_indices; /**< List of handles to buffers */
int __user *send_sizes; /**< Lengths of data to send */
enum drm_dma_flags flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
int __user *request_indices; /**< Buffer information */
int __user *request_sizes;
int granted_count; /**< Number of buffers granted */
};
enum drm_ctx_flags {
_DRM_CONTEXT_PRESERVED = 0x01,
_DRM_CONTEXT_2DONLY = 0x02
};
/**
* DRM_IOCTL_ADD_CTX ioctl argument type.
*
* \sa drmCreateContext() and drmDestroyContext().
*/
struct drm_ctx {
drm_context_t handle;
enum drm_ctx_flags flags;
};
/**
* DRM_IOCTL_RES_CTX ioctl argument type.
*/
struct drm_ctx_res {
int count;
struct drm_ctx __user *contexts;
};
/**
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
*/
struct drm_draw {
drm_drawable_t handle;
};
/**
* DRM_IOCTL_UPDATE_DRAW ioctl argument type.
*/
typedef enum {
DRM_DRAWABLE_CLIPRECTS,
} drm_drawable_info_type_t;
struct drm_update_draw {
drm_drawable_t handle;
unsigned int type;
unsigned int num;
unsigned long long data;
};
/**
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
*/
struct drm_auth {
drm_magic_t magic;
};
/**
* DRM_IOCTL_IRQ_BUSID ioctl argument type.
*
* \sa drmGetInterruptFromBusID().
*/
struct drm_irq_busid {
int irq; /**< IRQ number */
int busnum; /**< bus number */
int devnum; /**< device number */
int funcnum; /**< function number */
};
enum drm_vblank_seq_type {
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
_DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
};
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
struct drm_wait_vblank_request {
enum drm_vblank_seq_type type;
unsigned int sequence;
unsigned long signal;
};
struct drm_wait_vblank_reply {
enum drm_vblank_seq_type type;
unsigned int sequence;
long tval_sec;
long tval_usec;
};
/**
* DRM_IOCTL_WAIT_VBLANK ioctl argument type.
*
* \sa drmWaitVBlank().
*/
union drm_wait_vblank {
struct drm_wait_vblank_request request;
struct drm_wait_vblank_reply reply;
};
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2
/**
* DRM_IOCTL_MODESET_CTL ioctl argument type
*
* \sa drmModesetCtl().
*/
struct drm_modeset_ctl {
__u32 crtc;
__u32 cmd;
};
/**
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
* \sa drmAgpEnable().
*/
struct drm_agp_mode {
unsigned long mode; /**< AGP mode */
};
/**
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
*
* \sa drmAgpAlloc() and drmAgpFree().
*/
struct drm_agp_buffer {
unsigned long size; /**< In bytes -- will round to page boundary */
unsigned long handle; /**< Used for binding / unbinding */
unsigned long type; /**< Type of memory to allocate */
unsigned long physical; /**< Physical used by i810 */
};
/**
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
*
* \sa drmAgpBind() and drmAgpUnbind().
*/
struct drm_agp_binding {
unsigned long handle; /**< From drm_agp_buffer */
unsigned long offset; /**< In bytes -- will round to page boundary */
};
/**
* DRM_IOCTL_AGP_INFO ioctl argument type.
*
* \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
* drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
* drmAgpVendorId() and drmAgpDeviceId().
*/
struct drm_agp_info {
int agp_version_major;
int agp_version_minor;
unsigned long mode;
unsigned long aperture_base; /* physical address */
unsigned long aperture_size; /* bytes */
unsigned long memory_allowed; /* bytes */
unsigned long memory_used;
/* PCI information */
unsigned short id_vendor;
unsigned short id_device;
};
/**
* DRM_IOCTL_SG_ALLOC ioctl argument type.
*/
struct drm_scatter_gather {
unsigned long size; /**< In bytes -- will round to page boundary */
unsigned long handle; /**< Used for mapping / unmapping */
};
/**
* DRM_IOCTL_SET_VERSION ioctl argument type.
*/
struct drm_set_version {
int drm_di_major;
int drm_di_minor;
int drm_dd_major;
int drm_dd_minor;
};
/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
/** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
/** DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
/** Handle for the object being named */
__u32 handle;
/** Returned global name */
__u32 name;
};
/** DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
__u32 name;
/** Returned handle for the object */
__u32 handle;
/** Returned size of the object */
__u64 size;
};
#include "drm_mode.h"
/*
#define DRM_IOCTL_BASE 'd'
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e)
#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f)
#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder)
#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd)
#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
*/
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x99.
* Generic IOCTLS restart at 0xA0.
*
* \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
* drmCommandReadWrite().
*/
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
/**
* Header for events written back to userspace on the drm fd. The
* type defines the type of event, the length specifies the total
* length of the event (including the header), and user_data is
* typically a 64 bit value passed with the ioctl that triggered the
* event. A read on the drm fd will always only return complete
* events, that is, if for example the read buffer is 100 bytes, and
* there are two 64 byte events pending, only one will be returned.
*
* Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
* up are chipset specific.
*/
struct drm_event {
__u32 type;
__u32 length;
};
#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02
struct drm_event_vblank {
struct drm_event base;
__u64 user_data;
__u32 tv_sec;
__u32 tv_usec;
__u32 sequence;
__u32 reserved;
};
/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
#endif
#define mutex_lock(x)
#define mutex_unlock(x)
#endif
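
The drm_event comment above defines a simple framing protocol: each event starts with a type/length header, length includes the header, and a read() never splits an event. A hedged sketch of walking a buffer of events received from the drm fd (assuming this header is included):

static void handle_drm_events(const char *buf, int len)
{
    int i = 0;
    while (i + (int)sizeof(struct drm_event) <= len) {
        const struct drm_event *e = (const struct drm_event *)&buf[i];
        if (e->type == DRM_EVENT_VBLANK || e->type == DRM_EVENT_FLIP_COMPLETE) {
            const struct drm_event_vblank *vb = (const struct drm_event_vblank *)e;
            /* vb->sequence and vb->tv_sec/tv_usec describe the vblank */
            (void)vb;
        }
        i += e->length;   /* length covers the header, so this steps to the next event */
    }
}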

1604
drivers/include/drm/drmP.h Normal file

File diff suppressed because it is too large.

804
drivers/include/drm/drm_crtc.h Normal file

@@ -0,0 +1,804 @@
/*
* Copyright © 2006 Keith Packard
* Copyright © 2007-2008 Dave Airlie
* Copyright © 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __DRM_CRTC_H__
#define __DRM_CRTC_H__
#include <linux/i2c.h>
//#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/idr.h>
#include <linux/fb.h>
struct drm_device;
struct drm_mode_set;
struct drm_framebuffer;
#define DRM_MODE_OBJECT_CRTC 0xcccccccc
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
#define DRM_MODE_OBJECT_MODE 0xdededede
#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
struct drm_mode_object {
uint32_t id;
uint32_t type;
};
/*
* Note on terminology: here, for brevity and convenience, we refer to connector
* control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
* DVI, etc. And 'screen' refers to the whole of the visible display, which
* may span multiple monitors (and therefore multiple CRTC and connector
* structures).
*/
enum drm_mode_status {
MODE_OK = 0, /* Mode OK */
MODE_HSYNC, /* hsync out of range */
MODE_VSYNC, /* vsync out of range */
MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
MODE_V_ILLEGAL, /* mode has illegal vertical timings */
MODE_BAD_WIDTH, /* requires an unsupported linepitch */
MODE_NOMODE, /* no mode with a matching name */
MODE_NO_INTERLACE, /* interlaced mode not supported */
MODE_NO_DBLESCAN, /* doublescan mode not supported */
MODE_NO_VSCAN, /* multiscan mode not supported */
MODE_MEM, /* insufficient video memory */
MODE_VIRTUAL_X, /* mode width too large for specified virtual size */
MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */
MODE_MEM_VIRT, /* insufficient video memory given virtual size */
MODE_NOCLOCK, /* no fixed clock available */
MODE_CLOCK_HIGH, /* clock required is too high */
MODE_CLOCK_LOW, /* clock required is too low */
MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */
MODE_BAD_HVALUE, /* horizontal timing was out of range */
MODE_BAD_VVALUE, /* vertical timing was out of range */
MODE_BAD_VSCAN, /* VScan value out of range */
MODE_HSYNC_NARROW, /* horizontal sync too narrow */
MODE_HSYNC_WIDE, /* horizontal sync too wide */
MODE_HBLANK_NARROW, /* horizontal blanking too narrow */
MODE_HBLANK_WIDE, /* horizontal blanking too wide */
MODE_VSYNC_NARROW, /* vertical sync too narrow */
MODE_VSYNC_WIDE, /* vertical sync too wide */
MODE_VBLANK_NARROW, /* vertical blanking too narrow */
MODE_VBLANK_WIDE, /* vertical blanking too wide */
MODE_PANEL, /* exceeds panel dimensions */
MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
MODE_ONE_WIDTH, /* only one width is supported */
MODE_ONE_HEIGHT, /* only one height is supported */
MODE_ONE_SIZE, /* only one resolution is supported */
MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
MODE_UNVERIFIED = -3, /* mode needs to be reverified */
MODE_BAD = -2, /* unspecified reason */
MODE_ERROR = -1 /* error condition */
};
#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
DRM_MODE_TYPE_CRTC_C)
#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
.name = nm, .status = 0, .type = (t), .clock = (c), \
.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
.vscan = (vs), .flags = (f), .vrefresh = 0
#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
struct drm_display_mode {
/* Header */
struct list_head head;
struct drm_mode_object base;
char name[DRM_DISPLAY_MODE_LEN];
int connector_count;
enum drm_mode_status status;
int type;
/* Proposed mode values */
int clock; /* in kHz */
int hdisplay;
int hsync_start;
int hsync_end;
int htotal;
int hskew;
int vdisplay;
int vsync_start;
int vsync_end;
int vtotal;
int vscan;
unsigned int flags;
/* Addressable image size (may be 0 for projectors, etc.) */
int width_mm;
int height_mm;
/* Actual mode we give to hw */
int clock_index;
int synth_clock;
int crtc_hdisplay;
int crtc_hblank_start;
int crtc_hblank_end;
int crtc_hsync_start;
int crtc_hsync_end;
int crtc_htotal;
int crtc_hskew;
int crtc_vdisplay;
int crtc_vblank_start;
int crtc_vblank_end;
int crtc_vsync_start;
int crtc_vsync_end;
int crtc_vtotal;
int crtc_hadjusted;
int crtc_vadjusted;
/* Driver private mode info */
int private_size;
int *private;
int private_flags;
int vrefresh; /* in Hz */
int hsync; /* in kHz */
};
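/*
 * Illustrative sketch (not part of the original header): DRM_MODE() expands
 * to designated initializers for struct drm_display_mode, so drivers can
 * declare fixed mode tables. The timings below are the standard VESA
 * 1024x768@60Hz set; the type and flag constants are assumed to come from
 * drm_mode.h, which this header already relies on for DRM_DISPLAY_MODE_LEN.
 */
static const struct drm_display_mode drm_example_mode = {
	DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000,
		 1024, 1048, 1184, 1344, 0,
		 768, 771, 777, 806, 0,
		 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
};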
enum drm_connector_status {
connector_status_connected = 1,
connector_status_disconnected = 2,
connector_status_unknown = 3,
};
enum subpixel_order {
SubPixelUnknown = 0,
SubPixelHorizontalRGB,
SubPixelHorizontalBGR,
SubPixelVerticalRGB,
SubPixelVerticalBGR,
SubPixelNone,
};
/*
* Describes a given display (e.g. CRT or flat panel) and its limitations.
*/
struct drm_display_info {
char name[DRM_DISPLAY_INFO_LEN];
/* Input info */
bool serration_vsync;
bool sync_on_green;
bool composite_sync;
bool separate_syncs;
bool blank_to_black;
unsigned char video_level;
bool digital;
/* Physical size */
unsigned int width_mm;
unsigned int height_mm;
/* Display parameters */
unsigned char gamma; /* FIXME: storage format */
bool gtf_supported;
bool standard_color;
enum {
monochrome = 0,
rgb,
other,
unknown,
} display_type;
bool active_off_supported;
bool suspend_supported;
bool standby_supported;
/* Color info FIXME: storage format */
unsigned short redx, redy;
unsigned short greenx, greeny;
unsigned short bluex, bluey;
unsigned short whitex, whitey;
/* Clock limits FIXME: storage format */
unsigned int min_vfreq, max_vfreq;
unsigned int min_hfreq, max_hfreq;
unsigned int pixel_clock;
/* White point indices FIXME: storage format */
unsigned int wpx1, wpy1;
unsigned int wpgamma1;
unsigned int wpx2, wpy2;
unsigned int wpgamma2;
enum subpixel_order subpixel_order;
char *raw_edid; /* if any */
};
struct drm_framebuffer_funcs {
void (*destroy)(struct drm_framebuffer *framebuffer);
int (*create_handle)(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle);
/**
* Optional callback for the dirty fb ioctl.
*
* Userspace can notify the driver via this callback
* that an area of the framebuffer has changed and should
* be flushed to the display hardware.
*
* See the documentation of struct drm_mode_fb_dirty_cmd in
* drm_mode.h for more information; its semantics and arguments
* map one-to-one onto this callback.
*/
int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags,
unsigned color, struct drm_clip_rect *clips,
unsigned num_clips);
};
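/*
 * Illustrative sketch (not part of the original header): the rough shape of
 * a dirty() implementation and how a hypothetical driver would publish it.
 * The body is a stub; only the wiring of the ops table is shown.
 */
static int example_fb_dirty(struct drm_framebuffer *framebuffer, unsigned flags,
			    unsigned color, struct drm_clip_rect *clips,
			    unsigned num_clips)
{
	/*
	 * A real driver walks the num_clips rectangles in 'clips' (paired as
	 * src/dst when DRM_MODE_FB_DIRTY_ANNOTATE_COPY is set in 'flags')
	 * and flushes each region to the display hardware.
	 */
	return 0;
}

static const struct drm_framebuffer_funcs example_fb_funcs = {
	.dirty = example_fb_dirty,
};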
struct drm_framebuffer {
struct drm_device *dev;
struct list_head head;
struct drm_mode_object base;
const struct drm_framebuffer_funcs *funcs;
unsigned int pitch;
unsigned int width;
unsigned int height;
/* depth can be 15 or 16 */
unsigned int depth;
int bits_per_pixel;
int flags;
struct fb_info *fbdev;
u32 pseudo_palette[17];
struct list_head filp_head;
/* if you are using the helper */
void *helper_private;
};
struct drm_property_blob {
struct drm_mode_object base;
struct list_head head;
unsigned int length;
void *data;
};
struct drm_property_enum {
uint64_t value;
struct list_head head;
char name[DRM_PROP_NAME_LEN];
};
struct drm_property {
struct list_head head;
struct drm_mode_object base;
uint32_t flags;
char name[DRM_PROP_NAME_LEN];
uint32_t num_values;
uint64_t *values;
struct list_head enum_blob_list;
};
struct drm_crtc;
struct drm_connector;
struct drm_encoder;
struct drm_pending_vblank_event;
/**
* drm_crtc_funcs - control CRTCs for a given device
* @dpms: control display power levels
* @save: save CRTC state
* @restore: restore CRTC state
* @lock: lock the CRTC
* @unlock: unlock the CRTC
* @shadow_allocate: allocate shadow pixmap
* @shadow_create: create shadow pixmap for rotation support
* @shadow_destroy: free shadow pixmap
* @mode_fixup: fixup proposed mode
* @mode_set: set the desired mode on the CRTC
* @gamma_set: specify color ramp for CRTC
* @destroy: deinit and free object.
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
* CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc.
* connectors, not just CRTs).
*
* Each driver is responsible for filling out this structure at startup time,
* in addition to providing other modesetting features, like i2c and DDC
* bus accessors.
*/
struct drm_crtc_funcs {
/* Save CRTC state */
void (*save)(struct drm_crtc *crtc); /* suspend? */
/* Restore CRTC state */
void (*restore)(struct drm_crtc *crtc); /* resume? */
/* cursor controls */
int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height);
int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
/* Set gamma on the CRTC */
void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t size);
/* Object destroy routine */
void (*destroy)(struct drm_crtc *crtc);
int (*set_config)(struct drm_mode_set *set);
/*
* Flip to the given framebuffer. This implements the page
* flip ioctl described in drm_mode.h; specifically, the
* implementation must return immediately and block all
* rendering to the current fb until the flip has completed.
* If userspace set the event flag in the ioctl, the event
* argument will point to an event to send back when the flip
* completes, otherwise it will be NULL.
*/
int (*page_flip)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
};
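/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * driver's CRTC ops table. The stubs only show the wiring; a real driver
 * frees its per-CRTC state in destroy() (usually ending with
 * drm_crtc_cleanup(), declared further below) and applies the requested
 * configuration in set_config().
 */
static void example_crtc_destroy(struct drm_crtc *crtc)
{
	/* release driver-private CRTC state here */
}

static int example_crtc_set_config(struct drm_mode_set *set)
{
	return 0;	/* a real driver programs the new mode/fb/connectors here */
}

static const struct drm_crtc_funcs example_crtc_funcs = {
	.destroy    = example_crtc_destroy,
	.set_config = example_crtc_set_config,
};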
/**
* drm_crtc - central CRTC control structure
* @enabled: is this CRTC enabled?
* @x: x position on screen
* @y: y position on screen
* @desired_mode: new desired mode
* @desired_x: desired x for desired_mode
* @desired_y: desired y for desired_mode
* @funcs: CRTC control functions
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
*/
struct drm_crtc {
struct drm_device *dev;
struct list_head head;
struct drm_mode_object base;
/* framebuffer the connector is currently bound to */
struct drm_framebuffer *fb;
bool enabled;
struct drm_display_mode mode;
int x, y;
struct drm_display_mode *desired_mode;
int desired_x, desired_y;
const struct drm_crtc_funcs *funcs;
/* CRTC gamma size for reporting to userspace */
uint32_t gamma_size;
uint16_t *gamma_store;
/* if you are using the helper */
void *helper_private;
};
/**
* drm_connector_funcs - control connectors on a given device
* @dpms: set power state (see drm_crtc_funcs above)
* @save: save connector state
* @restore: restore connector state
* @mode_valid: is this mode valid on the given connector?
* @mode_fixup: try to fixup proposed mode for this connector
* @mode_set: set this mode
* @detect: is this connector active?
* @get_modes: get mode list for this connector
* @set_property: property for this connector may need update
* @destroy: make object go away
* @force: notify the driver the connector is forced on
*
* Each CRTC may have one or more connectors attached to it. The functions
* below allow the core DRM code to control connectors, enumerate available modes,
* etc.
*/
struct drm_connector_funcs {
void (*dpms)(struct drm_connector *connector, int mode);
void (*save)(struct drm_connector *connector);
void (*restore)(struct drm_connector *connector);
enum drm_connector_status (*detect)(struct drm_connector *connector);
int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
int (*set_property)(struct drm_connector *connector, struct drm_property *property,
uint64_t val);
void (*destroy)(struct drm_connector *connector);
void (*force)(struct drm_connector *connector);
};
struct drm_encoder_funcs {
void (*destroy)(struct drm_encoder *encoder);
};
#define DRM_CONNECTOR_MAX_UMODES 16
#define DRM_CONNECTOR_MAX_PROPERTY 16
#define DRM_CONNECTOR_LEN 32
#define DRM_CONNECTOR_MAX_ENCODER 2
/**
* drm_encoder - central DRM encoder structure
*/
struct drm_encoder {
struct drm_device *dev;
struct list_head head;
struct drm_mode_object base;
int encoder_type;
uint32_t possible_crtcs;
uint32_t possible_clones;
struct drm_crtc *crtc;
const struct drm_encoder_funcs *funcs;
void *helper_private;
};
enum drm_connector_force {
DRM_FORCE_UNSPECIFIED,
DRM_FORCE_OFF,
DRM_FORCE_ON, /* force on analog part normally */
DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
};
/**
* drm_connector - central DRM connector control structure
* @crtc: CRTC this connector is currently connected to, NULL if none
* @interlace_allowed: can this connector handle interlaced modes?
* @doublescan_allowed: can this connector handle doublescan?
* @available_modes: modes available on this connector (from get_modes() + user)
* @initial_x: initial x position for this connector
* @initial_y: initial y position for this connector
* @status: connector connected?
* @funcs: connector control functions
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
* another connector if they can share a CRTC. Each connector also has a specific
* position in the broader display (referred to as a 'screen' though it could
* span multiple monitors).
*/
struct drm_connector {
struct drm_device *dev;
// struct device kdev;
struct device_attribute *attr;
struct list_head head;
struct drm_mode_object base;
int connector_type;
int connector_type_id;
bool interlace_allowed;
bool doublescan_allowed;
struct list_head modes; /* list of modes on this connector */
int initial_x, initial_y;
enum drm_connector_status status;
/* these are modes added by probing with DDC or the BIOS */
struct list_head probed_modes;
struct drm_display_info display_info;
const struct drm_connector_funcs *funcs;
struct list_head user_modes;
struct drm_property_blob *edid_blob_ptr;
u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY];
uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
/* requested DPMS state */
int dpms;
void *helper_private;
/* forced on connector */
enum drm_connector_force force;
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
uint32_t force_encoder_id;
struct drm_encoder *encoder; /* currently active encoder */
void *fb_helper_private;
};
/**
* struct drm_mode_set
*
* Represents a single CRTC, the connectors it drives, the mode it uses,
* and the framebuffer it scans out from.
*
* This is used to set modes.
*/
struct drm_mode_set {
struct list_head head;
struct drm_framebuffer *fb;
struct drm_crtc *crtc;
struct drm_display_mode *mode;
uint32_t x;
uint32_t y;
struct drm_connector **connectors;
size_t num_connectors;
};
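/*
 * Illustrative sketch (not part of the original header): building a minimal
 * single-connector modeset request from the structures above and handing it
 * to the CRTC's set_config() hook. The connectors array lives on the
 * caller's stack only for the duration of the call.
 */
static inline int drm_example_set_single_mode(struct drm_crtc *crtc,
					      struct drm_connector *connector,
					      struct drm_display_mode *mode,
					      struct drm_framebuffer *fb)
{
	struct drm_connector *connectors[1] = { connector };
	struct drm_mode_set set = {
		.crtc = crtc,
		.fb = fb,
		.mode = mode,
		.connectors = connectors,
		.num_connectors = 1,
	};

	return crtc->funcs->set_config(&set);
}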
/**
* struct drm_mode_config_funcs - configure CRTCs for a given screen layout
* @resize: adjust CRTCs as necessary for the proposed layout
*
* Currently only a resize hook is available. DRM will call back into the
* driver with a new screen width and height. If the driver can't support
* the proposed size, it can return false. Otherwise it should adjust
* the CRTC<->connector mappings as needed and update its view of the screen.
*/
struct drm_mode_config_funcs {
struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd);
int (*fb_changed)(struct drm_device *dev);
};
struct drm_mode_group {
uint32_t num_crtcs;
uint32_t num_encoders;
uint32_t num_connectors;
/* list of object IDs for this group */
uint32_t *id_list;
};
/**
* drm_mode_config - Mode configuration control structure
*
*/
struct drm_mode_config {
// struct mutex mutex; /* protects configuration (mode lists etc.) */
// struct mutex idr_mutex; /* for IDR management */
struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
/* this is limited to one for now */
int num_fb;
struct list_head fb_list;
int num_connector;
struct list_head connector_list;
int num_encoder;
struct list_head encoder_list;
int num_crtc;
struct list_head crtc_list;
struct list_head property_list;
/* in-kernel framebuffers - hung off filp_head in drm_framebuffer */
struct list_head fb_kernel_list;
int min_width, min_height;
int max_width, max_height;
struct drm_mode_config_funcs *funcs;
resource_size_t fb_base;
/* pointers to standard properties */
struct list_head property_blob_list;
struct drm_property *edid_property;
struct drm_property *dpms_property;
/* DVI-I properties */
struct drm_property *dvi_i_subconnector_property;
struct drm_property *dvi_i_select_subconnector_property;
/* TV properties */
struct drm_property *tv_subconnector_property;
struct drm_property *tv_select_subconnector_property;
struct drm_property *tv_mode_property;
struct drm_property *tv_left_margin_property;
struct drm_property *tv_right_margin_property;
struct drm_property *tv_top_margin_property;
struct drm_property *tv_bottom_margin_property;
struct drm_property *tv_brightness_property;
struct drm_property *tv_contrast_property;
struct drm_property *tv_flicker_reduction_property;
struct drm_property *tv_overscan_property;
struct drm_property *tv_saturation_property;
struct drm_property *tv_hue_property;
/* Optional properties */
struct drm_property *scaling_mode_property;
struct drm_property *dithering_mode_property;
struct drm_property *dirty_info_property;
};
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
#define obj_to_property(x) container_of(x, struct drm_property, base)
#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
extern void drm_crtc_init(struct drm_device *dev,
struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs);
extern void drm_crtc_cleanup(struct drm_crtc *crtc);
extern void drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type);
extern void drm_connector_cleanup(struct drm_connector *connector);
extern void drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type);
extern void drm_encoder_cleanup(struct drm_encoder *encoder);
extern char *drm_get_connector_name(struct drm_connector *connector);
extern char *drm_get_dpms_name(int val);
extern char *drm_get_dvi_i_subconnector_name(int val);
extern char *drm_get_dvi_i_select_name(int val);
extern char *drm_get_tv_subconnector_name(int val);
extern char *drm_get_tv_select_name(int val);
//extern void drm_fb_release(struct drm_file *file_priv);
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
//extern int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
// unsigned char *buf, int len);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
struct drm_display_mode *mode);
extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
extern void drm_mode_config_init(struct drm_device *dev);
extern void drm_mode_config_cleanup(struct drm_device *dev);
extern void drm_mode_set_name(struct drm_display_mode *mode);
extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
extern int drm_mode_width(struct drm_display_mode *mode);
extern int drm_mode_height(struct drm_display_mode *mode);
/* for use by the fb module */
extern int drm_mode_attachmode_crtc(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_display_mode *mode);
extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode);
extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
extern void drm_mode_list_concat(struct list_head *head,
struct list_head *new);
extern void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
int maxX, int maxY, int maxPitch);
extern void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
extern void drm_mode_sort(struct list_head *mode_list);
extern int drm_mode_hsync(struct drm_display_mode *mode);
extern int drm_mode_vrefresh(struct drm_display_mode *mode);
extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
int adjust_flags);
extern void drm_mode_connector_list_update(struct drm_connector *connector);
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid);
extern int drm_connector_property_set_value(struct drm_connector *connector,
struct drm_property *property,
uint64_t value);
extern int drm_connector_property_get_value(struct drm_connector *connector,
struct drm_property *property,
uint64_t *value);
extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
extern void drm_framebuffer_set_object(struct drm_device *dev,
unsigned long handle);
extern int drm_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs);
extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
extern bool drm_crtc_in_use(struct drm_crtc *crtc);
extern int drm_connector_attach_property(struct drm_connector *connector,
struct drm_property *property, uint64_t init_val);
extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values);
extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
extern int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
char *formats[]);
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_dithering_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
extern char *drm_get_encoder_name(struct drm_encoder *encoder);
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size);
extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type);
/* IOCTLs */
extern int drm_mode_getresources(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getcrtc(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getconnector(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_setcrtc(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_cursor_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_addfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_rmfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_addmode_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_attachmode_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_detachmode_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_hotplug_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_replacefb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getencoder(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern bool drm_detect_hdmi_monitor(struct edid *edid);
extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
int hdisplay, int vdisplay, int vrefresh,
bool reduced, bool interlaced, bool margins);
extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
int hdisplay, int vdisplay, int vrefresh,
bool interlaced, int margins);
extern int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
#endif /* __DRM_CRTC_H__ */

View File

@ -0,0 +1,134 @@
/*
* Copyright © 2006 Keith Packard
* Copyright © 2007-2008 Dave Airlie
* Copyright © 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* The DRM mode setting helper functions are common code for drivers to use if
* they wish. Drivers are not forced to use this code in their
* implementations, but it would be useful if the code they do use at least
* provides a consistent interface and operation to userspace.
*/
#ifndef __DRM_CRTC_HELPER_H__
#define __DRM_CRTC_HELPER_H__
//#include <linux/spinlock.h>
#include <linux/types.h>
//#include <linux/idr.h>
#include <linux/fb.h>
#include "drm_fb_helper.h"
struct drm_crtc_helper_funcs {
/*
* Control power levels on the CRTC. If the mode passed in is
* unsupported, the provider must use the next lowest power level.
*/
void (*dpms)(struct drm_crtc *crtc, int mode);
void (*prepare)(struct drm_crtc *crtc);
void (*commit)(struct drm_crtc *crtc);
/* Provider can fixup or change mode timings before modeset occurs */
bool (*mode_fixup)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/* Actually set the mode */
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb);
/* Move the crtc on the current fb to the given position *optional* */
int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
/* reload the current crtc LUT */
void (*load_lut)(struct drm_crtc *crtc);
};
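/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * driver-side helper vtable. Once attached with drm_crtc_helper_add()
 * (defined below), the helper core invokes mode_fixup, prepare, mode_set and
 * commit in that order from drm_crtc_helper_set_mode(). The stubs only show
 * the shape of the hooks.
 */
static bool example_crtc_mode_fixup(struct drm_crtc *crtc,
				    struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	return true;	/* accept the requested timings unchanged */
}

static int example_crtc_mode_set(struct drm_crtc *crtc,
				 struct drm_display_mode *mode,
				 struct drm_display_mode *adjusted_mode,
				 int x, int y, struct drm_framebuffer *old_fb)
{
	return 0;	/* a real driver programs the CRTC timing registers here */
}

static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
	.mode_fixup = example_crtc_mode_fixup,
	.mode_set   = example_crtc_mode_set,
};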
struct drm_encoder_helper_funcs {
void (*dpms)(struct drm_encoder *encoder, int mode);
void (*save)(struct drm_encoder *encoder);
void (*restore)(struct drm_encoder *encoder);
bool (*mode_fixup)(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*prepare)(struct drm_encoder *encoder);
void (*commit)(struct drm_encoder *encoder);
void (*mode_set)(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
/* detect for DAC style encoders */
enum drm_connector_status (*detect)(struct drm_encoder *encoder,
struct drm_connector *connector);
/* disable encoder when not in use - more explicit than dpms off */
void (*disable)(struct drm_encoder *encoder);
};
struct drm_connector_helper_funcs {
int (*get_modes)(struct drm_connector *connector);
int (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode);
struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
};
extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
extern void drm_helper_disable_unused_functions(struct drm_device *dev);
extern int drm_helper_hotplug_stage_two(struct drm_device *dev);
extern bool drm_helper_initial_config(struct drm_device *dev);
extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y,
struct drm_framebuffer *old_fb);
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd *mode_cmd);
static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
const struct drm_crtc_helper_funcs *funcs)
{
crtc->helper_private = (void *)funcs;
}
static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
const struct drm_encoder_helper_funcs *funcs)
{
encoder->helper_private = (void *)funcs;
}
static inline int drm_connector_helper_add(struct drm_connector *connector,
const struct drm_connector_helper_funcs *funcs)
{
connector->helper_private = (void *)funcs;
return drm_fb_helper_add_connector(connector);
}
extern int drm_helper_resume_force_mode(struct drm_device *dev);
#endif

View File

@ -0,0 +1,180 @@
/*
* Copyright © 2008 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#ifndef _DRM_DP_HELPER_H_
#define _DRM_DP_HELPER_H_
/* From the VESA DisplayPort spec */
#define AUX_NATIVE_WRITE 0x8
#define AUX_NATIVE_READ 0x9
#define AUX_I2C_WRITE 0x0
#define AUX_I2C_READ 0x1
#define AUX_I2C_STATUS 0x2
#define AUX_I2C_MOT 0x4
#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
#define AUX_I2C_REPLY_ACK (0x0 << 6)
#define AUX_I2C_REPLY_NACK (0x1 << 6)
#define AUX_I2C_REPLY_DEFER (0x2 << 6)
#define AUX_I2C_REPLY_MASK (0x3 << 6)
/* AUX CH addresses */
/* DPCD */
#define DP_DPCD_REV 0x000
#define DP_MAX_LINK_RATE 0x001
#define DP_MAX_LANE_COUNT 0x002
# define DP_MAX_LANE_COUNT_MASK 0x1f
# define DP_ENHANCED_FRAME_CAP (1 << 7)
#define DP_MAX_DOWNSPREAD 0x003
# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
#define DP_NORP 0x004
#define DP_DOWNSTREAMPORT_PRESENT 0x005
# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
/* 00b = DisplayPort */
/* 01b = Analog */
/* 10b = TMDS or HDMI */
/* 11b = Other */
# define DP_FORMAT_CONVERSION (1 << 3)
#define DP_MAIN_LINK_CHANNEL_CODING 0x006
/* link configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
#define DP_TRAINING_PATTERN_SET 0x102
# define DP_TRAINING_PATTERN_DISABLE 0
# define DP_TRAINING_PATTERN_1 1
# define DP_TRAINING_PATTERN_2 2
# define DP_TRAINING_PATTERN_MASK 0x3
# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
#define DP_TRAINING_LANE0_SET 0x103
#define DP_TRAINING_LANE1_SET 0x104
#define DP_TRAINING_LANE2_SET 0x105
#define DP_TRAINING_LANE3_SET 0x106
# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
#define DP_DOWNSPREAD_CTRL 0x107
# define DP_SPREAD_AMP_0_5 (1 << 4)
#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
# define DP_SET_ANSI_8B10B (1 << 0)
#define DP_LANE0_1_STATUS 0x202
#define DP_LANE2_3_STATUS 0x203
# define DP_LANE_CR_DONE (1 << 0)
# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
# define DP_LANE_SYMBOL_LOCKED (1 << 2)
#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \
DP_LANE_CHANNEL_EQ_DONE | \
DP_LANE_SYMBOL_LOCKED)
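/*
 * Illustrative sketch (not part of the original header): consuming the lane
 * status bits above. 'link_status' is assumed to hold the bytes read from
 * DPCD offset DP_LANE0_1_STATUS onward; each byte carries two lanes, four
 * bits per lane, with odd lanes in the high nibble.
 */
static inline bool dp_example_lane_channel_eq_ok(const u8 *link_status, int lane)
{
	u8 status = link_status[lane >> 1];

	if (lane & 1)
		status >>= 4;

	return (status & DP_CHANNEL_EQ_BITS) == DP_CHANNEL_EQ_BITS;
}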
#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
#define DP_INTERLANE_ALIGN_DONE (1 << 0)
#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
#define DP_LINK_STATUS_UPDATED (1 << 7)
#define DP_SINK_STATUS 0x205
#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
#define DP_ADJUST_REQUEST_LANE0_1 0x206
#define DP_ADJUST_REQUEST_LANE2_3 0x207
# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
#define DP_SET_POWER 0x600
# define DP_SET_POWER_D0 0x1
# define DP_SET_POWER_D3 0x2
#define MODE_I2C_START 1
#define MODE_I2C_WRITE 2
#define MODE_I2C_READ 4
#define MODE_I2C_STOP 8
struct i2c_algo_dp_aux_data {
bool running;
u16 address;
int (*aux_ch) (struct i2c_adapter *adapter,
int mode, uint8_t write_byte,
uint8_t *read_byte);
};
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
#endif /* _DRM_DP_HELPER_H_ */

View File

@ -0,0 +1,204 @@
/*
* Copyright © 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __DRM_EDID_H__
#define __DRM_EDID_H__
#include <linux/types.h>
#define EDID_LENGTH 128
#define DDC_ADDR 0x50
struct est_timings {
u8 t1;
u8 t2;
u8 mfg_rsvd;
} __attribute__((packed));
/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
#define EDID_TIMING_ASPECT_SHIFT 6
#define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT)
/* need to add 60 */
#define EDID_TIMING_VFREQ_SHIFT 0
#define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT)
struct std_timing {
u8 hsize; /* need to multiply by 8 then add 248 */
u8 vfreq_aspect;
} __attribute__((packed));
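/*
 * Illustrative sketch (not part of the original header): decoding a standard
 * timing entry per the comments above. The vertical size follows from the
 * aspect-ratio bits and is left to the caller.
 */
static inline void edid_example_decode_std_timing(const struct std_timing *t,
						  int *hsize, int *vfreq)
{
	*hsize = t->hsize * 8 + 248;			/* active pixels per line */
	*vfreq = ((t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
		  >> EDID_TIMING_VFREQ_SHIFT) + 60;	/* refresh in Hz */
}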
#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3)
#define DRM_EDID_PT_STEREO (1 << 5)
#define DRM_EDID_PT_INTERLACED (1 << 7)
/* If detailed data is pixel timing */
struct detailed_pixel_timing {
u8 hactive_lo;
u8 hblank_lo;
u8 hactive_hblank_hi;
u8 vactive_lo;
u8 vblank_lo;
u8 vactive_vblank_hi;
u8 hsync_offset_lo;
u8 hsync_pulse_width_lo;
u8 vsync_offset_pulse_width_lo;
u8 hsync_vsync_offset_pulse_width_hi;
u8 width_mm_lo;
u8 height_mm_lo;
u8 width_height_mm_hi;
u8 hborder;
u8 vborder;
u8 misc;
} __attribute__((packed));
/* If it's not pixel timing, it'll be one of the below */
struct detailed_data_string {
u8 str[13];
} __attribute__((packed));
struct detailed_data_monitor_range {
u8 min_vfreq;
u8 max_vfreq;
u8 min_hfreq_khz;
u8 max_hfreq_khz;
u8 pixel_clock_mhz; /* need to multiply by 10 */
__le16 sec_gtf_toggle; /* A000=use above, 20=use below */
u8 hfreq_start_khz; /* need to multiply by 2 */
u8 c; /* need to divide by 2 */
__le16 m;
u8 k;
u8 j; /* need to divide by 2 */
} __attribute__((packed));
struct detailed_data_wpindex {
u8 white_yx_lo; /* Lower 2 bits each */
u8 white_x_hi;
u8 white_y_hi;
u8 gamma; /* need to divide by 100 then add 1 */
} __attribute__((packed));
struct detailed_data_color_point {
u8 windex1;
u8 wpindex1[3];
u8 windex2;
u8 wpindex2[3];
} __attribute__((packed));
struct cvt_timing {
u8 code[3];
} __attribute__((packed));
struct detailed_non_pixel {
u8 pad1;
u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
fb=color point data, fa=standard timing data,
f9=undefined, f8=mfg. reserved */
u8 pad2;
union {
struct detailed_data_string str;
struct detailed_data_monitor_range range;
struct detailed_data_wpindex color;
struct std_timing timings[5];
struct cvt_timing cvt[4];
} data;
} __attribute__((packed));
#define EDID_DETAIL_EST_TIMINGS 0xf7
#define EDID_DETAIL_CVT_3BYTE 0xf8
#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
#define EDID_DETAIL_STD_MODES 0xfa
#define EDID_DETAIL_MONITOR_CPDATA 0xfb
#define EDID_DETAIL_MONITOR_NAME 0xfc
#define EDID_DETAIL_MONITOR_RANGE 0xfd
#define EDID_DETAIL_MONITOR_STRING 0xfe
#define EDID_DETAIL_MONITOR_SERIAL 0xff
struct detailed_timing {
__le16 pixel_clock; /* need to multiply by 10 KHz */
union {
struct detailed_pixel_timing pixel_data;
struct detailed_non_pixel other_data;
} data;
} __attribute__((packed));
#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1)
#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2)
#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3)
#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
#define DRM_EDID_INPUT_DIGITAL (1 << 7) /* bits below must be zero if set */
#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5)
#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
struct edid {
u8 header[8];
/* Vendor & product info */
u8 mfg_id[2];
u8 prod_code[2];
u32 serial; /* FIXME: byte order */
u8 mfg_week;
u8 mfg_year;
/* EDID version */
u8 version;
u8 revision;
/* Display info: */
u8 input;
u8 width_cm;
u8 height_cm;
u8 gamma;
u8 features;
/* Color characteristics */
u8 red_green_lo;
u8 black_white_lo;
u8 red_x;
u8 red_y;
u8 green_x;
u8 green_y;
u8 blue_x;
u8 blue_y;
u8 white_x;
u8 white_y;
/* Est. timings and mfg rsvd timings*/
struct est_timings established_timings;
/* Standard timings 1-8*/
struct std_timing standard_timings[8];
/* Detailed timings 1-4 */
struct detailed_timing detailed_timings[4];
/* Number of 128 byte ext. blocks */
u8 extensions;
/* Checksum */
u8 checksum;
} __attribute__((packed));
#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
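/*
 * Illustrative sketch (not part of the original header): a base EDID block is
 * considered valid when it starts with the fixed 00 FF FF FF FF FF FF 00
 * header and all EDID_LENGTH bytes sum to zero modulo 256 (the checksum byte
 * is chosen to make that true).
 */
static inline bool edid_example_block_valid(const struct edid *e)
{
	const u8 *raw = (const u8 *)e;
	u8 csum = 0;
	int i;

	if (raw[0] != 0x00 || raw[7] != 0x00)
		return false;
	for (i = 1; i < 7; i++)
		if (raw[i] != 0xff)
			return false;

	for (i = 0; i < EDID_LENGTH; i++)
		csum += raw[i];

	return csum == 0;
}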
#endif /* __DRM_EDID_H__ */

View File

@ -0,0 +1,111 @@
/*
* Copyright (c) 2006-2009 Red Hat Inc.
* Copyright (c) 2006-2008 Intel Corporation
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
*
* DRM framebuffer helper functions
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*
* Authors:
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
#ifndef DRM_FB_HELPER_H
#define DRM_FB_HELPER_H
struct drm_fb_helper_crtc {
uint32_t crtc_id;
struct drm_mode_set mode_set;
};
struct drm_fb_helper_funcs {
void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
};
/* mode specified on the command line */
struct drm_fb_helper_cmdline_mode {
bool specified;
bool refresh_specified;
bool bpp_specified;
int xres, yres;
int bpp;
int refresh;
bool rb;
bool interlace;
bool cvt;
bool margins;
};
struct drm_fb_helper_connector {
struct drm_fb_helper_cmdline_mode cmdline_mode;
};
struct drm_fb_helper {
struct drm_framebuffer *fb;
struct drm_device *dev;
struct drm_display_mode *mode;
int crtc_count;
struct drm_fb_helper_crtc *crtc_info;
struct drm_fb_helper_funcs *funcs;
int conn_limit;
struct list_head kernel_fb_list;
};
int drm_fb_helper_single_fb_probe(struct drm_device *dev,
int preferred_bpp,
int (*fb_create)(struct drm_device *dev,
uint32_t fb_width,
uint32_t fb_height,
uint32_t surface_width,
uint32_t surface_height,
uint32_t surface_depth,
uint32_t surface_bpp,
struct drm_framebuffer **fb_ptr));
int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count,
int max_conn);
void drm_fb_helper_free(struct drm_fb_helper *helper);
int drm_fb_helper_blank(int blank, struct fb_info *info);
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info);
int drm_fb_helper_set_par(struct fb_info *info);
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
int drm_fb_helper_setcolreg(unsigned regno,
unsigned red,
unsigned green,
unsigned blue,
unsigned transp,
struct fb_info *info);
void drm_fb_helper_restore(void);
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
uint32_t fb_width, uint32_t fb_height);
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth);
int drm_fb_helper_add_connector(struct drm_connector *connector);
int drm_fb_helper_parse_command_line(struct drm_device *dev);
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
#endif

View File

@ -0,0 +1,69 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
* Simple open hash tab implementation.
*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#ifndef DRM_HASHTAB_H
#define DRM_HASHTAB_H
#include <linux/list.h>
#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
struct drm_hash_item {
struct hlist_node head;
unsigned long key;
};
struct drm_open_hash {
unsigned int size;
unsigned int order;
unsigned int fill;
struct hlist_head *table;
int use_vmalloc;
};
extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
unsigned long seed, int bits, int shift,
unsigned long add);
extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
extern void drm_ht_remove(struct drm_open_hash *ht);
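/*
 * Illustrative sketch (not part of the original header): users embed
 * struct drm_hash_item in their own objects, insert them keyed by handle,
 * and recover the object from a lookup with drm_hash_entry().
 * 'struct example_object' is hypothetical.
 */
struct example_object {
	unsigned long handle;
	struct drm_hash_item hash;
};

static inline struct example_object *
example_object_lookup(struct drm_open_hash *ht, unsigned long handle)
{
	struct drm_hash_item *item;

	if (drm_ht_find_item(ht, handle, &item) != 0)
		return NULL;	/* no entry with this key */

	return drm_hash_entry(item, struct example_object, hash);
}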
#endif

View File

@ -0,0 +1,141 @@
/**************************************************************************
*
* Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
* Authors:
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
*/
#ifndef _DRM_MM_H_
#define _DRM_MM_H_
/*
* Generic range manager structs
*/
#include <linux/list.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif
struct drm_mm_node {
struct list_head fl_entry;
struct list_head ml_entry;
int free;
unsigned long start;
unsigned long size;
struct drm_mm *mm;
void *private;
};
struct drm_mm {
struct list_head fl_entry;
struct list_head ml_entry;
struct list_head unused_nodes;
int num_unused;
spinlock_t unused_lock;
};
/*
* Basic range manager support (drm_mm.c)
*/
extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
int atomic);
extern struct drm_mm_node *drm_mm_get_block_range_generic(
struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end,
int atomic);
static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
unsigned long size,
unsigned alignment)
{
return drm_mm_get_block_generic(parent, size, alignment, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
unsigned long size,
unsigned alignment)
{
return drm_mm_get_block_generic(parent, size, alignment, 1);
}
static inline struct drm_mm_node *drm_mm_get_block_range(
struct drm_mm_node *parent,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end)
{
return drm_mm_get_block_range_generic(parent, size, alignment,
start, end, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
struct drm_mm_node *parent,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end)
{
return drm_mm_get_block_range_generic(parent, size, alignment,
start, end, 1);
}
extern void drm_mm_put_block(struct drm_mm_node *cur);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
int best_match);
extern struct drm_mm_node *drm_mm_search_free_in_range(
const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end,
int best_match);
extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
extern int drm_mm_clean(struct drm_mm *mm);
extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
unsigned long size);
extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
unsigned long size, int atomic);
extern int drm_mm_pre_get(struct drm_mm *mm);
static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
{
return block->mm;
}
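/*
 * Illustrative sketch (not part of the original header): the usual two-step
 * allocation pattern with the range manager above. Callers are expected to
 * hold whatever lock protects the manager across the search/get pair.
 */
static inline struct drm_mm_node *
drm_mm_example_alloc(struct drm_mm *mm, unsigned long size, unsigned alignment)
{
	struct drm_mm_node *hole;

	/* find a free hole large enough ... */
	hole = drm_mm_search_free(mm, size, alignment, 0);
	if (!hole)
		return NULL;

	/* ... then carve the requested block out of it */
	return drm_mm_get_block(hole, size, alignment);
}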
extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
#endif
#endif

View File

@ -0,0 +1,346 @@
/*
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
* Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
* Copyright (c) 2008 Red Hat Inc.
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
* Copyright (c) 2007-2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef _DRM_MODE_H
#define _DRM_MODE_H
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
#define DRM_MODE_TYPE_BUILTIN (1<<0)
#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_PREFERRED (1<<3)
#define DRM_MODE_TYPE_DEFAULT (1<<4)
#define DRM_MODE_TYPE_USERDEF (1<<5)
#define DRM_MODE_TYPE_DRIVER (1<<6)
/* Video mode flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_FLAG_PHSYNC (1<<0)
#define DRM_MODE_FLAG_NHSYNC (1<<1)
#define DRM_MODE_FLAG_PVSYNC (1<<2)
#define DRM_MODE_FLAG_NVSYNC (1<<3)
#define DRM_MODE_FLAG_INTERLACE (1<<4)
#define DRM_MODE_FLAG_DBLSCAN (1<<5)
#define DRM_MODE_FLAG_CSYNC (1<<6)
#define DRM_MODE_FLAG_PCSYNC (1<<7)
#define DRM_MODE_FLAG_NCSYNC (1<<8)
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
#define DRM_MODE_FLAG_BCAST (1<<10)
#define DRM_MODE_FLAG_PIXMUX (1<<11)
#define DRM_MODE_FLAG_DBLCLK (1<<12)
#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
/* DPMS flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_DPMS_ON 0
#define DRM_MODE_DPMS_STANDBY 1
#define DRM_MODE_DPMS_SUSPEND 2
#define DRM_MODE_DPMS_OFF 3
/* Scaling mode options */
#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
software can still scale) */
#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
/* Dirty info options */
#define DRM_MODE_DIRTY_OFF 0
#define DRM_MODE_DIRTY_ON 1
#define DRM_MODE_DIRTY_ANNOTATE 2
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
__u32 vrefresh;
__u32 flags;
__u32 type;
char name[DRM_DISPLAY_MODE_LEN];
};
struct drm_mode_card_res {
__u64 fb_id_ptr;
__u64 crtc_id_ptr;
__u64 connector_id_ptr;
__u64 encoder_id_ptr;
__u32 count_fbs;
__u32 count_crtcs;
__u32 count_connectors;
__u32 count_encoders;
__u32 min_width, max_width;
__u32 min_height, max_height;
};
struct drm_mode_crtc {
__u64 set_connectors_ptr;
__u32 count_connectors;
__u32 crtc_id; /**< Id */
__u32 fb_id; /**< Id of framebuffer */
__u32 x, y; /**< Position on the framebuffer */
__u32 gamma_size;
__u32 mode_valid;
struct drm_mode_modeinfo mode;
};
#define DRM_MODE_ENCODER_NONE 0
#define DRM_MODE_ENCODER_DAC 1
#define DRM_MODE_ENCODER_TMDS 2
#define DRM_MODE_ENCODER_LVDS 3
#define DRM_MODE_ENCODER_TVDAC 4
struct drm_mode_get_encoder {
__u32 encoder_id;
__u32 encoder_type;
__u32 crtc_id; /**< Id of crtc */
__u32 possible_crtcs;
__u32 possible_clones;
};
/* This is for connectors with multiple signal types. */
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
#define DRM_MODE_SUBCONNECTOR_Automatic 0
#define DRM_MODE_SUBCONNECTOR_Unknown 0
#define DRM_MODE_SUBCONNECTOR_DVID 3
#define DRM_MODE_SUBCONNECTOR_DVIA 4
#define DRM_MODE_SUBCONNECTOR_Composite 5
#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
#define DRM_MODE_SUBCONNECTOR_Component 8
#define DRM_MODE_SUBCONNECTOR_SCART 9
#define DRM_MODE_CONNECTOR_Unknown 0
#define DRM_MODE_CONNECTOR_VGA 1
#define DRM_MODE_CONNECTOR_DVII 2
#define DRM_MODE_CONNECTOR_DVID 3
#define DRM_MODE_CONNECTOR_DVIA 4
#define DRM_MODE_CONNECTOR_Composite 5
#define DRM_MODE_CONNECTOR_SVIDEO 6
#define DRM_MODE_CONNECTOR_LVDS 7
#define DRM_MODE_CONNECTOR_Component 8
#define DRM_MODE_CONNECTOR_9PinDIN 9
#define DRM_MODE_CONNECTOR_DisplayPort 10
#define DRM_MODE_CONNECTOR_HDMIA 11
#define DRM_MODE_CONNECTOR_HDMIB 12
#define DRM_MODE_CONNECTOR_TV 13
#define DRM_MODE_CONNECTOR_eDP 14
struct drm_mode_get_connector {
__u64 encoders_ptr;
__u64 modes_ptr;
__u64 props_ptr;
__u64 prop_values_ptr;
__u32 count_modes;
__u32 count_props;
__u32 count_encoders;
__u32 encoder_id; /**< Current Encoder */
__u32 connector_id; /**< Id */
__u32 connector_type;
__u32 connector_type_id;
__u32 connection;
__u32 mm_width, mm_height; /**< width and height in millimeters */
__u32 subpixel;
};
#define DRM_MODE_PROP_PENDING (1<<0)
#define DRM_MODE_PROP_RANGE (1<<1)
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
#define DRM_MODE_PROP_BLOB (1<<4)
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
};
struct drm_mode_get_property {
__u64 values_ptr; /* values and blob lengths */
__u64 enum_blob_ptr; /* enum and blob id ptrs */
__u32 prop_id;
__u32 flags;
char name[DRM_PROP_NAME_LEN];
__u32 count_values;
__u32 count_enum_blobs;
};
struct drm_mode_connector_set_property {
__u64 value;
__u32 prop_id;
__u32 connector_id;
};
struct drm_mode_get_blob {
__u32 blob_id;
__u32 length;
__u64 data;
};
struct drm_mode_fb_cmd {
__u32 fb_id;
__u32 width, height;
__u32 pitch;
__u32 bpp;
__u32 depth;
/* driver specific handle */
__u32 handle;
};
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
#define DRM_MODE_FB_DIRTY_FLAGS 0x03
/*
* Mark a region of a framebuffer as dirty.
*
* Some hardware does not automatically update display contents
* as a hardware or software draw to a framebuffer. This ioctl
* allows userspace to tell the kernel and the hardware what
* regions of the framebuffer have changed.
*
* The kernel or hardware is free to update more than just the
* region specified by the clip rects. The kernel or hardware
* may also delay and/or coalesce several calls to dirty into a
* single update.
*
* Userspace may annotate the updates; the annotations are a
* promise made by the caller that the change is either a copy
* of pixels or a fill of a single color in the region specified.
*
* If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given, the
* number of updated regions is half of the num_clips given,
* and the clip rects are paired as src and dst. The width and
* height of each pair must match.
*
* If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given, the caller
* promises that the regions specified by the clip rects are filled
* completely with a single color, as given in the color argument.
*/
struct drm_mode_fb_dirty_cmd {
__u32 fb_id;
__u32 flags;
__u32 color;
__u32 num_clips;
__u64 clips_ptr;
};
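/* Illustrative sketch, not part of the original header: flushing one
 * rectangle of a framebuffer with the dirty ioctl described above.
 * Assumes userspace code with <sys/ioctl.h>, an open DRM file descriptor,
 * and struct drm_clip_rect plus DRM_IOCTL_MODE_DIRTYFB from drm.h; the
 * helper name is hypothetical. */
static inline int example_dirtyfb_flush_rect(int fd, __u32 fb_id,
                                             const struct drm_clip_rect *rect)
{
    struct drm_mode_fb_dirty_cmd cmd;
    cmd.fb_id = fb_id;
    cmd.flags = 0;                 /* no copy/fill annotation */
    cmd.color = 0;
    cmd.num_clips = 1;
    cmd.clips_ptr = (__u64)(unsigned long)rect;
    return ioctl(fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);
}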
struct drm_mode_mode_cmd {
__u32 connector_id;
struct drm_mode_modeinfo mode;
};
#define DRM_MODE_CURSOR_BO (1<<0)
#define DRM_MODE_CURSOR_MOVE (1<<1)
/*
* Depending on the value in flags, different members are used.
*
* CURSOR_BO uses
* crtc
* width
* height
* handle - if 0, turns the cursor off
*
* CURSOR_MOVE uses
* crtc
* x
* y
*/
struct drm_mode_cursor {
__u32 flags;
__u32 crtc_id;
__s32 x;
__s32 y;
__u32 width;
__u32 height;
/* driver specific handle */
__u32 handle;
};
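/* Illustrative sketch, not part of the original header: moving the cursor,
 * where only flags, crtc_id, x and y are meaningful per the comment above.
 * Assumes userspace code with <sys/ioctl.h>, an open DRM file descriptor
 * and DRM_IOCTL_MODE_CURSOR from drm.h; the helper name is hypothetical. */
static inline int example_move_cursor(int fd, __u32 crtc_id, __s32 x, __s32 y)
{
    struct drm_mode_cursor arg;
    arg.flags = DRM_MODE_CURSOR_MOVE;
    arg.crtc_id = crtc_id;
    arg.x = x;
    arg.y = y;
    arg.width = 0;     /* ignored for CURSOR_MOVE */
    arg.height = 0;
    arg.handle = 0;
    return ioctl(fd, DRM_IOCTL_MODE_CURSOR, &arg);
}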
struct drm_mode_crtc_lut {
__u32 crtc_id;
__u32 gamma_size;
/* pointers to arrays */
__u64 red;
__u64 green;
__u64 blue;
};
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT
/*
* Request a page flip on the specified crtc.
*
* This ioctl will ask KMS to schedule a page flip for the specified
* crtc. Once any pending rendering targeting the specified fb (as of
* ioctl time) has completed, the crtc will be reprogrammed to display
* that fb after the next vertical refresh. The ioctl returns
* immediately, but subsequent rendering to the current fb will block
* in the execbuffer ioctl until the page flip happens. If a page
* flip is already pending as the ioctl is called, EBUSY will be
* returned.
*
* The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
* request that drm sends back a vblank event (see drm.h: struct
* drm_event_vblank) when the page flip is done. The user_data field
* passed in with this ioctl will be returned as the user_data field
* in the vblank event struct.
*
* The reserved field must be zero until we figure out something
* clever to use it for.
*/
struct drm_mode_crtc_page_flip {
__u32 crtc_id;
__u32 fb_id;
__u32 flags;
__u32 reserved;
__u64 user_data;
};
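/* Illustrative sketch, not part of the original header: requesting a page
 * flip to fb_id with a vblank event, as described above. Assumes userspace
 * code with <sys/ioctl.h>, an open DRM file descriptor and
 * DRM_IOCTL_MODE_PAGE_FLIP from drm.h; the helper name is hypothetical. */
static inline int example_page_flip(int fd, __u32 crtc_id, __u32 fb_id,
                                    __u64 user_data)
{
    struct drm_mode_crtc_page_flip flip;
    flip.crtc_id = crtc_id;
    flip.fb_id = fb_id;
    flip.flags = DRM_MODE_PAGE_FLIP_EVENT;  /* deliver a vblank event */
    flip.reserved = 0;                      /* must be zero */
    flip.user_data = user_data;             /* echoed back in the event */
    return ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
}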
#endif


@ -0,0 +1,380 @@
/*
This file is auto-generated from the drm_pciids.txt in the DRM CVS.
Please contact dri-devel@lists.sf.net to add new cards to this list.
*/
#define radeon_PCI_IDS \
{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
{0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
{0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
{0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
{0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
{0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4B48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
{0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
{0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
{0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
{0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
{0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
{0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
{0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
{0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
{0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
{0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
{0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
{0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
{0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
{0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
{0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
{0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
{0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
{0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
{0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
{0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
{0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
{0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \
{0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \
{0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
{0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
{0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
{0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
{0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
{0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
{0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
{0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x793f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7941, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7942, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
{0x1002, 0x796c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x796d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x796e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x796f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9402, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9403, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9444, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x944A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x944B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x944C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x944E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9450, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x947B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9480, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9487, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9488, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9489, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9495, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9498, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x949C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x949E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x949F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9501, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9504, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9505, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9506, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9507, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9508, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9509, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x950F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9515, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9517, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9519, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9540, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9542, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
{0x1002, 0x954E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
{0x1002, 0x954F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9557, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9586, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9587, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9588, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9589, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9590, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9593, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9595, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9596, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9597, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9598, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9599, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
{0x1002, 0x959B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9612, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}


@ -0,0 +1,911 @@
/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*-
*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
* Keith Whitwell <keith@tungstengraphics.com>
*/
#ifndef __RADEON_DRM_H__
#define __RADEON_DRM_H__
#include "drm.h"
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (radeon_sarea.h)
*/
#ifndef __RADEON_SAREA_DEFINES__
#define __RADEON_SAREA_DEFINES__
/* Old style state flags, required for sarea interface (1.1 and 1.2
* clears) and 1.2 drm_vertex2 ioctl.
*/
#define RADEON_UPLOAD_CONTEXT 0x00000001
#define RADEON_UPLOAD_VERTFMT 0x00000002
#define RADEON_UPLOAD_LINE 0x00000004
#define RADEON_UPLOAD_BUMPMAP 0x00000008
#define RADEON_UPLOAD_MASKS 0x00000010
#define RADEON_UPLOAD_VIEWPORT 0x00000020
#define RADEON_UPLOAD_SETUP 0x00000040
#define RADEON_UPLOAD_TCL 0x00000080
#define RADEON_UPLOAD_MISC 0x00000100
#define RADEON_UPLOAD_TEX0 0x00000200
#define RADEON_UPLOAD_TEX1 0x00000400
#define RADEON_UPLOAD_TEX2 0x00000800
#define RADEON_UPLOAD_TEX0IMAGES 0x00001000
#define RADEON_UPLOAD_TEX1IMAGES 0x00002000
#define RADEON_UPLOAD_TEX2IMAGES 0x00004000
#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */
#define RADEON_REQUIRE_QUIESCENCE 0x00010000
#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */
#define RADEON_UPLOAD_ALL 0x003effff
#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff
/* New style per-packet identifiers for use in cmd_buffer ioctl with
* the RADEON_EMIT_PACKET command. Comments relate new packets to old
* state bits and the packet size:
*/
#define RADEON_EMIT_PP_MISC 0 /* context/7 */
#define RADEON_EMIT_PP_CNTL 1 /* context/3 */
#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */
#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */
#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */
#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */
#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */
#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */
#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */
#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */
#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */
#define RADEON_EMIT_RE_MISC 11 /* misc/1 */
#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */
#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */
#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */
#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */
#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */
#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */
#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */
#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */
#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */
#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */
#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */
#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */
#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */
#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */
#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */
#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */
#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */
#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */
#define R200_EMIT_TFACTOR_0 30 /* tf/7 */
#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */
#define R200_EMIT_VAP_CTL 32 /* vap/1 */
#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */
#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */
#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */
#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */
#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */
#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */
#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */
#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */
#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */
#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */
#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */
#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */
#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */
#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */
#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */
#define R200_EMIT_VTE_CNTL 48 /* vte/1 */
#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */
#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */
#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */
#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */
#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */
#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */
#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */
#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */
#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */
#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */
#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */
#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */
#define R200_EMIT_PP_CUBIC_FACES_0 61
#define R200_EMIT_PP_CUBIC_OFFSETS_0 62
#define R200_EMIT_PP_CUBIC_FACES_1 63
#define R200_EMIT_PP_CUBIC_OFFSETS_1 64
#define R200_EMIT_PP_CUBIC_FACES_2 65
#define R200_EMIT_PP_CUBIC_OFFSETS_2 66
#define R200_EMIT_PP_CUBIC_FACES_3 67
#define R200_EMIT_PP_CUBIC_OFFSETS_3 68
#define R200_EMIT_PP_CUBIC_FACES_4 69
#define R200_EMIT_PP_CUBIC_OFFSETS_4 70
#define R200_EMIT_PP_CUBIC_FACES_5 71
#define R200_EMIT_PP_CUBIC_OFFSETS_5 72
#define RADEON_EMIT_PP_TEX_SIZE_0 73
#define RADEON_EMIT_PP_TEX_SIZE_1 74
#define RADEON_EMIT_PP_TEX_SIZE_2 75
#define R200_EMIT_RB3D_BLENDCOLOR 76
#define R200_EMIT_TCL_POINT_SPRITE_CNTL 77
#define RADEON_EMIT_PP_CUBIC_FACES_0 78
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0 79
#define RADEON_EMIT_PP_CUBIC_FACES_1 80
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1 81
#define RADEON_EMIT_PP_CUBIC_FACES_2 82
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2 83
#define R200_EMIT_PP_TRI_PERF_CNTL 84
#define R200_EMIT_PP_AFS_0 85
#define R200_EMIT_PP_AFS_1 86
#define R200_EMIT_ATF_TFACTOR 87
#define R200_EMIT_PP_TXCTLALL_0 88
#define R200_EMIT_PP_TXCTLALL_1 89
#define R200_EMIT_PP_TXCTLALL_2 90
#define R200_EMIT_PP_TXCTLALL_3 91
#define R200_EMIT_PP_TXCTLALL_4 92
#define R200_EMIT_PP_TXCTLALL_5 93
#define R200_EMIT_VAP_PVS_CNTL 94
#define RADEON_MAX_STATE_PACKETS 95
/* Commands understood by cmd_buffer ioctl. More can be added but
* obviously these can't be removed or changed:
*/
#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */
#define RADEON_CMD_SCALARS 2 /* emit scalar data */
#define RADEON_CMD_VECTORS 3 /* emit vector data */
#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */
#define RADEON_CMD_PACKET3 5 /* emit hw packet */
#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */
#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */
#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note:
* doesn't make the cpu wait, just
* the graphics hardware */
#define RADEON_CMD_VECLINEAR 9 /* another r200 stopgap */
typedef union {
int i;
struct {
unsigned char cmd_type, pad0, pad1, pad2;
} header;
struct {
unsigned char cmd_type, packet_id, pad0, pad1;
} packet;
struct {
unsigned char cmd_type, offset, stride, count;
} scalars;
struct {
unsigned char cmd_type, offset, stride, count;
} vectors;
struct {
unsigned char cmd_type, addr_lo, addr_hi, count;
} veclinear;
struct {
unsigned char cmd_type, buf_idx, pad0, pad1;
} dma;
struct {
unsigned char cmd_type, flags, pad0, pad1;
} wait;
} drm_radeon_cmd_header_t;
#define RADEON_WAIT_2D 0x1
#define RADEON_WAIT_3D 0x2
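/* Illustrative sketch, not part of the original header: encoding a single
 * command header for the cmd_buffer ioctl, here a hardware wait on both
 * the 2D and 3D engines using the union above. The helper name is
 * hypothetical. */
static inline drm_radeon_cmd_header_t example_radeon_wait_header(void)
{
    drm_radeon_cmd_header_t hdr;
    hdr.i = 0;                                  /* clear all pad bytes */
    hdr.wait.cmd_type = RADEON_CMD_WAIT;
    hdr.wait.flags = RADEON_WAIT_2D | RADEON_WAIT_3D;
    return hdr;
}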
/* Allowed parameters for R300_CMD_PACKET3
*/
#define R300_CMD_PACKET3_CLEAR 0
#define R300_CMD_PACKET3_RAW 1
/* Commands understood by cmd_buffer ioctl for R300.
* The interface has not been stabilized, so some of these may be removed
* and eventually reordered before stabilization.
*/
#define R300_CMD_PACKET0 1
#define R300_CMD_VPU 2 /* emit vertex program upload */
#define R300_CMD_PACKET3 3 /* emit a packet3 */
#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
#define R300_CMD_CP_DELAY 5
#define R300_CMD_DMA_DISCARD 6
#define R300_CMD_WAIT 7
# define R300_WAIT_2D 0x1
# define R300_WAIT_3D 0x2
/* These two defines are DOING IT WRONG - however,
* we have userspace which relies on using these.
* The wait interface is kept for backwards compatibility;
* new code should use the NEW_WAIT defines below.
* THESE ARE NOT BIT FIELDS.
*/
# define R300_WAIT_2D_CLEAN 0x3
# define R300_WAIT_3D_CLEAN 0x4
# define R300_NEW_WAIT_2D_3D 0x3
# define R300_NEW_WAIT_2D_2D_CLEAN 0x4
# define R300_NEW_WAIT_3D_3D_CLEAN 0x6
# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8
#define R300_CMD_SCRATCH 8
#define R300_CMD_R500FP 9
typedef union {
unsigned int u;
struct {
unsigned char cmd_type, pad0, pad1, pad2;
} header;
struct {
unsigned char cmd_type, count, reglo, reghi;
} packet0;
struct {
unsigned char cmd_type, count, adrlo, adrhi;
} vpu;
struct {
unsigned char cmd_type, packet, pad0, pad1;
} packet3;
struct {
unsigned char cmd_type, packet;
unsigned short count; /* amount of packet2 to emit */
} delay;
struct {
unsigned char cmd_type, buf_idx, pad0, pad1;
} dma;
struct {
unsigned char cmd_type, flags, pad0, pad1;
} wait;
struct {
unsigned char cmd_type, reg, n_bufs, flags;
} scratch;
struct {
unsigned char cmd_type, count, adrlo, adrhi_flags;
} r500fp;
} drm_r300_cmd_header_t;
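/* Illustrative sketch, not part of the original header: encoding an R300
 * wait command header with the union above; new code uses the NEW_WAIT
 * values as noted earlier. The helper name is hypothetical. */
static inline drm_r300_cmd_header_t example_r300_wait_header(void)
{
    drm_r300_cmd_header_t hdr;
    hdr.u = 0;                                 /* clear all pad bytes */
    hdr.wait.cmd_type = R300_CMD_WAIT;
    hdr.wait.flags = R300_NEW_WAIT_2D_3D;      /* wait for 2D and 3D */
    return hdr;
}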
#define RADEON_FRONT 0x1
#define RADEON_BACK 0x2
#define RADEON_DEPTH 0x4
#define RADEON_STENCIL 0x8
#define RADEON_CLEAR_FASTZ 0x80000000
#define RADEON_USE_HIERZ 0x40000000
#define RADEON_USE_COMP_ZBUF 0x20000000
#define R500FP_CONSTANT_TYPE (1 << 1)
#define R500FP_CONSTANT_CLAMP (1 << 2)
/* Primitive types
*/
#define RADEON_POINTS 0x1
#define RADEON_LINES 0x2
#define RADEON_LINE_STRIP 0x3
#define RADEON_TRIANGLES 0x4
#define RADEON_TRIANGLE_FAN 0x5
#define RADEON_TRIANGLE_STRIP 0x6
/* Vertex/indirect buffer size
*/
#define RADEON_BUFFER_SIZE 65536
/* Byte offsets for indirect buffer data
*/
#define RADEON_INDEX_PRIM_OFFSET 20
#define RADEON_SCRATCH_REG_OFFSET 32
#define R600_SCRATCH_REG_OFFSET 256
#define RADEON_NR_SAREA_CLIPRECTS 12
/* There are 2 heaps (local/GART). Each region within a heap is a
* minimum of 64k, and there are at most 64 of them per heap.
*/
#define RADEON_LOCAL_TEX_HEAP 0
#define RADEON_GART_TEX_HEAP 1
#define RADEON_NR_TEX_HEAPS 2
#define RADEON_NR_TEX_REGIONS 64
#define RADEON_LOG_TEX_GRANULARITY 16
#define RADEON_MAX_TEXTURE_LEVELS 12
#define RADEON_MAX_TEXTURE_UNITS 3
#define RADEON_MAX_SURFACES 8
/* Blits have strict offset rules. All blit offsets must be aligned on
* a 1K-byte boundary.
*/
#define RADEON_OFFSET_SHIFT 10
#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT)
#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1)
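/* Illustrative sketch, not part of the original header: checking blit
 * offset alignment and rounding an offset up to the required 1K boundary
 * using the masks above. The macro names are hypothetical. */
#define EXAMPLE_RADEON_OFFSET_ALIGNED(off)  (((off) & RADEON_OFFSET_MASK) == 0)
#define EXAMPLE_RADEON_OFFSET_ALIGN_UP(off) \
    (((off) + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK)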
#endif /* __RADEON_SAREA_DEFINES__ */
typedef struct {
unsigned int red;
unsigned int green;
unsigned int blue;
unsigned int alpha;
} radeon_color_regs_t;
typedef struct {
/* Context state */
unsigned int pp_misc; /* 0x1c14 */
unsigned int pp_fog_color;
unsigned int re_solid_color;
unsigned int rb3d_blendcntl;
unsigned int rb3d_depthoffset;
unsigned int rb3d_depthpitch;
unsigned int rb3d_zstencilcntl;
unsigned int pp_cntl; /* 0x1c38 */
unsigned int rb3d_cntl;
unsigned int rb3d_coloroffset;
unsigned int re_width_height;
unsigned int rb3d_colorpitch;
unsigned int se_cntl;
/* Vertex format state */
unsigned int se_coord_fmt; /* 0x1c50 */
/* Line state */
unsigned int re_line_pattern; /* 0x1cd0 */
unsigned int re_line_state;
unsigned int se_line_width; /* 0x1db8 */
/* Bumpmap state */
unsigned int pp_lum_matrix; /* 0x1d00 */
unsigned int pp_rot_matrix_0; /* 0x1d58 */
unsigned int pp_rot_matrix_1;
/* Mask state */
unsigned int rb3d_stencilrefmask; /* 0x1d7c */
unsigned int rb3d_ropcntl;
unsigned int rb3d_planemask;
/* Viewport state */
unsigned int se_vport_xscale; /* 0x1d98 */
unsigned int se_vport_xoffset;
unsigned int se_vport_yscale;
unsigned int se_vport_yoffset;
unsigned int se_vport_zscale;
unsigned int se_vport_zoffset;
/* Setup state */
unsigned int se_cntl_status; /* 0x2140 */
/* Misc state */
unsigned int re_top_left; /* 0x26c0 */
unsigned int re_misc;
} drm_radeon_context_regs_t;
typedef struct {
/* Zbias state */
unsigned int se_zbias_factor; /* 0x1dac */
unsigned int se_zbias_constant;
} drm_radeon_context2_regs_t;
/* Setup registers for each texture unit
*/
typedef struct {
unsigned int pp_txfilter;
unsigned int pp_txformat;
unsigned int pp_txoffset;
unsigned int pp_txcblend;
unsigned int pp_txablend;
unsigned int pp_tfactor;
unsigned int pp_border_color;
} drm_radeon_texture_regs_t;
typedef struct {
unsigned int start;
unsigned int finish;
unsigned int prim:8;
unsigned int stateidx:8;
unsigned int numverts:16; /* overloaded as offset/64 for elt prims */
unsigned int vc_format; /* vertex format */
} drm_radeon_prim_t;
typedef struct {
drm_radeon_context_regs_t context;
drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
drm_radeon_context2_regs_t context2;
unsigned int dirty;
} drm_radeon_state_t;
typedef struct {
/* The channel for communication of state information to the
* kernel on firing a vertex buffer with either of the
* obsoleted vertex/index ioctls.
*/
drm_radeon_context_regs_t context_state;
drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
unsigned int dirty;
unsigned int vertsize;
unsigned int vc_format;
/* The current cliprects, or a subset thereof.
*/
// struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
*/
unsigned int last_frame;
unsigned int last_dispatch;
unsigned int last_clear;
// struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
// 1];
unsigned int tex_age[RADEON_NR_TEX_HEAPS];
int ctx_owner;
int pfState; /* number of 3d windows (0, 1, 2 or more) */
int pfCurrentPage; /* which buffer is being displayed? */
int crtc2_base; /* CRTC2 frame offset */
int tiling_enabled; /* set by drm, read by 2d + 3d clients */
} drm_radeon_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmRadeon.h)
*
* KW: actually it's illegal to change any of this (backwards compatibility).
*/
/* Radeon specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_RADEON_CP_INIT 0x00
#define DRM_RADEON_CP_START 0x01
#define DRM_RADEON_CP_STOP 0x02
#define DRM_RADEON_CP_RESET 0x03
#define DRM_RADEON_CP_IDLE 0x04
#define DRM_RADEON_RESET 0x05
#define DRM_RADEON_FULLSCREEN 0x06
#define DRM_RADEON_SWAP 0x07
#define DRM_RADEON_CLEAR 0x08
#define DRM_RADEON_VERTEX 0x09
#define DRM_RADEON_INDICES 0x0A
#define DRM_RADEON_NOT_USED
#define DRM_RADEON_STIPPLE 0x0C
#define DRM_RADEON_INDIRECT 0x0D
#define DRM_RADEON_TEXTURE 0x0E
#define DRM_RADEON_VERTEX2 0x0F
#define DRM_RADEON_CMDBUF 0x10
#define DRM_RADEON_GETPARAM 0x11
#define DRM_RADEON_FLIP 0x12
#define DRM_RADEON_ALLOC 0x13
#define DRM_RADEON_FREE 0x14
#define DRM_RADEON_INIT_HEAP 0x15
#define DRM_RADEON_IRQ_EMIT 0x16
#define DRM_RADEON_IRQ_WAIT 0x17
#define DRM_RADEON_CP_RESUME 0x18
#define DRM_RADEON_SETPARAM 0x19
#define DRM_RADEON_SURF_ALLOC 0x1a
#define DRM_RADEON_SURF_FREE 0x1b
/* KMS ioctl */
#define DRM_RADEON_GEM_INFO 0x1c
#define DRM_RADEON_GEM_CREATE 0x1d
#define DRM_RADEON_GEM_MMAP 0x1e
#define DRM_RADEON_GEM_PREAD 0x21
#define DRM_RADEON_GEM_PWRITE 0x22
#define DRM_RADEON_GEM_SET_DOMAIN 0x23
#define DRM_RADEON_GEM_WAIT_IDLE 0x24
#define DRM_RADEON_CS 0x26
#define DRM_RADEON_INFO 0x27
#define DRM_RADEON_GEM_SET_TILING 0x28
#define DRM_RADEON_GEM_GET_TILING 0x29
#define DRM_RADEON_GEM_BUSY 0x2a
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
#define DRM_IOCTL_RADEON_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_RESET)
#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t)
#define DRM_IOCTL_RADEON_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_SWAP)
#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
#define DRM_IOCTL_RADEON_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_FLIP)
#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
#define DRM_IOCTL_RADEON_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t)
#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
/* KMS */
#define DRM_IOCTL_RADEON_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info)
#define DRM_IOCTL_RADEON_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create)
#define DRM_IOCTL_RADEON_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap)
#define DRM_IOCTL_RADEON_GEM_PREAD DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread)
#define DRM_IOCTL_RADEON_GEM_PWRITE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite)
#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
#define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
#define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
#define DRM_IOCTL_RADEON_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
#define DRM_IOCTL_RADEON_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
typedef struct drm_radeon_init {
enum {
RADEON_INIT_CP = 0x01,
RADEON_CLEANUP_CP = 0x02,
RADEON_INIT_R200_CP = 0x03,
RADEON_INIT_R300_CP = 0x04,
RADEON_INIT_R600_CP = 0x05
} func;
unsigned long sarea_priv_offset;
int is_pci;
int cp_mode;
int gart_size;
int ring_size;
int usec_timeout;
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long ring_offset;
unsigned long ring_rptr_offset;
unsigned long buffers_offset;
unsigned long gart_textures_offset;
} drm_radeon_init_t;
typedef struct drm_radeon_cp_stop {
int flush;
int idle;
} drm_radeon_cp_stop_t;
typedef struct drm_radeon_fullscreen {
enum {
RADEON_INIT_FULLSCREEN = 0x01,
RADEON_CLEANUP_FULLSCREEN = 0x02
} func;
} drm_radeon_fullscreen_t;
#define CLEAR_X1 0
#define CLEAR_Y1 1
#define CLEAR_X2 2
#define CLEAR_Y2 3
#define CLEAR_DEPTH 4
typedef union drm_radeon_clear_rect {
float f[5];
unsigned int ui[5];
} drm_radeon_clear_rect_t;
typedef struct drm_radeon_clear {
unsigned int flags;
unsigned int clear_color;
unsigned int clear_depth;
unsigned int color_mask;
unsigned int depth_mask; /* misnamed field: should be stencil */
// drm_radeon_clear_rect_t __user *depth_boxes;
} drm_radeon_clear_t;
typedef struct drm_radeon_vertex {
int prim;
int idx; /* Index of vertex buffer */
int count; /* Number of vertices in buffer */
int discard; /* Client finished with buffer? */
} drm_radeon_vertex_t;
typedef struct drm_radeon_indices {
int prim;
int idx;
int start;
int end;
int discard; /* Client finished with buffer? */
} drm_radeon_indices_t;
/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
* - allows multiple primitives and state changes in a single ioctl
* - supports driver change to emit native primitives
*/
typedef struct drm_radeon_vertex2 {
int idx; /* Index of vertex buffer */
int discard; /* Client finished with buffer? */
int nr_states;
// drm_radeon_state_t __user *state;
int nr_prims;
// drm_radeon_prim_t __user *prim;
} drm_radeon_vertex2_t;
/* v1.3 - obsoletes drm_radeon_vertex2
* - allows arbitrarily large cliprect list
* - allows updating of tcl packet, vector and scalar state
* - allows memory-efficient description of state updates
* - allows state to be emitted without a primitive
* (for clears, ctx switches)
* - allows more than one dma buffer to be referenced per ioctl
* - supports tcl driver
* - may be extended in future versions with new cmd types, packets
*/
typedef struct drm_radeon_cmd_buffer {
int bufsz;
char __user *buf;
int nbox;
struct drm_clip_rect __user *boxes;
} drm_radeon_cmd_buffer_t;
typedef struct drm_radeon_tex_image {
unsigned int x, y; /* Blit coordinates */
unsigned int width, height;
const void __user *data;
} drm_radeon_tex_image_t;
typedef struct drm_radeon_texture {
unsigned int offset;
int pitch;
int format;
int width; /* Texture image coordinates */
int height;
drm_radeon_tex_image_t __user *image;
} drm_radeon_texture_t;
typedef struct drm_radeon_stipple {
unsigned int __user *mask;
} drm_radeon_stipple_t;
typedef struct drm_radeon_indirect {
int idx;
int start;
int end;
int discard;
} drm_radeon_indirect_t;
/* enum for card type parameters */
#define RADEON_CARD_PCI 0
#define RADEON_CARD_AGP 1
#define RADEON_CARD_PCIE 2
/* 1.3: An ioctl to get parameters that aren't available to the 3d
* client any other way.
*/
#define RADEON_PARAM_GART_BUFFER_OFFSET 1 /* card offset of 1st GART buffer */
#define RADEON_PARAM_LAST_FRAME 2
#define RADEON_PARAM_LAST_DISPATCH 3
#define RADEON_PARAM_LAST_CLEAR 4
/* Added with DRM version 1.6. */
#define RADEON_PARAM_IRQ_NR 5
#define RADEON_PARAM_GART_BASE 6 /* card offset of GART base */
/* Added with DRM version 1.8. */
#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */
#define RADEON_PARAM_STATUS_HANDLE 8
#define RADEON_PARAM_SAREA_HANDLE 9
#define RADEON_PARAM_GART_TEX_HANDLE 10
#define RADEON_PARAM_SCRATCH_OFFSET 11
#define RADEON_PARAM_CARD_TYPE 12
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
#define RADEON_PARAM_DEVICE_ID 16
#define RADEON_PARAM_NUM_Z_PIPES 17 /* num Z pipes */
typedef struct drm_radeon_getparam {
int param;
void __user *value;
} drm_radeon_getparam_t;
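/*
* Illustrative usage sketch (not part of the original header): querying one of
* the RADEON_PARAM_* values above from user space. DRM_IOCTL_RADEON_GETPARAM
* and the open DRM file descriptor "fd" are assumed to be available from the
* full radeon_drm / libdrm headers.
*
*	struct drm_radeon_getparam gp;
*	int value = 0;
*
*	memset(&gp, 0, sizeof(gp));
*	gp.param = RADEON_PARAM_NUM_GB_PIPES;
*	gp.value = &value;                  /-/ kernel writes the result here
*	if (ioctl(fd, DRM_IOCTL_RADEON_GETPARAM, &gp) == 0)
*		printf("GB pipes: %d\n", value);
*/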
/* 1.6: Set up a memory manager for regions of shared memory:
*/
#define RADEON_MEM_REGION_GART 1
#define RADEON_MEM_REGION_FB 2
typedef struct drm_radeon_mem_alloc {
int region;
int alignment;
int size;
int __user *region_offset; /* offset from start of fb or GART */
} drm_radeon_mem_alloc_t;
typedef struct drm_radeon_mem_free {
int region;
int region_offset;
} drm_radeon_mem_free_t;
typedef struct drm_radeon_mem_init_heap {
int region;
int size;
int start;
} drm_radeon_mem_init_heap_t;
/* 1.6: Userspace can request & wait on irq's:
*/
typedef struct drm_radeon_irq_emit {
int __user *irq_seq;
} drm_radeon_irq_emit_t;
typedef struct drm_radeon_irq_wait {
int irq_seq;
} drm_radeon_irq_wait_t;
/* 1.10: Clients tell the DRM where they think the framebuffer is located in
* the card's address space, via a new generic ioctl to set parameters
*/
typedef struct drm_radeon_setparam {
unsigned int param;
__s64 value;
} drm_radeon_setparam_t;
#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */
#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */
#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */
/* 1.14: Clients can allocate/free a surface
*/
typedef struct drm_radeon_surface_alloc {
unsigned int address;
unsigned int size;
unsigned int flags;
} drm_radeon_surface_alloc_t;
typedef struct drm_radeon_surface_free {
unsigned int address;
} drm_radeon_surface_free_t;
#define DRM_RADEON_VBLANK_CRTC1 1
#define DRM_RADEON_VBLANK_CRTC2 2
/*
* Kernel modesetting world below.
*/
#define RADEON_GEM_DOMAIN_CPU 0x1
#define RADEON_GEM_DOMAIN_GTT 0x2
#define RADEON_GEM_DOMAIN_VRAM 0x4
struct drm_radeon_gem_info {
uint64_t gart_size;
uint64_t vram_size;
uint64_t vram_visible;
};
#define RADEON_GEM_NO_BACKING_STORE 1
struct drm_radeon_gem_create {
uint64_t size;
uint64_t alignment;
uint32_t handle;
uint32_t initial_domain;
uint32_t flags;
};
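/*
* Illustrative usage sketch (not part of the original header): allocating a
* 1 MiB buffer object in VRAM with the GEM create ioctl defined above. "fd"
* is an assumed open DRM file descriptor and use_handle() a hypothetical
* caller-side helper.
*
*	struct drm_radeon_gem_create req;
*
*	memset(&req, 0, sizeof(req));
*	req.size           = 1024 * 1024;
*	req.alignment      = 4096;
*	req.initial_domain = RADEON_GEM_DOMAIN_VRAM;
*	if (ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &req) == 0)
*		use_handle(req.handle);     /-/ kernel returns the GEM handle
*/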
#define RADEON_TILING_MACRO 0x1
#define RADEON_TILING_MICRO 0x2
#define RADEON_TILING_SWAP_16BIT 0x4
#define RADEON_TILING_SWAP_32BIT 0x8
#define RADEON_TILING_SURFACE 0x10 /* this object requires a surface
* when mapped - i.e. front buffer */
struct drm_radeon_gem_set_tiling {
uint32_t handle;
uint32_t tiling_flags;
uint32_t pitch;
};
struct drm_radeon_gem_get_tiling {
uint32_t handle;
uint32_t tiling_flags;
uint32_t pitch;
};
struct drm_radeon_gem_mmap {
uint32_t handle;
uint32_t pad;
uint64_t offset;
uint64_t size;
uint64_t addr_ptr;
};
struct drm_radeon_gem_set_domain {
uint32_t handle;
uint32_t read_domains;
uint32_t write_domain;
};
struct drm_radeon_gem_wait_idle {
uint32_t handle;
uint32_t pad;
};
struct drm_radeon_gem_busy {
uint32_t handle;
uint32_t domain;
};
struct drm_radeon_gem_pread {
/** Handle for the object being read. */
uint32_t handle;
uint32_t pad;
/** Offset into the object to read from */
uint64_t offset;
/** Length of data to read */
uint64_t size;
/** Pointer to write the data into. */
/* void *, but pointers are not 32/64 compatible */
uint64_t data_ptr;
};
struct drm_radeon_gem_pwrite {
/** Handle for the object being written to. */
uint32_t handle;
uint32_t pad;
/** Offset into the object to write to */
uint64_t offset;
/** Length of data to write */
uint64_t size;
/** Pointer to read the data from. */
/* void *, but pointers are not 32/64 compatible */
uint64_t data_ptr;
};
#define RADEON_CHUNK_ID_RELOCS 0x01
#define RADEON_CHUNK_ID_IB 0x02
struct drm_radeon_cs_chunk {
uint32_t chunk_id;
uint32_t length_dw;
uint64_t chunk_data;
};
struct drm_radeon_cs_reloc {
uint32_t handle;
uint32_t read_domains;
uint32_t write_domain;
uint32_t flags;
};
struct drm_radeon_cs {
uint32_t num_chunks;
uint32_t cs_id;
/* this points to uint64_t * which point to cs chunks */
uint64_t chunks;
/* updates to the limits after this CS ioctl */
uint64_t gart_limit;
uint64_t vram_limit;
};
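/*
* Illustrative layout sketch (not part of the original header): a minimal
* command submission with a single IB chunk. "chunks" holds a user pointer to
* an array of uint64_t, each of which in turn points to a
* struct drm_radeon_cs_chunk. NDW and the ib[] contents are placeholders.
*
*	uint32_t ib[NDW];                       /-/ command packets built by the client
*	struct drm_radeon_cs_chunk chunk = {
*		.chunk_id   = RADEON_CHUNK_ID_IB,
*		.length_dw  = NDW,
*		.chunk_data = (uintptr_t)ib,
*	};
*	uint64_t chunk_ptrs[1] = { (uintptr_t)&chunk };
*	struct drm_radeon_cs cs = {
*		.num_chunks = 1,
*		.chunks     = (uintptr_t)chunk_ptrs,
*	};
*	ioctl(fd, DRM_IOCTL_RADEON_CS, &cs);
*/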
#define RADEON_INFO_DEVICE_ID 0x00
#define RADEON_INFO_NUM_GB_PIPES 0x01
#define RADEON_INFO_NUM_Z_PIPES 0x02
#define RADEON_INFO_ACCEL_WORKING 0x03
struct drm_radeon_info {
uint32_t request;
uint32_t pad;
uint64_t value;
};
#endif

View File

@ -0,0 +1,637 @@
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_
#include "drm_hashtab.h"
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
//#include <linux/wait.h>
//#include <linux/mutex.h>
//#include <linux/mm.h>
//#include <linux/rbtree.h>
#include <linux/bitmap.h>
struct ttm_bo_device;
struct drm_mm_node;
/**
* struct ttm_placement
*
* @fpfn: first valid page frame number to put the object
* @lpfn: last valid page frame number to put the object
* @num_placement: number of preferred placements
* @placement: preferred placements
* @num_busy_placement: number of preferred placements to use when the buffer must be evicted
* @busy_placement: preferred placements to use when the buffer must be evicted
*
* Structure indicating the placement you request for an object.
*/
struct ttm_placement {
unsigned fpfn;
unsigned lpfn;
unsigned num_placement;
const uint32_t *placement;
unsigned num_busy_placement;
const uint32_t *busy_placement;
};
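/*
* Illustrative fill-in sketch (not part of the original header): a driver
* asking for VRAM with a GTT fallback under memory pressure. The
* TTM_PL_FLAG_VRAM / TTM_PL_FLAG_TT names are assumed to come from the
* placement flag header (ttm_placement_common.h in this tree).
*
*	static const uint32_t wanted[] = { TTM_PL_FLAG_VRAM };
*	static const uint32_t busy[]   = { TTM_PL_FLAG_TT };
*	struct ttm_placement placement = {
*		.fpfn = 0,                          /-/ no page-range restriction
*		.lpfn = 0,
*		.num_placement      = 1,
*		.placement          = wanted,
*		.num_busy_placement = 1,
*		.busy_placement     = busy,
*	};
*/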
/**
* struct ttm_mem_reg
*
* @mm_node: Memory manager node.
* @size: Requested size of memory region.
* @num_pages: Actual size of memory region in pages.
* @page_alignment: Page alignment.
* @placement: Placement flags.
*
* Structure indicating the placement and space resources used by a
* buffer object.
*/
struct ttm_mem_reg {
struct drm_mm_node *mm_node;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
uint32_t placement;
};
/**
* enum ttm_bo_type
*
* @ttm_bo_type_device: These are 'normal' buffers that can
* be mmapped by user space. Each of these bos occupies a slot in the
* device address space that can be used for normal vm operations.
*
* @ttm_bo_type_user: These are user-space memory areas that are made
* available to the GPU by mapping the buffer pages into the GPU aperture
* space. These buffers cannot be mmapped from the device address space.
*
* @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
* but they cannot be accessed from user-space. For kernel-only use.
*/
enum ttm_bo_type {
ttm_bo_type_device,
ttm_bo_type_user,
ttm_bo_type_kernel
};
struct ttm_tt;
/**
* struct ttm_buffer_object
*
* @bdev: Pointer to the buffer object device structure.
* @buffer_start: The virtual user-space start address of ttm_bo_type_user
* buffers.
* @type: The bo type.
* @destroy: Destruction function. If NULL, kfree is used.
* @num_pages: Actual number of pages.
* @addr_space_offset: Address space offset.
* @acc_size: Accounted size for this object.
* @kref: Reference count of this buffer object. When this refcount reaches
* zero, the object is put on the delayed delete list.
* @list_kref: List reference count of this buffer object. This member is
* used to avoid destruction while the buffer object is still on a list.
* Lru lists may keep one refcount, the delayed delete list, and kref != 0
* keeps one refcount. When this refcount reaches zero,
* the object is destroyed.
* @event_queue: Queue for processes waiting on buffer object status change.
* @lock: spinlock protecting mostly synchronization members.
* @mem: structure describing current placement.
* @persistant_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
* holds a pointer to a persistent shmem object.
* @ttm: TTM structure holding system pages.
* @evicted: Whether the object was evicted without user-space knowing.
* @cpu_writes: For synchronization. Number of cpu writers.
* @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
* @val_seq: Sequence of the validation holding the @reserved lock.
* Used to avoid starvation when many processes compete to validate the
* buffer. This member is protected by the bo_device::lru_lock.
* @seq_valid: The value of @val_seq is valid. This value is protected by
* the bo_device::lru_lock.
* @reserved: Deadlock-free lock used for synchronization state transitions.
* @sync_obj_arg: Opaque argument to synchronization object function.
* @sync_obj: Pointer to a synchronization object.
* @priv_flags: Flags describing buffer object internal state.
* @vm_rb: Rb node for the vm rb tree.
* @vm_node: Address space manager node.
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
*
* Base class for TTM buffer object, that deals with data placement and CPU
* mappings. GPU mappings are really up to the driver, but for simpler GPUs
* the driver can usually use the placement offset @offset directly as the
* GPU virtual address. For drivers implementing multiple
* GPU memory manager contexts, the driver should manage the address space
* in these contexts separately and use these objects to get the correct
* placement and caching for these GPU maps. This makes it possible to use
* these objects for even quite elaborate memory management schemes.
* The destroy member and the API visibility of this object make it possible
* to derive driver-specific types.
*/
struct ttm_buffer_object {
/**
* Members constant at init.
*/
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
unsigned long buffer_start;
enum ttm_bo_type type;
void (*destroy) (struct ttm_buffer_object *);
unsigned long num_pages;
uint64_t addr_space_offset;
size_t acc_size;
/**
* Members not needing protection.
*/
struct kref kref;
struct kref list_kref;
// wait_queue_head_t event_queue;
spinlock_t lock;
/**
* Members protected by the bo::reserved lock.
*/
struct ttm_mem_reg mem;
// struct file *persistant_swap_storage;
struct ttm_tt *ttm;
bool evicted;
/**
* Members protected by the bo::reserved lock only when written to.
*/
atomic_t cpu_writers;
/**
* Members protected by the bdev::lru_lock.
*/
struct list_head lru;
struct list_head ddestroy;
struct list_head swap;
uint32_t val_seq;
bool seq_valid;
/**
* Members protected by the bdev::lru_lock
* only when written to.
*/
atomic_t reserved;
/**
* Members protected by the bo::lock
*/
void *sync_obj_arg;
void *sync_obj;
unsigned long priv_flags;
/**
* Members protected by the bdev::vm_lock
*/
// struct rb_node vm_rb;
struct drm_mm_node *vm_node;
/**
* Special members that are protected by the reserve lock
* and the bo::lock when written to. Can be read with
* either of these locks held.
*/
unsigned long offset;
uint32_t cur_placement;
};
/**
* struct ttm_bo_kmap_obj
*
* @virtual: The current kernel virtual address.
* @page: The page when kmap'ing a single page.
* @bo_kmap_type: Type of bo_kmap.
*
* Object describing a kernel mapping. Since a TTM bo may be located
* in various memory types with various caching policies, the
* mapping can either be an ioremap, a vmap, a kmap or part of a
* premapped region.
*/
#define TTM_BO_MAP_IOMEM_MASK 0x80
struct ttm_bo_kmap_obj {
void *virtual;
struct page *page;
enum {
ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
ttm_bo_map_vmap = 2,
ttm_bo_map_kmap = 3,
ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
} bo_kmap_type;
};
/**
* ttm_bo_reference - reference a struct ttm_buffer_object
*
* @bo: The buffer object.
*
* Returns a refcounted pointer to a buffer object.
*/
static inline struct ttm_buffer_object *
ttm_bo_reference(struct ttm_buffer_object *bo)
{
kref_get(&bo->kref);
return bo;
}
/**
* ttm_bo_wait - wait for buffer idle.
*
* @bo: The buffer object.
* @interruptible: Use interruptible wait.
* @no_wait: Return immediately if buffer is busy.
*
* This function must be called with the bo::mutex held, and makes
* sure any previous rendering to the buffer is completed.
* Note: It might be necessary to block validations before the
* wait by reserving the buffer.
* Returns -EBUSY if no_wait is true and the buffer is busy.
* Returns -ERESTARTSYS if interrupted by a signal.
*/
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
bool interruptible, bool no_wait);
/**
* ttm_bo_validate
*
* @bo: The buffer object.
* @placement: Proposed placement for the buffer object.
* @interruptible: Sleep interruptible if sleeping.
* @no_wait: Return immediately if the buffer is busy.
*
* Changes placement and caching policy of the buffer object
* according to the proposed placement.
* Returns
* -EINVAL on invalid proposed placement.
* -ENOMEM on out-of-memory condition.
* -EBUSY if no_wait is true and buffer busy.
* -ERESTARTSYS if interrupted by a signal.
*/
extern int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
bool interruptible, bool no_wait);
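/*
* Illustrative call sketch (not part of the original header): moving an
* already-reserved buffer object to a placement such as the one built in the
* sketch after struct ttm_placement above. "bo" and "placement" are assumed
* caller-side variables; the bo must be reserved before validation.
*
*	int ret = ttm_bo_validate(bo, &placement, true, false);
*	if (ret && ret != -ERESTARTSYS)
*		goto out_unreserve;             /-/ placement could not be satisfied
*/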
/**
* ttm_bo_unref
*
* @bo: The buffer object.
*
* Unreference and clear a pointer to a buffer object.
*/
extern void ttm_bo_unref(struct ttm_buffer_object **bo);
/**
* ttm_bo_synccpu_write_grab
*
* @bo: The buffer object:
* @no_wait: Return immediately if buffer is busy.
*
* Synchronizes a buffer object for CPU RW access. This means
* blocking command submission that affects the buffer and
* waiting for buffer idle. This lock is recursive.
* Returns
* -EBUSY if the buffer is busy and no_wait is true.
* -ERESTARTSYS if interrupted by a signal.
*/
extern int
ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
/**
* ttm_bo_synccpu_write_release:
*
* @bo : The buffer object.
*
* Releases a synccpu lock.
*/
extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
/**
* ttm_bo_init
*
* @bdev: Pointer to a ttm_bo_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
* @flags: Initial placement flags.
* @page_alignment: Data alignment in pages.
* @buffer_start: Virtual address of user space data backing a
* user buffer object.
* @interruptible: If needing to sleep to wait for GPU resources,
* sleep interruptible.
* @persistant_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
* holds a pointer to a persistent shmem object. Typically, this would
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface.
* @acc_size: Accounted size for this object.
* @destroy: Destroy function. Use NULL for kfree().
*
* This function initializes a pre-allocated struct ttm_buffer_object.
* As this object may be part of a larger structure, this function,
* together with the @destroy function,
* enables driver-specific objects derived from a ttm_buffer_object.
* On successful return, the object kref and list_kref are set to 1.
* Returns
* -ENOMEM: Out of memory.
* -EINVAL: Invalid placement flags.
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/
extern int ttm_bo_init(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo,
unsigned long size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
unsigned long buffer_start,
bool interruptible,
struct file *persistant_swap_storage,
size_t acc_size,
void (*destroy) (struct ttm_buffer_object *));
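/*
* Illustrative Linux-style init sketch (not part of the original header):
* embedding a ttm_buffer_object in a driver structure. "struct my_bo",
* "my_bo_destroy", "bdev", "placement", "size" and "acc_size" are assumed
* driver-side names.
*
*	struct my_bo *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
*	if (!obj)
*		return -ENOMEM;
*	ret = ttm_bo_init(bdev, &obj->tbo, size, ttm_bo_type_kernel,
*			  &placement, 1, 0, true, NULL, acc_size,
*			  my_bo_destroy);       /-/ destroy callback frees obj
*/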
/**
* ttm_bo_synccpu_object_init
*
* @bdev: Pointer to a ttm_bo_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
* @flags: Initial placement flags.
* @page_alignment: Data alignment in pages.
* @buffer_start: Virtual address of user space data backing a
* user buffer object.
* @interruptible: If needing to sleep while waiting for GPU resources,
* sleep interruptible.
* @persistant_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
* holds a pointer to a persistent shmem object. Typically, this would
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface.
* @p_bo: On successful completion *p_bo points to the created object.
*
* This function allocates a ttm_buffer_object, and then calls ttm_bo_init
* on that object. The destroy function is set to kfree().
* Returns
* -ENOMEM: Out of memory.
* -EINVAL: Invalid placement flags.
* -ERESTARTSYS: Interrupted by signal while waiting for resources.
*/
extern int ttm_bo_create(struct ttm_bo_device *bdev,
unsigned long size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
unsigned long buffer_start,
bool interruptible,
struct file *persistant_swap_storage,
struct ttm_buffer_object **p_bo);
/**
* ttm_bo_check_placement
*
* @bo: the buffer object.
* @placement: placements
*
* Performs minimal validity checking on an intended change of
* placement flags.
* Returns
* -EINVAL: Intended change is invalid or not allowed.
*/
extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
struct ttm_placement *placement);
/**
* ttm_bo_init_mm
*
* @bdev: Pointer to a ttm_bo_device struct.
* @mem_type: The memory type.
* @p_size: size of the managed area in pages.
*
* Initialize a manager for a given memory type.
* Note: if part of driver firstopen, it must be protected from a
* potentially racing lastclose.
* Returns:
* -EINVAL: invalid size or memory type.
* -ENOMEM: Not enough memory.
* May also return driver-specified errors.
*/
extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
unsigned long p_size);
/**
* ttm_bo_clean_mm
*
* @bdev: Pointer to a ttm_bo_device struct.
* @mem_type: The memory type.
*
* Take down a manager for a given memory type after first walking
* the LRU list to evict any buffers left alive.
*
* Normally, this function is part of lastclose() or unload(), and at that
* point there shouldn't be any buffers left created by user-space, since
* they should have been removed by the file descriptor release() method.
* However, before this function is run, make sure to signal all sync objects,
* and verify that the delayed delete queue is empty. The driver must also
* make sure that there are no NO_EVICT buffers present in this memory type
* when the call is made.
*
* If this function is part of a VT switch, the caller must make sure that
* there are no applications currently validating buffers before this
* function is called. The caller can do that by first taking the
* struct ttm_bo_device::ttm_lock in write mode.
*
* Returns:
* -EINVAL: invalid or uninitialized memory type.
* -EBUSY: There are still buffers left in this memory type.
*/
extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
/**
* ttm_bo_evict_mm
*
* @bdev: Pointer to a ttm_bo_device struct.
* @mem_type: The memory type.
*
* Evicts all buffers on the lru list of the memory type.
* This is normally part of a VT switch or an
* out-of-memory-space-due-to-fragmentation handler.
* The caller must make sure that there are no other processes
* currently validating buffers, and can do that by taking the
* struct ttm_bo_device::ttm_lock in write mode.
*
* Returns:
* -EINVAL: Invalid or uninitialized memory type.
* -ERESTARTSYS: The call was interrupted by a signal while waiting to
* evict a buffer.
*/
extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
/**
* ttm_kmap_obj_virtual
*
* @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
* @is_iomem: Pointer to an integer that on return indicates 1 if the
* virtual map is io memory, 0 if normal memory.
*
* Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
* If *is_iomem is 1 on return, the virtual address points to an io memory area
* that should strictly be accessed by the iowriteXX() and similar functions.
*/
static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
bool *is_iomem)
{
*is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
return map->virtual;
}
/**
* ttm_bo_kmap
*
* @bo: The buffer object.
* @start_page: The first page to map.
* @num_pages: Number of pages to map.
* @map: pointer to a struct ttm_bo_kmap_obj representing the map.
*
* Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
* data in the buffer object. The ttm_kmap_obj_virtual function can then be
* used to obtain a virtual address to the data.
*
* Returns
* -ENOMEM: Out of memory.
* -EINVAL: Invalid range.
*/
extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
unsigned long num_pages, struct ttm_bo_kmap_obj *map);
/**
* ttm_bo_kunmap
*
* @map: Object describing the map to unmap.
*
* Unmaps a kernel map set up by ttm_bo_kmap.
*/
extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
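/*
* Illustrative CPU-access sketch (not part of the original header): mapping
* the first page of a buffer object into the kernel and clearing it, using
* the kmap helpers declared above. "bo" is an assumed, already set up
* buffer object.
*
*	struct ttm_bo_kmap_obj map;
*	bool is_iomem;
*	int ret = ttm_bo_kmap(bo, 0, 1, &map);
*	if (ret == 0) {
*		void *ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
*		if (is_iomem)
*			iowrite32(0, (void __iomem *)ptr);  /-/ io memory: use iowriteXX()
*		else
*			memset(ptr, 0, PAGE_SIZE);          /-/ normal memory: plain access
*		ttm_bo_kunmap(&map);
*	}
*/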
#if 0
#endif
/**
* ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
*
* @vma: vma as input from the fbdev mmap method.
* @bo: The bo backing the address space. The address space will
* have the same size as the bo, and start at offset 0.
*
* This function is intended to be called by the fbdev mmap method
* if the fbdev address space is to be backed by a bo.
*/
extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
struct ttm_buffer_object *bo);
/**
* ttm_bo_mmap - mmap out of the ttm device address space.
*
* @filp: filp as input from the mmap method.
* @vma: vma as input from the mmap method.
* @bdev: Pointer to the ttm_bo_device with the address space manager.
*
* This function is intended to be called by the device mmap method
* if the device address space is to be backed by the bo manager.
*/
extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev);
/**
* ttm_bo_io
*
* @bdev: Pointer to the struct ttm_bo_device.
* @filp: Pointer to the struct file attempting to read / write.
* @wbuf: User-space pointer to address of buffer to write. NULL on read.
* @rbuf: User-space pointer to address of buffer to read into.
* Null on write.
* @count: Number of bytes to read / write.
* @f_pos: Pointer to current file position.
* @write: 1 for write, 0 for read.
*
* This function implements read / write into ttm buffer objects, and is
* intended to be called from the fops::read and fops::write methods.
* Returns:
* See man (2) write, man(2) read. In particular,
* the function may return -ERESTARTSYS if
* interrupted by a signal.
*/
extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
const char __user *wbuf, char __user *rbuf,
size_t count, loff_t *f_pos, bool write);
extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
#endif

View File

@ -0,0 +1,924 @@
/**************************************************************************
*
* Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
#include "drm_mm.h"
#include "linux/spinlock.h"
struct ttm_backend;
struct ttm_backend_func {
/**
* struct ttm_backend_func member populate
*
* @backend: Pointer to a struct ttm_backend.
* @num_pages: Number of pages to populate.
* @pages: Array of pointers to ttm pages.
* @dummy_read_page: Page to be used instead of NULL pages in the
* array @pages.
*
* Populate the backend with ttm pages. Depending on the backend,
* it may or may not copy the @pages array.
*/
int (*populate) (struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
struct page *dummy_read_page);
/**
* struct ttm_backend_func member clear
*
* @backend: Pointer to a struct ttm_backend.
*
* This is an "unpopulate" function. Release all resources
* allocated with populate.
*/
void (*clear) (struct ttm_backend *backend);
/**
* struct ttm_backend_func member bind
*
* @backend: Pointer to a struct ttm_backend.
* @bo_mem: Pointer to a struct ttm_mem_reg describing the
* memory type and location for binding.
*
* Bind the backend pages into the aperture in the location
* indicated by @bo_mem. This function should be able to handle
* differences between aperture- and system page sizes.
*/
int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);
/**
* struct ttm_backend_func member unbind
*
* @backend: Pointer to a struct ttm_backend.
*
* Unbind previously bound backend pages. This function should be
* able to handle differences between aperture- and system page sizes.
*/
int (*unbind) (struct ttm_backend *backend);
/**
* struct ttm_backend_func member destroy
*
* @backend: Pointer to a struct ttm_backend.
*
* Destroy the backend.
*/
void (*destroy) (struct ttm_backend *backend);
};
/**
* struct ttm_backend
*
* @bdev: Pointer to a struct ttm_bo_device.
* @flags: For driver use.
* @func: Pointer to a struct ttm_backend_func that describes
* the backend methods.
*
*/
struct ttm_backend {
struct ttm_bo_device *bdev;
uint32_t flags;
struct ttm_backend_func *func;
};
#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
#define TTM_PAGE_FLAG_USER (1 << 1)
#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
#define TTM_PAGE_FLAG_WRITE (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
#define TTM_PAGE_FLAG_DMA32 (1 << 7)
enum ttm_caching_state {
tt_uncached,
tt_wc,
tt_cached
};
/**
* struct ttm_tt
*
* @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
* pointer.
* @pages: Array of pages backing the data.
* @first_himem_page: Himem pages are put last in the page array, which
* enables us to run caching attribute changes on only the first part
* of the page array containing lomem pages. This is the index of the
* first himem page.
* @last_lomem_page: Index of the last lomem page in the page array.
* @num_pages: Number of pages in the page array.
* @bdev: Pointer to the current struct ttm_bo_device.
* @be: Pointer to the ttm backend.
* @tsk: The task for user ttm.
* @start: virtual address for user ttm.
* @swap_storage: Pointer to shmem struct file for swap storage.
* @caching_state: The current caching state of the pages.
* @state: The current binding state of the pages.
*
* This is a structure holding the pages, caching- and aperture binding
* status for a buffer object that isn't backed by fixed (VRAM / AGP)
* memory.
*/
struct ttm_tt {
struct page *dummy_read_page;
struct page **pages;
long first_himem_page;
long last_lomem_page;
uint32_t page_flags;
unsigned long num_pages;
struct ttm_bo_global *glob;
struct ttm_backend *be;
struct task_struct *tsk;
unsigned long start;
struct file *swap_storage;
enum ttm_caching_state caching_state;
enum {
tt_bound,
tt_unbound,
tt_unpopulated,
} state;
};
#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
before kernel access. */
#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
/**
* struct ttm_mem_type_manager
*
* @has_type: The memory type has been initialized.
* @use_type: The memory type is enabled.
* @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
* managed by this memory type.
* @gpu_offset: If used, the GPU offset of the first managed page of
* fixed memory or the first managed location in an aperture.
* @io_offset: The io_offset of the first managed page of IO memory or
* the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
* memory, this should be set to NULL.
* @io_size: The size of a managed IO region (fixed memory or aperture).
* @io_addr: Virtual kernel address if the io region is pre-mapped. For
* TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
* @io_addr should be set to NULL.
* @size: Size of the managed region.
* @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
* as defined in ttm_placement_common.h
* @default_caching: The default caching policy used for a buffer object
* placed in this memory type if the user doesn't provide one.
* @manager: The range manager used for this memory type. FIXME: If the aperture
* has a page size different from the underlying system, the granularity
* of this manager should take care of this. But the range allocating code
* in ttm_bo.c needs to be modified for this.
* @lru: The lru list for this memory type.
*
* This structure is used to identify and manage memory types for a device.
* It's set up by the ttm_bo_driver::init_mem_type method.
*/
struct ttm_mem_type_manager {
/*
* No protection. Constant from start.
*/
bool has_type;
bool use_type;
uint32_t flags;
unsigned long gpu_offset;
unsigned long io_offset;
unsigned long io_size;
void *io_addr;
uint64_t size;
uint32_t available_caching;
uint32_t default_caching;
/*
* Protected by the bdev->lru_lock.
* TODO: Consider one lru_lock per ttm_mem_type_manager.
* Plays ill with list removal, though.
*/
struct drm_mm manager;
struct list_head lru;
};
/**
* struct ttm_bo_driver
*
* @create_ttm_backend_entry: Callback to create a struct ttm_backend.
* @invalidate_caches: Callback to invalidate read caches when a buffer object
* has been evicted.
* @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
* structure.
* @evict_flags: Callback to obtain placement flags when a buffer is evicted.
* @move: Callback for a driver to hook in accelerated functions to
* move a buffer.
* If set to NULL, a potentially slow memcpy() move is used.
* @sync_obj_signaled: See ttm_fence_api.h
* @sync_obj_wait: See ttm_fence_api.h
* @sync_obj_flush: See ttm_fence_api.h
* @sync_obj_unref: See ttm_fence_api.h
* @sync_obj_ref: See ttm_fence_api.h
*/
struct ttm_bo_driver {
/**
* struct ttm_bo_driver member create_ttm_backend_entry
*
* @bdev: The buffer object device.
*
* Create a driver specific struct ttm_backend.
*/
struct ttm_backend *(*create_ttm_backend_entry)
(struct ttm_bo_device *bdev);
/**
* struct ttm_bo_driver member invalidate_caches
*
* @bdev: the buffer object device.
* @flags: new placement of the rebound buffer object.
*
* A previously evicted buffer has been rebound in a
* potentially new location. Tell the driver that it might
* consider invalidating read (texture) caches on the next command
* submission as a consequence.
*/
int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man);
/**
* struct ttm_bo_driver member evict_flags:
*
* @bo: the buffer object to be evicted
*
* Return the bo flags for a buffer which is not mapped to the hardware.
* These will be placed in proposed_flags so that when the move is
* finished, they'll end up in bo->mem.flags
*/
void(*evict_flags) (struct ttm_buffer_object *bo,
struct ttm_placement *placement);
/**
* struct ttm_bo_driver member move:
*
* @bo: the buffer to move
* @evict: whether this motion is evicting the buffer from
* the graphics address space
* @interruptible: Use interruptible sleeps if possible when sleeping.
* @no_wait: whether this should give up and return -EBUSY
* if this move would require sleeping
* @new_mem: the new memory region receiving the buffer
*
* Move a buffer between two memory regions.
*/
int (*move) (struct ttm_buffer_object *bo,
bool evict, bool interruptible,
bool no_wait, struct ttm_mem_reg *new_mem);
/**
* struct ttm_bo_driver_member verify_access
*
* @bo: Pointer to a buffer object.
* @filp: Pointer to a struct file trying to access the object.
*
* Called from the map / write / read methods to verify that the
* caller is permitted to access the buffer object.
* This member may be set to NULL, which will refuse this kind of
* access for all buffer objects.
* This function should return 0 if access is granted, -EPERM otherwise.
*/
int (*verify_access) (struct ttm_buffer_object *bo,
struct file *filp);
/**
* In case a driver writer dislikes the TTM fence objects,
* the driver writer can replace those with sync objects of
* his / her own. If it turns out that no driver writer is
* using these, I suggest we remove these hooks and plug in
* fences directly. The bo driver needs the following functionality:
* See the corresponding functions in the fence object API
* documentation.
*/
bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
bool lazy, bool interruptible);
int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
void (*sync_obj_unref) (void **sync_obj);
void *(*sync_obj_ref) (void *sync_obj);
/* hook to notify driver about a driver move so it
* can do tiling things */
void (*move_notify)(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
/* notify the driver we are taking a fault on this BO
* and have reserved it */
void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
};
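/*
* Illustrative hook-up sketch (not part of the original header): the shape of
* a minimal driver ops table. Every name prefixed "mydrv_" is an assumed
* driver-side function.
*
*	static struct ttm_bo_driver mydrv_bo_driver = {
*		.create_ttm_backend_entry = mydrv_create_backend,
*		.invalidate_caches        = mydrv_invalidate_caches,
*		.init_mem_type            = mydrv_init_mem_type,
*		.evict_flags              = mydrv_evict_flags,
*		.move                     = NULL,   /-/ fall back to memcpy moves
*		.verify_access            = mydrv_verify_access,
*		.sync_obj_signaled        = mydrv_fence_signaled,
*		.sync_obj_wait            = mydrv_fence_wait,
*		.sync_obj_flush           = mydrv_fence_flush,
*		.sync_obj_unref           = mydrv_fence_unref,
*		.sync_obj_ref             = mydrv_fence_ref,
*	};
*/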
/**
* struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
*/
struct ttm_bo_global_ref {
struct ttm_global_reference ref;
struct ttm_mem_global *mem_glob;
};
/**
* struct ttm_bo_global - Buffer object driver global data.
*
* @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
* @dummy_read_page: Pointer to a dummy page used for mapping requests
* of unpopulated pages.
* @shrink: A shrink callback object used for buffer object swap.
* @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
* used by a buffer object. This is excluding page arrays and backing pages.
* @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
* @device_list_mutex: Mutex protecting the device list.
* This mutex is held while traversing the device list for pm options.
* @lru_lock: Spinlock protecting the bo subsystem lru lists.
* @device_list: List of buffer object devices.
* @swap_lru: Lru list of buffer objects used for swapping.
*/
struct ttm_bo_global {
/**
* Constant after init.
*/
// struct kobject kobj;
struct ttm_mem_global *mem_glob;
struct page *dummy_read_page;
struct ttm_mem_shrink shrink;
size_t ttm_bo_extra_size;
size_t ttm_bo_size;
// struct mutex device_list_mutex;
spinlock_t lru_lock;
/**
* Protected by device_list_mutex.
*/
struct list_head device_list;
/**
* Protected by the lru_lock.
*/
struct list_head swap_lru;
/**
* Internal protection.
*/
atomic_t bo_count;
};
#define TTM_NUM_MEM_TYPES 8
#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1
/**
* struct ttm_bo_device - Buffer object driver device-specific data.
*
* @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
* @man: An array of mem_type_managers.
* @addr_space_mm: Range manager for the device address space.
* @lru_lock: Spinlock that protects the buffer+device lru lists and
* ddestroy lists.
* @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
* If a GPU lockup has been detected, this is forced to 0.
* @dev_mapping: A pointer to the struct address_space representing the
* device address space.
* @wq: Work queue structure for the delayed delete workqueue.
*
*/
struct ttm_bo_device {
/*
* Constant after bo device init / atomic.
*/
struct list_head device_list;
struct ttm_bo_global *glob;
struct ttm_bo_driver *driver;
rwlock_t vm_lock;
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
/*
* Protected by the vm lock.
*/
// struct rb_root addr_space_rb;
struct drm_mm addr_space_mm;
/*
* Protected by the global:lru lock.
*/
struct list_head ddestroy;
/*
* Protected by load / firstopen / lastclose /unload sync.
*/
bool nice_mode;
struct address_space *dev_mapping;
/*
* Internal protection.
*/
// struct delayed_work wq;
bool need_dma32;
};
/**
* ttm_flag_masked
*
* @old: Pointer to the result and original value.
* @new: New value of bits.
* @mask: Mask of bits to change.
*
* Convenience function to change a number of bits identified by a mask.
*/
static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
*old ^= (*old ^ new) & mask;
return *old;
}
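/*
* Illustrative one-liner (not part of the original header): replace only the
* caching bits of a placement word, leaving the other bits untouched. The
* TTM_PL_FLAG_CACHED / TTM_PL_MASK_CACHING names are assumed to come from the
* placement flag header.
*
*	ttm_flag_masked(&bo->mem.placement, TTM_PL_FLAG_CACHED, TTM_PL_MASK_CACHING);
*/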
/**
* ttm_tt_create
*
* @bdev: pointer to a struct ttm_bo_device:
* @size: Size of the data that needs backing.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
* @dummy_read_page: See struct ttm_bo_device.
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
* Returns:
* NULL: Out of memory.
*/
extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
unsigned long size,
uint32_t page_flags,
struct page *dummy_read_page);
/**
* ttm_tt_set_user:
*
* @ttm: The struct ttm_tt to populate.
* @tsk: A struct task_struct for which @start is a valid user-space address.
* @start: A valid user-space address.
* @num_pages: Size in pages of the user memory area.
*
* Populate a struct ttm_tt with a user-space memory area after first pinning
* the pages backing it.
* Returns:
* !0: Error.
*/
extern int ttm_tt_set_user(struct ttm_tt *ttm,
struct task_struct *tsk,
unsigned long start, unsigned long num_pages);
/**
* ttm_tt_bind:
*
* @ttm: The struct ttm_tt containing backing pages.
* @bo_mem: The struct ttm_mem_reg identifying the binding location.
*
* Bind the pages of @ttm to an aperture location identified by @bo_mem
*/
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
/**
* ttm_tt_populate:
*
* @ttm: The struct ttm_tt to contain the backing pages.
*
* Add backing pages to all of @ttm
*/
extern int ttm_tt_populate(struct ttm_tt *ttm);
/**
* ttm_tt_destroy:
*
* @ttm: The struct ttm_tt.
*
* Unbind, unpopulate and destroy a struct ttm_tt.
*/
extern void ttm_tt_destroy(struct ttm_tt *ttm);
/**
* ttm_tt_unbind:
*
* @ttm: The struct ttm_tt.
*
* Unbind a struct ttm_tt.
*/
extern void ttm_tt_unbind(struct ttm_tt *ttm);
/**
* ttm_tt_get_page:
*
* @ttm: The struct ttm_tt.
* @index: Index of the desired page.
*
* Return a pointer to the struct page backing @ttm at page
* index @index. If the page is unpopulated, one will be allocated to
* populate that index.
*
* Returns:
* NULL on OOM.
*/
extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
/**
* ttm_tt_cache_flush:
*
* @pages: An array of pointers to struct page:s to flush.
* @num_pages: Number of pages to flush.
*
* Flush the data of the indicated pages from the cpu caches.
* This is used when changing caching attributes of the pages from
* cache-coherent.
*/
extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
/**
* ttm_tt_set_placement_caching:
*
* @ttm: A struct ttm_tt whose backing pages will change caching policy.
* @placement: Flag indicating the desired caching policy.
*
* This function will change caching policy of any default kernel mappings of
* the pages backing @ttm. If changing from cached to uncached or
* write-combined,
* all CPU caches will first be flushed to make sure the data of the pages
* hit RAM. This function may be very costly as it involves global TLB
* and cache flushes and potential page splitting / combining.
*/
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
struct file *persistant_swap_storage);
/*
* ttm_bo.c
*/
/**
* ttm_mem_reg_is_pci
*
* @bdev: Pointer to a struct ttm_bo_device.
* @mem: A valid struct ttm_mem_reg.
*
* Returns true if the memory described by @mem is PCI memory,
* false otherwise.
*/
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem);
/**
* ttm_bo_mem_space
*
* @bo: Pointer to a struct ttm_buffer_object, the data of which
* we want to allocate space for.
* @proposed_placement: Proposed new placement for the buffer object.
* @mem: A struct ttm_mem_reg.
* @interruptible: Sleep interruptibly when sleeping.
* @no_wait: Don't sleep waiting for space to become available.
*
* Allocate memory space for the buffer object pointed to by @bo, using
* the placement flags in @mem, potentially evicting other idle buffer objects.
* This function may sleep while waiting for space to become available.
* Returns:
* -EBUSY: No space available (only if no_wait == 1).
* -ENOMEM: Could not allocate memory for the buffer object, either due to
* fragmentation or concurrent allocators.
* -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
bool interruptible, bool no_wait);
/**
* ttm_bo_wait_for_cpu
*
* @bo: Pointer to a struct ttm_buffer_object.
* @no_wait: Don't sleep while waiting.
*
* Wait until a buffer object is no longer sync'ed for CPU access.
* Returns:
* -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
* -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
/**
* ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
*
* @bo Pointer to a struct ttm_buffer_object.
* @bus_base On return the base of the PCI region
* @bus_offset On return the byte offset into the PCI region
* @bus_size On return the byte size of the buffer object or zero if
* the buffer object memory is not accessible through a PCI region.
*
* Returns:
* -EINVAL if the buffer object is currently not mappable.
* 0 otherwise.
*/
extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem,
unsigned long *bus_base,
unsigned long *bus_offset,
unsigned long *bus_size);
extern void ttm_bo_global_release(struct ttm_global_reference *ref);
extern int ttm_bo_global_init(struct ttm_global_reference *ref);
extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
/**
* ttm_bo_device_init
*
* @bdev: A pointer to a struct ttm_bo_device to initialize.
* @mem_global: A pointer to an initialized struct ttm_mem_global.
* @driver: A pointer to a struct ttm_bo_driver set up by the caller.
* @file_page_offset: Offset into the device address space that is available
* for buffer data. This ensures compatibility with other users of the
* address space.
*
* Initializes a struct ttm_bo_device:
* Returns:
* !0: Failure.
*/
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
uint64_t file_page_offset, bool need_dma32);
/**
* ttm_bo_unmap_virtual
*
* @bo: tear down the virtual mappings for this BO
*/
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
/**
* ttm_bo_reserve:
*
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
* @use_sequence: If @bo is already reserved, Only sleep waiting for
* it to become unreserved if @sequence < (@bo)->sequence.
*
* Locks a buffer object for validation. (Or prevents other processes from
* locking it for validation) and removes it from lru lists, while taking
* a number of measures to prevent deadlocks.
*
* Deadlocks may occur when two processes try to reserve multiple buffers in
* different order, either by will or as a result of a buffer being evicted
* to make room for a buffer already reserved. (Buffers are reserved before
* they are evicted). The following algorithm prevents such deadlocks from
* occurring:
* 1) Buffers are reserved with the lru spinlock held. Upon successful
* reservation they are removed from the lru list. This stops a reserved buffer
* from being evicted. However the lru spinlock is released between the time
* a buffer is selected for eviction and the time it is reserved.
* Therefore a check is made when a buffer is reserved for eviction, that it
* is still the first buffer in the lru list, before it is removed from the
* list. @check_lru == 1 forces this check. If it fails, the function returns
* -EINVAL, and the caller should then choose a new buffer to evict and repeat
* the procedure.
* 2) Processes attempting to reserve multiple buffers other than for eviction,
* (typically execbuf), should first obtain a unique 32-bit
* validation sequence number,
* and call this function with @use_sequence == 1 and @sequence == the unique
* sequence number. If upon call of this function, the buffer object is already
* reserved, the validation sequence is checked against the validation
* sequence of the process currently reserving the buffer,
* and if the current validation sequence is greater than that of the process
* holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
* waiting for the buffer to become unreserved, after which it retries
* reserving.
* The caller should, when receiving an -EAGAIN error
* release all its buffer reservations, wait for @bo to become unreserved, and
* then rerun the validation with the same validation sequence. This procedure
* will always guarantee that the process with the lowest validation sequence
* will eventually succeed, preventing both deadlocks and starvation.
*
* Returns:
* -EAGAIN: The reservation may cause a deadlock.
* Release all buffer reservations, wait for @bo to become unreserved and
* try again. (only if use_sequence == 1).
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
*/
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_sequence, uint32_t sequence);
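/*
* Illustrative locking sketch (not part of the original header): the common
* single-buffer pattern of reserve -> validate -> unreserve, without the
* multi-buffer validation-sequence handling described above. "bo" and
* "placement" are assumed caller-side variables.
*
*	ret = ttm_bo_reserve(bo, true, false, false, 0);
*	if (ret == 0) {
*		ret = ttm_bo_validate(bo, &placement, true, false);
*		ttm_bo_unreserve(bo);
*	}
*/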
/**
* ttm_bo_unreserve
*
* @bo: A pointer to a struct ttm_buffer_object.
*
* Unreserve a previous reservation of @bo.
*/
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
/**
* ttm_bo_wait_unreserved
*
* @bo: A pointer to a struct ttm_buffer_object.
*
* Wait for a struct ttm_buffer_object to become unreserved.
* This is typically used in the execbuf code to relax cpu-usage when
* backing off from a potential deadlock condition.
*/
extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
bool interruptible);
/**
* ttm_bo_block_reservation
*
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Use interruptible sleep when waiting.
* @no_wait: Don't sleep, but rather return -EBUSY.
*
* Block reservation for validation by simply reserving the buffer.
* This is intended for single buffer use only without eviction,
* and thus needs no deadlock protection.
*
* Returns:
* -EBUSY: If no_wait == 1 and the buffer is already reserved.
* -ERESTARTSYS: If interruptible == 1 and the process received a signal
* while sleeping.
*/
extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait);
/**
* ttm_bo_unblock_reservation
*
* @bo: A pointer to a struct ttm_buffer_object.
*
* Unblocks reservation leaving lru lists untouched.
*/
extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
/*
* ttm_bo_util.c
*/
/**
* ttm_bo_move_ttm
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
* @no_wait: Never sleep, but rather return with -EBUSY.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
* Optimized move function for a buffer object with both old and
* new placement backed by a TTM. The function will, if successful,
* free any old aperture space, and set (@new_mem)->mm_node to NULL,
* and update the (@bo)->mem placement flags. If unsuccessful, the old
* data remains untouched, and it's up to the caller to free the
* memory space indicated by @new_mem.
* Returns:
* !0: Failure.
*/
extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
bool evict, bool no_wait,
struct ttm_mem_reg *new_mem);
/**
* ttm_bo_move_memcpy
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
* @no_wait: Never sleep, but rather return with -EBUSY.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
* Fallback move function for a mappable buffer object in mappable memory.
* The function will, if successful,
* free any old aperture space, and set (@new_mem)->mm_node to NULL,
* and update the (@bo)->mem placement flags. If unsuccessful, the old
* data remains untouched, and it's up to the caller to free the
* memory space indicated by @new_mem.
* Returns:
* !0: Failure.
*/
extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
bool evict,
bool no_wait, struct ttm_mem_reg *new_mem);
/**
* ttm_bo_free_old_node
*
* @bo: A pointer to a struct ttm_buffer_object.
*
* Utility function to free an old placement after a successful move.
*/
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
/**
* ttm_bo_move_accel_cleanup.
*
* @bo: A pointer to a struct ttm_buffer_object.
* @sync_obj: A sync object that signals when moving is complete.
* @sync_obj_arg: An argument to pass to the sync object idle / wait
* functions.
* @evict: This is an evict move. Don't return until the buffer is idle.
* @no_wait: Never sleep, but rather return with -EBUSY.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
* Accelerated move function to be called when an accelerated move
* has been scheduled. The function will create a new temporary buffer object
* representing the old placement, and put the sync object on both buffer
* objects. After that the newly created buffer object is unref'd to be
* destroyed when the move is complete. This will help pipeline
* buffer moves.
*/
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
void *sync_obj_arg,
bool evict, bool no_wait,
struct ttm_mem_reg *new_mem);
/**
* ttm_io_prot
*
* @c_state: Caching state.
* @tmp: Page protection flag for a normal, cached mapping.
*
* Utility function that returns the pgprot_t that should be used for
* setting up a PTE with the caching model indicated by @c_state.
*/
extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);
#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
#include <linux/agp_backend.h>
/**
* ttm_agp_backend_init
*
* @bdev: Pointer to a struct ttm_bo_device.
* @bridge: The agp bridge this device is sitting on.
*
* Create a TTM backend that uses the indicated AGP bridge as an aperture
* for TT memory. This function uses the linux agpgart interface to
* bind and unbind memory backing a ttm_tt.
*/
extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
struct agp_bridge_data *bridge);
#endif
#endif

View File

@ -0,0 +1,159 @@
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef TTM_MEMORY_H
#define TTM_MEMORY_H
//#include <linux/workqueue.h>
#include <linux/spinlock.h>
//#include <linux/wait.h>
#include <linux/errno.h>
//#include <linux/kobject.h>
//#include <linux/mm.h>
/**
* struct ttm_mem_shrink - callback to shrink TTM memory usage.
*
* @do_shrink: The callback function.
*
* Arguments to the do_shrink functions are intended to be passed using
* inheritance. That is, the argument class derives from struct ttm_mem_shrink,
* and can be accessed using container_of().
*/
struct ttm_mem_shrink {
int (*do_shrink) (struct ttm_mem_shrink *);
};
/**
* struct ttm_mem_global - Global memory accounting structure.
*
* @shrink: A single callback to shrink TTM memory usage. Extend this
* to a linked list to be able to handle multiple callbacks when needed.
* @swap_queue: A workqueue to handle shrinking in low memory situations. We
* need a separate workqueue since it will spend a lot of time waiting
* for the GPU, and this will otherwise block other workqueue tasks(?)
* At this point we use only a single-threaded workqueue.
* @work: The workqueue callback for the shrink queue.
* @queue: Wait queue for processes suspended waiting for memory.
* @lock: Lock to protect the @shrink callback and the memory accounting members,
* that is, essentially the whole structure with some exceptions.
* @zones: Array of pointers to accounting zones.
* @num_zones: Number of populated entries in the @zones array.
* @zone_kernel: Pointer to the kernel zone.
* @zone_highmem: Pointer to the highmem zone if there is one.
* @zone_dma32: Pointer to the dma32 zone if there is one.
*
* Note that this structure is not per device. It should be global for all
* graphics devices.
*/
#define TTM_MEM_MAX_ZONES 2
struct ttm_mem_zone;
struct ttm_mem_global {
// struct kobject kobj;
struct ttm_mem_shrink *shrink;
// struct workqueue_struct *swap_queue;
// struct work_struct work;
// wait_queue_head_t queue;
spinlock_t lock;
struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
unsigned int num_zones;
struct ttm_mem_zone *zone_kernel;
#ifdef CONFIG_HIGHMEM
struct ttm_mem_zone *zone_highmem;
#else
struct ttm_mem_zone *zone_dma32;
#endif
};
/**
* ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
*
* @shrink: The object to initialize.
* @func: The callback function.
*/
static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
int (*func) (struct ttm_mem_shrink *))
{
shrink->do_shrink = func;
}
/**
* ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
*
* @glob: The struct ttm_mem_global object to register with.
* @shrink: An initialized struct ttm_mem_shrink object to register.
*
* Returns:
* -EBUSY: There's already a callback registered. (May change).
*/
static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
struct ttm_mem_shrink *shrink)
{
spin_lock(&glob->lock);
if (glob->shrink != NULL) {
spin_unlock(&glob->lock);
return -EBUSY;
}
glob->shrink = shrink;
spin_unlock(&glob->lock);
return 0;
}
/**
* ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
*
* @glob: The struct ttm_mem_global object to unregister from.
* @shrink: A previously registered struct ttm_mem_shrink object.
*
*/
static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
struct ttm_mem_shrink *shrink)
{
spin_lock(&glob->lock);
BUG_ON(glob->shrink != shrink);
glob->shrink = NULL;
spin_unlock(&glob->lock);
}
extern int ttm_mem_global_init(struct ttm_mem_global *glob);
extern void ttm_mem_global_release(struct ttm_mem_global *glob);
extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
bool no_wait, bool interruptible);
extern void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount);
extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct page *page,
bool no_wait, bool interruptible);
extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
struct page *page);
extern size_t ttm_round_pot(size_t size);
#endif
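A hedged sketch of the accounting flow these declarations imply: a consumer charges a size against the global zones with ttm_mem_global_alloc() and must balance it with ttm_mem_global_free() when the backing memory is released. The function name and the flag choices below are illustrative assumptions.
static int my_account_and_use(struct ttm_mem_global *glob, uint64_t size)
{
        /* Charge 'size' bytes against the global zones; this may invoke
         * the registered shrink callback if a zone is over its limit. */
        int ret = ttm_mem_global_alloc(glob, size, false /* no_wait */,
                                       true /* interruptible */);
        if (ret)
                return ret;

        /* ... allocate and use the real backing memory here ... */

        /* Balance the accounting when the memory goes away again. */
        ttm_mem_global_free(glob, size);
        return 0;
}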

View File

@ -0,0 +1,60 @@
/**************************************************************************
*
* Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#ifndef _TTM_MODULE_H_
#define _TTM_MODULE_H_
#include <linux/kernel.h>
struct kobject;
#define TTM_PFX "[TTM] "
enum ttm_global_types {
TTM_GLOBAL_TTM_MEM = 0,
TTM_GLOBAL_TTM_BO,
TTM_GLOBAL_TTM_OBJECT,
TTM_GLOBAL_NUM
};
struct ttm_global_reference {
enum ttm_global_types global_type;
size_t size;
void *object;
int (*init) (struct ttm_global_reference *);
void (*release) (struct ttm_global_reference *);
};
extern void ttm_global_init(void);
extern void ttm_global_release(void);
extern int ttm_global_item_ref(struct ttm_global_reference *ref);
extern void ttm_global_item_unref(struct ttm_global_reference *ref);
extern struct kobject *ttm_get_kobj(void);
#endif /* _TTM_MODULE_H_ */
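A hedged sketch, modeled on how in-tree DRM drivers use this interface: the driver describes the global object it needs in a struct ttm_global_reference and takes/drops a reference around its own lifetime. The my_* names and the use of ttm_mem_global_init/release from ttm_memory.h are assumptions about the consumer, not part of this header.
static int my_mem_global_init(struct ttm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void my_mem_global_release(struct ttm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

static struct ttm_global_reference my_mem_global_ref = {
        .global_type = TTM_GLOBAL_TTM_MEM,
        .size        = sizeof(struct ttm_mem_global),
        .init        = &my_mem_global_init,
        .release     = &my_mem_global_release,
};

/* driver init:     ttm_global_item_ref(&my_mem_global_ref);   */
/* driver teardown: ttm_global_item_unref(&my_mem_global_ref); */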

View File

@ -0,0 +1,92 @@
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#ifndef _TTM_PLACEMENT_H_
#define _TTM_PLACEMENT_H_
/*
* Memory regions for data placement.
*/
#define TTM_PL_SYSTEM 0
#define TTM_PL_TT 1
#define TTM_PL_VRAM 2
#define TTM_PL_PRIV0 3
#define TTM_PL_PRIV1 4
#define TTM_PL_PRIV2 5
#define TTM_PL_PRIV3 6
#define TTM_PL_PRIV4 7
#define TTM_PL_PRIV5 8
#define TTM_PL_SWAPPED 15
#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
#define TTM_PL_MASK_MEM 0x0000FFFF
/*
* Other flags that affect data placement.
* TTM_PL_FLAG_CACHED indicates cache-coherent mappings
* if available.
* TTM_PL_FLAG_SHARED means that another application may
* reference the buffer.
* TTM_PL_FLAG_NO_EVICT means that the buffer may never
* be evicted to make room for other buffers.
*/
#define TTM_PL_FLAG_CACHED (1 << 16)
#define TTM_PL_FLAG_UNCACHED (1 << 17)
#define TTM_PL_FLAG_WC (1 << 18)
#define TTM_PL_FLAG_SHARED (1 << 20)
#define TTM_PL_FLAG_NO_EVICT (1 << 21)
#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
TTM_PL_FLAG_UNCACHED | \
TTM_PL_FLAG_WC)
#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
/*
* Access flags to be used for CPU- and GPU- mappings.
* The idea is that the TTM synchronization mechanism will
* allow concurrent READ access and exclusive write access.
* Currently GPU- and CPU accesses are exclusive.
*/
#define TTM_ACCESS_READ (1 << 0)
#define TTM_ACCESS_WRITE (1 << 1)
#endif
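A short, hedged illustration of how these flags compose: one memory-type bit from the low 16 bits is combined with caching and behaviour bits from the high bits, and the masks split a placement value apart again. The MY_* name is invented for the example.
/* A write-combined VRAM placement that must never be evicted. */
#define MY_VRAM_WC_PINNED  (TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | \
                            TTM_PL_FLAG_NO_EVICT)

/* Splitting a placement value back into its components:            */
/*   mem_type = flags & TTM_PL_MASK_MEM;      -> TTM_PL_FLAG_VRAM   */
/*   caching  = flags & TTM_PL_MASK_CACHING;  -> TTM_PL_FLAG_WC     */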

Some files were not shown because too many files have changed in this diff.