The simplest memory manager & index buffers.

git-svn-id: svn://kolibrios.org@1120 a494cfbc-eb01-0410-851d-a64ba20cac60

Parent:  e92d5a0b39
Commit:  c9b2d4590c
@@ -0,0 +1,105 @@
/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <types.h>
#include <list.h>
#include <errno-base.h>

#define spin_lock_init(x)
#define spin_lock(x)
#define spin_unlock(x)

struct drm_mm_node {
    struct list_head fl_entry;
    struct list_head ml_entry;
    int free;
    unsigned long start;
    unsigned long size;
    struct drm_mm *mm;
    void *private;
};

struct drm_mm {
    struct list_head fl_entry;
    struct list_head ml_entry;
    struct list_head unused_nodes;
    int num_unused;
//  spinlock_t unused_lock;
};

/*
 * Basic range manager support (drm_mm.c)
 */
extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
                                                    unsigned long size,
                                                    unsigned alignment,
                                                    int atomic);
static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
                                                   unsigned long size,
                                                   unsigned alignment)
{
    return drm_mm_get_block_generic(parent, size, alignment, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
                                                          unsigned long size,
                                                          unsigned alignment)
{
    return drm_mm_get_block_generic(parent, size, alignment, 1);
}
extern void drm_mm_put_block(struct drm_mm_node *cur);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
                                              unsigned long size,
                                              unsigned alignment,
                                              int best_match);
extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
                       unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
extern int drm_mm_clean(struct drm_mm *mm);
extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
                                         unsigned long size);
extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
                                    unsigned long size, int atomic);
extern int drm_mm_pre_get(struct drm_mm *mm);

static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
{
    return block->mm;
}

#endif
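For orientation, a minimal usage sketch of the range-manager API declared above. The offsets, sizes, and function body are invented for illustration; it assumes the matching drm_mm.c implementation is linked in.

/* Manage a 1 MiB range and carve a 4 KiB block out of it. */
static struct drm_mm vram_mm;

static int range_manager_example(void)
{
    struct drm_mm_node *hole, *node;

    /* Track free space in [0, 1 MiB). */
    if (drm_mm_init(&vram_mm, 0, 1024 * 1024))
        return -ENOMEM;

    /* Find a free hole that can fit 4096 bytes at 256-byte alignment. */
    hole = drm_mm_search_free(&vram_mm, 4096, 256, 1 /* best match */);
    if (!hole)
        return -ENOMEM;

    /* Split the block off the hole; the remainder stays on the free list. */
    node = drm_mm_get_block(hole, 4096, 256);

    /* ... node->start is the allocated offset ... */

    drm_mm_put_block(node);     /* return the block to the free list */
    drm_mm_takedown(&vram_mm);  /* manager must be empty here */
    return 0;
}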
@@ -0,0 +1,703 @@
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

//#include <linux/stddef.h>
//#include <linux/poison.h>
//#include <linux/prefetch.h>
//#include <asm/system.h>

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

#define LIST_POISON1  ((struct list_head*)0xFFFF0100)
#define LIST_POISON2  ((struct list_head*)0xFFFF0200)

#define prefetch(x) __builtin_prefetch(x)

struct list_head {
    struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
    struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
    list->next = list;
    list->prev = list;
}

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
#ifndef CONFIG_DEBUG_LIST
static inline void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next)
{
    next->prev = new;
    new->next = next;
    new->prev = prev;
    prev->next = new;
}
#else
extern void __list_add(struct list_head *new,
                       struct list_head *prev,
                       struct list_head *next);
#endif

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
    __list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
    __list_add(new, head->prev, head);
}

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
    next->prev = prev;
    prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty() on entry does not return true after this, the entry is
 * in an undefined state.
 */
#ifndef CONFIG_DEBUG_LIST
static inline void list_del(struct list_head *entry)
{
    __list_del(entry->prev, entry->next);
    entry->next = LIST_POISON1;
    entry->prev = LIST_POISON2;
}
#else
extern void list_del(struct list_head *entry);
#endif

/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * If @old was empty, it will be overwritten.
 */
static inline void list_replace(struct list_head *old,
                                struct list_head *new)
{
    new->next = old->next;
    new->next->prev = new;
    new->prev = old->prev;
    new->prev->next = new;
}

static inline void list_replace_init(struct list_head *old,
                                     struct list_head *new)
{
    list_replace(old, new);
    INIT_LIST_HEAD(old);
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
    __list_del(entry->prev, entry->next);
    INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
    __list_del(list->prev, list->next);
    list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
                                  struct list_head *head)
{
    __list_del(list->prev, list->next);
    list_add_tail(list, head);
}

/**
 * list_is_last - tests whether @list is the last entry in list @head
 * @list: the entry to test
 * @head: the head of the list
 */
static inline int list_is_last(const struct list_head *list,
                               const struct list_head *head)
{
    return list->next == head;
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
    return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is empty and not being modified
 * @head: the list to test
 *
 * Description:
 * tests whether a list is empty _and_ checks that no other CPU might be
 * in the process of modifying either member (next or prev)
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
    struct list_head *next = head->next;
    return (next == head) && (next == head->prev);
}

/**
 * list_is_singular - tests whether a list has just one entry.
 * @head: the list to test.
 */
static inline int list_is_singular(const struct list_head *head)
{
    return !list_empty(head) && (head->next == head->prev);
}

static inline void __list_cut_position(struct list_head *list,
                                       struct list_head *head, struct list_head *entry)
{
    struct list_head *new_first = entry->next;
    list->next = head->next;
    list->next->prev = list;
    list->prev = entry;
    entry->next = list;
    head->next = new_first;
    new_first->prev = head;
}

/**
 * list_cut_position - cut a list into two
 * @list: a new list to add all removed entries
 * @head: a list with entries
 * @entry: an entry within head, could be the head itself
 *	and if so we won't cut the list
 *
 * This helper moves the initial part of @head, up to and
 * including @entry, from @head to @list. You should
 * pass on @entry an element you know is on @head. @list
 * should be an empty list or a list you do not care about
 * losing its data.
 *
 */
static inline void list_cut_position(struct list_head *list,
                                     struct list_head *head, struct list_head *entry)
{
    if (list_empty(head))
        return;
    if (list_is_singular(head) &&
        (head->next != entry && head != entry))
        return;
    if (entry == head)
        INIT_LIST_HEAD(list);
    else
        __list_cut_position(list, head, entry);
}

static inline void __list_splice(const struct list_head *list,
                                 struct list_head *prev,
                                 struct list_head *next)
{
    struct list_head *first = list->next;
    struct list_head *last = list->prev;

    first->prev = prev;
    prev->next = first;

    last->next = next;
    next->prev = last;
}

/**
 * list_splice - join two lists, this is designed for stacks
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(const struct list_head *list,
                               struct list_head *head)
{
    if (!list_empty(list))
        __list_splice(list, head, head->next);
}

/**
 * list_splice_tail - join two lists, each list being a queue
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice_tail(struct list_head *list,
                                    struct list_head *head)
{
    if (!list_empty(list))
        __list_splice(list, head->prev, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
                                    struct list_head *head)
{
    if (!list_empty(list)) {
        __list_splice(list, head, head->next);
        INIT_LIST_HEAD(list);
    }
}

/**
 * list_splice_tail_init - join two lists and reinitialise the emptied list
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * Each of the lists is a queue.
 * The list at @list is reinitialised
 */
static inline void list_splice_tail_init(struct list_head *list,
                                         struct list_head *head)
{
    if (!list_empty(list)) {
        __list_splice(list, head->prev, head);
        INIT_LIST_HEAD(list);
    }
}

/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
    container_of(ptr, type, member)

/**
 * list_first_entry - get the first element from a list
 * @ptr: the list head to take the element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 *
 * Note, that list is expected to be not empty.
 */
#define list_first_entry(ptr, type, member) \
    list_entry((ptr)->next, type, member)

/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 */
#define list_for_each(pos, head) \
    for (pos = (head)->next; prefetch(pos->next), pos != (head); \
         pos = pos->next)

/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
    for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 */
#define list_for_each_prev(pos, head) \
    for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
         pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop cursor.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
    for (pos = (head)->next, n = pos->next; pos != (head); \
         pos = n, n = pos->next)

/**
 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
 * @pos: the &struct list_head to use as a loop cursor.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_each_prev_safe(pos, n, head) \
    for (pos = (head)->prev, n = pos->prev; \
         prefetch(pos->prev), pos != (head); \
         pos = n, n = pos->prev)

/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member) \
    for (pos = list_entry((head)->next, typeof(*pos), member); \
         prefetch(pos->member.next), &pos->member != (head); \
         pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
    for (pos = list_entry((head)->prev, typeof(*pos), member); \
         prefetch(pos->member.prev), &pos->member != (head); \
         pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_struct within the struct.
 *
 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
 */
#define list_prepare_entry(pos, head, member) \
    ((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - continue iteration over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue(pos, head, member) \
    for (pos = list_entry(pos->member.next, typeof(*pos), member); \
         prefetch(pos->member.next), &pos->member != (head); \
         pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_reverse - iterate backwards from the given point
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Start to iterate over list of given type backwards, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_reverse(pos, head, member) \
    for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
         prefetch(pos->member.prev), &pos->member != (head); \
         pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_for_each_entry_from - iterate over list of given type from the current point
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing from current position.
 */
#define list_for_each_entry_from(pos, head, member) \
    for (; prefetch(pos->member.next), &pos->member != (head); \
         pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
    for (pos = list_entry((head)->next, typeof(*pos), member), \
         n = list_entry(pos->member.next, typeof(*pos), member); \
         &pos->member != (head); \
         pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_continue
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing after current point,
 * safe against removal of list entry.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) \
    for (pos = list_entry(pos->member.next, typeof(*pos), member), \
         n = list_entry(pos->member.next, typeof(*pos), member); \
         &pos->member != (head); \
         pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_from
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate over list of given type from current point, safe against
 * removal of list entry.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) \
    for (n = list_entry(pos->member.next, typeof(*pos), member); \
         &pos->member != (head); \
         pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_reverse
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate backwards over list of given type, safe against removal
 * of list entry.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
    for (pos = list_entry((head)->prev, typeof(*pos), member), \
         n = list_entry(pos->member.prev, typeof(*pos), member); \
         &pos->member != (head); \
         pos = n, n = list_entry(n->member.prev, typeof(*n), member))

/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

#if 0
struct hlist_head {
    struct hlist_node *first;
};

struct hlist_node {
    struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
    h->next = NULL;
    h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
    return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
    return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
    struct hlist_node *next = n->next;
    struct hlist_node **pprev = n->pprev;
    *pprev = next;
    if (next)
        next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
    __hlist_del(n);
    n->next = LIST_POISON1;
    n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
    if (!hlist_unhashed(n)) {
        __hlist_del(n);
        INIT_HLIST_NODE(n);
    }
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
    struct hlist_node *first = h->first;
    n->next = first;
    if (first)
        first->pprev = &n->next;
    h->first = n;
    n->pprev = &h->first;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
                                    struct hlist_node *next)
{
    n->pprev = next->pprev;
    n->next = next;
    next->pprev = &n->next;
    *(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
                                   struct hlist_node *next)
{
    next->next = n->next;
    n->next = next;
    next->pprev = &n->next;

    if(next->next)
        next->next->pprev = &next->next;
}

/*
 * Move a list from one list head to another. Fixup the pprev
 * reference of the first entry if it exists.
 */
static inline void hlist_move_list(struct hlist_head *old,
                                   struct hlist_head *new)
{
    new->first = old->first;
    if (new->first)
        new->first->pprev = &new->first;
    old->first = NULL;
}

#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

#define hlist_for_each(pos, head) \
    for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
         pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
    for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
         pos = n)

/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
    for (pos = (head)->first; \
         pos && ({ prefetch(pos->next); 1;}) && \
         ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
         pos = pos->next)

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
    for (pos = (pos)->next; \
         pos && ({ prefetch(pos->next); 1;}) && \
         ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
         pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
    for (; pos && ({ prefetch(pos->next); 1;}) && \
         ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
         pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
    for (pos = (head)->first; \
         pos && ({ n = pos->next; 1; }) && \
         ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
         pos = n)

#endif

#endif
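For readers new to intrusive lists, a short sketch of the idiom this header enables. The item type and values are invented; container_of() is assumed to be provided elsewhere (e.g. types.h), since list_entry() expands to it.

struct item {
    int value;
    struct list_head link;   /* node embedded in the struct, not a pointer */
};

static LIST_HEAD(items);     /* empty list: head points back to itself */

static void list_demo(void)
{
    struct item a = { .value = 1 }, b = { .value = 2 };
    struct item *pos, *tmp;

    list_add(&a.link, &items);       /* push at the front (stack order)  */
    list_add_tail(&b.link, &items);  /* append at the tail (queue order) */

    /* list_entry()/container_of() recovers the enclosing struct item
     * from the embedded list_head. */
    list_for_each_entry(pos, &items, link)
        /* visit pos->value */;

    /* The _safe variant caches the next node first, so the current
     * entry may be unlinked inside the loop body. */
    list_for_each_entry_safe(pos, tmp, &items, link)
        list_del(&pos->link);
}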
@@ -1165,7 +1165,7 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
 int atom_asic_init(struct atom_context *ctx)
 {
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
 	uint32_t ps[16];
@@ -3,7 +3,7 @@
 #include <errno-base.h>
 #include <syscall.h>
 
-link_t devices;
+static LIST_HEAD(devices);
 
 static dev_t* pci_scan_device(u32_t bus, int devfn);
 
@@ -346,7 +346,7 @@ static dev_t* pci_scan_device(u32_t bus, int devfn)
 
 	dev = (dev_t*)malloc(sizeof(dev_t));
 
-	link_initialize(&dev->link);
+	INIT_LIST_HEAD(&dev->link);
 
 	if(unlikely(dev == NULL))
 		return NULL;
@@ -375,7 +375,7 @@ int pci_scan_slot(u32_t bus, int devfn)
 	dev = pci_scan_device(bus, devfn);
 	if( dev )
 	{
-		list_append(&dev->link, &devices);
+		list_add(&dev->link, &devices);
 
 		nr++;
 
@@ -420,7 +420,7 @@ int enum_pci_devices()
 	u32_t last_bus;
 	u32_t bus = 0 , devfn = 0;
 
-	list_initialize(&devices);
+//	list_initialize(&devices);
 
 	last_bus = PciApi(1);
 
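Two notes on these pci.c hunks. First, in the @@ -346 hunk, INIT_LIST_HEAD(&dev->link) dereferences dev before the if(unlikely(dev == NULL)) check, so the NULL check comes too late to help; the check should precede the initialization. Second, with devices now a plain list_head, lookups become the standard iteration idiom. A hedged sketch follows; the dev_t field names vendor and device are assumptions, not shown in this diff.

/* Hypothetical lookup over the new list-based device registry; assumes
 * dev_t embeds `struct list_head link` as initialized above. */
static dev_t *find_pci_device(u16_t vendor, u16_t device)
{
    dev_t *dev;

    list_for_each_entry(dev, &devices, link) {
        if (dev->vendor == vendor && dev->device == device)
            return dev;
    }
    return NULL;
}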
@@ -268,6 +268,7 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
 }
 
+#endif
 
 /*
  * Writeback
@@ -307,14 +308,16 @@ int r100_wb_init(struct radeon_device *rdev)
 void r100_wb_fini(struct radeon_device *rdev)
 {
 	if (rdev->wb.wb_obj) {
-		radeon_object_kunmap(rdev->wb.wb_obj);
-		radeon_object_unpin(rdev->wb.wb_obj);
-		radeon_object_unref(&rdev->wb.wb_obj);
+//		radeon_object_kunmap(rdev->wb.wb_obj);
+//		radeon_object_unpin(rdev->wb.wb_obj);
+//		radeon_object_unref(&rdev->wb.wb_obj);
 		rdev->wb.wb = NULL;
 		rdev->wb.wb_obj = NULL;
 	}
 }
 
 
+#if 0
 int r100_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
@@ -415,7 +418,7 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
 {
 	int i;
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	if (r100_gui_wait_for_idle(rdev)) {
 		printk(KERN_WARNING "Failed to wait GUI idle while "
@@ -498,7 +501,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	uint32_t tmp;
 	int r;
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 //	if (r100_debugfs_cp_init(rdev)) {
 //		DRM_ERROR("Failed to register debugfs file for CP !\n");
@@ -624,7 +627,7 @@ int r100_cp_reset(struct radeon_device *rdev)
 	bool reinit_cp;
 	int i;
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 
 	reinit_cp = rdev->cp.ready;
@@ -1170,7 +1173,7 @@ void r100_hdp_reset(struct radeon_device *rdev)
 {
 	uint32_t tmp;
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
 	tmp |= (7 << 28);
@@ -1187,7 +1190,7 @@ int r100_rb2d_reset(struct radeon_device *rdev)
 	uint32_t tmp;
 	int i;
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
 	(void)RREG32(RADEON_RBBM_SOFT_RESET);
[Diff for one file suppressed because it is too large to display.]
@@ -155,7 +155,7 @@ int r520_mc_wait_for_idle(struct radeon_device *rdev)
 void r520_gpu_init(struct radeon_device *rdev)
 {
 	unsigned pipe_select_current, gb_pipe_select, tmp;
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	r100_hdp_reset(rdev);
 	rs600_disable_vga(rdev);
@@ -204,7 +204,7 @@ void r520_gpu_init(struct radeon_device *rdev)
 static void r520_vram_get_type(struct radeon_device *rdev)
 {
 	uint32_t tmp;
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	rdev->mc.vram_width = 128;
 	rdev->mc.vram_is_ddr = true;
@@ -245,7 +245,7 @@ void r520_vram_info(struct radeon_device *rdev)
 void rs600_disable_vga(struct radeon_device *rdev)
 {
 	unsigned tmp;
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	WREG32(0x330, 0);
 	WREG32(0x338, 0);
@@ -264,7 +264,7 @@ void r420_pipes_init(struct radeon_device *rdev)
 	unsigned gb_pipe_select;
 	unsigned num_pipes;
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	/* GA_ENHANCE workaround TCL deadlock issue */
 	WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
@@ -314,83 +314,11 @@ void r420_pipes_init(struct radeon_device *rdev)
 	DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
 }
 
-void rv370_pcie_gart_disable(struct radeon_device *rdev)
-{
-	uint32_t tmp;
-	dbgprintf("%s\n\r",__FUNCTION__);
-
-	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
-	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
-	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
-	if (rdev->gart.table.vram.robj) {
-//		radeon_object_kunmap(rdev->gart.table.vram.robj);
-//		radeon_object_unpin(rdev->gart.table.vram.robj);
-	}
-}
-
-void radeon_gart_table_vram_free(struct radeon_device *rdev)
-{
-	if (rdev->gart.table.vram.robj == NULL) {
-		return;
-	}
-//	radeon_object_kunmap(rdev->gart.table.vram.robj);
-//	radeon_object_unpin(rdev->gart.table.vram.robj);
-//	radeon_object_unref(&rdev->gart.table.vram.robj);
-}
-
-/*
- * Common gart functions.
- */
-void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
-			int pages)
-{
-	unsigned t;
-	unsigned p;
-	int i, j;
-	dbgprintf("%s\n\r",__FUNCTION__);
-
-	if (!rdev->gart.ready) {
-		dbgprintf("trying to unbind memory to unitialized GART !\n");
-		return;
-	}
-	t = offset / 4096;
-	p = t / (PAGE_SIZE / 4096);
-	for (i = 0; i < pages; i++, p++) {
-		if (rdev->gart.pages[p]) {
-//			pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
-//				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-			rdev->gart.pages[p] = NULL;
-			rdev->gart.pages_addr[p] = 0;
-			for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
-				radeon_gart_set_page(rdev, t, 0);
-			}
-		}
-	}
-	mb();
-	radeon_gart_tlb_flush(rdev);
-}
-
-
-
-void radeon_gart_fini(struct radeon_device *rdev)
-{
-	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
-		/* unbind pages */
-		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
-	}
-	rdev->gart.ready = false;
-//	kfree(rdev->gart.pages);
-//	kfree(rdev->gart.pages_addr);
-	rdev->gart.pages = NULL;
-	rdev->gart.pages_addr = NULL;
-}
-
-
-
 int radeon_agp_init(struct radeon_device *rdev)
 {
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 #if __OS_HAS_AGP
 	struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
@@ -535,182 +463,11 @@ void rs600_mc_disable_clients(struct radeon_device *rdev)
 
 }
 
-int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
-{
-	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
-
-	if (i < 0 || i > rdev->gart.num_gpu_pages) {
-		return -EINVAL;
-	}
-	addr = (((u32_t)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC;
-	writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4));
-	return 0;
-}
-
-
-int radeon_gart_init(struct radeon_device *rdev)
-{
-
-	dbgprintf("%s\n",__FUNCTION__);
-
-	if (rdev->gart.pages) {
-		return 0;
-	}
-	/* We need PAGE_SIZE >= 4096 */
-	if (PAGE_SIZE < 4096) {
-		DRM_ERROR("Page size is smaller than GPU page size!\n");
-		return -EINVAL;
-	}
-	/* Compute table size */
-	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
-	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096;
-	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
-		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
-	/* Allocate pages table */
-	rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
-				   GFP_KERNEL);
-	if (rdev->gart.pages == NULL) {
-//		radeon_gart_fini(rdev);
-		return -ENOMEM;
-	}
-	rdev->gart.pages_addr = kzalloc(sizeof(u32_t) *
-					rdev->gart.num_cpu_pages, GFP_KERNEL);
-	if (rdev->gart.pages_addr == NULL) {
-//		radeon_gart_fini(rdev);
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
-{
-	uint32_t gpu_addr;
-	int r;
-
-//	if (rdev->gart.table.vram.robj == NULL) {
-//		r = radeon_object_create(rdev, NULL,
-//					 rdev->gart.table_size,
-//					 true,
-//					 RADEON_GEM_DOMAIN_VRAM,
-//					 false, &rdev->gart.table.vram.robj);
-//		if (r) {
-//			return r;
-//		}
-//	}
-//	r = radeon_object_pin(rdev->gart.table.vram.robj,
-//			      RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
-//	if (r) {
-//		radeon_object_unref(&rdev->gart.table.vram.robj);
-//		return r;
-//	}
-//	r = radeon_object_kmap(rdev->gart.table.vram.robj,
-//			       (void **)&rdev->gart.table.vram.ptr);
-//	if (r) {
-//		radeon_object_unpin(rdev->gart.table.vram.robj);
-//		radeon_object_unref(&rdev->gart.table.vram.robj);
-//		DRM_ERROR("radeon: failed to map gart vram table.\n");
-//		return r;
-//	}
-
-	gpu_addr = 0x800000;
-
-	u32_t pci_addr = rdev->mc.aper_base + gpu_addr;
-
-	rdev->gart.table.vram.ptr = (void*)MapIoMem(pci_addr, rdev->gart.table_size, PG_SW);
-
-	rdev->gart.table_addr = gpu_addr;
-
-	dbgprintf("alloc gart vram:\n  gpu_base %x pci_base %x lin_addr %x",
-		  gpu_addr, pci_addr, rdev->gart.table.vram.ptr);
-
-	return 0;
-}
-
 void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
 
-int rv370_pcie_gart_enable(struct radeon_device *rdev)
-{
-	uint32_t table_addr;
-	uint32_t tmp;
-	int r;
-
-	dbgprintf("%s\n",__FUNCTION__);
-
-	/* Initialize common gart structure */
-	r = radeon_gart_init(rdev);
-	if (r) {
-		return r;
-	}
-//	r = rv370_debugfs_pcie_gart_info_init(rdev);
-//	if (r) {
-//		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
-//	}
-	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
-	r = radeon_gart_table_vram_alloc(rdev);
-	if (r) {
-		return r;
-	}
-	/* discard memory request outside of configured range */
-	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
-	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
-	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
-	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
-	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
-	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
-	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
-	table_addr = rdev->gart.table_addr;
-	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
-	/* FIXME: setup default page */
-	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
-	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
-	/* Clear error */
-	WREG32_PCIE(0x18, 0);
-	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
-	tmp |= RADEON_PCIE_TX_GART_EN;
-	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
-	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
-	rv370_pcie_gart_tlb_flush(rdev);
-	DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
-		 rdev->mc.gtt_size >> 20, table_addr);
-	rdev->gart.ready = true;
-	return 0;
-}
-
-void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
-{
-	uint32_t tmp;
-	int i;
-
-	/* Workaround HW bug do flush 2 times */
-	for (i = 0; i < 2; i++) {
-		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
-		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
-		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
-		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
-		mb();
-	}
-}
-
-int r300_gart_enable(struct radeon_device *rdev)
-{
-#if __OS_HAS_AGP
-	if (rdev->flags & RADEON_IS_AGP) {
-		if (rdev->family > CHIP_RV350) {
-			rv370_pcie_gart_disable(rdev);
-		} else {
-			r100_pci_gart_disable(rdev);
-		}
-		return 0;
-	}
-#endif
-	if (rdev->flags & RADEON_IS_PCIE) {
-		rdev->asic->gart_disable = &rv370_pcie_gart_disable;
-		rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
-		rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
-		return rv370_pcie_gart_enable(rdev);
-	}
-//	return r100_pci_gart_enable(rdev);
-}
-
@@ -741,49 +498,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
 }
 
-int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, u32_t *pagelist)
-{
-	unsigned t;
-	unsigned p;
-	uint64_t page_base;
-	int i, j;
-
-	dbgprintf("%s\n\r",__FUNCTION__);
-
-	if (!rdev->gart.ready) {
-		DRM_ERROR("trying to bind memory to unitialized GART !\n");
-		return -EINVAL;
-	}
-	t = offset / 4096;
-	p = t / (PAGE_SIZE / 4096);
-
-	for (i = 0; i < pages; i++, p++) {
-		/* we need to support large memory configurations */
-		/* assume that unbind have already been call on the range */
-
-		rdev->gart.pages_addr[p] = pagelist[i] & ~4095;
-
-		//if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
-		//	/* FIXME: failed to map page (return -ENOMEM?) */
-		//	radeon_gart_unbind(rdev, offset, pages);
-		//	return -ENOMEM;
-		//}
-		rdev->gart.pages[p] = pagelist[i];
-		page_base = (uint32_t)rdev->gart.pages_addr[p];
-		for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
-			radeon_gart_set_page(rdev, t, page_base);
-			page_base += 4096;
-		}
-	}
-	mb();
-	radeon_gart_tlb_flush(rdev);
-
-	dbgprintf("done %s\n",__FUNCTION__);
-
-	return 0;
-}
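radeon_gart_bind(), removed from this file above, takes a flat array of 32-bit physical page addresses instead of Linux's struct page pointers; the radeon.h hunk below uncomments its declaration with that signature. A hedged usage sketch of the call, with the GTT offset and page addresses invented for illustration:

/* Hypothetical caller: map four scattered 4 KiB physical pages into the
 * GTT at offset 0x10000 (first table index t = 0x10000 / 4096 = 16). */
static int map_example_buffer(struct radeon_device *rdev)
{
    u32_t pages[4] = { 0x00100000, 0x00104000, 0x002F0000, 0x002F4000 };

    return radeon_gart_bind(rdev, 0x10000, 4, pages);
}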
@@ -44,10 +44,12 @@
  * - TESTING, TESTING, TESTING
  */
 
-#include "types.h"
-#include "pci.h"
+#include <types.h>
+#include <list.h>
 
-#include "errno-base.h"
+#include <pci.h>
+
+#include <errno-base.h>
 
 #include "radeon_mode.h"
 #include "radeon_reg.h"
@@ -60,15 +62,14 @@ extern int radeon_gart_size;
 extern int radeon_r4xx_atom;
 
-
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
  * symbol;
  */
 #define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
 #define RADEON_IB_POOL_SIZE		16
 #define RADEON_DEBUGFS_MAX_NUM_FILES	32
 #define RADEONFB_CONN_LIMIT		4
 
 enum radeon_family {
 	CHIP_R100,
@@ -169,15 +170,15 @@ struct radeon_fence_driver {
 	unsigned long		count_timeout;
 //	wait_queue_head_t	queue;
 //	rwlock_t		lock;
-//	struct list_head	created;
-//	struct list_head	emited;
-//	struct list_head	signaled;
+	struct list_head	created;
+	struct list_head	emited;
+	struct list_head	signaled;
 };
 
 struct radeon_fence {
 	struct radeon_device	*rdev;
 //	struct kref		kref;
-//	struct list_head	list;
+	struct list_head	list;
 	/* protected by radeon_fence.lock */
 	uint32_t		seq;
 	unsigned long		timeout;
@@ -204,7 +205,7 @@ void radeon_fence_unref(struct radeon_fence **fence);
 struct radeon_object;
 
 struct radeon_object_list {
-//	struct list_head	list;
+	struct list_head	list;
 	struct radeon_object	*robj;
 	uint64_t		gpu_offset;
 	unsigned		rdomain;
@@ -216,7 +217,6 @@ struct radeon_object_list {
 
 
 
-
 /*
  * GART structures, functions & helpers
 */
@@ -255,8 +255,8 @@ int radeon_gart_init(struct radeon_device *rdev);
 void radeon_gart_fini(struct radeon_device *rdev);
 void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 			int pages);
-//int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-//		     int pages, struct page **pagelist);
+int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+		     int pages, u32_t *pagelist);
 
 
 /*
@@ -309,7 +309,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev);
  * CP & ring.
 */
 struct radeon_ib {
-//	struct list_head	list;
+	struct list_head	list;
 	unsigned long		idx;
 	uint64_t		gpu_addr;
 	struct radeon_fence	*fence;
@@ -320,10 +320,10 @@ struct radeon_ib {
 struct radeon_ib_pool {
 //	struct mutex		mutex;
 	struct radeon_object	*robj;
-//	struct list_head	scheduled_ibs;
+	struct list_head	scheduled_ibs;
 	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
 	bool			ready;
-//	DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+	DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
 };
 
 struct radeon_cp {
@@ -364,7 +364,7 @@ void radeon_ring_fini(struct radeon_device *rdev);
 struct radeon_cs_reloc {
 //	struct drm_gem_object		*gobj;
 	struct radeon_object		*robj;
-//	struct radeon_object_list	lobj;
+	struct radeon_object_list	lobj;
 	uint32_t			handle;
 	uint32_t			flags;
 };
@@ -388,7 +388,7 @@ struct radeon_cs_parser {
 	unsigned		nrelocs;
 	struct radeon_cs_reloc	*relocs;
 	struct radeon_cs_reloc	**relocs_ptr;
-//	struct list_head	validated;
+	struct list_head	validated;
 	/* indices of various chunks */
 	int			chunk_ib_idx;
 	int			chunk_relocs_idx;
@@ -512,24 +512,24 @@ struct radeon_device {
 	unsigned long			rmmio_size;
 	void				*rmmio;
 
 	radeon_rreg_t			mm_rreg;
 	radeon_wreg_t			mm_wreg;
 	radeon_rreg_t			mc_rreg;
 	radeon_wreg_t			mc_wreg;
 	radeon_rreg_t			pll_rreg;
 	radeon_wreg_t			pll_wreg;
 	radeon_rreg_t			pcie_rreg;
 	radeon_wreg_t			pcie_wreg;
 	radeon_rreg_t			pciep_rreg;
 	radeon_wreg_t			pciep_wreg;
 	struct radeon_clock		clock;
 	struct radeon_mc		mc;
 	struct radeon_gart		gart;
 	struct radeon_mode_info		mode_info;
 	struct radeon_scratch		scratch;
 //	struct radeon_mman		mman;
 	struct radeon_fence_driver	fence_drv;
 	struct radeon_cp		cp;
 	struct radeon_ib_pool		ib_pool;
 //	struct radeon_irq		irq;
 	struct radeon_asic		*asic;
@@ -403,8 +403,8 @@ static struct radeon_asic r520_asic = {
 	.gpu_reset = &rv515_gpu_reset,
 	.mc_init = &r520_mc_init,
 	.mc_fini = &r520_mc_fini,
-//	.wb_init = &r100_wb_init,
-//	.wb_fini = &r100_wb_fini,
+	.wb_init = &r100_wb_init,
+	.wb_fini = &r100_wb_fini,
 	.gart_enable = &r300_gart_enable,
 	.gart_disable = &rv370_pcie_gart_disable,
 	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
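
r520_asic is one entry in the driver's per-chip dispatch table: common code only ever calls through these pointers, so enabling .wb_init/.wb_fini here is all it takes to route R520 writeback through the shared r100 implementation. The pattern in miniature (stub functions and an abridged struct, not the real radeon_asic):

    struct radeon_device;

    struct asic_sketch {
        int  (*wb_init)(struct radeon_device *rdev);
        void (*wb_fini)(struct radeon_device *rdev);
    };

    static int  r100_wb_init_stub(struct radeon_device *rdev) { (void)rdev; return 0; }
    static void r100_wb_fini_stub(struct radeon_device *rdev) { (void)rdev; }

    /* Chip-specific table; generic code calls rdev->asic->wb_init(rdev). */
    static const struct asic_sketch r520_sketch = {
        .wb_init = r100_wb_init_stub,   /* shared with the R100 family */
        .wb_fini = r100_wb_fini_stub,
    };
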
@@ -944,7 +944,7 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
 	struct radeon_device *rdev = dev->dev_private;
 	uint32_t bios_2_scratch, bios_6_scratch;
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	if (rdev->family >= CHIP_R600) {
 		bios_2_scratch = RREG32(R600_BIOS_0_SCRATCH);
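
This is the first of many identical one-character fixes in the commit: "\n\r" is a reversed CRLF, which leaves the cursor at column 0 of the line just finished on terminals that honour both characters, producing ragged logs; a bare "\n" is all the log file needs. A hypothetical wrapper that normalizes endings regardless of what callers pass (not part of the driver):

    #include <stdarg.h>
    #include <stdio.h>

    static void dbg_line(const char *fmt, ...)
    {
        char buf[256];
        va_list ap;
        int n;

        va_start(ap, fmt);
        n = vsnprintf(buf, sizeof(buf), fmt, ap);
        va_end(ap);
        if (n < 0)
            return;
        if (n >= (int)sizeof(buf))
            n = sizeof(buf) - 1;
        while (n > 0 && (buf[n - 1] == '\n' || buf[n - 1] == '\r'))
            buf[--n] = '\0';            /* strip trailing CR/LF in any order */
        printf("%s\n", buf);
    }
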
@@ -39,7 +39,7 @@ static bool radeon_read_bios(struct radeon_device *rdev)
 	size_t size;
 
 	rdev->bios = NULL;
-	bios = pci_map_rom(rdev->pdev, &size);
+	bios = (uint8_t*)pci_map_rom(rdev->pdev, &size);
 	if (!bios) {
 		return false;
 	}
@@ -46,7 +46,7 @@ int radeon_gart_size = 512; /* default gart size */
  */
 static void radeon_surface_init(struct radeon_device *rdev)
 {
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	/* FIXME: check this out */
 	if (rdev->family < CHIP_R600) {
@@ -180,7 +180,7 @@ static bool radeon_card_posted(struct radeon_device *rdev)
 {
 	uint32_t reg;
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	/* first check CRTCs */
 	if (ASIC_IS_AVIVO(rdev)) {
@@ -231,7 +231,7 @@ void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 void radeon_register_accessor_init(struct radeon_device *rdev)
 {
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	rdev->mm_rreg = &r100_mm_rreg;
 	rdev->mm_wreg = &r100_mm_wreg;
@@ -288,7 +288,7 @@ void radeon_register_accessor_init(struct radeon_device *rdev)
 int radeon_asic_init(struct radeon_device *rdev)
 {
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	radeon_register_accessor_init(rdev);
 	switch (rdev->family) {
@@ -360,7 +360,7 @@ int radeon_clocks_init(struct radeon_device *rdev)
 {
 	int r;
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	radeon_get_clock_info(rdev->ddev);
 	r = radeon_static_clocks_init(rdev->ddev);
@@ -436,7 +436,7 @@ static struct card_info atom_card_info = {
 
 int radeon_atombios_init(struct radeon_device *rdev)
 {
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	atom_card_info.dev = rdev->ddev;
 	rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
@@ -462,7 +462,6 @@ void radeon_combios_fini(struct radeon_device *rdev)
 int radeon_modeset_init(struct radeon_device *rdev);
 void radeon_modeset_fini(struct radeon_device *rdev);
 
-void *ring_buffer;
 /*
  * Radeon device.
  */
@@ -473,7 +472,7 @@ int radeon_device_init(struct radeon_device *rdev,
 {
 	int r, ret = -1;
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	DRM_INFO("radeon: Initializing kernel modesetting.\n");
 	rdev->shutdown = false;
@@ -492,7 +491,6 @@ int radeon_device_init(struct radeon_device *rdev,
 //	mutex_init(&rdev->cp.mutex);
 //	rwlock_init(&rdev->fence_drv.lock);
 
-	ring_buffer = CreateRingBuffer( 1024*1024, PG_SW );
 
 	if (radeon_agpmode == -1) {
 		rdev->flags &= ~RADEON_IS_AGP;
@@ -620,10 +618,10 @@ int radeon_device_init(struct radeon_device *rdev,
 //		return r;
 //	}
 	/* Memory manager */
-//	r = radeon_object_init(rdev);
-//	if (r) {
-//		return r;
-//	}
+	r = radeon_object_init(rdev);
+	if (r) {
+		return r;
+	}
 	/* Initialize GART (initialize after TTM so we can allocate
 	 * memory through TTM but finalize after TTM) */
 	r = radeon_gart_enable(rdev);
@@ -635,15 +633,14 @@ int radeon_device_init(struct radeon_device *rdev,
 	if (!r) {
 		r = radeon_cp_init(rdev, 1024 * 1024);
 	}
-//	if (!r) {
-//		r = radeon_wb_init(rdev);
-//		if (r) {
-//			DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
-//			return r;
-//		}
-//	}
+	if (!r) {
+		r = radeon_wb_init(rdev);
+		if (r) {
+			DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
+			return r;
+		}
+	}
 
-#if 0
 	if (!r) {
 		r = radeon_ib_pool_init(rdev);
 		if (r) {
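
radeon_device_init threads one error code through successive if (!r) blocks, so the first failing stage silently skips everything after it; the newly enabled write-back block simply joins that chain between CP init and IB pool init. The control-flow shape, reduced to stubs:

    /* Sketch of the if (!r) init chain above; stage functions are stand-ins. */
    static int stage_a(void) { return 0; }
    static int stage_b(void) { return 0; }
    static int stage_c(void) { return 0; }

    static int init_chain(void)
    {
        int r = stage_a();
        if (!r)
            r = stage_b();   /* runs only if stage_a succeeded */
        if (!r)
            r = stage_c();
        return r;            /* 0 on success, first error otherwise */
    }
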
@@ -651,6 +648,8 @@ int radeon_device_init(struct radeon_device *rdev,
 			return r;
 		}
 	}
+
+#if 0
 
 	if (!r) {
 		r = radeon_ib_test(rdev);
 		if (r) {
@@ -694,9 +693,9 @@ u32_t __stdcall drvEntry(int action)
 	if(action != 1)
 		return 0;
 
-	if(!dbg_open("/rd/1/drivers/atikms.log"))
+	if(!dbg_open("/hd0/2/atikms.log"))
 	{
-		printf("Can't open /rd/1/drivers/ati2d.log\nExit\n");
+		printf("Can't open /hd0/2/atikms.log\nExit\n");
 		return 0;
 	}
 
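
The log moves off the ramdisk (/rd/1/...) onto a hard-disk path, and the error message is updated to match (it previously still named the old ati2d.log). If one wanted to tolerate either layout, a fallback loop over candidate paths could look like this (hypothetical; dbg_open's int-returning, path-taking form is inferred from the call above):

    int dbg_open(const char *path);        /* assumed prototype, as used above */

    static int open_log(void)
    {
        static const char *paths[] = {
            "/hd0/2/atikms.log",
            "/rd/1/drivers/atikms.log",
        };
        for (unsigned i = 0; i < sizeof(paths) / sizeof(paths[0]); i++)
            if (dbg_open(paths[i]))        /* nonzero on success, per the caller above */
                return 1;
        return 0;
    }
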
@@ -793,9 +792,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	struct radeon_device *rdev;
 	int r;
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
-	rdev = malloc(sizeof(struct radeon_device));
+	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
 	if (rdev == NULL) {
 		return -ENOMEM;
 	};
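
Swapping malloc for kzalloc is not cosmetic: radeon_device is consulted field-by-field during bring-up, and zero-filled memory is what makes all the == NULL and false defaults hold before each subsystem initializes its own members. In hosted C the same guarantee comes from calloc:

    #include <stdlib.h>

    /* kzalloc(size, GFP_KERNEL) == allocate + zero; calloc is the hosted
     * equivalent (the GFP flags have no meaning outside the kernel). */
    static void *kzalloc_equiv(size_t size)
    {
        return calloc(1, size);
    }
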
@@ -825,7 +824,7 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct drm_device *dev;
 	int ret;
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	dev = malloc(sizeof(*dev));
 	if (!dev)
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
|
//#include "drmP.h"
|
||||||
|
#include "radeon_drm.h"
|
||||||
|
#include "radeon.h"
|
||||||
|
#include "radeon_reg.h"
|
||||||
|
|
||||||
|
#if 0
|
||||||
|
/*
|
||||||
|
* Common GART table functions.
|
||||||
|
*/
|
||||||
|
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
|
||||||
|
{
|
||||||
|
void *ptr;
|
||||||
|
|
||||||
|
ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
|
||||||
|
&rdev->gart.table_addr);
|
||||||
|
if (ptr == NULL) {
|
||||||
|
return -ENOMEM;
|
||||||
|
}
|
||||||
|
#ifdef CONFIG_X86
|
||||||
|
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
|
||||||
|
rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
|
||||||
|
set_memory_uc((unsigned long)ptr,
|
||||||
|
rdev->gart.table_size >> PAGE_SHIFT);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
rdev->gart.table.ram.ptr = ptr;
|
||||||
|
memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void radeon_gart_table_ram_free(struct radeon_device *rdev)
|
||||||
|
{
|
||||||
|
if (rdev->gart.table.ram.ptr == NULL) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
#ifdef CONFIG_X86
|
||||||
|
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
|
||||||
|
rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
|
||||||
|
set_memory_wb((unsigned long)rdev->gart.table.ram.ptr,
|
||||||
|
rdev->gart.table_size >> PAGE_SHIFT);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
pci_free_consistent(rdev->pdev, rdev->gart.table_size,
|
||||||
|
(void *)rdev->gart.table.ram.ptr,
|
||||||
|
rdev->gart.table_addr);
|
||||||
|
rdev->gart.table.ram.ptr = NULL;
|
||||||
|
rdev->gart.table_addr = 0;
|
||||||
|
}
|
||||||
|
#endif
|
+
+int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
+{
+	uint32_t gpu_addr;
+	int r;
+
+	if (rdev->gart.table.vram.robj == NULL) {
+		r = radeon_object_create(rdev, NULL,
+					 rdev->gart.table_size,
+					 true,
+					 RADEON_GEM_DOMAIN_VRAM,
+					 false, &rdev->gart.table.vram.robj);
+		if (r) {
+			return r;
+		}
+	}
+	r = radeon_object_pin(rdev->gart.table.vram.robj,
+			      RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+	if (r) {
+//		radeon_object_unref(&rdev->gart.table.vram.robj);
+		return r;
+	}
+	r = radeon_object_kmap(rdev->gart.table.vram.robj,
+			       (void **)&rdev->gart.table.vram.ptr);
+	if (r) {
+//		radeon_object_unpin(rdev->gart.table.vram.robj);
+//		radeon_object_unref(&rdev->gart.table.vram.robj);
+		DRM_ERROR("radeon: failed to map gart vram table.\n");
+		return r;
+	}
+
+	rdev->gart.table_addr = gpu_addr;
+
+	dbgprintf("alloc gart vram: gpu_base %x lin_addr %x\n",
+		  rdev->gart.table_addr, rdev->gart.table.vram.ptr);
+
+//	gpu_addr = 0x800000;
+
+//	u32_t pci_addr = rdev->mc.aper_base + gpu_addr;
+
+//	rdev->gart.table.vram.ptr = (void*)MapIoMem(pci_addr, rdev->gart.table_size, PG_SW);
+
+//	dbgprintf("alloc gart vram:\n  gpu_base %x pci_base %x lin_addr %x",
+//		  gpu_addr, pci_addr, rdev->gart.table.vram.ptr);
+
+	return 0;
+}
+
+void radeon_gart_table_vram_free(struct radeon_device *rdev)
+{
+	if (rdev->gart.table.vram.robj == NULL) {
+		return;
+	}
+//	radeon_object_kunmap(rdev->gart.table.vram.robj);
+//	radeon_object_unpin(rdev->gart.table.vram.robj);
+//	radeon_object_unref(&rdev->gart.table.vram.robj);
+}
+
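
radeon_gart_table_vram_alloc shows the object-layer protocol the new memory manager expects: create the buffer once, pin it to get a stable GPU address (kept in table_addr), then kmap it for CPU writes; the commented block below it is the old MapIoMem route it replaces. The sequence in skeleton form, with stubbed object ops (the 0x800000 placeholder mirrors the commented-out value above; none of this is the driver's real object layer):

    #include <stdint.h>
    #include <stdlib.h>

    struct obj { uint32_t gpu_addr; void *cpu_ptr; };

    /* Stubs standing in for radeon_object_create/pin/kmap. */
    static int obj_create(struct obj **o, size_t size)
    {
        *o = calloc(1, sizeof(**o));
        if (*o == NULL)
            return -1;
        (*o)->cpu_ptr = calloc(1, size);
        return (*o)->cpu_ptr ? 0 : -1;
    }
    static int obj_pin(struct obj *o, uint32_t *gpu_addr)
    {
        o->gpu_addr = 0x800000;          /* placeholder GPU address */
        *gpu_addr = o->gpu_addr;
        return 0;
    }
    static int obj_kmap(struct obj *o, void **ptr)
    {
        *ptr = o->cpu_ptr;
        return 0;
    }

    static int table_alloc(struct obj **robj, uint32_t *table_addr,
                           void **table_ptr, size_t size)
    {
        int r = obj_create(robj, size);
        if (r)
            return r;
        r = obj_pin(*robj, table_addr);  /* address stays valid while pinned */
        if (r)
            return r;
        return obj_kmap(*robj, table_ptr);
    }
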
+/*
+ * Common gart functions.
+ */
+void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+			int pages)
+{
+	unsigned t;
+	unsigned p;
+	int i, j;
+
+	if (!rdev->gart.ready) {
+//		WARN(1, "trying to unbind memory from uninitialized GART !\n");
+		return;
+	}
+	t = offset / 4096;
+	p = t / (PAGE_SIZE / 4096);
+	for (i = 0; i < pages; i++, p++) {
+		if (rdev->gart.pages[p]) {
+//			pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
+//				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+			rdev->gart.pages[p] = NULL;
+			rdev->gart.pages_addr[p] = 0;
+			for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
+				radeon_gart_set_page(rdev, t, 0);
+			}
+		}
+	}
+	mb();
+	radeon_gart_tlb_flush(rdev);
+}
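
The index arithmetic in radeon_gart_unbind (and in radeon_gart_bind below) keeps two page sizes in play: t counts 4096-byte GPU pages, p counts CPU pages, and the inner loop writes PAGE_SIZE / 4096 GART entries per CPU page. A runnable check of that math for the usual PAGE_SIZE of 4096:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
        unsigned offset = 3 * PAGE_SIZE;         /* unbind starting at page 3 */
        unsigned t = offset / 4096;              /* GPU page index: 3 */
        unsigned p = t / (PAGE_SIZE / 4096);     /* CPU page index: 3 */
        printf("gpu page t=%u cpu page p=%u, %u GPU entries per CPU page\n",
               t, p, PAGE_SIZE / 4096);
        return 0;
    }
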
+
+int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+		     int pages, u32_t *pagelist)
+{
+	unsigned t;
+	unsigned p;
+	uint64_t page_base;
+	int i, j;
+
+	dbgprintf("%s ",__FUNCTION__);
+	dbgprintf("offset %x pages %x list %x\n",
+		  offset, pages, pagelist);
+
+	if (!rdev->gart.ready) {
+		DRM_ERROR("trying to bind memory to uninitialized GART !\n");
+		return -EINVAL;
+	}
+	t = offset / 4096;
+	p = t / (PAGE_SIZE / 4096);
+
+	for (i = 0; i < pages; i++, p++) {
+		/* we need to support large memory configurations */
+		/* assume that unbind has already been called on the range */
+		rdev->gart.pages_addr[p] = pagelist[i] & ~4095;
+
+//		if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
+//			/* FIXME: failed to map page (return -ENOMEM?) */
+//			radeon_gart_unbind(rdev, offset, pages);
+//			return -ENOMEM;
+//		}
+		rdev->gart.pages[p] = pagelist[i];
+		page_base = (uint32_t)rdev->gart.pages_addr[p];
+		for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
+			radeon_gart_set_page(rdev, t, page_base);
+			page_base += 4096;
+		}
+	}
+	mb();
+	radeon_gart_tlb_flush(rdev);
+
+	dbgprintf("done %s\n",__FUNCTION__);
+
+	return 0;
+}
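
Stripped of the driver plumbing, the bind loop is a page-table fill: mask the low 12 bits off each pagelist entry to get the page's physical base, then emit one entry per 4 KiB GPU page. The same loop against a plain in-memory table, to make the addressing explicit (gart[] stands in for radeon_gart_set_page):

    #include <stdint.h>

    #define PAGE_SIZE 4096u

    static void bind_sketch(uint32_t *gart, unsigned offset,
                            int pages, const uint32_t *pagelist)
    {
        unsigned t = offset / 4096;                      /* first GPU entry */
        for (int i = 0; i < pages; i++) {
            uint32_t page_base = pagelist[i] & ~4095u;   /* drop offset bits */
            for (unsigned j = 0; j < PAGE_SIZE / 4096; j++, t++) {
                gart[t] = page_base;                     /* one entry per GPU page */
                page_base += 4096;
            }
        }
    }
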
+
+int radeon_gart_init(struct radeon_device *rdev)
+{
+
+	dbgprintf("%s\n",__FUNCTION__);
+
+	if (rdev->gart.pages) {
+		return 0;
+	}
+	/* We need PAGE_SIZE >= 4096 */
+	if (PAGE_SIZE < 4096) {
+		DRM_ERROR("Page size is smaller than GPU page size!\n");
+		return -EINVAL;
+	}
+	/* Compute table size */
+	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
+	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096;
+	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
+		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
+	/* Allocate pages table */
+	rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
+				   GFP_KERNEL);
+	if (rdev->gart.pages == NULL) {
+//		radeon_gart_fini(rdev);
+		return -ENOMEM;
+	}
+	rdev->gart.pages_addr = kzalloc(sizeof(u32_t) *
+					rdev->gart.num_cpu_pages, GFP_KERNEL);
+	if (rdev->gart.pages_addr == NULL) {
+//		radeon_gart_fini(rdev);
+		return -ENOMEM;
+	}
+	return 0;
+}
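
The table sizing in radeon_gart_init falls straight out of the two page sizes: gtt_size / PAGE_SIZE CPU-page slots and gtt_size / 4096 GPU-page entries, which coincide when PAGE_SIZE is 4096. For example, a 64 MiB GTT needs 16384 of each:

    #include <stdio.h>

    int main(void)
    {
        unsigned long gtt_size  = 64ul << 20;   /* 64 MiB aperture */
        unsigned long page_size = 4096;
        printf("cpu pages %lu, gpu pages %lu\n",
               gtt_size / page_size, gtt_size / 4096);  /* 16384 and 16384 */
        return 0;
    }
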
+
+void radeon_gart_fini(struct radeon_device *rdev)
+{
+	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+		/* unbind pages */
+		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
+	}
+	rdev->gart.ready = false;
+	kfree(rdev->gart.pages);
+	kfree(rdev->gart.pages_addr);
+	rdev->gart.pages = NULL;
+	rdev->gart.pages_addr = NULL;
+}
File diff suppressed because it is too large
@@ -32,14 +32,15 @@
 #include "radeon.h"
 #include "atom.h"
 
-extern void * ring_buffer;
 
-#if 0
 int radeon_debugfs_ib_init(struct radeon_device *rdev);
 
 /*
  * IB.
  */
 
+#if 0
 
 int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
 {
 	struct radeon_fence *fence;
@@ -98,6 +99,7 @@ out:
 	return r;
 }
 
+
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 {
 	struct radeon_ib *tmp = *ib;
@@ -170,6 +172,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 	mutex_unlock(&rdev->ib_pool.mutex);
 	return 0;
 }
+#endif
 
 int radeon_ib_pool_init(struct radeon_device *rdev)
 {
@@ -210,9 +213,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	rdev->ib_pool.ready = true;
 	DRM_INFO("radeon: ib pool ready.\n");
-	if (radeon_debugfs_ib_init(rdev)) {
-		DRM_ERROR("Failed to register debugfs file for IB !\n");
-	}
+//	if (radeon_debugfs_ib_init(rdev)) {
+//		DRM_ERROR("Failed to register debugfs file for IB !\n");
+//	}
 	return r;
 }
@@ -221,16 +224,18 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 	if (!rdev->ib_pool.ready) {
 		return;
 	}
-	mutex_lock(&rdev->ib_pool.mutex);
+//	mutex_lock(&rdev->ib_pool.mutex);
 	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (rdev->ib_pool.robj) {
-		radeon_object_kunmap(rdev->ib_pool.robj);
-		radeon_object_unref(&rdev->ib_pool.robj);
+//		radeon_object_kunmap(rdev->ib_pool.robj);
+//		radeon_object_unref(&rdev->ib_pool.robj);
 		rdev->ib_pool.robj = NULL;
 	}
-	mutex_unlock(&rdev->ib_pool.mutex);
+//	mutex_unlock(&rdev->ib_pool.mutex);
 }
 
+#if 0
 
 int radeon_ib_test(struct radeon_device *rdev)
 {
 	struct radeon_ib *ib;
|
||||||
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
|
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
|
||||||
int pages, u32_t *pagelist);
|
int pages, u32_t *pagelist);
|
||||||
|
|
||||||
#define page_tabs 0xFDC00000
|
|
||||||
|
|
||||||
|
|
||||||
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
|
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
|
||||||
|
@ -413,8 +417,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
|
||||||
|
|
||||||
rdev->cp.ring_size = ring_size;
|
rdev->cp.ring_size = ring_size;
|
||||||
|
|
||||||
#if 0
|
/* Allocate ring buffer */
|
||||||
/* Allocate ring buffer */
|
|
||||||
if (rdev->cp.ring_obj == NULL) {
|
if (rdev->cp.ring_obj == NULL) {
|
||||||
r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
|
r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
|
||||||
true,
|
true,
|
||||||
|
@@ -442,23 +445,19 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
 			return r;
 		}
 	}
-#endif
 
-	dbgprintf("ring size %x\n", ring_size);
+//	rdev->cp.ring = CreateRingBuffer( ring_size, PG_SW );
 
 	dbgprintf("ring buffer %x\n", rdev->cp.ring );
 
-	rdev->cp.ring = ring_buffer; //CreateRingBuffer( ring_size, PG_SW );
+//	rdev->cp.gpu_addr = rdev->mc.gtt_location;
 
-	dbgprintf("ring buffer %x\n", rdev->cp.ring );
+//	u32_t *pagelist = &((u32_t*)page_tabs)[(u32_t)rdev->cp.ring >> 12];
 
-	rdev->cp.gpu_addr = rdev->mc.gtt_location;
+//	dbgprintf("pagelist %x\n", pagelist);
 
-	u32_t *pagelist = &((u32_t*)page_tabs)[(u32_t)rdev->cp.ring >> 12];
+//	radeon_gart_bind(rdev, 0, ring_size / 4096, pagelist);
 
-	dbgprintf("pagelist %x\n", pagelist);
-
-	radeon_gart_bind(rdev, 0, ring_size / 4096, pagelist);
-
 	rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
 	rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
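
The surviving bookkeeping works in 32-bit dwords: the ring holds ring_size / 4 of them, and because that count is a power of two, ptr_mask = count - 1 lets read/write pointers wrap with a single AND. A quick check with the 1 MiB ring requested earlier in radeon_device_init:

    #include <stdio.h>

    int main(void)
    {
        unsigned ring_size = 1024 * 1024;        /* bytes, as passed to radeon_cp_init */
        unsigned dwords    = ring_size / 4;
        unsigned ptr_mask  = dwords - 1;         /* valid because dwords is a power of two */
        unsigned wptr      = (dwords - 1 + 5) & ptr_mask;  /* advance 5 dwords past end */
        printf("dwords %u mask %#x wrapped wptr %u\n", dwords, ptr_mask, wptr);
        return 0;
    }
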
@@ -140,7 +140,7 @@ void rv515_ring_start(struct radeon_device *rdev)
 	unsigned gb_tile_config;
 	int r;
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
 	gb_tile_config = R300_ENABLE_TILING | R300_TILE_SIZE_16;
 	switch (rdev->num_gb_pipes) {
@@ -231,7 +231,7 @@ void rv515_ring_start(struct radeon_device *rdev)
 	radeon_ring_write(rdev, 0);
 	radeon_ring_unlock_commit(rdev);
 
-	dbgprintf("done %s\n\r",__FUNCTION__);
+	dbgprintf("done %s\n",__FUNCTION__);
 
 }
 
|
||||||
bool reinit_cp;
|
bool reinit_cp;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
dbgprintf("%s\n\r",__FUNCTION__);
|
dbgprintf("%s\n",__FUNCTION__);
|
||||||
|
|
||||||
reinit_cp = rdev->cp.ready;
|
reinit_cp = rdev->cp.ready;
|
||||||
rdev->cp.ready = false;
|
rdev->cp.ready = false;
|
||||||
|
@@ -350,7 +350,7 @@ int rv515_gpu_reset(struct radeon_device *rdev)
 {
 	uint32_t status;
 
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	/* reset order likely matter */
 	status = RREG32(RADEON_RBBM_STATUS);
@@ -569,7 +569,7 @@ static const unsigned r500_reg_safe_bm[159] = {
 
 int rv515_init(struct radeon_device *rdev)
 {
-	dbgprintf("%s\n\r",__FUNCTION__);
+	dbgprintf("%s\n",__FUNCTION__);
 
 	rdev->config.r300.reg_safe_bm = r500_reg_safe_bm;
 	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm);