i915-4.4.1

git-svn-id: svn://kolibrios.org@6131 a494cfbc-eb01-0410-851d-a64ba20cac60

parent 14963ad6d1
commit 0b68aa81cf
@@ -33,8 +33,13 @@
 extern int x86_clflush_size;
 
-#if 0
+#if defined(CONFIG_X86)
+/*
+ * clflushopt is an unordered instruction which needs fencing with mfence or
+ * sfence to avoid ordering issues. For drm_clflush_page this fencing happens
+ * in the caller.
+ */
 static void
 drm_clflush_page(struct page *page)
 {
@@ -66,70 +71,44 @@ static void drm_cache_flush_clflush(struct page *pages[],
 void
 drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 {
-    uint8_t *pva;
-    unsigned int i, j;
-
-    pva = AllocKernelSpace(4096);
-
-    if(pva != NULL)
-    {
-        dma_addr_t *src, *dst;
-        u32 count;
-
-        for (i = 0; i < num_pages; i++)
-        {
-            mb();
-            MapPage(pva, page_to_phys(pages[i]), 0x001);
-            for (j = 0; j < PAGE_SIZE; j += x86_clflush_size)
-                clflush(pva + j);
-        }
-        FreeKernelSpace(pva);
-    }
-    mb();
-}
-EXPORT_SYMBOL(drm_clflush_pages);
+#if defined(CONFIG_X86)
+    drm_cache_flush_clflush(pages, num_pages);
+    return;
 
+#elif defined(__powerpc__)
+    unsigned long i;
+    for (i = 0; i < num_pages; i++) {
+        struct page *page = pages[i];
+        void *page_virtual;
+
+        if (unlikely(page == NULL))
+            continue;
+
+        page_virtual = kmap_atomic(page);
+        flush_dcache_range((unsigned long)page_virtual,
+                           (unsigned long)page_virtual + PAGE_SIZE);
+        kunmap_atomic(page_virtual);
+    }
+#else
+    printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+    WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_pages);
+
 void
 drm_clflush_sg(struct sg_table *st)
 {
-    struct sg_page_iter sg_iter;
-    struct page *page;
-    uint8_t *pva;
-    unsigned int i;
-
-    pva = AllocKernelSpace(4096);
-    if( pva != NULL)
-    {
-        mb();
-        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-        {
-            page = sg_page_iter_page(&sg_iter);
-
-            MapPage(pva, page_to_phys(page), 0x001);
-
-            for (i = 0; i < PAGE_SIZE; i += x86_clflush_size)
-                clflush(pva + i);
-        };
-        FreeKernelSpace(pva);
-    };
-    mb();
-}
-EXPORT_SYMBOL(drm_clflush_sg);
-
-#if 0
-void
-drm_clflush_virt_range(void *addr, unsigned long length)
-{
 #if defined(CONFIG_X86)
     if (cpu_has_clflush) {
-        const int size = boot_cpu_data.x86_clflush_size;
-        void *end = addr + length;
+        struct sg_page_iter sg_iter;
 
-        addr = (void *)(((unsigned long)addr) & -size);
         mb();
-        for (; addr < end; addr += size)
-            clflushopt(addr);
+        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+            drm_clflush_page(sg_page_iter_page(&sg_iter));
         mb();
 
         return;
     }
@@ -140,6 +119,26 @@ drm_clflush_virt_range(void *addr, unsigned long length)
     WARN_ON_ONCE(1);
 #endif
 }
-EXPORT_SYMBOL(drm_clflush_virt_range);
+EXPORT_SYMBOL(drm_clflush_sg);
+
+void
+drm_clflush_virt_range(void *addr, unsigned long length)
+{
+#if defined(CONFIG_X86)
+    if (1) {
+        const int size = x86_clflush_size;
+        void *end = addr + length;
+
+        addr = (void *)(((unsigned long)addr) & -size);
+        mb();
+        for (; addr < end; addr += size)
+            clflush(addr);
+        mb();
+
+        return;
+    }
+
+#else
+    printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+    WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_virt_range);
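Note on the rewritten drm_clflush_virt_range above: `addr & -size` rounds the start pointer down to a cache-line boundary, so the first partially covered line is flushed along with the rest, and the loop then steps one cache line at a time until it passes `end`. A minimal standalone sketch of just that arithmetic (hypothetical addresses and a 64-byte line size; in the driver, x86_clflush_size comes from CPUID and clflush() is the real instruction):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const int size = 64;                /* assumed cache-line size, bytes */
    uintptr_t addr = 0x100027;          /* unaligned start of the range */
    uintptr_t end  = addr + 200;        /* one past the last byte */
    unsigned lines = 0;

    /* -size == ~(size - 1) for powers of two: clears the low bits */
    for (uintptr_t p = addr & (uintptr_t)-size; p < end; p += size)
        lines++;                        /* the driver issues clflush(p) here */

    printf("%u cache lines cover [0x%lx, 0x%lx)\n",
           lines, (unsigned long)(addr & (uintptr_t)-size), (unsigned long)end);
    return 0;   /* prints: 4 cache lines cover [0x100000, 0x1000ef) */
}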
@@ -264,7 +264,7 @@ EXPORT_SYMBOL(drm_gem_handle_delete);
  * @file: drm file-private structure to remove the dumb handle from
  * @dev: corresponding drm_device
  * @handle: the dumb handle to remove
  *
  * This implements the ->dumb_destroy kms driver callback for drivers which use
  * gem to manage their backing storage.
  */
@@ -281,7 +281,7 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy);
  * @file_priv: drm file-private structure to register the handle for
  * @obj: object to register
  * @handlep: pointer to return the created handle to the caller
  *
  * This expects the dev->object_name_lock to be held already and will drop it
  * before returning. Used to avoid races in establishing new handles when
  * importing an object from either an flink name or a dma-buf.
@@ -54,6 +54,10 @@ static inline ktime_t ktime_mono_to_real(ktime_t mono)
 	return mono;
 }
 
+irqreturn_t device_irq_handler(struct drm_device *dev)
+{
+    return dev->driver->irq_handler(0, dev);
+}
+
 /* Access macro for slots in vblank timestamp ringbuffer. */
 #define vblanktimestamp(dev, pipe, count) \
@@ -401,15 +405,6 @@ EXPORT_SYMBOL(drm_vblank_init);
 
-irqreturn_t device_irq_handler(struct drm_device *dev)
-{
-//    printf("video irq\n");
-
-//    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
-
-    return dev->driver->irq_handler(0, dev);
-}
-
 /**
  * drm_irq_install - install IRQ handler
@@ -1424,7 +1419,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
 
 		list_del(&e->base.link);
 		drm_vblank_put(dev, pipe);
-		send_vblank_event(dev, e, seq, &now);
+//		send_vblank_event(dev, e, seq, &now);
 	}
 
 }
@@ -547,21 +547,6 @@ int drm_order(unsigned long size)
 	return order;
 }
 
-extern int x86_clflush_size;
-
-void drm_clflush_virt_range(void *addr, unsigned long length)
-{
-    char *tmp = addr;
-    char *end = tmp + length;
-    mb();
-    for (; tmp < end; tmp += x86_clflush_size)
-        clflush(tmp);
-    clflush(end - 1);
-    mb();
-    return;
-}
-
 int drm_sysfs_connector_add(struct drm_connector *connector)
 {
     return 0;
@@ -181,7 +181,7 @@ static int i915_get_bridge_dev(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+	dev_priv->bridge_dev = _pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
 	if (!dev_priv->bridge_dev) {
 		DRM_ERROR("bridge device not found\n");
 		return -1;
@@ -899,8 +899,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	 * All tasks on the workqueue are expected to acquire the dev mutex
 	 * so there is no point in running more than one instance of the
 	 * workqueue at any time.  Use an ordered one.
 	 */
 	dev_priv->wq = (struct workqueue_struct *)alloc_ordered_workqueue("i915", 0);
 	if (dev_priv->wq == NULL) {
 		DRM_ERROR("Failed to create our workqueue.\n");
 		ret = -ENOMEM;
@@ -40,6 +40,7 @@
 #define RQ_BUG_ON(expr)
 
 extern int x86_clflush_size;
+#define __copy_to_user_inatomic __copy_to_user
 
 #define PROT_READ       0x1             /* page can be read */
 #define PROT_WRITE      0x2             /* page can be written */
@@ -57,7 +58,7 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
 #define MAX_ERRNO       4095
 
 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
+#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
 
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
@@ -238,9 +239,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 			       args->size, &args->handle);
 }
 
-
-#if 0
-
 static inline int
 __copy_to_user_swizzled(char __user *cpu_vaddr,
 			const char *gpu_vaddr, int gpu_offset,
@@ -293,6 +291,42 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
 	return 0;
 }
 
+/*
+ * Pins the specified object's pages and synchronizes the object with
+ * GPU accesses. Sets needs_clflush to non-zero if the caller should
+ * flush the object from the CPU cache.
+ */
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+				    int *needs_clflush)
+{
+	int ret;
+
+	*needs_clflush = 0;
+
+	if (!obj->base.filp)
+		return -EINVAL;
+
+	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+		/* If we're not in the cpu read domain, set ourself into the gtt
+		 * read domain and manually flush cachelines (if required). This
+		 * optimizes for the case when the gpu will dirty the data
+		 * anyway again before the next pread happens. */
+		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
+							obj->cache_level);
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
+	}
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
+	return ret;
+}
+
 /* Per-page copy function for the shmem pread fastpath.
  * Flushes invalid cachelines before reading the target if
  * needs_clflush is set. */
@@ -424,16 +458,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
 		mutex_unlock(&dev->struct_mutex);
 
-		if (likely(!i915.prefault_disable) && !prefaulted) {
-			ret = fault_in_multipages_writeable(user_data, remain);
-			/* Userspace is tricking us, but we've already clobbered
-			 * its pages with the prefault and promised to write the
-			 * data up to the first fault. Hence ignore any errors
-			 * and just continue. */
-			(void)ret;
-			prefaulted = 1;
-		}
-
 		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
 				       user_data, page_do_bit17_swizzling,
 				       needs_clflush);
@@ -471,11 +495,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	if (args->size == 0)
 		return 0;
 
-	if (!access_ok(VERIFY_WRITE,
-		       to_user_ptr(args->data_ptr),
-		       args->size))
-		return -EFAULT;
-
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
@@ -516,27 +535,7 @@ unlock:
  * page faults in the source data
 */
 
-static inline int
-fast_user_write(struct io_mapping *mapping,
-		loff_t page_base, int page_offset,
-		char __user *user_data,
-		int length)
-{
-	void __iomem *vaddr_atomic;
-	void *vaddr;
-	unsigned long unwritten;
-
-	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
-	/* We can use the cpu mem copy function because this is X86. */
-	vaddr = (void __force*)vaddr_atomic + page_offset;
-	unwritten = __copy_from_user_inatomic_nocache(vaddr,
-						      user_data, length);
-	io_mapping_unmap_atomic(vaddr_atomic);
-	return unwritten;
-}
-#endif
-
-#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
 /**
  * This is the fast pwrite path, where we copy the data directly from the
  * user into the GTT, uncached.
@@ -585,9 +584,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		MapPage(dev_priv->gtt.mappable, dev_priv->gtt.mappable_base+page_base, PG_SW);
+		MapPage(dev_priv->gtt.mappable,
+			dev_priv->gtt.mappable_base+page_base, PG_WRITEC|PG_SW);
 
 		memcpy((char*)dev_priv->gtt.mappable+page_offset, user_data, page_length);
 
 		remain -= page_length;
 		user_data += page_length;
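The gtt_pwrite_fast hunk above copies user data through the GTT window one page at a time; offset_in_page() (added near the top of the file) extracts the low 12 bits of an offset. A standalone sketch of only the chunking arithmetic, with made-up numbers and no hardware access:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

int main(void)
{
    unsigned long offset = 0x1f80;  /* hypothetical byte offset into the object */
    unsigned long remain = 10000;   /* hypothetical bytes left to copy */

    while (remain > 0) {
        unsigned long page_base   = offset & PAGE_MASK;
        unsigned long page_offset = offset_in_page(offset);
        unsigned long page_length = remain;

        /* never cross a page boundary in one copy, as in the hunk above */
        if (page_offset + remain > PAGE_SIZE)
            page_length = PAGE_SIZE - page_offset;

        printf("copy %4lu bytes at page 0x%05lx + 0x%03lx\n",
               page_length, page_base, page_offset);

        remain -= page_length;
        offset += page_length;
    }
    return 0;
}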
@@ -415,6 +415,16 @@ int i915_gem_init_stolen(struct drm_device *dev)
 	 */
 	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
 
+	{
+		u32 usable_size = dev_priv->gtt.stolen_usable_size >> 20;
+		if(i915.fbsize > usable_size)
+		{
+			i915.fbsize = usable_size;
+			DRM_DEBUG_KMS("Adjust framebuffer size to match reserved memory\n"
+				      "new fbsize %dMB\n",i915.fbsize);
+		}
+	}
+
 	return 0;
 }
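In the stolen-memory hunk above, stolen_usable_size is in bytes while i915.fbsize is in MiB, so the `>> 20` converts units before the clamp. A tiny sketch of the same arithmetic with assumed numbers:

#include <stdio.h>

int main(void)
{
    unsigned long stolen_usable_size = 64UL << 20; /* e.g. 64 MiB usable stolen */
    unsigned int  fbsize = 128;                    /* requested framebuffer, MiB */
    unsigned int  usable = stolen_usable_size >> 20;

    if (fbsize > usable)
        fbsize = usable;                           /* clamped to 64 MiB */

    printf("fbsize = %u MiB\n", fbsize);
    return 0;
}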
@@ -1445,8 +1445,8 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
 
 		*pin_mask |= BIT(i);
 
 //        if (!intel_hpd_pin_to_port(i, &port))
-//			continue;
+			continue;
 
 		if (long_pulse_detect(port, dig_hotplug_reg))
 			*long_mask |= BIT(i);
@@ -2009,8 +2009,8 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 	enum pipe pipe;
 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
 
-	if (hotplug_trigger)
-		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
+//	if (hotplug_trigger)
+//		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
 
 	if (de_iir & DE_AUX_CHANNEL_A)
 		dp_aux_irq_handler(dev);
@@ -4474,13 +4474,3 @@ void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
 	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
 	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
 }
-
-irqreturn_t intel_irq_handler(struct drm_device *dev)
-{
-//    printf("i915 irq\n");
-//    printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
-
-    return dev->driver->irq_handler(0, dev);
-}
-
@@ -31,7 +31,7 @@ struct i915_params i915 __read_mostly = {
 	.lvds_channel_mode = 0,
 	.panel_use_ssc = -1,
 	.vbt_sdvo_panel_type = -1,
-	.enable_rc6 = -1,
+	.enable_rc6 = 0,
 	.enable_fbc = -1,
 	.enable_execlists = -1,
 	.enable_hangcheck = true,
@@ -43,5 +43,5 @@
 #define trace_i915_page_table_entry_map(vm, pde, pt, index, count, GEN6_PTES)
 #define trace_i915_va_alloc(vm,start,size,name)
 #define trace_i915_gem_request_notify(ring)
+#define trace_i915_gem_object_pread(obj, offset, size)
 #endif
@@ -374,6 +374,7 @@ static int check_connector(struct drm_device *dev, struct drm_connector *connector)
 			      connector->name, connector->base.id);
 		return -EINVAL;
 	};
+	connector->encoder = encoder;
 }
 
 	crtc = encoder->crtc;
@@ -382,7 +383,8 @@ static int check_connector(struct drm_device *dev, struct drm_connector *connector)
 
 	if(crtc != NULL)
 	{
-		encoder->crtc = crtc;
+		DRM_DEBUG_KMS("%s connector: %p encode: %p crtc: %p\n",__FUNCTION__,
+			      connector, encoder, crtc);
 		return 0;
 	}
 	else
@@ -823,8 +825,6 @@ int init_cursor(cursor_t *cursor)
 
 	FreeKernelSpace(mapped);
 
-// release old cursor
-
 	KernelFree(cursor->data);
 
 	cursor->data = bits;
@@ -1153,13 +1153,21 @@ int i915_mask_update_ex(struct drm_device *dev, void *data,
 	    mask->height== 0 )
 		return 1;
 
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
 	obj = drm_gem_object_lookup(dev, file, mask->handle);
 	if (obj == NULL)
-		return -ENOENT;
+	{
+		ret = -ENOENT;
+		goto unlock;
+	}
 
-	if (!obj->filp) {
-		drm_gem_object_unreference_unlocked(obj);
-		return -EINVAL;
+	if (!obj->filp)
+	{
+		ret = -ENOENT;
+		goto out;
 	}
 
 #if 0
@@ -1179,10 +1187,6 @@ int i915_mask_update_ex(struct drm_device *dev, void *data,
 	u8* dst_offset;
 	u32 ifl;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto err1;
-
 	i915_gem_object_set_to_cpu_domain(to_intel_bo(obj), true);
 
 	src_offset = os_display->win_map;
@@ -1301,11 +1305,12 @@ int i915_mask_update_ex(struct drm_device *dev, void *data,
 	}
 #endif
 
-err2:
-	mutex_unlock(&dev->struct_mutex);
-err1:
+out:
 	drm_gem_object_unreference(obj);
+
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+
 	return ret;
 }
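The i915_mask_update_ex rework above replaces scattered early returns with a single unwind chain: the mutex taken first is released last, and a failed lookup skips the object unreference. The generic shape of that pattern, with hypothetical stubs standing in for the real locking and GEM calls:

/* hypothetical stand-ins for i915_mutex_lock_interruptible() etc. */
static int  lock_interruptible(void) { return 0; }
static void unlock_mutex(void)       { }
static int  lookup_object(void)      { return 0; }
static void put_object(void)         { }
static int  do_work(void)            { return 0; }

int mask_update_sketch(void)
{
    int ret;

    ret = lock_interruptible();     /* first resource: the mutex */
    if (ret)
        return ret;                 /* nothing acquired yet, plain return */

    ret = lookup_object();          /* second resource: the object */
    if (ret)
        goto unlock;                /* skip put_object(): lookup failed */

    ret = do_work();
    /* fall through: release in reverse order of acquisition */

    put_object();
unlock:
    unlock_mutex();
    return ret;
}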
@@ -126,96 +126,4 @@ cleanup:
 }
 
-
-struct drm_i915_gem_object *
-kos_gem_fb_object_create(struct drm_device *dev,
-			 u32 gtt_offset,
-			 u32 size)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *ggtt = &dev_priv->gtt.base;
-	struct drm_i915_gem_object *obj;
-	struct drm_mm_node *fb_node;
-	struct i915_vma *vma;
-	int ret;
-
-	DRM_DEBUG_KMS("creating preallocated framebuffer object: gtt_offset=%x, size=%x\n",
-		      gtt_offset, size);
-
-	/* KISS and expect everything to be page-aligned */
-	BUG_ON(size & 4095);
-
-	if (WARN_ON(size == 0))
-		return NULL;
-
-	fb_node = kzalloc(sizeof(*fb_node), GFP_KERNEL);
-	if (!fb_node)
-		return NULL;
-
-	fb_node->start = gtt_offset;
-	fb_node->size = size;
-
-	obj = _kos_fb_object_create(dev, fb_node);
-	if (obj == NULL) {
-		DRM_DEBUG_KMS("failed to preallocate framebuffer object\n");
-		kfree(fb_node);
-		return NULL;
-	}
-
-	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
-	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
-		goto err_out;
-	}
-
-	/* To simplify the initialisation sequence between KMS and GTT,
-	 * we allow construction of the stolen object prior to
-	 * setting up the GTT space. The actual reservation will occur
-	 * later.
-	 */
-	vma->node.start = gtt_offset;
-	vma->node.size = size;
-	if (drm_mm_initialized(&ggtt->mm)) {
-		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
-		if (ret) {
-			DRM_DEBUG_KMS("failed to allocate framebuffer GTT space\n");
-			goto err_vma;
-		}
-	}
-
-//	obj->has_global_gtt_mapping = 1;
-
-	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	list_add_tail(&vma->mm_list, &ggtt->inactive_list);
-
-	mutex_lock(&dev->object_name_lock);
-	idr_preload(GFP_KERNEL);
-
-	if (!obj->base.name) {
-		ret = idr_alloc(&dev->object_name_idr, &obj->base, 1, 0, GFP_NOWAIT);
-		if (ret < 0)
-			goto err_gem;
-
-		obj->base.name = ret;
-
-		/* Allocate a reference for the name table. */
-		drm_gem_object_reference(&obj->base);
-
-		DRM_DEBUG_KMS("%s allocate fb name %d\n", __FUNCTION__, obj->base.name );
-	}
-
-	idr_preload_end();
-	mutex_unlock(&dev->object_name_lock);
-	drm_gem_object_unreference(&obj->base);
-	return obj;
-
-err_gem:
-	idr_preload_end();
-	mutex_unlock(&dev->object_name_lock);
-err_vma:
-	i915_gem_vma_destroy(vma);
-err_out:
-	kfree(fb_node);
-	drm_gem_object_unreference(&obj->base);
-	return NULL;
-}
-
@@ -14,7 +14,7 @@
 #include "bitmap.h"
 #include "i915_kos32.h"
 
-#define DRV_NAME "i915 v4.4"
+#define DRV_NAME "i915 v4.4.1"
 
 #define I915_DEV_CLOSE 0
 #define I915_DEV_INIT  1
@@ -374,6 +374,8 @@ int do_command_line(const char* usercmd)
 #define SRV_MASK_UPDATE        45
 #define SRV_MASK_UPDATE_EX     46
 
+#define SRV_I915_GEM_PREAD     47
+
 #define check_input(size) \
 	if( unlikely((inp==NULL)||(io->inp_size != (size))) ) \
 		break;
@@ -459,6 +461,10 @@ int _stdcall display_handler(ioctl_t *io)
 			retval = i915_gem_set_caching_ioctl(main_device, inp, file);
 			break;
 
+		case SRV_I915_GEM_PREAD:
+			retval = i915_gem_pread_ioctl(main_device, inp, file);
+			break;
+
 		case SRV_I915_GEM_PWRITE:
 			retval = i915_gem_pwrite_ioctl(main_device, inp, file);
 			break;
@@ -498,7 +504,6 @@ int _stdcall display_handler(ioctl_t *io)
 			break;
 
 		case SRV_I915_GEM_EXECBUFFER2:
-//            printf("SRV_I915_GEM_EXECBUFFER2\n");
 			retval = i915_gem_execbuffer2(main_device, inp, file);
 			break;
 
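The display_handler hunks above extend an io_code switch; the new SRV_I915_GEM_PREAD case simply forwards the raw input block to the corresponding driver ioctl, like its neighbours. A minimal sketch of that dispatch shape (the ioctl_t layout here is hypothetical, standing in for the KolibriOS one):

typedef struct {
    unsigned io_code;   /* service number, e.g. SRV_I915_GEM_PREAD */
    void    *input;     /* raw argument block from the caller */
    int      inp_size;  /* size of the argument block */
} ioctl_t;

#define SRV_I915_GEM_PREAD 47

/* stand-in for i915_gem_pread_ioctl(main_device, inp, file) */
static int gem_pread_stub(void *arg) { (void)arg; return 0; }

int display_handler_sketch(ioctl_t *io)
{
    int retval = -1;

    switch (io->io_code) {
    case SRV_I915_GEM_PREAD:
        retval = gem_pread_stub(io->input);
        break;
    default:
        break;
    }
    return retval;
}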
@@ -1,10 +1,12 @@
+#include <syscall.h>
+
 #include <linux/kernel.h>
 #include <linux/mutex.h>
 #include <linux/mod_devicetable.h>
 #include <linux/slab.h>
+#include <linux/pm.h>
 #include <linux/pci.h>
-#include <syscall.h>
 
 extern int pci_scan_filter(u32 id, u32 busnr, u32 devfn);
 
@@ -372,7 +374,7 @@ static pci_dev_t* pci_scan_device(u32 busnr, int devfn)
 
-int pci_scan_slot(u32 bus, int devfn)
+int _pci_scan_slot(u32 bus, int devfn)
 {
 	int func, nr = 0;
 
@@ -493,7 +495,7 @@ int enum_pci_devices()
 	for(;bus <= last_bus; bus++)
 	{
 		for (devfn = 0; devfn < 0x100; devfn += 8)
-			pci_scan_slot(bus, devfn);
+			_pci_scan_slot(bus, devfn);
 
 	}
@@ -560,7 +562,7 @@ pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from)
 	     dev = (pci_dev_t*)dev->link.next)
 	{
 		if( dev->pci_dev.vendor != vendor )
 			continue;
 
 		if(dev->pci_dev.device == device)
 		{
@@ -571,7 +573,7 @@ pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from)
 	};
 
-struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
+struct pci_dev * _pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
 {
 	pci_dev_t *dev;
 
@@ -664,13 +666,6 @@ void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 }
 
-static inline void
-pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
-			struct resource *res)
-{
-	region->start = res->start;
-	region->end = res->end;
-}
-
 int pci_enable_rom(struct pci_dev *pdev)
@@ -682,7 +677,7 @@ int pci_enable_rom(struct pci_dev *pdev)
 	if (!res->flags)
 		return -1;
 
-	pcibios_resource_to_bus(pdev, &region, res);
+	_pcibios_resource_to_bus(pdev, &region, res);
 	pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
 	rom_addr &= ~PCI_ROM_ADDRESS_MASK;
 	rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE;
@@ -758,49 +753,49 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
  */
 void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
 {
 	struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
 	loff_t start;
 	void __iomem *rom;
 
 	/*
 	 * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
 	 * memory map if the VGA enable bit of the Bridge Control register is
 	 * set for embedded VGA.
 	 */
 	if (res->flags & IORESOURCE_ROM_SHADOW) {
 		/* primary video rom always starts here */
 		start = (loff_t)0xC0000;
 		*size = 0x20000; /* cover C000:0 through E000:0 */
 	} else {
 		if (res->flags &
 		    (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
 			*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
 			return (void __iomem *)(unsigned long)
 				pci_resource_start(pdev, PCI_ROM_RESOURCE);
 		} else {
 			start = (loff_t)0xC0000;
 			*size = 0x20000; /* cover C000:0 through E000:0 */
-	}
 		}
+	}
 
 	rom = ioremap(start, *size);
 	if (!rom) {
 		/* restore enable if ioremap fails */
 		if (!(res->flags & (IORESOURCE_ROM_ENABLE |
 				    IORESOURCE_ROM_SHADOW |
 				    IORESOURCE_ROM_COPY)))
 			pci_disable_rom(pdev);
 		return NULL;
 	}
 
 	/*
 	 * Try to find the true size of the ROM since sometimes the PCI window
 	 * size is much larger than the actual size of the ROM.
 	 * True size is important if the ROM is going to be copied.
 	 */
 	*size = pci_get_rom_size(pdev, rom, *size);
 	return rom;
 }
 
 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
@@ -817,50 +812,260 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
 	pci_disable_rom(pdev);
 }
 
-#if 0
-void pcibios_set_master(struct pci_dev *dev)
-{
-	u8 lat;
-
-	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
-	if (pci_is_pcie(dev))
-		return;
-
-	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
-	if (lat < 16)
-		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
-	else if (lat > pcibios_max_latency)
-		lat = pcibios_max_latency;
-	else
-		return;
-	dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
-	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
-}
-#endif
-
 static void __pci_set_master(struct pci_dev *dev, bool enable)
 {
 	u16 old_cmd, cmd;
 
 	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
 	if (enable)
 		cmd = old_cmd | PCI_COMMAND_MASTER;
 	else
 		cmd = old_cmd & ~PCI_COMMAND_MASTER;
 	if (cmd != old_cmd) {
 		dbgprintf("%s bus mastering\n",
 			  enable ? "enabling" : "disabling");
 		pci_write_config_word(dev, PCI_COMMAND, cmd);
 	}
 	dev->is_busmaster = enable;
 }
 
+/* pci_set_master - enables bus-mastering for device dev
+ * @dev: the PCI device to enable
+ *
+ * Enables bus-mastering on the device and calls pcibios_set_master()
+ * to do the needed arch specific settings.
+ */
 void pci_set_master(struct pci_dev *dev)
 {
 	__pci_set_master(dev, true);
 //	pcibios_set_master(dev);
+}
+
+/**
+ * pci_clear_master - disables bus-mastering for device dev
+ * @dev: the PCI device to disable
+ */
+void pci_clear_master(struct pci_dev *dev)
+{
+	__pci_set_master(dev, false);
+}
+
+static inline int pcie_cap_version(const struct pci_dev *dev)
+{
+	return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS;
+}
+
+static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
+{
+	return true;
+}
+
+static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
+{
+	int type = pci_pcie_type(dev);
+
+	return pcie_cap_version(dev) > 1 ||
+	       type == PCI_EXP_TYPE_ROOT_PORT ||
+	       type == PCI_EXP_TYPE_ENDPOINT ||
+	       type == PCI_EXP_TYPE_LEG_END;
+}
+
+static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
+{
+	int type = pci_pcie_type(dev);
+
+	return pcie_cap_version(dev) > 1 ||
+	       type == PCI_EXP_TYPE_ROOT_PORT ||
+	       (type == PCI_EXP_TYPE_DOWNSTREAM &&
+		dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
+}
+
+static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
+{
+	int type = pci_pcie_type(dev);
+
+	return pcie_cap_version(dev) > 1 ||
+	       type == PCI_EXP_TYPE_ROOT_PORT ||
+	       type == PCI_EXP_TYPE_RC_EC;
+}
+
+static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
+{
+	if (!pci_is_pcie(dev))
+		return false;
+
+	switch (pos) {
+	case PCI_EXP_FLAGS_TYPE:
+		return true;
+	case PCI_EXP_DEVCAP:
+	case PCI_EXP_DEVCTL:
+	case PCI_EXP_DEVSTA:
+		return pcie_cap_has_devctl(dev);
+	case PCI_EXP_LNKCAP:
+	case PCI_EXP_LNKCTL:
+	case PCI_EXP_LNKSTA:
+		return pcie_cap_has_lnkctl(dev);
+	case PCI_EXP_SLTCAP:
+	case PCI_EXP_SLTCTL:
+	case PCI_EXP_SLTSTA:
+		return pcie_cap_has_sltctl(dev);
+	case PCI_EXP_RTCTL:
+	case PCI_EXP_RTCAP:
+	case PCI_EXP_RTSTA:
+		return pcie_cap_has_rtctl(dev);
+	case PCI_EXP_DEVCAP2:
+	case PCI_EXP_DEVCTL2:
+	case PCI_EXP_LNKCAP2:
+	case PCI_EXP_LNKCTL2:
+	case PCI_EXP_LNKSTA2:
+		return pcie_cap_version(dev) > 1;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Note that these accessor functions are only for the "PCI Express
+ * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
+ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
+ */
+int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
+{
+	int ret;
+
+	*val = 0;
+	if (pos & 1)
+		return -EINVAL;
+
+	if (pcie_capability_reg_implemented(dev, pos)) {
+		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
+		/*
+		 * Reset *val to 0 if pci_read_config_word() fails, it may
+		 * have been written as 0xFFFF if hardware error happens
+		 * during pci_read_config_word().
+		 */
+		if (ret)
+			*val = 0;
+		return ret;
+	}
+
+	/*
+	 * For Functions that do not implement the Slot Capabilities,
+	 * Slot Status, and Slot Control registers, these spaces must
+	 * be hardwired to 0b, with the exception of the Presence Detect
+	 * State bit in the Slot Status register of Downstream Ports,
+	 * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
+	 */
+	if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
+	    pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+		*val = PCI_EXP_SLTSTA_PDS;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(pcie_capability_read_word);
+
+int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
+{
+	int ret;
+
+	*val = 0;
+	if (pos & 3)
+		return -EINVAL;
+
+	if (pcie_capability_reg_implemented(dev, pos)) {
+		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
+		/*
+		 * Reset *val to 0 if pci_read_config_dword() fails, it may
+		 * have been written as 0xFFFFFFFF if hardware error happens
+		 * during pci_read_config_dword().
+		 */
+		if (ret)
+			*val = 0;
+		return ret;
+	}
+
+	if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
+	    pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+		*val = PCI_EXP_SLTSTA_PDS;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(pcie_capability_read_dword);
+
+int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
+{
+	if (pos & 1)
+		return -EINVAL;
+
+	if (!pcie_capability_reg_implemented(dev, pos))
+		return 0;
+
+	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
+}
+EXPORT_SYMBOL(pcie_capability_write_word);
+
+int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
+{
+	if (pos & 3)
+		return -EINVAL;
+
+	if (!pcie_capability_reg_implemented(dev, pos))
+		return 0;
+
+	return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
+}
+EXPORT_SYMBOL(pcie_capability_write_dword);
+
+int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+				       u16 clear, u16 set)
+{
+	int ret;
+	u16 val;
+
+	ret = pcie_capability_read_word(dev, pos, &val);
+	if (!ret) {
+		val &= ~clear;
+		val |= set;
+		ret = pcie_capability_write_word(dev, pos, val);
+	}
+
+	return ret;
+}
+
+int pcie_get_readrq(struct pci_dev *dev)
+{
+	u16 ctl;
+
+	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
+
+	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+}
+EXPORT_SYMBOL(pcie_get_readrq);
+
+/**
+ * pcie_set_readrq - set PCI Express maximum memory read request
+ * @dev: PCI device to query
+ * @rq: maximum memory read count in bytes
+ *    valid values are 128, 256, 512, 1024, 2048, 4096
+ *
+ * If possible sets maximum memory read request in bytes
+ */
+int pcie_set_readrq(struct pci_dev *dev, int rq)
+{
+	u16 v;
+
+	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
+		return -EINVAL;
+
+	v = (ffs(rq) - 8) << 12;
+
+	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
+						  PCI_EXP_DEVCTL_READRQ, v);
 }
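The pcie_set_readrq()/pcie_get_readrq() pair added above encodes the maximum read request size into bits 14:12 of the Device Control register: 128 bytes maps to field value 0, and each doubling adds one. A standalone round-trip of the encoding with rq = 512 (ffs_() is a local stand-in for the kernel's ffs()):

#include <stdio.h>

#define PCI_EXP_DEVCTL_READRQ 0x7000   /* bits 14:12 of Device Control */

/* stand-in for ffs(): 1-based index of the lowest set bit, v != 0 */
static int ffs_(unsigned v) { int n = 1; while (!(v & 1)) { v >>= 1; n++; } return n; }

int main(void)
{
    int rq = 512;                             /* power of two in [128, 4096] */
    unsigned short v = (ffs_(rq) - 8) << 12;  /* 512 = 2^9 -> field value 2 */

    /* decode exactly as pcie_get_readrq() does */
    int decoded = 128 << ((v & PCI_EXP_DEVCTL_READRQ) >> 12);

    printf("encoded 0x%04x -> %d bytes\n", v, decoded); /* 0x2000 -> 512 */
    return 0;
}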
@@ -588,25 +588,6 @@ void kunmap_atomic(void *vaddr)
 	MutexUnlock(&kmap_mutex);
 }
 
-size_t strlcat(char *dest, const char *src, size_t count)
-{
-	size_t dsize = strlen(dest);
-	size_t len = strlen(src);
-	size_t res = dsize + len;
-
-	/* This would be a bug */
-	BUG_ON(dsize >= count);
-
-	dest += dsize;
-	count -= dsize;
-	if (len >= count)
-		len = count-1;
-	memcpy(dest, src, len);
-	dest[len] = 0;
-	return res;
-}
-EXPORT_SYMBOL(strlcat);
-
 void msleep(unsigned int msecs)
 {
 	msecs /= 10;