Partial viadrm2 snapshot.

To do:

- autoconf attachment (shouldn't be hard)
- viafb (maybe steal unichromefb and adapt attachment structure)
- actually run it (no hardware here)
riastradh 2014-08-26 17:28:14 +00:00
parent 894179c374
commit 89b4554f68
7 changed files with 377 additions and 18 deletions

sys/external/bsd/drm2/dist/drm/via/via_dma.c

@@ -39,6 +39,8 @@
#include "via_drv.h"
#include "via_3d_reg.h"
#include <linux/delay.h>
#define CMDBUF_ALIGNMENT_SIZE (0x100)
#define CMDBUF_ALIGNMENT_MASK (0x0ff)
@@ -234,13 +236,21 @@ static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *fil
switch (init->func) {
case VIA_INIT_DMA:
#ifdef __NetBSD__
if (!DRM_SUSER())
#else
if (!capable(CAP_SYS_ADMIN))
#endif
retcode = -EPERM;
else
retcode = via_initialize(dev, dev_priv, init);
break;
case VIA_CLEANUP_DMA:
#ifdef __NetBSD__
if (!DRM_SUSER())
#else
if (!capable(CAP_SYS_ADMIN))
#endif
retcode = -EPERM;
else
retcode = via_dma_cleanup(dev);
@@ -586,13 +596,11 @@ static inline void via_dummy_bitblt(drm_via_private_t *dev_priv)
static void via_cmdbuf_jump(drm_via_private_t *dev_priv)
{
uint32_t agp_base;
uint32_t pause_addr_lo, pause_addr_hi;
uint32_t jump_addr_lo, jump_addr_hi;
volatile uint32_t *last_pause_ptr;
uint32_t dma_low_save1, dma_low_save2;
agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
&jump_addr_lo, 0);
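
The only OS-specific piece in the via_dma_init hunk above is the privilege check: Linux asks capable(CAP_SYS_ADMIN), the NetBSD path asks DRM_SUSER(). A minimal sketch of what DRM_SUSER() amounts to, assuming the kauth(9) interface (hypothetical illustration; the real definition lives in the NetBSD DRM compat headers):

	#include <sys/kauth.h>

	/* Sketch only: nonzero if the calling credential has superuser
	 * privilege, mirroring capable(CAP_SYS_ADMIN) on Linux. */
	static inline int
	drm_suser_sketch(void)
	{
		return kauth_authorize_generic(kauth_cred_get(),
		    KAUTH_GENERIC_ISSUSER, NULL) == 0;
	}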

sys/external/bsd/drm2/dist/drm/via/via_dmablit.c

@@ -41,6 +41,7 @@
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/timer.h>
#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
@@ -61,8 +62,12 @@ typedef struct _drm_via_descriptor {
static void
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
via_unmap_blit_from_device(struct drm_device *dev, struct pci_dev *pdev,
drm_via_sg_info_t *vsg)
{
#ifdef __NetBSD__
bus_dmamap_unload(dev->dmat, vsg->dmamap);
#else
int num_desc = vsg->num_desc;
unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
@@ -82,6 +87,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
next = (dma_addr_t) desc_ptr->next;
desc_ptr--;
}
#endif
}
/*
@@ -101,7 +107,9 @@ via_map_blit_for_device(struct pci_dev *pdev,
unsigned num_descriptors_this_page = 0;
unsigned char *mem_addr = xfer->mem_addr;
unsigned char *cur_mem;
#ifndef __NetBSD__
unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
#endif
uint32_t fb_addr = xfer->fb_addr;
uint32_t cur_fb;
unsigned long line_len;
@@ -126,18 +134,31 @@ via_map_blit_for_device(struct pci_dev *pdev,
line_len -= remaining_len;
if (mode == 1) {
#ifdef __NetBSD__
const bus_dma_segment_t *const seg =
&vsg->dmamap->dm_segs[atop(cur_mem)];
desc_ptr->mem_addr =
seg->ds_addr + trunc_page((vaddr_t)cur_mem);
#else
desc_ptr->mem_addr =
dma_map_page(&pdev->dev,
vsg->pages[VIA_PFN(cur_mem) -
VIA_PFN(first_addr)],
VIA_PGOFF(cur_mem), remaining_len,
vsg->direction);
#endif
desc_ptr->dev_addr = cur_fb;
desc_ptr->size = remaining_len;
desc_ptr->next = (uint32_t) next;
#ifdef __NetBSD__
next = vsg->desc_dmamap
->dm_segs[cur_descriptor_page].ds_addr
+ num_descriptors_this_page;
#else
next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
DMA_TO_DEVICE);
#endif
desc_ptr++;
if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
num_descriptors_this_page = 0;
@@ -169,21 +190,40 @@ via_map_blit_for_device(struct pci_dev *pdev,
static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
via_free_sg_info(struct drm_device *dev, struct pci_dev *pdev,
drm_via_sg_info_t *vsg)
{
#ifndef __NetBSD__
struct page *page;
int i;
#endif
switch (vsg->state) {
case dr_via_device_mapped:
via_unmap_blit_from_device(pdev, vsg);
via_unmap_blit_from_device(dev, pdev, vsg);
case dr_via_desc_pages_alloc:
#ifdef __NetBSD__
bus_dmamap_unload(dev->dmat, vsg->desc_dmamap);
bus_dmamap_destroy(dev->dmat, vsg->desc_dmamap);
bus_dmamem_unmap(dev->dmat, vsg->desc_kva,
vsg->num_desc_pages << PAGE_SHIFT);
bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
kfree(vsg->desc_segs);
#else
for (i = 0; i < vsg->num_desc_pages; ++i) {
if (vsg->desc_pages[i] != NULL)
free_page((unsigned long)vsg->desc_pages[i]);
}
#endif
kfree(vsg->desc_pages);
case dr_via_pages_locked:
#ifdef __NetBSD__
/* Make sure any completed transfer is synced. */
bus_dmamap_sync(dev->dmat, vsg->dmamap, 0,
vsg->num_pages << PAGE_SHIFT,
(vsg->direction == DMA_FROM_DEVICE?
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
#else
for (i = 0; i < vsg->num_pages; ++i) {
if (NULL != (page = vsg->pages[i])) {
if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
@@ -191,13 +231,16 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
page_cache_release(page);
}
}
#endif
case dr_via_pages_alloc:
#ifdef __NetBSD__
bus_dmamap_destroy(dev->dmat, vsg->dmamap);
#else
vfree(vsg->pages);
#endif
default:
vsg->state = dr_via_sg_init;
}
vfree(vsg->bounce_buffer);
vsg->bounce_buffer = NULL;
vsg->free_on_sequence = 0;
}
@@ -228,9 +271,47 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
*/
static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
via_lock_all_dma_pages(struct drm_device *dev, drm_via_sg_info_t *vsg,
drm_via_dmablit_t *xfer)
{
int ret;
#ifdef __NetBSD__
const bus_size_t nbytes = roundup2(xfer->num_lines * xfer->mem_stride,
PAGE_SIZE);
const bus_size_t npages = nbytes >> PAGE_SHIFT;
struct iovec iov = {
.iov_base = xfer->mem_addr,
.iov_len = nbytes,
};
struct uio uio = {
.uio_iov = &iov,
.uio_iovcnt = 1,
.uio_offset = 0,
.uio_resid = nbytes,
.uio_rw = xfer->to_fb ? UIO_WRITE : UIO_READ,
.uio_vmspace = curproc->p_vmspace,
};
/*
* XXX Lock out anyone else from doing this? Add a
* dr_via_pages_loading state? Just rely on the giant lock?
*/
/* XXX errno NetBSD->Linux */
ret = -bus_dmamap_create(dev->dmat, nbytes, npages, nbytes, PAGE_SIZE,
BUS_DMA_WAITOK, &vsg->dmamap);
if (ret) {
DRM_ERROR("bus_dmamap_create failed: %d\n", ret);
return ret;
}
ret = -bus_dmamap_load_uio(dev->dmat, vsg->dmamap, &uio,
BUS_DMA_WAITOK | (xfer->to_fb? BUS_DMA_WRITE : BUS_DMA_READ));
if (ret) {
DRM_ERROR("bus_dmamap_load failed: %d\n", ret);
bus_dmamap_destroy(dev->dmat, vsg->dmamap);
return ret;
}
vsg->num_pages = npages;
#else
unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
first_pfn + 1;
@@ -252,6 +333,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
vsg->state = dr_via_pages_locked;
return -EINVAL;
}
#endif
vsg->state = dr_via_pages_locked;
DRM_DEBUG("DMA pages locked\n");
return 0;
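
The NetBSD branch above replaces Linux's get_user_pages()-style pinning with a single bus_dmamap_load_uio() of the user buffer. For orientation, a condensed sketch of the bus_dma(9) lifecycle these calls belong to, with placeholder arguments and error handling omitted (editorial, not from the commit):

	static void
	dma_lifecycle_sketch(bus_dma_tag_t tag, struct uio *uio,
	    bus_size_t size, int npages)
	{
		bus_dmamap_t map;

		/* Reserve resources for a map of up to npages segments. */
		bus_dmamap_create(tag, size, npages, size, PAGE_SIZE,
		    BUS_DMA_WAITOK, &map);
		/* Wire the user pages and translate to bus addresses. */
		bus_dmamap_load_uio(tag, map, uio,
		    BUS_DMA_WAITOK | BUS_DMA_READ);
		bus_dmamap_sync(tag, map, 0, size, BUS_DMASYNC_PREREAD);
		/* ... device DMA (the blit) runs here ... */
		bus_dmamap_sync(tag, map, 0, size, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
		bus_dmamap_destroy(tag, map);
	}

The PRE/POST sync pairs show up later in this file, in via_dmablit() before queueing and in the workqueue after completion.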
@@ -264,9 +346,12 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
*/
static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
via_alloc_desc_pages(struct drm_device *dev, drm_via_sg_info_t *vsg)
{
int i;
#ifdef __NetBSD__
int ret;
#endif
vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
@@ -275,12 +360,67 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
return -ENOMEM;
#ifdef __NetBSD__
vsg->desc_segs = kcalloc(vsg->num_desc_pages, sizeof(*vsg->desc_segs),
GFP_KERNEL);
if (vsg->desc_segs == NULL) {
kfree(vsg->desc_pages);
return -ENOMEM;
}
/* XXX errno NetBSD->Linux */
ret = -bus_dmamem_alloc(dev->dmat, vsg->num_desc_pages << PAGE_SHIFT,
PAGE_SIZE, 0, vsg->desc_segs, vsg->num_desc_pages, &vsg->num_desc_segs,
BUS_DMA_WAITOK);
if (ret) {
kfree(vsg->desc_segs);
kfree(vsg->desc_pages);
return -ENOMEM;
}
/* XXX No nice way to scatter/gather map bus_dmamem. */
/* XXX errno NetBSD->Linux */
ret = -bus_dmamem_map(dev->dmat, vsg->desc_segs, vsg->num_desc_segs,
vsg->num_desc_pages << PAGE_SHIFT, &vsg->desc_kva, BUS_DMA_WAITOK);
if (ret) {
bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
kfree(vsg->desc_segs);
kfree(vsg->desc_pages);
return -ENOMEM;
}
/* XXX errno NetBSD->Linux */
ret = -bus_dmamap_create(dev->dmat, vsg->num_desc_pages << PAGE_SHIFT,
vsg->num_desc_pages, PAGE_SIZE, 0, BUS_DMA_WAITOK,
&vsg->desc_dmamap);
if (ret) {
bus_dmamem_unmap(dev->dmat, vsg->desc_kva,
vsg->num_desc_pages << PAGE_SHIFT);
bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
kfree(vsg->desc_segs);
kfree(vsg->desc_pages);
return -ENOMEM;
}
ret = -bus_dmamap_load(dev->dmat, vsg->desc_dmamap, vsg->desc_kva,
vsg->num_desc_pages << PAGE_SHIFT, NULL, BUS_DMA_WAITOK);
if (ret) {
bus_dmamap_destroy(dev->dmat, vsg->desc_dmamap);
bus_dmamem_unmap(dev->dmat, vsg->desc_kva,
vsg->num_desc_pages << PAGE_SHIFT);
bus_dmamem_free(dev->dmat, vsg->desc_segs, vsg->num_desc_segs);
kfree(vsg->desc_segs);
kfree(vsg->desc_pages);
return -ENOMEM;
}
for (i = 0; i < vsg->num_desc_pages; i++)
vsg->desc_pages[i] = (void *)
((char *)vsg->desc_kva + (i * PAGE_SIZE));
vsg->state = dr_via_desc_pages_alloc;
#else
vsg->state = dr_via_desc_pages_alloc;
for (i = 0; i < vsg->num_desc_pages; ++i) {
if (NULL == (vsg->desc_pages[i] =
(drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
return -ENOMEM;
}
#endif
DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
vsg->num_desc);
return 0;
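
Each failure branch above repeats the cleanup of everything allocated so far; the same unwind is often written as a goto ladder. An equivalent sketch for a single segment, mirroring the alloc/map/create/load order used above (editorial, hypothetical names):

	static int
	setup_with_unwind(bus_dma_tag_t tag, bus_size_t size,
	    bus_dma_segment_t *seg, void **kvap, bus_dmamap_t *mapp)
	{
		int nsegs, ret;

		/* XXX errno NetBSD->Linux, as in the code above */
		ret = -bus_dmamem_alloc(tag, size, PAGE_SIZE, 0, seg, 1,
		    &nsegs, BUS_DMA_WAITOK);
		if (ret)
			goto fail0;
		ret = -bus_dmamem_map(tag, seg, nsegs, size, kvap,
		    BUS_DMA_WAITOK);
		if (ret)
			goto fail1;
		ret = -bus_dmamap_create(tag, size, 1, size, 0,
		    BUS_DMA_WAITOK, mapp);
		if (ret)
			goto fail2;
		ret = -bus_dmamap_load(tag, *mapp, *kvap, size, NULL,
		    BUS_DMA_WAITOK);
		if (ret)
			goto fail3;
		return 0;

	fail3:	bus_dmamap_destroy(tag, *mapp);
	fail2:	bus_dmamem_unmap(tag, *kvap, size);
	fail1:	bus_dmamem_free(tag, seg, nsegs);
	fail0:	return ret;
	}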
@@ -338,7 +478,12 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->blits[cur]->aborted = blitq->aborting;
blitq->done_blit_handle++;
#ifdef __NetBSD__
DRM_SPIN_WAKEUP_ALL(&blitq->blit_queue[cur],
&blitq->blit_lock);
#else
wake_up(blitq->blit_queue + cur);
#endif
cur++;
if (cur >= VIA_NUM_BLIT_SLOTS)
@@ -363,7 +508,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
via_abort_dmablit(dev, engine);
blitq->aborting = 1;
blitq->end = jiffies + HZ;
blitq->end = jiffies + DRM_HZ;
}
if (!blitq->is_active) {
@@ -372,7 +517,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->is_active = 1;
blitq->cur = cur;
blitq->num_outstanding--;
blitq->end = jiffies + HZ;
blitq->end = jiffies + DRM_HZ;
if (!timer_pending(&blitq->poll_timer))
mod_timer(&blitq->poll_timer, jiffies + 1);
} else {
@@ -395,13 +540,21 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
*/
static int
#ifdef __NetBSD__
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, drm_waitqueue_t **queue)
#else
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
#endif
{
#ifndef __NetBSD__
unsigned long irqsave;
#endif
uint32_t slot;
int active;
#ifndef __NetBSD__
spin_lock_irqsave(&blitq->blit_lock, irqsave);
#endif
/*
* Allow for handle wraparounds.
@@ -417,7 +570,9 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que
*queue = blitq->blit_queue + slot;
}
#ifndef __NetBSD__
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
#endif
return active;
}
@@ -432,13 +587,27 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
#ifdef __NetBSD__
drm_waitqueue_t *queue;
#else
wait_queue_head_t *queue;
#endif
int ret = 0;
#ifdef __NetBSD__
spin_lock(&blitq->blit_lock);
if (via_dmablit_active(blitq, engine, handle, &queue)) {
DRM_SPIN_TIMED_WAIT_UNTIL(ret, queue, &blitq->blit_lock,
3*DRM_HZ,
!via_dmablit_active(blitq, engine, handle, NULL));
}
spin_unlock(&blitq->blit_lock);
#else
if (via_dmablit_active(blitq, engine, handle, &queue)) {
DRM_WAIT_ON(ret, *queue, 3 * HZ,
!via_dmablit_active(blitq, engine, handle, NULL));
}
#endif
DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
handle, engine, ret);
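
DRM_SPIN_TIMED_WAIT_UNTIL re-tests the condition under the interlock before sleeping on the queue, so no wakeup can slip between the test and the sleep. A reduced model in raw condvar terms, assuming drm_waitqueue_t wraps a kcondvar(9) (sketch, not the actual macro; a real version would also recompute the remaining timeout after each wakeup):

	#include <sys/systm.h>
	#include <sys/condvar.h>
	#include <sys/mutex.h>

	static int
	timed_wait_until_sketch(kcondvar_t *cv, kmutex_t *interlock,
	    int ticks, bool (*done)(void *), void *cookie)
	{
		int error = 0;

		KASSERT(mutex_owned(interlock));
		while (!(*done)(cookie)) {
			error = cv_timedwait_sig(cv, interlock, ticks);
			if (error)	/* EINTR/ERESTART on signal,
					 * EWOULDBLOCK on timeout */
				break;
		}
		return -error;	/* NetBSD errno -> Linux-style negative */
	}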
@@ -519,11 +688,25 @@ via_dmablit_workqueue(struct work_struct *work)
cur_sg = blitq->blits[cur_released];
blitq->num_free++;
#ifdef __NetBSD__
DRM_SPIN_WAKEUP_ONE(&blitq->busy_queue, &blitq->blit_lock);
#endif
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
#ifndef __NetBSD__
wake_up(&blitq->busy_queue);
#endif
via_free_sg_info(dev->pdev, cur_sg);
#ifdef __NetBSD__
/* Transfer completed. Sync it. */
bus_dmamap_sync(dev->dmat, cur_sg->dmamap, 0,
cur_sg->num_pages << PAGE_SHIFT,
(cur_sg->direction == DMA_FROM_DEVICE
? BUS_DMASYNC_POSTREAD
: BUS_DMASYNC_POSTWRITE));
#endif
via_free_sg_info(dev, dev->pdev, cur_sg);
kfree(cur_sg);
spin_lock_irqsave(&blitq->blit_lock, irqsave);
@@ -560,9 +743,15 @@ via_init_dmablit(struct drm_device *dev)
blitq->is_active = 0;
blitq->aborting = 0;
spin_lock_init(&blitq->blit_lock);
#ifdef __NetBSD__
for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
DRM_INIT_WAITQUEUE(blitq->blit_queue + j, "viablt");
DRM_INIT_WAITQUEUE(&blitq->busy_queue, "viabusy");
#else
for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
init_waitqueue_head(blitq->blit_queue + j);
init_waitqueue_head(&blitq->busy_queue);
#endif
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
setup_timer(&blitq->poll_timer, via_dmablit_timer,
(unsigned long)blitq);
@@ -581,7 +770,6 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
int ret = 0;
vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
vsg->bounce_buffer = NULL;
vsg->state = dr_via_sg_init;
@@ -654,16 +842,16 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
}
#endif
if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
if (0 != (ret = via_lock_all_dma_pages(dev, vsg, xfer))) {
DRM_ERROR("Could not lock DMA pages.\n");
via_free_sg_info(dev->pdev, vsg);
via_free_sg_info(dev, dev->pdev, vsg);
return ret;
}
via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
if (0 != (ret = via_alloc_desc_pages(vsg))) {
if (0 != (ret = via_alloc_desc_pages(dev, vsg))) {
DRM_ERROR("Could not allocate DMA descriptor pages.\n");
via_free_sg_info(dev->pdev, vsg);
via_free_sg_info(dev, dev->pdev, vsg);
return ret;
}
via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
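
via_map_blit_for_device() is called twice here: mode 0 only counts descriptors (filling vsg->num_desc so the allocation can be sized), then mode 1 runs the same loop again and actually writes them. The count-then-fill idiom in isolation (editorial sketch, hypothetical names):

	static size_t
	emit_items(int *out, size_t n, bool fill)
	{
		size_t count = 0;

		for (size_t i = 0; i < n; i++) {
			if (fill)
				out[count] = (int)i;	/* pass 2: write */
			count++;			/* both passes: count */
		}
		return count;
	}

	/* usage: n = emit_items(NULL, len, false); allocate n items;
	 * then emit_items(buf, len, true); */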
@@ -686,6 +874,16 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
DRM_DEBUG("Num free is %d\n", blitq->num_free);
spin_lock_irqsave(&blitq->blit_lock, irqsave);
while (blitq->num_free == 0) {
#ifdef __NetBSD__
DRM_SPIN_TIMED_WAIT_UNTIL(ret, &blitq->busy_queue,
&blitq->blit_lock, DRM_HZ,
blitq->num_free > 0);
if (ret) {
if (ret == -EINTR)
ret = -EAGAIN;
return ret;
}
#else
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
DRM_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
@@ -693,6 +891,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
return (-EINTR == ret) ? -EAGAIN : ret;
spin_lock_irqsave(&blitq->blit_lock, irqsave);
#endif
}
blitq->num_free--;
@@ -712,8 +911,13 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
blitq->num_free++;
#ifdef __NetBSD__
DRM_SPIN_WAKEUP_ONE(&blitq->busy_queue, &blitq->blit_lock);
#endif
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
#ifndef __NetBSD__
wake_up(&blitq->busy_queue);
#endif
}
/*
@@ -749,6 +953,14 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
kfree(vsg);
return ret;
}
#ifdef __NetBSD__
/* Prepare to begin a DMA transfer. */
bus_dmamap_sync(dev->dmat, vsg->dmamap, 0,
vsg->num_pages << PAGE_SHIFT,
(vsg->direction == DMA_FROM_DEVICE
? BUS_DMASYNC_PREREAD
: BUS_DMASYNC_PREWRITE));
#endif
spin_lock_irqsave(&blitq->blit_lock, irqsave);
blitq->blits[blitq->head++] = vsg;

sys/external/bsd/drm2/dist/drm/via/via_dmablit.h

@@ -38,13 +38,26 @@
struct _drm_via_descriptor;
typedef struct _drm_via_sg_info {
#ifdef __NetBSD__
bus_dmamap_t dmamap;
#else
struct page **pages;
#endif
unsigned long num_pages;
#ifdef __NetBSD__
bus_dma_segment_t *desc_segs;
int num_desc_segs;
void *desc_kva;
bus_dmamap_t desc_dmamap;
#endif
struct _drm_via_descriptor **desc_pages;
int num_desc_pages;
int num_desc;
#ifdef __NetBSD__
enum { DMA_FROM_DEVICE, DMA_TO_DEVICE } direction;
#else
enum dma_data_direction direction;
unsigned char *bounce_buffer;
#endif
dma_addr_t chain_start;
uint32_t free_on_sequence;
unsigned int descriptors_per_page;
@@ -72,8 +85,13 @@ typedef struct _drm_via_blitq {
int is_active;
drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
spinlock_t blit_lock;
#ifdef __NetBSD__
drm_waitqueue_t blit_queue[VIA_NUM_BLIT_SLOTS];
drm_waitqueue_t busy_queue;
#else
wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
wait_queue_head_t busy_queue;
#endif
struct work_struct wq;
struct timer_list poll_timer;
} drm_via_blitq_t;
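
A quick map of how the two per-OS halves of drm_via_sg_info correspond (editorial summary, not part of the header):

	/*
	 *   Linux field / call                  NetBSD counterpart
	 *   struct page **pages                 bus_dmamap_t dmamap (user buf)
	 *   dma_map_page()/dma_map_single()     bus_dmamap_load_uio()/_load()
	 *   __get_free_page() descriptor pages  desc_segs/desc_kva/desc_dmamap
	 *   enum dma_data_direction             local { DMA_FROM_DEVICE,
	 *                                               DMA_TO_DEVICE }
	 */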

sys/external/bsd/drm2/dist/drm/via/via_drv.h

@@ -51,10 +51,19 @@ typedef struct drm_via_ring_buffer {
typedef uint32_t maskarray_t[5];
typedef struct drm_via_irq {
#ifdef __NetBSD__
spinlock_t irq_lock;
unsigned irq_received;
#else
atomic_t irq_received;
#endif
uint32_t pending_mask;
uint32_t enable_mask;
#ifdef __NetBSD__
drm_waitqueue_t irq_queue;
#else
wait_queue_head_t irq_queue;
#endif
} drm_via_irq_t;
typedef struct drm_via_private {
@@ -63,7 +72,12 @@ typedef struct drm_via_private {
drm_local_map_t *fb;
drm_local_map_t *mmio;
unsigned long agpAddr;
#ifdef __NetBSD__
struct mutex decoder_lock[VIA_NR_XVMC_LOCKS];
drm_waitqueue_t decoder_queue[VIA_NR_XVMC_LOCKS];
#else
wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
#endif
char *dma_ptr;
unsigned int dma_low;
unsigned int dma_high;
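
On NetBSD the lock/counter/queue triple in drm_via_irq stands in for Linux's atomic_t plus wait_queue_head_t: the counter is bumped and read only under the spin lock, which is also the interlock for the sleep. A reduced model in native primitives (sketch; the driver actually uses the Linux-compat spinlock_t and drm_waitqueue_t types):

	#include <sys/condvar.h>
	#include <sys/mutex.h>

	struct irq_seq_sketch {
		kmutex_t	lock;	 /* spin lock shared with handler */
		unsigned	received; /* sequence counter, under lock */
		kcondvar_t	queue;	 /* signalled once per interrupt */
	};

	static void
	irq_seq_post(struct irq_seq_sketch *s)	/* interrupt side */
	{
		mutex_spin_enter(&s->lock);
		s->received++;
		cv_broadcast(&s->queue);
		mutex_spin_exit(&s->lock);
	}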

sys/external/bsd/drm2/dist/drm/via/via_irq.c

@@ -137,8 +137,16 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
for (i = 0; i < dev_priv->num_irqs; ++i) {
if (status & cur_irq->pending_mask) {
#ifdef __NetBSD__
spin_lock(&cur_irq->irq_lock);
cur_irq->irq_received++;
DRM_SPIN_WAKEUP_ONE(&cur_irq->irq_queue,
&cur_irq->irq_lock);
spin_unlock(&cur_irq->irq_lock);
#else
atomic_inc(&cur_irq->irq_received);
wake_up(&cur_irq->irq_queue);
#endif
handled = 1;
if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
via_dmablit_handler(dev, 0, 1);
@@ -238,6 +246,22 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
masks = dev_priv->irq_masks;
cur_irq = dev_priv->via_irqs + real_irq;
#ifdef __NetBSD__
spin_lock(&cur_irq->irq_lock);
if (masks[real_irq][2] && !force_sequence) {
DRM_SPIN_TIMED_WAIT_UNTIL(ret, &cur_irq->irq_queue,
&cur_irq->irq_lock, 3 * DRM_HZ,
((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
masks[irq][4]));
cur_irq_sequence = cur_irq->irq_received;
} else {
DRM_SPIN_TIMED_WAIT_UNTIL(ret, &cur_irq->irq_queue,
&cur_irq->irq_lock, 3 * DRM_HZ,
(((cur_irq_sequence = cur_irq->irq_received) -
*sequence) <= (1 << 23)));
}
spin_unlock(&cur_irq->irq_lock);
#else
if (masks[real_irq][2] && !force_sequence) {
DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
@@ -249,6 +273,7 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
atomic_read(&cur_irq->irq_received)) -
*sequence) <= (1 << 23)));
}
#endif
*sequence = cur_irq_sequence;
return ret;
}
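
The ((cur_irq_sequence = ...) - *sequence) <= (1 << 23) condition in both wait paths is a wraparound-safe "target sequence reached" test; in isolation (editorial sketch):

	/* Unsigned subtraction stays correct across 2^32 rollover, so
	 * "reached" means the forward distance is small. */
	static inline bool
	irq_seq_reached(uint32_t current, uint32_t target)
	{
		return (current - target) <= (1u << 23);
	}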
@@ -284,10 +309,19 @@ void via_driver_irq_preinstall(struct drm_device *dev)
}
for (i = 0; i < dev_priv->num_irqs; ++i) {
#ifdef __NetBSD__
spin_lock_init(&cur_irq->irq_lock);
cur_irq->irq_received = 0;
#else
atomic_set(&cur_irq->irq_received, 0);
#endif
cur_irq->enable_mask = dev_priv->irq_masks[i][0];
cur_irq->pending_mask = dev_priv->irq_masks[i][1];
#ifdef __NetBSD__
DRM_INIT_WAITQUEUE(&cur_irq->irq_queue, "viairq");
#else
init_waitqueue_head(&cur_irq->irq_queue);
#endif
dev_priv->irq_enable_mask |= cur_irq->enable_mask;
dev_priv->irq_pending_mask |= cur_irq->pending_mask;
cur_irq++;
@@ -343,6 +377,17 @@ void via_driver_irq_uninstall(struct drm_device *dev)
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status &
~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
#ifdef __NetBSD__
{
int i;
for (i = 0; i < dev_priv->num_irqs; i++) {
DRM_DESTROY_WAITQUEUE(&dev_priv->via_irqs[i].irq_queue);
spin_lock_destroy(&dev_priv->via_irqs[i].irq_lock);
}
}
#endif
}
}
@@ -365,8 +410,12 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
case VIA_IRQ_RELATIVE:
#ifdef __NetBSD__
irqwait->request.sequence += cur_irq->irq_received;
#else
irqwait->request.sequence +=
atomic_read(&cur_irq->irq_received);
#endif
irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
case VIA_IRQ_ABSOLUTE:
break;

sys/external/bsd/drm2/dist/drm/via/via_video.c

@@ -36,13 +36,26 @@ void via_init_futex(drm_via_private_t *dev_priv)
DRM_DEBUG("\n");
for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
#ifdef __NetBSD__
linux_mutex_init(&dev_priv->decoder_lock[i]);
DRM_INIT_WAITQUEUE(&dev_priv->decoder_queue[i], "viadec");
#else
init_waitqueue_head(&(dev_priv->decoder_queue[i]));
#endif
XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
}
}
void via_cleanup_futex(drm_via_private_t *dev_priv)
{
#ifdef __NetBSD__
unsigned i;
for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
DRM_DESTROY_WAITQUEUE(&dev_priv->decoder_queue[i]);
linux_mutex_destroy(&dev_priv->decoder_lock[i]);
}
#endif
}
void via_release_futex(drm_via_private_t *dev_priv, int context)
@@ -58,7 +71,14 @@ void via_release_futex(drm_via_private_t *dev_priv, int context)
if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
if (_DRM_LOCK_IS_HELD(*lock)
&& (*lock & _DRM_LOCK_CONT)) {
#ifdef __NetBSD__
mutex_lock(&dev_priv->decoder_lock[i]);
DRM_WAKEUP_ALL(&dev_priv->decoder_queue[i],
&dev_priv->decoder_lock[i]);
mutex_unlock(&dev_priv->decoder_lock[i]);
#else
wake_up(&(dev_priv->decoder_queue[i]));
#endif
}
*lock = 0;
}
@@ -82,11 +102,27 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_
switch (fx->func) {
case VIA_FUTEX_WAIT:
#ifdef __NetBSD__
mutex_lock(&dev_priv->decoder_lock[fx->lock]);
DRM_TIMED_WAIT_UNTIL(ret, &dev_priv->decoder_queue[fx->lock],
&dev_priv->decoder_lock[fx->lock],
(fx->ms / 10) * (DRM_HZ / 100),
*lock != fx->val);
mutex_unlock(&dev_priv->decoder_lock[fx->lock]);
#else
DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
(fx->ms / 10) * (HZ / 100), *lock != fx->val);
#endif
return ret;
case VIA_FUTEX_WAKE:
#ifdef __NetBSD__
mutex_lock(&dev_priv->decoder_lock[fx->lock]);
DRM_WAKEUP_ALL(&dev_priv->decoder_queue[fx->lock],
&dev_priv->decoder_lock[fx->lock]);
mutex_unlock(&dev_priv->decoder_lock[fx->lock]);
#else
wake_up(&(dev_priv->decoder_queue[fx->lock]));
#endif
return 0;
}
return 0;
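
The (fx->ms / 10) * (DRM_HZ / 100) tick conversion above is carried over verbatim from the Linux side; a hypothetical direct NetBSD spelling would use mstohz(9) (assumption: the slightly different rounding is acceptable):

	#include <sys/param.h>	/* mstohz() */

	static int
	futex_timeout_ticks(uint32_t ms)
	{
		return mstohz(ms);	/* roughly ms * hz / 1000 */
	}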

sys/external/bsd/drm2/via/files.via (new file, 22 lines)

@@ -0,0 +1,22 @@
# $NetBSD: files.via,v 1.1 2014/08/26 17:28:14 riastradh Exp $
define viafbbus { }
device viadrmums: drmkms, drmkms_pci, viafbbus
attach viadrmums at pci
device viafb: viafbbus, genfb, wsemuldisplaydev
attach viafb at viafbbus
makeoptions viadrmums CPPFLAGS+="-I$S/external/bsd/drm2/dist/drm/via"
makeoptions viadrmums CPPFLAGS+="-I$S/external/bsd/drm2/via"
makeoptions viadrmums "CWARNFLAGS.via_verifier.c"+="-Wno-shadow"
file external/bsd/drm2/dist/drm/via/via_dma.c viadrmums
file external/bsd/drm2/dist/drm/via/via_dmablit.c viadrmums
file external/bsd/drm2/dist/drm/via/via_drv.c viadrmums
file external/bsd/drm2/dist/drm/via/via_irq.c viadrmums
file external/bsd/drm2/dist/drm/via/via_map.c viadrmums
file external/bsd/drm2/dist/drm/via/via_mm.c viadrmums
file external/bsd/drm2/dist/drm/via/via_verifier.c viadrmums
file external/bsd/drm2/dist/drm/via/via_video.c viadrmums
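
With this glue in place, a kernel configuration would pull the driver in with attachment lines roughly like the following (hypothetical and untested, pending the autoconf attachment noted in the to-do list above):

# Hypothetical kernel-config fragment, not part of this commit:
viadrmums*	at pci? dev ? function ?
viafb*		at viafbbus?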