Pull up following revision(s) (requested by riastradh in ticket #996):

	sys/external/bsd/drm2/dist/drm/nouveau/nouveau_nv84_fence.c: revision 1.3
	sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.h: revision 1.3
	sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.h: revision 1.4
	sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.h: revision 1.5
	sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c: revision 1.5
	sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c: revision 1.6
	sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c: revision 1.7
	sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c: revision 1.8
	sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c: revision 1.9

Rewrite nouveau_fence in an attempt to make it make sense.
PR kern/53441
XXX pullup-7
XXX pullup-8


Fences may last longer than their channels.
- Use a reference count on the nouveau_fence_chan object.
- Acquire it with kpreemption disabled.
- Use xcall to wait for kpreempt-disabled sections to complete.
PR kern/53441
XXX pullup-7
XXX pullup-8
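
A condensed sketch of the acquire side of this scheme, following
nouveau_fence_channel_acquire and nouveau_fence_context_del in the
patch below (names and fields as in the patch; only the essential
steps shown):

	static struct nouveau_channel *
	nouveau_fence_channel_acquire(struct nouveau_fence *fence)
	{
		struct nouveau_channel *chan = NULL;

		/*
		 * While kpreemption is disabled here, the xcall
		 * barrier in nouveau_fence_context_del cannot
		 * complete, so if we observe fence->done is false,
		 * the channel and its fence state stay alive at
		 * least until kpreempt_enable.
		 */
		kpreempt_disable();
		if (!fence->done) {
			chan = fence->channel;
			atomic_inc_uint(&chan->fence->refcnt);
		}
		kpreempt_enable();

		return chan;
	}

	/* Teardown side, in nouveau_fence_context_del: */
	xc_wait(xc_broadcast(0, nouveau_fence_context_del_xc, NULL, NULL));
	/* ...then drop our own reference and sleep until refcnt == 0. */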


Defer nouveau_fence_unref until spin unlock.
- kfree while holding a spin lock is not a good idea.
- Make sure we GC every time we might signal fences.
PR kern/53441
XXX pullup-7
XXX pullup-8
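
The resulting pattern, as in nouveau_fence_done and
nouveau_fence_context_del below: anything that may signal fences
parks them on fctx->done under the lock, then unreferences (and
possibly kfrees) them only after dropping it:

	struct list_head done_list;

	INIT_LIST_HEAD(&done_list);
	spin_lock(&fctx->lock);
	/* ...whatever may call nouveau_fence_signal()... */
	nouveau_fence_gc_grab(fctx, &done_list);	/* steal fctx->done */
	spin_unlock(&fctx->lock);
	nouveau_fence_gc_free(&done_list);	/* kfree outside the lock */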


Attempt to make sense of return values of nouveau_fence_wait.
PR kern/53441
XXX pullup-7
XXX pullup-8
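
The convention settled on, as in nouveau_fence_wait_uevent below: the
DRM_SPIN_TIMED_WAIT_* macros leave a negative value on error, zero on
timeout, and the number of ticks remaining on success, which is then
normalized to 0 / -EBUSY / a negative errno:

	if (ret < 0) {
		/* error, e.g. -ERESTARTSYS from a signal; pass through */
	} else if (ret == 0) {
		/* timed out before the fence was signalled */
		ret = -EBUSY;
	} else {
		/* success: fence signalled in time */
		ret = 0;
	}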


Fix edge case of reference counting, oops.
PR kern/53441
XXX pullup-7
XXX pullup-8
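
Presumably the edge case in question is dropping the last reference,
handled in nouveau_fence_channel_release below: the final decrement
must happen under fctx->lock, or a thread sleeping in
nouveau_fence_context_del could sample refcnt just before the
decrement and miss the wakeup; every other reference is dropped with
a lockless compare-and-swap:

	static void
	nouveau_fence_channel_release(struct nouveau_channel *chan)
	{
		struct nouveau_fence_chan *fctx = chan->fence;
		unsigned old, new;

		do {
			old = fctx->refcnt;
			if (old == 1) {
				/* Possibly last: decrement under the lock
				 * so the wakeup cannot be lost. */
				spin_lock(&fctx->lock);
				if (atomic_dec_uint_nv(&fctx->refcnt) == 0)
					DRM_SPIN_WAKEUP_ALL(&fctx->waitqueue,
					    &fctx->lock);
				spin_unlock(&fctx->lock);
				return;
			}
			new = old - 1;
		} while (atomic_cas_uint(&fctx->refcnt, old, new) != old);
	}
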
martin 2018-08-31 17:35:51 +00:00
parent 4e29817fad
commit 9d96c16922
3 changed files with 390 additions and 133 deletions

sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c

@@ -1,4 +1,4 @@
/* $NetBSD: nouveau_fence.c,v 1.4 2016/04/13 07:57:15 riastradh Exp $ */
/* $NetBSD: nouveau_fence.c,v 1.4.10.1 2018/08/31 17:35:51 martin Exp $ */
/*
* Copyright (C) 2007 Ben Skeggs.
@@ -27,7 +27,10 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_fence.c,v 1.4 2016/04/13 07:57:15 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: nouveau_fence.c,v 1.4.10.1 2018/08/31 17:35:51 martin Exp $");
#include <sys/types.h>
#include <sys/xcall.h>
#include <drm/drmP.h>
@@ -41,6 +44,12 @@ __KERNEL_RCSID(0, "$NetBSD: nouveau_fence.c,v 1.4 2016/04/13 07:57:15 riastradh
#include <engine/fifo.h>
/*
* struct fence_work
*
* State for a work action scheduled when a fence is completed.
* Will call func(data) at some point after that happens.
*/
struct fence_work {
struct work_struct base;
struct list_head head;
@@ -48,101 +57,291 @@ struct fence_work {
void *data;
};
/*
* nouveau_fence_channel_acquire(fence)
*
* Try to acquire and return the channel associated with fence,
* or NULL if the fence is already done.
*/
static struct nouveau_channel *
nouveau_fence_channel_acquire(struct nouveau_fence *fence)
{
struct nouveau_channel *chan;
struct nouveau_fence_chan *fctx;
/*
* Block cross-calls while we examine fence. If we observe
* that fence->done is false, then the channel cannot be
* destroyed even by another CPU until after kpreempt_enable.
*/
kpreempt_disable();
if (fence->done) {
chan = NULL;
} else {
chan = fence->channel;
fctx = chan->fence;
atomic_inc_uint(&fctx->refcnt);
}
kpreempt_enable();
return chan;
}
/*
* nouveau_fence_gc_grab(fctx, list)
*
* Move all of fctx's done fences to list.
*
* Caller must hold channel's fence lock.
*/
static void
nouveau_fence_gc_grab(struct nouveau_fence_chan *fctx, struct list_head *list)
{
struct list_head *node, *next;
BUG_ON(!spin_is_locked(&fctx->lock));
list_for_each_safe(node, next, &fctx->done) {
list_move_tail(node, list);
}
}
/*
* nouveau_fence_gc_free(list)
*
* Unreference all of the fences in the list.
*
* Caller MUST NOT hold the fences' channel's fence lock.
*/
static void
nouveau_fence_gc_free(struct list_head *list)
{
struct nouveau_fence *fence, *next;
list_for_each_entry_safe(fence, next, list, head) {
list_del(&fence->head);
nouveau_fence_unref(&fence);
}
}
/*
* nouveau_fence_channel_release(channel)
*
* Release the channel acquired with nouveau_fence_channel_acquire.
*/
static void
nouveau_fence_channel_release(struct nouveau_channel *chan)
{
struct nouveau_fence_chan *fctx = chan->fence;
unsigned old, new;
do {
old = fctx->refcnt;
if (old == 1) {
spin_lock(&fctx->lock);
if (atomic_dec_uint_nv(&fctx->refcnt) == 0)
DRM_SPIN_WAKEUP_ALL(&fctx->waitqueue,
&fctx->lock);
spin_unlock(&fctx->lock);
return;
}
new = old - 1;
} while (atomic_cas_uint(&fctx->refcnt, old, new) != old);
}
/*
* nouveau_fence_signal(fence)
*
* Schedule all the work for fence's completion, mark it done, and
* move it from the pending list to the done list.
*
* Caller must hold fence's channel's fence lock.
*/
static void
nouveau_fence_signal(struct nouveau_fence *fence)
{
struct nouveau_channel *chan __diagused = fence->channel;
struct nouveau_fence_chan *fctx __diagused = chan->fence;
struct fence_work *work, *temp;
BUG_ON(!spin_is_locked(&fctx->lock));
BUG_ON(fence->done);
/* Schedule all the work for this fence. */
list_for_each_entry_safe(work, temp, &fence->work, head) {
schedule_work(&work->base);
list_del(&work->head);
}
fence->channel = NULL;
list_del(&fence->head);
/* Note that the fence is done. */
fence->done = true;
/* Move it from the pending list to the done list. */
list_move_tail(&fence->head, &fctx->done);
}
static void
nouveau_fence_context_del_xc(void *a, void *b)
{
}
/*
* nouveau_fence_context_del(fctx)
*
* Artificially complete all fences in fctx, wait for their work
* to drain, and destroy the memory associated with fctx.
*/
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
struct nouveau_fence *fence, *fnext;
struct list_head done_list;
int ret __diagused;
INIT_LIST_HEAD(&done_list);
/* Signal all the fences in fctx. */
spin_lock(&fctx->lock);
list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
nouveau_fence_signal(fence);
}
nouveau_fence_gc_grab(fctx, &done_list);
spin_unlock(&fctx->lock);
/* Release any fences that we signalled. */
nouveau_fence_gc_free(&done_list);
/* Wait for the workqueue to drain. */
flush_scheduled_work();
/* Wait for nouveau_fence_channel_acquire to complete on all CPUs. */
xc_wait(xc_broadcast(0, nouveau_fence_context_del_xc, NULL, NULL));
/* Release our reference and wait for any others to drain. */
spin_lock(&fctx->lock);
KASSERT(fctx->refcnt > 0);
atomic_dec_uint(&fctx->refcnt);
DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &fctx->waitqueue, &fctx->lock,
fctx->refcnt == 0);
BUG_ON(ret);
spin_unlock(&fctx->lock);
/* Make sure there are no more fences on the list. */
BUG_ON(!list_empty(&fctx->done));
BUG_ON(!list_empty(&fctx->flip));
BUG_ON(!list_empty(&fctx->pending));
/* Destroy the fence context. */
DRM_DESTROY_WAITQUEUE(&fctx->waitqueue);
spin_lock_destroy(&fctx->lock);
}
/*
* nouveau_fence_context_new(fctx)
*
* Initialize the per-channel fence state fctx.
*/
void
nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
{
INIT_LIST_HEAD(&fctx->flip);
INIT_LIST_HEAD(&fctx->pending);
INIT_LIST_HEAD(&fctx->done);
spin_lock_init(&fctx->lock);
DRM_INIT_WAITQUEUE(&fctx->waitqueue, "nvfnchan");
fctx->refcnt = 1;
}
/*
* nouveau_fence_work_handler(kwork)
*
* Work handler for nouveau_fence_work.
*/
static void
nouveau_fence_work_handler(struct work_struct *kwork)
{
struct fence_work *work = container_of(kwork, typeof(*work), base);
work->func(work->data);
kfree(work);
}
/*
* nouveau_fence_work(fence, func, data)
*
* Arrange to call func(data) after fence is completed. If fence
* is already completed, call it immediately. If memory is
* scarce, synchronously wait for the fence and call it.
*/
void
nouveau_fence_work(struct nouveau_fence *fence,
void (*func)(void *), void *data)
{
struct nouveau_channel *chan = fence->channel;
struct nouveau_channel *chan;
struct nouveau_fence_chan *fctx;
struct fence_work *work = NULL;
if (nouveau_fence_done(fence)) {
func(data);
return;
}
if ((chan = nouveau_fence_channel_acquire(fence)) == NULL)
goto now0;
fctx = chan->fence;
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (!work) {
if (work == NULL) {
WARN_ON(nouveau_fence_wait(fence, false, false));
func(data);
return;
goto now1;
}
spin_lock(&fctx->lock);
if (!fence->channel) {
if (fence->done) {
spin_unlock(&fctx->lock);
kfree(work);
func(data);
return;
goto now2;
}
INIT_WORK(&work->base, nouveau_fence_work_handler);
work->func = func;
work->data = data;
list_add(&work->head, &fence->work);
if (atomic_dec_uint_nv(&fctx->refcnt) == 0)
DRM_SPIN_WAKEUP_ALL(&fctx->waitqueue, &fctx->lock);
spin_unlock(&fctx->lock);
return;
now2: kfree(work);
now1: nouveau_fence_channel_release(chan);
now0: func(data);
}
/*
* nouveau_fence_update(chan)
*
* Test all fences on chan for completion. For any that are
* completed, mark them as such and schedule work for them.
*
* Caller must hold chan's fence lock.
*/
static void
nouveau_fence_update(struct nouveau_channel *chan)
{
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_fence *fence, *fnext;
spin_lock(&fctx->lock);
BUG_ON(!spin_is_locked(&fctx->lock));
list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
if (fctx->read(chan) < fence->sequence)
break;
nouveau_fence_signal(fence);
nouveau_fence_unref(&fence);
}
spin_unlock(&fctx->lock);
BUG_ON(!spin_is_locked(&fctx->lock));
}
/*
* nouveau_fence_emit(fence, chan)
*
* - Initialize fence.
* - Set its timeout to 15 sec from now.
* - Assign it the next sequence number on channel.
* - Submit it to the device with the device-specific emit routine.
* - If that succeeds, add it to the list of pending fences on chan.
*/
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
@@ -151,7 +350,9 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
fence->channel = chan;
fence->timeout = jiffies + (15 * HZ);
spin_lock(&fctx->lock);
fence->sequence = ++fctx->sequence;
spin_unlock(&fctx->lock);
ret = fctx->emit(fence);
if (!ret) {
@@ -164,129 +365,197 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
return ret;
}
/*
* nouveau_fence_done_locked(fence, chan)
*
* Test whether fence, which must be on chan, is done. If it is
* not marked as done, poll all fences on chan first.
*
* Caller must hold chan's fence lock.
*/
static bool
nouveau_fence_done_locked(struct nouveau_fence *fence,
struct nouveau_channel *chan)
{
struct nouveau_fence_chan *fctx __diagused = chan->fence;
BUG_ON(!spin_is_locked(&fctx->lock));
BUG_ON(fence->channel != chan);
/* If it's not done, poll it for changes. */
if (!fence->done)
nouveau_fence_update(chan);
/* Check, possibly again, whether it is done now. */
return fence->done;
}
/*
* nouveau_fence_done(fence)
*
* Test whether fence is done. If it is not marked as done, poll
* all fences on its channel first. Caller MUST NOT hold the
* fence lock.
*/
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
if (fence->channel)
nouveau_fence_update(fence->channel);
return !fence->channel;
struct nouveau_channel *chan;
struct nouveau_fence_chan *fctx;
struct list_head done_list;
bool done;
if ((chan = nouveau_fence_channel_acquire(fence)) == NULL)
return true;
INIT_LIST_HEAD(&done_list);
fctx = chan->fence;
spin_lock(&fctx->lock);
done = nouveau_fence_done_locked(fence, chan);
nouveau_fence_gc_grab(fctx, &done_list);
spin_unlock(&fctx->lock);
nouveau_fence_channel_release(chan);
nouveau_fence_gc_free(&done_list);
return done;
}
/*
* nouveau_fence_wait_uevent_handler(data, index)
*
* Nouveau uevent handler for fence completion. data is a
* nouveau_fence_chan pointer. Simply wake up all threads waiting
* for completion of any fences on the channel. Does not mark
* fences as completed -- threads must poll fences for completion.
*/
static int
nouveau_fence_wait_uevent_handler(void *data, int index)
{
struct nouveau_fence_priv *priv = data;
#ifdef __NetBSD__
spin_lock(&priv->waitlock);
/* XXX Set a flag... */
DRM_SPIN_WAKEUP_ALL(&priv->waitqueue, &priv->waitlock);
spin_unlock(&priv->waitlock);
#else
wake_up_all(&priv->waiting);
#endif
struct nouveau_fence_chan *fctx = data;
spin_lock(&fctx->lock);
DRM_SPIN_WAKEUP_ALL(&fctx->waitqueue, &fctx->lock);
spin_unlock(&fctx->lock);
return NVKM_EVENT_KEEP;
}
/*
* nouveau_fence_wait_uevent(fence, chan, intr)
*
* Wait using a nouveau event for completion of fence on chan.
* Wait interruptibly iff intr is true.
*
* Return 0 if fence was signalled, negative error code on
* timeout (-EBUSY) or interrupt (-ERESTARTSYS) or other error.
*/
static int
nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
nouveau_fence_wait_uevent(struct nouveau_fence *fence,
struct nouveau_channel *chan, bool intr)
{
struct nouveau_channel *chan = fence->channel;
struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
struct nouveau_fence_priv *priv = chan->drm->fence;
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_eventh *handler;
struct list_head done_list;
int ret = 0;
BUG_ON(fence->channel != chan);
ret = nouveau_event_new(pfifo->uevent, 0,
nouveau_fence_wait_uevent_handler,
priv, &handler);
fctx, &handler);
if (ret)
return ret;
nouveau_event_get(handler);
INIT_LIST_HEAD(&done_list);
if (fence->timeout) {
unsigned long timeout = fence->timeout - jiffies;
if (time_before(jiffies, fence->timeout)) {
#ifdef __NetBSD__
spin_lock(&priv->waitlock);
spin_lock(&fctx->lock);
if (intr) {
DRM_SPIN_TIMED_WAIT_UNTIL(ret,
&priv->waitqueue, &priv->waitlock,
&fctx->waitqueue, &fctx->lock,
timeout,
nouveau_fence_done(fence));
nouveau_fence_done_locked(fence, chan));
} else {
DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret,
&priv->waitqueue, &priv->waitlock,
&fctx->waitqueue, &fctx->lock,
timeout,
nouveau_fence_done(fence));
nouveau_fence_done_locked(fence, chan));
}
spin_unlock(&priv->waitlock);
#else
if (intr) {
ret = wait_event_interruptible_timeout(
priv->waiting,
nouveau_fence_done(fence),
timeout);
} else {
ret = wait_event_timeout(priv->waiting,
nouveau_fence_done(fence),
timeout);
}
#endif
}
if (ret >= 0) {
fence->timeout = jiffies + ret;
if (time_after_eq(jiffies, fence->timeout))
nouveau_fence_gc_grab(fctx, &done_list);
spin_unlock(&fctx->lock);
if (ret < 0) {
/* error */
} else if (ret == 0) {
/* timeout */
ret = -EBUSY;
} else {
/* success */
ret = 0;
}
} else {
/* timeout */
ret = -EBUSY;
}
} else {
#ifdef __NetBSD__
spin_lock(&priv->waitlock);
spin_lock(&fctx->lock);
if (intr) {
DRM_SPIN_WAIT_UNTIL(ret, &priv->waitqueue,
&priv->waitlock,
nouveau_fence_done(fence));
DRM_SPIN_WAIT_UNTIL(ret, &fctx->waitqueue,
&fctx->lock,
nouveau_fence_done_locked(fence, chan));
} else {
DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &priv->waitqueue,
&priv->waitlock,
nouveau_fence_done(fence));
DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &fctx->waitqueue,
&fctx->lock,
nouveau_fence_done_locked(fence, chan));
}
spin_unlock(&priv->waitlock);
#else
if (intr) {
ret = wait_event_interruptible(priv->waiting,
nouveau_fence_done(fence));
} else {
wait_event(priv->waiting, nouveau_fence_done(fence));
}
#endif
nouveau_fence_gc_grab(fctx, &done_list);
spin_unlock(&fctx->lock);
}
nouveau_event_ref(NULL, &handler);
nouveau_fence_gc_free(&done_list);
if (unlikely(ret < 0))
return ret;
return 0;
}
/*
* nouveau_fence_wait(fence, lazy, intr)
*
* Wait for fence to complete. Wait interruptibly iff intr is
* true. If lazy is true, may sleep, either for a single tick or
* for an interrupt; otherwise will busy-wait.
*
* Return 0 if fence was signalled, negative error code on
* timeout (-EBUSY) or interrupt (-ERESTARTSYS) or other error.
*/
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
struct nouveau_channel *chan = fence->channel;
struct nouveau_fence_priv *priv = chan ? chan->drm->fence : NULL;
#ifndef __NetBSD__
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
ktime_t t;
#endif
struct nouveau_channel *chan;
struct nouveau_fence_priv *priv;
unsigned long delay_usec = 1;
int ret = 0;
if ((chan = nouveau_fence_channel_acquire(fence)) == NULL)
goto out0;
priv = chan->drm->fence;
while (priv && priv->uevent && lazy && !nouveau_fence_done(fence)) {
ret = nouveau_fence_wait_uevent(fence, intr);
ret = nouveau_fence_wait_uevent(fence, chan, intr);
if (ret < 0)
return ret;
goto out1;
}
while (!nouveau_fence_done(fence)) {
@@ -295,33 +564,19 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
break;
}
#ifdef __NetBSD__
if (lazy)
kpause("nvfencep", intr, 1, NULL);
else
DELAY(1);
#else
__set_current_state(intr ? TASK_INTERRUPTIBLE :
TASK_UNINTERRUPTIBLE);
if (lazy) {
t = ktime_set(0, sleep_time);
schedule_hrtimeout(&t, HRTIMER_MODE_REL);
sleep_time *= 2;
if (sleep_time > NSEC_PER_MSEC)
sleep_time = NSEC_PER_MSEC;
if (lazy && delay_usec >= 1000*hztoms(1)) {
/* XXX errno NetBSD->Linux */
ret = -kpause("nvfencew", intr, 1, NULL);
if (ret != -EWOULDBLOCK)
break;
} else {
DELAY(delay_usec);
delay_usec *= 2;
}
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
#endif
}
#ifndef __NetBSD__
__set_current_state(TASK_RUNNING);
#endif
return ret;
out1: nouveau_fence_channel_release(chan);
out0: return ret;
}
int
@@ -331,13 +586,14 @@ nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
struct nouveau_channel *prev;
int ret = 0;
prev = fence ? fence->channel : NULL;
if (prev) {
if (fence != NULL &&
(prev = nouveau_fence_channel_acquire(fence)) != NULL) {
if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
ret = fctx->sync(fence, prev, chan);
if (unlikely(ret))
ret = nouveau_fence_wait(fence, true, false);
}
nouveau_fence_channel_release(prev);
}
return ret;
@@ -347,12 +603,14 @@ static void
nouveau_fence_del(struct kref *kref)
{
struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
kfree(fence);
}
void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
if (*pfence)
kref_put(&(*pfence)->kref, nouveau_fence_del);
*pfence = NULL;
@@ -361,6 +619,7 @@ nouveau_fence_unref(struct nouveau_fence **pfence)
struct nouveau_fence *
nouveau_fence_ref(struct nouveau_fence *fence)
{
if (fence)
kref_get(&fence->kref);
return fence;
@@ -382,6 +641,7 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
INIT_LIST_HEAD(&fence->work);
fence->sysmem = sysmem;
fence->done = false;
kref_init(&fence->kref);
ret = nouveau_fence_emit(fence, chan);

sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.h

@@ -9,6 +9,7 @@ struct nouveau_fence {
struct kref kref;
bool sysmem;
bool done;
struct nouveau_channel *channel;
unsigned long timeout;
@@ -27,9 +28,15 @@ void nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
/*
* struct nouveau_fence_chan:
*
* State common to all fences in a single nouveau_channel.
*/
struct nouveau_fence_chan {
struct list_head pending;
struct list_head flip;
struct list_head done;
int (*emit)(struct nouveau_fence *);
int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
@@ -39,9 +46,16 @@ struct nouveau_fence_chan {
int (*sync32)(struct nouveau_channel *, u64, u32);
spinlock_t lock;
drm_waitqueue_t waitqueue;
volatile unsigned refcnt;
u32 sequence;
};
/*
* struct nouveau_fence_priv:
*
* Device-specific operations on fences.
*/
struct nouveau_fence_priv {
void (*dtor)(struct nouveau_drm *);
bool (*suspend)(struct nouveau_drm *);
@@ -49,12 +63,6 @@ struct nouveau_fence_priv {
int (*context_new)(struct nouveau_channel *);
void (*context_del)(struct nouveau_channel *);
#ifdef __NetBSD__
spinlock_t waitlock;
drm_waitqueue_t waitqueue;
#else
wait_queue_head_t waiting;
#endif
bool uevent;
};

sys/external/bsd/drm2/dist/drm/nouveau/nouveau_nv84_fence.c

@@ -1,4 +1,4 @@
/* $NetBSD: nouveau_nv84_fence.c,v 1.2 2015/02/25 14:57:04 riastradh Exp $ */
/* $NetBSD: nouveau_nv84_fence.c,v 1.2.10.1 2018/08/31 17:35:51 martin Exp $ */
/*
* Copyright 2012 Red Hat Inc.
@@ -25,7 +25,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nv84_fence.c,v 1.2 2015/02/25 14:57:04 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: nouveau_nv84_fence.c,v 1.2.10.1 2018/08/31 17:35:51 martin Exp $");
#include <core/object.h>
#include <core/client.h>
@@ -216,11 +216,6 @@ nv84_fence_destroy(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv = drm->fence;
#ifdef __NetBSD__
spin_lock_destroy(&priv->base.waitlock);
DRM_DESTROY_WAITQUEUE(&priv->base.waitqueue);
#endif
nouveau_bo_unmap(priv->bo_gart);
if (priv->bo_gart)
nouveau_bo_unpin(priv->bo_gart);
@@ -250,12 +245,6 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_new = nv84_fence_context_new;
priv->base.context_del = nv84_fence_context_del;
#ifdef __NetBSD__
spin_lock_init(&priv->base.waitlock);
DRM_INIT_WAITQUEUE(&priv->base.waitqueue, "nvfenceq");
#else
init_waitqueue_head(&priv->base.waiting);
#endif
priv->base.uevent = true;
ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,