From c9a450c39ac867e7960d0f7aa30420c12136cf29 Mon Sep 17 00:00:00 2001
From: riastradh <riastradh@NetBSD.org>
Date: Wed, 13 Apr 2016 08:43:56 +0000
Subject: [PATCH] Provide reader/writer semantics with recursive readers.

---
 .../bsd/drm2/include/linux/spinlock.h | 79 ++++++++++++++++---
 1 file changed, 67 insertions(+), 12 deletions(-)

diff --git a/sys/external/bsd/drm2/include/linux/spinlock.h b/sys/external/bsd/drm2/include/linux/spinlock.h
index ae05e32959b2..c117c58005b7 100644
--- a/sys/external/bsd/drm2/include/linux/spinlock.h
+++ b/sys/external/bsd/drm2/include/linux/spinlock.h
@@ -1,4 +1,4 @@
-/*	$NetBSD: spinlock.h,v 1.6 2015/01/01 01:15:42 mrg Exp $	*/
+/*	$NetBSD: spinlock.h,v 1.7 2016/04/13 08:43:56 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -35,6 +35,8 @@
 #include <sys/cdefs.h>
 #include <sys/mutex.h>
 
+#include <machine/limits.h>
+
 #define	__acquires(lock)	/* XXX lockdep stuff */
 #define	__releases(lock)	/* XXX lockdep stuff */
 
@@ -108,18 +110,71 @@ spin_lock_destroy(spinlock_t *spinlock)
 	KASSERT(mutex_owned(&(spinlock)->sl_lock))
 
 /*
- * Linux rwlocks are reader/writer spin locks.  We implement them as
- * normal spin locks without reader/writer semantics for expedience.
- * If that turns out to not work, adapting to reader/writer semantics
- * shouldn't be too hard.
+ * Stupid reader/writer spin locks.  No attempt to avoid writer
+ * starvation.  Must allow recursive readers.  We use mutex and state
+ * instead of compare-and-swap for expedience and LOCKDEBUG support.
  */
-#define	rwlock_t		spinlock_t
-#define	rwlock_init		spin_lock_init
-#define	rwlock_destroy		spin_lock_destroy
-#define	write_lock_irq		spin_lock_irq
-#define	write_unlock_irq	spin_unlock_irq
-#define	read_lock		spin_lock
-#define	read_unlock		spin_unlock
+typedef struct linux_rwlock {
+	kmutex_t	rw_lock;
+	unsigned	rw_nreaders;
+} rwlock_t;
+
+static inline void
+rwlock_init(rwlock_t *rw)
+{
+
+	mutex_init(&rw->rw_lock, MUTEX_DEFAULT, IPL_VM);
+	rw->rw_nreaders = 0;
+}
+
+static inline void
+rwlock_destroy(rwlock_t *rw)
+{
+
+	KASSERTMSG(rw->rw_nreaders == 0,
+	    "rwlock still held by %u readers", rw->rw_nreaders);
+	mutex_destroy(&rw->rw_lock);
+}
+
+static inline void
+write_lock_irq(rwlock_t *rw)
+{
+
+	for (;;) {
+		mutex_spin_enter(&rw->rw_lock);
+		if (rw->rw_nreaders == 0)
+			break;
+		mutex_spin_exit(&rw->rw_lock);
+	}
+}
+
+static inline void
+write_unlock_irq(rwlock_t *rw)
+{
+
+	KASSERT(rw->rw_nreaders == 0);
+	mutex_spin_exit(&rw->rw_lock);
+}
+
+static inline void
+read_lock(rwlock_t *rw)
+{
+
+	mutex_spin_enter(&rw->rw_lock);
+	KASSERT(rw->rw_nreaders < UINT_MAX);
+	rw->rw_nreaders++;
+	mutex_spin_exit(&rw->rw_lock);
+}
+
+static inline void
+read_unlock(rwlock_t *rw)
+{
+
+	mutex_spin_enter(&rw->rw_lock);
+	KASSERT(0 < rw->rw_nreaders);
+	rw->rw_nreaders--;
+	mutex_spin_exit(&rw->rw_lock);
+}
 
 #endif	/* _LINUX_SPINLOCK_H_ */
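
Usage note (not part of the patch): a minimal sketch of how a caller might
use the new API.  The example_* names and the softc layout are hypothetical,
for illustration only; the lock operations (rwlock_init, read_lock,
read_unlock, write_lock_irq, write_unlock_irq) are as defined above.
Because read_lock only bumps rw_nreaders under the mutex and never waits for
other readers, a reader may take the lock again recursively, which is the
property the commit message calls out.

	#include <linux/spinlock.h>

	/* Hypothetical driver state, for illustration only. */
	struct example_softc {
		rwlock_t	sc_rwlock;
		int		sc_state;
	};

	static void
	example_init(struct example_softc *sc)
	{

		rwlock_init(&sc->sc_rwlock);
		sc->sc_state = 0;
	}

	static int
	example_read(struct example_softc *sc)
	{
		int state;

		read_lock(&sc->sc_rwlock);	/* shared with other readers */
		state = sc->sc_state;
		read_unlock(&sc->sc_rwlock);

		return state;
	}

	static int
	example_read_nested(struct example_softc *sc)
	{
		int state;

		/*
		 * Recursive readers are allowed: taking the lock again
		 * inside example_read just increments rw_nreaders twice.
		 */
		read_lock(&sc->sc_rwlock);
		state = example_read(sc);
		read_unlock(&sc->sc_rwlock);

		return state;
	}

	static void
	example_write(struct example_softc *sc, int state)
	{

		write_lock_irq(&sc->sc_rwlock);	/* spins until rw_nreaders == 0 */
		sc->sc_state = state;		/* exclusive access */
		write_unlock_irq(&sc->sc_rwlock);
	}

Note the flip side the comment admits to: a steady stream of overlapping
readers can keep rw_nreaders nonzero indefinitely, so write_lock_irq may
starve.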
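
For contrast, the comment says mutex-and-state was chosen over
compare-and-swap "for expedience and LOCKDEBUG support".  Below is a rough
sketch of what a CAS-based variant might look like with NetBSD's
<sys/atomic.h> primitives.  This is purely an illustration of the trade-off,
not anything in the patch: RW_WRITER_SENTINEL is a made-up name, and a real
implementation would also need memory barriers, interrupt blocking, and an
overflow check.

	#include <sys/atomic.h>
	#include <machine/limits.h>

	#define	RW_WRITER_SENTINEL	UINT_MAX	/* hypothetical writer marker */

	/* nreaders counts readers; RW_WRITER_SENTINEL means a writer holds it. */

	static inline void
	cas_read_lock(volatile unsigned *nreaders)
	{
		unsigned n;

		do {
			while ((n = *nreaders) == RW_WRITER_SENTINEL)
				continue;	/* spin while a writer holds it */
		} while (atomic_cas_uint(nreaders, n, n + 1) != n);
	}

	static inline void
	cas_read_unlock(volatile unsigned *nreaders)
	{

		atomic_dec_uint(nreaders);
	}

	static inline void
	cas_write_lock(volatile unsigned *nreaders)
	{

		/* Claim the lock only when there are no readers at all. */
		while (atomic_cas_uint(nreaders, 0, RW_WRITER_SENTINEL) != 0)
			continue;
	}

	static inline void
	cas_write_unlock(volatile unsigned *nreaders)
	{

		*nreaders = 0;	/* a real version would need a membar first */
	}

The CAS variant avoids a mutex round trip per reader, but every acquisition
becomes invisible to LOCKDEBUG, which is presumably why the patch keeps the
kmutex_t.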