diff --git a/lib/librumpuser/rumpuser_pth.c b/lib/librumpuser/rumpuser_pth.c
index 8774a8f5f967..bdd043183bc5 100644
--- a/lib/librumpuser/rumpuser_pth.c
+++ b/lib/librumpuser/rumpuser_pth.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: rumpuser_pth.c,v 1.23 2013/05/02 19:14:59 pooka Exp $	*/
+/*	$NetBSD: rumpuser_pth.c,v 1.24 2013/05/02 20:33:54 pooka Exp $	*/
 
 /*
  * Copyright (c) 2007-2010 Antti Kantee. All Rights Reserved.
@@ -28,7 +28,7 @@
 #include "rumpuser_port.h"
 
 #if !defined(lint)
-__RCSID("$NetBSD: rumpuser_pth.c,v 1.23 2013/05/02 19:14:59 pooka Exp $");
+__RCSID("$NetBSD: rumpuser_pth.c,v 1.24 2013/05/02 20:33:54 pooka Exp $");
 #endif /* !lint */
 
 #include
@@ -378,17 +378,53 @@ rumpuser_cv_destroy(struct rumpuser_cv *cv)
 	free(cv);
 }
 
+static void
+cv_unschedule(struct rumpuser_mtx *mtx, int *nlocks)
+{
+
+	rumpkern_unsched(nlocks, mtx);
+	mtxexit(mtx);
+}
+
+static void
+cv_reschedule(struct rumpuser_mtx *mtx, int nlocks)
+{
+
+	/*
+	 * If the cv interlock is a spin mutex, we must first release
+	 * the mutex that was reacquired by pthread_cond_wait(),
+	 * acquire the CPU context and only then relock the mutex.
+	 * This is to preserve resource allocation order so that
+	 * we don't deadlock. Non-spinning mutexes don't have this
+	 * problem since they don't use a hold-and-wait approach
+	 * to acquiring the mutex wrt the rump kernel CPU context.
+	 *
+	 * The more optimal solution would be to rework rumpkern_sched()
+	 * so that it's possible to tell the scheduler
+	 * "if you need to block, drop this lock first", but I'm not
+	 * going poking there without some numbers on how often this
+	 * path is taken for spin mutexes.
+	 */
+	if ((mtx->flags & (RUMPUSER_MTX_SPIN | RUMPUSER_MTX_KMUTEX)) ==
+	    (RUMPUSER_MTX_SPIN | RUMPUSER_MTX_KMUTEX)) {
+		NOFAIL_ERRNO(pthread_mutex_unlock(&mtx->pthmtx));
+		rumpkern_sched(nlocks, mtx);
+		rumpuser_mutex_enter_nowrap(mtx);
+	} else {
+		mtxenter(mtx);
+		rumpkern_sched(nlocks, mtx);
+	}
+}
+
 void
 rumpuser_cv_wait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx)
 {
 	int nlocks;
 
 	cv->nwaiters++;
-	rumpkern_unsched(&nlocks, mtx);
-	mtxexit(mtx);
+	cv_unschedule(mtx, &nlocks);
 	NOFAIL_ERRNO(pthread_cond_wait(&cv->pthcv, &mtx->pthmtx));
-	mtxenter(mtx);
-	rumpkern_sched(nlocks, mtx);
+	cv_reschedule(mtx, nlocks);
 	cv->nwaiters--;
 }
 
@@ -420,8 +456,7 @@ rumpuser_cv_timedwait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx,
 	clock_gettime(CLOCK_REALTIME, &ts);
 
 	cv->nwaiters++;
-	rumpkern_unsched(&nlocks, mtx);
-	mtxexit(mtx);
+	cv_unschedule(mtx, &nlocks);
 
 	ts.tv_sec += sec;
 	ts.tv_nsec += nsec;
@@ -430,8 +465,8 @@ rumpuser_cv_timedwait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx,
 		ts.tv_nsec -= 1000*1000*1000;
 	}
 	rv = pthread_cond_timedwait(&cv->pthcv, &mtx->pthmtx, &ts);
-	mtxenter(mtx);
-	rumpkern_sched(nlocks, mtx);
+
+	cv_reschedule(mtx, nlocks);
 	cv->nwaiters--;
 
 	ET(rv);
diff --git a/sys/rump/librump/rumpkern/locks.c b/sys/rump/librump/rumpkern/locks.c
index 929927794919..7ab9e8b58ba7 100644
--- a/sys/rump/librump/rumpkern/locks.c
+++ b/sys/rump/librump/rumpkern/locks.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: locks.c,v 1.60 2013/04/30 00:03:53 pooka Exp $	*/
+/*	$NetBSD: locks.c,v 1.61 2013/05/02 20:33:54 pooka Exp $	*/
 
 /*
  * Copyright (c) 2007-2011 Antti Kantee. All Rights Reserved.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.60 2013/04/30 00:03:53 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.61 2013/05/02 20:33:54 pooka Exp $");
 
 #include
 #include
@@ -113,11 +113,9 @@ mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
 		isspin = 1;
 	}
 
-#if 0 /* spin mutex support needs some cpu scheduler rework */
 	if (isspin)
 		ruflags |= RUMPUSER_MTX_SPIN;
-#endif
 	rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
 	ALLOCK(mtx, &mutex_lockops);
 }
 
@@ -144,7 +142,7 @@ mutex_spin_enter(kmutex_t *mtx)
 {
 
 	WANTLOCK(mtx, false, false);
-	rumpuser_mutex_enter(RUMPMTX(mtx));
+	rumpuser_mutex_enter_nowrap(RUMPMTX(mtx));
 	LOCKED(mtx, false);
 }
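For reference, the resource-allocation-order argument in cv_reschedule()'s comment reduces to a small standalone pthread program. The sketch below is illustrative only and not part of the patch; cpu_slot, interlock, reschedule_in_order() and waiter() are hypothetical names invented here. A counting semaphore stands in for the scarce rump kernel CPU contexts and a plain pthread mutex stands in for a spin mutex used as a cv interlock; the global acquisition order is "CPU slot before interlock". A thread coming out of pthread_cond_wait() holds only the interlock, so it drops it before blocking on a CPU slot and relocks afterwards, mirroring the unlock/rumpkern_sched()/rumpuser_mutex_enter_nowrap() sequence in the spin branch above.

/* Hypothetical sketch, not part of the patch: the ordering rule only. */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t cpu_slot;		/* models the limited rump CPU contexts */
static pthread_mutex_t interlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int awake;

/*
 * Rejoin after a cv sleep without violating the global order.
 * pthread_cond_wait() handed the interlock back to us, but we do not
 * yet hold a CPU slot; blocking for one while still holding the
 * interlock would be hold-and-wait, so release, block, relock.
 */
static void
reschedule_in_order(void)
{
	pthread_mutex_unlock(&interlock);
	sem_wait(&cpu_slot);
	pthread_mutex_lock(&interlock);
}

static void *
waiter(void *arg)
{
	(void)arg;
	sem_wait(&cpu_slot);		/* global order: CPU slot first... */
	pthread_mutex_lock(&interlock);	/* ...interlock second */
	sem_post(&cpu_slot);		/* "unschedule" before sleeping */
	while (!awake)
		pthread_cond_wait(&cond, &interlock);
	reschedule_in_order();		/* relock in the safe order */
	pthread_mutex_unlock(&interlock);
	sem_post(&cpu_slot);
	return NULL;
}

int
main(void)
{
	pthread_t t;

	sem_init(&cpu_slot, 0, 1);	/* one virtual CPU: real contention */
	pthread_create(&t, NULL, waiter, NULL);

	sem_wait(&cpu_slot);		/* the waker follows the same order */
	pthread_mutex_lock(&interlock);
	awake = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&interlock);
	sem_post(&cpu_slot);

	pthread_join(t, NULL);
	printf("done without deadlock\n");
	return 0;
}

If reschedule_in_order() instead called sem_wait() while still holding the interlock, it could deadlock against a thread that holds the last CPU slot and is trying to take the interlock: a hold-and-wait cycle over two resources acquired in opposite orders, which is exactly what the spin-mutex branch of cv_reschedule() avoids.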