ipi: Membar audit.

This commit is contained in:
riastradh 2020-11-27 20:11:33 +00:00
parent 003f8943cc
commit 69109c6c58

View File

@@ -1,4 +1,4 @@
/* $NetBSD: subr_ipi.c,v 1.8 2020/09/08 16:00:35 riastradh Exp $ */
/* $NetBSD: subr_ipi.c,v 1.9 2020/11/27 20:11:33 riastradh Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -36,7 +36,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_ipi.c,v 1.8 2020/09/08 16:00:35 riastradh Exp $");
__KERNEL_RCSID(0, "$NetBSD: subr_ipi.c,v 1.9 2020/11/27 20:11:33 riastradh Exp $");
#include <sys/param.h>
#include <sys/types.h>
@@ -187,8 +187,11 @@ ipi_mark_pending(u_int ipi_id, struct cpu_info *ci)
KASSERT(ipi_id < IPI_MAXREG);
KASSERT(kpreempt_disabled());
/* Mark as pending and send an IPI. */
if (membar_consumer(), (ci->ci_ipipend[i] & bitm) == 0) {
/* Mark as pending and return true if not previously marked. */
if ((atomic_load_acquire(&ci->ci_ipipend[i]) & bitm) == 0) {
#ifndef __HAVE_ATOMIC_AS_MEMBAR
membar_exit();
#endif
atomic_or_32(&ci->ci_ipipend[i], bitm);
return true;
}
@@ -261,6 +264,8 @@ ipi_trigger_broadcast(u_int ipi_id, bool skip_self)
/*
* put_msg: insert message into the mailbox.
*
* Caller is responsible for issuing membar_exit first.
*/
static inline void
put_msg(ipi_mbox_t *mbox, ipi_msg_t *msg)
@@ -268,8 +273,7 @@ put_msg(ipi_mbox_t *mbox, ipi_msg_t *msg)
int count = SPINLOCK_BACKOFF_MIN;
again:
for (u_int i = 0; i < IPI_MSG_MAX; i++) {
if (__predict_true(mbox->msg[i] == NULL) &&
atomic_cas_ptr(&mbox->msg[i], NULL, msg) == NULL) {
if (atomic_cas_ptr(&mbox->msg[i], NULL, msg) == NULL) {
return;
}
}
@@ -295,12 +299,12 @@ ipi_cpu_handler(void)
for (u_int i = 0; i < IPI_BITWORDS; i++) {
uint32_t pending, bit;
if (ci->ci_ipipend[i] == 0) {
if (atomic_load_relaxed(&ci->ci_ipipend[i]) == 0) {
continue;
}
pending = atomic_swap_32(&ci->ci_ipipend[i], 0);
#ifndef __HAVE_ATOMIC_AS_MEMBAR
membar_producer();
membar_enter();
#endif
while ((bit = ffs(pending)) != 0) {
const u_int ipi_id = (i << IPI_BITW_SHIFT) | --bit;
@@ -327,10 +331,10 @@ ipi_msg_cpu_handler(void *arg __unused)
ipi_msg_t *msg;
/* Get the message. */
if ((msg = mbox->msg[i]) == NULL) {
if ((msg = atomic_load_acquire(&mbox->msg[i])) == NULL) {
continue;
}
mbox->msg[i] = NULL;
atomic_store_relaxed(&mbox->msg[i], NULL);
/* Execute the handler. */
KASSERT(msg->func);
@@ -338,7 +342,7 @@ ipi_msg_cpu_handler(void *arg __unused)
/* Ack the request. */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
membar_producer();
membar_exit();
#endif
atomic_dec_uint(&msg->_pending);
}
@@ -360,7 +364,9 @@ ipi_unicast(ipi_msg_t *msg, struct cpu_info *ci)
KASSERT(curcpu() != ci);
msg->_pending = 1;
membar_producer();
#ifndef __HAVE_ATOMIC_AS_MEMBAR
membar_exit();
#endif
put_msg(&ipi_mboxes[id], msg);
ipi_trigger(IPI_SYNCH_ID, ci);
@@ -384,7 +390,9 @@ ipi_multicast(ipi_msg_t *msg, const kcpuset_t *target)
local = !!kcpuset_isset(target, cpu_index(self));
msg->_pending = kcpuset_countset(target) - local;
membar_producer();
#ifndef __HAVE_ATOMIC_AS_MEMBAR
membar_exit();
#endif
for (CPU_INFO_FOREACH(cii, ci)) {
cpuid_t id;
@@ -420,7 +428,9 @@ ipi_broadcast(ipi_msg_t *msg, bool skip_self)
KASSERT(kpreempt_disabled());
msg->_pending = ncpu - 1;
membar_producer();
#ifndef __HAVE_ATOMIC_AS_MEMBAR
membar_exit();
#endif
/* Broadcast IPIs for remote CPUs. */
for (CPU_INFO_FOREACH(cii, ci)) {
@@ -448,8 +458,8 @@ ipi_wait(ipi_msg_t *msg)
{
int count = SPINLOCK_BACKOFF_MIN;
while (msg->_pending) {
KASSERT(msg->_pending < ncpu);
while (atomic_load_acquire(&msg->_pending)) {
KASSERT(atomic_load_relaxed(&msg->_pending) < ncpu);
SPINLOCK_BACKOFF(count);
}
}