- pmap_reactivate: save an atomic op if possible.

- pmap_tlb_shootdown: use atomic ops instead of _lock_cas().
This commit is contained in:
ad 2007-12-09 15:29:53 +00:00
parent 9ad45b39e6
commit dd24075295
1 changed file with 11 additions and 11 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.10 2007/11/28 16:28:44 ad Exp $ */ /* $NetBSD: pmap.c,v 1.11 2007/12/09 15:29:53 ad Exp $ */
/* /*
* Copyright (c) 2007 Manuel Bouyer. * Copyright (c) 2007 Manuel Bouyer.
@ -154,7 +154,7 @@
*/ */
#include <sys/cdefs.h> #include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.10 2007/11/28 16:28:44 ad Exp $"); __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.11 2007/12/09 15:29:53 ad Exp $");
#include "opt_user_ldt.h" #include "opt_user_ldt.h"
#include "opt_lockdebug.h" #include "opt_lockdebug.h"
@ -2248,17 +2248,18 @@ pmap_reactivate(struct pmap *pmap)
* synchronize with TLB shootdown interrupts. declare * synchronize with TLB shootdown interrupts. declare
* interest in invalidations (TLBSTATE_VALID) and then * interest in invalidations (TLBSTATE_VALID) and then
* check the cpumask, which the IPIs can change only * check the cpumask, which the IPIs can change only
* when the state is !TLBSTATE_VALID. * when the state is TLBSTATE_LAZY.
*/ */
ci->ci_tlbstate = TLBSTATE_VALID; ci->ci_tlbstate = TLBSTATE_VALID;
oldcpus = pmap->pm_cpus; oldcpus = pmap->pm_cpus;
atomic_or_32(&pmap->pm_cpus, cpumask);
KASSERT((pmap->pm_kernel_cpus & cpumask) != 0); KASSERT((pmap->pm_kernel_cpus & cpumask) != 0);
if (oldcpus & cpumask) { if (oldcpus & cpumask) {
/* got it */ /* got it */
result = true; result = true;
} else { } else {
/* must reload */
atomic_or_32(&pmap->pm_cpus, cpumask);
result = false; result = false;
} }
@ -3857,8 +3858,6 @@ pmap_alloc_level(pd_entry_t **pdes, vaddr_t kva, int lvl, long *needed_ptps)
#ifdef XEN #ifdef XEN
splx(s); splx(s);
#endif #endif
/* For nkptp vs pmap_pdp_cache. */
mb_write();
} }
/* /*
@ -4006,7 +4005,6 @@ void
pmap_tlb_shootdown(struct pmap *pm, vaddr_t sva, vaddr_t eva, pt_entry_t pte) pmap_tlb_shootdown(struct pmap *pm, vaddr_t sva, vaddr_t eva, pt_entry_t pte)
{ {
#ifdef MULTIPROCESSOR #ifdef MULTIPROCESSOR
extern int _lock_cas(volatile uintptr_t *, uintptr_t, uintptr_t);
extern bool x86_mp_online; extern bool x86_mp_online;
struct cpu_info *ci; struct cpu_info *ci;
struct pmap_mbox *mb, *selfmb; struct pmap_mbox *mb, *selfmb;
@ -4070,8 +4068,9 @@ pmap_tlb_shootdown(struct pmap *pm, vaddr_t sva, vaddr_t eva, pt_entry_t pte)
SPINLOCK_BACKOFF(count); SPINLOCK_BACKOFF(count);
s = splvm(); s = splvm();
} }
} while (!_lock_cas(&mb->mb_head, head, } while (!atomic_cas_ulong(
head + ncpu - 1)); (volatile u_long *)&mb->mb_head,
head, head + ncpu - 1));
/* /*
* Once underway we must stay at IPL_VM until the * Once underway we must stay at IPL_VM until the
@ -4111,8 +4110,9 @@ pmap_tlb_shootdown(struct pmap *pm, vaddr_t sva, vaddr_t eva, pt_entry_t pte)
selfmb->mb_head++; selfmb->mb_head++;
mb = &ci->ci_pmap_cpu->pc_mbox; mb = &ci->ci_pmap_cpu->pc_mbox;
count = SPINLOCK_BACKOFF_MIN; count = SPINLOCK_BACKOFF_MIN;
while (!_lock_cas((uintptr_t *)&mb->mb_pointer, while (!atomic_cas_ulong(
0, (uintptr_t)&selfmb->mb_tail)) { (u_long *)&mb->mb_pointer,
0, (u_long)&selfmb->mb_tail)) {
splx(s); splx(s);
while (mb->mb_pointer != 0) while (mb->mb_pointer != 0)
SPINLOCK_BACKOFF(count); SPINLOCK_BACKOFF(count);