Alpha will size the mutex structure in pmap_tlb_shootdown_q to 16 bytes.

Change the comment and pad accordingly; now pmap_tlb_shootdown_q entries
really are cache-aligned.
This commit is contained in:
mhitch 2009-09-10 22:27:11 +00:00
parent 1310aa04b4
commit 7b90090350

View File

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.241 2009/09/10 21:36:39 mhitch Exp $ */
/* $NetBSD: pmap.c,v 1.242 2009/09/10 22:27:11 mhitch Exp $ */
/*-
* Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008 The NetBSD Foundation, Inc.
@@ -140,7 +140,7 @@
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.241 2009/09/10 21:36:39 mhitch Exp $");
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.242 2009/09/10 22:27:11 mhitch Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -422,11 +422,11 @@ struct pmap_tlb_shootdown_job {
/*
 * Per-CPU TLB shootdown queue.  Each entry is padded out to exactly one
 * cache line (64 bytes) and the array is cache-line aligned, so CPUs do
 * not false-share each other's queues.  Diff residue resolved: this is
 * the post-commit state, where kmutex_t is 16 bytes on Alpha (it was
 * previously assumed to be 12), and the pad expression is recomputed to
 * match — 64 - 16 (TAILQ head) - 16 (mutex) - 4 - 4 - 4 = 20 pad bytes.
 */
static struct pmap_tlb_shootdown_q {
	TAILQ_HEAD(, pmap_tlb_shootdown_job) pq_head;	/* queue 16b */
	kmutex_t pq_lock;		/* spin lock on queue 16b */
	int pq_pte;			/* aggregate PTE bits 4b */
	int pq_count;			/* number of pending requests 4b */
	int pq_tbia;			/* pending global flush 4b */
	uint8_t pq_pad[64-16-16-4-4-4];	/* pad to 64 bytes */
} pmap_tlb_shootdown_q[ALPHA_MAXPROCS] __aligned(CACHE_LINE_SIZE);
/* If we have more pending jobs than this, we just nail the whole TLB. */