/*
 * Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/* Functionality for symmetrical multi-processors */

#include <thread.h>
#include <int.h>
#include <smp.h>
#include <cpu.h>
#include <arch/cpu.h>
#include <arch/smp.h>
#include <arch/int.h>
#include <arch/debug.h>

#include <stdlib.h>
#include <string.h>

#define DEBUG_SPINLOCKS 1
//#define TRACE_SMP

#ifdef TRACE_SMP
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

#if __INTEL__
#define PAUSE() asm volatile ("rep; nop;")
#else
#define PAUSE()
#endif

#define MSG_POOL_SIZE (SMP_MAX_CPUS * 4)

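// Inter-CPU interrupt (ICI) messages are taken from a fixed pool of
// MSG_POOL_SIZE preallocated smp_msg structures (set up in smp_init())
// and delivered through two kinds of mailboxes: a per-CPU mailbox
// (smp_msgs[cpu]) for messages addressed to a single processor, and a
// shared broadcast mailbox (smp_broadcast_msgs) whose messages carry a
// proc_bitmap and ref_count so that every other CPU processes them
// exactly once.
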
struct smp_msg {
	struct smp_msg *next;
	int32 message;
	uint32 data;
	uint32 data2;
	uint32 data3;
	void *data_ptr;
	uint32 flags;
	int32 ref_count;
	volatile bool done;
	uint32 proc_bitmap;
};

#define MAILBOX_LOCAL 1
#define MAILBOX_BCAST 2

static spinlock boot_cpu_spin[SMP_MAX_CPUS] = { 0, };

static struct smp_msg *free_msgs = NULL;
static volatile int free_msg_count = 0;
static spinlock free_msg_spinlock = 0;

static struct smp_msg *smp_msgs[SMP_MAX_CPUS] = { NULL, };
static spinlock cpu_msg_spinlock[SMP_MAX_CPUS] = { 0, };

static struct smp_msg *smp_broadcast_msgs = NULL;
static spinlock broadcast_msg_spinlock = 0;

static bool sICIEnabled = false;
static int32 sNumCPUs = 1;

static int32 process_pending_ici(int32 currentCPU);


#if DEBUG_SPINLOCKS
#define NUM_LAST_CALLERS 32

static struct {
	void *caller;
	spinlock *lock;
} sLastCaller[NUM_LAST_CALLERS];
static int32 sLastIndex = 0;


static void
push_lock_caller(void *caller, spinlock *lock)
{
	sLastCaller[sLastIndex].caller = caller;
	sLastCaller[sLastIndex].lock = lock;

	if (++sLastIndex >= NUM_LAST_CALLERS)
		sLastIndex = 0;
}


static void *
find_lock_caller(spinlock *lock)
{
	int32 i;

	for (i = 0; i < NUM_LAST_CALLERS; i++) {
		int32 index = (NUM_LAST_CALLERS + sLastIndex - 1 - i) % NUM_LAST_CALLERS;
		if (sLastCaller[index].lock == lock)
			return sLastCaller[index].caller;
	}

	return NULL;
}
#endif	// DEBUG_SPINLOCKS


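/** Acquires the given spinlock, busy-waiting until it becomes available.
 *	Interrupts must already be disabled by the caller. While spinning, any
 *	pending inter-CPU messages for this CPU are processed, so a sync ICI
 *	sent by the current lock holder can still be acknowledged.
 */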
void
acquire_spinlock(spinlock *lock)
{
	if (sNumCPUs > 1) {
		int currentCPU = smp_get_current_cpu();
		if (are_interrupts_enabled())
			panic("acquire_spinlock: attempt to acquire lock %p with interrupts enabled\n", lock);
		while (1) {
			while (*lock != 0) {
				process_pending_ici(currentCPU);
				PAUSE();
			}
			if (atomic_set((int32 *)lock, 1) == 0)
				break;
		}
	} else {
#if DEBUG_SPINLOCKS
		int32 oldValue;
		if (are_interrupts_enabled())
			panic("acquire_spinlock: attempt to acquire lock %p with interrupts enabled\n", lock);
		oldValue = atomic_set((int32 *)lock, 1);
		if (oldValue != 0) {
			panic("acquire_spinlock: attempt to acquire lock %p twice on non-SMP system (last caller: %p, value %ld)\n",
				lock, find_lock_caller(lock), oldValue);
		}

		push_lock_caller(arch_debug_get_caller(), lock);
#endif
	}
}


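/** Variant of acquire_spinlock() that does not process pending ICIs while
 *	spinning. It is used for the message pool and mailbox locks, which the
 *	ICI processing code itself acquires.
 */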
static void
acquire_spinlock_nocheck(spinlock *lock)
{
	if (sNumCPUs > 1) {
#if DEBUG_SPINLOCKS
		if (are_interrupts_enabled())
			panic("acquire_spinlock_nocheck: attempt to acquire lock %p with interrupts enabled\n", lock);
#endif
		while (1) {
			while (*lock != 0)
				PAUSE();
			if (atomic_set((int32 *)lock, 1) == 0)
				break;
		}
	} else {
#if DEBUG_SPINLOCKS
		if (are_interrupts_enabled())
			panic("acquire_spinlock_nocheck: attempt to acquire lock %p with interrupts enabled\n", lock);
		if (atomic_set((int32 *)lock, 1) != 0)
			panic("acquire_spinlock_nocheck: attempt to acquire lock %p twice on non-SMP system\n", lock);
#endif
	}
}


void
release_spinlock(spinlock *lock)
{
	if (sNumCPUs > 1) {
		if (are_interrupts_enabled())
			panic("release_spinlock: attempt to release lock %p with interrupts enabled\n", lock);
		if (atomic_set((int32 *)lock, 0) != 1)
			panic("release_spinlock: lock %p was already released\n", lock);
	} else {
#if DEBUG_SPINLOCKS
		if (are_interrupts_enabled())
			panic("release_spinlock: attempt to release lock %p with interrupts enabled\n", lock);
		if (atomic_set((int32 *)lock, 0) != 1)
			panic("release_spinlock: lock %p was already released\n", lock);
#endif
	}
}


/** Finds a free message and gets it.
 *	NOTE: has side effect of disabling interrupts
 *	return value is the former interrupt state
 */

static cpu_status
find_free_message(struct smp_msg **msg)
{
	cpu_status state;

	TRACE(("find_free_message: entry\n"));

retry:
	while (free_msg_count <= 0)
		PAUSE();
	state = disable_interrupts();
	acquire_spinlock(&free_msg_spinlock);

	if (free_msg_count <= 0) {
		// someone grabbed one while we were getting the lock,
		// go back to waiting for it
		release_spinlock(&free_msg_spinlock);
		restore_interrupts(state);
		goto retry;
	}

	*msg = free_msgs;
	free_msgs = (*msg)->next;
	free_msg_count--;

	release_spinlock(&free_msg_spinlock);

	TRACE(("find_free_message: returning msg %p\n", *msg));

	return state;
}


static void
return_free_message(struct smp_msg *msg)
{
	TRACE(("return_free_message: returning msg %p\n", msg));

	acquire_spinlock_nocheck(&free_msg_spinlock);
	msg->next = free_msgs;
	free_msgs = msg;
	free_msg_count++;
	release_spinlock(&free_msg_spinlock);
}


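/** Fetches the next message for this CPU, looking in its local mailbox first
 *	and falling back to the broadcast mailbox. A broadcast message is only
 *	returned once per CPU; proc_bitmap records which CPUs have already seen
 *	it. Returns NULL (leaving *source_mailbox untouched) if ICIs are not
 *	enabled yet or there is nothing to do.
 */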
static struct smp_msg *
check_for_message(int currentCPU, int *source_mailbox)
{
	struct smp_msg *msg;

	if (!sICIEnabled)
		return NULL;

	acquire_spinlock_nocheck(&cpu_msg_spinlock[currentCPU]);
	msg = smp_msgs[currentCPU];
	if (msg != NULL) {
		smp_msgs[currentCPU] = msg->next;
		release_spinlock(&cpu_msg_spinlock[currentCPU]);
		TRACE((" found msg %p in cpu mailbox\n", msg));
		*source_mailbox = MAILBOX_LOCAL;
	} else {
		// try getting one from the broadcast mailbox

		release_spinlock(&cpu_msg_spinlock[currentCPU]);
		acquire_spinlock_nocheck(&broadcast_msg_spinlock);

		msg = smp_broadcast_msgs;
		while (msg != NULL) {
			if (CHECK_BIT(msg->proc_bitmap, currentCPU) != 0) {
				// we have handled this one already
				msg = msg->next;
				continue;
			}

			// mark it so we won't try to process this one again
			msg->proc_bitmap = SET_BIT(msg->proc_bitmap, currentCPU);
			*source_mailbox = MAILBOX_BCAST;
			break;
		}
		release_spinlock(&broadcast_msg_spinlock);
		TRACE((" found msg %p in broadcast mailbox\n", msg));
	}
	return msg;
}


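/** Drops this CPU's reference to the message. The CPU that releases the last
 *	reference unlinks the message from its mailbox; SMP_MSG_FLAG_SYNC messages
 *	are then only flagged as done (the sender returns them to the free pool),
 *	all other messages are freed here.
 */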
static void
finish_message_processing(int currentCPU, struct smp_msg *msg, int source_mailbox)
{
	int old_refcount;

	old_refcount = atomic_add(&msg->ref_count, -1);
	if (old_refcount == 1) {
		// we were the last one to decrement the ref_count
		// it's our job to remove it from the list & possibly clean it up
		struct smp_msg **mbox = NULL;
		spinlock *spinlock = NULL;

		// clean up the message from one of the mailboxes
		switch (source_mailbox) {
			case MAILBOX_BCAST:
				mbox = &smp_broadcast_msgs;
				spinlock = &broadcast_msg_spinlock;
				break;
			case MAILBOX_LOCAL:
				mbox = &smp_msgs[currentCPU];
				spinlock = &cpu_msg_spinlock[currentCPU];
				break;
		}

		acquire_spinlock_nocheck(spinlock);

		TRACE(("cleaning up message %p\n", msg));

		if (msg == *mbox) {
			(*mbox) = msg->next;
		} else {
			// we need to walk to find the message in the list.
			// we can't use any data found when previously walking through
			// the list, since the list may have changed. But, we are guaranteed
			// to at least have msg in it.
			struct smp_msg *last = NULL;
			struct smp_msg *msg1;

			msg1 = *mbox;
			while (msg1 != NULL && msg1 != msg) {
				last = msg1;
				msg1 = msg1->next;
			}

			// by definition, last must be something
			if (msg1 == msg && last != NULL)
				last->next = msg->next;
			else
				dprintf("last == NULL or msg != msg1!!!\n");
		}

		release_spinlock(spinlock);

		if ((msg->flags & SMP_MSG_FLAG_FREE_ARG) != 0 && msg->data_ptr != NULL)
			free(msg->data_ptr);

		if (msg->flags & SMP_MSG_FLAG_SYNC) {
			msg->done = true;
			// the caller cpu should now free the message
		} else {
			// in the !SYNC case, we get to free the message
			return_free_message(msg);
		}
	}
}


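/** Processes at most one pending inter-CPU message for the given CPU and
 *	dispatches it according to its message code. Returns B_INVOKE_SCHEDULER
 *	for SMP_MSG_RESCHEDULE and B_HANDLED_INTERRUPT otherwise;
 *	SMP_MSG_CPU_HALT never returns.
 */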
static int32
process_pending_ici(int32 currentCPU)
{
	struct smp_msg *msg;
	bool halt = false;
	int source_mailbox = 0;
	int retval = B_HANDLED_INTERRUPT;

	msg = check_for_message(currentCPU, &source_mailbox);
	if (msg == NULL)
		return retval;

	TRACE((" cpu %ld message = %ld\n", currentCPU, msg->message));

	switch (msg->message) {
		case SMP_MSG_INVALIDATE_PAGE_RANGE:
			arch_cpu_invalidate_TLB_range((addr_t)msg->data, (addr_t)msg->data2);
			break;
		case SMP_MSG_INVALIDATE_PAGE_LIST:
			arch_cpu_invalidate_TLB_list((addr_t *)msg->data, (int)msg->data2);
			break;
		case SMP_MSG_USER_INVALIDATE_PAGES:
			arch_cpu_user_TLB_invalidate();
			break;
		case SMP_MSG_GLOBAL_INVALIDATE_PAGES:
			arch_cpu_global_TLB_invalidate();
			break;
		case SMP_MSG_RESCHEDULE:
			retval = B_INVOKE_SCHEDULER;
			break;
		case SMP_MSG_CPU_HALT:
			halt = true;
			dprintf("cpu %ld halted!\n", currentCPU);
			break;
		case SMP_MSG_CALL_FUNCTION:
		{
			smp_call_func func = (smp_call_func)msg->data_ptr;
			func(msg->data, currentCPU, msg->data2, msg->data3);
			break;
		}

		default:
			dprintf("smp_intercpu_int_handler: got unknown message %ld\n", msg->message);
	}

	// finish dealing with this message, possibly removing it from the list
	finish_message_processing(currentCPU, msg, source_mailbox);

	// special case for the halt message
	// we otherwise wouldn't have gotten the opportunity to clean up
	if (halt) {
		disable_interrupts();
		for (;;)
			;
	}

	return retval;
}


// #pragma mark -


int
smp_intercpu_int_handler(void)
{
	int retval;
	int currentCPU = smp_get_current_cpu();

	TRACE(("smp_intercpu_int_handler: entry on cpu %d\n", currentCPU));

	retval = process_pending_ici(currentCPU);

	TRACE(("smp_intercpu_int_handler: done\n"));

	return retval;
}


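/** Sends an inter-CPU message to a single target CPU; a message addressed to
 *	the calling CPU is silently dropped, and nothing happens before ICIs have
 *	been enabled. With SMP_MSG_FLAG_SYNC the call only returns after the
 *	target has processed the message; while waiting, pending ICIs addressed
 *	to this CPU are still handled.
 */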
void
smp_send_ici(int32 targetCPU, int32 message, uint32 data, uint32 data2, uint32 data3,
	void *data_ptr, uint32 flags)
{
	struct smp_msg *msg;

	TRACE(("smp_send_ici: target 0x%x, mess 0x%x, data 0x%lx, data2 0x%lx, data3 0x%lx, ptr %p, flags 0x%x\n",
		targetCPU, message, data, data2, data3, data_ptr, flags));

	if (sICIEnabled) {
		int state;
		int currentCPU;

		// find_free_message leaves interrupts disabled
		state = find_free_message(&msg);

		currentCPU = smp_get_current_cpu();
		if (targetCPU == currentCPU) {
			return_free_message(msg);
			restore_interrupts(state);
			return; // nope, can't do that
		}

		// set up the message
		msg->message = message;
		msg->data = data;
		msg->data2 = data2;
		msg->data3 = data3;
		msg->data_ptr = data_ptr;
		msg->ref_count = 1;
		msg->flags = flags;
		msg->done = false;

		// stick it in the appropriate cpu's mailbox
		acquire_spinlock_nocheck(&cpu_msg_spinlock[targetCPU]);
		msg->next = smp_msgs[targetCPU];
		smp_msgs[targetCPU] = msg;
		release_spinlock(&cpu_msg_spinlock[targetCPU]);

		arch_smp_send_ici(targetCPU);

		if (flags & SMP_MSG_FLAG_SYNC) {
			// wait for the other cpu to finish processing it
			// the interrupt handler will ref count it to <0
			// if the message is sync after it has removed it from the mailbox
			while (msg->done == false) {
				process_pending_ici(currentCPU);
				PAUSE();
			}
			// for SYNC messages, it's our responsibility to put it
			// back into the free list
			return_free_message(msg);
		}

		restore_interrupts(state);
	}
}


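/** Sends an inter-CPU message to all CPUs except the calling one. The
 *	message's ref_count is set to sNumCPUs - 1 and the calling CPU is
 *	pre-marked in proc_bitmap, so every other CPU handles it exactly once.
 *	With SMP_MSG_FLAG_SYNC the call waits until the last CPU has finished.
 */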
void
smp_send_broadcast_ici(int32 message, uint32 data, uint32 data2, uint32 data3,
	void *data_ptr, uint32 flags)
{
	struct smp_msg *msg;

	TRACE(("smp_send_broadcast_ici: cpu %d mess 0x%x, data 0x%lx, data2 0x%lx, data3 0x%lx, ptr %p, flags 0x%x\n",
		smp_get_current_cpu(), message, data, data2, data3, data_ptr, flags));

	if (sICIEnabled) {
		int state;
		int currentCPU;

		// find_free_message leaves interrupts disabled
		state = find_free_message(&msg);

		currentCPU = smp_get_current_cpu();

		msg->message = message;
		msg->data = data;
		msg->data2 = data2;
		msg->data3 = data3;
		msg->data_ptr = data_ptr;
		msg->ref_count = sNumCPUs - 1;
		msg->flags = flags;
		msg->proc_bitmap = SET_BIT(0, currentCPU);
		msg->done = false;

		TRACE(("smp_send_broadcast_ici%d: inserting msg %p into broadcast mbox\n",
			smp_get_current_cpu(), msg));

		// stick it in the broadcast mailbox
		acquire_spinlock_nocheck(&broadcast_msg_spinlock);
		msg->next = smp_broadcast_msgs;
		smp_broadcast_msgs = msg;
		release_spinlock(&broadcast_msg_spinlock);

		arch_smp_send_broadcast_ici();

		TRACE(("smp_send_broadcast_ici: sent interrupt\n"));

		if (flags & SMP_MSG_FLAG_SYNC) {
			// wait for the other cpus to finish processing it
			// the interrupt handler will ref count it to <0
			// if the message is sync after it has removed it from the mailbox
			TRACE(("smp_send_broadcast_ici: waiting for ack\n"));

			while (msg->done == false) {
				process_pending_ici(currentCPU);
				PAUSE();
			}

			TRACE(("smp_send_broadcast_ici: returning message to free list\n"));

			// for SYNC messages, it's our responsibility to put it
			// back into the free list
			return_free_message(msg);
		}

		restore_interrupts(state);
	}

	TRACE(("smp_send_broadcast_ici: done\n"));
}


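/** Spin-traps a non-boot CPU on its own boot_cpu_spin lock until
 *	smp_wake_up_non_boot_cpus() releases it. Returns true only for the
 *	boot CPU (cpu 0), which continues with kernel initialization.
 */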
bool
smp_trap_non_boot_cpus(int32 cpu)
{
	if (cpu > 0) {
		boot_cpu_spin[cpu] = 1;
		acquire_spinlock(&boot_cpu_spin[cpu]);
		return false;
	}

	return true;
}


void
smp_wake_up_non_boot_cpus()
{
	// resume non boot CPUs
	int i;
	for (i = 1; i < sNumCPUs; i++) {
		release_spinlock(&boot_cpu_spin[i]);
	}

	// ICIs were previously being ignored
	if (sNumCPUs > 1)
		sICIEnabled = true;

	// invalidate all of the other processors' TLB caches
	arch_cpu_global_TLB_invalidate();
	smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0, NULL,
		SMP_MSG_FLAG_SYNC);

	// start the other processors
	smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_ASYNC);
}


void
smp_wait_for_non_boot_cpus(void)
{
	bool retry;
	int32 i;
	do {
		retry = false;
		for (i = 1; i < sNumCPUs; i++) {
			if (boot_cpu_spin[i] != 1)
				retry = true;
		}
	} while (retry == true);
}


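/** Allocates the pool of MSG_POOL_SIZE inter-CPU messages and records the
 *	CPU count (only needed when more than one CPU was detected), then hands
 *	off to arch_smp_init().
 */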
status_t
smp_init(kernel_args *args)
{
	struct smp_msg *msg;
	int i;

	TRACE(("smp_init: entry\n"));

	if (args->num_cpus > 1) {
		free_msgs = NULL;
		free_msg_count = 0;
		for (i = 0; i < MSG_POOL_SIZE; i++) {
			msg = (struct smp_msg *)malloc(sizeof(struct smp_msg));
			if (msg == NULL) {
				panic("error creating smp mailboxes\n");
				return B_ERROR;
			}
			memset(msg, 0, sizeof(struct smp_msg));
			msg->next = free_msgs;
			free_msgs = msg;
			free_msg_count++;
		}
		sNumCPUs = args->num_cpus;
	}
	TRACE(("smp_init: calling arch_smp_init\n"));

	return arch_smp_init(args);
}


status_t
smp_per_cpu_init(kernel_args *args, int32 cpu)
{
	return arch_smp_per_cpu_init(args, cpu);
}


void
smp_set_num_cpus(int32 numCPUs)
{
	sNumCPUs = numCPUs;
}


int32
smp_get_num_cpus()
{
	return sNumCPUs;
}


int32
smp_get_current_cpu(void)
{
	struct thread *thread = thread_get_current_thread();
	if (thread)
		return thread->cpu->info.cpu_num;

	// this is not always correct during early boot, but it's okay
	// for the boot process
	return 0;
}


// #pragma mark -
// public exported functions


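/** Runs the given function once on every CPU: remotely via a synchronous
 *	SMP_MSG_CALL_FUNCTION broadcast, and directly on the calling CPU.
 *	Interrupts are disabled for the duration of the call.
 *
 *	Illustrative use (hypothetical caller, not part of this file):
 *
 *		static void flush_local_caches(void *cookie, int cpu) { ... }
 *		...
 *		call_all_cpus(flush_local_caches, NULL);
 */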
void
call_all_cpus(void (*func)(void *, int), void *cookie)
{
	cpu_status state = disable_interrupts();

	if (smp_get_num_cpus() > 1) {
		smp_send_broadcast_ici(SMP_MSG_CALL_FUNCTION, (uint32)cookie,
			0, 0, (void *)func, SMP_MSG_FLAG_SYNC);
	}

	// we need to call this function ourselves as well
	func(cookie, smp_get_current_cpu());

	restore_interrupts(state);
}