* Update the PAUSE macro from "rep; nop;" to "pause;", which produces identical machine code but is more readable

* Insert the pause op in all spin wait loops (as a macro for platform-independent sources, or as inline assembly in x86-only files)
* Fix some warnings with tracing on and extended some output
* Minor cleanups here and there

The pause instruction has been implemented since P4 (Pentium 4) systems but is fully backwards compatible (it's a no-op prior to P4). According to Intel specs it reduces performance penalties, as memory order violations can be avoided. Power consumption is also reduced. Most of all, this will benefit hyper-threading systems, as it frees resources for the other logical processor while one logical processor executes a fast spinning loop.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@23141 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Michael Lotz 2007-12-18 19:46:26 +00:00
parent 9e0b9fbda5
commit b938008fe1
6 changed files with 31 additions and 26 deletions

View File

@ -442,7 +442,7 @@ calculate_apic_timer_conversion_factor(void)
t1 = system_time();
apic_write(APIC_INITIAL_TIMER_COUNT, 0xffffffff); // start the counter
execute_n_instructions(128*20000);
execute_n_instructions(128 * 20000);
count = apic_read(APIC_CURRENT_TIMER_COUNT);
t2 = system_time();
@ -544,7 +544,7 @@ smp_boot_other_cpus(void)
dprintf("wait for delivery\n");
// wait for pending to end
while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
;
asm volatile ("pause;");
dprintf("deassert INIT\n");
/* deassert INIT */
@ -558,7 +558,7 @@ dprintf("deassert INIT\n");
dprintf("wait for delivery\n");
// wait for pending to end
while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
;
asm volatile ("pause;");
/* wait 10ms */
spin(10000);
@ -586,7 +586,7 @@ dprintf("send STARTUP\n");
dprintf("wait for delivery\n");
while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
;
asm volatile ("pause;");
}
}

View File

@ -106,7 +106,7 @@ set_mtrr(void *_parameter, int cpu)
// wait until all CPUs have arrived here
atomic_add(&sWaitAllCPUs, 1);
while (sWaitAllCPUs != smp_get_num_cpus())
;
asm volatile ("pause;");
disable_caches();
@ -118,7 +118,7 @@ set_mtrr(void *_parameter, int cpu)
// wait until all CPUs have arrived here
atomic_add(&sWaitAllCPUs, -1);
while (sWaitAllCPUs != 0)
;
asm volatile ("pause;");
}
@ -128,7 +128,7 @@ init_mtrrs(void *_unused, int cpu)
// wait until all CPUs have arrived here
atomic_add(&sWaitAllCPUs, 1);
while (sWaitAllCPUs != smp_get_num_cpus())
;
asm volatile ("pause;");
disable_caches();
@ -139,7 +139,7 @@ init_mtrrs(void *_unused, int cpu)
// wait until all CPUs have arrived here
atomic_add(&sWaitAllCPUs, -1);
while (sWaitAllCPUs != 0)
;
asm volatile ("pause;");
}

View File

@ -111,7 +111,7 @@ put_char(const char c)
// wait until the transmitter empty bit is set
while ((in8(sSerialBasePort + SERIAL_LINE_STATUS) & 0x20) == 0)
;
asm volatile ("pause;");
out8(c, sSerialBasePort + SERIAL_TRANSMIT_BUFFER);
}
@ -275,7 +275,7 @@ arch_debug_serial_getchar(void)
#endif
while ((in8(sSerialBasePort + SERIAL_LINE_STATUS) & 0x1) == 0)
;
asm volatile ("pause;");
return in8(sSerialBasePort + SERIAL_RECEIVE_BUFFER);
}

View File

@ -254,7 +254,8 @@ arch_smp_send_ici(int32 target_cpu)
timeout = 100000000;
// wait for message to be sent
while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0 && --timeout != 0)
;
asm volatile ("pause;");
if (timeout == 0)
panic("arch_smp_send_ici: timeout, target_cpu %ld", target_cpu);

View File

@ -30,9 +30,9 @@
#endif
#if __INTEL__
#define PAUSE() asm volatile ("rep; nop;")
# define PAUSE() asm volatile ("pause;")
#else
#define PAUSE()
# define PAUSE()
#endif
#define MSG_POOL_SIZE (SMP_MAX_CPUS * 4)
@ -248,7 +248,7 @@ check_for_message(int currentCPU, int *source_mailbox)
if (msg != NULL) {
smp_msgs[currentCPU] = msg->next;
release_spinlock(&cpu_msg_spinlock[currentCPU]);
TRACE((" found msg %p in cpu mailbox\n", msg));
TRACE((" cpu %d: found msg %p in cpu mailbox\n", currentCPU, msg));
*source_mailbox = MAILBOX_LOCAL;
} else {
// try getting one from the broadcast mailbox
@ -270,7 +270,7 @@ check_for_message(int currentCPU, int *source_mailbox)
break;
}
release_spinlock(&broadcast_msg_spinlock);
TRACE((" found msg %p in broadcast mailbox\n", msg));
TRACE((" cpu %d: found msg %p in broadcast mailbox\n", currentCPU, msg));
}
return msg;
}
@ -355,7 +355,7 @@ process_pending_ici(int32 currentCPU)
if (msg == NULL)
return retval;
TRACE((" cpu %d message = %d\n", currentCPU, msg->message));
TRACE((" cpu %ld message = %ld\n", currentCPU, msg->message));
switch (msg->message) {
case SMP_MSG_INVALIDATE_PAGE_RANGE:
@ -396,7 +396,7 @@ process_pending_ici(int32 currentCPU)
cpu_status state = disable_interrupts();
while (*haltValue != 0)
;
PAUSE();
restore_interrupts(state);
}
@ -430,7 +430,7 @@ smp_send_ici(int32 targetCPU, int32 message, uint32 data, uint32 data2, uint32 d
{
struct smp_msg *msg;
TRACE(("smp_send_ici: target 0x%x, mess 0x%x, data 0x%lx, data2 0x%lx, data3 0x%lx, ptr %p, flags 0x%x\n",
TRACE(("smp_send_ici: target 0x%lx, mess 0x%lx, data 0x%lx, data2 0x%lx, data3 0x%lx, ptr %p, flags 0x%lx\n",
targetCPU, message, data, data2, data3, data_ptr, flags));
if (sICIEnabled) {
@ -489,7 +489,7 @@ smp_send_broadcast_ici(int32 message, uint32 data, uint32 data2, uint32 data3,
{
struct smp_msg *msg;
TRACE(("smp_send_broadcast_ici: cpu %d mess 0x%x, data 0x%lx, data2 0x%lx, data3 0x%lx, ptr %p, flags 0x%x\n",
TRACE(("smp_send_broadcast_ici: cpu %ld mess 0x%lx, data 0x%lx, data2 0x%lx, data3 0x%lx, ptr %p, flags 0x%lx\n",
smp_get_current_cpu(), message, data, data2, data3, data_ptr, flags));
if (sICIEnabled) {
@ -512,7 +512,7 @@ smp_send_broadcast_ici(int32 message, uint32 data, uint32 data2, uint32 data3,
msg->done = false;
TRACE(("smp_send_broadcast_ici%d: inserting msg %p into broadcast mbox\n",
smp_get_current_cpu(), msg));
currentCPU, msg));
// stick it in the appropriate cpu's mailbox
acquire_spinlock_nocheck(&broadcast_msg_spinlock);
@ -555,7 +555,6 @@ smp_trap_non_boot_cpus(int32 cpu)
if (cpu > 0) {
boot_cpu_spin[cpu] = 1;
acquire_spinlock_nocheck(&boot_cpu_spin[cpu]);
return false;
}
@ -582,10 +581,10 @@ smp_wake_up_non_boot_cpus()
void
smp_cpu_rendezvous(volatile uint32 *var, int current_cpu)
{
atomic_or(var, 1<<current_cpu);
atomic_or(var, 1 << current_cpu);
while (*var != ((1<<sNumCPUs) - 1))
;
while (*var != ((1 << sNumCPUs) - 1))
PAUSE();
}
status_t

View File

@ -28,6 +28,12 @@ static spinlock sTimerSpinlock[B_MAX_CPU_COUNT] = { 0, };
# define TRACE(x) ;
#endif
#if __INTEL__
# define PAUSE() asm volatile ("pause;")
#else
# define PAUSE()
#endif
status_t
timer_init(kernel_args *args)
@ -261,6 +267,5 @@ spin(bigtime_t microseconds)
bigtime_t time = system_time();
while((system_time() - time) < microseconds)
;
PAUSE();
}