haiku/headers/private/kernel/cpu.h


/*
 * Copyright 2002-2006, Haiku Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_CPU_H
#define _KERNEL_CPU_H


#include <smp.h>
#include <timer.h>
#include <boot/kernel_args.h>
#include <arch/cpu.h>


// define PAUSE, if not done in arch/cpu.h
#ifndef PAUSE
#	define PAUSE()
#endif


/* CPU local data structure */
typedef struct cpu_ent {
	int			cpu_num;

	// thread.c: used to force a reschedule at quantum expiration time
	int			preempted;
	timer		quantum_timer;

	// keeping track of CPU activity
	bigtime_t	active_time;
	bigtime_t	last_kernel_time;
	bigtime_t	last_user_time;

	bool		invoke_scheduler;
	bool		disabled;

	// arch-specific stuff
	arch_cpu_info	arch;
} cpu_ent __attribute__((aligned(64)));


extern cpu_ent gCPU[MAX_BOOT_CPUS];

#ifdef __cplusplus
extern "C" {
#endif
// initialization at the various kernel startup stages
status_t cpu_preboot_init_percpu(struct kernel_args *args, int curr_cpu);
status_t cpu_init(struct kernel_args *args);
status_t cpu_init_percpu(kernel_args *ka, int curr_cpu);
status_t cpu_init_post_vm(struct kernel_args *args);
status_t cpu_init_post_modules(struct kernel_args *args);

// accumulated active time of the given CPU
bigtime_t cpu_get_active_time(int32 cpu);

// returns the cpu_ent of the CPU the caller is currently running on
cpu_ent *get_cpu_struct(void);
extern inline cpu_ent *get_cpu_struct(void) { return &gCPU[smp_get_current_cpu()]; }

// syscall backends
void _user_clear_caches(void *address, size_t length, uint32 flags);
bool _user_cpu_enabled(int32 cpu);
status_t _user_set_cpu_enabled(int32 cpu, bool enabled);
#ifdef __cplusplus
}
#endif
#endif /* _KERNEL_CPU_H */
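
A minimal usage sketch (not part of the header): it shows how kernel code could read the per-CPU data declared above through get_cpu_struct() and cpu_get_active_time(). The helper name show_cpu_activity(), the <KernelExport.h> include for dprintf(), and the output format are illustrative assumptions, not taken from the Haiku sources.

#include <cpu.h>
	// this header, from the private kernel headers
#include <KernelExport.h>
	// assumed here for dprintf()

// Hypothetical helper: dump activity info for the CPU we are running on.
static void
show_cpu_activity(void)
{
	cpu_ent *cpu = get_cpu_struct();

	dprintf("cpu %d: active %lld us, preempted: %d, disabled: %d\n",
		cpu->cpu_num, (long long)cpu->active_time, cpu->preempted,
		(int)cpu->disabled);

	// the same information is reachable for any CPU via the accessor
	dprintf("cpu 0 active time: %lld us\n",
		(long long)cpu_get_active_time(0));
}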