Changed the way user/kernel time is tracked for threads. Now, thread_at_kernel_entry()

and thread_at_kernel_exit() are always called for userland threads in the appropriate
situations (note: I've renamed them from *_atkernel_*).
The timing should be more accurate this way, and the thread::last_time_type field
is no longer needed: all interrupts are now added to the kernel time (where the
time is actually spent).


git-svn-id: file:///srv/svn/repos/haiku/trunk/current@11331 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Axel Dörfler 2005-02-11 03:10:21 +00:00
parent 4265bb4b19
commit 90bce836e0
6 changed files with 69 additions and 86 deletions

View File

@ -31,9 +31,9 @@ struct thread *thread_dequeue_id(struct thread_queue *q, thread_id thr_id);
void scheduler_enqueue_in_run_queue(struct thread *thread);
void scheduler_remove_from_run_queue(struct thread *thread);
void thread_atkernel_entry(void);
void thread_at_kernel_entry(void);
// called when the thread enters the kernel on behalf of the thread
void thread_atkernel_exit(void);
void thread_at_kernel_exit(void);
status_t thread_init(struct kernel_args *args);
status_t thread_per_cpu_init(int32 cpu_num);
@ -80,11 +80,6 @@ struct rlimit;
int _user_getrlimit(int resource, struct rlimit * rlp);
int _user_setrlimit(int resource, const struct rlimit * rlp);
#if 1
// XXX remove later
int thread_test(void);
#endif
#ifdef __cplusplus
}
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright 2004, Haiku Inc.
* Copyright 2004-2005, Haiku Inc.
* Distributed under the terms of the MIT License.
*
* Thread definition and structures
@ -44,11 +44,6 @@ enum team_state {
TEAM_STATE_DEATH // being killed
};
enum {
KERNEL_TIME,
USER_TIME
};
#define THREAD_RETURN_EXIT 0x1
#define THREAD_RETURN_INTERRUPTED 0x2
@ -187,7 +182,6 @@ struct thread {
bigtime_t user_time;
bigtime_t kernel_time;
bigtime_t last_time;
int32 last_time_type; // KERNEL_TIME or USER_TIME
// architecture dependant section
struct arch_thread arch_info;

View File

@ -1,10 +1,10 @@
/*
** Copyright 2002-2004, The Haiku Team. All rights reserved.
** Distributed under the terms of the Haiku License.
**
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
* Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <vm.h>
#include <int.h>
@ -201,6 +201,9 @@ i386_handle_trap(struct iframe frame)
if (thread)
i386_push_iframe(thread, &frame);
if (frame.cs == USER_CODE_SEG)
thread_at_kernel_entry();
// if(frame.vector != 0x20)
// dprintf("i386_handle_trap: vector 0x%x, ip 0x%x, cpu %d\n", frame.vector, frame.eip, smp_get_current_cpu());
@ -228,8 +231,8 @@ i386_handle_trap(struct iframe frame)
enable_interrupts();
ret = vm_page_fault(cr2, frame.eip,
(frame.error_code & 0x2) != 0,
(frame.error_code & 0x4) != 0,
(frame.error_code & 0x2) != 0, // write access
(frame.error_code & 0x4) != 0, // userland
&newip);
if (newip != 0) {
// the page fault handler wants us to modify the iframe to set the
@ -238,12 +241,12 @@ i386_handle_trap(struct iframe frame)
}
break;
}
case 99: // syscall
{
uint64 retcode;
unsigned int args[MAX_ARGS];
thread_atkernel_entry();
#if 0
{
int i;
@ -253,13 +256,12 @@ i386_handle_trap(struct iframe frame)
dprintf("\t0x%x\n", ((unsigned int *)frame.edx)[i]);
}
#endif
/*
** syscall interface works as such:
** eax has syscall #
** ecx has number of args (0-16)
** edx has pointer to buffer containing args from first to last
** each is verified to make sure someone doesn't try to clobber it
*/
/* syscall interface works as such:
* %eax has syscall #
* %ecx has number of args (0-16)
* %edx has pointer to buffer containing args from first to last
* each is verified to make sure someone doesn't try to clobber it
*/
if (frame.ecx <= MAX_ARGS) {
if (IS_KERNEL_ADDRESS(frame.edx)
|| user_memcpy(args, (void *)frame.edx, frame.ecx * sizeof(unsigned int)) < B_OK) {
@ -274,6 +276,7 @@ i386_handle_trap(struct iframe frame)
frame.edx = retcode >> 32;
break;
}
default:
if (frame.vector >= 0x20) {
interrupt_ack(frame.vector); // ack the 8239 (if applicable)
@ -296,8 +299,8 @@ i386_handle_trap(struct iframe frame)
restore_interrupts(state);
}
if (frame.cs == USER_CODE_SEG || frame.vector == 99)
thread_atkernel_exit();
if (frame.cs == USER_CODE_SEG)
thread_at_kernel_exit();
// dprintf("0x%x cpu %d!\n", thread_get_current_thread_id(), smp_get_current_cpu());

View File

@ -1,5 +1,5 @@
/*
* Copyright 2002-2004, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
@ -403,6 +403,9 @@ void
arch_check_syscall_restart(struct thread *t)
{
struct iframe *frame = i386_get_current_iframe();
if (frame == NULL)
// this thread is obviously new; we didn't come from an interrupt
return;
if ((status_t)frame->orig_eax >= 0 && (status_t)frame->eax == EINTR) {
frame->eax = frame->orig_eax;

View File

@ -1,5 +1,5 @@
/*
* Copyright 2002-2004, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002, Angelo Mottola, a.mottola@libero.it.
* Distributed under the terms of the MIT License.
*
@ -70,7 +70,7 @@ dump_run_queue(int argc, char **argv)
/** Enqueues the thread into the run queue.
* Note: THREAD_LOCK must be held when entering this function
* Note: thread lock must be held when entering this function
*/
void
@ -90,6 +90,7 @@ scheduler_enqueue_in_run_queue(struct thread *thread)
else
prev = sRunQueue.head;
}
thread->queue_next = curr;
if (prev)
prev->queue_next = thread;
@ -99,7 +100,7 @@ scheduler_enqueue_in_run_queue(struct thread *thread)
/** Removes a thread from the run queue.
* Note: THREAD_LOCK must be held when entering this function
* Note: thread lock must be held when entering this function
*/
void
@ -127,14 +128,9 @@ scheduler_remove_from_run_queue(struct thread *thread)
static void
context_switch(struct thread *fromThread, struct thread *toThread)
{
bigtime_t now;
// track kernel & user time
now = system_time();
if (fromThread->last_time_type == KERNEL_TIME)
fromThread->kernel_time += now - fromThread->last_time;
else
fromThread->user_time += now - fromThread->last_time;
bigtime_t now = system_time();
fromThread->kernel_time += now - fromThread->last_time;
toThread->last_time = now;
toThread->cpu = fromThread->cpu;
@ -156,7 +152,7 @@ reschedule_event(timer *unused)
/** Runs the scheduler.
* NOTE: expects thread_spinlock to be held
* Note: expects thread spinlock to be held
*/
void
@ -242,9 +238,9 @@ scheduler_reschedule(void)
void
start_scheduler(void)
{
int state;
cpu_status state;
// XXX may not be the best place for this
// ToDo: may not be the best place for this
// invalidate all of the other processors' TLB caches
state = disable_interrupts();
arch_cpu_global_TLB_invalidate();

View File

@ -1,5 +1,5 @@
/*
* Copyright 2002-2004, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@ -193,7 +193,6 @@ create_thread_struct(const char *name)
t->user_time = 0;
t->kernel_time = 0;
t->last_time = 0;
t->last_time_type = KERNEL_TIME;
t->exit.status = 0;
t->exit.reason = 0;
list_init(&t->exit.waiters);
@ -248,9 +247,15 @@ delete_thread_struct(struct thread *thread)
static void
thread_kthread_entry(void)
{
struct thread *thread = thread_get_current_thread();
// simulates the thread spinlock release that would occur if the thread had been
// rescheded from. The resched didn't happen because the thread is new.
RELEASE_THREAD_LOCK();
// start tracking time
thread->last_time = system_time();
enable_interrupts(); // this essentially simulates a return-from-interrupt
}
@ -258,9 +263,9 @@ thread_kthread_entry(void)
static void
thread_kthread_exit(void)
{
struct thread *t = thread_get_current_thread();
struct thread *thread = thread_get_current_thread();
t->exit.reason = THREAD_RETURN_EXIT;
thread->exit.reason = THREAD_RETURN_EXIT;
thread_exit();
}
@ -276,12 +281,8 @@ _create_user_thread_kentry(void)
struct thread *thread = thread_get_current_thread();
// a signal may have been delivered here
// ToDo: this looks broken
// thread_atkernel_exit();
// start tracking kernel & user time
thread->last_time = system_time();
thread->last_time_type = KERNEL_TIME;
thread->in_kernel = false;
thread_at_kernel_exit();
// jump to the entry point in user space
arch_thread_enter_uspace(thread, (addr_t)thread->entry, thread->args1, thread->args2);
@ -297,15 +298,9 @@ static int
_create_kernel_thread_kentry(void)
{
struct thread *thread = thread_get_current_thread();
int (*func)(void *args);
// start tracking kernel & user time
thread->last_time = system_time();
thread->last_time_type = KERNEL_TIME;
int (*func)(void *args) = (void *)thread->entry;
// call the entry function with the appropriate args
func = (void *)thread->entry;
return func(thread->args1);
}
@ -974,64 +969,61 @@ thread_get_thread_struct_locked(thread_id id)
}
// called in the int handler code when a thread enters the kernel for any reason
/** Called in the interrupt handler code when a thread enters
* the kernel for any reason.
* Only tracks time for now.
*/
void
thread_atkernel_entry(void)
thread_at_kernel_entry(void)
{
struct thread *thread = thread_get_current_thread();
cpu_status state;
struct thread *t;
bigtime_t now;
t = thread_get_current_thread();
TRACE(("thread_atkernel_entry: entry thread 0x%lx\n", t->id));
TRACE(("thread_atkernel_entry: entry thread 0x%lx\n", thread->id));
state = disable_interrupts();
// track user time
now = system_time();
t->user_time += now - t->last_time;
t->last_time = now;
t->last_time_type = KERNEL_TIME;
thread->user_time += now - thread->last_time;
thread->last_time = now;
t->in_kernel = true;
thread->in_kernel = true;
restore_interrupts(state);
}
// called when a thread exits kernel space to user space
/** Called whenever a thread exits kernel space to user space.
* Tracks time, handles signals, ...
*/
void
thread_atkernel_exit(void)
thread_at_kernel_exit(void)
{
struct thread *thread = thread_get_current_thread();
cpu_status state;
struct thread *t;
bigtime_t now;
TRACE(("thread_atkernel_exit: entry\n"));
// ToDo: this may be broken (when it is called, and what exactly should it do...)
t = thread_get_current_thread();
state = disable_interrupts();
GRAB_THREAD_LOCK();
if (handle_signals(t, &state))
if (handle_signals(thread, &state))
scheduler_reschedule();
// was: smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
t->in_kernel = false;
thread->in_kernel = false;
RELEASE_THREAD_LOCK();
// track kernel time
now = system_time();
t->kernel_time += now - t->last_time;
t->last_time = now;
t->last_time_type = USER_TIME;
thread->kernel_time += now - thread->last_time;
thread->last_time = now;
restore_interrupts(state);
}
@ -1041,7 +1033,7 @@ thread_atkernel_exit(void)
// private kernel exported functions
/** insert a thread onto the tail of a queue
/** Insert a thread to the tail of a queue
*/
void