kernel: Track load produced by interrupt handlers
This commit is contained in:
parent
288a2664a2
commit
6a164daad4
@ -19,6 +19,16 @@
|
||||
struct kernel_args;
|
||||
|
||||
|
||||
// Classification of reserved interrupt vectors. The type is recorded when a
// vector range is reserved (reserve_io_interrupt_vectors()) and reported by
// the "int_load" debugger command; it does not affect dispatching.
enum interrupt_type {
	INTERRUPT_TYPE_EXCEPTION,	// CPU exception vectors
	INTERRUPT_TYPE_IRQ,			// regular device interrupts
	INTERRUPT_TYPE_LOCAL_IRQ,	// per-CPU interrupts (e.g. local APIC timer)
	INTERRUPT_TYPE_SYSCALL,		// system call entry vectors
	INTERRUPT_TYPE_ICI,			// inter-CPU interrupts
	INTERRUPT_TYPE_UNKNOWN		// not (yet) classified
};
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
@ -53,7 +63,8 @@ are_interrupts_enabled(void)
|
||||
#define restore_interrupts(status) arch_int_restore_interrupts(status)
|
||||
|
||||
|
||||
status_t reserve_io_interrupt_vectors(long count, long startVector);
|
||||
status_t reserve_io_interrupt_vectors(long count, long startVector,
|
||||
enum interrupt_type type);
|
||||
status_t allocate_io_interrupt_vectors(long count, long *startVector);
|
||||
void free_io_interrupt_vectors(long count, long startVector);
|
||||
|
||||
|
55
headers/private/kernel/load_tracking.h
Normal file
55
headers/private/kernel/load_tracking.h
Normal file
@ -0,0 +1,55 @@
|
||||
/*
|
||||
* Copyright 2013 Paweł Dziepak, pdziepak@quarnos.org.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*/
|
||||
#ifndef _KERNEL_LOAD_TRACKING_H
|
||||
#define _KERNEL_LOAD_TRACKING_H
|
||||
|
||||
|
||||
// Load values are fixed-point in the range [0, kMaxLoad]; kMaxLoad means
// 100% busy (callers such as the "int_load" command print load / 10 as a
// percentage).
const int32 kMaxLoad = 1000;
// Minimum time span over which a load sample is computed; presumably in
// microseconds, the unit of system_time() — TODO confirm.
const bigtime_t kLoadMeasureInterval = 50000;
// Slack added before counting whole measure intervals, so a measurement
// arriving slightly early still counts as a full interval.
const bigtime_t kIntervalInaccuracy = kLoadMeasureInterval / 4;
|
||||
|
||||
|
||||
/*!	Updates an exponentially smoothed load estimate.

	\a measureTime is the start of the current measurement window (0 on the
	very first call), \a measureActiveTime the busy time accumulated in that
	window. Both are reset when a sample is taken. \a load is the smoothed
	value in [0, kMaxLoad], updated in place.

	\return the previous value of \a load when a new sample was folded in, or
	-1 when not enough time has passed yet (or on the very first call).
*/
static int32
compute_load(bigtime_t& measureTime, bigtime_t& measureActiveTime, int32& load)
{
	bigtime_t currentTime = system_time();

	// First call: just open the measurement window, no data yet.
	if (measureTime == 0) {
		measureTime = currentTime;
		return -1;
	}

	bigtime_t elapsed = currentTime - measureTime;

	if (elapsed < kLoadMeasureInterval)
		return -1;

	int32 previousLoad = load;
	ASSERT(previousLoad >= 0 && previousLoad <= kMaxLoad);

	// Raw load over the elapsed window, scaled to [0, kMaxLoad] and clamped.
	int32 rawLoad = measureActiveTime * kMaxLoad;
	rawLoad /= max_c(elapsed, 1);
	rawLoad = max_c(min_c(rawLoad, kMaxLoad), 0);

	measureActiveTime = 0;
	measureTime = currentTime;

	// Number of whole measure intervals covered, with slack for jitter.
	elapsed += kIntervalInaccuracy;
	int intervalCount = elapsed / kLoadMeasureInterval;
	ASSERT(intervalCount > 0);

	if (intervalCount > 10) {
		// So much time passed that the old value carries no useful weight.
		load = rawLoad;
	} else {
		// Moving average: the new sample gets weight (2^n - 1) / 2^n.
		rawLoad *= (1 << intervalCount) - 1;
		load = (load + rawLoad) / (1 << intervalCount);
		ASSERT(load >= 0 && load <= kMaxLoad);
	}

	return previousLoad;
}
|
||||
|
||||
|
||||
#endif // _KERNEL_LOAD_TRACKING_H
|
@ -311,7 +311,7 @@ x86_descriptors_init(kernel_args* args)
|
||||
set_trap_gate(0, 98, &trap98); // for performance testing only
|
||||
set_trap_gate(0, 99, &trap99); // syscall interrupt
|
||||
|
||||
reserve_io_interrupt_vectors(2, 98);
|
||||
reserve_io_interrupt_vectors(2, 98, INTERRUPT_TYPE_SYSCALL);
|
||||
|
||||
// configurable msi or msi-x interrupts
|
||||
set_interrupt_gate(0, 100, &trap100);
|
||||
|
@ -90,7 +90,8 @@ arch_smp_init(kernel_args *args)
|
||||
|
||||
if (args->num_cpus > 1) {
|
||||
// I/O interrupts start at ARCH_INTERRUPT_BASE, so all interrupts are shifted
|
||||
reserve_io_interrupt_vectors(3, 0xfd - ARCH_INTERRUPT_BASE);
|
||||
reserve_io_interrupt_vectors(3, 0xfd - ARCH_INTERRUPT_BASE,
|
||||
INTERRUPT_TYPE_ICI);
|
||||
install_io_interrupt_handler(0xfd - ARCH_INTERRUPT_BASE, &x86_ici_interrupt, NULL, B_NO_LOCK_VECTOR);
|
||||
install_io_interrupt_handler(0xfe - ARCH_INTERRUPT_BASE, &x86_smp_error_interrupt, NULL, B_NO_LOCK_VECTOR);
|
||||
install_io_interrupt_handler(0xff - ARCH_INTERRUPT_BASE, &x86_spurious_interrupt, NULL, B_NO_LOCK_VECTOR);
|
||||
|
@ -774,7 +774,7 @@ ioapic_init(kernel_args* args)
|
||||
current = sIOAPICs;
|
||||
while (current != NULL) {
|
||||
reserve_io_interrupt_vectors(current->max_redirection_entry + 1,
|
||||
current->global_interrupt_base);
|
||||
current->global_interrupt_base, INTERRUPT_TYPE_IRQ);
|
||||
current = current->next;
|
||||
}
|
||||
|
||||
|
@ -231,7 +231,7 @@ pic_init()
|
||||
|
||||
TRACE(("PIC level trigger mode: 0x%08lx\n", sLevelTriggeredInterrupts));
|
||||
|
||||
reserve_io_interrupt_vectors(16, 0);
|
||||
reserve_io_interrupt_vectors(16, 0, INTERRUPT_TYPE_EXCEPTION);
|
||||
|
||||
// make the pic controller the current one
|
||||
arch_int_set_interrupt_controller(picController);
|
||||
|
@ -106,7 +106,8 @@ apic_timer_init(struct kernel_args *args)
|
||||
|
||||
sApicTicsPerSec = args->arch_args.apic_time_cv_factor;
|
||||
|
||||
reserve_io_interrupt_vectors(1, 0xfb - ARCH_INTERRUPT_BASE);
|
||||
reserve_io_interrupt_vectors(1, 0xfb - ARCH_INTERRUPT_BASE,
|
||||
INTERRUPT_TYPE_LOCAL_IRQ);
|
||||
install_io_interrupt_handler(0xfb - ARCH_INTERRUPT_BASE,
|
||||
&apic_timer_interrupt, NULL, B_NO_LOCK_VECTOR);
|
||||
|
||||
|
@ -20,6 +20,7 @@
|
||||
#include <arch/int.h>
|
||||
#include <boot/kernel_args.h>
|
||||
#include <elf.h>
|
||||
#include <load_tracking.h>
|
||||
#include <util/AutoLock.h>
|
||||
#include <util/kqueue.h>
|
||||
#include <smp.h>
|
||||
@ -51,6 +52,13 @@ struct io_vector {
|
||||
spinlock vector_lock;
|
||||
int32 enable_count;
|
||||
bool no_lock_vector;
|
||||
interrupt_type type;
|
||||
|
||||
spinlock load_lock;
|
||||
bigtime_t last_measure_time;
|
||||
bigtime_t last_measure_active;
|
||||
int32 load;
|
||||
|
||||
#if DEBUG_INTERRUPTS
|
||||
int64 handled_count;
|
||||
int64 unhandled_count;
|
||||
@ -116,6 +124,29 @@ dump_int_statistics(int argc, char **argv)
|
||||
#endif
|
||||
|
||||
|
||||
static int
|
||||
dump_int_load(int argc, char** argv)
|
||||
{
|
||||
static const char* typeNames[]
|
||||
= { "exception", "irq", "local irq", "syscall", "ici", "unknown" };
|
||||
|
||||
for (int i = 0; i < NUM_IO_VECTORS; i++) {
|
||||
if (!B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock)
|
||||
&& sVectors[i].handler_list == NULL
|
||||
&& sVectors[i].enable_count == 0)
|
||||
continue;
|
||||
|
||||
kprintf("int %3d, type %s, enabled %" B_PRId32 ", load %" B_PRId32
|
||||
"%%%s\n", i, typeNames[min_c(sVectors[i].type,
|
||||
INTERRUPT_TYPE_UNKNOWN)],
|
||||
sVectors[i].enable_count, sVectors[i].load / 10,
|
||||
B_SPINLOCK_IS_LOCKED(&sVectors[i].vector_lock) ? ", ACTIVE" : "");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
// #pragma mark - private kernel API
|
||||
|
||||
|
||||
@ -145,6 +176,13 @@ int_init_post_vm(kernel_args* args)
|
||||
B_INITIALIZE_SPINLOCK(&sVectors[i].vector_lock);
|
||||
sVectors[i].enable_count = 0;
|
||||
sVectors[i].no_lock_vector = false;
|
||||
sVectors[i].type = INTERRUPT_TYPE_UNKNOWN;
|
||||
|
||||
B_INITIALIZE_SPINLOCK(&sVectors[i].load_lock);
|
||||
sVectors[i].last_measure_time = 0;
|
||||
sVectors[i].last_measure_active = 0;
|
||||
sVectors[i].load = 0;
|
||||
|
||||
#if DEBUG_INTERRUPTS
|
||||
sVectors[i].handled_count = 0;
|
||||
sVectors[i].unhandled_count = 0;
|
||||
@ -159,6 +197,9 @@ int_init_post_vm(kernel_args* args)
|
||||
"list interrupt statistics");
|
||||
#endif
|
||||
|
||||
add_debugger_command("int_load", &dump_int_load,
|
||||
"list interrupt usage statistics");
|
||||
|
||||
return arch_int_init_post_vm(args);
|
||||
}
|
||||
|
||||
@ -179,6 +220,17 @@ int_init_post_device_manager(kernel_args* args)
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
update_int_load(int i)
|
||||
{
|
||||
if (!try_acquire_spinlock(&sVectors[i].load_lock))
|
||||
return;
|
||||
compute_load(sVectors[i].last_measure_time, sVectors[i].last_measure_active,
|
||||
sVectors[i].load);
|
||||
release_spinlock(&sVectors[i].load_lock);
|
||||
}
|
||||
|
||||
|
||||
/*! Actually process an interrupt via the handlers registered for that
|
||||
vector (IRQ).
|
||||
*/
|
||||
@ -189,6 +241,8 @@ int_io_interrupt_handler(int vector, bool levelTriggered)
|
||||
struct io_handler* io;
|
||||
bool handled = false;
|
||||
|
||||
bigtime_t start = system_time();
|
||||
|
||||
if (!sVectors[vector].no_lock_vector)
|
||||
acquire_spinlock(&sVectors[vector].vector_lock);
|
||||
|
||||
@ -267,6 +321,12 @@ int_io_interrupt_handler(int vector, bool levelTriggered)
|
||||
if (!sVectors[vector].no_lock_vector)
|
||||
release_spinlock(&sVectors[vector].vector_lock);
|
||||
|
||||
SpinLocker locker(sVectors[vector].load_lock);
|
||||
sVectors[vector].last_measure_active += system_time() - start;
|
||||
locker.Unlock();
|
||||
|
||||
update_int_load(vector);
|
||||
|
||||
if (levelTriggered)
|
||||
return status;
|
||||
|
||||
@ -435,7 +495,7 @@ remove_io_interrupt_handler(long vector, interrupt_handler handler, void *data)
|
||||
vectors using allocate_io_interrupt_vectors() instead.
|
||||
*/
|
||||
status_t
|
||||
reserve_io_interrupt_vectors(long count, long startVector)
|
||||
reserve_io_interrupt_vectors(long count, long startVector, interrupt_type type)
|
||||
{
|
||||
MutexLocker locker(&sIOInterruptVectorAllocationLock);
|
||||
|
||||
@ -448,6 +508,7 @@ reserve_io_interrupt_vectors(long count, long startVector)
|
||||
return B_BUSY;
|
||||
}
|
||||
|
||||
sVectors[startVector + i].type = type;
|
||||
sAllocatedIOInterruptVectors[startVector + i] = true;
|
||||
}
|
||||
|
||||
|
@ -23,6 +23,7 @@
|
||||
#include <kernel.h>
|
||||
#include <kscheduler.h>
|
||||
#include <listeners.h>
|
||||
#include <load_tracking.h>
|
||||
#include <scheduler_defs.h>
|
||||
#include <smp.h>
|
||||
#include <thread.h>
|
||||
@ -61,10 +62,9 @@ const bigtime_t kMinimalWaitTime = kThreadQuantum / 4;
|
||||
|
||||
const bigtime_t kCacheExpire = 100000;
|
||||
|
||||
const int kTargetLoad = 550;
|
||||
const int kHighLoad = 700;
|
||||
const int kMaxLoad = 1000;
|
||||
const int kLoadDifference = 200;
|
||||
const int kTargetLoad = kMaxLoad * 55 / 100;
|
||||
const int kHighLoad = kMaxLoad * 70 / 100;
|
||||
const int kLoadDifference = kMaxLoad * 20 / 100;
|
||||
|
||||
static bigtime_t sDisableSmallTaskPacking;
|
||||
static int32 sSmallTaskCore;
|
||||
@ -924,50 +924,6 @@ should_rebalance(Thread* thread)
|
||||
}
|
||||
|
||||
|
||||
static inline int
|
||||
compute_load(bigtime_t& measureTime, bigtime_t& measureActiveTime, int32& load)
|
||||
{
|
||||
const bigtime_t kLoadMeasureInterval = 50000;
|
||||
const bigtime_t kIntervalInaccuracy = kLoadMeasureInterval / 4;
|
||||
|
||||
bigtime_t now = system_time();
|
||||
|
||||
if (measureTime == 0) {
|
||||
measureTime = now;
|
||||
return -1;
|
||||
}
|
||||
|
||||
bigtime_t deltaTime = now - measureTime;
|
||||
|
||||
if (deltaTime < kLoadMeasureInterval)
|
||||
return -1;
|
||||
|
||||
int oldLoad = load;
|
||||
ASSERT(oldLoad >= 0 && oldLoad <= kMaxLoad);
|
||||
|
||||
int newLoad = measureActiveTime * kMaxLoad;
|
||||
newLoad /= max_c(deltaTime, 1);
|
||||
newLoad = max_c(min_c(newLoad, kMaxLoad), 0);
|
||||
|
||||
measureActiveTime = 0;
|
||||
measureTime = now;
|
||||
|
||||
deltaTime += kIntervalInaccuracy;
|
||||
int n = deltaTime / kLoadMeasureInterval;
|
||||
ASSERT(n > 0);
|
||||
|
||||
if (n > 10)
|
||||
load = newLoad;
|
||||
else {
|
||||
newLoad *= (1 << n) - 1;
|
||||
load = (load + newLoad) / (1 << n);
|
||||
ASSERT(load >= 0 && load <= kMaxLoad);
|
||||
}
|
||||
|
||||
return oldLoad;
|
||||
}
|
||||
|
||||
|
||||
static inline void
|
||||
compute_cpu_load(int32 cpu)
|
||||
{
|
||||
|
Loading…
x
Reference in New Issue
Block a user