2002-07-09 16:24:59 +04:00
|
|
|
/*
|
2006-02-01 19:09:05 +03:00
|
|
|
* Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
|
2004-12-14 02:02:18 +03:00
|
|
|
* Distributed under the terms of the MIT License.
|
|
|
|
*
|
|
|
|
* Copyright 2002, Travis Geiselbrecht. All rights reserved.
|
|
|
|
* Distributed under the terms of the NewOS License.
|
|
|
|
*/
|
2003-05-03 20:20:38 +04:00
|
|
|
|
2004-10-21 05:45:43 +04:00
|
|
|
/* This file contains the cpu functions (init, etc). */
|
2003-05-03 20:20:38 +04:00
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
#include <kernel.h>
|
|
|
|
#include <cpu.h>
|
|
|
|
#include <vm.h>
|
|
|
|
#include <arch/cpu.h>
|
2003-10-08 03:12:37 +04:00
|
|
|
#include <boot/kernel_args.h>
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
#include <string.h>
|
|
|
|
|
2004-10-21 05:45:43 +04:00
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
/* global per-cpu structure */
// One entry per possible boot CPU; zeroed and numbered in cpu_init().
cpu_ent gCPU[MAX_BOOT_CPUS];

// Serializes _user_set_cpu_enabled() so two callers cannot race and
// disable the last remaining enabled CPU (statically zero-initialized).
static spinlock sSetCpuLock;
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2003-05-03 20:20:38 +04:00
|
|
|
|
2004-10-21 05:45:43 +04:00
|
|
|
status_t
|
|
|
|
cpu_init(kernel_args *args)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2006-02-01 19:09:05 +03:00
|
|
|
memset(gCPU, 0, sizeof(gCPU));
|
2004-10-21 05:45:43 +04:00
|
|
|
for (i = 0; i < MAX_BOOT_CPUS; i++) {
|
2006-02-01 19:09:05 +03:00
|
|
|
gCPU[i].info.cpu_num = i;
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
2004-10-21 05:45:43 +04:00
|
|
|
return arch_cpu_init(args);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
status_t
|
|
|
|
cpu_init_post_vm(kernel_args *args)
|
|
|
|
{
|
|
|
|
return arch_cpu_init_post_vm(args);
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
2003-05-03 20:20:38 +04:00
|
|
|
|
The short story: we now have MTRR support on Intel and AMD CPUs (the latter
has not yet been tested, though - I'll do this after this commit):
* Removed the arch_memory_type stuff from vm_area; since there are only 8 memory
ranges on x86, it's simply overkill. The MTRR code now remembers the area ID
and finds the MTRR that way (it could also iterate over the existing MTRRs).
* Introduced some post_modules() init functions.
* If the other x86 CPUs out there don't differ a lot, MTRR functionality might
be put back into the kernel.
* x86_write_msr() was broken, it wrote the 64 bit number with the 32 bit words
switched - it took me some time (and lots of #GPs) to figure that one out.
* Removed the macro read_ebp() and introduced a function x86_read_ebp()
(it's not really a time critical call).
* Followed the Intel docs on how to change MTRRs (symmetrically on all CPUs
with caches turned off).
* Asking for memory types will automatically change the requested length to
  a power of two - note that BeOS seems to behave in the same way, although
  that's not really very clean.
* fixed MTRRs are ignored for now - we should make sure at least, though,
that they are identical on all CPUs (or turn them off, even though I'd
prefer the BIOS stuff to be uncacheable, which we don't enforce yet, though).
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15528 a95241bf-73f2-0310-859d-f6bbb57e9c96
2005-12-13 19:34:29 +03:00
|
|
|
status_t
|
|
|
|
cpu_init_post_modules(kernel_args *args)
|
|
|
|
{
|
|
|
|
return arch_cpu_init_post_modules(args);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2004-10-21 05:45:43 +04:00
|
|
|
status_t
|
|
|
|
cpu_preboot_init(kernel_args *args)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
2004-10-21 05:45:43 +04:00
|
|
|
return arch_cpu_preboot_init(args);
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
2005-03-17 20:06:56 +03:00
|
|
|
|
|
|
|
|
|
|
|
void
clear_caches(void *address, size_t length, uint32 flags)
{
	// ToDo: implement me!
	// NOTE(review): currently a no-op stub — the parameters are accepted
	// but ignored, so callers get no cache maintenance whatsoever.
}
|
|
|
|
|
|
|
|
|
|
|
|
// #pragma mark -
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
_user_clear_caches(void *address, size_t length, uint32 flags)
|
|
|
|
{
|
|
|
|
clear_caches(address, length, flags);
|
|
|
|
}
|
|
|
|
|
2006-02-01 19:09:05 +03:00
|
|
|
|
|
|
|
bool
|
|
|
|
_user_cpu_enabled(int32 cpu)
|
|
|
|
{
|
|
|
|
if (cpu < 0 || cpu >= smp_get_num_cpus())
|
|
|
|
return B_BAD_VALUE;
|
|
|
|
|
|
|
|
return !gCPU[cpu].info.disabled;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
status_t
|
|
|
|
_user_set_cpu_enabled(int32 cpu, bool enabled)
|
|
|
|
{
|
|
|
|
status_t status = B_OK;
|
|
|
|
cpu_status state;
|
|
|
|
int32 i, count;
|
|
|
|
|
|
|
|
if (cpu < 0 || cpu >= smp_get_num_cpus())
|
|
|
|
return B_BAD_VALUE;
|
|
|
|
|
|
|
|
// We need to lock here to make sure that no one can disable
|
|
|
|
// the last CPU
|
|
|
|
|
|
|
|
state = disable_interrupts();
|
|
|
|
acquire_spinlock(&sSetCpuLock);
|
|
|
|
|
|
|
|
if (!enabled) {
|
|
|
|
// check if this is the last CPU to be disabled
|
|
|
|
for (i = 0, count = 0; i < smp_get_num_cpus(); i++) {
|
|
|
|
if (!gCPU[i].info.disabled)
|
|
|
|
count++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (count == 1)
|
|
|
|
status = B_NOT_ALLOWED;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (status == B_OK)
|
|
|
|
gCPU[cpu].info.disabled = !enabled;
|
|
|
|
|
|
|
|
release_spinlock(&sSetCpuLock);
|
|
|
|
restore_interrupts(state);
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|