* Renamed the ROUNDOWN macro to ROUNDDOWN. Also changed the implementation of
  ROUNDUP to use '*' and '/' -- the compiler will optimize that for powers of
  two anyway and this implementation works for other numbers as well (see the
  first sketch after this list).
* The thread::fault_handler use in C[++] code was broken with gcc 4, at least
  when other functions were invoked. Trying to trick the compiler wasn't a
  particularly good idea anyway, since the next compiler version could break
  the trick again. So the general policy is to use the fault handlers only in
  assembly code where we have full control. Changed that for x86 (save for the
  vm86 mode, which has a similar mechanism), but not for the other
  architectures.
* Introduced fault_handler, fault_handler_stack_pointer, and fault_jump_buffer
  fields in the cpu_ent structure, which must be used instead of
  thread::fault_handler in the kernel debugger. Consequently user_memcpy() must
  not be used in the kernel debugger either. Introduced a debug_memcpy()
  instead (see the usage sketch after this list).
* Introduced debug_call_with_fault_handler() function which calls a function
  in a setjmp() and fault handler context (see the trampoline sketch after
  this list). The architecture specific back end
  arch_debug_call_with_fault_handler() has so far only been implemented for
  x86.
* Introduced debug_is_kernel_memory_accessible() for use in the kernel
  debugger. It determines whether a range of memory can be accessed in the
  way specified (see the last sketch after this list). The architecture
  specific back end arch_vm_translation_map_is_kernel_page_accessible() has
  so far only been implemented for x86.
* Added arch_debug_unset_current_thread() (only implemented for x86) to unset
  the current thread pointer in the kernel debugger. When entering the kernel
  debugger we do some basic sanity checks of the currently set thread structure
  and unset it if they fail. This allows certain commands (most importantly
  the stack trace command) to avoid accessing the thread structure.
* x86: When handling a double fault, we now install a special handler for
  page faults. This allows us to gracefully catch faulting commands, even if
  e.g. the thread structure is toast.
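
For illustration, the new macros (quoted from the diff below) also work for
non-power-of-two step sizes -- a minimal worked example:

  #define ROUNDDOWN(a, b) (((a) / (b)) * (b))
  #define ROUNDUP(a, b)   ROUNDDOWN((a) + (b) - 1, b)

  // ROUNDDOWN(10, 3) == 9 and ROUNDUP(10, 3) == 12. The old mask-based
  // ROUNDUP, (((a) + ((b)-1)) & ~((b)-1)), required b to be a power of two:
  // with b == 3 it yields ROUNDUP(8, 3) == 8 instead of 9.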
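
How debug_memcpy() replaces user_memcpy() in debugger code -- a usage sketch
mirroring the new get_next_frame_debugger() in the diff below (framePointer
may be an arbitrary, possibly bogus address handed to a debugger command):

  stack_frame frame;
  if (debug_memcpy(&frame, (void*)framePointer, sizeof(frame)) != B_OK)
      return B_BAD_ADDRESS;
          // a faulting read is caught by the CPU's fault handler, not fatal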
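
debug_call_with_fault_handler() is meant to be used via small trampoline
functions; this sketch follows the new debug_memcpy() implementation in the
diff below:

  struct debug_memcpy_parameters {
      void*       to;
      const void* from;
      size_t      size;
  };

  static void
  debug_memcpy_trampoline(void* _parameters)
  {
      debug_memcpy_parameters* parameters
          = (debug_memcpy_parameters*)_parameters;
      memcpy(parameters->to, parameters->from, parameters->size);
  }

  status_t
  debug_memcpy(void* to, const void* from, size_t size)
  {
      debug_memcpy_parameters parameters = {to, from, size};

      // returns 0 if the trampoline ran through, 1 if it caused a page fault
      if (debug_call_with_fault_handler(gCPU[sDebuggerOnCPU].fault_jump_buffer,
              &debug_memcpy_trampoline, &parameters) != 0) {
          return B_BAD_ADDRESS;
      }
      return B_OK;
  }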
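
The new sanity check in kernel_debugger_loop() shows the intended use of
debug_is_kernel_memory_accessible() -- a condensed sketch:

  struct thread* thread = thread_get_current_thread();
  if (thread != NULL && !debug_is_kernel_memory_accessible((addr_t)thread,
          sizeof(struct thread), B_KERNEL_READ_AREA)) {
      // the thread pointer is garbage; keep commands from touching it
      arch_debug_unset_current_thread();
  }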

We are now in much better shape to deal with double faults, hopefully avoiding
the triple faults that some people have been experiencing on their hardware,
and ideally even allowing the kernel debugger to be used normally.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@32073 a95241bf-73f2-0310-859d-f6bbb57e9c96
Author: Ingo Weinhold, 2009-08-03 12:39:56 +00:00
Commit: ea2abd110b (parent: 16357dea6b)
33 changed files with 649 additions and 227 deletions


@ -11,6 +11,9 @@
#include <SupportDefs.h>
#include <cpu.h>
struct kernel_args;
struct thread;
@ -27,6 +30,9 @@ void* arch_debug_get_interrupt_pc(bool* _isSyscall);
bool arch_debug_contains_call(struct thread *thread, const char *symbol,
addr_t start, addr_t end);
void arch_debug_save_registers(int *);
void arch_debug_unset_current_thread(void);
void arch_debug_call_with_fault_handler(cpu_ent* cpu, jmp_buf jumpBuffer,
void (*function)(void*), void* parameter);
bool arch_is_debug_variable_defined(const char* variableName);
status_t arch_set_debug_variable(const char* variableName, uint64 value);


@ -23,11 +23,14 @@ status_t arch_vm_translation_map_init(struct kernel_args *args);
status_t arch_vm_translation_map_init_post_area(struct kernel_args *args);
status_t arch_vm_translation_map_init_post_sem(struct kernel_args *args);
// quick function to map a page in regardless of map context. Used in VM initialization,
// before most vm data structures exist
// Quick function to map a page in regardless of map context. Used in VM
// initialization before most vm data structures exist.
status_t arch_vm_translation_map_early_map(struct kernel_args *args, addr_t va, addr_t pa,
uint8 attributes, addr_t (*get_free_page)(struct kernel_args *));
bool arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
uint32 protection);
#ifdef __cplusplus
}
#endif


@ -284,6 +284,7 @@ bool x86_check_feature(uint32 feature, enum x86_feature_type type);
void* x86_get_double_fault_stack(int32 cpu, size_t* _size);
int32 x86_double_fault_get_cpu(void);
void x86_double_fault_exception(struct iframe* frame);
void x86_page_fault_exception_double_fault(struct iframe* frame);
#define read_cr3(value) \


@ -9,6 +9,8 @@
#define _KERNEL_CPU_H
#include <setjmp.h>
#include <smp.h>
#include <timer.h>
#include <boot/kernel_args.h>
@ -24,19 +26,24 @@
/* CPU local data structure */
typedef struct cpu_ent {
int cpu_num;
int cpu_num;
// thread.c: used to force a reschedule at quantum expiration time
int preempted;
timer quantum_timer;
int preempted;
timer quantum_timer;
// keeping track of CPU activity
bigtime_t active_time;
bigtime_t last_kernel_time;
bigtime_t last_user_time;
bigtime_t active_time;
bigtime_t last_kernel_time;
bigtime_t last_user_time;
bool invoke_scheduler;
bool disabled;
// used in the kernel debugger
addr_t fault_handler;
addr_t fault_handler_stack_pointer;
jmp_buf fault_jump_buffer;
bool invoke_scheduler;
bool disabled;
// arch-specific stuff
arch_cpu_info arch;


@ -8,11 +8,14 @@
#ifndef _KERNEL_DEBUG_H
#define _KERNEL_DEBUG_H
#include "kernel_debug_config.h"
#include <setjmp.h>
#include <KernelExport.h>
#include <module.h>
#include "kernel_debug_config.h"
/* KDEBUG
The kernel debug level.
@ -123,6 +126,11 @@ extern debug_page_fault_info* debug_get_page_fault_info();
extern void debug_trap_cpu_in_kdl(int32 cpu, bool returnIfHandedOver);
extern void debug_double_fault(int32 cpu);
extern bool debug_emergency_key_pressed(char key);
extern bool debug_is_kernel_memory_accessible(addr_t address, size_t size,
uint32 protection);
extern int debug_call_with_fault_handler(jmp_buf jumpBuffer,
void (*function)(void*), void* parameter);
extern status_t debug_memcpy(void* to, const void* from, size_t size);
extern char kgetc(void);
extern void kputs(const char *string);


@ -39,8 +39,8 @@
#define ENV_SIZE (B_PAGE_SIZE * 8)
#define ROUNDUP(a, b) (((a) + ((b)-1)) & ~((b)-1))
#define ROUNDOWN(a, b) (((a) / (b)) * (b))
#define ROUNDDOWN(a, b) (((a) / (b)) * (b))
#define ROUNDUP(a, b) ROUNDDOWN((a) + (b) - 1, b)
#define CHECK_BIT(a, b) ((a) & (1 << (b)))


@ -126,9 +126,9 @@ load_elf_symbol_table(int fd, preloaded_image *image)
status = B_ERROR;
goto error1;
}
// find symbol table in section headers
for (int32 i = 0; i < elfHeader.e_shnum; i++) {
if (sectionHeaders[i].sh_type == SHT_SYMTAB) {
stringHeader = &sectionHeaders[sectionHeaders[i].sh_link];
@ -275,7 +275,7 @@ elf_load_image(int fd, preloaded_image *image)
} else
continue;
region->start = ROUNDOWN(header.p_vaddr, B_PAGE_SIZE);
region->start = ROUNDDOWN(header.p_vaddr, B_PAGE_SIZE);
region->size = ROUNDUP(header.p_memsz + (header.p_vaddr % B_PAGE_SIZE),
B_PAGE_SIZE);
region->delta = -region->start;


@ -62,7 +62,7 @@ insert_address_range(addr_range* ranges, uint32* _numRanges, uint32 maxRanges,
{
uint32 numRanges = *_numRanges;
start = ROUNDOWN(start, B_PAGE_SIZE);
start = ROUNDDOWN(start, B_PAGE_SIZE);
size = ROUNDUP(size, B_PAGE_SIZE);
addr_t end = start + size;
@ -141,7 +141,7 @@ remove_address_range(addr_range* ranges, uint32* _numRanges, uint32 maxRanges,
uint32 numRanges = *_numRanges;
addr_t end = ROUNDUP(start + size, B_PAGE_SIZE);
start = ROUNDOWN(start, B_PAGE_SIZE);
start = ROUNDDOWN(start, B_PAGE_SIZE);
for (uint32 i = 0; i < numRanges; i++) {
addr_t rangeStart = ranges[i].start;


@ -632,7 +632,7 @@ mmu_init(void)
extMemoryBlock[i].base_addr
= ROUNDUP(extMemoryBlock[i].base_addr, B_PAGE_SIZE);
extMemoryBlock[i].length
= ROUNDOWN(extMemoryBlock[i].length, B_PAGE_SIZE);
= ROUNDDOWN(extMemoryBlock[i].length, B_PAGE_SIZE);
// we ignore all memory beyond 4 GB
if (extMemoryBlock[i].base_addr > 0xffffffffULL)


@ -57,7 +57,7 @@ static status_t
insert_memory_range(addr_range *ranges, uint32 &numRanges, uint32 maxRanges,
const void *_start, uint32 _size)
{
addr_t start = ROUNDOWN(addr_t(_start), B_PAGE_SIZE);
addr_t start = ROUNDDOWN(addr_t(_start), B_PAGE_SIZE);
addr_t end = ROUNDUP(addr_t(_start) + _size, B_PAGE_SIZE);
addr_t size = end - start;
if (size == 0)
@ -133,7 +133,7 @@ static status_t
remove_memory_range(addr_range *ranges, uint32 &numRanges, uint32 maxRanges,
const void *_start, uint32 _size)
{
addr_t start = ROUNDOWN(addr_t(_start), B_PAGE_SIZE);
addr_t start = ROUNDDOWN(addr_t(_start), B_PAGE_SIZE);
addr_t end = ROUNDUP(addr_t(_start) + _size, B_PAGE_SIZE);
for (uint32 i = 0; i < numRanges; i++) {


@ -618,7 +618,7 @@ mmu_init(void)
extMemoryBlock[i].base_addr
= ROUNDUP(extMemoryBlock[i].base_addr, B_PAGE_SIZE);
extMemoryBlock[i].length
= ROUNDOWN(extMemoryBlock[i].length, B_PAGE_SIZE);
= ROUNDDOWN(extMemoryBlock[i].length, B_PAGE_SIZE);
// we ignore all memory beyond 4 GB
if (extMemoryBlock[i].base_addr > 0xffffffffULL)


@ -30,7 +30,7 @@ int arch_fpu_type;
int arch_mmu_type;
int arch_platform;
status_t
status_t
arch_cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
{
// enable FPU
@ -44,7 +44,7 @@ arch_cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
}
status_t
status_t
arch_cpu_init_percpu(kernel_args *args, int curr_cpu)
{
//detect_cpu(curr_cpu);
@ -102,7 +102,7 @@ arch_cpu_init_post_modules(kernel_args *args)
}
void
void
arch_cpu_sync_icache(void *address, size_t len)
{
cpu_ops.flush_icache((addr_t)address, len);
@ -125,7 +125,7 @@ arch_cpu_memory_write_barrier(void)
}
void
void
arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
{
int32 num_pages = end / B_PAGE_SIZE - start / B_PAGE_SIZE;
@ -139,11 +139,11 @@ arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
}
void
void
arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
{
int i;
cpu_ops.flush_insn_pipeline();
for (i = 0; i < num_pages; i++) {
cpu_ops.flush_atc_addr(pages[i]);
@ -153,7 +153,7 @@ arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
}
void
void
arch_cpu_global_TLB_invalidate(void)
{
cpu_ops.flush_insn_pipeline();
@ -162,7 +162,7 @@ arch_cpu_global_TLB_invalidate(void)
}
void
void
arch_cpu_user_TLB_invalidate(void)
{
cpu_ops.flush_insn_pipeline();
@ -179,6 +179,7 @@ arch_cpu_user_memcpy(void *to, const void *from, size_t size,
char *s = (char *)from;
addr_t oldFaultHandler = *faultHandler;
// TODO: This doesn't work correctly with gcc 4 anymore!
if (m68k_set_fault_handler(faultHandler, (addr_t)&&error))
goto error;
@ -200,7 +201,7 @@ error:
* \param to Pointer to the destination C-string.
* \param from Pointer to the source C-string.
* \param size Size in bytes of the string buffer pointed to by \a to.
*
*
* \return strlen(\a from).
*/
@ -210,12 +211,13 @@ arch_cpu_user_strlcpy(char *to, const char *from, size_t size, addr_t *faultHand
int from_length = 0;
addr_t oldFaultHandler = *faultHandler;
// TODO: This doesn't work correctly with gcc 4 anymore!
if (m68k_set_fault_handler(faultHandler, (addr_t)&&error))
goto error;
if (size > 0) {
to[--size] = '\0';
// copy
// copy
for ( ; size; size--, from_length++, to++, from++) {
if ((*to = *from) == '\0')
break;
@ -240,6 +242,7 @@ arch_cpu_user_memset(void *s, char c, size_t count, addr_t *faultHandler)
char *xs = (char *)s;
addr_t oldFaultHandler = *faultHandler;
// TODO: This doesn't work correctly with gcc 4 anymore!
if (m68k_set_fault_handler(faultHandler, (addr_t)&&error))
goto error;
@ -289,6 +292,7 @@ arch_cpu_idle(void)
bool
m68k_set_fault_handler(addr_t *handlerLocation, addr_t handler)
{
// TODO: This doesn't work correctly with gcc 4 anymore!
*handlerLocation = handler;
return false;
}


@ -65,25 +65,14 @@ get_current_stack_frame()
static status_t
get_next_frame(addr_t framePointer, addr_t *next, addr_t *ip)
{
struct thread *thread = thread_get_current_thread();
addr_t oldFaultHandler = thread->fault_handler;
stack_frame frame;
if (debug_memcpy(&frame, (void*)framePointer, sizeof(frame)) != B_OK)
return B_BAD_ADDRESS;
// set fault handler, so that we can safely access user stacks
if (thread) {
if (m68k_set_fault_handler(&thread->fault_handler, (addr_t)&&error))
goto error;
}
*ip = frame.return_address;
*next = (addr_t)frame.previous;
*ip = ((struct stack_frame *)framePointer)->return_address;
*next = (addr_t)((struct stack_frame *)framePointer)->previous;
if (thread)
thread->fault_handler = oldFaultHandler;
return B_OK;
error:
thread->fault_handler = oldFaultHandler;
return B_BAD_ADDRESS;
}
@ -367,6 +356,22 @@ arch_debug_get_interrupt_pc(bool* _isSyscall)
}
void
arch_debug_unset_current_thread(void)
{
// TODO: Implement!
}
void
arch_debug_call_with_fault_handler(cpu_ent* cpu, jmp_buf jumpBuffer,
void (*function)(void*), void* parameter)
{
// TODO: Implement! Most likely in assembly.
longjmp(jumpBuffer, 1);
}
bool
arch_is_debug_variable_defined(const char* variableName)
{


@ -651,7 +651,7 @@ unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
status_t status;
int index;
start = ROUNDOWN(start, B_PAGE_SIZE);
start = ROUNDDOWN(start, B_PAGE_SIZE);
end = ROUNDUP(end, B_PAGE_SIZE);
TRACE(("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end));
@ -873,7 +873,7 @@ protect_tmap(vm_translation_map *map, addr_t start, addr_t end, uint32 attribute
status_t status;
int index;
start = ROUNDOWN(start, B_PAGE_SIZE);
start = ROUNDDOWN(start, B_PAGE_SIZE);
end = ROUNDUP(end, B_PAGE_SIZE);
TRACE(("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end, attributes));
@ -1511,3 +1511,11 @@ m68k_vm_translation_map_early_map(kernel_args *args, addr_t va, addr_t pa,
return B_OK;
}
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
uint32 protection)
{
// TODO: Implement!
return false;
}


@ -16,7 +16,7 @@
static bool sHasTlbia;
status_t
status_t
arch_cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
{
// enable FPU
@ -65,7 +65,7 @@ arch_cpu_init_post_modules(kernel_args *args)
#define CACHELINE 32
void
void
arch_cpu_sync_icache(void *address, size_t len)
{
int l, off;
@ -109,7 +109,7 @@ arch_cpu_memory_write_barrier(void)
}
void
void
arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
{
asm volatile("sync");
@ -124,7 +124,7 @@ arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
}
void
void
arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
{
int i;
@ -140,7 +140,7 @@ arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
}
void
void
arch_cpu_global_TLB_invalidate(void)
{
if (sHasTlbia) {
@ -156,7 +156,7 @@ arch_cpu_global_TLB_invalidate(void)
tlbie(address);
eieio();
ppc_sync();
address += B_PAGE_SIZE;
}
tlbsync();
@ -165,7 +165,7 @@ arch_cpu_global_TLB_invalidate(void)
}
void
void
arch_cpu_user_TLB_invalidate(void)
{
arch_cpu_global_TLB_invalidate();
@ -180,6 +180,7 @@ arch_cpu_user_memcpy(void *to, const void *from, size_t size,
char *s = (char *)from;
addr_t oldFaultHandler = *faultHandler;
// TODO: This doesn't work correctly with gcc 4 anymore!
if (ppc_set_fault_handler(faultHandler, (addr_t)&&error))
goto error;
@ -201,7 +202,7 @@ error:
* \param to Pointer to the destination C-string.
* \param from Pointer to the source C-string.
* \param size Size in bytes of the string buffer pointed to by \a to.
*
*
* \return strlen(\a from).
*/
@ -211,12 +212,13 @@ arch_cpu_user_strlcpy(char *to, const char *from, size_t size, addr_t *faultHand
int from_length = 0;
addr_t oldFaultHandler = *faultHandler;
// TODO: This doesn't work correctly with gcc 4 anymore!
if (ppc_set_fault_handler(faultHandler, (addr_t)&&error))
goto error;
if (size > 0) {
to[--size] = '\0';
// copy
// copy
for ( ; size; size--, from_length++, to++, from++) {
if ((*to = *from) == '\0')
break;
@ -241,6 +243,7 @@ arch_cpu_user_memset(void *s, char c, size_t count, addr_t *faultHandler)
char *xs = (char *)s;
addr_t oldFaultHandler = *faultHandler;
// TODO: This doesn't work correctly with gcc 4 anymore!
if (ppc_set_fault_handler(faultHandler, (addr_t)&&error))
goto error;
@ -286,6 +289,7 @@ arch_cpu_idle(void)
bool
ppc_set_fault_handler(addr_t *handlerLocation, addr_t handler)
{
// TODO: This doesn't work correctly with gcc 4 anymore!
*handlerLocation = handler;
return false;
}


@ -64,25 +64,14 @@ get_current_stack_frame()
static status_t
get_next_frame(addr_t framePointer, addr_t *next, addr_t *ip)
{
struct thread *thread = thread_get_current_thread();
addr_t oldFaultHandler = thread->fault_handler;
stack_frame frame;
if (debug_memcpy(&frame, (void*)framePointer, sizeof(frame)) != B_OK)
return B_BAD_ADDRESS;
// set fault handler, so that we can safely access user stacks
if (thread) {
if (ppc_set_fault_handler(&thread->fault_handler, (addr_t)&&error))
goto error;
}
*ip = frame.return_address;
*next = (addr_t)frame.previous;
*ip = ((struct stack_frame *)framePointer)->return_address;
*next = (addr_t)((struct stack_frame *)framePointer)->previous;
if (thread)
thread->fault_handler = oldFaultHandler;
return B_OK;
error:
thread->fault_handler = oldFaultHandler;
return B_BAD_ADDRESS;
}
@ -303,6 +292,22 @@ arch_debug_get_interrupt_pc(bool* _isSyscall)
}
void
arch_debug_unset_current_thread(void)
{
// TODO: Implement!
}
void
arch_debug_call_with_fault_handler(cpu_ent* cpu, jmp_buf jumpBuffer,
void (*function)(void*), void* parameter)
{
// TODO: Implement! Most likely in assembly.
longjmp(jumpBuffer, 1);
}
bool
arch_is_debug_variable_defined(const char* variableName)
{


@ -330,7 +330,7 @@ unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
{
page_table_entry *entry;
start = ROUNDOWN(start, B_PAGE_SIZE);
start = ROUNDDOWN(start, B_PAGE_SIZE);
end = ROUNDUP(end, B_PAGE_SIZE);
// dprintf("vm_translation_map.unmap_tmap: start 0x%lx, end 0x%lx\n", start, end);
@ -678,8 +678,8 @@ ppc_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
size_t size)
{
addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE);
physicalAddress = ROUNDOWN(physicalAddress, B_PAGE_SIZE);
virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);
physicalAddress = ROUNDDOWN(physicalAddress, B_PAGE_SIZE);
vm_address_space *addressSpace = vm_kernel_address_space();
@ -701,7 +701,7 @@ void
ppc_unmap_address_range(addr_t virtualAddress, size_t size)
{
addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE);
virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);
vm_address_space *addressSpace = vm_kernel_address_space();
@ -713,7 +713,7 @@ ppc_unmap_address_range(addr_t virtualAddress, size_t size)
status_t
ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
{
addr_t virtualAddress = ROUNDOWN(*_virtualAddress, B_PAGE_SIZE);
addr_t virtualAddress = ROUNDDOWN(*_virtualAddress, B_PAGE_SIZE);
size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);
vm_address_space *addressSpace = vm_kernel_address_space();
@ -746,3 +746,11 @@ ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
return B_OK;
}
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
uint32 protection)
{
// TODO: Implement!
return false;
}


@ -56,26 +56,36 @@ already_visited(uint32 *visited, int32 *_last, int32 *_num, uint32 ebp)
}
/*! Safe to be called only from outside the debugger.
*/
static status_t
get_next_frame(addr_t ebp, addr_t *_next, addr_t *_eip)
get_next_frame_no_debugger(addr_t ebp, addr_t *_next, addr_t *_eip)
{
// set fault handler, so that we can safely access user stacks
addr_t oldFaultHandler = thread_get_current_thread()->fault_handler;
thread_get_current_thread()->fault_handler = (addr_t)&&error;
// Fake goto to trick the compiler not to optimize the code at the label
// away.
if (ebp == 0)
goto error;
// TODO: Do this more efficiently in assembly.
stack_frame frame;
if (user_memcpy(&frame, (void*)ebp, sizeof(frame)) != B_OK)
return B_BAD_ADDRESS;
*_eip = ((struct stack_frame *)ebp)->return_address;
*_next = (addr_t)((struct stack_frame *)ebp)->previous;
*_eip = frame.return_address;
*_next = (addr_t)frame.previous;
thread_get_current_thread()->fault_handler = oldFaultHandler;
return B_OK;
}
error:
thread_get_current_thread()->fault_handler = oldFaultHandler;
return B_BAD_ADDRESS;
/*! Safe to be called only from inside the debugger.
*/
static status_t
get_next_frame_debugger(addr_t ebp, addr_t *_next, addr_t *_eip)
{
stack_frame frame;
if (debug_memcpy(&frame, (void*)ebp, sizeof(frame)) != B_OK)
return B_BAD_ADDRESS;
*_eip = frame.return_address;
*_next = (addr_t)frame.previous;
return B_OK;
}
@ -285,8 +295,10 @@ print_stack_frame(struct thread *thread, addr_t eip, addr_t ebp, addr_t nextEbp,
kprintf(" + 0x%04lx\n", eip - baseAddress);
} else {
vm_area *area = NULL;
if (thread->team->address_space != NULL)
if (thread != NULL && thread->team != NULL
&& thread->team->address_space != NULL) {
area = vm_area_lookup(thread->team->address_space, eip);
}
if (area != NULL) {
kprintf("%ld:%s@%p + %#lx\n", area->id, area->name,
(void*)area->base, eip - area->base);
@ -358,6 +370,7 @@ setup_for_thread(char *arg, struct thread **_thread, uint32 *_ebp,
*_thread = thread;
}
static bool
is_double_fault_stack_address(int32 cpu, addr_t address)
{
@ -564,7 +577,7 @@ stack_trace(int argc, char **argv)
} else {
addr_t eip, nextEbp;
if (get_next_frame(ebp, &nextEbp, &eip) != B_OK) {
if (get_next_frame_debugger(ebp, &nextEbp, &eip) != B_OK) {
kprintf("%08lx -- read fault\n", ebp);
break;
}
@ -723,7 +736,7 @@ show_call(int argc, char **argv)
} else {
addr_t eip, nextEbp;
if (get_next_frame(ebp, &nextEbp, &eip) != B_OK) {
if (get_next_frame_debugger(ebp, &nextEbp, &eip) != B_OK) {
kprintf("%08lx -- read fault\n", ebp);
break;
}
@ -894,7 +907,7 @@ arch_debug_contains_call(struct thread *thread, const char *symbol,
} else {
addr_t eip, nextEbp;
if (get_next_frame(ebp, &nextEbp, &eip) != B_OK
if (get_next_frame_no_debugger(ebp, &nextEbp, &eip) != B_OK
|| eip == 0 || ebp == 0)
break;
@ -964,7 +977,7 @@ arch_debug_get_stack_trace(addr_t* returnAddresses, int32 maxCount,
skipFrames = 0;
}
} else {
if (get_next_frame(ebp, &nextEbp, &eip) != B_OK)
if (get_next_frame_no_debugger(ebp, &nextEbp, &eip) != B_OK)
break;
}
@ -1000,6 +1013,16 @@ arch_debug_get_interrupt_pc(bool* _isSyscall)
}
/*! Sets the current thread to \c NULL.
Invoked in the kernel debugger only.
*/
void
arch_debug_unset_current_thread(void)
{
write_dr3(NULL);
}
bool
arch_is_debug_variable_defined(const char* variableName)
{


@ -204,22 +204,27 @@ set_gate(desc_table *gate_addr, addr_t addr, int type, int dpl)
/*! Initializes the descriptor for interrupt vector \a n in the IDT of the
boot CPU to an interrupt-gate descriptor with the given procedure address.
specified CPU to an interrupt-gate descriptor with the given procedure
address.
For CPUs other than the boot CPU it must not be called before
arch_int_init_post_vm().
*/
static void
set_interrupt_gate(int n, void (*addr)())
set_interrupt_gate(int32 cpu, int n, void (*addr)())
{
set_gate(&sIDTs[0][n], (addr_t)addr, 14, DPL_KERNEL);
set_gate(&sIDTs[cpu][n], (addr_t)addr, 14, DPL_KERNEL);
}
/*! Initializes the descriptor for interrupt vector \a n in the IDT of the
boot CPU to an trap-gate descriptor with the given procedure address.
specified CPU to a trap-gate descriptor with the given procedure address.
For CPUs other than the boot CPU it must not be called before
arch_int_init_post_vm().
*/
static void
set_trap_gate(int n, void (*addr)())
set_trap_gate(int32 cpu, int n, void (*addr)())
{
set_gate(&sIDTs[0][n], (unsigned int)addr, 15, DPL_USER);
set_gate(&sIDTs[cpu][n], (unsigned int)addr, 15, DPL_USER);
}
@ -851,27 +856,74 @@ x86_double_fault_exception(struct iframe* frame)
frame->edi = tss->edi;
frame->flags = tss->eflags;
// Use a special handler for page faults which avoids the triple fault
// pitfalls.
set_interrupt_gate(cpu, 14, &trap14_double_fault);
debug_double_fault(cpu);
}
void
x86_page_fault_exception_double_fault(struct iframe* frame)
{
uint32 cr2;
asm("movl %%cr2, %0" : "=r" (cr2));
// Only if this CPU has a fault handler, we're allowed to be here.
cpu_ent& cpu = gCPU[x86_double_fault_get_cpu()];
addr_t faultHandler = cpu.fault_handler;
if (faultHandler != 0) {
debug_set_page_fault_info(cr2, frame->eip,
(frame->error_code & 0x2) != 0 ? DEBUG_PAGE_FAULT_WRITE : 0);
frame->eip = faultHandler;
frame->ebp = cpu.fault_handler_stack_pointer;
return;
}
// No fault handler. This is bad. Since we originally came from a double
// fault, we don't try to reenter the kernel debugger. Instead we just
// print the info we've got and enter an infinite loop.
kprintf("Page fault in double fault debugger without fault handler! "
"Touching address %p from eip %p. Entering infinite loop...\n",
(void*)cr2, (void*)frame->eip);
while (true);
}
static void
page_fault_exception(struct iframe* frame)
{
struct thread *thread = thread_get_current_thread();
bool kernelDebugger = debug_debugger_running();
unsigned int cr2;
uint32 cr2;
addr_t newip;
asm("movl %%cr2, %0" : "=r" (cr2));
if (kernelDebugger) {
// if this thread has a fault handler, we're allowed to be here
if (thread && thread->fault_handler != 0) {
debug_set_page_fault_info(cr2, frame->eip,
(frame->error_code & 0x2) != 0 ? DEBUG_PAGE_FAULT_WRITE : 0);
frame->eip = thread->fault_handler;
return;
if (debug_debugger_running()) {
// If this CPU or this thread has a fault handler, we're allowed to be
// here.
if (thread != NULL) {
cpu_ent* cpu = &gCPU[smp_get_current_cpu()];
if (cpu->fault_handler != 0) {
debug_set_page_fault_info(cr2, frame->eip,
(frame->error_code & 0x2) != 0
? DEBUG_PAGE_FAULT_WRITE : 0);
frame->eip = cpu->fault_handler;
frame->ebp = cpu->fault_handler_stack_pointer;
return;
}
if (thread->fault_handler != 0) {
kprintf("ERROR: thread::fault_handler used in kernel "
"debugger!\n");
debug_set_page_fault_info(cr2, frame->eip,
(frame->error_code & 0x2) != 0
? DEBUG_PAGE_FAULT_WRITE : 0);
frame->eip = thread->fault_handler;
return;
}
}
// otherwise, not really
@ -886,8 +938,16 @@ page_fault_exception(struct iframe* frame)
// disabled, which in most cases is a bug. We should add some thread
// flag allowing to explicitly indicate that this handling is desired.
if (thread && thread->fault_handler != 0) {
frame->eip = thread->fault_handler;
return;
if (frame->eip != thread->fault_handler) {
frame->eip = thread->fault_handler;
return;
}
// The fault happened at the fault handler address. This is a
// certain infinite loop.
panic("page fault, interrupts disabled, fault handler loop. "
"Touching address %p from eip %p\n", (void*)cr2,
(void*)frame->eip);
}
// If we are not running the kernel startup the page fault was not
@ -971,60 +1031,60 @@ arch_int_init(struct kernel_args *args)
// setup the standard programmable interrupt controller
pic_init();
set_interrupt_gate(0, &trap0);
set_interrupt_gate(1, &trap1);
set_interrupt_gate(2, &trap2);
set_trap_gate(3, &trap3);
set_interrupt_gate(4, &trap4);
set_interrupt_gate(5, &trap5);
set_interrupt_gate(6, &trap6);
set_interrupt_gate(7, &trap7);
set_interrupt_gate(0, 0, &trap0);
set_interrupt_gate(0, 1, &trap1);
set_interrupt_gate(0, 2, &trap2);
set_trap_gate(0, 3, &trap3);
set_interrupt_gate(0, 4, &trap4);
set_interrupt_gate(0, 5, &trap5);
set_interrupt_gate(0, 6, &trap6);
set_interrupt_gate(0, 7, &trap7);
// trap8 (double fault) is set in arch_cpu.c
set_interrupt_gate(9, &trap9);
set_interrupt_gate(10, &trap10);
set_interrupt_gate(11, &trap11);
set_interrupt_gate(12, &trap12);
set_interrupt_gate(13, &trap13);
set_interrupt_gate(14, &trap14);
// set_interrupt_gate(15, &trap15);
set_interrupt_gate(16, &trap16);
set_interrupt_gate(17, &trap17);
set_interrupt_gate(18, &trap18);
set_interrupt_gate(19, &trap19);
set_interrupt_gate(0, 9, &trap9);
set_interrupt_gate(0, 10, &trap10);
set_interrupt_gate(0, 11, &trap11);
set_interrupt_gate(0, 12, &trap12);
set_interrupt_gate(0, 13, &trap13);
set_interrupt_gate(0, 14, &trap14);
// set_interrupt_gate(0, 15, &trap15);
set_interrupt_gate(0, 16, &trap16);
set_interrupt_gate(0, 17, &trap17);
set_interrupt_gate(0, 18, &trap18);
set_interrupt_gate(0, 19, &trap19);
set_interrupt_gate(32, &trap32);
set_interrupt_gate(33, &trap33);
set_interrupt_gate(34, &trap34);
set_interrupt_gate(35, &trap35);
set_interrupt_gate(36, &trap36);
set_interrupt_gate(37, &trap37);
set_interrupt_gate(38, &trap38);
set_interrupt_gate(39, &trap39);
set_interrupt_gate(40, &trap40);
set_interrupt_gate(41, &trap41);
set_interrupt_gate(42, &trap42);
set_interrupt_gate(43, &trap43);
set_interrupt_gate(44, &trap44);
set_interrupt_gate(45, &trap45);
set_interrupt_gate(46, &trap46);
set_interrupt_gate(47, &trap47);
set_interrupt_gate(48, &trap48);
set_interrupt_gate(49, &trap49);
set_interrupt_gate(50, &trap50);
set_interrupt_gate(51, &trap51);
set_interrupt_gate(52, &trap52);
set_interrupt_gate(53, &trap53);
set_interrupt_gate(54, &trap54);
set_interrupt_gate(55, &trap55);
set_interrupt_gate(0, 32, &trap32);
set_interrupt_gate(0, 33, &trap33);
set_interrupt_gate(0, 34, &trap34);
set_interrupt_gate(0, 35, &trap35);
set_interrupt_gate(0, 36, &trap36);
set_interrupt_gate(0, 37, &trap37);
set_interrupt_gate(0, 38, &trap38);
set_interrupt_gate(0, 39, &trap39);
set_interrupt_gate(0, 40, &trap40);
set_interrupt_gate(0, 41, &trap41);
set_interrupt_gate(0, 42, &trap42);
set_interrupt_gate(0, 43, &trap43);
set_interrupt_gate(0, 44, &trap44);
set_interrupt_gate(0, 45, &trap45);
set_interrupt_gate(0, 46, &trap46);
set_interrupt_gate(0, 47, &trap47);
set_interrupt_gate(0, 48, &trap48);
set_interrupt_gate(0, 49, &trap49);
set_interrupt_gate(0, 50, &trap50);
set_interrupt_gate(0, 51, &trap51);
set_interrupt_gate(0, 52, &trap52);
set_interrupt_gate(0, 53, &trap53);
set_interrupt_gate(0, 54, &trap54);
set_interrupt_gate(0, 55, &trap55);
set_trap_gate(98, &trap98); // for performance testing only
set_trap_gate(99, &trap99);
set_trap_gate(0, 98, &trap98); // for performance testing only
set_trap_gate(0, 99, &trap99);
set_interrupt_gate(251, &trap251);
set_interrupt_gate(252, &trap252);
set_interrupt_gate(253, &trap253);
set_interrupt_gate(254, &trap254);
set_interrupt_gate(255, &trap255);
set_interrupt_gate(0, 251, &trap251);
set_interrupt_gate(0, 252, &trap252);
set_interrupt_gate(0, 253, &trap253);
set_interrupt_gate(0, 254, &trap254);
set_interrupt_gate(0, 255, &trap255);
// init interrupt handler table
table = gInterruptHandlerTable;


@ -213,6 +213,7 @@ FUNCTION(double_fault):
POP_IFRAME_AND_RETURN()
FUNCTION_END(double_fault)
TRAP(trap9, 9)
TRAP_ERRC(trap10, 10)
TRAP_ERRC(trap11, 11)
@ -257,6 +258,23 @@ TRAP(trap254, 254)
TRAP(trap255, 255)
.align 8;
FUNCTION(trap14_double_fault):
pushl $14
pushl $-1
pushl $-1
PUSH_IFRAME_BOTTOM(IFRAME_TYPE_OTHER)
movl %esp, %ebp // frame pointer is the iframe
pushl %ebp
call x86_page_fault_exception_double_fault
POP_IFRAME_AND_RETURN()
FUNCTION_END(trap14_double_fault)
.align 16
STATIC_FUNCTION(int_bottom):
PUSH_IFRAME_BOTTOM(IFRAME_TYPE_OTHER)


@ -375,7 +375,7 @@ unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
page_directory_entry *pd = map->arch_data->pgdir_virt;
int index;
start = ROUNDOWN(start, B_PAGE_SIZE);
start = ROUNDDOWN(start, B_PAGE_SIZE);
end = ROUNDUP(end, B_PAGE_SIZE);
TRACE(("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end));
@ -515,7 +515,7 @@ protect_tmap(vm_translation_map *map, addr_t start, addr_t end,
page_directory_entry *pd = map->arch_data->pgdir_virt;
int index;
start = ROUNDOWN(start, B_PAGE_SIZE);
start = ROUNDDOWN(start, B_PAGE_SIZE);
end = ROUNDUP(end, B_PAGE_SIZE);
TRACE(("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
@ -934,3 +934,75 @@ arch_vm_translation_map_early_map(kernel_args *args, addr_t va, addr_t pa,
return B_OK;
}
/*! Verifies that the page at the given virtual address can be accessed in the
current context.
This function is invoked in the kernel debugger. Paranoid checking is in
order.
\param virtualAddress The virtual address to be checked.
\param protection The area protection for which to check. Valid is a bitwise
or of one or more of \c B_KERNEL_READ_AREA or \c B_KERNEL_WRITE_AREA.
\return \c true, if the address can be accessed in all ways specified by
\a protection, \c false otherwise.
*/
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
uint32 protection)
{
// We only trust the kernel team's page directory. So switch to it first.
// Always set it to make sure the TLBs don't contain obsolete data.
addr_t physicalPageDirectory;
read_cr3(physicalPageDirectory);
write_cr3(sKernelPhysicalPageDirectory);
// get the page directory entry for the address
page_directory_entry pageDirectoryEntry;
uint32 index = VADDR_TO_PDENT(virtualAddress);
if (physicalPageDirectory == (addr_t)sKernelPhysicalPageDirectory) {
pageDirectoryEntry = sKernelVirtualPageDirectory[index];
} else {
// map the original page directory and get the entry
void* handle;
addr_t virtualPageDirectory;
status_t error = gPhysicalPageMapper->GetPageDebug(
physicalPageDirectory, &virtualPageDirectory, &handle);
if (error == B_OK) {
pageDirectoryEntry
= ((page_directory_entry*)virtualPageDirectory)[index];
gPhysicalPageMapper->PutPageDebug(virtualPageDirectory,
handle);
} else
pageDirectoryEntry.present = 0;
}
// map the page table and get the entry
page_table_entry pageTableEntry;
index = VADDR_TO_PTENT(virtualAddress);
if (pageDirectoryEntry.present != 0) {
void* handle;
addr_t virtualPageTable;
status_t error = gPhysicalPageMapper->GetPageDebug(
ADDR_REVERSE_SHIFT(pageDirectoryEntry.addr), &virtualPageTable,
&handle);
if (error == B_OK) {
pageTableEntry = ((page_table_entry*)virtualPageTable)[index];
gPhysicalPageMapper->PutPageDebug(virtualPageTable, handle);
} else
pageTableEntry.present = 0;
} else
pageTableEntry.present = 0;
// switch back to the original page directory
if (physicalPageDirectory != (addr_t)sKernelPhysicalPageDirectory)
write_cr3(physicalPageDirectory);
if (pageTableEntry.present == 0)
return false;
// present means kernel-readable, so check for writable
return (protection & B_KERNEL_WRITE_AREA) == 0 || pageTableEntry.rw != 0;
}


@ -11,6 +11,7 @@
#include <arch/x86/descriptors.h>
#include "asm_offsets.h"
#include "syscall_numbers.h"
@ -285,3 +286,51 @@ FUNCTION(arch_cpu_user_memcpy):
ret
FUNCTION_END(arch_cpu_user_memcpy)
/*! \fn void arch_debug_call_with_fault_handler(cpu_ent* cpu,
jmp_buf jumpBuffer, void (*function)(void*), void* parameter)
Called by debug_call_with_fault_handler() to do the dirty work of setting
the fault handler and calling the function. If the function causes a page
fault, the arch_debug_call_with_fault_handler() calls longjmp() with the
given \a jumpBuffer. Otherwise it returns normally.
debug_call_with_fault_handler() has already saved the CPU's fault_handler
and fault_handler_stack_pointer and will reset them later, so
arch_debug_call_with_fault_handler() doesn't need to care about it.
\param cpu The \c cpu_ent for the current CPU.
\param jumpBuffer Buffer to be used for longjmp().
\param function The function to be called.
\param parameter The parameter to be passed to the function to be called.
*/
FUNCTION(arch_debug_call_with_fault_handler):
push %ebp
movl %esp, %ebp
// Set fault handler address, and fault handler stack pointer address. We
// don't need to save the previous values, since that's done by the caller.
movl 8(%ebp), %eax // cpu to %eax
lea 1f, %edx
movl %edx, CPU_ENT_fault_handler(%eax)
movl %ebp, CPU_ENT_fault_handler_stack_pointer(%eax)
// call the function
movl 20(%ebp), %eax // parameter
push %eax
movl 16(%ebp), %eax // function
call *%eax
// regular return
movl %ebp, %esp
pop %ebp
ret
// fault -- return via longjmp(jumpBuffer, 1)
1:
movl %ebp, %esp // restore %esp
pushl $1
movl 12(%ebp), %eax // jumpBuffer
pushl %eax
call longjmp
FUNCTION_END(arch_debug_call_with_fault_handler)


@ -8,7 +8,9 @@
// a header file with macro definitions, that can be included from assembler
// code.
#include <arch_cpu.h>
#include <cpu.h>
#include <ksyscalls.h>
#include <thread_types.h>
@ -26,6 +28,10 @@
void
dummy()
{
// struct cpu_ent
DEFINE_OFFSET_MACRO(CPU_ENT, cpu_ent, fault_handler);
DEFINE_OFFSET_MACRO(CPU_ENT, cpu_ent, fault_handler_stack_pointer);
// struct thread
DEFINE_OFFSET_MACRO(THREAD, thread, kernel_time);
DEFINE_OFFSET_MACRO(THREAD, thread, user_time);


@ -23,6 +23,7 @@ void trap44();void trap45();void trap46();void trap47();void trap48();void trap4
void trap50();void trap51();void trap52();void trap53();void trap54();void trap55();
void double_fault(); // int 8
void trap14_double_fault();
void trap98();
void trap99();


@ -36,6 +36,12 @@ public:
virtual page_table_entry* InterruptGetPageTableAt(
addr_t physicalAddress) = 0;
virtual status_t GetPageDebug(addr_t physicalAddress,
addr_t* _virtualAddress,
void** _handle) = 0;
virtual status_t PutPageDebug(addr_t virtualAddress,
void* _handle) = 0;
};
extern PhysicalPageMapper* gPhysicalPageMapper;


@ -166,9 +166,9 @@ public:
inline status_t PutPageCurrentCPU(addr_t virtualAddress,
void* handle);
inline status_t GetPageDebug(addr_t physicalAddress,
virtual status_t GetPageDebug(addr_t physicalAddress,
addr_t* virtualAddress, void** handle);
inline status_t PutPageDebug(addr_t virtualAddress,
virtual status_t PutPageDebug(addr_t virtualAddress,
void* handle);
status_t GetSlot(bool canWait,


@ -26,6 +26,7 @@
#include <thread.h>
#include <tracing.h>
#include <vm.h>
#include <vm_translation_map.h>
#include <arch/debug_console.h>
#include <arch/debug.h>
@ -47,6 +48,13 @@
#include "debug_variables.h"
struct debug_memcpy_parameters {
void* to;
const void* from;
size_t size;
};
static const char* const kKDLPrompt = "kdebug> ";
extern "C" int kgets(char* buffer, int length);
@ -678,19 +686,42 @@ kernel_debugger_loop(const char* message, int32 cpu)
kprintf("Welcome to Kernel Debugging Land...\n");
if (struct thread* thread = thread_get_current_thread()) {
// set a few temporary debug variables
// Set a few temporary debug variables and print on which CPU and in which
// thread we are running.
set_debug_variable("_cpu", sDebuggerOnCPU);
struct thread* thread = thread_get_current_thread();
if (thread == NULL) {
kprintf("Running on CPU %ld\n", sDebuggerOnCPU);
} else if (!debug_is_kernel_memory_accessible((addr_t)thread,
sizeof(struct thread), B_KERNEL_READ_AREA)) {
kprintf("Running on CPU %ld\n", sDebuggerOnCPU);
kprintf("Current thread pointer is %p, which is an address we "
"can't read from.\n", thread);
arch_debug_unset_current_thread();
} else {
set_debug_variable("_thread", (uint64)(addr_t)thread);
set_debug_variable("_threadID", thread->id);
set_debug_variable("_team", (uint64)(addr_t)thread->team);
if (thread->team != NULL)
set_debug_variable("_teamID", thread->team->id);
set_debug_variable("_cpu", sDebuggerOnCPU);
kprintf("Thread %ld \"%s\" running on CPU %ld\n", thread->id,
kprintf("Thread %ld \"%.64s\" running on CPU %ld\n", thread->id,
thread->name, sDebuggerOnCPU);
} else
kprintf("Running on CPU %ld\n", sDebuggerOnCPU);
if (thread->cpu != gCPU + cpu) {
kprintf("The thread's CPU pointer is %p, but should be %p.\n",
thread->cpu, gCPU + cpu);
arch_debug_unset_current_thread();
} else if (thread->team != NULL) {
if (debug_is_kernel_memory_accessible((addr_t)thread->team,
sizeof(struct team), B_KERNEL_READ_AREA)) {
set_debug_variable("_team", (uint64)(addr_t)thread->team);
set_debug_variable("_teamID", thread->team->id);
} else {
kprintf("The thread's team pointer is %p, which is an "
"address we can't read from.\n", thread->team);
arch_debug_unset_current_thread();
}
}
}
int32 continuableLine = -1;
// Index of the previous command line, if the command returned
@ -1140,6 +1171,14 @@ err1:
}
static void
debug_memcpy_trampoline(void* _parameters)
{
debug_memcpy_parameters* parameters = (debug_memcpy_parameters*)_parameters;
memcpy(parameters->to, parameters->from, parameters->size);
}
void
call_modules_hook(bool enter)
{
@ -1410,6 +1449,90 @@ debug_emergency_key_pressed(char key)
}
/*! Verifies that the complete given memory range is accessible in the current
context.
Invoked in the kernel debugger only.
\param address The start address of the memory range to be checked.
\param size The size of the memory range to be checked.
\param protection The area protection for which to check. Valid is a bitwise
or of one or more of \c B_KERNEL_READ_AREA or \c B_KERNEL_WRITE_AREA.
\return \c true, if the complete memory range can be accessed in all ways
specified by \a protection, \c false otherwise.
*/
bool
debug_is_kernel_memory_accessible(addr_t address, size_t size,
uint32 protection)
{
addr_t endAddress = ROUNDUP(address + size, B_PAGE_SIZE);
address = ROUNDDOWN(address, B_PAGE_SIZE);
if (!IS_KERNEL_ADDRESS(address) || endAddress < address)
return false;
for (; address < endAddress; address += B_PAGE_SIZE) {
if (!arch_vm_translation_map_is_kernel_page_accessible(address,
protection)) {
return false;
}
}
return true;
}
/*! Calls a function in a setjmp() + fault handler context.
May only be used in the kernel debugger.
\param jumpBuffer Buffer to be used for setjmp()/longjmp().
\param function The function to be called.
\param parameter The parameter to be passed to the function to be called.
\return
- \c 0, when the function executed without causing a page fault or
calling longjmp().
- \c 1, when the function caused a page fault.
- Any other value the function passes to longjmp().
*/
int
debug_call_with_fault_handler(jmp_buf jumpBuffer, void (*function)(void*),
void* parameter)
{
// save current fault handler
cpu_ent* cpu = gCPU + sDebuggerOnCPU;
addr_t oldFaultHandler = cpu->fault_handler;
addr_t oldFaultHandlerStackPointer = cpu->fault_handler_stack_pointer;
int result = setjmp(jumpBuffer);
if (result == 0) {
arch_debug_call_with_fault_handler(cpu, jumpBuffer, function,
parameter);
}
// restore old fault handler
cpu->fault_handler = oldFaultHandler;
cpu->fault_handler_stack_pointer = oldFaultHandlerStackPointer;
return result;
}
/*! Similar to user_memcpy(), but can only be invoked from within the kernel
debugger (and must not be used outside).
*/
status_t
debug_memcpy(void* to, const void* from, size_t size)
{
debug_memcpy_parameters parameters = {to, from, size};
if (debug_call_with_fault_handler(gCPU[sDebuggerOnCPU].fault_jump_buffer,
&debug_memcpy_trampoline, &parameters) != 0) {
return B_BAD_ADDRESS;
}
return B_OK;
}
// #pragma mark - public API


@ -29,6 +29,14 @@
#define INVOKE_COMMAND_ERROR 2
struct invoke_command_parameters {
debugger_command* command;
int argc;
char** argv;
int result;
};
static const int32 kMaxInvokeCommandDepth = 5;
static const int32 kOutputBufferSize = 1024;
@ -153,6 +161,16 @@ static PipeDebugOutputFilter sPipeOutputFilters[
MAX_DEBUGGER_COMMAND_PIPE_LENGTH - 1];
static void
invoke_command_trampoline(void* _parameters)
{
invoke_command_parameters* parameters
= (invoke_command_parameters*)_parameters;
parameters->result = parameters->command->func(parameters->argc,
parameters->argv);
}
static int
invoke_pipe_segment(debugger_command_pipe* pipe, int32 index, char* argument)
{
@ -268,9 +286,6 @@ invoke_debugger_command(struct debugger_command *command, int argc, char** argv)
return 0;
}
struct thread* thread = thread_get_current_thread();
addr_t oldFaultHandler = thread->fault_handler;
// replace argv[0] with the actual command name
argv[0] = (char *)command->name;
@ -284,26 +299,18 @@ invoke_debugger_command(struct debugger_command *command, int argc, char** argv)
sInCommand = true;
switch (setjmp(sInvokeCommandEnv[sInvokeCommandLevel++])) {
invoke_command_parameters parameters;
parameters.command = command;
parameters.argc = argc;
parameters.argv = argv;
switch (debug_call_with_fault_handler(
sInvokeCommandEnv[sInvokeCommandLevel++],
&invoke_command_trampoline, &parameters)) {
case 0:
int result;
thread->fault_handler = (addr_t)&&error;
// Fake goto to trick the compiler not to optimize the code at the
// label away.
if (!thread)
goto error;
result = command->func(argc, argv);
thread->fault_handler = oldFaultHandler;
sInvokeCommandLevel--;
sInCommand = false;
return result;
error:
// jump to INVOKE_COMMAND_FAULT case, cleaning up the stack
longjmp(sInvokeCommandEnv[--sInvokeCommandLevel],
INVOKE_COMMAND_FAULT);
return parameters.result;
case INVOKE_COMMAND_FAULT:
{
@ -324,7 +331,6 @@ invoke_debugger_command(struct debugger_command *command, int argc, char** argv)
break;
}
thread->fault_handler = oldFaultHandler;
sInCommand = false;
return B_KDEBUG_ERROR;
}


@ -4,6 +4,7 @@
* Distributed under the terms of the MIT License.
*/
#include <debug.h>
#include <ctype.h>
@ -695,7 +696,7 @@ ExpressionParser::_ParseExpression(bool expectAssignment)
break;
}
if (user_memcpy(address, &buffer, size) != B_OK) {
if (debug_memcpy(address, &buffer, size) != B_OK) {
snprintf(sTempBuffer, sizeof(sTempBuffer),
"failed to write to address %p", address);
parse_exception(sTempBuffer, position);
@ -1032,7 +1033,7 @@ ExpressionParser::_ParseDereference(void** _address, uint32* _size)
// read bytes from address into a tempory buffer
uint64 buffer;
if (user_memcpy(&buffer, address, size) != B_OK) {
if (debug_memcpy(&buffer, address, size) != B_OK) {
snprintf(sTempBuffer, sizeof(sTempBuffer),
"failed to dereference address %p", address);
parse_exception(sTempBuffer, starPosition);


@ -290,8 +290,8 @@ gdb_parse_command(void)
// We cannot directly access the requested memory
// for gdb may be trying to access an stray pointer
// We copy the memory to a safe buffer using
// the bulletproof user_memcpy().
if (user_memcpy(sSafeMemory, (char*)address, len) < 0)
// the bulletproof debug_memcpy().
if (debug_memcpy(sSafeMemory, (char*)address, len) < 0)
gdb_reply("E02");
else
gdb_memreply(sSafeMemory, len);


@ -1786,7 +1786,7 @@ elf_load_user_image(const char *path, struct team *team, int flags,
if (programHeaders[i].p_type != PT_LOAD)
continue;
regionAddress = (char *)ROUNDOWN(programHeaders[i].p_vaddr,
regionAddress = (char *)ROUNDDOWN(programHeaders[i].p_vaddr,
B_PAGE_SIZE);
if (programHeaders[i].p_flags & PF_WRITE) {
// rw/data segment
@ -1803,7 +1803,7 @@ elf_load_user_image(const char *path, struct team *team, int flags,
id = vm_map_file(team->id, regionName, (void **)&regionAddress,
B_EXACT_ADDRESS, fileUpperBound,
B_READ_AREA | B_WRITE_AREA, REGION_PRIVATE_MAP, false,
fd, ROUNDOWN(programHeaders[i].p_offset, B_PAGE_SIZE));
fd, ROUNDDOWN(programHeaders[i].p_offset, B_PAGE_SIZE));
if (id < B_OK) {
dprintf("error mapping file data: %s!\n", strerror(id));
status = B_NOT_AN_EXECUTABLE;
@ -1851,7 +1851,7 @@ elf_load_user_image(const char *path, struct team *team, int flags,
id = vm_map_file(team->id, regionName, (void **)&regionAddress,
B_EXACT_ADDRESS, segmentSize,
B_READ_AREA | B_EXECUTE_AREA, REGION_PRIVATE_MAP, false,
fd, ROUNDOWN(programHeaders[i].p_offset, B_PAGE_SIZE));
fd, ROUNDDOWN(programHeaders[i].p_offset, B_PAGE_SIZE));
if (id < B_OK) {
dprintf("error mapping file text: %s!\n", strerror(id));
status = B_NOT_AN_EXECUTABLE;
@ -2070,7 +2070,7 @@ load_kernel_add_on(const char *path)
continue;
}
region->start = (addr_t)reservedAddress + ROUNDOWN(
region->start = (addr_t)reservedAddress + ROUNDDOWN(
programHeaders[i].p_vaddr, B_PAGE_SIZE);
region->size = ROUNDUP(programHeaders[i].p_memsz
+ (programHeaders[i].p_vaddr % B_PAGE_SIZE), B_PAGE_SIZE);
@ -2083,7 +2083,7 @@ load_kernel_add_on(const char *path)
status = B_NOT_AN_EXECUTABLE;
goto error4;
}
region->delta = -ROUNDOWN(programHeaders[i].p_vaddr, B_PAGE_SIZE);
region->delta = -ROUNDDOWN(programHeaders[i].p_vaddr, B_PAGE_SIZE);
TRACE(("elf_load_kspace: created area \"%s\" at %p\n",
regionName, (void *)region->start));


@ -686,7 +686,7 @@ object_cache_init(object_cache *cache, const char *name, size_t objectSize,
static void
object_cache_commit_slab(object_cache *cache, slab *slab)
{
void *pages = (void *)ROUNDOWN((addr_t)slab->pages, B_PAGE_SIZE);
void *pages = (void *)ROUNDDOWN((addr_t)slab->pages, B_PAGE_SIZE);
if (create_area(cache->name, &pages, B_EXACT_ADDRESS, cache->slab_size,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA) < B_OK)
panic("failed to create_area()");


@ -31,6 +31,7 @@
#include <file_cache.h>
#include <fs/fd.h>
#include <heap.h>
#include <kernel.h>
#include <int.h>
#include <lock.h>
#include <low_resource_manager.h>
@ -63,9 +64,6 @@
# define FTRACE(x) ;
#endif
#define ROUNDUP(a, b) (((a) + ((b)-1)) & ~((b)-1))
#define ROUNDOWN(a, b) (((a) / (b)) * (b))
class AddressSpaceReadLocker {
public:
@ -2266,7 +2264,7 @@ _vm_map_file(team_id team, const char* name, void** _address,
TRACE(("_vm_map_file(fd = %d, offset = %Ld, size = %lu, mapping %ld)\n",
fd, offset, size, mapping));
offset = ROUNDOWN(offset, B_PAGE_SIZE);
offset = ROUNDDOWN(offset, B_PAGE_SIZE);
size = PAGE_ALIGN(size);
if (mapping == REGION_NO_PRIVATE_MAP)
@ -3332,7 +3330,7 @@ display_mem(int argc, char** argv)
kprintf("NOTE: number of bytes has been cut to page size\n");
}
address = ROUNDOWN(address, B_PAGE_SIZE);
address = ROUNDDOWN(address, B_PAGE_SIZE);
if (vm_get_physical_page_debug(address, &copyAddress,
&physicalPageHandle) != B_OK) {
@ -3351,7 +3349,7 @@ display_mem(int argc, char** argv)
// string mode
for (i = 0; true; i++) {
char c;
if (user_memcpy(&c, (char*)copyAddress + i, 1) != B_OK
if (debug_memcpy(&c, (char*)copyAddress + i, 1) != B_OK
|| c == '\0')
break;
@ -3382,7 +3380,7 @@ display_mem(int argc, char** argv)
for (j = 0; j < displayed; j++) {
char c;
if (user_memcpy(&c, (char*)copyAddress + i * itemSize + j,
if (debug_memcpy(&c, (char*)copyAddress + i * itemSize + j,
1) != B_OK) {
displayed = j;
break;
@ -3400,7 +3398,7 @@ display_mem(int argc, char** argv)
kprintf(" ");
}
if (user_memcpy(&value, (uint8*)copyAddress + i * itemSize,
if (debug_memcpy(&value, (uint8*)copyAddress + i * itemSize,
itemSize) != B_OK) {
kprintf("read fault");
break;
@ -3426,7 +3424,7 @@ display_mem(int argc, char** argv)
}
if (physical) {
copyAddress = ROUNDOWN(copyAddress, B_PAGE_SIZE);
copyAddress = ROUNDDOWN(copyAddress, B_PAGE_SIZE);
vm_put_physical_page_debug(copyAddress, physicalPageHandle);
}
return 0;
@ -4033,7 +4031,7 @@ create_preloaded_image_areas(struct preloaded_image* image)
memcpy(name, fileName, length);
strcpy(name + length, "_text");
address = (void*)ROUNDOWN(image->text_region.start, B_PAGE_SIZE);
address = (void*)ROUNDDOWN(image->text_region.start, B_PAGE_SIZE);
image->text_region.id = create_area(name, &address, B_EXACT_ADDRESS,
PAGE_ALIGN(image->text_region.size), B_ALREADY_WIRED,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
@ -4041,7 +4039,7 @@ create_preloaded_image_areas(struct preloaded_image* image)
// ELF initialization code
strcpy(name + length, "_data");
address = (void*)ROUNDOWN(image->data_region.start, B_PAGE_SIZE);
address = (void*)ROUNDDOWN(image->data_region.start, B_PAGE_SIZE);
image->data_region.id = create_area(name, &address, B_EXACT_ADDRESS,
PAGE_ALIGN(image->data_region.size), B_ALREADY_WIRED,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
@ -4304,11 +4302,11 @@ vm_init(kernel_args* args)
// allocate areas to represent stuff that already exists
address = (void*)ROUNDOWN(heapBase, B_PAGE_SIZE);
address = (void*)ROUNDDOWN(heapBase, B_PAGE_SIZE);
create_area("kernel heap", &address, B_EXACT_ADDRESS, heapSize,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
address = (void*)ROUNDOWN(slabInitialBase, B_PAGE_SIZE);
address = (void*)ROUNDDOWN(slabInitialBase, B_PAGE_SIZE);
create_area("initial slab space", &address, B_EXACT_ADDRESS,
slabInitialSize, B_ALREADY_WIRED, B_KERNEL_READ_AREA
| B_KERNEL_WRITE_AREA);
@ -4431,7 +4429,7 @@ vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite, bool isUser,
TPF(PageFaultStart(address, isWrite, isUser, faultAddress));
addr_t pageAddress = ROUNDOWN(address, B_PAGE_SIZE);
addr_t pageAddress = ROUNDDOWN(address, B_PAGE_SIZE);
vm_address_space* addressSpace = NULL;
status_t status = B_OK;
@ -4829,7 +4827,7 @@ vm_soft_fault(vm_address_space* addressSpace, addr_t originalAddress,
PageFaultContext context(addressSpace, isWrite);
addr_t address = ROUNDOWN(originalAddress, B_PAGE_SIZE);
addr_t address = ROUNDDOWN(originalAddress, B_PAGE_SIZE);
status_t status = B_OK;
atomic_add(&addressSpace->fault_count, 1);
@ -5407,7 +5405,7 @@ lock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags)
struct vm_translation_map* map;
addr_t unalignedBase = (addr_t)address;
addr_t end = unalignedBase + numBytes;
addr_t base = ROUNDOWN(unalignedBase, B_PAGE_SIZE);
addr_t base = ROUNDDOWN(unalignedBase, B_PAGE_SIZE);
bool isUser = IS_USER_ADDRESS(address);
bool needsLocking = true;
@ -5513,7 +5511,7 @@ unlock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags)
struct vm_translation_map* map;
addr_t unalignedBase = (addr_t)address;
addr_t end = unalignedBase + numBytes;
addr_t base = ROUNDOWN(unalignedBase, B_PAGE_SIZE);
addr_t base = ROUNDDOWN(unalignedBase, B_PAGE_SIZE);
bool needsLocking = true;
if (IS_USER_ADDRESS(address)) {