Merge branch 'aslr'

This commit is contained in:
Pawel Dziepak 2013-04-17 20:07:32 +02:00
commit 46575667eb
87 changed files with 956 additions and 348 deletions

View File

@ -73,11 +73,14 @@ typedef struct area_info {
#define B_32_BIT_CONTIGUOUS 6 /* B_CONTIGUOUS, < 4 GB physical address */
/* address spec for create_area(), and clone_area() */
#define B_ANY_ADDRESS 0
#define B_EXACT_ADDRESS 1
#define B_BASE_ADDRESS 2
#define B_CLONE_ADDRESS 3
#define B_ANY_KERNEL_ADDRESS 4
#define B_ANY_ADDRESS 0
#define B_EXACT_ADDRESS 1
#define B_BASE_ADDRESS 2
#define B_CLONE_ADDRESS 3
#define B_ANY_KERNEL_ADDRESS 4
/* B_ANY_KERNEL_BLOCK_ADDRESS 5 */
#define B_RANDOMIZED_ANY_ADDRESS 6
#define B_RANDOMIZED_BASE_ADDRESS 7
/* area protection */
#define B_READ_AREA 1

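The two new specifiers ask the VM to randomize placement: B_RANDOMIZED_ANY_ADDRESS picks a random spot anywhere in the address space, while B_RANDOMIZED_BASE_ADDRESS randomizes at or above a given base. A minimal sketch of how a caller could request this through the public create_area() API (the constants come from this header; the area name, hint, and size are illustrative):

    #include <OS.h>

    area_id
    create_randomized_area()
    {
        void* base = (void*)0x20000000;    // hint: place at or above this
        return create_area("demo area", &base, B_RANDOMIZED_BASE_ADDRESS,
            16 * B_PAGE_SIZE, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
    }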
View File

@ -25,7 +25,7 @@
#define USER_SIZE (0x80000000 - (0x10000 + 0x100000))
#define USER_TOP (USER_BASE + (USER_SIZE - 1))
#define KERNEL_USER_DATA_BASE 0x6fff0000
#define KERNEL_USER_DATA_BASE 0x60000000
#define USER_STACK_REGION 0x70000000
#define USER_STACK_REGION_SIZE ((USER_TOP - USER_STACK_REGION) + 1)

View File

@ -25,7 +25,7 @@
#define USER_SIZE (0x80000000 - (0x10000 + 0x100000))
#define USER_TOP (USER_BASE + (USER_SIZE - 1))
#define KERNEL_USER_DATA_BASE 0x6fff0000
#define KERNEL_USER_DATA_BASE 0x60000000
#define USER_STACK_REGION 0x70000000
#define USER_STACK_REGION_SIZE ((USER_TOP - USER_STACK_REGION) + 1)

View File

@ -28,7 +28,7 @@
#define USER_SIZE (0x80000000 - (0x10000 + 0x100000))
#define USER_TOP (USER_BASE + (USER_SIZE - 1))
#define KERNEL_USER_DATA_BASE 0x6fff0000
#define KERNEL_USER_DATA_BASE 0x60000000
#define USER_STACK_REGION 0x70000000
#define USER_STACK_REGION_SIZE ((USER_TOP - USER_STACK_REGION) + 1)

View File

@ -25,7 +25,7 @@
#define USER_SIZE (0x80000000 - (0x10000 + 0x100000))
#define USER_TOP (USER_BASE + (USER_SIZE - 1))
#define KERNEL_USER_DATA_BASE 0x6fff0000
#define KERNEL_USER_DATA_BASE 0x60000000
#define USER_STACK_REGION 0x70000000
#define USER_STACK_REGION_SIZE ((USER_TOP - USER_STACK_REGION) + 1)

View File

@ -39,6 +39,11 @@
#define IA32_MSR_EFER 0xc0000080
// MSR EFER bits
// reference
#define IA32_MSR_EFER_SYSCALL (1 << 0)
#define IA32_MSR_EFER_NX (1 << 11)
// x86_64 MSRs.
#define IA32_MSR_STAR 0xc0000081
#define IA32_MSR_LSTAR 0xc0000082
@ -131,6 +136,13 @@
#define IA32_FEATURE_AMD_EXT_3DNOWEXT (1 << 30) // 3DNow! extensions
#define IA32_FEATURE_AMD_EXT_3DNOW (1 << 31) // 3DNow!
// some of the features from cpuid eax 0x80000001, edx register (AMD) are also
// available on Intel processors
#define IA32_FEATURES_INTEL_EXT (IA32_FEATURE_AMD_EXT_SYSCALL \
| IA32_FEATURE_AMD_EXT_NX \
| IA32_FEATURE_AMD_EXT_RDTSCP \
| IA32_FEATURE_AMD_EXT_LONG)
// x86 defined features from cpuid eax 6, eax register
// reference http://www.intel.com/Assets/en_US/PDF/appnote/241618.pdf (Table 5-11)
#define IA32_FEATURE_DTS (1 << 0) //Digital Thermal Sensor

View File

@ -48,8 +48,8 @@
#define USER_SIZE (0x800000000000 - 0x200000)
#define USER_TOP (USER_BASE + (USER_SIZE - 1))
#define KERNEL_USER_DATA_BASE 0x7fffefff0000
#define USER_STACK_REGION 0x7ffff0000000
#define KERNEL_USER_DATA_BASE 0x7f0000000000
#define USER_STACK_REGION 0x7f0000000000
#define USER_STACK_REGION_SIZE ((USER_TOP - USER_STACK_REGION) + 1)
@ -76,7 +76,7 @@
#define USER_SIZE (KERNEL_BASE - 0x10000)
#define USER_TOP (USER_BASE + (USER_SIZE - 1))
#define KERNEL_USER_DATA_BASE 0x6fff0000
#define KERNEL_USER_DATA_BASE 0x60000000
#define USER_STACK_REGION 0x70000000
#define USER_STACK_REGION_SIZE ((USER_TOP - USER_STACK_REGION) + 1)

View File

@ -18,8 +18,9 @@ extern "C" {
status_t commpage_init(void);
status_t commpage_init_post_cpus(void);
void* allocate_commpage_entry(int entry, size_t size);
void* fill_commpage_entry(int entry, const void* copyFrom, size_t size);
addr_t fill_commpage_entry(int entry, const void* copyFrom, size_t size);
image_id get_commpage_image();
area_id clone_commpage_area(team_id team, void** address);
// implemented in the architecture specific part
status_t arch_commpage_init(void);

View File

@ -52,6 +52,7 @@ struct signal_frame_data {
int32 thread_flags;
uint64 syscall_restart_return_value;
uint8 syscall_restart_parameters[SYSCALL_RESTART_PARAMETER_SIZE];
void* commpage_address;
};

View File

@ -259,6 +259,8 @@ struct Team : TeamThreadIteratorEntry<team_id>, KernelReferenceable,
size_t used_user_data;
struct free_user_thread* free_user_threads;
void* commpage_address;
struct team_debug_info debug_info;
// protected by scheduler lock

View File

@ -0,0 +1,87 @@
/*
* Copyright 2013 Haiku, Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* Paweł Dziepak, pdziepak@quarnos.org
*/
#ifndef KERNEL_UTIL_RANDOM_H
#define KERNEL_UTIL_RANDOM_H
#include <smp.h>
#include <SupportDefs.h>
#define MAX_FAST_RANDOM_VALUE 0x7fff
#define MAX_RANDOM_VALUE 0x7fffffffu
#define MAX_SECURE_RANDOM_VALUE 0xffffffffu
static const int kFastRandomShift = 15;
static const int kRandomShift = 31;
static const int kSecureRandomShift = 32;
#ifdef __cplusplus
extern "C" {
#endif
unsigned int fast_random_value(void);
unsigned int random_value(void);
unsigned int secure_random_value(void);
#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
template<typename T>
T
fast_get_random()
{
size_t shift = 0;
T random = 0;
while (shift < sizeof(T) * 8) {
random |= (T)fast_random_value() << shift;
shift += kFastRandomShift;
}
return random;
}
template<typename T>
T
get_random()
{
size_t shift = 0;
T random = 0;
while (shift < sizeof(T) * 8) {
random |= (T)random_value() << shift;
shift += kRandomShift;
}
return random;
}
template<typename T>
T
secure_get_random()
{
size_t shift = 0;
T random = 0;
while (shift < sizeof(T) * 8) {
random |= (T)secure_random_value() << shift;
shift += kSecureRandomShift;
}
return random;
}
#endif // __cplusplus
#endif // KERNEL_UTIL_RANDOM_H

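The templates above widen a narrow generator output by OR-ing successive calls together, each shifted by the generator's entropy width (15, 31, or 32 bits). A standalone sketch of the same composition, with rand31() standing in for the kernel's random_value():

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    static const int kShift = 31;    // entropy bits per rand31() call

    static uint32_t
    rand31()
    {
        return (uint32_t)rand() & 0x7fffffffu;
    }

    template<typename T>
    T
    compose_random()
    {
        size_t shift = 0;
        T random = 0;
        while (shift < sizeof(T) * 8) {
            random |= (T)rand31() << shift;    // fill the next 31 bits
            shift += kShift;
        }
        return random;
    }

    int
    main()
    {
        printf("%#llx\n", (unsigned long long)compose_random<uint64_t>());
        return 0;
    }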
View File

@ -121,6 +121,8 @@ status_t vm_delete_area(team_id teamID, area_id areaID, bool kernel);
status_t vm_create_vnode_cache(struct vnode *vnode, struct VMCache **_cache);
status_t vm_set_area_memory_type(area_id id, phys_addr_t physicalBase,
uint32 type);
status_t vm_set_area_protection(team_id team, area_id areaID,
uint32 newProtection, bool kernel);
status_t vm_get_page_mapping(team_id team, addr_t vaddr, phys_addr_t *paddr);
bool vm_test_map_modification(struct vm_page *page);
void vm_clear_map_flags(struct vm_page *page, uint32 flags);

View File

@ -28,7 +28,7 @@ extern "C" {
// Should only be used by vm internals
status_t vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite,
bool isUser, addr_t *newip);
bool isExecute, bool isUser, addr_t *newip);
void vm_unreserve_memory(size_t bytes);
status_t vm_try_reserve_memory(size_t bytes, int priority, bigtime_t timeout);
status_t vm_daemon_init(void);

View File

@ -34,7 +34,7 @@ void __init_env(const struct user_space_program_args *args);
void __init_heap(void);
void __init_heap_post_env(void);
void __init_time(void);
void __init_time(addr_t commPageTable);
void __arch_init_time(struct real_time_data *data, bool setDefaults);
bigtime_t __arch_get_system_time_offset(struct real_time_data *data);
bigtime_t __get_system_time_offset();

View File

@ -51,6 +51,7 @@ struct rld_export {
void (*call_termination_hooks)();
const struct user_space_program_args *program_args;
const void* commpage_address;
};
extern struct rld_export *__gRuntimeLoader;

View File

@ -12,8 +12,4 @@
//#define COMMPAGE_ENTRY_M68K_SYSCALL (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 0)
//#define COMMPAGE_ENTRY_M68K_MEMCPY (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 1)
/* 0xffff0000 collides with IO space mapped with TT1 on Atari */
#warning ARM: determine good place for commpage..
#define ARCH_USER_COMMPAGE_ADDR (0xfeff0000)
#endif /* _SYSTEM_ARCH_M68K_COMMPAGE_DEFS_H */

View File

@ -12,7 +12,4 @@
#define COMMPAGE_ENTRY_M68K_SYSCALL (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 0)
#define COMMPAGE_ENTRY_M68K_MEMCPY (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 1)
/* 0xffff0000 collides with IO space mapped with TT1 on Atari */
#define ARCH_USER_COMMPAGE_ADDR (0xfeff0000)
#endif /* _SYSTEM_ARCH_M68K_COMMPAGE_DEFS_H */

View File

@ -14,7 +14,5 @@
#define COMMPAGE_ENTRY_MIPSEL_SYSCALL (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 0)
#define COMMPAGE_ENTRY_MIPSEL_MEMCPY (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 1)
#define ARCH_USER_COMMPAGE_ADDR (0xffff0000)
#endif /* _SYSTEM_ARCH_MIPSEL_COMMPAGE_DEFS_H */

View File

@ -12,6 +12,4 @@
#define COMMPAGE_ENTRY_PPC_SYSCALL (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 0)
#define COMMPAGE_ENTRY_PPC_MEMCPY (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 1)
#define ARCH_USER_COMMPAGE_ADDR (0xffff0000)
#endif /* _SYSTEM_ARCH_PPC_COMMPAGE_DEFS_H */

View File

@ -16,7 +16,7 @@
(COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 3)
#define COMMPAGE_ENTRY_X86_SIGNAL_HANDLER_BEOS \
(COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 4)
#define ARCH_USER_COMMPAGE_ADDR (0xffff0000)
#define COMMPAGE_ENTRY_X86_THREAD_EXIT \
(COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 5)
#endif /* _SYSTEM_ARCH_x86_COMMPAGE_DEFS_H */

View File

@ -13,7 +13,7 @@
#define COMMPAGE_ENTRY_X86_MEMSET (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 1)
#define COMMPAGE_ENTRY_X86_SIGNAL_HANDLER \
(COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 2)
#define ARCH_USER_COMMPAGE_ADDR (0xffffffffffff0000)
#define COMMPAGE_ENTRY_X86_THREAD_EXIT \
(COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 3)
#endif /* _SYSTEM_ARCH_x86_64_COMMPAGE_DEFS_H */

View File

@ -19,11 +19,6 @@
#define COMMPAGE_SIGNATURE 'COMM'
#define COMMPAGE_VERSION 1
#define USER_COMMPAGE_ADDR ARCH_USER_COMMPAGE_ADDR
// set by the architecture specific implementation
#define USER_COMMPAGE_TABLE ((void**)(USER_COMMPAGE_ADDR))
#include <arch_commpage_defs.h>
#endif /* _SYSTEM_COMMPAGE_DEFS_H */

View File

@ -15,7 +15,7 @@
#define USER_STACK_GUARD_SIZE (4 * B_PAGE_SIZE) // 16 kB
#define USER_MAIN_THREAD_STACK_SIZE (16 * 1024 * 1024) // 16 MB
#define USER_STACK_SIZE (256 * 1024) // 256 kB
#define MIN_USER_STACK_SIZE (4 * 1024) // 4 KB
#define MIN_USER_STACK_SIZE (8 * 1024) // 8 kB
#define MAX_USER_STACK_SIZE (16 * 1024 * 1024) // 16 MB

View File

@ -16,8 +16,9 @@
#include <unistd.h>
#include <AutoDeleter.h>
#include <util/kernel_cpp.h>
#include <net/dns_resolver.h>
#include <util/kernel_cpp.h>
#include <util/Random.h>
#define NFS4_PORT 2049
@ -655,7 +656,7 @@ Connection::Connect()
PeerAddress address(fPeerAddress.Family());
do {
port = rand() % (IPPORT_RESERVED - NFS_MIN_PORT);
port = get_random<uint16>() % (IPPORT_RESERVED - NFS_MIN_PORT);
port += NFS_MIN_PORT;
if (attempt == 9)

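Switching from rand() to get_random<uint16>() draws from the kernel-wide generator; the result is then mapped into the reserved-port range. A sketch of that mapping (IPPORT_RESERVED is the conventional 1024; the NFS_MIN_PORT value below is an assumed stand-in):

    #include <cstdint>

    static const int kIpportReserved = 1024;
    static const int kNfsMinPort = 665;    // assumed value, for illustration

    static uint16_t
    pick_reserved_port(uint16_t random)
    {
        // map the random value into [kNfsMinPort, kIpportReserved)
        return random % (kIpportReserved - kNfsMinPort) + kNfsMinPort;
    }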
View File

@ -13,6 +13,7 @@
#include <AutoDeleter.h>
#include <lock.h>
#include <util/Random.h>
#include "Request.h"
#include "RootInode.h"
@ -32,9 +33,7 @@ FileSystem::FileSystem(const MountConfiguration& configuration)
fId(1),
fConfiguration(configuration)
{
fOpenOwner = rand();
fOpenOwner <<= 32;
fOpenOwner |= rand();
fOpenOwner = get_random<uint64>();
mutex_init(&fOpenOwnerLock, NULL);
mutex_init(&fOpenLock, NULL);

View File

@ -12,6 +12,7 @@
#include <stdlib.h>
#include <util/AutoLock.h>
#include <util/Random.h>
#include "RPCCallbackServer.h"
#include "RPCReply.h"
@ -83,7 +84,7 @@ Server::Server(Connection* connection, PeerAddress* address)
fPrivateData(NULL),
fCallback(NULL),
fRepairCount(0),
fXID(rand() << 1)
fXID(get_random<uint32>())
{
ASSERT(connection != NULL);
ASSERT(address != NULL);

View File

@ -12,6 +12,8 @@
#include <errno.h>
#include <string.h>
#include <util/Random.h>
#include "Cookie.h"
#include "OpenState.h"
#include "RPCCallback.h"
@ -659,8 +661,7 @@ RequestBuilder::SetClientID(RPC::Server* server)
return B_NO_MEMORY;
fRequest->Stream().AddUInt(OpSetClientID);
uint64 verifier = rand();
verifier = verifier << 32 | rand();
uint64 verifier = get_random<uint64>();
fRequest->Stream().AddUHyper(verifier);
status_t result = _GenerateClientId(fRequest->Stream(), server);

View File

@ -13,7 +13,6 @@
#include <Locker.h>
#include <AutoLocker.h>
#include <commpage_defs.h>
#include <OS.h>
#include <system_info.h>
#include <util/DoublyLinkedList.h>
@ -517,24 +516,6 @@ DebuggerInterface::GetImageInfos(BObjectList<ImageInfo>& infos)
}
}
// Also add the "commpage" image, which belongs to the kernel, but is used
// by userland teams.
cookie = 0;
while (get_next_image_info(B_SYSTEM_TEAM, &cookie, &imageInfo) == B_OK) {
if ((addr_t)imageInfo.text >= USER_COMMPAGE_ADDR
&& (addr_t)imageInfo.text < USER_COMMPAGE_ADDR + COMMPAGE_SIZE) {
ImageInfo* info = new(std::nothrow) ImageInfo(B_SYSTEM_TEAM,
imageInfo.id, imageInfo.name, imageInfo.type,
(addr_t)imageInfo.text, imageInfo.text_size,
(addr_t)imageInfo.data, imageInfo.data_size);
if (info == NULL || !infos.AddItem(info)) {
delete info;
return B_NO_MEMORY;
}
break;
}
}
return B_OK;
}

View File

@ -400,3 +400,65 @@ KernelImage::Init(const image_info& info)
fSymbolTable, &fSymbolCount, fStringTable, &fStringTableSize,
&fLoadDelta);
}
CommPageImage::CommPageImage()
{
}
CommPageImage::~CommPageImage()
{
delete[] fSymbolTable;
delete[] fStringTable;
}
status_t
CommPageImage::Init(const image_info& info)
{
// find kernel image for commpage
image_id commPageID = -1;
image_info commPageInfo;
int32 cookie = 0;
while (_kern_get_next_image_info(B_SYSTEM_TEAM, &cookie, &commPageInfo,
sizeof(image_info)) == B_OK) {
if (!strcmp("commpage", commPageInfo.name)) {
commPageID = commPageInfo.id;
break;
}
}
if (commPageID < 0)
return B_ENTRY_NOT_FOUND;
fInfo = commPageInfo;
fInfo.text = info.text;
// get the table sizes
fSymbolCount = 0;
fStringTableSize = 0;
status_t error = _kern_read_kernel_image_symbols(commPageID, NULL,
&fSymbolCount, NULL, &fStringTableSize, NULL);
if (error != B_OK)
return error;
// allocate the tables
fSymbolTable = new(std::nothrow) elf_sym[fSymbolCount];
fStringTable = new(std::nothrow) char[fStringTableSize];
if (fSymbolTable == NULL || fStringTable == NULL)
return B_NO_MEMORY;
// get the info
error = _kern_read_kernel_image_symbols(commPageID,
fSymbolTable, &fSymbolCount, fStringTable, &fStringTableSize, NULL);
if (error != B_OK) {
delete[] fSymbolTable;
delete[] fStringTable;
return error;
}
fLoadDelta = (addr_t)info.text;
return B_OK;
}

View File

@ -111,6 +111,15 @@ public:
status_t Init(const image_info& info);
};
class CommPageImage : public SymbolTableBasedImage {
public:
CommPageImage();
virtual ~CommPageImage();
status_t Init(const image_info& info);
};
} // namespace Debug
} // namespace BPrivate

View File

@ -295,6 +295,14 @@ SymbolLookup::Init()
error = kernelImage->Init(imageInfo);
image = kernelImage;
} else if (!strcmp("commpage", imageInfo.name)) {
// commpage image
CommPageImage* commPageImage = new(std::nothrow) CommPageImage;
if (commPageImage == NULL)
return B_NO_MEMORY;
error = commPageImage->Init(imageInfo);
image = commPageImage;
} else {
// userland image -- try to load an image file
ImageFile* imageFile = new(std::nothrow) ImageFile;

View File

@ -277,7 +277,7 @@ arch_arm_data_abort(struct iframe *frame)
enable_interrupts();
vm_page_fault(far, frame->pc, isWrite, isUser, &newip);
vm_page_fault(far, frame->pc, isWrite, false, isUser, &newip);
if (newip != 0) {
// the page fault handler wants us to modify the iframe to set the

View File

@ -238,6 +238,7 @@ m68k_exception_entry(struct iframe *iframe)
vm_page_fault(fault_address(iframe), iframe->cpu.pc,
fault_was_write(iframe), // store or load
false,
iframe->cpu.sr & SR_S, // was the system in user or supervisor mode
&newip);
if (newip != 0) {

View File

@ -164,6 +164,7 @@ ppc_exception_entry(int vector, struct iframe *iframe)
vm_page_fault(iframe->dar, iframe->srr0,
iframe->dsisr & (1 << 25), // store or load
false,
iframe->srr1 & (1 << 14), // was the system in user or supervisor mode
&newip);
if (newip != 0) {

View File

@ -115,7 +115,7 @@ FUNCTION(x86_swap_pgdir):
ret
FUNCTION_END(x86_swap_pgdir)
/* thread exit stub - is copied to the userspace stack in arch_thread_enter_uspace() */
/* thread exit stub */
.align 4
FUNCTION(x86_userspace_thread_exit):
pushl %eax

View File

@ -766,7 +766,9 @@ FUNCTION(x86_sysenter):
pushl $USER_CODE_SEG // user cs
// user_eip
movl USER_COMMPAGE_ADDR + 4 * COMMPAGE_ENTRY_X86_SYSCALL, %edx
movl THREAD_team(%edx), %edx
movl TEAM_commpage_address(%edx), %edx
addl 4 * COMMPAGE_ENTRY_X86_SYSCALL(%edx), %edx
addl $4, %edx // sysenter is at offset 2, 2 bytes long
pushl %edx

View File

@ -89,14 +89,13 @@ register_signal_handler_function(const char* functionName, int32 commpageIndex,
ASSERT(expectedAddress == symbolInfo.address);
// fill in the commpage table entry
fill_commpage_entry(commpageIndex, (void*)symbolInfo.address,
symbolInfo.size);
addr_t position = fill_commpage_entry(commpageIndex,
(void*)symbolInfo.address, symbolInfo.size);
// add symbol to the commpage image
image_id image = get_commpage_image();
elf_add_memory_image_symbol(image, commpageSymbolName,
((addr_t*)USER_COMMPAGE_ADDR)[commpageIndex], symbolInfo.size,
B_SYMBOL_TYPE_TEXT);
elf_add_memory_image_symbol(image, commpageSymbolName, position,
symbolInfo.size, B_SYMBOL_TYPE_TEXT);
}
@ -116,10 +115,10 @@ x86_initialize_commpage_signal_handler()
addr_t
x86_get_user_signal_handler_wrapper(bool beosHandler)
x86_get_user_signal_handler_wrapper(bool beosHandler, void* commPageAddress)
{
int32 index = beosHandler
? COMMPAGE_ENTRY_X86_SIGNAL_HANDLER_BEOS
: COMMPAGE_ENTRY_X86_SIGNAL_HANDLER;
return ((addr_t*)USER_COMMPAGE_ADDR)[index];
return ((addr_t*)commPageAddress)[index] + (addr_t)commPageAddress;
}

View File

@ -37,7 +37,8 @@ FUNCTION(x86_signal_frame_function_beos):
lea SIGNAL_FRAME_DATA_context + UCONTEXT_T_uc_mcontext(%esi), %eax
push %eax
push %edi
movl USER_COMMPAGE_ADDR + 4 * COMMPAGE_ENTRY_X86_MEMCPY, %eax
movl SIGNAL_FRAME_DATA_commpage_address(%esi), %eax
addl 4 * COMMPAGE_ENTRY_X86_MEMCPY(%eax), %eax
call *%eax
addl $12, %esp
@ -57,7 +58,8 @@ FUNCTION(x86_signal_frame_function_beos):
push %edi
lea SIGNAL_FRAME_DATA_context + UCONTEXT_T_uc_mcontext(%esi), %eax
push %eax
movl USER_COMMPAGE_ADDR + 4 * COMMPAGE_ENTRY_X86_MEMCPY, %eax
movl SIGNAL_FRAME_DATA_commpage_address(%esi), %eax
addl 4 * COMMPAGE_ENTRY_X86_MEMCPY(%eax), %eax
call *%eax
addl $12 + VREGS_sizeof, %esp

View File

@ -106,11 +106,11 @@ x86_initialize_syscall(void)
// fill in the table entry
size_t len = (size_t)((addr_t)syscallCodeEnd - (addr_t)syscallCode);
fill_commpage_entry(COMMPAGE_ENTRY_X86_SYSCALL, syscallCode, len);
addr_t position = fill_commpage_entry(COMMPAGE_ENTRY_X86_SYSCALL,
syscallCode, len);
// add syscall to the commpage image
image_id image = get_commpage_image();
elf_add_memory_image_symbol(image, "commpage_syscall",
((addr_t*)USER_COMMPAGE_ADDR)[COMMPAGE_ENTRY_X86_SYSCALL], len,
elf_add_memory_image_symbol(image, "commpage_syscall", position, len,
B_SYMBOL_TYPE_TEXT);
}

View File

@ -13,6 +13,7 @@
#include <arch/user_debugger.h>
#include <arch_cpu.h>
#include <commpage.h>
#include <cpu.h>
#include <debug.h>
#include <kernel.h>
@ -23,6 +24,7 @@
#include <tls.h>
#include <tracing.h>
#include <util/AutoLock.h>
#include <util/Random.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>
@ -200,6 +202,15 @@ arch_thread_dump_info(void *info)
}
static addr_t
arch_randomize_stack_pointer(addr_t value)
{
STATIC_ASSERT(MAX_RANDOM_VALUE >= B_PAGE_SIZE - 1);
value -= random_value() & (B_PAGE_SIZE - 1);
return value & ~addr_t(0xf);
}
/*! Sets up initial thread context and enters user space
*/
status_t
@ -207,21 +218,19 @@ arch_thread_enter_userspace(Thread* thread, addr_t entry, void* args1,
void* args2)
{
addr_t stackTop = thread->user_stack_base + thread->user_stack_size;
uint32 codeSize = (addr_t)x86_end_userspace_thread_exit
- (addr_t)x86_userspace_thread_exit;
uint32 args[3];
TRACE(("arch_thread_enter_userspace: entry 0x%lx, args %p %p, "
"ustack_top 0x%lx\n", entry, args1, args2, stackTop));
// copy the little stub that calls exit_thread() when the thread entry
// function returns, as well as the arguments of the entry function
stackTop -= codeSize;
stackTop = arch_randomize_stack_pointer(stackTop);
if (user_memcpy((void *)stackTop, (const void *)&x86_userspace_thread_exit, codeSize) < B_OK)
return B_BAD_ADDRESS;
args[0] = stackTop;
// Copy the address of the stub that calls exit_thread() when the thread
// entry function returns to the top of the stack to act as the return
// address. The stub is inside commpage.
addr_t commPageAddress = (addr_t)thread->team->commpage_address;
args[0] = ((addr_t*)commPageAddress)[COMMPAGE_ENTRY_X86_THREAD_EXIT]
+ commPageAddress;
args[1] = (uint32)args1;
args[2] = (uint32)args2;
stackTop -= sizeof(args);
@ -345,7 +354,8 @@ arch_setup_signal_frame(Thread* thread, struct sigaction* action,
// the prepared stack, executing the signal handler wrapper function.
frame->user_sp = (addr_t)userStack;
frame->ip = x86_get_user_signal_handler_wrapper(
(action->sa_flags & SA_BEOS_COMPATIBLE_HANDLER) != 0);
(action->sa_flags & SA_BEOS_COMPATIBLE_HANDLER) != 0,
thread->team->commpage_address);
return B_OK;
}

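arch_randomize_stack_pointer() shifts the stack top down by a random sub-page amount, then re-aligns it to 16 bytes as the x86 calling conventions expect. A self-contained sketch of the same arithmetic, with rand() standing in for the kernel's random_value():

    #include <cstdint>
    #include <cstdlib>

    typedef uintptr_t addr_t;
    static const addr_t kPageSize = 4096;    // B_PAGE_SIZE on x86

    static addr_t
    randomize_stack_pointer(addr_t top)
    {
        top -= (addr_t)rand() & (kPageSize - 1);    // up to one page down
        return top & ~addr_t(0xf);                  // keep 16-byte alignment
    }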
View File

@ -118,7 +118,7 @@ FUNCTION(x86_swap_pgdir):
FUNCTION_END(x86_swap_pgdir)
/* thread exit stub - copied to the userspace stack in arch_thread_enter_uspace() */
/* thread exit stub */
.align 8
FUNCTION(x86_userspace_thread_exit):
movq %rax, %rdi

View File

@ -28,12 +28,12 @@ x86_initialize_commpage_signal_handler()
// Copy the signal handler code to the commpage.
size_t len = (size_t)((addr_t)handlerCodeEnd - (addr_t)handlerCode);
fill_commpage_entry(COMMPAGE_ENTRY_X86_SIGNAL_HANDLER, handlerCode, len);
addr_t position = fill_commpage_entry(COMMPAGE_ENTRY_X86_SIGNAL_HANDLER,
handlerCode, len);
// Add symbol to the commpage image.
image_id image = get_commpage_image();
elf_add_memory_image_symbol(image, "commpage_signal_handler",
((addr_t*)USER_COMMPAGE_ADDR)[COMMPAGE_ENTRY_X86_SIGNAL_HANDLER],
elf_add_memory_image_symbol(image, "commpage_signal_handler", position,
len, B_SYMBOL_TYPE_TEXT);
}

View File

@ -20,7 +20,8 @@ static void
init_syscall_registers(void* dummy, int cpuNum)
{
// Enable SYSCALL (EFER.SCE = 1).
x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER) | (1 << 0));
x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER)
| IA32_MSR_EFER_SYSCALL);
// Flags to clear upon entry. Want interrupts disabled and the direction
// flag cleared.

View File

@ -22,6 +22,7 @@
#include <thread.h>
#include <tls.h>
#include <tracing.h>
#include <util/Random.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>
@ -197,6 +198,15 @@ arch_thread_dump_info(void* info)
}
static addr_t
arch_randomize_stack_pointer(addr_t value)
{
STATIC_ASSERT(MAX_RANDOM_VALUE >= B_PAGE_SIZE - 1);
value -= random_value() & (B_PAGE_SIZE - 1);
return value & ~addr_t(0xf);
}
/*! Sets up initial thread context and enters user space
*/
status_t
@ -208,20 +218,14 @@ arch_thread_enter_userspace(Thread* thread, addr_t entry, void* args1,
TRACE("arch_thread_enter_userspace: entry %#lx, args %p %p, "
"stackTop %#lx\n", entry, args1, args2, stackTop);
// Copy the little stub that calls exit_thread() when the thread entry
// function returns.
// TODO: This will become a problem later if we want to support execute
// disable, the stack shouldn't really be executable.
size_t codeSize = (addr_t)x86_end_userspace_thread_exit
- (addr_t)x86_userspace_thread_exit;
stackTop -= codeSize;
if (user_memcpy((void*)stackTop, (const void*)&x86_userspace_thread_exit,
codeSize) != B_OK)
return B_BAD_ADDRESS;
stackTop = arch_randomize_stack_pointer(stackTop);
// Copy the address of the stub to the top of the stack to act as the
// return address.
addr_t codeAddr = stackTop;
// Copy the address of the stub that calls exit_thread() when the thread
// entry function returns to the top of the stack to act as the return
// address. The stub is inside commpage.
addr_t commPageAddress = (addr_t)thread->team->commpage_address;
addr_t codeAddr = ((addr_t*)commPageAddress)[COMMPAGE_ENTRY_X86_THREAD_EXIT]
+ commPageAddress;
stackTop -= sizeof(codeAddr);
if (user_memcpy((void*)stackTop, (const void*)&codeAddr, sizeof(codeAddr))
!= B_OK)
@ -340,8 +344,10 @@ arch_setup_signal_frame(Thread* thread, struct sigaction* action,
// Set up the iframe to execute the signal handler wrapper on our prepared
// stack. First argument points to the frame data.
addr_t* commPageAddress = (addr_t*)thread->team->commpage_address;
frame->user_sp = (addr_t)userStack;
frame->ip = ((addr_t*)USER_COMMPAGE_ADDR)[COMMPAGE_ENTRY_X86_SIGNAL_HANDLER];
frame->ip = commPageAddress[COMMPAGE_ENTRY_X86_SIGNAL_HANDLER]
+ (addr_t)commPageAddress;
frame->di = (addr_t)userSignalFrameData;
return B_OK;

View File

@ -605,10 +605,12 @@ detect_cpu(int currentCPU)
get_current_cpuid(&cpuid, 1);
cpu->arch.feature[FEATURE_COMMON] = cpuid.eax_1.features; // edx
cpu->arch.feature[FEATURE_EXT] = cpuid.eax_1.extended_features; // ecx
if (cpu->arch.vendor == VENDOR_AMD) {
if (cpu->arch.vendor == VENDOR_AMD || cpu->arch.vendor == VENDOR_INTEL) {
get_current_cpuid(&cpuid, 0x80000001);
cpu->arch.feature[FEATURE_EXT_AMD] = cpuid.regs.edx; // edx
}
if (cpu->arch.vendor == VENDOR_INTEL)
cpu->arch.feature[FEATURE_EXT_AMD] &= IA32_FEATURES_INTEL_EXT;
get_current_cpuid(&cpuid, 6);
cpu->arch.feature[FEATURE_6_EAX] = cpuid.regs.eax;
cpu->arch.feature[FEATURE_6_ECX] = cpuid.regs.ecx;
@ -862,21 +864,26 @@ arch_cpu_init_post_modules(kernel_args* args)
// put the optimized functions into the commpage
size_t memcpyLen = (addr_t)gOptimizedFunctions.memcpy_end
- (addr_t)gOptimizedFunctions.memcpy;
fill_commpage_entry(COMMPAGE_ENTRY_X86_MEMCPY,
addr_t memcpyPosition = fill_commpage_entry(COMMPAGE_ENTRY_X86_MEMCPY,
(const void*)gOptimizedFunctions.memcpy, memcpyLen);
size_t memsetLen = (addr_t)gOptimizedFunctions.memset_end
- (addr_t)gOptimizedFunctions.memset;
fill_commpage_entry(COMMPAGE_ENTRY_X86_MEMSET,
addr_t memsetPosition = fill_commpage_entry(COMMPAGE_ENTRY_X86_MEMSET,
(const void*)gOptimizedFunctions.memset, memsetLen);
size_t threadExitLen = (addr_t)x86_end_userspace_thread_exit
- (addr_t)x86_userspace_thread_exit;
addr_t threadExitPosition = fill_commpage_entry(
COMMPAGE_ENTRY_X86_THREAD_EXIT, (const void*)x86_userspace_thread_exit,
threadExitLen);
// add the functions to the commpage image
image_id image = get_commpage_image();
elf_add_memory_image_symbol(image, "commpage_memcpy",
((addr_t*)USER_COMMPAGE_ADDR)[COMMPAGE_ENTRY_X86_MEMCPY], memcpyLen,
B_SYMBOL_TYPE_TEXT);
elf_add_memory_image_symbol(image, "commpage_memset",
((addr_t*)USER_COMMPAGE_ADDR)[COMMPAGE_ENTRY_X86_MEMSET], memsetLen,
B_SYMBOL_TYPE_TEXT);
elf_add_memory_image_symbol(image, "commpage_memcpy", memcpyPosition,
memcpyLen, B_SYMBOL_TYPE_TEXT);
elf_add_memory_image_symbol(image, "commpage_memset", memsetPosition,
memsetLen, B_SYMBOL_TYPE_TEXT);
elf_add_memory_image_symbol(image, "commpage_thread_exit",
threadExitPosition, threadExitLen, B_SYMBOL_TYPE_TEXT);
return B_OK;
}

View File

@ -319,8 +319,9 @@ x86_page_fault_exception(struct iframe* frame)
enable_interrupts();
vm_page_fault(cr2, frame->ip,
(frame->error_code & 0x2) != 0, // write access
(frame->error_code & 0x4) != 0, // userland
(frame->error_code & 0x2) != 0, // write access
(frame->error_code & 0x10) != 0, // instruction fetch
(frame->error_code & 0x4) != 0, // userland
&newip);
if (newip != 0) {
// the page fault handler wants us to modify the iframe to set the

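vm_page_fault() now receives a third predicate: bit 4 of the x86 page-fault error code marks an instruction fetch, which the CPU only reports when NX is active. A sketch of the decoding, with the bit layout as documented in the Intel SDM:

    #include <cstdint>

    struct PageFaultInfo {
        bool write;      // bit 1: write access
        bool user;       // bit 2: fault occurred in user mode
        bool execute;    // bit 4: instruction fetch (reported with NX)
    };

    static PageFaultInfo
    decode_error_code(uint32_t errorCode)
    {
        PageFaultInfo info;
        info.write = (errorCode & 0x2) != 0;
        info.user = (errorCode & 0x4) != 0;
        info.execute = (errorCode & 0x10) != 0;
        return info;
    }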
View File

@ -728,6 +728,15 @@ arch_vm_supports_protection(uint32 protection)
return false;
}
// Userland and the kernel share the same NX-bit setting, which is why we
// do not allow any area that userland can access but not execute while
// the kernel can execute it.
if ((protection & (B_READ_AREA | B_WRITE_AREA)) != 0
&& (protection & B_EXECUTE_AREA) == 0
&& (protection & B_KERNEL_EXECUTE_AREA) != 0) {
return false;
}
return true;
}

View File

@ -86,13 +86,16 @@ arch_vm_translation_map_init(kernel_args *args,
gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod64Bit;
#elif B_HAIKU_PHYSICAL_BITS == 64
bool paeAvailable = x86_check_feature(IA32_FEATURE_PAE, FEATURE_COMMON);
bool paeNeeded = false;
for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
phys_addr_t end = args->physical_memory_range[i].start
+ args->physical_memory_range[i].size;
if (end > 0x100000000LL) {
paeNeeded = true;
break;
bool paeNeeded = x86_check_feature(IA32_FEATURE_AMD_EXT_NX,
FEATURE_EXT_AMD);
if (!paeNeeded) {
for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
phys_addr_t end = args->physical_memory_range[i].start
+ args->physical_memory_range[i].size;
if (end > 0x100000000LL) {
paeNeeded = true;
break;
}
}
}

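The change forces PAE not only when physical memory extends past 4 GB but also whenever the CPU advertises NX, because on 32-bit x86 the NX bit exists only in the wider PAE page-table entries. A condensed sketch of the decision:

    #include <cstdint>

    static bool
    pae_needed(bool cpuHasNX, const uint64_t* rangeEnd, int rangeCount)
    {
        if (cpuHasNX)
            return true;                        // the NX bit requires PAE entries
        for (int i = 0; i < rangeCount; i++) {
            if (rangeEnd[i] > 0x100000000ULL)   // RAM above 4 GB
                return true;
        }
        return false;
    }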
View File

@ -34,7 +34,11 @@ dummy()
DEFINE_OFFSET_MACRO(CPU_ENT, cpu_ent, fault_handler);
DEFINE_OFFSET_MACRO(CPU_ENT, cpu_ent, fault_handler_stack_pointer);
// struct Team
DEFINE_OFFSET_MACRO(TEAM, Team, commpage_address);
// struct Thread
DEFINE_OFFSET_MACRO(THREAD, Thread, team);
DEFINE_OFFSET_MACRO(THREAD, Thread, time_lock);
DEFINE_OFFSET_MACRO(THREAD, Thread, kernel_time);
DEFINE_OFFSET_MACRO(THREAD, Thread, user_time);
@ -88,6 +92,7 @@ dummy()
DEFINE_OFFSET_MACRO(SIGNAL_FRAME_DATA, signal_frame_data, user_data);
DEFINE_OFFSET_MACRO(SIGNAL_FRAME_DATA, signal_frame_data, handler);
DEFINE_OFFSET_MACRO(SIGNAL_FRAME_DATA, signal_frame_data, siginfo_handler);
DEFINE_OFFSET_MACRO(SIGNAL_FRAME_DATA, signal_frame_data, commpage_address);
// struct ucontext_t
DEFINE_OFFSET_MACRO(UCONTEXT_T, __ucontext_t, uc_mcontext);

View File

@ -59,6 +59,10 @@ X86PagingMethod64Bit::Init(kernel_args* args,
fKernelPhysicalPML4 = args->arch_args.phys_pgdir;
fKernelVirtualPML4 = (uint64*)(addr_t)args->arch_args.vir_pgdir;
// enable NX-bit on all CPUs
if (x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD))
call_all_cpus_sync(&_EnableExecutionDisable, NULL);
// Ensure that the user half of the address space is clear. This removes
// the temporary identity mapping made by the boot loader.
memset(fKernelVirtualPML4, 0, sizeof(uint64) * 256);
@ -367,6 +371,8 @@ X86PagingMethod64Bit::PutPageTableEntryInTable(uint64* entry,
page |= X86_64_PTE_USER;
if ((attributes & B_WRITE_AREA) != 0)
page |= X86_64_PTE_WRITABLE;
if ((attributes & B_EXECUTE_AREA) == 0)
page |= X86_64_PTE_NOT_EXECUTABLE;
} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
page |= X86_64_PTE_WRITABLE;
@ -374,3 +380,11 @@ X86PagingMethod64Bit::PutPageTableEntryInTable(uint64* entry,
SetTableEntry(entry, page);
}
void
X86PagingMethod64Bit::_EnableExecutionDisable(void* dummy, int cpu)
{
x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER)
| IA32_MSR_EFER_NX);
}

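The user-mode branch above now sets X86_64_PTE_NOT_EXECUTABLE for every mapping that lacks B_EXECUTE_AREA. A minimal sketch of the flag mapping; the PTE bit positions match this commit's paging headers, while the area-protection values are simplified stand-ins:

    #include <cstdint>

    // PTE bits as in this commit's paging headers
    static const uint64_t kPtePresent       = 1ULL << 0;
    static const uint64_t kPteWritable      = 1ULL << 1;
    static const uint64_t kPteUser          = 1ULL << 2;
    static const uint64_t kPteNotExecutable = 1ULL << 63;

    // area protection flags; values are illustrative stand-ins
    static const uint32_t kWriteArea = 2, kExecuteArea = 4;

    static uint64_t
    user_protection_to_pte(uint32_t attributes)
    {
        uint64_t page = kPtePresent | kPteUser;
        if ((attributes & kWriteArea) != 0)
            page |= kPteWritable;
        if ((attributes & kExecuteArea) == 0)
            page |= kPteNotExecutable;    // W^X: non-code pages get NX
        return page;
    }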
View File

@ -96,6 +96,8 @@ public:
uint32 memoryType);
private:
static void _EnableExecutionDisable(void* dummy, int cpu);
phys_addr_t fKernelPhysicalPML4;
uint64* fKernelVirtualPML4;

View File

@ -627,11 +627,13 @@ X86VMTranslationMap64Bit::Query(addr_t virtualAddress,
// Translate the page state flags.
if ((entry & X86_64_PTE_USER) != 0) {
*_flags |= ((entry & X86_64_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
| B_READ_AREA;
| B_READ_AREA
| ((entry & X86_64_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0);
}
*_flags |= ((entry & X86_64_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
| B_KERNEL_READ_AREA
| ((entry & X86_64_PTE_NOT_EXECUTABLE) == 0 ? B_KERNEL_EXECUTE_AREA : 0)
| ((entry & X86_64_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
| ((entry & X86_64_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
| ((entry & X86_64_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
@ -671,6 +673,8 @@ X86VMTranslationMap64Bit::Protect(addr_t start, addr_t end, uint32 attributes,
newProtectionFlags = X86_64_PTE_USER;
if ((attributes & B_WRITE_AREA) != 0)
newProtectionFlags |= X86_64_PTE_WRITABLE;
if ((attributes & B_EXECUTE_AREA) == 0)
newProtectionFlags |= X86_64_PTE_NOT_EXECUTABLE;
} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
newProtectionFlags = X86_64_PTE_WRITABLE;

View File

@ -59,7 +59,9 @@
#define X86_64_PTE_GLOBAL (1LL << 8)
#define X86_64_PTE_NOT_EXECUTABLE (1LL << 63)
#define X86_64_PTE_ADDRESS_MASK 0x000ffffffffff000L
#define X86_64_PTE_PROTECTION_MASK (X86_64_PTE_WRITABLE | X86_64_PTE_USER)
#define X86_64_PTE_PROTECTION_MASK (X86_64_PTE_NOT_EXECUTABLE \
| X86_64_PTE_WRITABLE \
| X86_64_PTE_USER)
#define X86_64_PTE_MEMORY_TYPE_MASK (X86_64_PTE_WRITE_THROUGH \
| X86_64_PTE_CACHING_DISABLED)

View File

@ -165,6 +165,12 @@ private:
{
x86_write_cr3((addr_t)physicalPDPT);
x86_write_cr4(x86_read_cr4() | IA32_CR4_PAE | IA32_CR4_GLOBAL_PAGES);
// if available, enable the NX-bit (No eXecute)
if (x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) {
x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER)
| IA32_MSR_EFER_NX);
}
}
void _TranslatePageTable(addr_t virtualBase)
@ -778,6 +784,8 @@ X86PagingMethodPAE::PutPageTableEntryInTable(pae_page_table_entry* entry,
page |= X86_PAE_PTE_USER;
if ((attributes & B_WRITE_AREA) != 0)
page |= X86_PAE_PTE_WRITABLE;
if ((attributes & B_EXECUTE_AREA) == 0)
page |= X86_PAE_PTE_NOT_EXECUTABLE;
} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
page |= X86_PAE_PTE_WRITABLE;

View File

@ -687,11 +687,14 @@ X86VMTranslationMapPAE::Query(addr_t virtualAddress,
// translate the page state flags
if ((entry & X86_PAE_PTE_USER) != 0) {
*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
| B_READ_AREA;
| B_READ_AREA
| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0);
}
*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
| B_KERNEL_READ_AREA
| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0
? B_KERNEL_EXECUTE_AREA : 0)
| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
@ -733,11 +736,14 @@ X86VMTranslationMapPAE::QueryInterrupt(addr_t virtualAddress,
// translate the page state flags
if ((entry & X86_PAE_PTE_USER) != 0) {
*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
| B_READ_AREA;
| B_READ_AREA
| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0);
}
*_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
| B_KERNEL_READ_AREA
| ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0
? B_KERNEL_EXECUTE_AREA : 0)
| ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
| ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
@ -766,6 +772,8 @@ X86VMTranslationMapPAE::Protect(addr_t start, addr_t end, uint32 attributes,
newProtectionFlags = X86_PAE_PTE_USER;
if ((attributes & B_WRITE_AREA) != 0)
newProtectionFlags |= X86_PAE_PTE_WRITABLE;
if ((attributes & B_EXECUTE_AREA) == 0)
newProtectionFlags |= X86_PAE_PTE_NOT_EXECUTABLE;
} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
newProtectionFlags = X86_PAE_PTE_WRITABLE;

View File

@ -49,7 +49,8 @@
#define X86_PAE_PTE_IGNORED3 0x0000000000000800LL
#define X86_PAE_PTE_ADDRESS_MASK 0x000ffffffffff000LL
#define X86_PAE_PTE_NOT_EXECUTABLE 0x8000000000000000LL
#define X86_PAE_PTE_PROTECTION_MASK (X86_PAE_PTE_WRITABLE \
#define X86_PAE_PTE_PROTECTION_MASK (X86_PAE_PTE_NOT_EXECUTABLE \
| X86_PAE_PTE_WRITABLE \
| X86_PAE_PTE_USER)
#define X86_PAE_PTE_MEMORY_TYPE_MASK (X86_PAE_PTE_WRITE_THROUGH \
| X86_PAE_PTE_CACHING_DISABLED)

View File

@ -11,7 +11,8 @@
void x86_initialize_commpage_signal_handler();
#ifndef __x86_64__
addr_t x86_get_user_signal_handler_wrapper(bool beosHandler);
addr_t x86_get_user_signal_handler_wrapper(bool beosHandler,
void* commPageAddress);
#endif

View File

@ -15,9 +15,7 @@
static area_id sCommPageArea;
static area_id sUserCommPageArea;
static addr_t* sCommPageAddress;
static addr_t* sUserCommPageAddress;
static void* sFreeCommPageSpace;
static image_id sCommPageImage;
@ -30,20 +28,19 @@ allocate_commpage_entry(int entry, size_t size)
{
void* space = sFreeCommPageSpace;
sFreeCommPageSpace = ALIGN_ENTRY((addr_t)sFreeCommPageSpace + size);
sCommPageAddress[entry] = (addr_t)sUserCommPageAddress
+ ((addr_t)space - (addr_t)sCommPageAddress);
sCommPageAddress[entry] = (addr_t)space - (addr_t)sCommPageAddress;
dprintf("allocate_commpage_entry(%d, %lu) -> %p\n", entry, size,
(void*)sCommPageAddress[entry]);
return space;
}
void*
addr_t
fill_commpage_entry(int entry, const void* copyFrom, size_t size)
{
void* space = allocate_commpage_entry(entry, size);
memcpy(space, copyFrom, size);
return space;
return (addr_t)space - (addr_t)sCommPageAddress;
}
@ -54,20 +51,24 @@ get_commpage_image()
}
area_id
clone_commpage_area(team_id team, void** address)
{
*address = (void*)KERNEL_USER_DATA_BASE;
return vm_clone_area(team, "commpage", address,
B_RANDOMIZED_BASE_ADDRESS, B_READ_AREA | B_EXECUTE_AREA | B_KERNEL_AREA,
REGION_PRIVATE_MAP, sCommPageArea, true);
}
status_t
commpage_init(void)
{
// create a read/write kernel area
sCommPageArea = create_area("commpage", (void **)&sCommPageAddress,
sCommPageArea = create_area("kernel_commpage", (void **)&sCommPageAddress,
B_ANY_ADDRESS, COMMPAGE_SIZE, B_FULL_LOCK,
B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA);
// clone it at a fixed address with user read-only permissions
sUserCommPageAddress = (addr_t*)USER_COMMPAGE_ADDR;
sUserCommPageArea = clone_area("user_commpage",
(void **)&sUserCommPageAddress, B_EXACT_ADDRESS,
B_READ_AREA | B_EXECUTE_AREA, sCommPageArea);
// zero it out
memset(sCommPageAddress, 0, COMMPAGE_SIZE);
@ -79,10 +80,10 @@ commpage_init(void)
sFreeCommPageSpace = ALIGN_ENTRY(&sCommPageAddress[COMMPAGE_TABLE_ENTRIES]);
// create the image for the commpage
sCommPageImage = elf_create_memory_image("commpage", USER_COMMPAGE_ADDR,
COMMPAGE_SIZE, 0, 0);
sCommPageImage = elf_create_memory_image("commpage", 0, COMMPAGE_SIZE, 0,
0);
elf_add_memory_image_symbol(sCommPageImage, "commpage_table",
USER_COMMPAGE_ADDR, COMMPAGE_TABLE_ENTRIES * sizeof(addr_t),
0, COMMPAGE_TABLE_ENTRIES * sizeof(addr_t),
B_SYMBOL_TYPE_DATA);
arch_commpage_init();

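With this change the commpage table stores offsets relative to the commpage base rather than absolute addresses, since every team now maps the commpage at its own randomized location. A sketch of how a user-space caller resolves an entry (the function name is illustrative):

    #include <cstdint>

    typedef uintptr_t addr_t;

    static addr_t
    resolve_commpage_entry(const addr_t* commPageBase, int entry)
    {
        // the table now holds base-relative offsets, so callers add their
        // team's (randomized) commpage base to get an absolute address
        return commPageBase[entry] + (addr_t)commPageBase;
    }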
View File

@ -9,7 +9,6 @@
#include <AutoDeleter.h>
#include <commpage_defs.h>
#include <kernel.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
@ -257,12 +256,6 @@ BreakpointManager::CanAccessAddress(const void* _address, bool write)
if (IS_USER_ADDRESS(address))
return true;
// a commpage address can at least be read
if (address >= USER_COMMPAGE_ADDR
&& address < USER_COMMPAGE_ADDR + COMMPAGE_SIZE) {
return !write;
}
return false;
}

View File

@ -23,6 +23,7 @@
#include <algorithm>
#include <AutoDeleter.h>
#include <commpage.h>
#include <boot/kernel_args.h>
#include <debug.h>
#include <image_defs.h>
@ -1068,7 +1069,7 @@ elf_resolve_symbol(struct elf_image_info *image, elf_sym *symbol,
/*! Until we have shared library support, this just links against the kernel */
static int
elf_relocate(struct elf_image_info *image)
elf_relocate(struct elf_image_info* image, struct elf_image_info* resolveImage)
{
int status = B_NO_ERROR;
@ -1078,7 +1079,7 @@ elf_relocate(struct elf_image_info *image)
if (image->rel) {
TRACE(("total %i rel relocs\n", image->rel_len / (int)sizeof(elf_rel)));
status = arch_elf_relocate_rel(image, sKernelImage, image->rel,
status = arch_elf_relocate_rel(image, resolveImage, image->rel,
image->rel_len);
if (status < B_OK)
return status;
@ -1088,12 +1089,12 @@ elf_relocate(struct elf_image_info *image)
if (image->pltrel_type == DT_REL) {
TRACE(("total %i plt-relocs\n",
image->pltrel_len / (int)sizeof(elf_rel)));
status = arch_elf_relocate_rel(image, sKernelImage, image->pltrel,
status = arch_elf_relocate_rel(image, resolveImage, image->pltrel,
image->pltrel_len);
} else {
TRACE(("total %i plt-relocs\n",
image->pltrel_len / (int)sizeof(elf_rela)));
status = arch_elf_relocate_rela(image, sKernelImage,
status = arch_elf_relocate_rela(image, resolveImage,
(elf_rela *)image->pltrel, image->pltrel_len);
}
if (status < B_OK)
@ -1104,7 +1105,7 @@ elf_relocate(struct elf_image_info *image)
TRACE(("total %i rel relocs\n",
image->rela_len / (int)sizeof(elf_rela)));
status = arch_elf_relocate_rela(image, sKernelImage, image->rela,
status = arch_elf_relocate_rela(image, resolveImage, image->rela,
image->rela_len);
if (status < B_OK)
return status;
@ -1287,7 +1288,7 @@ insert_preloaded_image(preloaded_elf_image *preloadedImage, bool kernel)
if (status != B_OK)
goto error1;
status = elf_relocate(image);
status = elf_relocate(image, sKernelImage);
if (status != B_OK)
goto error1;
} else
@ -1363,6 +1364,7 @@ public:
if (!_Read((runtime_loader_debug_area*)area->Base(), fDebugArea))
return B_BAD_ADDRESS;
fTeam = team;
return B_OK;
}
@ -1381,8 +1383,22 @@ public:
// get the image for the address
image_t image;
status_t error = _FindImageAtAddress(address, image);
if (error != B_OK)
if (error != B_OK) {
// the commpage requires special treatment since the kernel stores its
// symbol information
addr_t commPageAddress = (addr_t)fTeam->commpage_address;
if (address >= commPageAddress
&& address < commPageAddress + COMMPAGE_SIZE) {
if (*_imageName)
*_imageName = "commpage";
address -= (addr_t)commPageAddress;
error = elf_debug_lookup_symbol_address(address, _baseAddress,
_symbolName, NULL, _exactMatch);
if (_baseAddress)
*_baseAddress += (addr_t)fTeam->commpage_address;
}
return error;
}
strlcpy(fImageName, image.name, sizeof(fImageName));
@ -1522,6 +1538,7 @@ public:
// gcc 2.95.3 doesn't like it defined in-place
private:
Team* fTeam;
runtime_loader_debug_area fDebugArea;
char fImageName[B_OS_NAME_LENGTH];
char fSymbolName[256];
@ -1808,6 +1825,9 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry)
ssize_t length;
int fd;
int i;
addr_t delta = 0;
uint32 addressSpec = B_RANDOMIZED_BASE_ADDRESS;
area_id* mappedAreas = NULL;
TRACE(("elf_load: entry path '%s', team %p\n", path, team));
@ -1837,6 +1857,14 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry)
if (status < B_OK)
goto error;
struct elf_image_info* image;
image = create_image_struct();
if (image == NULL) {
status = B_NO_MEMORY;
goto error;
}
image->elf_header = &elfHeader;
// read program header
programHeaders = (elf_phdr *)malloc(
@ -1844,7 +1872,7 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry)
if (programHeaders == NULL) {
dprintf("error allocating space for program headers\n");
status = B_NO_MEMORY;
goto error;
goto error2;
}
TRACE(("reading in program headers at 0x%lx, length 0x%x\n",
@ -1854,12 +1882,12 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry)
if (length < B_OK) {
status = length;
dprintf("error reading in program headers\n");
goto error;
goto error2;
}
if (length != elfHeader.e_phnum * elfHeader.e_phentsize) {
dprintf("short read while reading in program headers\n");
status = -1;
goto error;
goto error2;
}
// construct a nice name for the region we have to create below
@ -1879,7 +1907,14 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry)
strcpy(baseName, leaf);
}
// map the program's segments into memory
// map the program's segments into memory, initially with rw access;
// the correct area protection will be set after relocation
mappedAreas = (area_id*)malloc(sizeof(area_id) * elfHeader.e_phnum);
if (mappedAreas == NULL) {
status = B_NO_MEMORY;
goto error2;
}
image_info imageInfo;
memset(&imageInfo, 0, sizeof(image_info));
@ -1887,13 +1922,23 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry)
for (i = 0; i < elfHeader.e_phnum; i++) {
char regionName[B_OS_NAME_LENGTH];
char *regionAddress;
char *originalRegionAddress;
area_id id;
mappedAreas[i] = -1;
if (programHeaders[i].p_type == PT_DYNAMIC) {
image->dynamic_section = programHeaders[i].p_vaddr;
continue;
}
if (programHeaders[i].p_type != PT_LOAD)
continue;
regionAddress = (char *)ROUNDDOWN(programHeaders[i].p_vaddr,
B_PAGE_SIZE);
regionAddress = (char *)(ROUNDDOWN(programHeaders[i].p_vaddr,
B_PAGE_SIZE) + delta);
originalRegionAddress = regionAddress;
if (programHeaders[i].p_flags & PF_WRITE) {
// rw/data segment
size_t memUpperBound = (programHeaders[i].p_vaddr % B_PAGE_SIZE)
@ -1907,18 +1952,22 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry)
sprintf(regionName, "%s_seg%drw", baseName, i);
id = vm_map_file(team->id, regionName, (void **)&regionAddress,
B_EXACT_ADDRESS, fileUpperBound,
addressSpec, fileUpperBound,
B_READ_AREA | B_WRITE_AREA, REGION_PRIVATE_MAP, false,
fd, ROUNDDOWN(programHeaders[i].p_offset, B_PAGE_SIZE));
if (id < B_OK) {
dprintf("error mapping file data: %s!\n", strerror(id));
status = B_NOT_AN_EXECUTABLE;
goto error;
goto error2;
}
mappedAreas[i] = id;
imageInfo.data = regionAddress;
imageInfo.data_size = memUpperBound;
image->data_region.start = (addr_t)regionAddress;
image->data_region.size = memUpperBound;
// clean up the garbage brought in by mmap (the region beyond the file;
// at least parts of it are the bss and have to be zeroed)
addr_t start = (addr_t)regionAddress
@ -1948,7 +1997,7 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry)
if (id < B_OK) {
dprintf("error allocating bss area: %s!\n", strerror(id));
status = B_NOT_AN_EXECUTABLE;
goto error;
goto error2;
}
}
} else {
@ -1959,18 +2008,62 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry)
+ (programHeaders[i].p_vaddr % B_PAGE_SIZE), B_PAGE_SIZE);
id = vm_map_file(team->id, regionName, (void **)&regionAddress,
B_EXACT_ADDRESS, segmentSize,
B_READ_AREA | B_EXECUTE_AREA, REGION_PRIVATE_MAP, false,
fd, ROUNDDOWN(programHeaders[i].p_offset, B_PAGE_SIZE));
addressSpec, segmentSize,
B_READ_AREA | B_WRITE_AREA, REGION_PRIVATE_MAP, false, fd,
ROUNDDOWN(programHeaders[i].p_offset, B_PAGE_SIZE));
if (id < B_OK) {
dprintf("error mapping file text: %s!\n", strerror(id));
status = B_NOT_AN_EXECUTABLE;
goto error;
goto error2;
}
mappedAreas[i] = id;
imageInfo.text = regionAddress;
imageInfo.text_size = segmentSize;
image->text_region.start = (addr_t)regionAddress;
image->text_region.size = segmentSize;
}
if (addressSpec != B_EXACT_ADDRESS) {
addressSpec = B_EXACT_ADDRESS;
delta = regionAddress - originalRegionAddress;
}
}
image->data_region.delta = delta;
image->text_region.delta = delta;
// modify the dynamic ptr by the delta of the regions
image->dynamic_section += image->text_region.delta;
status = elf_parse_dynamic_section(image);
if (status != B_OK)
goto error2;
status = elf_relocate(image, image);
if (status != B_OK)
goto error2;
// set correct area protection
for (i = 0; i < elfHeader.e_phnum; i++) {
if (mappedAreas[i] == -1)
continue;
uint32 protection = 0;
if (programHeaders[i].p_flags & PF_EXECUTE)
protection |= B_EXECUTE_AREA;
if (programHeaders[i].p_flags & PF_WRITE)
protection |= B_WRITE_AREA;
if (programHeaders[i].p_flags & PF_READ)
protection |= B_READ_AREA;
status = vm_set_area_protection(team->id, mappedAreas[i], protection,
true);
if (status != B_OK)
goto error2;
}
// register the loaded image
@ -1992,9 +2085,15 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry)
TRACE(("elf_load: done!\n"));
*entry = elfHeader.e_entry;
*entry = elfHeader.e_entry + delta;
status = B_OK;
error2:
free(mappedAreas);
image->elf_header = NULL;
delete_elf_image(image);
error:
free(programHeaders);
_kern_close(fd);
@ -2241,7 +2340,7 @@ load_kernel_add_on(const char *path)
if (status != B_OK)
goto error5;
status = elf_relocate(image);
status = elf_relocate(image, sKernelImage);
if (status < B_OK)
goto error5;

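elf_load_user_image() now maps the first PT_LOAD segment with B_RANDOMIZED_BASE_ADDRESS, records how far the VM moved it, and shifts every subsequent address (the remaining segments, the dynamic section, the entry point) by that delta before relocating the image against itself. A sketch of the delta bookkeeping, with simplified types:

    #include <cstdint>

    typedef uintptr_t addr_t;

    struct MappedSegment {
        addr_t requestedAddress;    // ROUNDDOWN(p_vaddr, B_PAGE_SIZE)
        addr_t actualAddress;       // where the VM really placed it
    };

    static addr_t
    image_delta(const MappedSegment& first)
    {
        return first.actualAddress - first.requestedAddress;
    }

    static addr_t
    relocated_entry(addr_t e_entry, const MappedSegment& first)
    {
        return e_entry + image_delta(first);    // *entry = e_entry + delta
    }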
View File

@ -25,6 +25,7 @@
#include <smp.h>
#include <thread.h>
#include <timer.h>
#include <util/Random.h>
#include "scheduler_common.h"
#include "scheduler_tracing.h"
@ -89,19 +90,6 @@ struct scheduler_thread_data {
};
static int
_rand(void)
{
static int next = 0;
if (next == 0)
next = system_time();
next = next * 1103515245 + 12345;
return (next >> 16) & 0x7FFF;
}
static int
dump_run_queue(int argc, char **argv)
{
@ -422,7 +410,7 @@ affine_reschedule(void)
// skip normal threads sometimes
// (twice as probable per priority level)
if ((_rand() >> (15 - priorityDiff)) != 0)
if ((fast_random_value() >> (15 - priorityDiff)) != 0)
break;
nextThread = lowerNextThread;

View File
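fast_random_value() returns 15 random bits; shifting right by (15 - priorityDiff) leaves priorityDiff of them, which are all zero with probability 2^-priorityDiff. The lower-priority thread is therefore chosen half as often per priority level of difference, matching the removed _rand() behavior. A tiny check that prints this probability table:

    #include <cstdio>

    int
    main()
    {
        for (int priorityDiff = 1; priorityDiff <= 5; priorityDiff++) {
            // the lower-priority thread is picked when all bits are zero
            printf("priorityDiff %d -> p = 1/%d\n", priorityDiff,
                1 << priorityDiff);
        }
        return 0;
    }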

@ -23,6 +23,7 @@
#include <scheduler_defs.h>
#include <thread.h>
#include <timer.h>
#include <util/Random.h>
#include "scheduler_common.h"
#include "scheduler_tracing.h"
@ -43,19 +44,6 @@ const bigtime_t kThreadQuantum = 3000;
static Thread *sRunQueue = NULL;
static int
_rand(void)
{
static int next = 0;
if (next == 0)
next = system_time();
next = next * 1103515245 + 12345;
return (next >> 16) & 0x7FFF;
}
static int
dump_run_queue(int argc, char **argv)
{
@ -272,7 +260,7 @@ simple_reschedule(void)
// skip normal threads sometimes
// (twice as probable per priority level)
if ((_rand() >> (15 - priorityDiff)) != 0)
if ((fast_random_value() >> (15 - priorityDiff)) != 0)
break;
nextThread = lowerNextThread;

View File

@ -24,6 +24,7 @@
#include <smp.h>
#include <thread.h>
#include <timer.h>
#include <util/Random.h>
#include "scheduler_common.h"
#include "scheduler_tracing.h"
@ -46,19 +47,6 @@ static int32 sCPUCount = 1;
static int32 sNextCPUForSelection = 0;
static int
_rand(void)
{
static int next = 0;
if (next == 0)
next = system_time();
next = next * 1103515245 + 12345;
return (next >> 16) & 0x7FFF;
}
static int
dump_run_queue(int argc, char **argv)
{
@ -360,7 +348,7 @@ reschedule(void)
// skip normal threads sometimes
// (twice as probable per priority level)
if ((_rand() >> (15 - priorityDiff)) != 0)
if ((fast_random_value() >> (15 - priorityDiff)) != 0)
break;
nextThread = lowerNextThread;

View File

@ -892,6 +892,10 @@ setup_signal_frame(Thread* thread, struct sigaction* action, Signal* signal,
memcpy(frameData.syscall_restart_parameters,
thread->syscall_restart.parameters,
sizeof(frameData.syscall_restart_parameters));
// commpage address
frameData.commpage_address = thread->team->commpage_address;
// syscall_restart_return_value is filled in by the architecture specific
// code.

View File

@ -26,6 +26,7 @@
#include <extended_system_info_defs.h>
#include <commpage.h>
#include <boot_device.h>
#include <elf.h>
#include <file_cache.h>
@ -158,6 +159,9 @@ static int32 sUsedTeams = 1;
static TeamNotificationService sNotificationService;
static const size_t kTeamUserDataReservedSize = 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize = 4 * B_PAGE_SIZE;
// #pragma mark - TeamListIterator
@ -447,6 +451,8 @@ Team::Team(team_id id, bool kernel)
user_data_size = 0;
free_user_threads = NULL;
commpage_address = NULL;
supplementary_groups = NULL;
supplementary_group_count = 0;
@ -1324,23 +1330,44 @@ remove_team_from_group(Team* team)
static status_t
create_team_user_data(Team* team)
create_team_user_data(Team* team, void* exactAddress = NULL)
{
void* address;
size_t size = 4 * B_PAGE_SIZE;
uint32 addressSpec;
if (exactAddress != NULL) {
address = exactAddress;
addressSpec = B_EXACT_ADDRESS;
} else {
address = (void*)KERNEL_USER_DATA_BASE;
addressSpec = B_RANDOMIZED_BASE_ADDRESS;
}
status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
kTeamUserDataReservedSize, RESERVED_AVOID_BASE);
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
virtualRestrictions.address_specification = B_BASE_ADDRESS;
if (result == B_OK || exactAddress != NULL) {
if (exactAddress != NULL)
virtualRestrictions.address = exactAddress;
else
virtualRestrictions.address = address;
virtualRestrictions.address_specification = B_EXACT_ADDRESS;
} else {
virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
}
physical_address_restrictions physicalRestrictions = {};
team->user_data_area = create_area_etc(team->id, "user area", size,
B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0, &virtualRestrictions,
&physicalRestrictions, &address);
team->user_data_area = create_area_etc(team->id, "user area",
kTeamUserDataInitialSize, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0,
&virtualRestrictions, &physicalRestrictions, &address);
if (team->user_data_area < 0)
return team->user_data_area;
team->user_data = (addr_t)address;
team->used_user_data = 0;
team->user_data_size = size;
team->user_data_size = kTeamUserDataInitialSize;
team->free_user_threads = NULL;
return B_OK;
@ -1352,6 +1379,9 @@ delete_team_user_data(Team* team)
{
if (team->user_data_area >= 0) {
vm_delete_area(team->id, team->user_data_area, true);
vm_unreserve_address_range(team->id, (void*)team->user_data,
kTeamUserDataReservedSize);
team->user_data = 0;
team->used_user_data = 0;
team->user_data_size = 0;
@ -1539,6 +1569,32 @@ team_create_thread_start_internal(void* args)
// the arguments are already on the user stack, we no longer need
// them in this form
// Clone commpage area
area_id commPageArea = clone_commpage_area(team->id,
&team->commpage_address);
if (commPageArea < B_OK) {
TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
strerror(commPageArea)));
return commPageArea;
}
// Register commpage image
image_id commPageImage = get_commpage_image();
image_info imageInfo;
err = get_image_info(commPageImage, &imageInfo);
if (err != B_OK) {
TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
strerror(err)));
return err;
}
imageInfo.text = team->commpage_address;
image_id image = register_image(team, &imageInfo, sizeof(image_info));
if (image < 0) {
TRACE(("team_create_thread_start: register_image() failed: %s\n",
strerror(image)));
return image;
}
// NOTE: Normally arch_thread_enter_userspace() never returns, that is
// automatic variables with function scope will never be destroyed.
{
@ -1572,7 +1628,7 @@ team_create_thread_start_internal(void* args)
// enter userspace -- returns only in case of error
return thread_enter_userspace_new_team(thread, (addr_t)entry,
programArgs, NULL);
programArgs, team->commpage_address);
}
@ -1972,6 +2028,8 @@ fork_team(void)
team->SetName(parentTeam->Name());
team->SetArgs(parentTeam->Args());
team->commpage_address = parentTeam->commpage_address;
// Inherit the parent's user/group.
inherit_parent_user_and_group(team, parentTeam);
@ -2035,7 +2093,7 @@ fork_team(void)
while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
if (info.area == parentTeam->user_data_area) {
// don't clone the user area; just create a new one
status = create_team_user_data(team);
status = create_team_user_data(team, info.address);
if (status != B_OK)
break;
@ -3360,7 +3418,7 @@ team_allocate_user_thread(Team* team)
while (true) {
// enough space left?
size_t needed = ROUNDUP(sizeof(user_thread), 8);
size_t needed = ROUNDUP(sizeof(user_thread), 128);
if (team->user_data_size - team->used_user_data < needed) {
// try to resize the area
if (resize_area(team->user_data_area,

View File

@ -821,19 +821,10 @@ create_thread_user_stack(Team* team, Thread* thread, void* _stackBase,
snprintf(nameBuffer, B_OS_NAME_LENGTH, "%s_%" B_PRId32 "_stack",
thread->name, thread->id);
virtual_address_restrictions virtualRestrictions = {};
if (thread->id == team->id) {
// The main thread gets a fixed position at the top of the stack
// address range.
stackBase = (uint8*)(USER_STACK_REGION + USER_STACK_REGION_SIZE
- areaSize);
virtualRestrictions.address_specification = B_EXACT_ADDRESS;
stackBase = (uint8*)USER_STACK_REGION;
} else {
// not a main thread
stackBase = (uint8*)(addr_t)USER_STACK_REGION;
virtualRestrictions.address_specification = B_BASE_ADDRESS;
}
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
virtualRestrictions.address = (void*)stackBase;
physical_address_restrictions physicalRestrictions = {};

View File

@ -14,6 +14,7 @@ KernelMergeObject kernel_util.o :
queue.cpp
ring_buffer.cpp
RadixBitmap.cpp
Random.cpp
: $(TARGET_KERNEL_PIC_CCFLAGS) -DUSING_LIBGCC
;

View File

@ -0,0 +1,128 @@
/*
* Copyright 2013 Haiku, Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* Paweł Dziepak, pdziepak@quarnos.org
*/
#include <util/Random.h>
#include <OS.h>
static uint32 sFastLast = 0;
static uint32 sLast = 0;
static uint32 sSecureLast = 0;
// MD4 helper definitions, based on RFC 1320
#define F(x, y, z) (((x) & (y)) | (~(x) & (z)))
#define G(x, y, z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define STEP(f, a, b, c, d, xk, s) \
(a += f((b), (c), (d)) + (xk), a = (a << (s)) | (a >> (32 - (s))))
// MD4 based hash function. Simplified in order to improve performance.
static uint32
hash(uint32* data)
{
const uint32 kMD4Round2 = 0x5a827999;
const uint32 kMD4Round3 = 0x6ed9eba1;
uint32 a = 0x67452301;
uint32 b = 0xefcdab89;
uint32 c = 0x98badcfe;
uint32 d = 0x10325476;
STEP(F, a, b, c, d, data[0], 3);
STEP(F, d, a, b, c, data[1], 7);
STEP(F, c, d, a, b, data[2], 11);
STEP(F, b, c, d, a, data[3], 19);
STEP(F, a, b, c, d, data[4], 3);
STEP(F, d, a, b, c, data[5], 7);
STEP(F, c, d, a, b, data[6], 11);
STEP(F, b, c, d, a, data[7], 19);
STEP(G, a, b, c, d, data[1] + kMD4Round2, 3);
STEP(G, d, a, b, c, data[5] + kMD4Round2, 5);
STEP(G, c, d, a, b, data[6] + kMD4Round2, 9);
STEP(G, b, c, d, a, data[2] + kMD4Round2, 13);
STEP(G, a, b, c, d, data[3] + kMD4Round2, 3);
STEP(G, d, a, b, c, data[7] + kMD4Round2, 5);
STEP(G, c, d, a, b, data[4] + kMD4Round2, 9);
STEP(G, b, c, d, a, data[0] + kMD4Round2, 13);
STEP(H, a, b, c, d, data[1] + kMD4Round3, 3);
STEP(H, d, a, b, c, data[6] + kMD4Round3, 9);
STEP(H, c, d, a, b, data[5] + kMD4Round3, 11);
STEP(H, b, c, d, a, data[2] + kMD4Round3, 15);
STEP(H, a, b, c, d, data[3] + kMD4Round3, 3);
STEP(H, d, a, b, c, data[4] + kMD4Round3, 9);
STEP(H, c, d, a, b, data[7] + kMD4Round3, 11);
STEP(H, b, c, d, a, data[0] + kMD4Round3, 15);
return b;
}
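Each STEP above is one MD4 round operation: it adds the round function of the other three state words plus one message word to a, then rotates a left by s bits. The same operation written out as a plain function (an illustrative sketch only; the code uses macros so that F, G and H stay inlined):

	static inline uint32
	md4_step(uint32 f(uint32, uint32, uint32), uint32& a, uint32 b, uint32 c,
		uint32 d, uint32 xk, uint32 s)
	{
		// mix in the round function and a message word, then rotate left
		a += f(b, c, d) + xk;
		a = (a << s) | (a >> (32 - s));
		return a;
	}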
// In the following functions there are race conditions when many threads
// attempt to update the static "last" variables. However, since such
// conflicts are non-deterministic, they are not a big problem.
// A simple linear congruential generator
unsigned int
fast_random_value()
{
if (sFastLast == 0)
sFastLast = system_time();
uint32 random = sFastLast * 1103515245 + 12345;
sFastLast = random;
return (random >> 16) & 0x7fff;
}
// Taken from "Random number generators: good ones are hard to find",
// Park and Miller, Communications of the ACM, vol. 31, no. 10,
// October 1988, p. 1195.
unsigned int
random_value()
{
if (sLast == 0)
sLast = system_time();
uint32 hi = sLast / 127773;
uint32 lo = sLast % 127773;
int32 random = 16807 * lo - 2836 * hi;
if (random <= 0)
random += MAX_RANDOM_VALUE;
sLast = random;
return random % (MAX_RANDOM_VALUE + 1);
}
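The hi/lo split above is Schrage's method: with M = 2^31 - 1 = 16807 * 127773 + 2836 and 2836 < 127773, the value 16807 * seed mod M can be computed without overflowing 32-bit arithmetic. A standalone check of that identity (a sketch, assuming MAX_RANDOM_VALUE is 2^31 - 1):

	#include <cassert>
	#include <cstdint>

	int main()
	{
		const uint64_t kModulus = 2147483647;	// 2^31 - 1
		assert(kModulus == 16807ull * 127773 + 2836);

		uint64_t seed = 123456789;
		int64_t schrage = int64_t(16807 * (seed % 127773))
			- int64_t(2836 * (seed / 127773));
		if (schrage <= 0)
			schrage += kModulus;
		// Schrage's result matches the direct 64 bit computation
		assert(uint64_t(schrage) == 16807 * seed % kModulus);
		return 0;
	}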
unsigned int
secure_random_value()
{
static vint32 count = 0;
uint32 data[8];
data[0] = atomic_add(&count, 1);
data[1] = system_time();
data[2] = find_thread(NULL);
data[3] = smp_get_current_cpu();
data[4] = smp_get_num_cpus();
data[5] = sFastLast;
data[6] = sLast;
data[7] = sSecureLast;
uint32 random = hash(data);
sSecureLast = random;
return random;
}
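VMUserAddressSpace::_RandomizeAddress() below obtains its entropy through a secure_get_random<T>() template that is declared in util/Random.h and not part of this diff. A plausible sketch of such a wrapper, shown for context only (the actual header may differ):

	template<typename T>
	T
	secure_get_random()
	{
		// assemble the result from as many 32 bit secure values as needed
		T random = 0;
		for (size_t i = 0; i < sizeof(T); i += sizeof(uint32))
			random |= T(secure_random_value()) << (8 * i);
		return random;
	}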

View File

@ -17,6 +17,7 @@
#include <heap.h>
#include <thread.h>
#include <util/atomic.h>
#include <util/Random.h>
#include <vm/vm.h>
#include <vm/VMArea.h>
@ -29,6 +30,15 @@
#endif
#ifdef B_HAIKU_64_BIT
const addr_t VMUserAddressSpace::kMaxRandomize = 0x8000000000ul;
const addr_t VMUserAddressSpace::kMaxInitialRandomize = 0x20000000000ul;
#else
const addr_t VMUserAddressSpace::kMaxRandomize = 0x800000ul;
const addr_t VMUserAddressSpace::kMaxInitialRandomize = 0x2000000ul;
#endif
/*! Verifies that an area with the given aligned base and size fits into
the spot defined by base and limit and checks for overflows.
*/
@ -40,6 +50,14 @@ is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
}
static inline bool
is_randomized(uint32 addressSpec)
{
return addressSpec == B_RANDOMIZED_ANY_ADDRESS
|| addressSpec == B_RANDOMIZED_BASE_ADDRESS;
}
VMUserAddressSpace::VMUserAddressSpace(team_id id, addr_t base, size_t size)
:
VMAddressSpace(id, base, size, "address space"),
@ -137,6 +155,7 @@ VMUserAddressSpace::InsertArea(VMArea* _area, size_t size,
break;
case B_BASE_ADDRESS:
case B_RANDOMIZED_BASE_ADDRESS:
searchBase = (addr_t)addressRestrictions->address;
searchEnd = fEndAddress;
break;
@ -144,11 +163,8 @@ VMUserAddressSpace::InsertArea(VMArea* _area, size_t size,
case B_ANY_ADDRESS:
case B_ANY_KERNEL_ADDRESS:
case B_ANY_KERNEL_BLOCK_ADDRESS:
case B_RANDOMIZED_ANY_ADDRESS:
searchBase = fBase;
// TODO: remove this again when vm86 mode is moved into the kernel
// completely (currently needs a userland address space!)
if (searchBase == USER_BASE)
searchBase = USER_BASE_ANY;
searchEnd = fEndAddress;
break;
@ -156,6 +172,11 @@ VMUserAddressSpace::InsertArea(VMArea* _area, size_t size,
return B_BAD_VALUE;
}
// TODO: remove this again when vm86 mode is moved into the kernel
// completely (currently needs a userland address space!)
if (addressRestrictions->address_specification != B_EXACT_ADDRESS)
searchBase = max_c(searchBase, USER_BASE_ANY);
status = _InsertAreaSlot(searchBase, size, searchEnd,
addressRestrictions->address_specification,
addressRestrictions->alignment, area, allocationFlags);
@ -371,6 +392,29 @@ VMUserAddressSpace::Dump() const
}
addr_t
VMUserAddressSpace::_RandomizeAddress(addr_t start, addr_t end,
size_t alignment, bool initial)
{
ASSERT((start & addr_t(alignment - 1)) == 0);
if (start == end)
return start;
addr_t range = end - start;
if (initial)
range = min_c(range, kMaxInitialRandomize);
else
range = min_c(range, kMaxRandomize);
addr_t random = secure_get_random<addr_t>();
random %= range;
random &= ~addr_t(alignment - 1);
return start + random;
}
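A worked example of the ranges involved (values assumed; 32-bit constants from above):

	// randomizing a non-initial placement inside the user stack region
	addr_t start = 0x70000000;	// aligned base of the candidate range
	addr_t end = 0x72000000;
	size_t alignment = 0x1000;	// B_PAGE_SIZE

	addr_t range = min_c(end - start, kMaxRandomize);	// clamped to 0x800000
	// random % range, rounded down to the alignment, is added to start,
	// so the chosen base lies in [0x70000000, 0x707ff000]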
/*! Finds a reserved area that covers the region spanned by \a start and
\a size, inserts the \a area into that region and makes sure that
there are reserved regions for the remaining parts.
@ -459,6 +503,7 @@ VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
VMUserArea* last = NULL;
VMUserArea* next;
bool foundSpot = false;
addr_t originalStart = 0;
TRACE(("VMUserAddressSpace::_InsertAreaSlot: address space %p, start "
"0x%lx, size %ld, end 0x%lx, addressSpec %" B_PRIu32 ", area %p\n",
@ -491,6 +536,11 @@ VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end,
start = ROUNDUP(start, alignment);
if (addressSpec == B_RANDOMIZED_BASE_ADDRESS) {
originalStart = start;
start = _RandomizeAddress(start, end - size, alignment, true);
}
// walk up to the spot where we should start searching
second_chance:
VMUserAreaList::Iterator it = fAreas.GetIterator();
@ -510,13 +560,23 @@ second_chance:
case B_ANY_ADDRESS:
case B_ANY_KERNEL_ADDRESS:
case B_ANY_KERNEL_BLOCK_ADDRESS:
case B_RANDOMIZED_ANY_ADDRESS:
case B_BASE_ADDRESS:
case B_RANDOMIZED_BASE_ADDRESS:
{
// find a hole big enough for a new area
if (last == NULL) {
// see if we can build it at the beginning of the virtual map
addr_t alignedBase = ROUNDUP(start, alignment);
if (is_valid_spot(start, alignedBase, size,
next == NULL ? end : next->Base())) {
addr_t nextBase = next == NULL ? end : min_c(next->Base(), end);
if (is_valid_spot(start, alignedBase, size, nextBase)) {
addr_t rangeEnd = min_c(nextBase - size, end);
if (is_randomized(addressSpec)) {
alignedBase = _RandomizeAddress(alignedBase, rangeEnd,
alignment);
}
foundSpot = true;
area->SetBase(alignedBase);
break;
@ -527,11 +587,19 @@ second_chance:
}
// keep walking
while (next != NULL) {
while (next != NULL && next->Base() + size - 1 <= end) {
addr_t alignedBase = ROUNDUP(last->Base() + last->Size(),
alignment);
addr_t nextBase = min_c(end, next->Base());
if (is_valid_spot(last->Base() + (last->Size() - 1),
alignedBase, size, next->Base())) {
alignedBase, size, nextBase)) {
addr_t rangeEnd = min_c(nextBase - size, end);
if (is_randomized(addressSpec)) {
alignedBase = _RandomizeAddress(alignedBase,
rangeEnd, alignment);
}
foundSpot = true;
area->SetBase(alignedBase);
break;
@ -548,10 +616,33 @@ second_chance:
alignment);
if (is_valid_spot(last->Base() + (last->Size() - 1), alignedBase,
size, end)) {
if (is_randomized(addressSpec)) {
alignedBase = _RandomizeAddress(alignedBase, end - size,
alignment);
}
// got a spot
foundSpot = true;
area->SetBase(alignedBase);
break;
} else if (addressSpec == B_BASE_ADDRESS
|| addressSpec == B_RANDOMIZED_BASE_ADDRESS) {
// we didn't find a free spot in the requested range, so we'll
// try again without any restrictions
// for a randomized base we first retry from the caller's original
// base, and only then fall back to a randomized pick anywhere
if (!is_randomized(addressSpec)) {
	start = USER_BASE_ANY;
	addressSpec = B_ANY_ADDRESS;
} else if (start == originalStart) {
	start = USER_BASE_ANY;
	addressSpec = B_RANDOMIZED_ANY_ADDRESS;
} else {
	start = originalStart;
	addressSpec = B_RANDOMIZED_BASE_ADDRESS;
}
last = NULL;
goto second_chance;
} else if (area->id != RESERVED_AREA_ID) {
// We didn't find a free spot - if there are any reserved areas,
// we can now test those for free space
@ -562,7 +653,8 @@ second_chance:
if (next->id != RESERVED_AREA_ID) {
last = next;
continue;
}
} else if (next->Base() + size - 1 > end)
break;
// TODO: take free space after the reserved area into
// account!
@ -582,23 +674,49 @@ second_chance:
if ((next->protection & RESERVED_AVOID_BASE) == 0
&& alignedBase == next->Base()
&& next->Size() >= size) {
addr_t rangeEnd = min_c(next->Base() + next->Size() - size,
	end);
if (is_randomized(addressSpec)) {
alignedBase = _RandomizeAddress(next->Base(),
rangeEnd, alignment);
}
addr_t offset = alignedBase - next->Base();
// The new area will be placed at the beginning of the
// reserved area and the reserved area will be offset
// and resized
foundSpot = true;
next->SetBase(next->Base() + size);
next->SetSize(next->Size() - size);
next->SetBase(next->Base() + offset + size);
next->SetSize(next->Size() - offset - size);
area->SetBase(alignedBase);
break;
}
if (is_valid_spot(next->Base(), alignedBase, size,
next->Base() + (next->Size() - 1))) {
min_c(next->Base() + next->Size() - 1, end))) {
// The new area will be placed at the end of the
// reserved area, and the reserved area will be resized
// to make space
alignedBase = ROUNDDOWN(
next->Base() + next->Size() - size, alignment);
if (is_randomized(addressSpec)) {
addr_t alignedNextBase = ROUNDUP(next->Base(),
alignment);
addr_t startRange = next->Base() + next->Size();
startRange -= size + kMaxRandomize;
startRange = ROUNDDOWN(startRange, alignment);
startRange = max_c(startRange, alignedNextBase);
addr_t rangeEnd
= min_c(next->Base() + next->Size() - size,
end);
alignedBase = _RandomizeAddress(startRange,
rangeEnd, alignment);
} else {
alignedBase = ROUNDDOWN(
next->Base() + next->Size() - size, alignment);
}
foundSpot = true;
next->SetSize(alignedBase - next->Base());
@ -610,55 +728,10 @@ second_chance:
last = next;
}
}
break;
}
case B_BASE_ADDRESS:
{
// find a hole big enough for a new area beginning with "start"
if (last == NULL) {
// see if we can build it at the beginning of the specified
// start
if (next == NULL || next->Base() > start + (size - 1)) {
foundSpot = true;
area->SetBase(start);
break;
}
last = next;
next = it.Next();
}
// keep walking
while (next != NULL) {
if (next->Base() - (last->Base() + last->Size()) >= size) {
// we found a spot (it'll be filled up below)
break;
}
last = next;
next = it.Next();
}
addr_t lastEnd = last->Base() + (last->Size() - 1);
if (next != NULL || end - lastEnd >= size) {
// got a spot
foundSpot = true;
if (lastEnd < start)
area->SetBase(start);
else
area->SetBase(lastEnd + 1);
break;
}
// we didn't find a free spot in the requested range, so we'll
// try again without any restrictions
start = fBase;
addressSpec = B_ANY_ADDRESS;
last = NULL;
goto second_chance;
}
case B_EXACT_ADDRESS:
// see if we can create it exactly here
if ((last == NULL || last->Base() + (last->Size() - 1) < start)

View File

@ -53,6 +53,9 @@ public:
virtual void Dump() const;
private:
static addr_t _RandomizeAddress(addr_t start, addr_t end,
size_t alignment, bool initial = false);
status_t _InsertAreaIntoReservedRegion(addr_t start,
size_t size, VMUserArea* area,
uint32 allocationFlags);
@ -62,6 +65,9 @@ private:
uint32 allocationFlags);
private:
static const addr_t kMaxRandomize;
static const addr_t kMaxInitialRandomize;
VMUserAreaList fAreas;
mutable VMUserArea* fAreaHint;
};

View File

@ -267,13 +267,14 @@ static cache_info* sCacheInfoTable;
static void delete_area(VMAddressSpace* addressSpace, VMArea* area,
bool addressSpaceCleanup);
static status_t vm_soft_fault(VMAddressSpace* addressSpace, addr_t address,
bool isWrite, bool isUser, vm_page** wirePage,
bool isWrite, bool isExecute, bool isUser, vm_page** wirePage,
VMAreaWiredRange* wiredRange = NULL);
static status_t map_backing_store(VMAddressSpace* addressSpace,
VMCache* cache, off_t offset, const char* areaName, addr_t size, int wiring,
int protection, int mapping, uint32 flags,
const virtual_address_restrictions* addressRestrictions, bool kernel,
VMArea** _area, void** _virtualAddress);
static void fix_protection(uint32* protection);
// #pragma mark -
@ -315,6 +316,7 @@ enum {
PAGE_FAULT_ERROR_KERNEL_ONLY,
PAGE_FAULT_ERROR_WRITE_PROTECTED,
PAGE_FAULT_ERROR_READ_PROTECTED,
PAGE_FAULT_ERROR_EXECUTE_PROTECTED,
PAGE_FAULT_ERROR_KERNEL_BAD_USER_MEMORY,
PAGE_FAULT_ERROR_NO_ADDRESS_SPACE
};
@ -346,6 +348,10 @@ public:
case PAGE_FAULT_ERROR_READ_PROTECTED:
out.Print("page fault error: area: %ld, read protected", fArea);
break;
case PAGE_FAULT_ERROR_EXECUTE_PROTECTED:
out.Print("page fault error: area: %ld, execute protected",
fArea);
break;
case PAGE_FAULT_ERROR_KERNEL_BAD_USER_MEMORY:
out.Print("page fault error: kernel touching bad user memory");
break;
@ -1219,6 +1225,8 @@ vm_create_anonymous_area(team_id team, const char *name, addr_t size,
case B_BASE_ADDRESS:
case B_ANY_KERNEL_ADDRESS:
case B_ANY_KERNEL_BLOCK_ADDRESS:
case B_RANDOMIZED_ANY_ADDRESS:
case B_RANDOMIZED_BASE_ADDRESS:
break;
default:
@ -2520,10 +2528,12 @@ vm_copy_area(team_id team, const char* name, void** _address,
}
static status_t
status_t
vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,
bool kernel)
{
fix_protection(&newProtection);
TRACE(("vm_set_area_protection(team = %#" B_PRIx32 ", area = %#" B_PRIx32
", protection = %#" B_PRIx32 ")\n", team, areaID, newProtection));
@ -3992,8 +4002,8 @@ forbid_page_faults(void)
status_t
vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite, bool isUser,
addr_t* newIP)
vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite, bool isExecute,
bool isUser, addr_t* newIP)
{
FTRACE(("vm_page_fault: page fault at 0x%lx, ip 0x%lx\n", address,
faultAddress));
@ -4036,8 +4046,8 @@ vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite, bool isUser,
}
if (status == B_OK) {
status = vm_soft_fault(addressSpace, pageAddress, isWrite, isUser,
NULL);
status = vm_soft_fault(addressSpace, pageAddress, isWrite, isExecute,
isUser, NULL);
}
if (status < B_OK) {
@ -4072,8 +4082,8 @@ vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite, bool isUser,
"\"%s\" (%" B_PRId32 ") tried to %s address %#lx, ip %#lx "
"(\"%s\" +%#lx)\n", thread->name, thread->id,
thread->team->Name(), thread->team->id,
isWrite ? "write" : "read", address, faultAddress,
area ? area->name : "???", faultAddress - (area ?
isWrite ? "write" : (isExecute ? "execute" : "read"), address,
faultAddress, area ? area->name : "???", faultAddress - (area ?
area->Base() : 0x0));
// We can print a stack trace of the userland thread here.
@ -4362,7 +4372,8 @@ fault_get_page(PageFaultContext& context)
*/
static status_t
vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress,
bool isWrite, bool isUser, vm_page** wirePage, VMAreaWiredRange* wiredRange)
bool isWrite, bool isExecute, bool isUser, vm_page** wirePage,
VMAreaWiredRange* wiredRange)
{
FTRACE(("vm_soft_fault: thid 0x%" B_PRIx32 " address 0x%" B_PRIxADDR ", "
"isWrite %d, isUser %d\n", thread_get_current_thread_id(),
@ -4417,7 +4428,16 @@ vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress,
VMPageFaultTracing::PAGE_FAULT_ERROR_WRITE_PROTECTED));
status = B_PERMISSION_DENIED;
break;
} else if (!isWrite && (protection
} else if (isExecute && (protection
& (B_EXECUTE_AREA
| (isUser ? 0 : B_KERNEL_EXECUTE_AREA))) == 0) {
dprintf("instruction fetch attempted on execute-protected area 0x%"
B_PRIx32 " at %p\n", area->id, (void*)originalAddress);
TPF(PageFaultError(area->id,
VMPageFaultTracing::PAGE_FAULT_ERROR_EXECUTE_PROTECTED));
status = B_PERMISSION_DENIED;
break;
} else if (!isWrite && !isExecute && (protection
& (B_READ_AREA | (isUser ? 0 : B_KERNEL_READ_AREA))) == 0) {
dprintf("read access attempted on read-protected area 0x%" B_PRIx32
" at %p\n", area->id, (void*)originalAddress);
@ -4754,7 +4774,8 @@ vm_set_area_memory_type(area_id id, phys_addr_t physicalBase, uint32 type)
/*! This function enforces some protection properties:
- if B_WRITE_AREA is set, B_WRITE_KERNEL_AREA is set as well
- if B_WRITE_AREA is set, B_KERNEL_WRITE_AREA is set as well
- if B_EXECUTE_AREA is set, B_KERNEL_EXECUTE_AREA is set as well
- if only B_READ_AREA has been set, B_KERNEL_READ_AREA is also set
- if no protection is specified, it defaults to B_KERNEL_READ_AREA
and B_KERNEL_WRITE_AREA.
@ -4768,6 +4789,8 @@ fix_protection(uint32* protection)
*protection |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA;
else
*protection |= B_KERNEL_READ_AREA;
if ((*protection & B_EXECUTE_AREA) != 0)
*protection |= B_KERNEL_EXECUTE_AREA;
}
}
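A quick illustration of the new rule (a sketch; constants as declared in OS.h):

	uint32 protection = B_READ_AREA | B_EXECUTE_AREA;
	fix_protection(&protection);
	// B_WRITE_AREA is not set, so only B_KERNEL_READ_AREA is added for the
	// read/write bits; the new clause additionally maps B_EXECUTE_AREA to
	// B_EXECUTE_AREA | B_KERNEL_EXECUTE_AREA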
@ -5220,8 +5243,8 @@ vm_wire_page(team_id team, addr_t address, bool writable,
cacheChainLocker.Unlock();
addressSpaceLocker.Unlock();
error = vm_soft_fault(addressSpace, pageAddress, writable, isUser,
&page, &info->range);
error = vm_soft_fault(addressSpace, pageAddress, writable, false,
isUser, &page, &info->range);
if (error != B_OK) {
// The page could not be mapped -- clean up.
@ -5399,7 +5422,7 @@ lock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags)
addressSpaceLocker.Unlock();
error = vm_soft_fault(addressSpace, nextAddress, writable,
isUser, &page, range);
false, isUser, &page, range);
addressSpaceLocker.Lock();
cacheChainLocker.SetTo(vm_area_get_locked_cache(area));
@ -5788,8 +5811,6 @@ _get_next_area_info(team_id team, ssize_t* cookie, area_info* info, size_t size)
status_t
set_area_protection(area_id area, uint32 newProtection)
{
fix_protection(&newProtection);
return vm_set_area_protection(VMAddressSpace::KernelID(), area,
newProtection, true);
}
@ -6017,8 +6038,6 @@ _user_set_area_protection(area_id area, uint32 newProtection)
if ((newProtection & ~B_USER_PROTECTION) != 0)
return B_BAD_VALUE;
fix_protection(&newProtection);
return vm_set_area_protection(VMAddressSpace::CurrentID(), area,
newProtection, false);
}
@ -6125,6 +6144,11 @@ _user_create_area(const char* userName, void** userAddress, uint32 addressSpec,
&& IS_KERNEL_ADDRESS(address))
return B_BAD_VALUE;
if (addressSpec == B_ANY_ADDRESS)
addressSpec = B_RANDOMIZED_ANY_ADDRESS;
if (addressSpec == B_BASE_ADDRESS)
addressSpec = B_RANDOMIZED_BASE_ADDRESS;
fix_protection(&protection);
virtual_address_restrictions virtualRestrictions = {};

View File

@ -5,7 +5,7 @@ ENTRY(runtime_loader)
SEARCH_DIR("libgcc");
SECTIONS
{
. = 0x00100000 + SIZEOF_HEADERS;
. = 0x00000000 + SIZEOF_HEADERS;
.interp : { *(.interp) }
.hash : { *(.hash) }

View File

@ -5,7 +5,7 @@ ENTRY(runtime_loader)
SEARCH_DIR("libgcc");
SECTIONS
{
. = 0x00200000 + SIZEOF_HEADERS;
. = 0x00000000 + SIZEOF_HEADERS;
.interp : { *(.interp) }
.hash : { *(.hash) }

View File

@ -24,6 +24,8 @@ struct rld_export *__gRuntimeLoader = NULL;
// This little bugger is set to something meaningful by the runtime loader
// Ugly, eh?
const void* __gCommPageAddress;
char *__progname = NULL;
int __libc_argc;
char **__libc_argv;
@ -44,6 +46,8 @@ void
initialize_before(image_id imageID)
{
char *programPath = __gRuntimeLoader->program_args->args[0];
__gCommPageAddress = __gRuntimeLoader->commpage_address;
if (programPath) {
if ((__progname = strrchr(programPath, '/')) == NULL)
__progname = programPath;
@ -62,7 +66,7 @@ initialize_before(image_id imageID)
pthread_self()->id = find_thread(NULL);
__init_time();
__init_time((addr_t)__gCommPageAddress);
__init_heap();
__init_env(__gRuntimeLoader->program_args);
__init_heap_post_env();

View File

@ -17,11 +17,13 @@
#include <asm_defs.h>
#include <commpage_defs.h>
#define _SYSCALL(name, n) \
.align 8; \
FUNCTION(name): \
movl $n,%eax; \
jmp *(USER_COMMPAGE_ADDR + COMMPAGE_ENTRY_X86_SYSCALL * 4); \
#define _SYSCALL(name, n) \
.align 8; \
FUNCTION(name): \
movl $n, %eax; \
movl __gCommPageAddress, %edx; \
addl 4 * COMMPAGE_ENTRY_X86_SYSCALL(%edx), %edx; \
jmp *%edx; \
FUNCTION_END(name)
#define SYSCALL0(name, n) _SYSCALL(name, n)
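Since the commpage no longer sits at a fixed address, its table entries are stored as offsets relative to the commpage base, and the stub has to compute the jump target at run time. Roughly the following, expressed in C (illustrative only):

	uint32* table = (uint32*)__gCommPageAddress;
	void* syscallEntry = (uint8*)__gCommPageAddress
		+ table[COMMPAGE_ENTRY_X86_SYSCALL];
	// %eax already holds the syscall number; jump to syscallEntry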

View File

@ -24,10 +24,11 @@ static struct real_time_data* sRealTimeData;
void
__init_time(void)
__init_time(addr_t commPageTable)
{
sRealTimeData = (struct real_time_data*)
USER_COMMPAGE_TABLE[COMMPAGE_ENTRY_REAL_TIME_DATA];
(((addr_t*)commPageTable)[COMMPAGE_ENTRY_REAL_TIME_DATA]
+ commPageTable);
__arch_init_time(sRealTimeData, false);
}

View File

@ -99,12 +99,12 @@ __init_heap(void)
// size of the heap is guaranteed until the space is really needed.
sHeapBase = (void *)kHeapReservationBase;
status_t status = _kern_reserve_address_range((addr_t *)&sHeapBase,
B_EXACT_ADDRESS, kHeapReservationSize);
B_RANDOMIZED_BASE_ADDRESS, kHeapReservationSize);
if (status != B_OK)
sHeapBase = NULL;
sHeapArea = create_area("heap", (void **)&sHeapBase,
status == B_OK ? B_EXACT_ADDRESS : B_BASE_ADDRESS,
status == B_OK ? B_EXACT_ADDRESS : B_RANDOMIZED_BASE_ADDRESS,
kInitialHeapSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
if (sHeapArea < B_OK)
return sHeapArea;
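The pattern above: reserve a large range with a randomized base once, then commit the initial heap area at exactly the reserved (already randomized) address; only if the reservation failed does create_area randomize the area base itself. A sketch of that control flow, not the actual code:

	addr_t base = kHeapReservationBase;
	if (_kern_reserve_address_range(&base, B_RANDOMIZED_BASE_ADDRESS,
			kHeapReservationSize) == B_OK) {
		// the reservation picked the randomized base; commit exactly there
		create_area("heap", (void**)&base, B_EXACT_ADDRESS, kInitialHeapSize,
			B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
	} else {
		// reservation failed: let create_area pick a randomized base itself
		base = 0;
		create_area("heap", (void**)&base, B_RANDOMIZED_BASE_ADDRESS,
			kInitialHeapSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
	}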
@ -271,8 +271,8 @@ hoardSbrk(long size)
// allocation.
if (area < 0) {
base = (void*)(sFreeHeapBase + sHeapAreaSize);
area = create_area("heap", &base, B_BASE_ADDRESS, newHeapSize,
B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
area = create_area("heap", &base, B_RANDOMIZED_BASE_ADDRESS,
newHeapSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
}
if (area < 0) {

View File

@ -10,9 +10,13 @@
.align 4
FUNCTION(memcpy):
jmp *(USER_COMMPAGE_ADDR + COMMPAGE_ENTRY_X86_MEMCPY * 4)
movl __gCommPageAddress, %eax
addl 4 * COMMPAGE_ENTRY_X86_MEMCPY(%eax), %eax
jmp *%eax
FUNCTION_END(memcpy)
FUNCTION(memset):
jmp *(USER_COMMPAGE_ADDR + COMMPAGE_ENTRY_X86_MEMSET * 4)
movl __gCommPageAddress, %eax
addl 4 * COMMPAGE_ENTRY_X86_MEMSET(%eax), %eax
jmp *%eax
FUNCTION_END(memset)

View File

@ -8,10 +8,15 @@
FUNCTION(memcpy):
jmp *(USER_COMMPAGE_ADDR + COMMPAGE_ENTRY_X86_MEMCPY * 8)
movq __gCommPageAddress@GOTPCREL(%rip), %rax
movq (%rax), %rax
addq 8 * COMMPAGE_ENTRY_X86_MEMCPY(%rax), %rax
jmp *%rax
FUNCTION_END(memcpy)
FUNCTION(memset):
jmp *(USER_COMMPAGE_ADDR + COMMPAGE_ENTRY_X86_MEMSET * 8)
movq __gCommPageAddress@GOTPCREL(%rip), %rax
movq (%rax), %rax
addq 8 * COMMPAGE_ENTRY_X86_MEMSET(%rax), %rax
jmp *%rax
FUNCTION_END(memset)

View File

@ -113,9 +113,13 @@ mmap(void* address, size_t length, int protection, int flags, int fd,
int mapping = (flags & MAP_SHARED) != 0
? REGION_NO_PRIVATE_MAP : REGION_PRIVATE_MAP;
uint32 addressSpec = address == NULL ? B_ANY_ADDRESS : B_BASE_ADDRESS;
uint32 addressSpec;
if ((flags & MAP_FIXED) != 0)
addressSpec = B_EXACT_ADDRESS;
else if (address != NULL)
addressSpec = B_RANDOMIZED_BASE_ADDRESS;
else
addressSpec = B_RANDOMIZED_ANY_ADDRESS;
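With this mapping, a plain anonymous mmap() now gets a randomized placement, a hint without MAP_FIXED becomes a randomized search from the hint, and MAP_FIXED keeps exact semantics. A user-space sketch (assuming MAP_ANONYMOUS is available):

	#include <sys/mman.h>

	// NULL hint -> B_RANDOMIZED_ANY_ADDRESS: randomized, anywhere
	void* any = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	// hint, no MAP_FIXED -> B_RANDOMIZED_BASE_ADDRESS: randomized, searching
	// upwards from the hint
	void* hinted = mmap((void*)0x20000000, 4096, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	// MAP_FIXED -> B_EXACT_ADDRESS: exactly the requested address
	void* fixed = mmap((void*)0x30000000, 4096, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);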
uint32 areaProtection = 0;
if ((protection & PROT_READ) != 0)

View File

@ -91,7 +91,7 @@ Ld runtime_loader :
$(TARGET_STATIC_LIBSUPC++)
$(TARGET_GCC_LIBGCC)
: $(HAIKU_TOP)/src/system/ldscripts/$(TARGET_ARCH)/runtime_loader.ld
: --no-undefined
: --no-undefined -shared -soname=runtime_loader
;
HaikuSubInclude arch $(TARGET_ARCH) ;

View File

@ -1031,7 +1031,7 @@ rldelf_init(void)
runtime_loader_debug_area *area;
area_id areaID = _kern_create_area(RUNTIME_LOADER_DEBUG_AREA_NAME,
(void **)&area, B_ANY_ADDRESS, size, B_NO_LOCK,
(void **)&area, B_RANDOMIZED_ANY_ADDRESS, size, B_NO_LOCK,
B_READ_AREA | B_WRITE_AREA);
if (areaID < B_OK) {
FATAL("Failed to create debug area.\n");

View File

@ -65,4 +65,5 @@ void
rldexport_init(void)
{
gRuntimeLoader.program_args = gProgramArgs;
gRuntimeLoader.commpage_address = __gCommPageAddress;
}

View File

@ -178,8 +178,8 @@ static status_t
add_area(size_t size)
{
void *base;
area_id area = _kern_create_area("rld heap", &base, B_ANY_ADDRESS, size,
B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
area_id area = _kern_create_area("rld heap", &base,
B_RANDOMIZED_ANY_ADDRESS, size, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
if (area < B_OK)
return area;

View File

@ -165,7 +165,7 @@ topological_sort(image_t* image, uint32 slot, image_t** initList,
/*! Finds the load address and address specifier of the given image region.
*/
static void
get_image_region_load_address(image_t* image, uint32 index, int32 lastDelta,
get_image_region_load_address(image_t* image, uint32 index, long lastDelta,
bool fixed, addr_t& loadAddress, uint32& addressSpecifier)
{
if (image->dynamic_ptr != 0 && !fixed) {
@ -173,7 +173,7 @@ get_image_region_load_address(image_t* image, uint32 index, int32 lastDelta,
if (index == 0) {
// but only the first segment gets a free ride
loadAddress = RLD_PROGRAM_BASE;
addressSpecifier = B_BASE_ADDRESS;
addressSpecifier = B_RANDOMIZED_BASE_ADDRESS;
} else {
loadAddress = image->regions[index].vmstart + lastDelta;
addressSpecifier = B_EXACT_ADDRESS;
@ -298,7 +298,7 @@ map_image(int fd, char const* path, image_t* image, bool fixed)
addr_t loadAddress;
size_t reservedSize = 0;
size_t length = 0;
uint32 addressSpecifier = B_ANY_ADDRESS;
uint32 addressSpecifier = B_RANDOMIZED_ANY_ADDRESS;
for (uint32 i = 0; i < image->num_regions; i++) {
// for BeOS compatibility: if we load an old BeOS executable, we

View File

@ -22,6 +22,7 @@
struct user_space_program_args *gProgramArgs;
void *__gCommPageAddress;
static const char *
@ -366,12 +367,13 @@ out:
specified by its ld-script.
*/
int
runtime_loader(void *_args)
runtime_loader(void* _args, void* commpage)
{
void *entry = NULL;
int returnCode;
gProgramArgs = (struct user_space_program_args *)_args;
__gCommPageAddress = commpage;
// Relocate the args and env arrays -- they are organized in a contiguous
// buffer which the kernel just copied into user space without adjusting the

View File

@ -43,6 +43,7 @@ struct SymbolLookupCache;
extern struct user_space_program_args* gProgramArgs;
extern void* __gCommPageAddress;
extern struct rld_export gRuntimeLoader;
extern char* (*gGetEnv)(const char* name);
extern bool gProgramLoaded;
@ -53,7 +54,7 @@ extern image_t* gProgramImage;
extern "C" {
#endif
int runtime_loader(void* arg);
int runtime_loader(void* arg, void* commpage);
int open_executable(char* name, image_type type, const char* rpath,
const char* programPath, const char* compatibilitySubDir);
status_t test_executable(const char* path, char* interpreter);