diff --git a/headers/os/kernel/OS.h b/headers/os/kernel/OS.h index 8532de03a9..dc91207136 100644 --- a/headers/os/kernel/OS.h +++ b/headers/os/kernel/OS.h @@ -73,11 +73,14 @@ typedef struct area_info { #define B_32_BIT_CONTIGUOUS 6 /* B_CONTIGUOUS, < 4 GB physical address */ /* address spec for create_area(), and clone_area() */ -#define B_ANY_ADDRESS 0 -#define B_EXACT_ADDRESS 1 -#define B_BASE_ADDRESS 2 -#define B_CLONE_ADDRESS 3 -#define B_ANY_KERNEL_ADDRESS 4 +#define B_ANY_ADDRESS 0 +#define B_EXACT_ADDRESS 1 +#define B_BASE_ADDRESS 2 +#define B_CLONE_ADDRESS 3 +#define B_ANY_KERNEL_ADDRESS 4 +/* B_ANY_KERNEL_BLOCK_ADDRESS 5 */ +#define B_RANDOMIZED_ANY_ADDRESS 6 +#define B_RANDOMIZED_BASE_ADDRESS 7 /* area protection */ #define B_READ_AREA 1 diff --git a/headers/private/kernel/arch/arm/arch_kernel.h b/headers/private/kernel/arch/arm/arch_kernel.h index 44a05e74a6..766ab42b10 100644 --- a/headers/private/kernel/arch/arm/arch_kernel.h +++ b/headers/private/kernel/arch/arm/arch_kernel.h @@ -25,7 +25,7 @@ #define USER_SIZE (0x80000000 - (0x10000 + 0x100000)) #define USER_TOP (USER_BASE + (USER_SIZE - 1)) -#define KERNEL_USER_DATA_BASE 0x6fff0000 +#define KERNEL_USER_DATA_BASE 0x60000000 #define USER_STACK_REGION 0x70000000 #define USER_STACK_REGION_SIZE ((USER_TOP - USER_STACK_REGION) + 1) diff --git a/headers/private/kernel/arch/m68k/arch_kernel.h b/headers/private/kernel/arch/m68k/arch_kernel.h index 7f9806999a..cef14fb2f0 100644 --- a/headers/private/kernel/arch/m68k/arch_kernel.h +++ b/headers/private/kernel/arch/m68k/arch_kernel.h @@ -25,7 +25,7 @@ #define USER_SIZE (0x80000000 - (0x10000 + 0x100000)) #define USER_TOP (USER_BASE + (USER_SIZE - 1)) -#define KERNEL_USER_DATA_BASE 0x6fff0000 +#define KERNEL_USER_DATA_BASE 0x60000000 #define USER_STACK_REGION 0x70000000 #define USER_STACK_REGION_SIZE ((USER_TOP - USER_STACK_REGION) + 1) diff --git a/headers/private/kernel/arch/mipsel/arch_kernel.h b/headers/private/kernel/arch/mipsel/arch_kernel.h index 42f3c8fbaf..237459177d 100644 --- a/headers/private/kernel/arch/mipsel/arch_kernel.h +++ b/headers/private/kernel/arch/mipsel/arch_kernel.h @@ -28,7 +28,7 @@ #define USER_SIZE (0x80000000 - (0x10000 + 0x100000)) #define USER_TOP (USER_BASE + (USER_SIZE - 1)) -#define KERNEL_USER_DATA_BASE 0x6fff0000 +#define KERNEL_USER_DATA_BASE 0x60000000 #define USER_STACK_REGION 0x70000000 #define USER_STACK_REGION_SIZE ((USER_TOP - USER_STACK_REGION) + 1) diff --git a/headers/private/kernel/arch/ppc/arch_kernel.h b/headers/private/kernel/arch/ppc/arch_kernel.h index c7d448084b..803a9aade7 100644 --- a/headers/private/kernel/arch/ppc/arch_kernel.h +++ b/headers/private/kernel/arch/ppc/arch_kernel.h @@ -25,7 +25,7 @@ #define USER_SIZE (0x80000000 - (0x10000 + 0x100000)) #define USER_TOP (USER_BASE + (USER_SIZE - 1)) -#define KERNEL_USER_DATA_BASE 0x6fff0000 +#define KERNEL_USER_DATA_BASE 0x60000000 #define USER_STACK_REGION 0x70000000 #define USER_STACK_REGION_SIZE ((USER_TOP - USER_STACK_REGION) + 1) diff --git a/headers/private/kernel/arch/x86/arch_cpu.h b/headers/private/kernel/arch/x86/arch_cpu.h index 58694f3423..bc9f4c8599 100644 --- a/headers/private/kernel/arch/x86/arch_cpu.h +++ b/headers/private/kernel/arch/x86/arch_cpu.h @@ -39,6 +39,11 @@ #define IA32_MSR_EFER 0xc0000080 +// MSR EFER bits +// reference +#define IA32_MSR_EFER_SYSCALL (1 << 0) +#define IA32_MSR_EFER_NX (1 << 11) + // x86_64 MSRs. #define IA32_MSR_STAR 0xc0000081 #define IA32_MSR_LSTAR 0xc0000082 @@ -131,6 +136,13 @@ #define IA32_FEATURE_AMD_EXT_3DNOWEXT (1 << 30) // 3DNow! 
extensions #define IA32_FEATURE_AMD_EXT_3DNOW (1 << 31) // 3DNow! +// some of the features from cpuid eax 0x80000001, edx register (AMD) are also +// available on Intel processors +#define IA32_FEATURES_INTEL_EXT (IA32_FEATURE_AMD_EXT_SYSCALL \ + | IA32_FEATURE_AMD_EXT_NX \ + | IA32_FEATURE_AMD_EXT_RDTSCP \ + | IA32_FEATURE_AMD_EXT_LONG) + // x86 defined features from cpuid eax 6, eax register // reference http://www.intel.com/Assets/en_US/PDF/appnote/241618.pdf (Table 5-11) #define IA32_FEATURE_DTS (1 << 0) //Digital Thermal Sensor diff --git a/headers/private/kernel/arch/x86/arch_kernel.h b/headers/private/kernel/arch/x86/arch_kernel.h index 02f9b1588c..f5d6c4dba4 100644 --- a/headers/private/kernel/arch/x86/arch_kernel.h +++ b/headers/private/kernel/arch/x86/arch_kernel.h @@ -48,8 +48,8 @@ #define USER_SIZE (0x800000000000 - 0x200000) #define USER_TOP (USER_BASE + (USER_SIZE - 1)) -#define KERNEL_USER_DATA_BASE 0x7fffefff0000 -#define USER_STACK_REGION 0x7ffff0000000 +#define KERNEL_USER_DATA_BASE 0x7f0000000000 +#define USER_STACK_REGION 0x7f0000000000 #define USER_STACK_REGION_SIZE ((USER_TOP - USER_STACK_REGION) + 1) @@ -76,7 +76,7 @@ #define USER_SIZE (KERNEL_BASE - 0x10000) #define USER_TOP (USER_BASE + (USER_SIZE - 1)) -#define KERNEL_USER_DATA_BASE 0x6fff0000 +#define KERNEL_USER_DATA_BASE 0x60000000 #define USER_STACK_REGION 0x70000000 #define USER_STACK_REGION_SIZE ((USER_TOP - USER_STACK_REGION) + 1) diff --git a/headers/private/kernel/commpage.h b/headers/private/kernel/commpage.h index b30f82a4cb..dd8ac97612 100644 --- a/headers/private/kernel/commpage.h +++ b/headers/private/kernel/commpage.h @@ -18,8 +18,9 @@ extern "C" { status_t commpage_init(void); status_t commpage_init_post_cpus(void); void* allocate_commpage_entry(int entry, size_t size); -void* fill_commpage_entry(int entry, const void* copyFrom, size_t size); +addr_t fill_commpage_entry(int entry, const void* copyFrom, size_t size); image_id get_commpage_image(); +area_id clone_commpage_area(team_id team, void** address); // implemented in the architecture specific part status_t arch_commpage_init(void); diff --git a/headers/private/kernel/ksignal.h b/headers/private/kernel/ksignal.h index b7ec3c3bab..4727053c8d 100644 --- a/headers/private/kernel/ksignal.h +++ b/headers/private/kernel/ksignal.h @@ -52,6 +52,7 @@ struct signal_frame_data { int32 thread_flags; uint64 syscall_restart_return_value; uint8 syscall_restart_parameters[SYSCALL_RESTART_PARAMETER_SIZE]; + void* commpage_address; }; diff --git a/headers/private/kernel/thread_types.h b/headers/private/kernel/thread_types.h index 78e893da3b..1c06271a50 100644 --- a/headers/private/kernel/thread_types.h +++ b/headers/private/kernel/thread_types.h @@ -259,6 +259,8 @@ struct Team : TeamThreadIteratorEntry, KernelReferenceable, size_t used_user_data; struct free_user_thread* free_user_threads; + void* commpage_address; + struct team_debug_info debug_info; // protected by scheduler lock diff --git a/headers/private/kernel/util/Random.h b/headers/private/kernel/util/Random.h new file mode 100644 index 0000000000..d30976d198 --- /dev/null +++ b/headers/private/kernel/util/Random.h @@ -0,0 +1,87 @@ +/* + * Copyright 2013 Haiku, Inc. All rights reserved. + * Distributed under the terms of the MIT License. 
+ * + * Authors: + * PaweÅ‚ Dziepak, pdziepak@quarnos.org + */ +#ifndef KERNEL_UTIL_RANDOM_H +#define KERNEL_UTIL_RANDOM_H + + +#include +#include + + +#define MAX_FAST_RANDOM_VALUE 0x7fff +#define MAX_RANDOM_VALUE 0x7fffffffu +#define MAX_SECURE_RANDOM_VALUE 0xffffffffu + +static const int kFastRandomShift = 15; +static const int kRandomShift = 31; +static const int kSecureRandomShift = 32; + +#ifdef __cplusplus +extern "C" { +#endif + +unsigned int fast_random_value(void); +unsigned int random_value(void); +unsigned int secure_random_value(void); + +#ifdef __cplusplus +} +#endif + + +#ifdef __cplusplus + +template +T +fast_get_random() +{ + size_t shift = 0; + T random = 0; + while (shift < sizeof(T) * 8) { + random |= (T)fast_random_value() << shift; + shift += kFastRandomShift; + } + + return random; +} + + +template +T +get_random() +{ + size_t shift = 0; + T random = 0; + while (shift < sizeof(T) * 8) { + random |= (T)random_value() << shift; + shift += kRandomShift; + } + + return random; +} + + +template +T +secure_get_random() +{ + size_t shift = 0; + T random = 0; + while (shift < sizeof(T) * 8) { + random |= (T)secure_random_value() << shift; + shift += kSecureRandomShift; + } + + return random; +} + + +#endif // __cplusplus + +#endif // KERNEL_UTIL_RANDOM_H + diff --git a/headers/private/kernel/vm/vm.h b/headers/private/kernel/vm/vm.h index bd9fd16aa8..1962f418a2 100644 --- a/headers/private/kernel/vm/vm.h +++ b/headers/private/kernel/vm/vm.h @@ -121,6 +121,8 @@ status_t vm_delete_area(team_id teamID, area_id areaID, bool kernel); status_t vm_create_vnode_cache(struct vnode *vnode, struct VMCache **_cache); status_t vm_set_area_memory_type(area_id id, phys_addr_t physicalBase, uint32 type); +status_t vm_set_area_protection(team_id team, area_id areaID, + uint32 newProtection, bool kernel); status_t vm_get_page_mapping(team_id team, addr_t vaddr, phys_addr_t *paddr); bool vm_test_map_modification(struct vm_page *page); void vm_clear_map_flags(struct vm_page *page, uint32 flags); diff --git a/headers/private/kernel/vm/vm_priv.h b/headers/private/kernel/vm/vm_priv.h index afb15a714a..9091c60c81 100644 --- a/headers/private/kernel/vm/vm_priv.h +++ b/headers/private/kernel/vm/vm_priv.h @@ -28,7 +28,7 @@ extern "C" { // Should only be used by vm internals status_t vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite, - bool isUser, addr_t *newip); + bool isExecute, bool isUser, addr_t *newip); void vm_unreserve_memory(size_t bytes); status_t vm_try_reserve_memory(size_t bytes, int priority, bigtime_t timeout); status_t vm_daemon_init(void); diff --git a/headers/private/libroot/libroot_private.h b/headers/private/libroot/libroot_private.h index 7a3357b614..593ffd3526 100644 --- a/headers/private/libroot/libroot_private.h +++ b/headers/private/libroot/libroot_private.h @@ -34,7 +34,7 @@ void __init_env(const struct user_space_program_args *args); void __init_heap(void); void __init_heap_post_env(void); -void __init_time(void); +void __init_time(addr_t commPageTable); void __arch_init_time(struct real_time_data *data, bool setDefaults); bigtime_t __arch_get_system_time_offset(struct real_time_data *data); bigtime_t __get_system_time_offset(); diff --git a/headers/private/runtime_loader/runtime_loader.h b/headers/private/runtime_loader/runtime_loader.h index cc9b96cd98..39e675f8a9 100644 --- a/headers/private/runtime_loader/runtime_loader.h +++ b/headers/private/runtime_loader/runtime_loader.h @@ -51,6 +51,7 @@ struct rld_export { void (*call_termination_hooks)(); const struct 
user_space_program_args *program_args; + const void* commpage_address; }; extern struct rld_export *__gRuntimeLoader; diff --git a/headers/private/system/arch/arm/arch_commpage_defs.h b/headers/private/system/arch/arm/arch_commpage_defs.h index 57fb821e96..39bf6f641d 100644 --- a/headers/private/system/arch/arm/arch_commpage_defs.h +++ b/headers/private/system/arch/arm/arch_commpage_defs.h @@ -12,8 +12,4 @@ //#define COMMPAGE_ENTRY_M68K_SYSCALL (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 0) //#define COMMPAGE_ENTRY_M68K_MEMCPY (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 1) -/* 0xffff0000 colides with IO space mapped with TT1 on Atari */ -#warning ARM: determine good place for compage.. -#define ARCH_USER_COMMPAGE_ADDR (0xfeff0000) - #endif /* _SYSTEM_ARCH_M68K_COMMPAGE_DEFS_H */ diff --git a/headers/private/system/arch/m68k/arch_commpage_defs.h b/headers/private/system/arch/m68k/arch_commpage_defs.h index 0b6d6e354d..71bc119904 100644 --- a/headers/private/system/arch/m68k/arch_commpage_defs.h +++ b/headers/private/system/arch/m68k/arch_commpage_defs.h @@ -12,7 +12,4 @@ #define COMMPAGE_ENTRY_M68K_SYSCALL (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 0) #define COMMPAGE_ENTRY_M68K_MEMCPY (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 1) -/* 0xffff0000 colides with IO space mapped with TT1 on Atari */ -#define ARCH_USER_COMMPAGE_ADDR (0xfeff0000) - #endif /* _SYSTEM_ARCH_M68K_COMMPAGE_DEFS_H */ diff --git a/headers/private/system/arch/mipsel/arch_commpage_defs.h b/headers/private/system/arch/mipsel/arch_commpage_defs.h index 64320ef662..516877d8be 100644 --- a/headers/private/system/arch/mipsel/arch_commpage_defs.h +++ b/headers/private/system/arch/mipsel/arch_commpage_defs.h @@ -14,7 +14,5 @@ #define COMMPAGE_ENTRY_MIPSEL_SYSCALL (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 0) #define COMMPAGE_ENTRY_MIPSEL_MEMCPY (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 1) -#define ARCH_USER_COMMPAGE_ADDR (0xffff0000) - #endif /* _SYSTEM_ARCH_MIPSEL_COMMPAGE_DEFS_H */ diff --git a/headers/private/system/arch/ppc/arch_commpage_defs.h b/headers/private/system/arch/ppc/arch_commpage_defs.h index 419d388a1a..d2cd8cbe6c 100644 --- a/headers/private/system/arch/ppc/arch_commpage_defs.h +++ b/headers/private/system/arch/ppc/arch_commpage_defs.h @@ -12,6 +12,4 @@ #define COMMPAGE_ENTRY_PPC_SYSCALL (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 0) #define COMMPAGE_ENTRY_PPC_MEMCPY (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 1) -#define ARCH_USER_COMMPAGE_ADDR (0xffff0000) - #endif /* _SYSTEM_ARCH_PPC_COMMPAGE_DEFS_H */ diff --git a/headers/private/system/arch/x86/arch_commpage_defs.h b/headers/private/system/arch/x86/arch_commpage_defs.h index 5f27f676ee..f2bda3e148 100644 --- a/headers/private/system/arch/x86/arch_commpage_defs.h +++ b/headers/private/system/arch/x86/arch_commpage_defs.h @@ -16,7 +16,7 @@ (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 3) #define COMMPAGE_ENTRY_X86_SIGNAL_HANDLER_BEOS \ (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 4) - -#define ARCH_USER_COMMPAGE_ADDR (0xffff0000) +#define COMMPAGE_ENTRY_X86_THREAD_EXIT \ + (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 5) #endif /* _SYSTEM_ARCH_x86_COMMPAGE_DEFS_H */ diff --git a/headers/private/system/arch/x86_64/arch_commpage_defs.h b/headers/private/system/arch/x86_64/arch_commpage_defs.h index bf7809e38a..85fa54e104 100644 --- a/headers/private/system/arch/x86_64/arch_commpage_defs.h +++ b/headers/private/system/arch/x86_64/arch_commpage_defs.h @@ -13,7 +13,7 @@ #define COMMPAGE_ENTRY_X86_MEMSET (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 1) #define COMMPAGE_ENTRY_X86_SIGNAL_HANDLER \ (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 2) - 
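With the per-architecture ARCH_USER_COMMPAGE_ADDR constants removed, a commpage table entry no longer holds an absolute address: fill_commpage_entry() now stores (and returns) the entry's offset from the commpage base, and each team records its own, independently placed commpage in Team::commpage_address. A minimal sketch of the resulting lookup, mirroring the pattern used by the x86 signal and thread code further down in this diff (the helper name is illustrative, not part of the patch):

// Resolve a commpage table entry to an absolute address in the
// current team's address space. The table sits at the start of the
// commpage and each slot stores an offset from the commpage base.
static inline addr_t
resolve_commpage_entry(const void* commPageAddress, int entry)
{
	const addr_t* table = (const addr_t*)commPageAddress;
	return table[entry] + (addr_t)commPageAddress;
}
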
-#define ARCH_USER_COMMPAGE_ADDR (0xffffffffffff0000) +#define COMMPAGE_ENTRY_X86_THREAD_EXIT \ + (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 3) #endif /* _SYSTEM_ARCH_x86_64_COMMPAGE_DEFS_H */ diff --git a/headers/private/system/commpage_defs.h b/headers/private/system/commpage_defs.h index c47c0b828d..0a69403182 100644 --- a/headers/private/system/commpage_defs.h +++ b/headers/private/system/commpage_defs.h @@ -19,11 +19,6 @@ #define COMMPAGE_SIGNATURE 'COMM' #define COMMPAGE_VERSION 1 -#define USER_COMMPAGE_ADDR ARCH_USER_COMMPAGE_ADDR - // set by the architecture specific implementation - -#define USER_COMMPAGE_TABLE ((void**)(USER_COMMPAGE_ADDR)) - #include #endif /* _SYSTEM_COMMPAGE_DEFS_H */ diff --git a/headers/private/system/thread_defs.h b/headers/private/system/thread_defs.h index 2d559378f4..3d7a3c1654 100644 --- a/headers/private/system/thread_defs.h +++ b/headers/private/system/thread_defs.h @@ -15,7 +15,7 @@ #define USER_STACK_GUARD_SIZE (4 * B_PAGE_SIZE) // 16 kB #define USER_MAIN_THREAD_STACK_SIZE (16 * 1024 * 1024) // 16 MB #define USER_STACK_SIZE (256 * 1024) // 256 kB -#define MIN_USER_STACK_SIZE (4 * 1024) // 4 KB +#define MIN_USER_STACK_SIZE (8 * 1024) // 8 kB #define MAX_USER_STACK_SIZE (16 * 1024 * 1024) // 16 MB diff --git a/src/add-ons/kernel/file_systems/nfs4/Connection.cpp b/src/add-ons/kernel/file_systems/nfs4/Connection.cpp index 5994b51eee..2fa044d3f7 100644 --- a/src/add-ons/kernel/file_systems/nfs4/Connection.cpp +++ b/src/add-ons/kernel/file_systems/nfs4/Connection.cpp @@ -16,8 +16,9 @@ #include #include -#include #include +#include +#include #define NFS4_PORT 2049 @@ -655,7 +656,7 @@ Connection::Connect() PeerAddress address(fPeerAddress.Family()); do { - port = rand() % (IPPORT_RESERVED - NFS_MIN_PORT); + port = get_random() % (IPPORT_RESERVED - NFS_MIN_PORT); port += NFS_MIN_PORT; if (attempt == 9) diff --git a/src/add-ons/kernel/file_systems/nfs4/FileSystem.cpp b/src/add-ons/kernel/file_systems/nfs4/FileSystem.cpp index c1a5a7b9ad..184e79a373 100644 --- a/src/add-ons/kernel/file_systems/nfs4/FileSystem.cpp +++ b/src/add-ons/kernel/file_systems/nfs4/FileSystem.cpp @@ -13,6 +13,7 @@ #include #include +#include #include "Request.h" #include "RootInode.h" @@ -32,9 +33,7 @@ FileSystem::FileSystem(const MountConfiguration& configuration) fId(1), fConfiguration(configuration) { - fOpenOwner = rand(); - fOpenOwner <<= 32; - fOpenOwner |= rand(); + fOpenOwner = get_random(); mutex_init(&fOpenOwnerLock, NULL); mutex_init(&fOpenLock, NULL); diff --git a/src/add-ons/kernel/file_systems/nfs4/RPCServer.cpp b/src/add-ons/kernel/file_systems/nfs4/RPCServer.cpp index 7190fee8db..fc5cb132a1 100644 --- a/src/add-ons/kernel/file_systems/nfs4/RPCServer.cpp +++ b/src/add-ons/kernel/file_systems/nfs4/RPCServer.cpp @@ -12,6 +12,7 @@ #include #include +#include #include "RPCCallbackServer.h" #include "RPCReply.h" @@ -83,7 +84,7 @@ Server::Server(Connection* connection, PeerAddress* address) fPrivateData(NULL), fCallback(NULL), fRepairCount(0), - fXID(rand() << 1) + fXID(get_random()) { ASSERT(connection != NULL); ASSERT(address != NULL); diff --git a/src/add-ons/kernel/file_systems/nfs4/RequestBuilder.cpp b/src/add-ons/kernel/file_systems/nfs4/RequestBuilder.cpp index f900b846ae..e518f86681 100644 --- a/src/add-ons/kernel/file_systems/nfs4/RequestBuilder.cpp +++ b/src/add-ons/kernel/file_systems/nfs4/RequestBuilder.cpp @@ -12,6 +12,8 @@ #include #include +#include + #include "Cookie.h" #include "OpenState.h" #include "RPCCallback.h" @@ -659,8 +661,7 @@ 
RequestBuilder::SetClientID(RPC::Server* server) return B_NO_MEMORY; fRequest->Stream().AddUInt(OpSetClientID); - uint64 verifier = rand(); - verifier = verifier << 32 | rand(); + uint64 verifier = get_random(); fRequest->Stream().AddUHyper(verifier); status_t result = _GenerateClientId(fRequest->Stream(), server); diff --git a/src/apps/debugger/debugger_interface/DebuggerInterface.cpp b/src/apps/debugger/debugger_interface/DebuggerInterface.cpp index d4305348df..641d9dc8d6 100644 --- a/src/apps/debugger/debugger_interface/DebuggerInterface.cpp +++ b/src/apps/debugger/debugger_interface/DebuggerInterface.cpp @@ -13,7 +13,6 @@ #include #include -#include #include #include #include @@ -517,24 +516,6 @@ DebuggerInterface::GetImageInfos(BObjectList& infos) } } - // Also add the "commpage" image, which belongs to the kernel, but is used - // by userland teams. - cookie = 0; - while (get_next_image_info(B_SYSTEM_TEAM, &cookie, &imageInfo) == B_OK) { - if ((addr_t)imageInfo.text >= USER_COMMPAGE_ADDR - && (addr_t)imageInfo.text < USER_COMMPAGE_ADDR + COMMPAGE_SIZE) { - ImageInfo* info = new(std::nothrow) ImageInfo(B_SYSTEM_TEAM, - imageInfo.id, imageInfo.name, imageInfo.type, - (addr_t)imageInfo.text, imageInfo.text_size, - (addr_t)imageInfo.data, imageInfo.data_size); - if (info == NULL || !infos.AddItem(info)) { - delete info; - return B_NO_MEMORY; - } - break; - } - } - return B_OK; } diff --git a/src/kits/debug/Image.cpp b/src/kits/debug/Image.cpp index 1022792350..b2f23d3a3e 100644 --- a/src/kits/debug/Image.cpp +++ b/src/kits/debug/Image.cpp @@ -400,3 +400,65 @@ KernelImage::Init(const image_info& info) fSymbolTable, &fSymbolCount, fStringTable, &fStringTableSize, &fLoadDelta); } + + +CommPageImage::CommPageImage() +{ +} + + +CommPageImage::~CommPageImage() +{ + delete[] fSymbolTable; + delete[] fStringTable; +} + + +status_t +CommPageImage::Init(const image_info& info) +{ + // find kernel image for commpage + image_id commPageID = -1; + image_info commPageInfo; + + int32 cookie = 0; + while (_kern_get_next_image_info(B_SYSTEM_TEAM, &cookie, &commPageInfo, + sizeof(image_info)) == B_OK) { + if (!strcmp("commpage", commPageInfo.name)) { + commPageID = commPageInfo.id; + break; + } + } + if (commPageID < 0) + return B_ENTRY_NOT_FOUND; + + fInfo = commPageInfo; + fInfo.text = info.text; + + // get the table sizes + fSymbolCount = 0; + fStringTableSize = 0; + status_t error = _kern_read_kernel_image_symbols(commPageID, NULL, + &fSymbolCount, NULL, &fStringTableSize, NULL); + if (error != B_OK) + return error; + + // allocate the tables + fSymbolTable = new(std::nothrow) elf_sym[fSymbolCount]; + fStringTable = new(std::nothrow) char[fStringTableSize]; + if (fSymbolTable == NULL || fStringTable == NULL) + return B_NO_MEMORY; + + // get the info + error = _kern_read_kernel_image_symbols(commPageID, + fSymbolTable, &fSymbolCount, fStringTable, &fStringTableSize, NULL); + if (error != B_OK) { + delete[] fSymbolTable; + delete[] fStringTable; + return error; + } + + fLoadDelta = (addr_t)info.text; + + return B_OK; +} diff --git a/src/kits/debug/Image.h b/src/kits/debug/Image.h index aa4e76db89..4d4be64951 100644 --- a/src/kits/debug/Image.h +++ b/src/kits/debug/Image.h @@ -111,6 +111,15 @@ public: status_t Init(const image_info& info); }; + +class CommPageImage : public SymbolTableBasedImage { +public: + CommPageImage(); + virtual ~CommPageImage(); + + status_t Init(const image_info& info); +}; + } // namespace Debug } // namespace BPrivate diff --git a/src/kits/debug/SymbolLookup.cpp 
b/src/kits/debug/SymbolLookup.cpp index cb061064e6..2d259fb666 100644 --- a/src/kits/debug/SymbolLookup.cpp +++ b/src/kits/debug/SymbolLookup.cpp @@ -295,6 +295,14 @@ SymbolLookup::Init() error = kernelImage->Init(imageInfo); image = kernelImage; + } else if (!strcmp("commpage", imageInfo.name)) { + // commpage image + CommPageImage* commPageImage = new(std::nothrow) CommPageImage; + if (commPageImage == NULL) + return B_NO_MEMORY; + + error = commPageImage->Init(imageInfo); + image = commPageImage; } else { // userland image -- try to load an image file ImageFile* imageFile = new(std::nothrow) ImageFile; diff --git a/src/system/kernel/arch/arm/arch_int.cpp b/src/system/kernel/arch/arm/arch_int.cpp index b6f18d47f4..b41cc7154d 100644 --- a/src/system/kernel/arch/arm/arch_int.cpp +++ b/src/system/kernel/arch/arm/arch_int.cpp @@ -277,7 +277,7 @@ arch_arm_data_abort(struct iframe *frame) enable_interrupts(); - vm_page_fault(far, frame->pc, isWrite, isUser, &newip); + vm_page_fault(far, frame->pc, isWrite, false, isUser, &newip); if (newip != 0) { // the page fault handler wants us to modify the iframe to set the diff --git a/src/system/kernel/arch/m68k/arch_int.cpp b/src/system/kernel/arch/m68k/arch_int.cpp index a982a75e8a..b80942ac66 100644 --- a/src/system/kernel/arch/m68k/arch_int.cpp +++ b/src/system/kernel/arch/m68k/arch_int.cpp @@ -238,6 +238,7 @@ m68k_exception_entry(struct iframe *iframe) vm_page_fault(fault_address(iframe), iframe->cpu.pc, fault_was_write(iframe), // store or load + false, iframe->cpu.sr & SR_S, // was the system in user or supervisor &newip); if (newip != 0) { diff --git a/src/system/kernel/arch/ppc/arch_int.cpp b/src/system/kernel/arch/ppc/arch_int.cpp index 61ede968ef..ac1c60b284 100644 --- a/src/system/kernel/arch/ppc/arch_int.cpp +++ b/src/system/kernel/arch/ppc/arch_int.cpp @@ -164,6 +164,7 @@ ppc_exception_entry(int vector, struct iframe *iframe) vm_page_fault(iframe->dar, iframe->srr0, iframe->dsisr & (1 << 25), // store or load + false, iframe->srr1 & (1 << 14), // was the system in user or supervisor &newip); if (newip != 0) { diff --git a/src/system/kernel/arch/x86/32/arch.S b/src/system/kernel/arch/x86/32/arch.S index 97eb069a40..90ef56b363 100644 --- a/src/system/kernel/arch/x86/32/arch.S +++ b/src/system/kernel/arch/x86/32/arch.S @@ -115,7 +115,7 @@ FUNCTION(x86_swap_pgdir): ret FUNCTION_END(x86_swap_pgdir) -/* thread exit stub - is copied to the userspace stack in arch_thread_enter_uspace() */ +/* thread exit stub */ .align 4 FUNCTION(x86_userspace_thread_exit): pushl %eax diff --git a/src/system/kernel/arch/x86/32/interrupts.S b/src/system/kernel/arch/x86/32/interrupts.S index 5fde6b901e..912c583cb9 100644 --- a/src/system/kernel/arch/x86/32/interrupts.S +++ b/src/system/kernel/arch/x86/32/interrupts.S @@ -766,7 +766,9 @@ FUNCTION(x86_sysenter): pushl $USER_CODE_SEG // user cs // user_eip - movl USER_COMMPAGE_ADDR + 4 * COMMPAGE_ENTRY_X86_SYSCALL, %edx + movl THREAD_team(%edx), %edx + movl TEAM_commpage_address(%edx), %edx + addl 4 * COMMPAGE_ENTRY_X86_SYSCALL(%edx), %edx addl $4, %edx // sysenter is at offset 2, 2 bytes long pushl %edx diff --git a/src/system/kernel/arch/x86/32/signals.cpp b/src/system/kernel/arch/x86/32/signals.cpp index 80977a331c..fe3913f95e 100644 --- a/src/system/kernel/arch/x86/32/signals.cpp +++ b/src/system/kernel/arch/x86/32/signals.cpp @@ -89,14 +89,13 @@ register_signal_handler_function(const char* functionName, int32 commpageIndex, ASSERT(expectedAddress == symbolInfo.address); // fill in the commpage table entry - 
fill_commpage_entry(commpageIndex, (void*)symbolInfo.address, - symbolInfo.size); + addr_t position = fill_commpage_entry(commpageIndex, + (void*)symbolInfo.address, symbolInfo.size); // add symbol to the commpage image image_id image = get_commpage_image(); - elf_add_memory_image_symbol(image, commpageSymbolName, - ((addr_t*)USER_COMMPAGE_ADDR)[commpageIndex], symbolInfo.size, - B_SYMBOL_TYPE_TEXT); + elf_add_memory_image_symbol(image, commpageSymbolName, position, + symbolInfo.size, B_SYMBOL_TYPE_TEXT); } @@ -116,10 +115,10 @@ x86_initialize_commpage_signal_handler() addr_t -x86_get_user_signal_handler_wrapper(bool beosHandler) +x86_get_user_signal_handler_wrapper(bool beosHandler, void* commPageAdddress) { int32 index = beosHandler ? COMMPAGE_ENTRY_X86_SIGNAL_HANDLER_BEOS : COMMPAGE_ENTRY_X86_SIGNAL_HANDLER; - return ((addr_t*)USER_COMMPAGE_ADDR)[index]; + return ((addr_t*)commPageAdddress)[index] + (addr_t)commPageAdddress; } diff --git a/src/system/kernel/arch/x86/32/signals_asm.S b/src/system/kernel/arch/x86/32/signals_asm.S index 38c32e9d54..2618d17481 100644 --- a/src/system/kernel/arch/x86/32/signals_asm.S +++ b/src/system/kernel/arch/x86/32/signals_asm.S @@ -37,7 +37,8 @@ FUNCTION(x86_signal_frame_function_beos): lea SIGNAL_FRAME_DATA_context + UCONTEXT_T_uc_mcontext(%esi), %eax push %eax push %edi - movl USER_COMMPAGE_ADDR + 4 * COMMPAGE_ENTRY_X86_MEMCPY, %eax + movl SIGNAL_FRAME_DATA_commpage_address(%esi), %eax + addl 4 * COMMPAGE_ENTRY_X86_MEMCPY(%eax), %eax call *%eax addl $12, %esp @@ -57,7 +58,8 @@ FUNCTION(x86_signal_frame_function_beos): push %edi lea SIGNAL_FRAME_DATA_context + UCONTEXT_T_uc_mcontext(%esi), %eax push %eax - movl USER_COMMPAGE_ADDR + 4 * COMMPAGE_ENTRY_X86_MEMCPY, %eax + movl SIGNAL_FRAME_DATA_commpage_address(%esi), %eax + addl 4 * COMMPAGE_ENTRY_X86_MEMCPY(%eax), %eax call *%eax addl $12 + VREGS_sizeof, %esp diff --git a/src/system/kernel/arch/x86/32/syscalls.cpp b/src/system/kernel/arch/x86/32/syscalls.cpp index f0c9b7c1a0..5b007e61e1 100644 --- a/src/system/kernel/arch/x86/32/syscalls.cpp +++ b/src/system/kernel/arch/x86/32/syscalls.cpp @@ -106,11 +106,11 @@ x86_initialize_syscall(void) // fill in the table entry size_t len = (size_t)((addr_t)syscallCodeEnd - (addr_t)syscallCode); - fill_commpage_entry(COMMPAGE_ENTRY_X86_SYSCALL, syscallCode, len); + addr_t position = fill_commpage_entry(COMMPAGE_ENTRY_X86_SYSCALL, + syscallCode, len); // add syscall to the commpage image image_id image = get_commpage_image(); - elf_add_memory_image_symbol(image, "commpage_syscall", - ((addr_t*)USER_COMMPAGE_ADDR)[COMMPAGE_ENTRY_X86_SYSCALL], len, + elf_add_memory_image_symbol(image, "commpage_syscall", position, len, B_SYMBOL_TYPE_TEXT); } diff --git a/src/system/kernel/arch/x86/32/thread.cpp b/src/system/kernel/arch/x86/32/thread.cpp index 5878aacced..66465c2951 100644 --- a/src/system/kernel/arch/x86/32/thread.cpp +++ b/src/system/kernel/arch/x86/32/thread.cpp @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -23,6 +24,7 @@ #include #include #include +#include #include #include @@ -200,6 +202,15 @@ arch_thread_dump_info(void *info) } +static addr_t +arch_randomize_stack_pointer(addr_t value) +{ + STATIC_ASSERT(MAX_RANDOM_VALUE >= B_PAGE_SIZE - 1); + value -= random_value() & (B_PAGE_SIZE - 1); + return value & ~addr_t(0xf); +} + + /*! 
Sets up initial thread context and enters user space */ status_t @@ -207,21 +218,19 @@ arch_thread_enter_userspace(Thread* thread, addr_t entry, void* args1, void* args2) { addr_t stackTop = thread->user_stack_base + thread->user_stack_size; - uint32 codeSize = (addr_t)x86_end_userspace_thread_exit - - (addr_t)x86_userspace_thread_exit; uint32 args[3]; TRACE(("arch_thread_enter_userspace: entry 0x%lx, args %p %p, " "ustack_top 0x%lx\n", entry, args1, args2, stackTop)); - // copy the little stub that calls exit_thread() when the thread entry - // function returns, as well as the arguments of the entry function - stackTop -= codeSize; + stackTop = arch_randomize_stack_pointer(stackTop); - if (user_memcpy((void *)stackTop, (const void *)&x86_userspace_thread_exit, codeSize) < B_OK) - return B_BAD_ADDRESS; - - args[0] = stackTop; + // Copy the address of the stub that calls exit_thread() when the thread + // entry function returns to the top of the stack to act as the return + // address. The stub is inside commpage. + addr_t commPageAddress = (addr_t)thread->team->commpage_address; + args[0] = ((addr_t*)commPageAddress)[COMMPAGE_ENTRY_X86_THREAD_EXIT] + + commPageAddress; args[1] = (uint32)args1; args[2] = (uint32)args2; stackTop -= sizeof(args); @@ -345,7 +354,8 @@ arch_setup_signal_frame(Thread* thread, struct sigaction* action, // the prepared stack, executing the signal handler wrapper function. frame->user_sp = (addr_t)userStack; frame->ip = x86_get_user_signal_handler_wrapper( - (action->sa_flags & SA_BEOS_COMPATIBLE_HANDLER) != 0); + (action->sa_flags & SA_BEOS_COMPATIBLE_HANDLER) != 0, + thread->team->commpage_address); return B_OK; } diff --git a/src/system/kernel/arch/x86/64/arch.S b/src/system/kernel/arch/x86/64/arch.S index cbaeec86cd..3f07b957d2 100644 --- a/src/system/kernel/arch/x86/64/arch.S +++ b/src/system/kernel/arch/x86/64/arch.S @@ -118,7 +118,7 @@ FUNCTION(x86_swap_pgdir): FUNCTION_END(x86_swap_pgdir) -/* thread exit stub - copied to the userspace stack in arch_thread_enter_uspace() */ +/* thread exit stub */ .align 8 FUNCTION(x86_userspace_thread_exit): movq %rax, %rdi diff --git a/src/system/kernel/arch/x86/64/signals.cpp b/src/system/kernel/arch/x86/64/signals.cpp index 947e76fbb7..06d41ac6b2 100644 --- a/src/system/kernel/arch/x86/64/signals.cpp +++ b/src/system/kernel/arch/x86/64/signals.cpp @@ -28,12 +28,12 @@ x86_initialize_commpage_signal_handler() // Copy the signal handler code to the commpage. size_t len = (size_t)((addr_t)handlerCodeEnd - (addr_t)handlerCode); - fill_commpage_entry(COMMPAGE_ENTRY_X86_SIGNAL_HANDLER, handlerCode, len); + addr_t position = fill_commpage_entry(COMMPAGE_ENTRY_X86_SIGNAL_HANDLER, + handlerCode, len); // Add symbol to the commpage image. image_id image = get_commpage_image(); - elf_add_memory_image_symbol(image, "commpage_signal_handler", - ((addr_t*)USER_COMMPAGE_ADDR)[COMMPAGE_ENTRY_X86_SIGNAL_HANDLER], + elf_add_memory_image_symbol(image, "commpage_signal_handler", position, len, B_SYMBOL_TYPE_TEXT); } diff --git a/src/system/kernel/arch/x86/64/syscalls.cpp b/src/system/kernel/arch/x86/64/syscalls.cpp index 20bf44ef4e..4407498aa2 100644 --- a/src/system/kernel/arch/x86/64/syscalls.cpp +++ b/src/system/kernel/arch/x86/64/syscalls.cpp @@ -20,7 +20,8 @@ static void init_syscall_registers(void* dummy, int cpuNum) { // Enable SYSCALL (EFER.SCE = 1). 
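The hunk below swaps the bare (1 << 0) for the newly named IA32_MSR_EFER_SYSCALL bit; the PAE and 64-bit paging methods later in this diff use the same read-modify-write sequence with IA32_MSR_EFER_NX to enable execute protection. A minimal sketch of that shared pattern, assuming the x86_read_msr()/x86_write_msr() helpers and the EFER constants from arch_cpu.h (the helper itself is illustrative):

// Set one EFER feature bit, leaving the others untouched. EFER is a
// per-CPU MSR, so enabling a bit everywhere means running this on
// every CPU (e.g. via call_all_cpus_sync()).
static void
enable_efer_bit(uint64 bit)
{
	x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER) | bit);
}

// enable_efer_bit(IA32_MSR_EFER_SYSCALL);	// SYSCALL/SYSRET
// enable_efer_bit(IA32_MSR_EFER_NX);		// no-execute pages
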
- x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER) | (1 << 0)); + x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER) + | IA32_MSR_EFER_SYSCALL); // Flags to clear upon entry. Want interrupts disabled and the direction // flag cleared. diff --git a/src/system/kernel/arch/x86/64/thread.cpp b/src/system/kernel/arch/x86/64/thread.cpp index 03797773ea..e1a337fe3c 100644 --- a/src/system/kernel/arch/x86/64/thread.cpp +++ b/src/system/kernel/arch/x86/64/thread.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -197,6 +198,15 @@ arch_thread_dump_info(void* info) } +static addr_t +arch_randomize_stack_pointer(addr_t value) +{ + STATIC_ASSERT(MAX_RANDOM_VALUE >= B_PAGE_SIZE - 1); + value -= random_value() & (B_PAGE_SIZE - 1); + return value & ~addr_t(0xf); +} + + /*! Sets up initial thread context and enters user space */ status_t @@ -208,20 +218,14 @@ arch_thread_enter_userspace(Thread* thread, addr_t entry, void* args1, TRACE("arch_thread_enter_userspace: entry %#lx, args %p %p, " "stackTop %#lx\n", entry, args1, args2, stackTop); - // Copy the little stub that calls exit_thread() when the thread entry - // function returns. - // TODO: This will become a problem later if we want to support execute - // disable, the stack shouldn't really be executable. - size_t codeSize = (addr_t)x86_end_userspace_thread_exit - - (addr_t)x86_userspace_thread_exit; - stackTop -= codeSize; - if (user_memcpy((void*)stackTop, (const void*)&x86_userspace_thread_exit, - codeSize) != B_OK) - return B_BAD_ADDRESS; + stackTop = arch_randomize_stack_pointer(stackTop); - // Copy the address of the stub to the top of the stack to act as the - // return address. - addr_t codeAddr = stackTop; + // Copy the address of the stub that calls exit_thread() when the thread + // entry function returns to the top of the stack to act as the return + // address. The stub is inside commpage. + addr_t commPageAddress = (addr_t)thread->team->commpage_address; + addr_t codeAddr = ((addr_t*)commPageAddress)[COMMPAGE_ENTRY_X86_THREAD_EXIT] + + commPageAddress; stackTop -= sizeof(codeAddr); if (user_memcpy((void*)stackTop, (const void*)&codeAddr, sizeof(codeAddr)) != B_OK) @@ -340,8 +344,10 @@ arch_setup_signal_frame(Thread* thread, struct sigaction* action, // Set up the iframe to execute the signal handler wrapper on our prepared // stack. First argument points to the frame data. 
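Both the x86 and x86_64 thread code above now pass the initial user stack top through arch_randomize_stack_pointer(), which subtracts a sub-page random offset and re-aligns the result before the first frame is built. A worked example of that arithmetic with illustrative numbers, assuming B_PAGE_SIZE == 4096:

// stackTop                        = 0x7f0000100000
// random_value() & (4096 - 1)     =          0xa7d
// stackTop - 0xa7d                = 0x7f00000ff583
// result & ~addr_t(0xf)           = 0x7f00000ff580
//
// Only the offset inside the topmost stack page varies from run to
// run, and the 16-byte alignment the ABI expects is preserved.
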
+ addr_t* commPageAddress = (addr_t*)thread->team->commpage_address; frame->user_sp = (addr_t)userStack; - frame->ip = ((addr_t*)USER_COMMPAGE_ADDR)[COMMPAGE_ENTRY_X86_SIGNAL_HANDLER]; + frame->ip = commPageAddress[COMMPAGE_ENTRY_X86_SIGNAL_HANDLER] + + (addr_t)commPageAddress; frame->di = (addr_t)userSignalFrameData; return B_OK; diff --git a/src/system/kernel/arch/x86/arch_cpu.cpp b/src/system/kernel/arch/x86/arch_cpu.cpp index 7438203853..ff35238130 100644 --- a/src/system/kernel/arch/x86/arch_cpu.cpp +++ b/src/system/kernel/arch/x86/arch_cpu.cpp @@ -605,10 +605,12 @@ detect_cpu(int currentCPU) get_current_cpuid(&cpuid, 1); cpu->arch.feature[FEATURE_COMMON] = cpuid.eax_1.features; // edx cpu->arch.feature[FEATURE_EXT] = cpuid.eax_1.extended_features; // ecx - if (cpu->arch.vendor == VENDOR_AMD) { + if (cpu->arch.vendor == VENDOR_AMD || cpu->arch.vendor == VENDOR_INTEL) { get_current_cpuid(&cpuid, 0x80000001); cpu->arch.feature[FEATURE_EXT_AMD] = cpuid.regs.edx; // edx } + if (cpu->arch.vendor == VENDOR_INTEL) + cpu->arch.feature[FEATURE_EXT_AMD] &= IA32_FEATURES_INTEL_EXT; get_current_cpuid(&cpuid, 6); cpu->arch.feature[FEATURE_6_EAX] = cpuid.regs.eax; cpu->arch.feature[FEATURE_6_ECX] = cpuid.regs.ecx; @@ -862,21 +864,26 @@ arch_cpu_init_post_modules(kernel_args* args) // put the optimized functions into the commpage size_t memcpyLen = (addr_t)gOptimizedFunctions.memcpy_end - (addr_t)gOptimizedFunctions.memcpy; - fill_commpage_entry(COMMPAGE_ENTRY_X86_MEMCPY, + addr_t memcpyPosition = fill_commpage_entry(COMMPAGE_ENTRY_X86_MEMCPY, (const void*)gOptimizedFunctions.memcpy, memcpyLen); size_t memsetLen = (addr_t)gOptimizedFunctions.memset_end - (addr_t)gOptimizedFunctions.memset; - fill_commpage_entry(COMMPAGE_ENTRY_X86_MEMSET, + addr_t memsetPosition = fill_commpage_entry(COMMPAGE_ENTRY_X86_MEMSET, (const void*)gOptimizedFunctions.memset, memsetLen); + size_t threadExitLen = (addr_t)x86_end_userspace_thread_exit + - (addr_t)x86_userspace_thread_exit; + addr_t threadExitPosition = fill_commpage_entry( + COMMPAGE_ENTRY_X86_THREAD_EXIT, (const void*)x86_userspace_thread_exit, + threadExitLen); // add the functions to the commpage image image_id image = get_commpage_image(); - elf_add_memory_image_symbol(image, "commpage_memcpy", - ((addr_t*)USER_COMMPAGE_ADDR)[COMMPAGE_ENTRY_X86_MEMCPY], memcpyLen, - B_SYMBOL_TYPE_TEXT); - elf_add_memory_image_symbol(image, "commpage_memset", - ((addr_t*)USER_COMMPAGE_ADDR)[COMMPAGE_ENTRY_X86_MEMSET], memsetLen, - B_SYMBOL_TYPE_TEXT); + elf_add_memory_image_symbol(image, "commpage_memcpy", memcpyPosition, + memcpyLen, B_SYMBOL_TYPE_TEXT); + elf_add_memory_image_symbol(image, "commpage_memset", memsetPosition, + memsetLen, B_SYMBOL_TYPE_TEXT); + elf_add_memory_image_symbol(image, "commpage_thread_exit", + threadExitPosition, threadExitLen, B_SYMBOL_TYPE_TEXT); return B_OK; } diff --git a/src/system/kernel/arch/x86/arch_int.cpp b/src/system/kernel/arch/x86/arch_int.cpp index 75e50a827e..e1836368cd 100644 --- a/src/system/kernel/arch/x86/arch_int.cpp +++ b/src/system/kernel/arch/x86/arch_int.cpp @@ -319,8 +319,9 @@ x86_page_fault_exception(struct iframe* frame) enable_interrupts(); vm_page_fault(cr2, frame->ip, - (frame->error_code & 0x2) != 0, // write access - (frame->error_code & 0x4) != 0, // userland + (frame->error_code & 0x2)!= 0, // write access + (frame->error_code & 0x10) != 0, // instruction fetch + (frame->error_code & 0x4) != 0, // userland &newip); if (newip != 0) { // the page fault handler wants us to modify the iframe to set the diff --git 
a/src/system/kernel/arch/x86/arch_vm.cpp b/src/system/kernel/arch/x86/arch_vm.cpp index 0aed76adb7..ae063057a5 100644 --- a/src/system/kernel/arch/x86/arch_vm.cpp +++ b/src/system/kernel/arch/x86/arch_vm.cpp @@ -728,6 +728,15 @@ arch_vm_supports_protection(uint32 protection) return false; } + // Userland and the kernel have the same setting of NX-bit. + // That's why we do not allow any area that user can access, but not execute + // and the kernel can execute. + if ((protection & (B_READ_AREA | B_WRITE_AREA)) != 0 + && (protection & B_EXECUTE_AREA) == 0 + && (protection & B_KERNEL_EXECUTE_AREA) != 0) { + return false; + } + return true; } diff --git a/src/system/kernel/arch/x86/arch_vm_translation_map.cpp b/src/system/kernel/arch/x86/arch_vm_translation_map.cpp index 836262365e..c2abe3f253 100644 --- a/src/system/kernel/arch/x86/arch_vm_translation_map.cpp +++ b/src/system/kernel/arch/x86/arch_vm_translation_map.cpp @@ -86,13 +86,16 @@ arch_vm_translation_map_init(kernel_args *args, gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod64Bit; #elif B_HAIKU_PHYSICAL_BITS == 64 bool paeAvailable = x86_check_feature(IA32_FEATURE_PAE, FEATURE_COMMON); - bool paeNeeded = false; - for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) { - phys_addr_t end = args->physical_memory_range[i].start - + args->physical_memory_range[i].size; - if (end > 0x100000000LL) { - paeNeeded = true; - break; + bool paeNeeded = x86_check_feature(IA32_FEATURE_AMD_EXT_NX, + FEATURE_EXT_AMD); + if (!paeNeeded) { + for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) { + phys_addr_t end = args->physical_memory_range[i].start + + args->physical_memory_range[i].size; + if (end > 0x100000000LL) { + paeNeeded = true; + break; + } } } diff --git a/src/system/kernel/arch/x86/asm_offsets.cpp b/src/system/kernel/arch/x86/asm_offsets.cpp index 88082f90ce..787fef10e9 100644 --- a/src/system/kernel/arch/x86/asm_offsets.cpp +++ b/src/system/kernel/arch/x86/asm_offsets.cpp @@ -34,7 +34,11 @@ dummy() DEFINE_OFFSET_MACRO(CPU_ENT, cpu_ent, fault_handler); DEFINE_OFFSET_MACRO(CPU_ENT, cpu_ent, fault_handler_stack_pointer); + // struct Team + DEFINE_OFFSET_MACRO(TEAM, Team, commpage_address); + // struct Thread + DEFINE_OFFSET_MACRO(THREAD, Thread, team); DEFINE_OFFSET_MACRO(THREAD, Thread, time_lock); DEFINE_OFFSET_MACRO(THREAD, Thread, kernel_time); DEFINE_OFFSET_MACRO(THREAD, Thread, user_time); @@ -88,6 +92,7 @@ dummy() DEFINE_OFFSET_MACRO(SIGNAL_FRAME_DATA, signal_frame_data, user_data); DEFINE_OFFSET_MACRO(SIGNAL_FRAME_DATA, signal_frame_data, handler); DEFINE_OFFSET_MACRO(SIGNAL_FRAME_DATA, signal_frame_data, siginfo_handler); + DEFINE_OFFSET_MACRO(SIGNAL_FRAME_DATA, signal_frame_data, commpage_address); // struct ucontext_t DEFINE_OFFSET_MACRO(UCONTEXT_T, __ucontext_t, uc_mcontext); diff --git a/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.cpp b/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.cpp index d84199a44d..059f99c3d6 100644 --- a/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.cpp +++ b/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.cpp @@ -59,6 +59,10 @@ X86PagingMethod64Bit::Init(kernel_args* args, fKernelPhysicalPML4 = args->arch_args.phys_pgdir; fKernelVirtualPML4 = (uint64*)(addr_t)args->arch_args.vir_pgdir; + // enable NX-bit on all CPUs + if (x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) + call_all_cpus_sync(&_EnableExecutionDisable, NULL); + // Ensure that the user half of the address space is clear. 
This removes // the temporary identity mapping made by the boot loader. memset(fKernelVirtualPML4, 0, sizeof(uint64) * 256); @@ -367,6 +371,8 @@ X86PagingMethod64Bit::PutPageTableEntryInTable(uint64* entry, page |= X86_64_PTE_USER; if ((attributes & B_WRITE_AREA) != 0) page |= X86_64_PTE_WRITABLE; + if ((attributes & B_EXECUTE_AREA) == 0) + page |= X86_64_PTE_NOT_EXECUTABLE; } else if ((attributes & B_KERNEL_WRITE_AREA) != 0) page |= X86_64_PTE_WRITABLE; @@ -374,3 +380,11 @@ X86PagingMethod64Bit::PutPageTableEntryInTable(uint64* entry, SetTableEntry(entry, page); } + +void +X86PagingMethod64Bit::_EnableExecutionDisable(void* dummy, int cpu) +{ + x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER) + | IA32_MSR_EFER_NX); +} + diff --git a/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.h b/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.h index f561d9e995..e834434c78 100644 --- a/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.h +++ b/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.h @@ -96,6 +96,8 @@ public: uint32 memoryType); private: + static void _EnableExecutionDisable(void* dummy, int cpu); + phys_addr_t fKernelPhysicalPML4; uint64* fKernelVirtualPML4; diff --git a/src/system/kernel/arch/x86/paging/64bit/X86VMTranslationMap64Bit.cpp b/src/system/kernel/arch/x86/paging/64bit/X86VMTranslationMap64Bit.cpp index 0e8800b15a..3f24ae5434 100644 --- a/src/system/kernel/arch/x86/paging/64bit/X86VMTranslationMap64Bit.cpp +++ b/src/system/kernel/arch/x86/paging/64bit/X86VMTranslationMap64Bit.cpp @@ -627,11 +627,13 @@ X86VMTranslationMap64Bit::Query(addr_t virtualAddress, // Translate the page state flags. if ((entry & X86_64_PTE_USER) != 0) { *_flags |= ((entry & X86_64_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0) - | B_READ_AREA; + | B_READ_AREA + | ((entry & X86_64_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0); } *_flags |= ((entry & X86_64_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0) | B_KERNEL_READ_AREA + | ((entry & X86_64_PTE_NOT_EXECUTABLE) == 0 ? B_KERNEL_EXECUTE_AREA : 0) | ((entry & X86_64_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0) | ((entry & X86_64_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0) | ((entry & X86_64_PTE_PRESENT) != 0 ? 
PAGE_PRESENT : 0); @@ -671,6 +673,8 @@ X86VMTranslationMap64Bit::Protect(addr_t start, addr_t end, uint32 attributes, newProtectionFlags = X86_64_PTE_USER; if ((attributes & B_WRITE_AREA) != 0) newProtectionFlags |= X86_64_PTE_WRITABLE; + if ((attributes & B_EXECUTE_AREA) == 0) + newProtectionFlags |= X86_64_PTE_NOT_EXECUTABLE; } else if ((attributes & B_KERNEL_WRITE_AREA) != 0) newProtectionFlags = X86_64_PTE_WRITABLE; diff --git a/src/system/kernel/arch/x86/paging/64bit/paging.h b/src/system/kernel/arch/x86/paging/64bit/paging.h index 2cb4fb4b06..a99afaa44c 100644 --- a/src/system/kernel/arch/x86/paging/64bit/paging.h +++ b/src/system/kernel/arch/x86/paging/64bit/paging.h @@ -59,7 +59,9 @@ #define X86_64_PTE_GLOBAL (1LL << 8) #define X86_64_PTE_NOT_EXECUTABLE (1LL << 63) #define X86_64_PTE_ADDRESS_MASK 0x000ffffffffff000L -#define X86_64_PTE_PROTECTION_MASK (X86_64_PTE_WRITABLE | X86_64_PTE_USER) +#define X86_64_PTE_PROTECTION_MASK (X86_64_PTE_NOT_EXECUTABLE \ + | X86_64_PTE_WRITABLE \ + | X86_64_PTE_USER) #define X86_64_PTE_MEMORY_TYPE_MASK (X86_64_PTE_WRITE_THROUGH \ | X86_64_PTE_CACHING_DISABLED) diff --git a/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp b/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp index 34258f0c1d..d2071bc718 100644 --- a/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp +++ b/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp @@ -165,6 +165,12 @@ private: { x86_write_cr3((addr_t)physicalPDPT); x86_write_cr4(x86_read_cr4() | IA32_CR4_PAE | IA32_CR4_GLOBAL_PAGES); + + // if availalbe enable NX-bit (No eXecute) + if (x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD)) { + x86_write_msr(IA32_MSR_EFER, x86_read_msr(IA32_MSR_EFER) + | IA32_MSR_EFER_NX); + } } void _TranslatePageTable(addr_t virtualBase) @@ -778,6 +784,8 @@ X86PagingMethodPAE::PutPageTableEntryInTable(pae_page_table_entry* entry, page |= X86_PAE_PTE_USER; if ((attributes & B_WRITE_AREA) != 0) page |= X86_PAE_PTE_WRITABLE; + if ((attributes & B_EXECUTE_AREA) == 0) + page |= X86_PAE_PTE_NOT_EXECUTABLE; } else if ((attributes & B_KERNEL_WRITE_AREA) != 0) page |= X86_PAE_PTE_WRITABLE; diff --git a/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp b/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp index 8d6853689b..c936af436a 100644 --- a/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp +++ b/src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp @@ -687,11 +687,14 @@ X86VMTranslationMapPAE::Query(addr_t virtualAddress, // translate the page state flags if ((entry & X86_PAE_PTE_USER) != 0) { *_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0) - | B_READ_AREA; + | B_READ_AREA + | ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0); } *_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0) | B_KERNEL_READ_AREA + | ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0 + ? B_KERNEL_EXECUTE_AREA : 0) | ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0) | ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0) | ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0); @@ -733,11 +736,14 @@ X86VMTranslationMapPAE::QueryInterrupt(addr_t virtualAddress, // translate the page state flags if ((entry & X86_PAE_PTE_USER) != 0) { *_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0) - | B_READ_AREA; + | B_READ_AREA + | ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0 ? B_EXECUTE_AREA : 0); } *_flags |= ((entry & X86_PAE_PTE_WRITABLE) != 0 ? 
B_KERNEL_WRITE_AREA : 0) | B_KERNEL_READ_AREA + | ((entry & X86_PAE_PTE_NOT_EXECUTABLE) == 0 + ? B_KERNEL_EXECUTE_AREA : 0) | ((entry & X86_PAE_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0) | ((entry & X86_PAE_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0) | ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0); @@ -766,6 +772,8 @@ X86VMTranslationMapPAE::Protect(addr_t start, addr_t end, uint32 attributes, newProtectionFlags = X86_PAE_PTE_USER; if ((attributes & B_WRITE_AREA) != 0) newProtectionFlags |= X86_PAE_PTE_WRITABLE; + if ((attributes & B_EXECUTE_AREA) == 0) + newProtectionFlags |= X86_PAE_PTE_NOT_EXECUTABLE; } else if ((attributes & B_KERNEL_WRITE_AREA) != 0) newProtectionFlags = X86_PAE_PTE_WRITABLE; diff --git a/src/system/kernel/arch/x86/paging/pae/paging.h b/src/system/kernel/arch/x86/paging/pae/paging.h index ae6d45b64a..0567dacc24 100644 --- a/src/system/kernel/arch/x86/paging/pae/paging.h +++ b/src/system/kernel/arch/x86/paging/pae/paging.h @@ -49,7 +49,8 @@ #define X86_PAE_PTE_IGNORED3 0x0000000000000800LL #define X86_PAE_PTE_ADDRESS_MASK 0x000ffffffffff000LL #define X86_PAE_PTE_NOT_EXECUTABLE 0x8000000000000000LL -#define X86_PAE_PTE_PROTECTION_MASK (X86_PAE_PTE_WRITABLE \ +#define X86_PAE_PTE_PROTECTION_MASK (X86_PAE_PTE_NOT_EXECUTABLE \ + |X86_PAE_PTE_WRITABLE \ | X86_PAE_PTE_USER) #define X86_PAE_PTE_MEMORY_TYPE_MASK (X86_PAE_PTE_WRITE_THROUGH \ | X86_PAE_PTE_CACHING_DISABLED) diff --git a/src/system/kernel/arch/x86/x86_signals.h b/src/system/kernel/arch/x86/x86_signals.h index 0e6cb50801..e37bb0dda5 100644 --- a/src/system/kernel/arch/x86/x86_signals.h +++ b/src/system/kernel/arch/x86/x86_signals.h @@ -11,7 +11,8 @@ void x86_initialize_commpage_signal_handler(); #ifndef __x86_64__ -addr_t x86_get_user_signal_handler_wrapper(bool beosHandler); +addr_t x86_get_user_signal_handler_wrapper(bool beosHandler, + void* commPageAddress); #endif diff --git a/src/system/kernel/commpage.cpp b/src/system/kernel/commpage.cpp index 4419bfd6cf..c54a674b07 100644 --- a/src/system/kernel/commpage.cpp +++ b/src/system/kernel/commpage.cpp @@ -15,9 +15,7 @@ static area_id sCommPageArea; -static area_id sUserCommPageArea; static addr_t* sCommPageAddress; -static addr_t* sUserCommPageAddress; static void* sFreeCommPageSpace; static image_id sCommPageImage; @@ -30,20 +28,19 @@ allocate_commpage_entry(int entry, size_t size) { void* space = sFreeCommPageSpace; sFreeCommPageSpace = ALIGN_ENTRY((addr_t)sFreeCommPageSpace + size); - sCommPageAddress[entry] = (addr_t)sUserCommPageAddress - + ((addr_t)space - (addr_t)sCommPageAddress); + sCommPageAddress[entry] = (addr_t)space - (addr_t)sCommPageAddress; dprintf("allocate_commpage_entry(%d, %lu) -> %p\n", entry, size, (void*)sCommPageAddress[entry]); return space; } -void* +addr_t fill_commpage_entry(int entry, const void* copyFrom, size_t size) { void* space = allocate_commpage_entry(entry, size); memcpy(space, copyFrom, size); - return space; + return (addr_t)space - (addr_t)sCommPageAddress; } @@ -54,20 +51,24 @@ get_commpage_image() } +area_id +clone_commpage_area(team_id team, void** address) +{ + *address = (void*)KERNEL_USER_DATA_BASE; + return vm_clone_area(team, "commpage", address, + B_RANDOMIZED_BASE_ADDRESS, B_READ_AREA | B_EXECUTE_AREA | B_KERNEL_AREA, + REGION_PRIVATE_MAP, sCommPageArea, true); +} + + status_t commpage_init(void) { // create a read/write kernel area - sCommPageArea = create_area("commpage", (void **)&sCommPageAddress, + sCommPageArea = create_area("kernel_commpage", (void **)&sCommPageAddress, B_ANY_ADDRESS, COMMPAGE_SIZE, 
B_FULL_LOCK, B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA); - // clone it at a fixed address with user read/only permissions - sUserCommPageAddress = (addr_t*)USER_COMMPAGE_ADDR; - sUserCommPageArea = clone_area("user_commpage", - (void **)&sUserCommPageAddress, B_EXACT_ADDRESS, - B_READ_AREA | B_EXECUTE_AREA, sCommPageArea); - // zero it out memset(sCommPageAddress, 0, COMMPAGE_SIZE); @@ -79,10 +80,10 @@ commpage_init(void) sFreeCommPageSpace = ALIGN_ENTRY(&sCommPageAddress[COMMPAGE_TABLE_ENTRIES]); // create the image for the commpage - sCommPageImage = elf_create_memory_image("commpage", USER_COMMPAGE_ADDR, - COMMPAGE_SIZE, 0, 0); + sCommPageImage = elf_create_memory_image("commpage", 0, COMMPAGE_SIZE, 0, + 0); elf_add_memory_image_symbol(sCommPageImage, "commpage_table", - USER_COMMPAGE_ADDR, COMMPAGE_TABLE_ENTRIES * sizeof(addr_t), + 0, COMMPAGE_TABLE_ENTRIES * sizeof(addr_t), B_SYMBOL_TYPE_DATA); arch_commpage_init(); diff --git a/src/system/kernel/debug/BreakpointManager.cpp b/src/system/kernel/debug/BreakpointManager.cpp index ceed7c8fb3..f105a4040a 100644 --- a/src/system/kernel/debug/BreakpointManager.cpp +++ b/src/system/kernel/debug/BreakpointManager.cpp @@ -9,7 +9,6 @@ #include -#include #include #include #include @@ -257,12 +256,6 @@ BreakpointManager::CanAccessAddress(const void* _address, bool write) if (IS_USER_ADDRESS(address)) return true; - // a commpage address can at least be read - if (address >= USER_COMMPAGE_ADDR - && address < USER_COMMPAGE_ADDR + COMMPAGE_SIZE) { - return !write; - } - return false; } diff --git a/src/system/kernel/elf.cpp b/src/system/kernel/elf.cpp index 0bb496fa7b..4d18448ad6 100644 --- a/src/system/kernel/elf.cpp +++ b/src/system/kernel/elf.cpp @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -1068,7 +1069,7 @@ elf_resolve_symbol(struct elf_image_info *image, elf_sym *symbol, /*! 
Until we have shared library support, just this links against the kernel */ static int -elf_relocate(struct elf_image_info *image) +elf_relocate(struct elf_image_info* image, struct elf_image_info* resolveImage) { int status = B_NO_ERROR; @@ -1078,7 +1079,7 @@ elf_relocate(struct elf_image_info *image) if (image->rel) { TRACE(("total %i rel relocs\n", image->rel_len / (int)sizeof(elf_rel))); - status = arch_elf_relocate_rel(image, sKernelImage, image->rel, + status = arch_elf_relocate_rel(image, resolveImage, image->rel, image->rel_len); if (status < B_OK) return status; @@ -1088,12 +1089,12 @@ elf_relocate(struct elf_image_info *image) if (image->pltrel_type == DT_REL) { TRACE(("total %i plt-relocs\n", image->pltrel_len / (int)sizeof(elf_rel))); - status = arch_elf_relocate_rel(image, sKernelImage, image->pltrel, + status = arch_elf_relocate_rel(image, resolveImage, image->pltrel, image->pltrel_len); } else { TRACE(("total %i plt-relocs\n", image->pltrel_len / (int)sizeof(elf_rela))); - status = arch_elf_relocate_rela(image, sKernelImage, + status = arch_elf_relocate_rela(image, resolveImage, (elf_rela *)image->pltrel, image->pltrel_len); } if (status < B_OK) @@ -1104,7 +1105,7 @@ elf_relocate(struct elf_image_info *image) TRACE(("total %i rel relocs\n", image->rela_len / (int)sizeof(elf_rela))); - status = arch_elf_relocate_rela(image, sKernelImage, image->rela, + status = arch_elf_relocate_rela(image, resolveImage, image->rela, image->rela_len); if (status < B_OK) return status; @@ -1287,7 +1288,7 @@ insert_preloaded_image(preloaded_elf_image *preloadedImage, bool kernel) if (status != B_OK) goto error1; - status = elf_relocate(image); + status = elf_relocate(image, sKernelImage); if (status != B_OK) goto error1; } else @@ -1363,6 +1364,7 @@ public: if (!_Read((runtime_loader_debug_area*)area->Base(), fDebugArea)) return B_BAD_ADDRESS; + fTeam = team; return B_OK; } @@ -1381,8 +1383,22 @@ public: // get the image for the address image_t image; status_t error = _FindImageAtAddress(address, image); - if (error != B_OK) + if (error != B_OK) { + // commpage requires special treatment since kernel stores symbol + // information + addr_t commPageAddress = (addr_t)fTeam->commpage_address; + if (address >= commPageAddress + && address < commPageAddress + COMMPAGE_SIZE) { + if (*_imageName) + *_imageName = "commpage"; + address -= (addr_t)commPageAddress; + error = elf_debug_lookup_symbol_address(address, _baseAddress, + _symbolName, NULL, _exactMatch); + if (_baseAddress) + *_baseAddress += (addr_t)fTeam->commpage_address; + } return error; + } strlcpy(fImageName, image.name, sizeof(fImageName)); @@ -1522,6 +1538,7 @@ public: // gcc 2.95.3 doesn't like it defined in-place private: + Team* fTeam; runtime_loader_debug_area fDebugArea; char fImageName[B_OS_NAME_LENGTH]; char fSymbolName[256]; @@ -1808,6 +1825,9 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry) ssize_t length; int fd; int i; + addr_t delta = 0; + uint32 addressSpec = B_RANDOMIZED_BASE_ADDRESS; + area_id* mappedAreas = NULL; TRACE(("elf_load: entry path '%s', team %p\n", path, team)); @@ -1837,6 +1857,14 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry) if (status < B_OK) goto error; + struct elf_image_info* image; + image = create_image_struct(); + if (image == NULL) { + status = B_NO_MEMORY; + goto error; + } + image->elf_header = &elfHeader; + // read program header programHeaders = (elf_phdr *)malloc( @@ -1844,7 +1872,7 @@ elf_load_user_image(const char *path, Team *team, 
int flags, addr_t *entry) if (programHeaders == NULL) { dprintf("error allocating space for program headers\n"); status = B_NO_MEMORY; - goto error; + goto error2; } TRACE(("reading in program headers at 0x%lx, length 0x%x\n", @@ -1854,12 +1882,12 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry) if (length < B_OK) { status = length; dprintf("error reading in program headers\n"); - goto error; + goto error2; } if (length != elfHeader.e_phnum * elfHeader.e_phentsize) { dprintf("short read while reading in program headers\n"); status = -1; - goto error; + goto error2; } // construct a nice name for the region we have to create below @@ -1879,7 +1907,14 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry) strcpy(baseName, leaf); } - // map the program's segments into memory + // map the program's segments into memory, initially with rw access + // correct area protection will be set after relocation + + mappedAreas = (area_id*)malloc(sizeof(area_id) * elfHeader.e_phnum); + if (mappedAreas == NULL) { + status = B_NO_MEMORY; + goto error2; + } image_info imageInfo; memset(&imageInfo, 0, sizeof(image_info)); @@ -1887,13 +1922,23 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry) for (i = 0; i < elfHeader.e_phnum; i++) { char regionName[B_OS_NAME_LENGTH]; char *regionAddress; + char *originalRegionAddress; area_id id; + mappedAreas[i] = -1; + + if (programHeaders[i].p_type == PT_DYNAMIC) { + image->dynamic_section = programHeaders[i].p_vaddr; + continue; + } + if (programHeaders[i].p_type != PT_LOAD) continue; - regionAddress = (char *)ROUNDDOWN(programHeaders[i].p_vaddr, - B_PAGE_SIZE); + regionAddress = (char *)(ROUNDDOWN(programHeaders[i].p_vaddr, + B_PAGE_SIZE) + delta); + originalRegionAddress = regionAddress; + if (programHeaders[i].p_flags & PF_WRITE) { // rw/data segment size_t memUpperBound = (programHeaders[i].p_vaddr % B_PAGE_SIZE) @@ -1907,18 +1952,22 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry) sprintf(regionName, "%s_seg%drw", baseName, i); id = vm_map_file(team->id, regionName, (void **)®ionAddress, - B_EXACT_ADDRESS, fileUpperBound, + addressSpec, fileUpperBound, B_READ_AREA | B_WRITE_AREA, REGION_PRIVATE_MAP, false, fd, ROUNDDOWN(programHeaders[i].p_offset, B_PAGE_SIZE)); if (id < B_OK) { dprintf("error mapping file data: %s!\n", strerror(id)); status = B_NOT_AN_EXECUTABLE; - goto error; + goto error2; } + mappedAreas[i] = id; imageInfo.data = regionAddress; imageInfo.data_size = memUpperBound; + image->data_region.start = (addr_t)regionAddress; + image->data_region.size = memUpperBound; + // clean garbage brought by mmap (the region behind the file, // at least parts of it are the bss and have to be zeroed) addr_t start = (addr_t)regionAddress @@ -1948,7 +1997,7 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry) if (id < B_OK) { dprintf("error allocating bss area: %s!\n", strerror(id)); status = B_NOT_AN_EXECUTABLE; - goto error; + goto error2; } } } else { @@ -1959,18 +2008,62 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry) + (programHeaders[i].p_vaddr % B_PAGE_SIZE), B_PAGE_SIZE); id = vm_map_file(team->id, regionName, (void **)®ionAddress, - B_EXACT_ADDRESS, segmentSize, - B_READ_AREA | B_EXECUTE_AREA, REGION_PRIVATE_MAP, false, - fd, ROUNDDOWN(programHeaders[i].p_offset, B_PAGE_SIZE)); + addressSpec, segmentSize, + B_READ_AREA | B_WRITE_AREA, REGION_PRIVATE_MAP, false, fd, + 
ROUNDDOWN(programHeaders[i].p_offset, B_PAGE_SIZE)); if (id < B_OK) { dprintf("error mapping file text: %s!\n", strerror(id)); status = B_NOT_AN_EXECUTABLE; - goto error; + goto error2; } + mappedAreas[i] = id; + imageInfo.text = regionAddress; imageInfo.text_size = segmentSize; + + image->text_region.start = (addr_t)regionAddress; + image->text_region.size = segmentSize; } + + if (addressSpec != B_EXACT_ADDRESS) { + addressSpec = B_EXACT_ADDRESS; + delta = regionAddress - originalRegionAddress; + } + } + + image->data_region.delta = delta; + image->text_region.delta = delta; + + // modify the dynamic ptr by the delta of the regions + image->dynamic_section += image->text_region.delta; + + status = elf_parse_dynamic_section(image); + if (status != B_OK) + goto error2; + + status = elf_relocate(image, image); + if (status != B_OK) + goto error2; + + // set correct area protection + for (i = 0; i < elfHeader.e_phnum; i++) { + if (mappedAreas[i] == -1) + continue; + + uint32 protection = 0; + + if (programHeaders[i].p_flags & PF_EXECUTE) + protection |= B_EXECUTE_AREA; + if (programHeaders[i].p_flags & PF_WRITE) + protection |= B_WRITE_AREA; + if (programHeaders[i].p_flags & PF_READ) + protection |= B_READ_AREA; + + status = vm_set_area_protection(team->id, mappedAreas[i], protection, + true); + if (status != B_OK) + goto error2; } // register the loaded image @@ -1992,9 +2085,15 @@ elf_load_user_image(const char *path, Team *team, int flags, addr_t *entry) TRACE(("elf_load: done!\n")); - *entry = elfHeader.e_entry; + *entry = elfHeader.e_entry + delta; status = B_OK; +error2: + free(mappedAreas); + + image->elf_header = NULL; + delete_elf_image(image); + error: free(programHeaders); _kern_close(fd); @@ -2241,7 +2340,7 @@ load_kernel_add_on(const char *path) if (status != B_OK) goto error5; - status = elf_relocate(image); + status = elf_relocate(image, sKernelImage); if (status < B_OK) goto error5; diff --git a/src/system/kernel/scheduler/scheduler_affine.cpp b/src/system/kernel/scheduler/scheduler_affine.cpp index 10fa82ce5a..c2a5c0c31d 100644 --- a/src/system/kernel/scheduler/scheduler_affine.cpp +++ b/src/system/kernel/scheduler/scheduler_affine.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include "scheduler_common.h" #include "scheduler_tracing.h" @@ -89,19 +90,6 @@ struct scheduler_thread_data { }; -static int -_rand(void) -{ - static int next = 0; - - if (next == 0) - next = system_time(); - - next = next * 1103515245 + 12345; - return (next >> 16) & 0x7FFF; -} - - static int dump_run_queue(int argc, char **argv) { @@ -422,7 +410,7 @@ affine_reschedule(void) // skip normal threads sometimes // (twice as probable per priority level) - if ((_rand() >> (15 - priorityDiff)) != 0) + if ((fast_random_value() >> (15 - priorityDiff)) != 0) break; nextThread = lowerNextThread; diff --git a/src/system/kernel/scheduler/scheduler_simple.cpp b/src/system/kernel/scheduler/scheduler_simple.cpp index a68a1c3a47..6b60d3ca0c 100644 --- a/src/system/kernel/scheduler/scheduler_simple.cpp +++ b/src/system/kernel/scheduler/scheduler_simple.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include "scheduler_common.h" #include "scheduler_tracing.h" @@ -43,19 +44,6 @@ const bigtime_t kThreadQuantum = 3000; static Thread *sRunQueue = NULL; -static int -_rand(void) -{ - static int next = 0; - - if (next == 0) - next = system_time(); - - next = next * 1103515245 + 12345; - return (next >> 16) & 0x7FFF; -} - - static int dump_run_queue(int argc, char **argv) { @@ -272,7 +260,7 @@ 
simple_reschedule(void) // skip normal threads sometimes // (twice as probable per priority level) - if ((_rand() >> (15 - priorityDiff)) != 0) + if ((fast_random_value() >> (15 - priorityDiff)) != 0) break; nextThread = lowerNextThread; diff --git a/src/system/kernel/scheduler/scheduler_simple_smp.cpp b/src/system/kernel/scheduler/scheduler_simple_smp.cpp index 7ca4d7f598..c4d1e8a2df 100644 --- a/src/system/kernel/scheduler/scheduler_simple_smp.cpp +++ b/src/system/kernel/scheduler/scheduler_simple_smp.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include "scheduler_common.h" #include "scheduler_tracing.h" @@ -46,19 +47,6 @@ static int32 sCPUCount = 1; static int32 sNextCPUForSelection = 0; -static int -_rand(void) -{ - static int next = 0; - - if (next == 0) - next = system_time(); - - next = next * 1103515245 + 12345; - return (next >> 16) & 0x7FFF; -} - - static int dump_run_queue(int argc, char **argv) { @@ -360,7 +348,7 @@ reschedule(void) // skip normal threads sometimes // (twice as probable per priority level) - if ((_rand() >> (15 - priorityDiff)) != 0) + if ((fast_random_value() >> (15 - priorityDiff)) != 0) break; nextThread = lowerNextThread; diff --git a/src/system/kernel/signal.cpp b/src/system/kernel/signal.cpp index 3279233357..a28533f645 100644 --- a/src/system/kernel/signal.cpp +++ b/src/system/kernel/signal.cpp @@ -892,6 +892,10 @@ setup_signal_frame(Thread* thread, struct sigaction* action, Signal* signal, memcpy(frameData.syscall_restart_parameters, thread->syscall_restart.parameters, sizeof(frameData.syscall_restart_parameters)); + + // commpage address + frameData.commpage_address = thread->team->commpage_address; + // syscall_restart_return_value is filled in by the architecture specific // code. diff --git a/src/system/kernel/team.cpp b/src/system/kernel/team.cpp index 381be36458..d0d68d8c62 100644 --- a/src/system/kernel/team.cpp +++ b/src/system/kernel/team.cpp @@ -26,6 +26,7 @@ #include +#include #include #include #include @@ -158,6 +159,9 @@ static int32 sUsedTeams = 1; static TeamNotificationService sNotificationService; +static const size_t kTeamUserDataReservedSize = 128 * B_PAGE_SIZE; +static const size_t kTeamUserDataInitialSize = 4 * B_PAGE_SIZE; + // #pragma mark - TeamListIterator @@ -447,6 +451,8 @@ Team::Team(team_id id, bool kernel) user_data_size = 0; free_user_threads = NULL; + commpage_address = NULL; + supplementary_groups = NULL; supplementary_group_count = 0; @@ -1324,23 +1330,44 @@ remove_team_from_group(Team* team) static status_t -create_team_user_data(Team* team) +create_team_user_data(Team* team, void* exactAddress = NULL) { void* address; - size_t size = 4 * B_PAGE_SIZE; + uint32 addressSpec; + + if (exactAddress != NULL) { + address = exactAddress; + addressSpec = B_EXACT_ADDRESS; + } else { + address = (void*)KERNEL_USER_DATA_BASE; + addressSpec = B_RANDOMIZED_BASE_ADDRESS; + } + + status_t result = vm_reserve_address_range(team->id, &address, addressSpec, + kTeamUserDataReservedSize, RESERVED_AVOID_BASE); + virtual_address_restrictions virtualRestrictions = {}; - virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE; - virtualRestrictions.address_specification = B_BASE_ADDRESS; + if (result == B_OK || exactAddress != NULL) { + if (exactAddress != NULL) + virtualRestrictions.address = exactAddress; + else + virtualRestrictions.address = address; + virtualRestrictions.address_specification = B_EXACT_ADDRESS; + } else { + virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE; + 
virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS; + } + physical_address_restrictions physicalRestrictions = {}; - team->user_data_area = create_area_etc(team->id, "user area", size, - B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0, &virtualRestrictions, - &physicalRestrictions, &address); + team->user_data_area = create_area_etc(team->id, "user area", + kTeamUserDataInitialSize, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0, + &virtualRestrictions, &physicalRestrictions, &address); if (team->user_data_area < 0) return team->user_data_area; team->user_data = (addr_t)address; team->used_user_data = 0; - team->user_data_size = size; + team->user_data_size = kTeamUserDataInitialSize; team->free_user_threads = NULL; return B_OK; @@ -1352,6 +1379,9 @@ delete_team_user_data(Team* team) { if (team->user_data_area >= 0) { vm_delete_area(team->id, team->user_data_area, true); + vm_unreserve_address_range(team->id, (void*)team->user_data, + kTeamUserDataReservedSize); + team->user_data = 0; team->used_user_data = 0; team->user_data_size = 0; @@ -1539,6 +1569,32 @@ team_create_thread_start_internal(void* args) // the arguments are already on the user stack, we no longer need // them in this form + // Clone commpage area + area_id commPageArea = clone_commpage_area(team->id, + &team->commpage_address); + if (commPageArea < B_OK) { + TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n", + strerror(commPageArea))); + return commPageArea; + } + + // Register commpage image + image_id commPageImage = get_commpage_image(); + image_info imageInfo; + err = get_image_info(commPageImage, &imageInfo); + if (err != B_OK) { + TRACE(("team_create_thread_start: get_image_info() failed: %s\n", + strerror(err))); + return err; + } + imageInfo.text = team->commpage_address; + image_id image = register_image(team, &imageInfo, sizeof(image_info)); + if (image < 0) { + TRACE(("team_create_thread_start: register_image() failed: %s\n", + strerror(image))); + return image; + } + // NOTE: Normally arch_thread_enter_userspace() never returns, that is // automatic variables with function scope will never be destroyed. { @@ -1572,7 +1628,7 @@ team_create_thread_start_internal(void* args) // enter userspace -- returns only in case of error return thread_enter_userspace_new_team(thread, (addr_t)entry, - programArgs, NULL); + programArgs, team->commpage_address); } @@ -1972,6 +2028,8 @@ fork_team(void) team->SetName(parentTeam->Name()); team->SetArgs(parentTeam->Args()); + team->commpage_address = parentTeam->commpage_address; + // Inherit the parent's user/group. inherit_parent_user_and_group(team, parentTeam); @@ -2035,7 +2093,7 @@ fork_team(void) while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) { if (info.area == parentTeam->user_data_area) { // don't clone the user area; just create a new one - status = create_team_user_data(team); + status = create_team_user_data(team, info.address); if (status != B_OK) break; @@ -3360,7 +3418,7 @@ team_allocate_user_thread(Team* team) while (true) { // enough space left? 
- size_t needed = ROUNDUP(sizeof(user_thread), 8); + size_t needed = ROUNDUP(sizeof(user_thread), 128); if (team->user_data_size - team->used_user_data < needed) { // try to resize the area if (resize_area(team->user_data_area, diff --git a/src/system/kernel/thread.cpp b/src/system/kernel/thread.cpp index 02a574e518..93fcfe5dbb 100644 --- a/src/system/kernel/thread.cpp +++ b/src/system/kernel/thread.cpp @@ -821,19 +821,10 @@ create_thread_user_stack(Team* team, Thread* thread, void* _stackBase, snprintf(nameBuffer, B_OS_NAME_LENGTH, "%s_%" B_PRId32 "_stack", thread->name, thread->id); - virtual_address_restrictions virtualRestrictions = {}; - if (thread->id == team->id) { - // The main thread gets a fixed position at the top of the stack - // address range. - stackBase = (uint8*)(USER_STACK_REGION + USER_STACK_REGION_SIZE - - areaSize); - virtualRestrictions.address_specification = B_EXACT_ADDRESS; + stackBase = (uint8*)USER_STACK_REGION; - } else { - // not a main thread - stackBase = (uint8*)(addr_t)USER_STACK_REGION; - virtualRestrictions.address_specification = B_BASE_ADDRESS; - } + virtual_address_restrictions virtualRestrictions = {}; + virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS; virtualRestrictions.address = (void*)stackBase; physical_address_restrictions physicalRestrictions = {}; diff --git a/src/system/kernel/util/Jamfile b/src/system/kernel/util/Jamfile index 02ce7ce690..a8f49b9d2a 100644 --- a/src/system/kernel/util/Jamfile +++ b/src/system/kernel/util/Jamfile @@ -14,6 +14,7 @@ KernelMergeObject kernel_util.o : queue.cpp ring_buffer.cpp RadixBitmap.cpp + Random.cpp : $(TARGET_KERNEL_PIC_CCFLAGS) -DUSING_LIBGCC ; diff --git a/src/system/kernel/util/Random.cpp b/src/system/kernel/util/Random.cpp new file mode 100644 index 0000000000..46ad125ff7 --- /dev/null +++ b/src/system/kernel/util/Random.cpp @@ -0,0 +1,128 @@ +/* + * Copyright 2013 Haiku, Inc. All rights reserved. + * Distributed under the terms of the MIT License. + * + * Authors: + * PaweÅ‚ Dziepak, pdziepak@quarnos.org + */ + + +#include + +#include + + +static uint32 sFastLast = 0; +static uint32 sLast = 0; +static uint32 sSecureLast = 0; + +// MD4 helper definitions, based on RFC 1320 +#define F(x, y, z) (((x) & (y)) | (~(x) & (z))) +#define G(x, y, z) (((x) & (y)) | ((x) & (z)) | ((y) & (z))) +#define H(x, y, z) ((x) ^ (y) ^ (z)) + +#define STEP(f, a, b, c, d, xk, s) \ + (a += f((b), (c), (d)) + (xk), a = (a << (s)) | (a >> (32 - (s)))) + + +// MD4 based hash function. Simplified in order to improve performance. 
+static uint32 +hash(uint32* data) +{ + const uint32 kMD4Round2 = 0x5a827999; + const uint32 kMD4Round3 = 0x6ed9eba1; + + uint32 a = 0x67452301; + uint32 b = 0xefcdab89; + uint32 c = 0x98badcfe; + uint32 d = 0x10325476; + + STEP(F, a, b, c, d, data[0], 3); + STEP(F, d, a, b, c, data[1], 7); + STEP(F, c, d, a, b, data[2], 11); + STEP(F, b, c, d, a, data[3], 19); + STEP(F, a, b, c, d, data[4], 3); + STEP(F, d, a, b, c, data[5], 7); + STEP(F, c, d, a, b, data[6], 11); + STEP(F, b, c, d, a, data[7], 19); + + STEP(G, a, b, c, d, data[1] + kMD4Round2, 3); + STEP(G, d, a, b, c, data[5] + kMD4Round2, 5); + STEP(G, c, d, a, b, data[6] + kMD4Round2, 9); + STEP(G, b, c, d, a, data[2] + kMD4Round2, 13); + STEP(G, a, b, c, d, data[3] + kMD4Round2, 3); + STEP(G, d, a, b, c, data[7] + kMD4Round2, 5); + STEP(G, c, d, a, b, data[4] + kMD4Round2, 9); + STEP(G, b, c, d, a, data[0] + kMD4Round2, 13); + + STEP(H, a, b, c, d, data[1] + kMD4Round3, 3); + STEP(H, d, a, b, c, data[6] + kMD4Round3, 9); + STEP(H, c, d, a, b, data[5] + kMD4Round3, 11); + STEP(H, b, c, d, a, data[2] + kMD4Round3, 15); + STEP(H, a, b, c, d, data[3] + kMD4Round3, 3); + STEP(H, d, a, b, c, data[4] + kMD4Round3, 9); + STEP(H, c, d, a, b, data[7] + kMD4Round3, 11); + STEP(H, b, c, d, a, data[0] + kMD4Round3, 15); + + return b; +} + + +// In the following functions there are race conditions when many threads +// attempt to update static variable last. However, since such conflicts +// are non-deterministic it is not a big problem. + + +// A simple linear congruential generator +unsigned int +fast_random_value() +{ + if (sFastLast == 0) + sFastLast = system_time(); + + uint32 random = sFastLast * 1103515245 + 12345; + sFastLast = random; + return (random >> 16) & 0x7fff; +} + + +// Taken from "Random number generators: good ones are hard to find", +// Park and Miller, Communications of the ACM, vol. 31, no. 10, +// October 1988, p. 1195. +unsigned int +random_value() +{ + if (sLast == 0) + sLast = system_time(); + + uint32 hi = sLast / 127773; + uint32 lo = sLast % 127773; + + int32 random = 16807 * lo - 2836 * hi; + if (random <= 0) + random += MAX_RANDOM_VALUE; + sLast = random; + return random % (MAX_RANDOM_VALUE + 1); +} + + +unsigned int +secure_random_value() +{ + static vint32 count = 0; + + uint32 data[8]; + data[0] = atomic_add(&count, 1); + data[1] = system_time(); + data[2] = find_thread(NULL); + data[3] = smp_get_current_cpu(); + data[4] = smp_get_num_cpus(); + data[5] = sFastLast; + data[6] = sLast; + data[7] = sSecureLast; + + uint32 random = hash(data); + sSecureLast = random; + return random; +} + diff --git a/src/system/kernel/vm/VMUserAddressSpace.cpp b/src/system/kernel/vm/VMUserAddressSpace.cpp index 32730a41cb..71d48a6f51 100644 --- a/src/system/kernel/vm/VMUserAddressSpace.cpp +++ b/src/system/kernel/vm/VMUserAddressSpace.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -29,6 +30,15 @@ #endif +#ifdef B_HAIKU_64_BIT +const addr_t VMUserAddressSpace::kMaxRandomize = 0x8000000000ul; +const addr_t VMUserAddressSpace::kMaxInitialRandomize = 0x20000000000ul; +#else +const addr_t VMUserAddressSpace::kMaxRandomize = 0x800000ul; +const addr_t VMUserAddressSpace::kMaxInitialRandomize = 0x2000000ul; +#endif + + /*! Verifies that an area with the given aligned base and size fits into the spot defined by base and limit and checks for overflows. 
*/ @@ -40,6 +50,14 @@ is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit) } +static inline bool +is_randomized(uint32 addressSpec) +{ + return addressSpec == B_RANDOMIZED_ANY_ADDRESS + || addressSpec == B_RANDOMIZED_BASE_ADDRESS; +} + + VMUserAddressSpace::VMUserAddressSpace(team_id id, addr_t base, size_t size) : VMAddressSpace(id, base, size, "address space"), @@ -137,6 +155,7 @@ VMUserAddressSpace::InsertArea(VMArea* _area, size_t size, break; case B_BASE_ADDRESS: + case B_RANDOMIZED_BASE_ADDRESS: searchBase = (addr_t)addressRestrictions->address; searchEnd = fEndAddress; break; @@ -144,11 +163,8 @@ VMUserAddressSpace::InsertArea(VMArea* _area, size_t size, case B_ANY_ADDRESS: case B_ANY_KERNEL_ADDRESS: case B_ANY_KERNEL_BLOCK_ADDRESS: + case B_RANDOMIZED_ANY_ADDRESS: searchBase = fBase; - // TODO: remove this again when vm86 mode is moved into the kernel - // completely (currently needs a userland address space!) - if (searchBase == USER_BASE) - searchBase = USER_BASE_ANY; searchEnd = fEndAddress; break; @@ -156,6 +172,11 @@ VMUserAddressSpace::InsertArea(VMArea* _area, size_t size, return B_BAD_VALUE; } + // TODO: remove this again when vm86 mode is moved into the kernel + // completely (currently needs a userland address space!) + if (addressRestrictions->address_specification != B_EXACT_ADDRESS) + searchBase = max_c(searchBase, USER_BASE_ANY); + status = _InsertAreaSlot(searchBase, size, searchEnd, addressRestrictions->address_specification, addressRestrictions->alignment, area, allocationFlags); @@ -371,6 +392,29 @@ VMUserAddressSpace::Dump() const } +addr_t +VMUserAddressSpace::_RandomizeAddress(addr_t start, addr_t end, + size_t alignment, bool initial) +{ + ASSERT((start & addr_t(alignment - 1)) == 0); + + if (start == end) + return start; + + addr_t range = end - start; + if (initial) + range = min_c(range, kMaxInitialRandomize); + else + range = min_c(range, kMaxRandomize); + + addr_t random = secure_get_random(); + random %= range; + random &= ~addr_t(alignment - 1); + + return start + random; +} + + /*! Finds a reserved area that covers the region spanned by \a start and \a size, inserts the \a area into that region and makes sure that there are reserved regions for the remaining parts. @@ -459,6 +503,7 @@ VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end, VMUserArea* last = NULL; VMUserArea* next; bool foundSpot = false; + addr_t originalStart = 0; TRACE(("VMUserAddressSpace::_InsertAreaSlot: address space %p, start " "0x%lx, size %ld, end 0x%lx, addressSpec %" B_PRIu32 ", area %p\n", @@ -491,6 +536,11 @@ VMUserAddressSpace::_InsertAreaSlot(addr_t start, addr_t size, addr_t end, start = ROUNDUP(start, alignment); + if (addressSpec == B_RANDOMIZED_BASE_ADDRESS) { + originalStart = start; + start = _RandomizeAddress(start, end - size, alignment, true); + } + // walk up to the spot where we should start searching second_chance: VMUserAreaList::Iterator it = fAreas.GetIterator(); @@ -510,13 +560,23 @@ second_chance: case B_ANY_ADDRESS: case B_ANY_KERNEL_ADDRESS: case B_ANY_KERNEL_BLOCK_ADDRESS: + case B_RANDOMIZED_ANY_ADDRESS: + case B_BASE_ADDRESS: + case B_RANDOMIZED_BASE_ADDRESS: { // find a hole big enough for a new area if (last == NULL) { // see if we can build it at the beginning of the virtual map addr_t alignedBase = ROUNDUP(start, alignment); - if (is_valid_spot(start, alignedBase, size, - next == NULL ? end : next->Base())) { + addr_t nextBase = next == NULL ? 
end : min_c(next->Base(), end); + if (is_valid_spot(start, alignedBase, size, nextBase)) { + + addr_t rangeEnd = min_c(nextBase - size, end); + if (is_randomized(addressSpec)) { + alignedBase = _RandomizeAddress(alignedBase, rangeEnd, + alignment); + } + foundSpot = true; area->SetBase(alignedBase); break; @@ -527,11 +587,19 @@ second_chance: } // keep walking - while (next != NULL) { + while (next != NULL && next->Base() + size - 1 <= end) { addr_t alignedBase = ROUNDUP(last->Base() + last->Size(), alignment); + addr_t nextBase = min_c(end, next->Base()); if (is_valid_spot(last->Base() + (last->Size() - 1), - alignedBase, size, next->Base())) { + alignedBase, size, nextBase)) { + + addr_t rangeEnd = min_c(nextBase - size, end); + if (is_randomized(addressSpec)) { + alignedBase = _RandomizeAddress(alignedBase, + rangeEnd, alignment); + } + foundSpot = true; area->SetBase(alignedBase); break; @@ -548,10 +616,33 @@ second_chance: alignment); if (is_valid_spot(last->Base() + (last->Size() - 1), alignedBase, size, end)) { + + if (is_randomized(addressSpec)) { + alignedBase = _RandomizeAddress(alignedBase, end - size, + alignment); + } + // got a spot foundSpot = true; area->SetBase(alignedBase); break; + } else if (addressSpec == B_BASE_ADDRESS + || addressSpec == B_RANDOMIZED_BASE_ADDRESS) { + + // we didn't find a free spot in the requested range, so we'll + // try again without any restrictions + start = USER_BASE_ANY; + if (!is_randomized(addressSpec)) + addressSpec = B_ANY_ADDRESS; + else if (start == originalStart) + addressSpec = B_RANDOMIZED_ANY_ADDRESS; + else { + start = originalStart; + addressSpec = B_RANDOMIZED_BASE_ADDRESS; + } + + last = NULL; + goto second_chance; } else if (area->id != RESERVED_AREA_ID) { // We didn't find a free spot - if there are any reserved areas, // we can now test those for free space @@ -562,7 +653,8 @@ second_chance: if (next->id != RESERVED_AREA_ID) { last = next; continue; - } + } else if (next->Base() + size - 1 > end) + break; // TODO: take free space after the reserved area into // account! 
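
The randomized placement above draws its entropy from the kernel's new generators in util/Random.cpp (reached through secure_get_random()) and caps the randomized range at kMaxRandomize, or kMaxInitialRandomize for the first mapping. A minimal standalone sketch of that computation follows; the stand-in entropy source and the helper name are illustrative only, and the alignment is assumed to be a nonzero power of two, as in the patch.

#include <cstdint>
#include <random>

typedef uintptr_t addr_t;

// Stand-in entropy source for this sketch only; the kernel uses the
// generators added in util/Random.cpp instead.
static addr_t
stand_in_random()
{
	static std::mt19937_64 generator{std::random_device{}()};
	return (addr_t)generator();
}

// Sketch of the randomized base selection done by
// VMUserAddressSpace::_RandomizeAddress(): pick an address in [start, end],
// cap the randomized range and keep the required (power-of-two) alignment.
static addr_t
randomize_address(addr_t start, addr_t end, size_t alignment, addr_t maxRange)
{
	if (start == end)
		return start;

	addr_t range = end - start;
	if (range > maxRange)
		range = maxRange;

	addr_t random = stand_in_random() % range;
	random &= ~addr_t(alignment - 1);

	return start + random;
}

With 4 KiB pages these caps amount to roughly 11 bits of entropy per mapping (13 for the initial one) on 32-bit builds, and 27 (29 for the initial one) bits on 64-bit builds.
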
@@ -582,23 +674,49 @@ second_chance: if ((next->protection & RESERVED_AVOID_BASE) == 0 && alignedBase == next->Base() && next->Size() >= size) { + + addr_t rangeEnd = min_c(next->Size() - size, end); + if (is_randomized(addressSpec)) { + alignedBase = _RandomizeAddress(next->Base(), + rangeEnd, alignment); + } + addr_t offset = alignedBase - next->Base(); + // The new area will be placed at the beginning of the // reserved area and the reserved area will be offset // and resized foundSpot = true; - next->SetBase(next->Base() + size); - next->SetSize(next->Size() - size); + next->SetBase(next->Base() + offset + size); + next->SetSize(next->Size() - offset - size); area->SetBase(alignedBase); break; } if (is_valid_spot(next->Base(), alignedBase, size, - next->Base() + (next->Size() - 1))) { + min_c(next->Base() + next->Size() - 1, end))) { // The new area will be placed at the end of the // reserved area, and the reserved area will be resized // to make space - alignedBase = ROUNDDOWN( - next->Base() + next->Size() - size, alignment); + + if (is_randomized(addressSpec)) { + addr_t alignedNextBase = ROUNDUP(next->Base(), + alignment); + + addr_t startRange = next->Base() + next->Size(); + startRange -= size + kMaxRandomize; + startRange = ROUNDDOWN(startRange, alignment); + + startRange = max_c(startRange, alignedNextBase); + + addr_t rangeEnd + = min_c(next->Base() + next->Size() - size, + end); + alignedBase = _RandomizeAddress(startRange, + rangeEnd, alignment); + } else { + alignedBase = ROUNDDOWN( + next->Base() + next->Size() - size, alignment); + } foundSpot = true; next->SetSize(alignedBase - next->Base()); @@ -610,55 +728,10 @@ second_chance: last = next; } } + break; } - case B_BASE_ADDRESS: - { - // find a hole big enough for a new area beginning with "start" - if (last == NULL) { - // see if we can build it at the beginning of the specified - // start - if (next == NULL || next->Base() > start + (size - 1)) { - foundSpot = true; - area->SetBase(start); - break; - } - - last = next; - next = it.Next(); - } - - // keep walking - while (next != NULL) { - if (next->Base() - (last->Base() + last->Size()) >= size) { - // we found a spot (it'll be filled up below) - break; - } - - last = next; - next = it.Next(); - } - - addr_t lastEnd = last->Base() + (last->Size() - 1); - if (next != NULL || end - lastEnd >= size) { - // got a spot - foundSpot = true; - if (lastEnd < start) - area->SetBase(start); - else - area->SetBase(lastEnd + 1); - break; - } - - // we didn't find a free spot in the requested range, so we'll - // try again without any restrictions - start = fBase; - addressSpec = B_ANY_ADDRESS; - last = NULL; - goto second_chance; - } - case B_EXACT_ADDRESS: // see if we can create it exactly here if ((last == NULL || last->Base() + (last->Size() - 1) < start) diff --git a/src/system/kernel/vm/VMUserAddressSpace.h b/src/system/kernel/vm/VMUserAddressSpace.h index fe5d37c246..0aa42612b6 100644 --- a/src/system/kernel/vm/VMUserAddressSpace.h +++ b/src/system/kernel/vm/VMUserAddressSpace.h @@ -53,6 +53,9 @@ public: virtual void Dump() const; private: + static addr_t _RandomizeAddress(addr_t start, addr_t end, + size_t alignment, bool initial = false); + status_t _InsertAreaIntoReservedRegion(addr_t start, size_t size, VMUserArea* area, uint32 allocationFlags); @@ -62,6 +65,9 @@ private: uint32 allocationFlags); private: + static const addr_t kMaxRandomize; + static const addr_t kMaxInitialRandomize; + VMUserAreaList fAreas; mutable VMUserArea* fAreaHint; }; diff --git 
a/src/system/kernel/vm/vm.cpp b/src/system/kernel/vm/vm.cpp index 6e809b66a0..f5aa4e2184 100644 --- a/src/system/kernel/vm/vm.cpp +++ b/src/system/kernel/vm/vm.cpp @@ -267,13 +267,14 @@ static cache_info* sCacheInfoTable; static void delete_area(VMAddressSpace* addressSpace, VMArea* area, bool addressSpaceCleanup); static status_t vm_soft_fault(VMAddressSpace* addressSpace, addr_t address, - bool isWrite, bool isUser, vm_page** wirePage, + bool isWrite, bool isExecute, bool isUser, vm_page** wirePage, VMAreaWiredRange* wiredRange = NULL); static status_t map_backing_store(VMAddressSpace* addressSpace, VMCache* cache, off_t offset, const char* areaName, addr_t size, int wiring, int protection, int mapping, uint32 flags, const virtual_address_restrictions* addressRestrictions, bool kernel, VMArea** _area, void** _virtualAddress); +static void fix_protection(uint32* protection); // #pragma mark - @@ -315,6 +316,7 @@ enum { PAGE_FAULT_ERROR_KERNEL_ONLY, PAGE_FAULT_ERROR_WRITE_PROTECTED, PAGE_FAULT_ERROR_READ_PROTECTED, + PAGE_FAULT_ERROR_EXECUTE_PROTECTED, PAGE_FAULT_ERROR_KERNEL_BAD_USER_MEMORY, PAGE_FAULT_ERROR_NO_ADDRESS_SPACE }; @@ -346,6 +348,10 @@ public: case PAGE_FAULT_ERROR_READ_PROTECTED: out.Print("page fault error: area: %ld, read protected", fArea); break; + case PAGE_FAULT_ERROR_EXECUTE_PROTECTED: + out.Print("page fault error: area: %ld, execute protected", + fArea); + break; case PAGE_FAULT_ERROR_KERNEL_BAD_USER_MEMORY: out.Print("page fault error: kernel touching bad user memory"); break; @@ -1219,6 +1225,8 @@ vm_create_anonymous_area(team_id team, const char *name, addr_t size, case B_BASE_ADDRESS: case B_ANY_KERNEL_ADDRESS: case B_ANY_KERNEL_BLOCK_ADDRESS: + case B_RANDOMIZED_ANY_ADDRESS: + case B_RANDOMIZED_BASE_ADDRESS: break; default: @@ -2520,10 +2528,12 @@ vm_copy_area(team_id team, const char* name, void** _address, } -static status_t +status_t vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection, bool kernel) { + fix_protection(&newProtection); + TRACE(("vm_set_area_protection(team = %#" B_PRIx32 ", area = %#" B_PRIx32 ", protection = %#" B_PRIx32 ")\n", team, areaID, newProtection)); @@ -3992,8 +4002,8 @@ forbid_page_faults(void) status_t -vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite, bool isUser, - addr_t* newIP) +vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite, bool isExecute, + bool isUser, addr_t* newIP) { FTRACE(("vm_page_fault: page fault at 0x%lx, ip 0x%lx\n", address, faultAddress)); @@ -4036,8 +4046,8 @@ vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite, bool isUser, } if (status == B_OK) { - status = vm_soft_fault(addressSpace, pageAddress, isWrite, isUser, - NULL); + status = vm_soft_fault(addressSpace, pageAddress, isWrite, isExecute, + isUser, NULL); } if (status < B_OK) { @@ -4072,8 +4082,8 @@ vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite, bool isUser, "\"%s\" (%" B_PRId32 ") tried to %s address %#lx, ip %#lx " "(\"%s\" +%#lx)\n", thread->name, thread->id, thread->team->Name(), thread->team->id, - isWrite ? "write" : "read", address, faultAddress, - area ? area->name : "???", faultAddress - (area ? + isWrite ? "write" : (isExecute ? "execute" : "read"), address, + faultAddress, area ? area->name : "???", faultAddress - (area ? area->Base() : 0x0)); // We can print a stack trace of the userland thread here. 
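
vm_page_fault() now describes the access type with three flags, so the architecture fault handlers have to report instruction fetches separately from reads and writes. A hedged sketch of how such a handler could derive the flags from an x86 page-fault error code is shown below; the struct and function names are illustrative, and the arch-side change itself is not part of this excerpt.

#include <cstdint>

// x86 page-fault error code: bit 1 is set for write accesses, bit 2 for
// faults raised in user mode, bit 4 for instruction fetches (reported by the
// CPU when NX is enabled).
struct FaultAccess {
	bool isWrite;
	bool isExecute;
	bool isUser;
};

static FaultAccess
decode_x86_page_fault(uint32_t errorCode)
{
	FaultAccess access;
	access.isWrite = (errorCode & (1u << 1)) != 0;
	access.isUser = (errorCode & (1u << 2)) != 0;
	access.isExecute = (errorCode & (1u << 4)) != 0;
	return access;
}

The decoded flags would then be forwarded unchanged to vm_page_fault(address, faultAddress, isWrite, isExecute, isUser, &newIP).
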
@@ -4362,7 +4372,8 @@ fault_get_page(PageFaultContext& context) */ static status_t vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress, - bool isWrite, bool isUser, vm_page** wirePage, VMAreaWiredRange* wiredRange) + bool isWrite, bool isExecute, bool isUser, vm_page** wirePage, + VMAreaWiredRange* wiredRange) { FTRACE(("vm_soft_fault: thid 0x%" B_PRIx32 " address 0x%" B_PRIxADDR ", " "isWrite %d, isUser %d\n", thread_get_current_thread_id(), @@ -4417,7 +4428,16 @@ vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress, VMPageFaultTracing::PAGE_FAULT_ERROR_WRITE_PROTECTED)); status = B_PERMISSION_DENIED; break; - } else if (!isWrite && (protection + } else if (isExecute && (protection + & (B_EXECUTE_AREA + | (isUser ? 0 : B_KERNEL_EXECUTE_AREA))) == 0) { + dprintf("instruction fetch attempted on execute-protected area 0x%" + B_PRIx32 " at %p\n", area->id, (void*)originalAddress); + TPF(PageFaultError(area->id, + VMPageFaultTracing::PAGE_FAULT_ERROR_EXECUTE_PROTECTED)); + status = B_PERMISSION_DENIED; + break; + } else if (!isWrite && !isExecute && (protection & (B_READ_AREA | (isUser ? 0 : B_KERNEL_READ_AREA))) == 0) { dprintf("read access attempted on read-protected area 0x%" B_PRIx32 " at %p\n", area->id, (void*)originalAddress); @@ -4754,7 +4774,8 @@ vm_set_area_memory_type(area_id id, phys_addr_t physicalBase, uint32 type) /*! This function enforces some protection properties: - - if B_WRITE_AREA is set, B_WRITE_KERNEL_AREA is set as well + - if B_WRITE_AREA is set, B_KERNEL_WRITE_AREA is set as well + - if B_EXECUTE_AREA is set, B_KERNEL_EXECUTE_AREA is set as well - if only B_READ_AREA has been set, B_KERNEL_READ_AREA is also set - if no protection is specified, it defaults to B_KERNEL_READ_AREA and B_KERNEL_WRITE_AREA. @@ -4768,6 +4789,8 @@ fix_protection(uint32* protection) *protection |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA; else *protection |= B_KERNEL_READ_AREA; + if ((*protection & B_EXECUTE_AREA) != 0) + *protection |= B_KERNEL_EXECUTE_AREA; } } @@ -5220,8 +5243,8 @@ vm_wire_page(team_id team, addr_t address, bool writable, cacheChainLocker.Unlock(); addressSpaceLocker.Unlock(); - error = vm_soft_fault(addressSpace, pageAddress, writable, isUser, - &page, &info->range); + error = vm_soft_fault(addressSpace, pageAddress, writable, false, + isUser, &page, &info->range); if (error != B_OK) { // The page could not be mapped -- clean up. 
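
For reference, the permission test vm_soft_fault() applies after this change can be restated as a single predicate: a write fault needs a write bit, an instruction fetch needs an execute bit, and any other access needs a read bit, with kernel-mode faults also satisfied by the kernel-only counterparts. The sketch below uses illustrative bit values, not the ones from OS.h.

#include <cstdint>

// Illustrative protection bits for this sketch; the real values come from
// OS.h and are not restated here.
enum {
	kReadArea = 1 << 0,
	kWriteArea = 1 << 1,
	kExecuteArea = 1 << 2,
	kKernelReadArea = 1 << 3,
	kKernelWriteArea = 1 << 4,
	kKernelExecuteArea = 1 << 5
};

// Condensed restatement of the write / execute / read checks performed in
// vm_soft_fault() above.
static bool
access_allowed(uint32_t protection, bool isWrite, bool isExecute, bool isUser)
{
	if (isWrite)
		return (protection & (kWriteArea | (isUser ? 0 : kKernelWriteArea))) != 0;
	if (isExecute)
		return (protection & (kExecuteArea | (isUser ? 0 : kKernelExecuteArea))) != 0;
	return (protection & (kReadArea | (isUser ? 0 : kKernelReadArea))) != 0;
}
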
@@ -5399,7 +5422,7 @@ lock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags) addressSpaceLocker.Unlock(); error = vm_soft_fault(addressSpace, nextAddress, writable, - isUser, &page, range); + false, isUser, &page, range); addressSpaceLocker.Lock(); cacheChainLocker.SetTo(vm_area_get_locked_cache(area)); @@ -5788,8 +5811,6 @@ _get_next_area_info(team_id team, ssize_t* cookie, area_info* info, size_t size) status_t set_area_protection(area_id area, uint32 newProtection) { - fix_protection(&newProtection); - return vm_set_area_protection(VMAddressSpace::KernelID(), area, newProtection, true); } @@ -6017,8 +6038,6 @@ _user_set_area_protection(area_id area, uint32 newProtection) if ((newProtection & ~B_USER_PROTECTION) != 0) return B_BAD_VALUE; - fix_protection(&newProtection); - return vm_set_area_protection(VMAddressSpace::CurrentID(), area, newProtection, false); } @@ -6125,6 +6144,11 @@ _user_create_area(const char* userName, void** userAddress, uint32 addressSpec, && IS_KERNEL_ADDRESS(address)) return B_BAD_VALUE; + if (addressSpec == B_ANY_ADDRESS) + addressSpec = B_RANDOMIZED_ANY_ADDRESS; + if (addressSpec == B_BASE_ADDRESS) + addressSpec = B_RANDOMIZED_BASE_ADDRESS; + fix_protection(&protection); virtual_address_restrictions virtualRestrictions = {}; diff --git a/src/system/ldscripts/x86/runtime_loader.ld b/src/system/ldscripts/x86/runtime_loader.ld index 2bc53c699f..ae1062f74a 100644 --- a/src/system/ldscripts/x86/runtime_loader.ld +++ b/src/system/ldscripts/x86/runtime_loader.ld @@ -5,7 +5,7 @@ ENTRY(runtime_loader) SEARCH_DIR("libgcc"); SECTIONS { - . = 0x00100000 + SIZEOF_HEADERS; + . = 0x00000000 + SIZEOF_HEADERS; .interp : { *(.interp) } .hash : { *(.hash) } diff --git a/src/system/ldscripts/x86_64/runtime_loader.ld b/src/system/ldscripts/x86_64/runtime_loader.ld index a83a6de457..ee0b42f2e3 100644 --- a/src/system/ldscripts/x86_64/runtime_loader.ld +++ b/src/system/ldscripts/x86_64/runtime_loader.ld @@ -5,7 +5,7 @@ ENTRY(runtime_loader) SEARCH_DIR("libgcc"); SECTIONS { - . = 0x00200000 + SIZEOF_HEADERS; + . = 0x00000000 + SIZEOF_HEADERS; .interp : { *(.interp) } .hash : { *(.hash) } diff --git a/src/system/libroot/libroot_init.c b/src/system/libroot/libroot_init.c index 68137366eb..cf6fc668fc 100644 --- a/src/system/libroot/libroot_init.c +++ b/src/system/libroot/libroot_init.c @@ -24,6 +24,8 @@ struct rld_export *__gRuntimeLoader = NULL; // This little bugger is set to something meaningful by the runtime loader // Ugly, eh? 
+const void* __gCommPageAddress; + char *__progname = NULL; int __libc_argc; char **__libc_argv; @@ -44,6 +46,8 @@ void initialize_before(image_id imageID) { char *programPath = __gRuntimeLoader->program_args->args[0]; + __gCommPageAddress = __gRuntimeLoader->commpage_address; + if (programPath) { if ((__progname = strrchr(programPath, '/')) == NULL) __progname = programPath; @@ -62,7 +66,7 @@ initialize_before(image_id imageID) pthread_self()->id = find_thread(NULL); - __init_time(); + __init_time((addr_t)__gCommPageAddress); __init_heap(); __init_env(__gRuntimeLoader->program_args); __init_heap_post_env(); diff --git a/src/system/libroot/os/arch/x86/syscalls.inc b/src/system/libroot/os/arch/x86/syscalls.inc index 95aa740535..517d650d09 100644 --- a/src/system/libroot/os/arch/x86/syscalls.inc +++ b/src/system/libroot/os/arch/x86/syscalls.inc @@ -17,11 +17,13 @@ #include #include -#define _SYSCALL(name, n) \ - .align 8; \ - FUNCTION(name): \ - movl $n,%eax; \ - jmp *(USER_COMMPAGE_ADDR + COMMPAGE_ENTRY_X86_SYSCALL * 4); \ +#define _SYSCALL(name, n) \ + .align 8; \ + FUNCTION(name): \ + movl $n, %eax; \ + movl __gCommPageAddress, %edx; \ + addl 4 * COMMPAGE_ENTRY_X86_SYSCALL(%edx), %edx; \ + jmp %edx; \ FUNCTION_END(name) #define SYSCALL0(name, n) _SYSCALL(name, n) diff --git a/src/system/libroot/os/time.cpp b/src/system/libroot/os/time.cpp index 19d29536a5..7b882cd8e7 100644 --- a/src/system/libroot/os/time.cpp +++ b/src/system/libroot/os/time.cpp @@ -24,10 +24,11 @@ static struct real_time_data* sRealTimeData; void -__init_time(void) +__init_time(addr_t commPageTable) { sRealTimeData = (struct real_time_data*) - USER_COMMPAGE_TABLE[COMMPAGE_ENTRY_REAL_TIME_DATA]; + (((addr_t*)commPageTable)[COMMPAGE_ENTRY_REAL_TIME_DATA] + + commPageTable); __arch_init_time(sRealTimeData, false); } diff --git a/src/system/libroot/posix/malloc/arch-specific.cpp b/src/system/libroot/posix/malloc/arch-specific.cpp index 0bcaac8fcd..54d2fe00ad 100644 --- a/src/system/libroot/posix/malloc/arch-specific.cpp +++ b/src/system/libroot/posix/malloc/arch-specific.cpp @@ -99,12 +99,12 @@ __init_heap(void) // size of the heap is guaranteed until the space is really needed. sHeapBase = (void *)kHeapReservationBase; status_t status = _kern_reserve_address_range((addr_t *)&sHeapBase, - B_EXACT_ADDRESS, kHeapReservationSize); + B_RANDOMIZED_BASE_ADDRESS, kHeapReservationSize); if (status != B_OK) sHeapBase = NULL; sHeapArea = create_area("heap", (void **)&sHeapBase, - status == B_OK ? B_EXACT_ADDRESS : B_BASE_ADDRESS, + status == B_OK ? B_EXACT_ADDRESS : B_RANDOMIZED_BASE_ADDRESS, kInitialHeapSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA); if (sHeapArea < B_OK) return sHeapArea; @@ -271,8 +271,8 @@ hoardSbrk(long size) // allocation. 
if (area < 0) { base = (void*)(sFreeHeapBase + sHeapAreaSize); - area = create_area("heap", &base, B_BASE_ADDRESS, newHeapSize, - B_NO_LOCK, B_READ_AREA | B_WRITE_AREA); + area = create_area("heap", &base, B_RANDOMIZED_BASE_ADDRESS, + newHeapSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA); } if (area < 0) { diff --git a/src/system/libroot/posix/string/arch/x86/arch_string.S b/src/system/libroot/posix/string/arch/x86/arch_string.S index 4ab85e7da0..1518baa207 100644 --- a/src/system/libroot/posix/string/arch/x86/arch_string.S +++ b/src/system/libroot/posix/string/arch/x86/arch_string.S @@ -10,9 +10,13 @@ .align 4 FUNCTION(memcpy): - jmp *(USER_COMMPAGE_ADDR + COMMPAGE_ENTRY_X86_MEMCPY * 4) + movl __gCommPageAddress, %eax + addl 4 * COMMPAGE_ENTRY_X86_MEMCPY(%eax), %eax + jmp *%eax FUNCTION_END(memcpy) FUNCTION(memset): - jmp *(USER_COMMPAGE_ADDR + COMMPAGE_ENTRY_X86_MEMSET * 4) + movl __gCommPageAddress, %eax + addl 4 * COMMPAGE_ENTRY_X86_MEMSET(%eax), %eax + jmp *%eax FUNCTION_END(memset) diff --git a/src/system/libroot/posix/string/arch/x86_64/arch_string.S b/src/system/libroot/posix/string/arch/x86_64/arch_string.S index 8bbadb31ca..e1273fdc3c 100644 --- a/src/system/libroot/posix/string/arch/x86_64/arch_string.S +++ b/src/system/libroot/posix/string/arch/x86_64/arch_string.S @@ -8,10 +8,15 @@ FUNCTION(memcpy): - jmp *(USER_COMMPAGE_ADDR + COMMPAGE_ENTRY_X86_MEMCPY * 8) + movq __gCommPageAddress@GOTPCREL(%rip), %rax + movq (%rax), %rax + addq 8 * COMMPAGE_ENTRY_X86_MEMCPY(%rax), %rax + jmp *%rax FUNCTION_END(memcpy) - FUNCTION(memset): - jmp *(USER_COMMPAGE_ADDR + COMMPAGE_ENTRY_X86_MEMSET * 8) + movq __gCommPageAddress@GOTPCREL(%rip), %rax + movq (%rax), %rax + addq 8 * COMMPAGE_ENTRY_X86_MEMSET(%rax), %rax + jmp *%rax FUNCTION_END(memset) diff --git a/src/system/libroot/posix/sys/mman.cpp b/src/system/libroot/posix/sys/mman.cpp index 17866ee9ed..68dbf0a7a2 100644 --- a/src/system/libroot/posix/sys/mman.cpp +++ b/src/system/libroot/posix/sys/mman.cpp @@ -113,9 +113,13 @@ mmap(void* address, size_t length, int protection, int flags, int fd, int mapping = (flags & MAP_SHARED) != 0 ? REGION_NO_PRIVATE_MAP : REGION_PRIVATE_MAP; - uint32 addressSpec = address == NULL ? 
B_ANY_ADDRESS : B_BASE_ADDRESS; + uint32 addressSpec; if ((flags & MAP_FIXED) != 0) addressSpec = B_EXACT_ADDRESS; + else if (address != NULL) + addressSpec = B_RANDOMIZED_BASE_ADDRESS; + else + addressSpec = B_RANDOMIZED_ANY_ADDRESS; uint32 areaProtection = 0; if ((protection & PROT_READ) != 0) diff --git a/src/system/runtime_loader/Jamfile b/src/system/runtime_loader/Jamfile index d687912673..11bc81122e 100644 --- a/src/system/runtime_loader/Jamfile +++ b/src/system/runtime_loader/Jamfile @@ -91,7 +91,7 @@ Ld runtime_loader : $(TARGET_STATIC_LIBSUPC++) $(TARGET_GCC_LIBGCC) : $(HAIKU_TOP)/src/system/ldscripts/$(TARGET_ARCH)/runtime_loader.ld - : --no-undefined + : --no-undefined -shared -soname=runtime_loader ; HaikuSubInclude arch $(TARGET_ARCH) ; diff --git a/src/system/runtime_loader/elf.cpp b/src/system/runtime_loader/elf.cpp index 9415d73b55..edaf863b54 100644 --- a/src/system/runtime_loader/elf.cpp +++ b/src/system/runtime_loader/elf.cpp @@ -1031,7 +1031,7 @@ rldelf_init(void) runtime_loader_debug_area *area; area_id areaID = _kern_create_area(RUNTIME_LOADER_DEBUG_AREA_NAME, - (void **)&area, B_ANY_ADDRESS, size, B_NO_LOCK, + (void **)&area, B_RANDOMIZED_ANY_ADDRESS, size, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA); if (areaID < B_OK) { FATAL("Failed to create debug area.\n"); diff --git a/src/system/runtime_loader/export.cpp b/src/system/runtime_loader/export.cpp index 62275c5974..adfd2a4dd9 100644 --- a/src/system/runtime_loader/export.cpp +++ b/src/system/runtime_loader/export.cpp @@ -65,4 +65,5 @@ void rldexport_init(void) { gRuntimeLoader.program_args = gProgramArgs; + gRuntimeLoader.commpage_address = __gCommPageAddress; } diff --git a/src/system/runtime_loader/heap.cpp b/src/system/runtime_loader/heap.cpp index 02dc286b02..8cd57abf54 100644 --- a/src/system/runtime_loader/heap.cpp +++ b/src/system/runtime_loader/heap.cpp @@ -178,8 +178,8 @@ static status_t add_area(size_t size) { void *base; - area_id area = _kern_create_area("rld heap", &base, B_ANY_ADDRESS, size, - B_NO_LOCK, B_READ_AREA | B_WRITE_AREA); + area_id area = _kern_create_area("rld heap", &base, + B_RANDOMIZED_ANY_ADDRESS, size, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA); if (area < B_OK) return area; diff --git a/src/system/runtime_loader/images.cpp b/src/system/runtime_loader/images.cpp index 6c6c460285..3bbe34bde2 100644 --- a/src/system/runtime_loader/images.cpp +++ b/src/system/runtime_loader/images.cpp @@ -165,7 +165,7 @@ topological_sort(image_t* image, uint32 slot, image_t** initList, /*! Finds the load address and address specifier of the given image region. 
*/ static void -get_image_region_load_address(image_t* image, uint32 index, int32 lastDelta, +get_image_region_load_address(image_t* image, uint32 index, long lastDelta, bool fixed, addr_t& loadAddress, uint32& addressSpecifier) { if (image->dynamic_ptr != 0 && !fixed) { @@ -173,7 +173,7 @@ get_image_region_load_address(image_t* image, uint32 index, int32 lastDelta, if (index == 0) { // but only the first segment gets a free ride loadAddress = RLD_PROGRAM_BASE; - addressSpecifier = B_BASE_ADDRESS; + addressSpecifier = B_RANDOMIZED_BASE_ADDRESS; } else { loadAddress = image->regions[index].vmstart + lastDelta; addressSpecifier = B_EXACT_ADDRESS; @@ -298,7 +298,7 @@ map_image(int fd, char const* path, image_t* image, bool fixed) addr_t loadAddress; size_t reservedSize = 0; size_t length = 0; - uint32 addressSpecifier = B_ANY_ADDRESS; + uint32 addressSpecifier = B_RANDOMIZED_ANY_ADDRESS; for (uint32 i = 0; i < image->num_regions; i++) { // for BeOS compatibility: if we load an old BeOS executable, we diff --git a/src/system/runtime_loader/runtime_loader.cpp b/src/system/runtime_loader/runtime_loader.cpp index 3389103901..140a7574b6 100644 --- a/src/system/runtime_loader/runtime_loader.cpp +++ b/src/system/runtime_loader/runtime_loader.cpp @@ -22,6 +22,7 @@ struct user_space_program_args *gProgramArgs; +void *__gCommPageAddress; static const char * @@ -366,12 +367,13 @@ out: specified by its ld-script. */ int -runtime_loader(void *_args) +runtime_loader(void* _args, void* commpage) { void *entry = NULL; int returnCode; gProgramArgs = (struct user_space_program_args *)_args; + __gCommPageAddress = commpage; // Relocate the args and env arrays -- they are organized in a contiguous // buffer which the kernel just copied into user space without adjusting the diff --git a/src/system/runtime_loader/runtime_loader_private.h b/src/system/runtime_loader/runtime_loader_private.h index f3f6dd38e0..2720a659a8 100644 --- a/src/system/runtime_loader/runtime_loader_private.h +++ b/src/system/runtime_loader/runtime_loader_private.h @@ -43,6 +43,7 @@ struct SymbolLookupCache; extern struct user_space_program_args* gProgramArgs; +extern void* __gCommPageAddress; extern struct rld_export gRuntimeLoader; extern char* (*gGetEnv)(const char* name); extern bool gProgramLoaded; @@ -53,7 +54,7 @@ extern image_t* gProgramImage; extern "C" { #endif -int runtime_loader(void* arg); +int runtime_loader(void* arg, void* commpage); int open_executable(char* name, image_type type, const char* rpath, const char* programPath, const char* compatibilitySubDir); status_t test_executable(const char* path, char* interpreter);
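
Since the commpage is now cloned into every team at a randomized address and registered as the "commpage" image, userland can no longer rely on a fixed USER_COMMPAGE_ADDR. libroot obtains the base via __gRuntimeLoader->commpage_address and keeps it in __gCommPageAddress, while the runtime loader receives it as its second argument. The commpage table stores offsets relative to that base, so resolving an entry is a base-plus-offset computation, as in the sketch below; the helper name is illustrative, and entries are taken to be addr_t-sized (4 bytes on x86, 8 on x86_64), matching the assembly stubs above.

#include <cstdint>

typedef uintptr_t addr_t;

// Sketch of the commpage entry lookup userland now performs: the table at
// the start of the commpage holds offsets relative to the commpage base, so
// an entry's absolute address is base + table[entry].
static inline void*
commpage_entry_address(const void* commPageBase, int entry)
{
	addr_t base = (addr_t)commPageBase;
	addr_t offset = ((const addr_t*)base)[entry];
	return (void*)(base + offset);
}

__init_time() above resolves COMMPAGE_ENTRY_REAL_TIME_DATA in exactly this way, and setup_signal_frame() stores the same base in signal_frame_data::commpage_address so the userland signal handling code has it available too.
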