tinycc/lib/bcheck.c

/*
* Tiny C Memory and bounds checker
*
* Copyright (c) 2002 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#if !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__) \
&& !defined(__DragonFly__) && !defined(__OpenBSD__)
#include <malloc.h>
#endif
#if !defined(_WIN32)
#include <unistd.h>
#endif
//#define BOUND_DEBUG
/* define so that the bound array is static (faster, but uses memory even
   if bound checking is not used) */
//#define BOUND_STATIC
/* use malloc hooks. Currently the code is not reliable without them */
#define CONFIG_TCC_MALLOC_HOOKS
#define HAVE_MEMALIGN
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
|| defined(__DragonFly__) || defined(__dietlibc__) \
|| defined(__UCLIBC__) || defined(__OpenBSD__) \
|| defined(_WIN32) || defined(TCC_UCLIBC)
#warning Bound checking does not support malloc (etc.) in this environment.
#undef CONFIG_TCC_MALLOC_HOOKS
#undef HAVE_MEMALIGN
#endif
#define BOUND_T1_BITS 13
#define BOUND_T2_BITS 11
#define BOUND_T3_BITS (32 - BOUND_T1_BITS - BOUND_T2_BITS)
#define BOUND_T1_SIZE (1 << BOUND_T1_BITS)
#define BOUND_T2_SIZE (1 << BOUND_T2_BITS)
#define BOUND_T3_SIZE (1 << BOUND_T3_BITS)
#define BOUND_E_BITS 4
#define BOUND_T23_BITS (BOUND_T2_BITS + BOUND_T3_BITS)
#define BOUND_T23_SIZE (1 << BOUND_T23_BITS)
/* this pointer is returned when a bound check fails */
#define INVALID_POINTER ((void *)(-2))
/* size of an empty region */
#define EMPTY_SIZE 0xffffffff
/* size of an invalid region */
#define INVALID_SIZE 0
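/* an "empty" entry (start = 0, size = EMPTY_SIZE) accepts any pointer, so
   memory the checker knows nothing about is never flagged; an "invalid"
   entry (size = INVALID_SIZE) rejects everything and is used for the zones
   set up by mark_invalid() below (this relies on 32-bit unsigned longs) */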
typedef struct BoundEntry {
unsigned long start;
unsigned long size;
struct BoundEntry *next;
unsigned long is_invalid; /* true if pointers outside region are invalid */
} BoundEntry;
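/* the bound table is a two-level trie over 32-bit addresses:
   - the top BOUND_T1_BITS (13) bits index the T1 page table __bound_t1,
   - the next BOUND_T2_BITS (11) bits select a BoundEntry inside a T2 page,
   - the low BOUND_T3_BITS (8) bits fall inside the granule that single
     entry covers; regions sharing a granule are chained through 'next',
     and larger regions are stamped into every granule they span.
   BOUND_E_BITS matches log2(sizeof(BoundEntry)) assuming 4-byte longs and
   pointers (4 * 4 = 16 bytes), so the lookup code can turn the T2 index
   into a byte offset with a shift and a mask instead of a multiply. */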
/* external interface */
void __bound_init(void);
void __bound_new_region(void *p, unsigned long size);
int __bound_delete_region(void *p);
#define FASTCALL __attribute__((regparm(3)))
void *__bound_malloc(size_t size, const void *caller);
void *__bound_memalign(size_t size, size_t align, const void *caller);
void __bound_free(void *ptr, const void *caller);
void *__bound_realloc(void *ptr, size_t size, const void *caller);
static void *libc_malloc(size_t size);
static void libc_free(void *ptr);
static void install_malloc_hooks(void);
static void restore_malloc_hooks(void);
#ifdef CONFIG_TCC_MALLOC_HOOKS
static void *saved_malloc_hook;
static void *saved_free_hook;
static void *saved_realloc_hook;
static void *saved_memalign_hook;
#endif
/* TCC definitions */
extern char __bounds_start; /* start of static bounds table */
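/* the table is a zero-terminated list of (start, size) pairs emitted by
   the compiler for bounds-checked static data; __bound_init() below walks
   it and registers each pair with __bound_new_region() */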
/* error message, just for TCC */
const char *__bound_error_msg;
/* runtime error output */
extern void rt_error(unsigned long pc, const char *fmt, ...);
#ifdef BOUND_STATIC
static BoundEntry *__bound_t1[BOUND_T1_SIZE]; /* page table */
#else
static BoundEntry **__bound_t1; /* page table */
#endif
static BoundEntry *__bound_empty_t2; /* empty page, for unused pages */
static BoundEntry *__bound_invalid_t2; /* invalid page, for invalid pointers */
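/* slow path: scan the region chain of a granule; the entry that matches is
   swapped into the list head so the inline fast path finds it directly the
   next time */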
static BoundEntry *__bound_find_region(BoundEntry *e1, void *p)
{
unsigned long addr, tmp;
BoundEntry *e;
e = e1;
while (e != NULL) {
addr = (unsigned long)p;
addr -= e->start;
if (addr <= e->size) {
/* put region at the head */
tmp = e1->start;
e1->start = e->start;
e->start = tmp;
tmp = e1->size;
e1->size = e->size;
e->size = tmp;
return e1;
}
e = e->next;
}
/* no entry found: return empty entry or invalid entry */
if (e1->is_invalid)
return __bound_invalid_t2;
else
return __bound_empty_t2;
}
/* print a bound error message */
static void bound_error(const char *fmt, ...)
{
__bound_error_msg = fmt;
*(int *)0 = 0; /* force a runtime error */
}
static void bound_alloc_error(void)
{
bound_error("not enough memory for bound checking code");
}
/* return '(p + offset)' for pointer arithmetic (a pointer can reach
   the end of a region in this case) */
void * FASTCALL __bound_ptr_add(void *p, int offset)
{
unsigned long addr = (unsigned long)p;
BoundEntry *e;
#if defined(BOUND_DEBUG)
printf("add: 0x%x %d\n", (int)p, offset);
#endif
e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];
e = (BoundEntry *)((char *)e +
((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &
((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));
addr -= e->start;
if (addr > e->size) {
e = __bound_find_region(e, p);
addr = (unsigned long)p - e->start;
}
addr += offset;
if (addr > e->size)
return INVALID_POINTER; /* return an invalid pointer */
return p + offset;
}
/* return '(p + offset)' for pointer indirection (the resulting pointer
   must be strictly inside the region) */
#define BOUND_PTR_INDIR(dsize) \
void * FASTCALL __bound_ptr_indir ## dsize (void *p, int offset) \
{ \
unsigned long addr = (unsigned long)p; \
BoundEntry *e; \
\
e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)]; \
e = (BoundEntry *)((char *)e + \
((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) & \
((BOUND_T2_SIZE - 1) << BOUND_E_BITS))); \
addr -= e->start; \
if (addr > e->size) { \
e = __bound_find_region(e, p); \
addr = (unsigned long)p - e->start; \
} \
addr += offset + dsize; \
if (addr > e->size) \
return INVALID_POINTER; /* return an invalid pointer */ \
return p + offset; \
}
BOUND_PTR_INDIR(1)
BOUND_PTR_INDIR(2)
BOUND_PTR_INDIR(4)
BOUND_PTR_INDIR(8)
BOUND_PTR_INDIR(12)
BOUND_PTR_INDIR(16)
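/* The macro above instantiates __bound_ptr_indir1/2/4/8/12/16, one checker
   per access size. Conceptually (illustrative only), code compiled with
   `tcc -b` routes pointer arithmetic and dereferences through these helpers:

       q = __bound_ptr_add(p, n);             -- p + n may reach one past the end
       x = *(int *)__bound_ptr_indir4(p, n);  -- a 4-byte access must lie fully inside

   on error they return INVALID_POINTER, and dereferencing it faults, which
   the runtime then reports as a bound error. */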
/* return the frame pointer of the caller */
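/* __builtin_frame_address(1) is used instead of inline asm reading the
   frame register: it stays correct even when the compiler omits the frame
   pointer for __bound_local_new/__bound_local_delete themselves */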
#define GET_CALLER_FP(fp)\
{\
fp = (unsigned long)__builtin_frame_address(1);\
}
/* called when entering a function to add all the local regions */
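/* p1 points to a compiler-generated, zero-terminated table of
   (offset, size) pairs describing the local arrays; each offset is
   relative to the frame pointer of the instrumented caller */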
void FASTCALL __bound_local_new(void *p1)
{
unsigned long addr, size, fp, *p = p1;
GET_CALLER_FP(fp);
for(;;) {
addr = p[0];
if (addr == 0)
break;
addr += fp;
size = p[1];
p += 2;
__bound_new_region((void *)addr, size);
}
}
/* called when leaving a function to delete all the local regions */
void FASTCALL __bound_local_delete(void *p1)
{
unsigned long addr, fp, *p = p1;
GET_CALLER_FP(fp);
for(;;) {
addr = p[0];
if (addr == 0)
break;
addr += fp;
p += 2;
__bound_delete_region((void *)addr);
}
}
static BoundEntry *__bound_new_page(void)
{
BoundEntry *page;
int i;
page = libc_malloc(sizeof(BoundEntry) * BOUND_T2_SIZE);
if (!page)
bound_alloc_error();
for(i=0;i<BOUND_T2_SIZE;i++) {
/* put empty entries */
page[i].start = 0;
page[i].size = EMPTY_SIZE;
page[i].next = NULL;
page[i].is_invalid = 0;
}
return page;
}
/* currently we use malloc(). Should use bound_new_page() */
static BoundEntry *bound_new_entry(void)
{
BoundEntry *e;
e = libc_malloc(sizeof(BoundEntry));
return e;
}
static void bound_free_entry(BoundEntry *e)
{
libc_free(e);
}
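/* return the T2 page for a T1 index, replacing the shared empty/invalid
   page with a freshly allocated private one before it gets written to */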
static inline BoundEntry *get_page(int index)
{
BoundEntry *page;
page = __bound_t1[index];
if (page == __bound_empty_t2 || page == __bound_invalid_t2) {
/* create a new page if necessary */
page = __bound_new_page();
__bound_t1[index] = page;
}
return page;
}
/* mark a region as being invalid (can only be used during init) */
static void mark_invalid(unsigned long addr, unsigned long size)
{
unsigned long start, end;
BoundEntry *page;
int t1_start, t1_end, i, j, t2_start, t2_end;
start = addr;
end = addr + size;
t2_start = (start + BOUND_T3_SIZE - 1) >> BOUND_T3_BITS;
if (end != 0)
t2_end = end >> BOUND_T3_BITS;
else
t2_end = 1 << (BOUND_T1_BITS + BOUND_T2_BITS);
#if 0
printf("mark_invalid: start = %x %x\n", t2_start, t2_end);
#endif
/* first we handle full pages */
t1_start = (t2_start + BOUND_T2_SIZE - 1) >> BOUND_T2_BITS;
t1_end = t2_end >> BOUND_T2_BITS;
i = t2_start & (BOUND_T2_SIZE - 1);
j = t2_end & (BOUND_T2_SIZE - 1);
if (t1_start == t1_end) {
page = get_page(t2_start >> BOUND_T2_BITS);
for(; i < j; i++) {
page[i].size = INVALID_SIZE;
page[i].is_invalid = 1;
}
} else {
if (i > 0) {
page = get_page(t2_start >> BOUND_T2_BITS);
for(; i < BOUND_T2_SIZE; i++) {
page[i].size = INVALID_SIZE;
page[i].is_invalid = 1;
}
}
for(i = t1_start; i < t1_end; i++) {
__bound_t1[i] = __bound_invalid_t2;
}
if (j != 0) {
page = get_page(t1_end);
for(i = 0; i < j; i++) {
page[i].size = INVALID_SIZE;
page[i].is_invalid = 1;
}
}
}
}
void __bound_init(void)
{
int i;
BoundEntry *page;
unsigned long start, size;
int *p;
/* save malloc hooks and install bound check hooks */
install_malloc_hooks();
#ifndef BOUND_STATIC
__bound_t1 = libc_malloc(BOUND_T1_SIZE * sizeof(BoundEntry *));
if (!__bound_t1)
bound_alloc_error();
#endif
__bound_empty_t2 = __bound_new_page();
for(i=0;i<BOUND_T1_SIZE;i++) {
__bound_t1[i] = __bound_empty_t2;
}
page = __bound_new_page();
for(i=0;i<BOUND_T2_SIZE;i++) {
/* put invalid entries */
page[i].start = 0;
page[i].size = INVALID_SIZE;
page[i].next = NULL;
page[i].is_invalid = 1;
}
__bound_invalid_t2 = page;
/* invalid pointer zone */
start = (unsigned long)INVALID_POINTER & ~(BOUND_T23_SIZE - 1);
size = BOUND_T23_SIZE;
mark_invalid(start, size);
#if defined(CONFIG_TCC_MALLOC_HOOKS)
/* malloc zone is also marked invalid. can only use that with
* hooks because all libs should use the same malloc. The solution
* would be to build a new malloc for tcc.
*
* usually the heap (= malloc zone) comes right after bss, i.e. after _end,
* but not always - if we are running from under `tcc -b -run`, or if
* address space randomization is turned on (a), the heap start will be
* separated from the end of bss.
*
* So sbrk(0) is a good approximation for start_brk:
*
* - if we are a separately compiled program, __bound_init() runs early,
* and sbrk(0) should be equal to or very near start_brk (b) (it may be
* slightly above if other constructors malloc something), or
*
* - if we are running from under `tcc -b -run`, sbrk(0) returns the start
* of the heap portion which is under this program's control, so we do not
* mark earlier allocated memory as invalid.
*
*
* (a) /proc/sys/kernel/randomize_va_space = 2, on Linux;
* usually turned on by default.
*
* (b) on Linux >= v3.3, the alternative is to read
* start_brk from /proc/self/stat
*/
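/* mark the next 128 MiB starting at the current program break as invalid;
   chunks handed out later by malloc become valid again when the hooks
   register them with __bound_new_region() */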
start = (unsigned long)sbrk(0);
size = 128 * 0x100000;
mark_invalid(start, size);
#endif
/* add all static bound check values */
p = (int *)&__bounds_start;
while (p[0] != 0) {
__bound_new_region((void *)p[0], p[1]);
p += 2;
}
}
void __bound_exit(void)
{
restore_malloc_hooks();
}
static inline void add_region(BoundEntry *e,
unsigned long start, unsigned long size)
{
BoundEntry *e1;
if (e->start == 0) {
/* no region : add it */
e->start = start;
e->size = size;
} else {
/* already regions in the list: add it at the head */
e1 = bound_new_entry();
e1->start = e->start;
e1->size = e->size;
e1->next = e->next;
e->start = start;
e->size = size;
e->next = e1;
}
}
/* create a new region. It should not already exist in the region list */
void __bound_new_region(void *p, unsigned long size)
{
unsigned long start, end;
BoundEntry *page, *e, *e2;
int t1_start, t1_end, i, t2_start, t2_end;
start = (unsigned long)p;
end = start + size;
t1_start = start >> (BOUND_T2_BITS + BOUND_T3_BITS);
t1_end = end >> (BOUND_T2_BITS + BOUND_T3_BITS);
/* start */
page = get_page(t1_start);
t2_start = (start >> (BOUND_T3_BITS - BOUND_E_BITS)) &
((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
t2_end = (end >> (BOUND_T3_BITS - BOUND_E_BITS)) &
((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
#ifdef BOUND_DEBUG
printf("new %lx %lx %x %x %x %x\n",
start, end, t1_start, t1_end, t2_start, t2_end);
#endif
e = (BoundEntry *)((char *)page + t2_start);
add_region(e, start, size);
if (t1_end == t1_start) {
/* same ending page */
e2 = (BoundEntry *)((char *)page + t2_end);
if (e2 > e) {
e++;
for(;e<e2;e++) {
e->start = start;
e->size = size;
}
add_region(e, start, size);
}
} else {
/* mark until end of page */
e2 = page + BOUND_T2_SIZE;
e++;
for(;e<e2;e++) {
e->start = start;
e->size = size;
}
/* mark intermediate pages, if any */
for(i=t1_start+1;i<t1_end;i++) {
page = get_page(i);
e2 = page + BOUND_T2_SIZE;
for(e=page;e<e2;e++) {
e->start = start;
e->size = size;
}
}
/* last page */
page = get_page(t1_end);
e2 = (BoundEntry *)((char *)page + t2_end);
for(e=page;e<e2;e++) {
e->start = start;
e->size = size;
}
add_region(e, start, size);
}
}
/* delete a region */
static inline void delete_region(BoundEntry *e,
void *p, unsigned long empty_size)
{
unsigned long addr;
BoundEntry *e1;
addr = (unsigned long)p;
addr -= e->start;
if (addr <= e->size) {
/* region found is first one */
e1 = e->next;
if (e1 == NULL) {
/* no more region: mark it empty */
e->start = 0;
e->size = empty_size;
} else {
/* copy next region in head */
e->start = e1->start;
e->size = e1->size;
e->next = e1->next;
bound_free_entry(e1);
}
} else {
/* find the matching region */
for(;;) {
e1 = e;
e = e->next;
/* region not found: do nothing */
if (e == NULL)
break;
addr = (unsigned long)p - e->start;
if (addr <= e->size) {
/* found: remove entry */
e1->next = e->next;
bound_free_entry(e);
break;
}
}
}
}
/* WARNING: 'p' must be the starting point of the region. */
/* return non zero if error */
int __bound_delete_region(void *p)
{
unsigned long start, end, addr, size, empty_size;
BoundEntry *page, *e, *e2;
int t1_start, t1_end, t2_start, t2_end, i;
start = (unsigned long)p;
t1_start = start >> (BOUND_T2_BITS + BOUND_T3_BITS);
t2_start = (start >> (BOUND_T3_BITS - BOUND_E_BITS)) &
((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
/* find region size */
page = __bound_t1[t1_start];
e = (BoundEntry *)((char *)page + t2_start);
addr = start - e->start;
if (addr > e->size)
e = __bound_find_region(e, p);
/* test if invalid region */
if (e->size == EMPTY_SIZE || (unsigned long)p != e->start)
return -1;
/* compute the size we put in invalid regions */
if (e->is_invalid)
empty_size = INVALID_SIZE;
else
empty_size = EMPTY_SIZE;
size = e->size;
end = start + size;
/* now we can free each entry */
t1_end = end >> (BOUND_T2_BITS + BOUND_T3_BITS);
t2_end = (end >> (BOUND_T3_BITS - BOUND_E_BITS)) &
((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
delete_region(e, p, empty_size);
if (t1_end == t1_start) {
/* same ending page */
e2 = (BoundEntry *)((char *)page + t2_end);
if (e2 > e) {
e++;
for(;e<e2;e++) {
e->start = 0;
e->size = empty_size;
}
delete_region(e, p, empty_size);
}
} else {
/* mark until end of page */
e2 = page + BOUND_T2_SIZE;
e++;
for(;e<e2;e++) {
e->start = 0;
e->size = empty_size;
}
/* mark intermediate pages, if any */
/* XXX: should free them */
for(i=t1_start+1;i<t1_end;i++) {
page = get_page(i);
e2 = page + BOUND_T2_SIZE;
for(e=page;e<e2;e++) {
e->start = 0;
e->size = empty_size;
}
}
/* last page */
page = get_page(t1_end);
e2 = (BoundEntry *)((char *)page + t2_end);
for(e=page;e<e2;e++) {
e->start = 0;
e->size = empty_size;
}
delete_region(e, p, empty_size);
}
return 0;
}
/* return the size of the region starting at p, or EMPTY_SIZE if the
   region does not exist */
static unsigned long get_region_size(void *p)
{
unsigned long addr = (unsigned long)p;
BoundEntry *e;
e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];
e = (BoundEntry *)((char *)e +
((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &
((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));
addr -= e->start;
if (addr > e->size)
e = __bound_find_region(e, p);
if (e->start != (unsigned long)p)
return EMPTY_SIZE;
return e->size;
}
/* patched memory functions */
/* force compiler to perform stores coded up to this point */
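/* (without it, gcc may inline install/restore_malloc_hooks() into
   libc_malloc()/libc_free() and drop the hook stores as dead code, making
   the libc calls recurse back into __bound_malloc) */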
#define barrier() __asm__ __volatile__ ("": : : "memory")
static void install_malloc_hooks(void)
{
#ifdef CONFIG_TCC_MALLOC_HOOKS
saved_malloc_hook = __malloc_hook;
saved_free_hook = __free_hook;
saved_realloc_hook = __realloc_hook;
saved_memalign_hook = __memalign_hook;
__malloc_hook = __bound_malloc;
__free_hook = __bound_free;
__realloc_hook = __bound_realloc;
__memalign_hook = __bound_memalign;
barrier();
#endif
}
static void restore_malloc_hooks(void)
{
#ifdef CONFIG_TCC_MALLOC_HOOKS
__malloc_hook = saved_malloc_hook;
__free_hook = saved_free_hook;
__realloc_hook = saved_realloc_hook;
__memalign_hook = saved_memalign_hook;
barrier();
#endif
}
static void *libc_malloc(size_t size)
{
void *ptr;
restore_malloc_hooks();
ptr = malloc(size);
install_malloc_hooks();
return ptr;
}
static void libc_free(void *ptr)
{
restore_malloc_hooks();
free(ptr);
install_malloc_hooks();
}
/* XXX: we should use a malloc which ensures that it is unlikely that
   two malloc'ed blocks get the same address if frees are made in
   between. */
void *__bound_malloc(size_t size, const void *caller)
{
void *ptr;
/* we allocate one more byte to ensure the regions will be
   separated by at least one byte. With the glibc malloc, this may
   in fact not be necessary */
ptr = libc_malloc(size + 1);
if (!ptr)
return NULL;
__bound_new_region(ptr, size);
return ptr;
}
void *__bound_memalign(size_t size, size_t align, const void *caller)
{
void *ptr;
restore_malloc_hooks();
#ifndef HAVE_MEMALIGN
if (align > 4) {
/* XXX: handle it ? */
ptr = NULL;
} else {
/* we suppose that malloc aligns to at least four bytes */
ptr = malloc(size + 1);
}
#else
/* we allocate one more byte to ensure the regions will be
   separated by at least one byte. With the glibc malloc, this may
   in fact not be necessary */
ptr = memalign(align, size + 1);
#endif
install_malloc_hooks();
if (!ptr)
return NULL;
__bound_new_region(ptr, size);
return ptr;
}
void __bound_free(void *ptr, const void *caller)
{
if (ptr == NULL)
return;
if (__bound_delete_region(ptr) != 0)
bound_error("freeing invalid region");
libc_free(ptr);
}
void *__bound_realloc(void *ptr, size_t size, const void *caller)
{
void *ptr1;
int old_size;
if (size == 0) {
__bound_free(ptr, caller);
return NULL;
} else {
ptr1 = __bound_malloc(size, caller);
if (ptr == NULL || ptr1 == NULL)
return ptr1;
old_size = get_region_size(ptr);
if (old_size == EMPTY_SIZE)
bound_error("realloc'ing invalid pointer");
memcpy(ptr1, ptr, old_size);
__bound_free(ptr, caller);
return ptr1;
}
}
#ifndef CONFIG_TCC_MALLOC_HOOKS
void *__bound_calloc(size_t nmemb, size_t size)
{
void *ptr;
size = size * nmemb;
ptr = __bound_malloc(size, NULL);
if (!ptr)
return NULL;
memset(ptr, 0, size);
return ptr;
}
#endif
#if 0
static void bound_dump(void)
{
BoundEntry *page, *e;
int i, j;
printf("region dump:\n");
for(i=0;i<BOUND_T1_SIZE;i++) {
page = __bound_t1[i];
for(j=0;j<BOUND_T2_SIZE;j++) {
e = page + j;
/* do not print invalid or empty entries */
if (e->size != EMPTY_SIZE && e->start != 0) {
printf("%08x:",
(i << (BOUND_T2_BITS + BOUND_T3_BITS)) +
(j << BOUND_T3_BITS));
do {
printf(" %08lx:%08lx", e->start, e->start + e->size);
e = e->next;
} while (e != NULL);
printf("\n");
}
}
}
}
#endif
/* some useful checked functions */
/* check that (p ... p + size - 1) lies inside 'p' region, if any */
static void __bound_check(const void *p, size_t size)
{
if (size == 0)
return;
p = __bound_ptr_add((void *)p, size);
if (p == INVALID_POINTER)
bound_error("invalid pointer");
}
void *__bound_memcpy(void *dst, const void *src, size_t size)
{
__bound_check(dst, size);
__bound_check(src, size);
/* check also region overlap */
if (src >= dst && src < dst + size)
bound_error("overlapping regions in memcpy()");
return memcpy(dst, src, size);
}
void *__bound_memmove(void *dst, const void *src, size_t size)
{
__bound_check(dst, size);
__bound_check(src, size);
return memmove(dst, src, size);
}
void *__bound_memset(void *dst, int c, size_t size)
{
__bound_check(dst, size);
return memset(dst, c, size);
}
/* XXX: could be optimized */
int __bound_strlen(const char *s)
{
const char *p;
int len;
len = 0;
for(;;) {
p = __bound_ptr_indir1((char *)s, len);
if (p == INVALID_POINTER)
bound_error("bad pointer in strlen()");
if (*p == '\0')
break;
len++;
}
return len;
}
char *__bound_strcpy(char *dst, const char *src)
{
int len;
len = __bound_strlen(src);
return __bound_memcpy(dst, src, len + 1);
}