From cbf2807505f63448491f73f989979d0f8440cffc Mon Sep 17 00:00:00 2001 From: Aren Date: Mon, 10 Jun 2024 11:23:19 +0300 Subject: [PATCH] 0.1 from Bellard --- Makefile | 45 + README | 64 + ctype.c | 35 + dtoa.c | 3301 ++++++++++++++++++++++++ example/boot/tccargs | 17 + example/hello.c | 32 + gunzip.c | 109 + head.S | 128 + inflate.c | 1180 +++++++++ initrd.img | Bin 0 -> 642 bytes lib.c | 492 ++++ linux-2.4.26-config | 551 ++++ linux-2.4.26-tcc.patch | 427 ++++ main.c | 253 ++ malloc.c | 5482 ++++++++++++++++++++++++++++++++++++++++ qemu-tccboot | 6 + tccboot | Bin 0 -> 141792 bytes tccboot.h | 90 + test.c | 4 + user.c | 50 + vsprintf.c | 741 ++++++ 21 files changed, 13007 insertions(+) create mode 100644 Makefile create mode 100644 README create mode 100644 ctype.c create mode 100644 dtoa.c create mode 100644 example/boot/tccargs create mode 100644 example/hello.c create mode 100644 gunzip.c create mode 100644 head.S create mode 100644 inflate.c create mode 100644 initrd.img create mode 100644 lib.c create mode 100644 linux-2.4.26-config create mode 100644 linux-2.4.26-tcc.patch create mode 100644 main.c create mode 100644 malloc.c create mode 100755 qemu-tccboot create mode 100644 tccboot create mode 100644 tccboot.h create mode 100644 test.c create mode 100644 user.c create mode 100644 vsprintf.c diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..640eabd --- /dev/null +++ b/Makefile @@ -0,0 +1,45 @@ +# modify to your linux-2.4.26 kernel path (you must build the kernel first) +KERNEL_PATH=../linux-2.4.26 +# modify to your TinyCC 0.9.21 path (you must build TinyCC first) +TCC_PATH=../.. +CC=gcc +CFLAGS=-D__KERNEL__ -Wall -O2 -g -I$(KERNEL_PATH)/include -fno-builtin-printf -DCONFIG_TCCBOOT -mpreferred-stack-boundary=2 -march=i386 -falign-functions=0 -I. + +all: tccboot initrd.img + +#tccboot.user: tcc.o main.o ctype.o vsprintf.o lib.o malloc.o dtoa.o user.o +# $(CC) -static -nostdlib -o $@ $^ + +tccboot.out: head.o tcc.o main.o ctype.o vsprintf.o lib.o malloc.o \ + dtoa.o gunzip.o + ld -e startup_32 -Ttext=0x100000 -N -o $@ $^ + +tccboot.bin: tccboot.out + objcopy -O binary -R .note -R .comment -S $< $@ + +tccboot: tccboot.bin + $(KERNEL_PATH)/arch/i386/boot/tools/build \ + -b $(KERNEL_PATH)/arch/i386/boot/bbootsect \ + $(KERNEL_PATH)/arch/i386/boot/bsetup \ + $< CURRENT > $@ + +tcc.o: $(TCC_PATH)/tcc.c + $(CC) $(CFLAGS) -c -o $@ $< + +%.o: %.c + $(CC) $(CFLAGS) -c -o $@ $< + +%.o: %.S + $(CC) -D__ASSEMBLY__ -D__KERNEL__ -I$(KERNEL_PATH)/include -c -o $@ $< + +clean: + rm -f *~ *.o tccboot.out tccboot.bin example.romfs + +cleanall: clean + rm -f tccboot example.romfs initrd.img + +example.romfs: example/boot/tccargs example/hello.c + cd example ; genromfs -f ../example.romfs + +initrd.img: example.romfs + gzip < $< > $@ \ No newline at end of file diff --git a/README b/README new file mode 100644 index 0000000..09eab89 --- /dev/null +++ b/README @@ -0,0 +1,64 @@ +Introduction: +------------ + +WARNING: don't try to play with TCCBOOT unless you are a kernel +hacker! + +TCCBOOT is a bootloader which uses TinyCC to compile C and assembly +sources and boot the resulting executable. It is typically used to +compile the Linux kernel sources at boot time. + +TCCBOOT boots the same way as a Linux kernel, so any boot loader which +can run a 'bzImage' Linux kernel image can run TCCBOOT. I only tested +it with ISOLINUX, but LILO or GRUB should work too. + +TCCBOOT reads C or assembly sources from a gzipped ROMFS filesystem +stored in an Initial Ram Disk (initrd). 
It first reads the file
+'boot/tccargs', which contains the TinyCC command line (same syntax as
+the tcc executable). The TinyCC invocation must output a single binary
+image named 'kernel'. This image is loaded at address 0x00100000.
+TCCBOOT then jumps to address 0x00100000 in 32-bit flat mode. This
+is compatible with the ABI of the 'vmlinux' kernel image.
+
+Compilation:
+-----------
+
+TCCBOOT has only been tested with Linux 2.4.26. In order to build
+TCCBOOT, you must first compile a 2.4.26 kernel, because for simplicity
+TCCBOOT uses some binary files and headers from the Linux kernel.
+TCCBOOT also needs the source code of TinyCC (tested with TinyCC
+version 0.9.21). You can modify the Makefile to set the needed paths.
+
+Example:
+-------
+
+A "Hello World" ROMFS partition is included (initrd.img). You can
+rebuild it from the example/ directory. You can test it in the QEMU
+PC emulator with the 'qemu-tccboot' script.
+
+Kernel compilation:
+------------------
+
+For your information, the patch 'linux-2.4.26-tcc.patch' gives the
+necessary modifications to build a Linux kernel with TinyCC. The
+corresponding kernel configuration is in the file
+linux-2.4.26-config. Patches are necessary for the following reasons:
+
+- unsupported assembly directives: .rept, .endr, .subsection
+- '#define __ASSEMBLY__' needed in assembly sources
+- static variables cannot be seen from the inline assembly code
+- typing/lvalue problems with '? :'
+- no long long bit fields
+- 'aligned' attribute not supported for whole structs, only for fields
+- an obscure preprocessor bug
+
+Some of these problems could easily be fixed, but I am too lazy to
+do it now. There are surely still many bugs in the kernel generated
+by TinyCC/TCCBOOT, but at least it can boot and launch a shell.
+
+License:
+-------
+
+TCCBOOT is distributed under the GNU General Public License.
+
+Fabrice Bellard.
diff --git a/ctype.c b/ctype.c
new file mode 100644
index 0000000..b5f72a5
--- /dev/null
+++ b/ctype.c
@@ -0,0 +1,35 @@
+/*
+ *  linux/lib/ctype.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#include <linux/ctype.h>
+
+unsigned char _ctype[] = {
+_C,_C,_C,_C,_C,_C,_C,_C,				/* 0-7 */
+_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C,			/* 8-15 */
+_C,_C,_C,_C,_C,_C,_C,_C,				/* 16-23 */
+_C,_C,_C,_C,_C,_C,_C,_C,				/* 24-31 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P,				/* 32-39 */
+_P,_P,_P,_P,_P,_P,_P,_P,				/* 40-47 */
+_D,_D,_D,_D,_D,_D,_D,_D,				/* 48-55 */
+_D,_D,_P,_P,_P,_P,_P,_P,				/* 56-63 */
+_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U,		/* 64-71 */
+_U,_U,_U,_U,_U,_U,_U,_U,				/* 72-79 */
+_U,_U,_U,_U,_U,_U,_U,_U,				/* 80-87 */
+_U,_U,_U,_P,_P,_P,_P,_P,				/* 88-95 */
+_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L,		/* 96-103 */
+_L,_L,_L,_L,_L,_L,_L,_L,				/* 104-111 */
+_L,_L,_L,_L,_L,_L,_L,_L,				/* 112-119 */
+_L,_L,_L,_P,_P,_P,_P,_C,				/* 120-127 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,			/* 128-143 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,			/* 144-159 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,	/* 160-175 */
+_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,	/* 176-191 */
+_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,	/* 192-207 */
+_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L,	/* 208-223 */
+_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,	/* 224-239 */
+_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L};	/* 240-255 */
+
+
diff --git a/dtoa.c b/dtoa.c
new file mode 100644
index 0000000..b325cd8
--- /dev/null
+++ b/dtoa.c
@@ -0,0 +1,3301 @@
+/****************************************************************
+ *
+ * The author of this software is David M. Gay. 
+ * + * Copyright (c) 1991, 2000, 2001 by Lucent Technologies. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose without fee is hereby granted, provided that this entire notice + * is included in all copies of any software which is or includes a copy + * or modification of this software and in all copies of the supporting + * documentation for such software. + * + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED + * WARRANTY. IN PARTICULAR, NEITHER THE AUTHOR NOR LUCENT MAKES ANY + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY + * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. + * + ***************************************************************/ + +/* Please send bug reports to David M. Gay (dmg at acm dot org, + * with " at " changed at "@" and " dot " changed to "."). */ + +/* On a machine with IEEE extended-precision registers, it is + * necessary to specify double-precision (53-bit) rounding precision + * before invoking strtod or dtoa. If the machine uses (the equivalent + * of) Intel 80x87 arithmetic, the call + * _control87(PC_53, MCW_PC); + * does this with many compilers. Whether this or another call is + * appropriate depends on the compiler; for this to work, it may be + * necessary to #include "float.h" or another system-dependent header + * file. + */ + +/* strtod for IEEE-, VAX-, and IBM-arithmetic machines. + * + * This strtod returns a nearest machine number to the input decimal + * string (or sets errno to ERANGE). With IEEE arithmetic, ties are + * broken by the IEEE round-even rule. Otherwise ties are broken by + * biased rounding (add half and chop). + * + * Inspired loosely by William D. Clinger's paper "How to Read Floating + * Point Numbers Accurately" [Proc. ACM SIGPLAN '90, pp. 92-101]. + * + * Modifications: + * + * 1. We only require IEEE, IBM, or VAX double-precision + * arithmetic (not IEEE double-extended). + * 2. We get by with floating-point arithmetic in a case that + * Clinger missed -- when we're computing d * 10^n + * for a small integer d and the integer n is not too + * much larger than 22 (the maximum integer k for which + * we can represent 10^k exactly), we may be able to + * compute (d*10^k) * 10^(e-k) with just one roundoff. + * 3. Rather than a bit-at-a-time adjustment of the binary + * result in the hard case, we use floating-point + * arithmetic to determine the adjustment to within + * one bit; only in really hard cases do we need to + * compute a second residual. + * 4. Because of 3., we don't need a large table of powers of 10 + * for ten-to-e (just some small tables, e.g. of 10^k + * for 0 <= k <= 22). + */ + +/* + * #define IEEE_8087 for IEEE-arithmetic machines where the least + * significant byte has the lowest address. + * #define IEEE_MC68k for IEEE-arithmetic machines where the most + * significant byte has the lowest address. + * #define Long int on machines with 32-bit ints and 64-bit longs. + * #define IBM for IBM mainframe-style floating-point arithmetic. + * #define VAX for VAX-style floating-point arithmetic (D_floating). + * #define No_leftright to omit left-right logic in fast floating-point + * computation of dtoa. + * #define Honor_FLT_ROUNDS if FLT_ROUNDS can assume the values 2 or 3 + * and strtod and dtoa should round accordingly. + * #define Check_FLT_ROUNDS if FLT_ROUNDS can assume the values 2 or 3 + * and Honor_FLT_ROUNDS is not #defined. 
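+ *	(For reference: FLT_ROUNDS is the standard <float.h> rounding-mode
+ *	indicator; 0 means toward zero, 1 to nearest, 2 toward +infinity,
+ *	and 3 toward -infinity.)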
+ * #define RND_PRODQUOT to use rnd_prod and rnd_quot (assembly routines + * that use extended-precision instructions to compute rounded + * products and quotients) with IBM. + * #define ROUND_BIASED for IEEE-format with biased rounding. + * #define Inaccurate_Divide for IEEE-format with correctly rounded + * products but inaccurate quotients, e.g., for Intel i860. + * #define NO_LONG_LONG on machines that do not have a "long long" + * integer type (of >= 64 bits). On such machines, you can + * #define Just_16 to store 16 bits per 32-bit Long when doing + * high-precision integer arithmetic. Whether this speeds things + * up or slows things down depends on the machine and the number + * being converted. If long long is available and the name is + * something other than "long long", #define Llong to be the name, + * and if "unsigned Llong" does not work as an unsigned version of + * Llong, #define #ULLong to be the corresponding unsigned type. + * #define KR_headers for old-style C function headers. + * #define Bad_float_h if your system lacks a float.h or if it does not + * define some or all of DBL_DIG, DBL_MAX_10_EXP, DBL_MAX_EXP, + * FLT_RADIX, FLT_ROUNDS, and DBL_MAX. + * #define MALLOC your_malloc, where your_malloc(n) acts like malloc(n) + * if memory is available and otherwise does something you deem + * appropriate. If MALLOC is undefined, malloc will be invoked + * directly -- and assumed always to succeed. + * #define Omit_Private_Memory to omit logic (added Jan. 1998) for making + * memory allocations from a private pool of memory when possible. + * When used, the private pool is PRIVATE_MEM bytes long: 2304 bytes, + * unless #defined to be a different length. This default length + * suffices to get rid of MALLOC calls except for unusual cases, + * such as decimal-to-binary conversion of a very long string of + * digits. The longest string dtoa can return is about 751 bytes + * long. For conversions by strtod of strings of 800 digits and + * all dtoa conversions in single-threaded executions with 8-byte + * pointers, PRIVATE_MEM >= 7400 appears to suffice; with 4-byte + * pointers, PRIVATE_MEM >= 7112 appears adequate. + * #define INFNAN_CHECK on IEEE systems to cause strtod to check for + * Infinity and NaN (case insensitively). On some systems (e.g., + * some HP systems), it may be necessary to #define NAN_WORD0 + * appropriately -- to the most significant word of a quiet NaN. + * (On HP Series 700/800 machines, -DNAN_WORD0=0x7ff40000 works.) + * When INFNAN_CHECK is #defined and No_Hex_NaN is not #defined, + * strtod also accepts (case insensitively) strings of the form + * NaN(x), where x is a string of hexadecimal digits and spaces; + * if there is only one string of hexadecimal digits, it is taken + * for the 52 fraction bits of the resulting NaN; if there are two + * or more strings of hex digits, the first is for the high 20 bits, + * the second and subsequent for the low 32 bits, with intervening + * white space ignored; but if this results in none of the 52 + * fraction bits being on (an IEEE Infinity symbol), then NAN_WORD0 + * and NAN_WORD1 are used instead. + * #define MULTIPLE_THREADS if the system offers preemptively scheduled + * multiple threads. In this case, you must provide (or suitably + * #define) two locks, acquired by ACQUIRE_DTOA_LOCK(n) and freed + * by FREE_DTOA_LOCK(n) for n = 0 or 1. 
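+ *	A minimal illustrative sketch, assuming POSIX threads
+ *	(<pthread.h>); the mutex name here is ours, not part of this file:
+ *	    static pthread_mutex_t dtoa_mutex[2] =
+ *		{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER };
+ *	    #define ACQUIRE_DTOA_LOCK(n) pthread_mutex_lock(&dtoa_mutex[n])
+ *	    #define FREE_DTOA_LOCK(n)    pthread_mutex_unlock(&dtoa_mutex[n])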
(The second lock, accessed + * in pow5mult, ensures lazy evaluation of only one copy of high + * powers of 5; omitting this lock would introduce a small + * probability of wasting memory, but would otherwise be harmless.) + * You must also invoke freedtoa(s) to free the value s returned by + * dtoa. You may do so whether or not MULTIPLE_THREADS is #defined. + * #define NO_IEEE_Scale to disable new (Feb. 1997) logic in strtod that + * avoids underflows on inputs whose result does not underflow. + * If you #define NO_IEEE_Scale on a machine that uses IEEE-format + * floating-point numbers and flushes underflows to zero rather + * than implementing gradual underflow, then you must also #define + * Sudden_Underflow. + * #define YES_ALIAS to permit aliasing certain double values with + * arrays of ULongs. This leads to slightly better code with + * some compilers and was always used prior to 19990916, but it + * is not strictly legal and can cause trouble with aggressively + * optimizing compilers (e.g., gcc 2.95.1 under -O2). + * #define USE_LOCALE to use the current locale's decimal_point value. + * #define SET_INEXACT if IEEE arithmetic is being used and extra + * computation should be done to set the inexact flag when the + * result is inexact and avoid setting inexact when the result + * is exact. In this case, dtoa.c must be compiled in + * an environment, perhaps provided by #include "dtoa.c" in a + * suitable wrapper, that defines two functions, + * int get_inexact(void); + * void clear_inexact(void); + * such that get_inexact() returns a nonzero value if the + * inexact bit is already set, and clear_inexact() sets the + * inexact bit to 0. When SET_INEXACT is #defined, strtod + * also does extra computations to set the underflow and overflow + * flags when appropriate (i.e., when the result is tiny and + * inexact or when it is a numeric value rounded to +-infinity). + * #define NO_ERRNO if strtod should not assign errno = ERANGE when + * the result overflows to +-Infinity or underflows to 0. 
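+ *
+ *	For illustration, the caller-side check that the default errno
+ *	convention supports (a sketch; buf and endp are our names):
+ *	    errno = 0;
+ *	    d = strtod(buf, &endp);
+ *	    if (errno == ERANGE)
+ *		... d overflowed to +-Infinity or underflowed to 0 ...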
+ */ + +#define IEEE_8087 +#define INFNAN_CHECK +#define FLT_ROUNDS_IS_CONSTANT +#define MODE_0_ONLY + +#ifndef Long +#define Long long +#endif +#ifndef ULong +typedef unsigned Long ULong; +#endif + +#ifdef DEBUG +#include "stdio.h" +#define Bug(x) {fprintf(stderr, "%s\n", x); exit(1);} +#endif + +#include "tccboot.h" + +#ifdef USE_LOCALE +#include "locale.h" +#endif + +#ifdef MALLOC +#ifdef KR_headers +extern char *MALLOC(); +#else +extern void *MALLOC(size_t); +#endif +#else +#define MALLOC malloc +#endif + +#ifndef Omit_Private_Memory +#ifndef PRIVATE_MEM +#define PRIVATE_MEM 2304 +#endif +#define PRIVATE_mem ((PRIVATE_MEM+sizeof(double)-1)/sizeof(double)) +static double private_mem[PRIVATE_mem], *pmem_next = private_mem; +#endif + +#undef IEEE_Arith +#undef Avoid_Underflow +#ifdef IEEE_MC68k +#define IEEE_Arith +#endif +#ifdef IEEE_8087 +#define IEEE_Arith +#endif + +#ifdef Bad_float_h + +#ifdef IEEE_Arith +#define DBL_DIG 15 +#define DBL_MAX_10_EXP 308 +#define DBL_MAX_EXP 1024 +#define FLT_RADIX 2 +#endif /*IEEE_Arith*/ + +#ifdef IBM +#define DBL_DIG 16 +#define DBL_MAX_10_EXP 75 +#define DBL_MAX_EXP 63 +#define FLT_RADIX 16 +#define DBL_MAX 7.2370055773322621e+75 +#endif + +#ifdef VAX +#define DBL_DIG 16 +#define DBL_MAX_10_EXP 38 +#define DBL_MAX_EXP 127 +#define FLT_RADIX 2 +#define DBL_MAX 1.7014118346046923e+38 +#endif + +#ifndef LONG_MAX +#define LONG_MAX 2147483647 +#endif + +#else /* ifndef Bad_float_h */ +#include "float.h" +#endif /* Bad_float_h */ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef CONST +#ifdef KR_headers +#define CONST /* blank */ +#else +#define CONST const +#endif +#endif + +#if defined(IEEE_8087) + defined(IEEE_MC68k) + defined(VAX) + defined(IBM) != 1 +Exactly one of IEEE_8087, IEEE_MC68k, VAX, or IBM should be defined. +#endif + +typedef union { double d; ULong L[2]; } U; + +#ifdef YES_ALIAS +#define dval(x) x +#ifdef IEEE_8087 +#define word0(x) ((ULong *)&x)[1] +#define word1(x) ((ULong *)&x)[0] +#else +#define word0(x) ((ULong *)&x)[0] +#define word1(x) ((ULong *)&x)[1] +#endif +#else +#ifdef IEEE_8087 +#define word0(x) ((U*)&x)->L[1] +#define word1(x) ((U*)&x)->L[0] +#else +#define word0(x) ((U*)&x)->L[0] +#define word1(x) ((U*)&x)->L[1] +#endif +#define dval(x) ((U*)&x)->d +#endif + +/* The following definition of Storeinc is appropriate for MIPS processors. 
+ * An alternative that might be better on some machines is + * #define Storeinc(a,b,c) (*a++ = b << 16 | c & 0xffff) + */ +#if defined(IEEE_8087) + defined(VAX) +#define Storeinc(a,b,c) (((unsigned short *)a)[1] = (unsigned short)b, \ +((unsigned short *)a)[0] = (unsigned short)c, a++) +#else +#define Storeinc(a,b,c) (((unsigned short *)a)[0] = (unsigned short)b, \ +((unsigned short *)a)[1] = (unsigned short)c, a++) +#endif + +/* #define P DBL_MANT_DIG */ +/* Ten_pmax = floor(P*log(2)/log(5)) */ +/* Bletch = (highest power of 2 < DBL_MAX_10_EXP) / 16 */ +/* Quick_max = floor((P-1)*log(FLT_RADIX)/log(10) - 1) */ +/* Int_max = floor(P*log(FLT_RADIX)/log(10) - 1) */ + +#ifdef IEEE_Arith +#define Exp_shift 20 +#define Exp_shift1 20 +#define Exp_msk1 0x100000 +#define Exp_msk11 0x100000 +#define Exp_mask 0x7ff00000 +#define P 53 +#define Bias 1023 +#define Emin (-1022) +#define Exp_1 0x3ff00000 +#define Exp_11 0x3ff00000 +#define Ebits 11 +#define Frac_mask 0xfffff +#define Frac_mask1 0xfffff +#define Ten_pmax 22 +#define Bletch 0x10 +#define Bndry_mask 0xfffff +#define Bndry_mask1 0xfffff +#define LSB 1 +#define Sign_bit 0x80000000 +#define Log2P 1 +#define Tiny0 0 +#define Tiny1 1 +#define Quick_max 14 +#define Int_max 14 +#ifndef NO_IEEE_Scale +#define Avoid_Underflow +#ifdef Flush_Denorm /* debugging option */ +#undef Sudden_Underflow +#endif +#endif + +#ifndef Flt_Rounds +#ifdef FLT_ROUNDS +#define Flt_Rounds FLT_ROUNDS +#else +#define Flt_Rounds 1 +#endif +#endif /*Flt_Rounds*/ + +#ifdef Honor_FLT_ROUNDS +#define Rounding rounding +#undef Check_FLT_ROUNDS +#define Check_FLT_ROUNDS +#else +#define Rounding Flt_Rounds +#endif + +#else /* ifndef IEEE_Arith */ +#undef Check_FLT_ROUNDS +#undef Honor_FLT_ROUNDS +#undef SET_INEXACT +#undef Sudden_Underflow +#define Sudden_Underflow +#ifdef IBM +#undef Flt_Rounds +#define Flt_Rounds 0 +#define Exp_shift 24 +#define Exp_shift1 24 +#define Exp_msk1 0x1000000 +#define Exp_msk11 0x1000000 +#define Exp_mask 0x7f000000 +#define P 14 +#define Bias 65 +#define Exp_1 0x41000000 +#define Exp_11 0x41000000 +#define Ebits 8 /* exponent has 7 bits, but 8 is the right value in b2d */ +#define Frac_mask 0xffffff +#define Frac_mask1 0xffffff +#define Bletch 4 +#define Ten_pmax 22 +#define Bndry_mask 0xefffff +#define Bndry_mask1 0xffffff +#define LSB 1 +#define Sign_bit 0x80000000 +#define Log2P 4 +#define Tiny0 0x100000 +#define Tiny1 0 +#define Quick_max 14 +#define Int_max 15 +#else /* VAX */ +#undef Flt_Rounds +#define Flt_Rounds 1 +#define Exp_shift 23 +#define Exp_shift1 7 +#define Exp_msk1 0x80 +#define Exp_msk11 0x800000 +#define Exp_mask 0x7f80 +#define P 56 +#define Bias 129 +#define Exp_1 0x40800000 +#define Exp_11 0x4080 +#define Ebits 8 +#define Frac_mask 0x7fffff +#define Frac_mask1 0xffff007f +#define Ten_pmax 24 +#define Bletch 2 +#define Bndry_mask 0xffff007f +#define Bndry_mask1 0xffff007f +#define LSB 0x10000 +#define Sign_bit 0x8000 +#define Log2P 1 +#define Tiny0 0x80 +#define Tiny1 0 +#define Quick_max 15 +#define Int_max 15 +#endif /* IBM, VAX */ +#endif /* IEEE_Arith */ + +#ifndef IEEE_Arith +#define ROUND_BIASED +#endif + +#ifdef RND_PRODQUOT +#define rounded_product(a,b) a = rnd_prod(a, b) +#define rounded_quotient(a,b) a = rnd_quot(a, b) +#ifdef KR_headers +extern double rnd_prod(), rnd_quot(); +#else +extern double rnd_prod(double, double), rnd_quot(double, double); +#endif +#else +#define rounded_product(a,b) a *= b +#define rounded_quotient(a,b) a /= b +#endif + +#define Big0 (Frac_mask1 | Exp_msk1*(DBL_MAX_EXP+Bias-1)) +#define 
Big1 0xffffffff + +#ifndef Pack_32 +#define Pack_32 +#endif + +#ifdef KR_headers +#define FFFFFFFF ((((unsigned long)0xffff)<<16)|(unsigned long)0xffff) +#else +#define FFFFFFFF 0xffffffffUL +#endif + +#ifdef NO_LONG_LONG +#undef ULLong +#ifdef Just_16 +#undef Pack_32 +/* When Pack_32 is not defined, we store 16 bits per 32-bit Long. + * This makes some inner loops simpler and sometimes saves work + * during multiplications, but it often seems to make things slightly + * slower. Hence the default is now to store 32 bits per Long. + */ +#endif +#else /* long long available */ +#ifndef Llong +#define Llong long long +#endif +#ifndef ULLong +#define ULLong unsigned Llong +#endif +#endif /* NO_LONG_LONG */ + +#ifndef MULTIPLE_THREADS +#define ACQUIRE_DTOA_LOCK(n) /*nothing*/ +#define FREE_DTOA_LOCK(n) /*nothing*/ +#endif + +#define Kmax 15 + +#ifdef __cplusplus +extern "C" double strtod(const char *s00, char **se); +extern "C" char *dtoa(double d, int mode, int ndigits, + int *decpt, int *sign, char **rve); +#endif + + struct +Bigint { + struct Bigint *next; + int k, maxwds, sign, wds; + ULong x[1]; + }; + + typedef struct Bigint Bigint; + + static Bigint *freelist[Kmax+1]; + + static Bigint * +Balloc +#ifdef KR_headers + (k) int k; +#else + (int k) +#endif +{ + int x; + Bigint *rv; +#ifndef Omit_Private_Memory + unsigned int len; +#endif + + ACQUIRE_DTOA_LOCK(0); + if (rv = freelist[k]) { + freelist[k] = rv->next; + } + else { + x = 1 << k; +#ifdef Omit_Private_Memory + rv = (Bigint *)MALLOC(sizeof(Bigint) + (x-1)*sizeof(ULong)); +#else + len = (sizeof(Bigint) + (x-1)*sizeof(ULong) + sizeof(double) - 1) + /sizeof(double); + if (pmem_next - private_mem + len <= PRIVATE_mem) { + rv = (Bigint*)pmem_next; + pmem_next += len; + } + else + rv = (Bigint*)MALLOC(len*sizeof(double)); +#endif + rv->k = k; + rv->maxwds = x; + } + FREE_DTOA_LOCK(0); + rv->sign = rv->wds = 0; + return rv; + } + + static void +Bfree +#ifdef KR_headers + (v) Bigint *v; +#else + (Bigint *v) +#endif +{ + if (v) { + ACQUIRE_DTOA_LOCK(0); + v->next = freelist[v->k]; + freelist[v->k] = v; + FREE_DTOA_LOCK(0); + } + } + +#define Bcopy(x,y) memcpy((char *)&x->sign, (char *)&y->sign, \ +y->wds*sizeof(Long) + 2*sizeof(int)) + + static Bigint * +multadd +#ifdef KR_headers + (b, m, a) Bigint *b; int m, a; +#else + (Bigint *b, int m, int a) /* multiply by m and add a */ +#endif +{ + int i, wds; +#ifdef ULLong + ULong *x; + ULLong carry, y; +#else + ULong carry, *x, y; +#ifdef Pack_32 + ULong xi, z; +#endif +#endif + Bigint *b1; + + wds = b->wds; + x = b->x; + i = 0; + carry = a; + do { +#ifdef ULLong + y = *x * (ULLong)m + carry; + carry = y >> 32; + *x++ = y & FFFFFFFF; +#else +#ifdef Pack_32 + xi = *x; + y = (xi & 0xffff) * m + carry; + z = (xi >> 16) * m + (y >> 16); + carry = z >> 16; + *x++ = (z << 16) + (y & 0xffff); +#else + y = *x * m + carry; + carry = y >> 16; + *x++ = y & 0xffff; +#endif +#endif + } + while(++i < wds); + if (carry) { + if (wds >= b->maxwds) { + b1 = Balloc(b->k+1); + Bcopy(b1, b); + Bfree(b); + b = b1; + } + b->x[wds++] = carry; + b->wds = wds; + } + return b; + } + + static Bigint * +s2b +#ifdef KR_headers + (s, nd0, nd, y9) CONST char *s; int nd0, nd; ULong y9; +#else + (CONST char *s, int nd0, int nd, ULong y9) +#endif +{ + Bigint *b; + int i, k; + Long x, y; + + x = (nd + 8) / 9; + for(k = 0, y = 1; x > y; y <<= 1, k++) ; +#ifdef Pack_32 + b = Balloc(k); + b->x[0] = y9; + b->wds = 1; +#else + b = Balloc(k+1); + b->x[0] = y9 & 0xffff; + b->wds = (b->x[1] = y9 >> 16) ? 
2 : 1; +#endif + + i = 9; + if (9 < nd0) { + s += 9; + do b = multadd(b, 10, *s++ - '0'); + while(++i < nd0); + s++; + } + else + s += 10; + for(; i < nd; i++) + b = multadd(b, 10, *s++ - '0'); + return b; + } + + static int +hi0bits +#ifdef KR_headers + (x) register ULong x; +#else + (register ULong x) +#endif +{ + register int k = 0; + + if (!(x & 0xffff0000)) { + k = 16; + x <<= 16; + } + if (!(x & 0xff000000)) { + k += 8; + x <<= 8; + } + if (!(x & 0xf0000000)) { + k += 4; + x <<= 4; + } + if (!(x & 0xc0000000)) { + k += 2; + x <<= 2; + } + if (!(x & 0x80000000)) { + k++; + if (!(x & 0x40000000)) + return 32; + } + return k; + } + + static int +lo0bits +#ifdef KR_headers + (y) ULong *y; +#else + (ULong *y) +#endif +{ + register int k; + register ULong x = *y; + + if (x & 7) { + if (x & 1) + return 0; + if (x & 2) { + *y = x >> 1; + return 1; + } + *y = x >> 2; + return 2; + } + k = 0; + if (!(x & 0xffff)) { + k = 16; + x >>= 16; + } + if (!(x & 0xff)) { + k += 8; + x >>= 8; + } + if (!(x & 0xf)) { + k += 4; + x >>= 4; + } + if (!(x & 0x3)) { + k += 2; + x >>= 2; + } + if (!(x & 1)) { + k++; + x >>= 1; + if (!x) + return 32; + } + *y = x; + return k; + } + + static Bigint * +i2b +#ifdef KR_headers + (i) int i; +#else + (int i) +#endif +{ + Bigint *b; + + b = Balloc(1); + b->x[0] = i; + b->wds = 1; + return b; + } + + static Bigint * +mult +#ifdef KR_headers + (a, b) Bigint *a, *b; +#else + (Bigint *a, Bigint *b) +#endif +{ + Bigint *c; + int k, wa, wb, wc; + ULong *x, *xa, *xae, *xb, *xbe, *xc, *xc0; + ULong y; +#ifdef ULLong + ULLong carry, z; +#else + ULong carry, z; +#ifdef Pack_32 + ULong z2; +#endif +#endif + + if (a->wds < b->wds) { + c = a; + a = b; + b = c; + } + k = a->k; + wa = a->wds; + wb = b->wds; + wc = wa + wb; + if (wc > a->maxwds) + k++; + c = Balloc(k); + for(x = c->x, xa = x + wc; x < xa; x++) + *x = 0; + xa = a->x; + xae = xa + wa; + xb = b->x; + xbe = xb + wb; + xc0 = c->x; +#ifdef ULLong + for(; xb < xbe; xc0++) { + if (y = *xb++) { + x = xa; + xc = xc0; + carry = 0; + do { + z = *x++ * (ULLong)y + *xc + carry; + carry = z >> 32; + *xc++ = z & FFFFFFFF; + } + while(x < xae); + *xc = carry; + } + } +#else +#ifdef Pack_32 + for(; xb < xbe; xb++, xc0++) { + if (y = *xb & 0xffff) { + x = xa; + xc = xc0; + carry = 0; + do { + z = (*x & 0xffff) * y + (*xc & 0xffff) + carry; + carry = z >> 16; + z2 = (*x++ >> 16) * y + (*xc >> 16) + carry; + carry = z2 >> 16; + Storeinc(xc, z2, z); + } + while(x < xae); + *xc = carry; + } + if (y = *xb >> 16) { + x = xa; + xc = xc0; + carry = 0; + z2 = *xc; + do { + z = (*x & 0xffff) * y + (*xc >> 16) + carry; + carry = z >> 16; + Storeinc(xc, z, z2); + z2 = (*x++ >> 16) * y + (*xc & 0xffff) + carry; + carry = z2 >> 16; + } + while(x < xae); + *xc = z2; + } + } +#else + for(; xb < xbe; xc0++) { + if (y = *xb++) { + x = xa; + xc = xc0; + carry = 0; + do { + z = *x++ * y + *xc + carry; + carry = z >> 16; + *xc++ = z & 0xffff; + } + while(x < xae); + *xc = carry; + } + } +#endif +#endif + for(xc0 = c->x, xc = xc0 + wc; wc > 0 && !*--xc; --wc) ; + c->wds = wc; + return c; + } + + static Bigint *p5s; + + static Bigint * +pow5mult +#ifdef KR_headers + (b, k) Bigint *b; int k; +#else + (Bigint *b, int k) +#endif +{ + Bigint *b1, *p5, *p51; + int i; + static int p05[3] = { 5, 25, 125 }; + + if (i = k & 3) + b = multadd(b, p05[i-1], 0); + + if (!(k >>= 2)) + return b; + if (!(p5 = p5s)) { + /* first time */ +#ifdef MULTIPLE_THREADS + ACQUIRE_DTOA_LOCK(1); + if (!(p5 = p5s)) { + p5 = p5s = i2b(625); + p5->next = 0; + } + FREE_DTOA_LOCK(1); +#else + 
p5 = p5s = i2b(625); + p5->next = 0; +#endif + } + for(;;) { + if (k & 1) { + b1 = mult(b, p5); + Bfree(b); + b = b1; + } + if (!(k >>= 1)) + break; + if (!(p51 = p5->next)) { +#ifdef MULTIPLE_THREADS + ACQUIRE_DTOA_LOCK(1); + if (!(p51 = p5->next)) { + p51 = p5->next = mult(p5,p5); + p51->next = 0; + } + FREE_DTOA_LOCK(1); +#else + p51 = p5->next = mult(p5,p5); + p51->next = 0; +#endif + } + p5 = p51; + } + return b; + } + + static Bigint * +lshift +#ifdef KR_headers + (b, k) Bigint *b; int k; +#else + (Bigint *b, int k) +#endif +{ + int i, k1, n, n1; + Bigint *b1; + ULong *x, *x1, *xe, z; + +#ifdef Pack_32 + n = k >> 5; +#else + n = k >> 4; +#endif + k1 = b->k; + n1 = n + b->wds + 1; + for(i = b->maxwds; n1 > i; i <<= 1) + k1++; + b1 = Balloc(k1); + x1 = b1->x; + for(i = 0; i < n; i++) + *x1++ = 0; + x = b->x; + xe = x + b->wds; +#ifdef Pack_32 + if (k &= 0x1f) { + k1 = 32 - k; + z = 0; + do { + *x1++ = *x << k | z; + z = *x++ >> k1; + } + while(x < xe); + if (*x1 = z) + ++n1; + } +#else + if (k &= 0xf) { + k1 = 16 - k; + z = 0; + do { + *x1++ = *x << k & 0xffff | z; + z = *x++ >> k1; + } + while(x < xe); + if (*x1 = z) + ++n1; + } +#endif + else do + *x1++ = *x++; + while(x < xe); + b1->wds = n1 - 1; + Bfree(b); + return b1; + } + + static int +cmp +#ifdef KR_headers + (a, b) Bigint *a, *b; +#else + (Bigint *a, Bigint *b) +#endif +{ + ULong *xa, *xa0, *xb, *xb0; + int i, j; + + i = a->wds; + j = b->wds; +#ifdef DEBUG + if (i > 1 && !a->x[i-1]) + Bug("cmp called with a->x[a->wds-1] == 0"); + if (j > 1 && !b->x[j-1]) + Bug("cmp called with b->x[b->wds-1] == 0"); +#endif + if (i -= j) + return i; + xa0 = a->x; + xa = xa0 + j; + xb0 = b->x; + xb = xb0 + j; + for(;;) { + if (*--xa != *--xb) + return *xa < *xb ? -1 : 1; + if (xa <= xa0) + break; + } + return 0; + } + + static Bigint * +diff +#ifdef KR_headers + (a, b) Bigint *a, *b; +#else + (Bigint *a, Bigint *b) +#endif +{ + Bigint *c; + int i, wa, wb; + ULong *xa, *xae, *xb, *xbe, *xc; +#ifdef ULLong + ULLong borrow, y; +#else + ULong borrow, y; +#ifdef Pack_32 + ULong z; +#endif +#endif + + i = cmp(a,b); + if (!i) { + c = Balloc(0); + c->wds = 1; + c->x[0] = 0; + return c; + } + if (i < 0) { + c = a; + a = b; + b = c; + i = 1; + } + else + i = 0; + c = Balloc(a->k); + c->sign = i; + wa = a->wds; + xa = a->x; + xae = xa + wa; + wb = b->wds; + xb = b->x; + xbe = xb + wb; + xc = c->x; + borrow = 0; +#ifdef ULLong + do { + y = (ULLong)*xa++ - *xb++ - borrow; + borrow = y >> 32 & (ULong)1; + *xc++ = y & FFFFFFFF; + } + while(xb < xbe); + while(xa < xae) { + y = *xa++ - borrow; + borrow = y >> 32 & (ULong)1; + *xc++ = y & FFFFFFFF; + } +#else +#ifdef Pack_32 + do { + y = (*xa & 0xffff) - (*xb & 0xffff) - borrow; + borrow = (y & 0x10000) >> 16; + z = (*xa++ >> 16) - (*xb++ >> 16) - borrow; + borrow = (z & 0x10000) >> 16; + Storeinc(xc, z, y); + } + while(xb < xbe); + while(xa < xae) { + y = (*xa & 0xffff) - borrow; + borrow = (y & 0x10000) >> 16; + z = (*xa++ >> 16) - borrow; + borrow = (z & 0x10000) >> 16; + Storeinc(xc, z, y); + } +#else + do { + y = *xa++ - *xb++ - borrow; + borrow = (y & 0x10000) >> 16; + *xc++ = y & 0xffff; + } + while(xb < xbe); + while(xa < xae) { + y = *xa++ - borrow; + borrow = (y & 0x10000) >> 16; + *xc++ = y & 0xffff; + } +#endif +#endif + while(!*--xc) + wa--; + c->wds = wa; + return c; + } + + static double +ulp +#ifdef KR_headers + (x) double x; +#else + (double x) +#endif +{ + register Long L; + double a; + + L = (word0(x) & Exp_mask) - (P-1)*Exp_msk1; +#ifndef Avoid_Underflow +#ifndef Sudden_Underflow + if (L > 
0) { +#endif +#endif +#ifdef IBM + L |= Exp_msk1 >> 4; +#endif + word0(a) = L; + word1(a) = 0; +#ifndef Avoid_Underflow +#ifndef Sudden_Underflow + } + else { + L = -L >> Exp_shift; + if (L < Exp_shift) { + word0(a) = 0x80000 >> L; + word1(a) = 0; + } + else { + word0(a) = 0; + L -= Exp_shift; + word1(a) = L >= 31 ? 1 : 1 << 31 - L; + } + } +#endif +#endif + return dval(a); + } + + static double +b2d +#ifdef KR_headers + (a, e) Bigint *a; int *e; +#else + (Bigint *a, int *e) +#endif +{ + ULong *xa, *xa0, w, y, z; + int k; + double d; +#ifdef VAX + ULong d0, d1; +#else +#define d0 word0(d) +#define d1 word1(d) +#endif + + xa0 = a->x; + xa = xa0 + a->wds; + y = *--xa; +#ifdef DEBUG + if (!y) Bug("zero y in b2d"); +#endif + k = hi0bits(y); + *e = 32 - k; +#ifdef Pack_32 + if (k < Ebits) { + d0 = Exp_1 | y >> Ebits - k; + w = xa > xa0 ? *--xa : 0; + d1 = y << (32-Ebits) + k | w >> Ebits - k; + goto ret_d; + } + z = xa > xa0 ? *--xa : 0; + if (k -= Ebits) { + d0 = Exp_1 | y << k | z >> 32 - k; + y = xa > xa0 ? *--xa : 0; + d1 = z << k | y >> 32 - k; + } + else { + d0 = Exp_1 | y; + d1 = z; + } +#else + if (k < Ebits + 16) { + z = xa > xa0 ? *--xa : 0; + d0 = Exp_1 | y << k - Ebits | z >> Ebits + 16 - k; + w = xa > xa0 ? *--xa : 0; + y = xa > xa0 ? *--xa : 0; + d1 = z << k + 16 - Ebits | w << k - Ebits | y >> 16 + Ebits - k; + goto ret_d; + } + z = xa > xa0 ? *--xa : 0; + w = xa > xa0 ? *--xa : 0; + k -= Ebits + 16; + d0 = Exp_1 | y << k + 16 | z << k | w >> 16 - k; + y = xa > xa0 ? *--xa : 0; + d1 = w << k + 16 | y << k; +#endif + ret_d: +#ifdef VAX + word0(d) = d0 >> 16 | d0 << 16; + word1(d) = d1 >> 16 | d1 << 16; +#else +#undef d0 +#undef d1 +#endif + return dval(d); + } + + static Bigint * +d2b +#ifdef KR_headers + (d, e, bits) double d; int *e, *bits; +#else + (double d, int *e, int *bits) +#endif +{ + Bigint *b; + int de, k; + ULong *x, y, z; +#ifndef Sudden_Underflow + int i; +#endif +#ifdef VAX + ULong d0, d1; + d0 = word0(d) >> 16 | word0(d) << 16; + d1 = word1(d) >> 16 | word1(d) << 16; +#else +#define d0 word0(d) +#define d1 word1(d) +#endif + +#ifdef Pack_32 + b = Balloc(1); +#else + b = Balloc(2); +#endif + x = b->x; + + z = d0 & Frac_mask; + d0 &= 0x7fffffff; /* clear sign bit, which we ignore */ +#ifdef Sudden_Underflow + de = (int)(d0 >> Exp_shift); +#ifndef IBM + z |= Exp_msk11; +#endif +#else + if (de = (int)(d0 >> Exp_shift)) + z |= Exp_msk1; +#endif +#ifdef Pack_32 + if (y = d1) { + if (k = lo0bits(&y)) { + x[0] = y | z << 32 - k; + z >>= k; + } + else + x[0] = y; +#ifndef Sudden_Underflow + i = +#endif + b->wds = (x[1] = z) ? 
2 : 1; + } + else { +#ifdef DEBUG + if (!z) + Bug("Zero passed to d2b"); +#endif + k = lo0bits(&z); + x[0] = z; +#ifndef Sudden_Underflow + i = +#endif + b->wds = 1; + k += 32; + } +#else + if (y = d1) { + if (k = lo0bits(&y)) + if (k >= 16) { + x[0] = y | z << 32 - k & 0xffff; + x[1] = z >> k - 16 & 0xffff; + x[2] = z >> k; + i = 2; + } + else { + x[0] = y & 0xffff; + x[1] = y >> 16 | z << 16 - k & 0xffff; + x[2] = z >> k & 0xffff; + x[3] = z >> k+16; + i = 3; + } + else { + x[0] = y & 0xffff; + x[1] = y >> 16; + x[2] = z & 0xffff; + x[3] = z >> 16; + i = 3; + } + } + else { +#ifdef DEBUG + if (!z) + Bug("Zero passed to d2b"); +#endif + k = lo0bits(&z); + if (k >= 16) { + x[0] = z; + i = 0; + } + else { + x[0] = z & 0xffff; + x[1] = z >> 16; + i = 1; + } + k += 32; + } + while(!x[i]) + --i; + b->wds = i + 1; +#endif +#ifndef Sudden_Underflow + if (de) { +#endif +#ifdef IBM + *e = (de - Bias - (P-1) << 2) + k; + *bits = 4*P + 8 - k - hi0bits(word0(d) & Frac_mask); +#else + *e = de - Bias - (P-1) + k; + *bits = P - k; +#endif +#ifndef Sudden_Underflow + } + else { + *e = de - Bias - (P-1) + 1 + k; +#ifdef Pack_32 + *bits = 32*i - hi0bits(x[i-1]); +#else + *bits = (i+2)*16 - hi0bits(x[i]); +#endif + } +#endif + return b; + } +#undef d0 +#undef d1 + + static double +ratio +#ifdef KR_headers + (a, b) Bigint *a, *b; +#else + (Bigint *a, Bigint *b) +#endif +{ + double da, db; + int k, ka, kb; + + dval(da) = b2d(a, &ka); + dval(db) = b2d(b, &kb); +#ifdef Pack_32 + k = ka - kb + 32*(a->wds - b->wds); +#else + k = ka - kb + 16*(a->wds - b->wds); +#endif +#ifdef IBM + if (k > 0) { + word0(da) += (k >> 2)*Exp_msk1; + if (k &= 3) + dval(da) *= 1 << k; + } + else { + k = -k; + word0(db) += (k >> 2)*Exp_msk1; + if (k &= 3) + dval(db) *= 1 << k; + } +#else + if (k > 0) + word0(da) += k*Exp_msk1; + else { + k = -k; + word0(db) += k*Exp_msk1; + } +#endif + return dval(da) / dval(db); + } + + static CONST double +tens[] = { + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + 1e20, 1e21, 1e22 +#ifdef VAX + , 1e23, 1e24 +#endif + }; + + static CONST double +#ifdef IEEE_Arith +bigtens[] = { 1e16, 1e32, 1e64, 1e128, 1e256 }; +static CONST double tinytens[] = { 1e-16, 1e-32, 1e-64, 1e-128, +#ifdef Avoid_Underflow + 9007199254740992.*9007199254740992.e-256 + /* = 2^106 * 1e-53 */ +#else + 1e-256 +#endif + }; +/* The factor of 2^53 in tinytens[4] helps us avoid setting the underflow */ +/* flag unnecessarily. It leads to a song and dance at the end of strtod. 
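+   (Arithmetic check: 9007199254740992 = 2^53, so the tinytens[4] entry
+   as stored is 2^53 * 2^53 * 1e-256 = 2^106 * 1e-256; strtod's final
+   Avoid_Underflow scaling step multiplies by 2^-106 to remove that
+   factor again.)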
*/ +#define Scale_Bit 0x10 +#define n_bigtens 5 +#else +#ifdef IBM +bigtens[] = { 1e16, 1e32, 1e64 }; +static CONST double tinytens[] = { 1e-16, 1e-32, 1e-64 }; +#define n_bigtens 3 +#else +bigtens[] = { 1e16, 1e32 }; +static CONST double tinytens[] = { 1e-16, 1e-32 }; +#define n_bigtens 2 +#endif +#endif + +#ifndef IEEE_Arith +#undef INFNAN_CHECK +#endif + +#ifdef INFNAN_CHECK + +#ifndef NAN_WORD0 +#define NAN_WORD0 0x7ff80000 +#endif + +#ifndef NAN_WORD1 +#define NAN_WORD1 0 +#endif + + static int +match +#ifdef KR_headers + (sp, t) char **sp, *t; +#else + (CONST char **sp, char *t) +#endif +{ + int c, d; + CONST char *s = *sp; + + while(d = *t++) { + if ((c = *++s) >= 'A' && c <= 'Z') + c += 'a' - 'A'; + if (c != d) + return 0; + } + *sp = s + 1; + return 1; + } + +#ifndef No_Hex_NaN + static void +hexnan +#ifdef KR_headers + (rvp, sp) double *rvp; CONST char **sp; +#else + (double *rvp, CONST char **sp) +#endif +{ + ULong c, x[2]; + CONST char *s; + int havedig, udx0, xshift; + + x[0] = x[1] = 0; + havedig = xshift = 0; + udx0 = 1; + s = *sp; + while(c = *(CONST unsigned char*)++s) { + if (c >= '0' && c <= '9') + c -= '0'; + else if (c >= 'a' && c <= 'f') + c += 10 - 'a'; + else if (c >= 'A' && c <= 'F') + c += 10 - 'A'; + else if (c <= ' ') { + if (udx0 && havedig) { + udx0 = 0; + xshift = 1; + } + continue; + } + else if (/*(*/ c == ')' && havedig) { + *sp = s + 1; + break; + } + else + return; /* invalid form: don't change *sp */ + havedig = 1; + if (xshift) { + xshift = 0; + x[0] = x[1]; + x[1] = 0; + } + if (udx0) + x[0] = (x[0] << 4) | (x[1] >> 28); + x[1] = (x[1] << 4) | c; + } + if ((x[0] &= 0xfffff) || x[1]) { + word0(*rvp) = Exp_mask | x[0]; + word1(*rvp) = x[1]; + } + } +#endif /*No_Hex_NaN*/ +#endif /* INFNAN_CHECK */ + + double +strtod +#ifdef KR_headers + (s00, se) CONST char *s00; char **se; +#else + (CONST char *s00, char **se) +#endif +{ +#ifdef Avoid_Underflow + int scale; +#endif + int bb2, bb5, bbe, bd2, bd5, bbbits, bs2, c, dsign, + e, e1, esign, i, j, k, nd, nd0, nf, nz, nz0, sign; + CONST char *s, *s0, *s1; + double aadj, aadj1, adj, rv, rv0; + Long L; + ULong y, z; + Bigint *bb, *bb1, *bd, *bd0, *bs, *delta; +#ifdef SET_INEXACT + int inexact, oldinexact; +#endif +#ifdef Honor_FLT_ROUNDS + int rounding; +#endif +#ifdef USE_LOCALE + CONST char *s2; +#endif + + sign = nz0 = nz = 0; + dval(rv) = 0.; + for(s = s00;;s++) switch(*s) { + case '-': + sign = 1; + /* no break */ + case '+': + if (*++s) + goto break2; + /* no break */ + case 0: + goto ret0; + case '\t': + case '\n': + case '\v': + case '\f': + case '\r': + case ' ': + continue; + default: + goto break2; + } + break2: + if (*s == '0') { + nz0 = 1; + while(*++s == '0') ; + if (!*s) + goto ret; + } + s0 = s; + y = z = 0; + for(nd = nf = 0; (c = *s) >= '0' && c <= '9'; nd++, s++) + if (nd < 9) + y = 10*y + c - '0'; + else if (nd < 16) + z = 10*z + c - '0'; + nd0 = nd; +#ifdef USE_LOCALE + s1 = localeconv()->decimal_point; + if (c == *s1) { + c = '.'; + if (*++s1) { + s2 = s; + for(;;) { + if (*++s2 != *s1) { + c = 0; + break; + } + if (!*++s1) { + s = s2; + break; + } + } + } + } +#endif + if (c == '.') { + c = *++s; + if (!nd) { + for(; c == '0'; c = *++s) + nz++; + if (c > '0' && c <= '9') { + s0 = s; + nf += nz; + nz = 0; + goto have_dig; + } + goto dig_done; + } + for(; c >= '0' && c <= '9'; c = *++s) { + have_dig: + nz++; + if (c -= '0') { + nf += nz; + for(i = 1; i < nz; i++) + if (nd++ < 9) + y *= 10; + else if (nd <= DBL_DIG + 1) + z *= 10; + if (nd++ < 9) + y = 10*y + c; + else if (nd <= DBL_DIG + 1) + 
z = 10*z + c; + nz = 0; + } + } + } + dig_done: + e = 0; + if (c == 'e' || c == 'E') { + if (!nd && !nz && !nz0) { + goto ret0; + } + s00 = s; + esign = 0; + switch(c = *++s) { + case '-': + esign = 1; + case '+': + c = *++s; + } + if (c >= '0' && c <= '9') { + while(c == '0') + c = *++s; + if (c > '0' && c <= '9') { + L = c - '0'; + s1 = s; + while((c = *++s) >= '0' && c <= '9') + L = 10*L + c - '0'; + if (s - s1 > 8 || L > 19999) + /* Avoid confusion from exponents + * so large that e might overflow. + */ + e = 19999; /* safe for 16 bit ints */ + else + e = (int)L; + if (esign) + e = -e; + } + else + e = 0; + } + else + s = s00; + } + if (!nd) { + if (!nz && !nz0) { +#ifdef INFNAN_CHECK + /* Check for Nan and Infinity */ + switch(c) { + case 'i': + case 'I': + if (match(&s,"nf")) { + --s; + if (!match(&s,"inity")) + ++s; + word0(rv) = 0x7ff00000; + word1(rv) = 0; + goto ret; + } + break; + case 'n': + case 'N': + if (match(&s, "an")) { + word0(rv) = NAN_WORD0; + word1(rv) = NAN_WORD1; +#ifndef No_Hex_NaN + if (*s == '(') /*)*/ + hexnan(&rv, &s); +#endif + goto ret; + } + } +#endif /* INFNAN_CHECK */ + ret0: + s = s00; + sign = 0; + } + goto ret; + } + e1 = e -= nf; + + /* Now we have nd0 digits, starting at s0, followed by a + * decimal point, followed by nd-nd0 digits. The number we're + * after is the integer represented by those digits times + * 10**e */ + + if (!nd0) + nd0 = nd; + k = nd < DBL_DIG + 1 ? nd : DBL_DIG + 1; + dval(rv) = y; + if (k > 9) { +#ifdef SET_INEXACT + if (k > DBL_DIG) + oldinexact = get_inexact(); +#endif + dval(rv) = tens[k - 9] * dval(rv) + z; + } + bd0 = 0; + if (nd <= DBL_DIG +#ifndef RND_PRODQUOT +#ifndef Honor_FLT_ROUNDS + && Flt_Rounds == 1 +#endif +#endif + ) { + if (!e) + goto ret; + if (e > 0) { + if (e <= Ten_pmax) { +#ifdef VAX + goto vax_ovfl_check; +#else +#ifdef Honor_FLT_ROUNDS + /* round correctly FLT_ROUNDS = 2 or 3 */ + if (sign) { + rv = -rv; + sign = 0; + } +#endif + /* rv = */ rounded_product(dval(rv), tens[e]); + goto ret; +#endif + } + i = DBL_DIG - nd; + if (e <= Ten_pmax + i) { + /* A fancier test would sometimes let us do + * this for larger i values. + */ +#ifdef Honor_FLT_ROUNDS + /* round correctly FLT_ROUNDS = 2 or 3 */ + if (sign) { + rv = -rv; + sign = 0; + } +#endif + e -= i; + dval(rv) *= tens[i]; +#ifdef VAX + /* VAX exponent range is so narrow we must + * worry about overflow here... + */ + vax_ovfl_check: + word0(rv) -= P*Exp_msk1; + /* rv = */ rounded_product(dval(rv), tens[e]); + if ((word0(rv) & Exp_mask) + > Exp_msk1*(DBL_MAX_EXP+Bias-1-P)) + goto ovfl; + word0(rv) += P*Exp_msk1; +#else + /* rv = */ rounded_product(dval(rv), tens[e]); +#endif + goto ret; + } + } +#ifndef Inaccurate_Divide + else if (e >= -Ten_pmax) { +#ifdef Honor_FLT_ROUNDS + /* round correctly FLT_ROUNDS = 2 or 3 */ + if (sign) { + rv = -rv; + sign = 0; + } +#endif + /* rv = */ rounded_quotient(dval(rv), tens[-e]); + goto ret; + } +#endif + } + e1 += nd - k; + +#ifdef IEEE_Arith +#ifdef SET_INEXACT + inexact = 1; + if (k <= DBL_DIG) + oldinexact = get_inexact(); +#endif +#ifdef Avoid_Underflow + scale = 0; +#endif +#ifdef Honor_FLT_ROUNDS + if ((rounding = Flt_Rounds) >= 2) { + if (sign) + rounding = rounding == 2 ? 
0 : 2; + else + if (rounding != 2) + rounding = 0; + } +#endif +#endif /*IEEE_Arith*/ + + /* Get starting approximation = rv * 10**e1 */ + + if (e1 > 0) { + if (i = e1 & 15) + dval(rv) *= tens[i]; + if (e1 &= ~15) { + if (e1 > DBL_MAX_10_EXP) { + ovfl: +#ifndef NO_ERRNO + errno = ERANGE; +#endif + /* Can't trust HUGE_VAL */ +#ifdef IEEE_Arith +#ifdef Honor_FLT_ROUNDS + switch(rounding) { + case 0: /* toward 0 */ + case 3: /* toward -infinity */ + word0(rv) = Big0; + word1(rv) = Big1; + break; + default: + word0(rv) = Exp_mask; + word1(rv) = 0; + } +#else /*Honor_FLT_ROUNDS*/ + word0(rv) = Exp_mask; + word1(rv) = 0; +#endif /*Honor_FLT_ROUNDS*/ +#ifdef SET_INEXACT + /* set overflow bit */ + dval(rv0) = 1e300; + dval(rv0) *= dval(rv0); +#endif +#else /*IEEE_Arith*/ + word0(rv) = Big0; + word1(rv) = Big1; +#endif /*IEEE_Arith*/ + if (bd0) + goto retfree; + goto ret; + } + e1 >>= 4; + for(j = 0; e1 > 1; j++, e1 >>= 1) + if (e1 & 1) + dval(rv) *= bigtens[j]; + /* The last multiplication could overflow. */ + word0(rv) -= P*Exp_msk1; + dval(rv) *= bigtens[j]; + if ((z = word0(rv) & Exp_mask) + > Exp_msk1*(DBL_MAX_EXP+Bias-P)) + goto ovfl; + if (z > Exp_msk1*(DBL_MAX_EXP+Bias-1-P)) { + /* set to largest number */ + /* (Can't trust DBL_MAX) */ + word0(rv) = Big0; + word1(rv) = Big1; + } + else + word0(rv) += P*Exp_msk1; + } + } + else if (e1 < 0) { + e1 = -e1; + if (i = e1 & 15) + dval(rv) /= tens[i]; + if (e1 >>= 4) { + if (e1 >= 1 << n_bigtens) + goto undfl; +#ifdef Avoid_Underflow + if (e1 & Scale_Bit) + scale = 2*P; + for(j = 0; e1 > 0; j++, e1 >>= 1) + if (e1 & 1) + dval(rv) *= tinytens[j]; + if (scale && (j = 2*P + 1 - ((word0(rv) & Exp_mask) + >> Exp_shift)) > 0) { + /* scaled rv is denormal; zap j low bits */ + if (j >= 32) { + word1(rv) = 0; + if (j >= 53) + word0(rv) = (P+2)*Exp_msk1; + else + word0(rv) &= 0xffffffff << j-32; + } + else + word1(rv) &= 0xffffffff << j; + } +#else + for(j = 0; e1 > 1; j++, e1 >>= 1) + if (e1 & 1) + dval(rv) *= tinytens[j]; + /* The last multiplication could underflow. */ + dval(rv0) = dval(rv); + dval(rv) *= tinytens[j]; + if (!dval(rv)) { + dval(rv) = 2.*dval(rv0); + dval(rv) *= tinytens[j]; +#endif + if (!dval(rv)) { + undfl: + dval(rv) = 0.; +#ifndef NO_ERRNO + errno = ERANGE; +#endif + if (bd0) + goto retfree; + goto ret; + } +#ifndef Avoid_Underflow + word0(rv) = Tiny0; + word1(rv) = Tiny1; + /* The refinement below will clean + * this approximation up. 
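+				 * (With IEEE format, Tiny0/Tiny1 = 0/1
+				 * encode the smallest positive denormal,
+				 * about 4.9e-324.)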
+ */ + } +#endif + } + } + + /* Now the hard part -- adjusting rv to the correct value.*/ + + /* Put digits into bd: true value = bd * 10^e */ + + bd0 = s2b(s0, nd0, nd, y); + + for(;;) { + bd = Balloc(bd0->k); + Bcopy(bd, bd0); + bb = d2b(dval(rv), &bbe, &bbbits); /* rv = bb * 2^bbe */ + bs = i2b(1); + + if (e >= 0) { + bb2 = bb5 = 0; + bd2 = bd5 = e; + } + else { + bb2 = bb5 = -e; + bd2 = bd5 = 0; + } + if (bbe >= 0) + bb2 += bbe; + else + bd2 -= bbe; + bs2 = bb2; +#ifdef Honor_FLT_ROUNDS + if (rounding != 1) + bs2++; +#endif +#ifdef Avoid_Underflow + j = bbe - scale; + i = j + bbbits - 1; /* logb(rv) */ + if (i < Emin) /* denormal */ + j += P - Emin; + else + j = P + 1 - bbbits; +#else /*Avoid_Underflow*/ +#ifdef Sudden_Underflow +#ifdef IBM + j = 1 + 4*P - 3 - bbbits + ((bbe + bbbits - 1) & 3); +#else + j = P + 1 - bbbits; +#endif +#else /*Sudden_Underflow*/ + j = bbe; + i = j + bbbits - 1; /* logb(rv) */ + if (i < Emin) /* denormal */ + j += P - Emin; + else + j = P + 1 - bbbits; +#endif /*Sudden_Underflow*/ +#endif /*Avoid_Underflow*/ + bb2 += j; + bd2 += j; +#ifdef Avoid_Underflow + bd2 += scale; +#endif + i = bb2 < bd2 ? bb2 : bd2; + if (i > bs2) + i = bs2; + if (i > 0) { + bb2 -= i; + bd2 -= i; + bs2 -= i; + } + if (bb5 > 0) { + bs = pow5mult(bs, bb5); + bb1 = mult(bs, bb); + Bfree(bb); + bb = bb1; + } + if (bb2 > 0) + bb = lshift(bb, bb2); + if (bd5 > 0) + bd = pow5mult(bd, bd5); + if (bd2 > 0) + bd = lshift(bd, bd2); + if (bs2 > 0) + bs = lshift(bs, bs2); + delta = diff(bb, bd); + dsign = delta->sign; + delta->sign = 0; + i = cmp(delta, bs); +#ifdef Honor_FLT_ROUNDS + if (rounding != 1) { + if (i < 0) { + /* Error is less than an ulp */ + if (!delta->x[0] && delta->wds <= 1) { + /* exact */ +#ifdef SET_INEXACT + inexact = 0; +#endif + break; + } + if (rounding) { + if (dsign) { + adj = 1.; + goto apply_adj; + } + } + else if (!dsign) { + adj = -1.; + if (!word1(rv) + && !(word0(rv) & Frac_mask)) { + y = word0(rv) & Exp_mask; +#ifdef Avoid_Underflow + if (!scale || y > 2*P*Exp_msk1) +#else + if (y) +#endif + { + delta = lshift(delta,Log2P); + if (cmp(delta, bs) <= 0) + adj = -0.5; + } + } + apply_adj: +#ifdef Avoid_Underflow + if (scale && (y = word0(rv) & Exp_mask) + <= 2*P*Exp_msk1) + word0(adj) += (2*P+1)*Exp_msk1 - y; +#else +#ifdef Sudden_Underflow + if ((word0(rv) & Exp_mask) <= + P*Exp_msk1) { + word0(rv) += P*Exp_msk1; + dval(rv) += adj*ulp(dval(rv)); + word0(rv) -= P*Exp_msk1; + } + else +#endif /*Sudden_Underflow*/ +#endif /*Avoid_Underflow*/ + dval(rv) += adj*ulp(dval(rv)); + } + break; + } + adj = ratio(delta, bs); + if (adj < 1.) + adj = 1.; + if (adj <= 0x7ffffffe) { + /* adj = rounding ? ceil(adj) : floor(adj); */ + y = adj; + if (y != adj) { + if (!((rounding>>1) ^ dsign)) + y++; + adj = y; + } + } +#ifdef Avoid_Underflow + if (scale && (y = word0(rv) & Exp_mask) <= 2*P*Exp_msk1) + word0(adj) += (2*P+1)*Exp_msk1 - y; +#else +#ifdef Sudden_Underflow + if ((word0(rv) & Exp_mask) <= P*Exp_msk1) { + word0(rv) += P*Exp_msk1; + adj *= ulp(dval(rv)); + if (dsign) + dval(rv) += adj; + else + dval(rv) -= adj; + word0(rv) -= P*Exp_msk1; + goto cont; + } +#endif /*Sudden_Underflow*/ +#endif /*Avoid_Underflow*/ + adj *= ulp(dval(rv)); + if (dsign) + dval(rv) += adj; + else + dval(rv) -= adj; + goto cont; + } +#endif /*Honor_FLT_ROUNDS*/ + + if (i < 0) { + /* Error is less than half an ulp -- check for + * special case of mantissa a power of two. 
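+			 * (This case matters because just below a power of
+			 * two the spacing of representable numbers halves,
+			 * so the half-ulp comparison is asymmetric there.)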
+ */ + if (dsign || word1(rv) || word0(rv) & Bndry_mask +#ifdef IEEE_Arith +#ifdef Avoid_Underflow + || (word0(rv) & Exp_mask) <= (2*P+1)*Exp_msk1 +#else + || (word0(rv) & Exp_mask) <= Exp_msk1 +#endif +#endif + ) { +#ifdef SET_INEXACT + if (!delta->x[0] && delta->wds <= 1) + inexact = 0; +#endif + break; + } + if (!delta->x[0] && delta->wds <= 1) { + /* exact result */ +#ifdef SET_INEXACT + inexact = 0; +#endif + break; + } + delta = lshift(delta,Log2P); + if (cmp(delta, bs) > 0) + goto drop_down; + break; + } + if (i == 0) { + /* exactly half-way between */ + if (dsign) { + if ((word0(rv) & Bndry_mask1) == Bndry_mask1 + && word1(rv) == ( +#ifdef Avoid_Underflow + (scale && (y = word0(rv) & Exp_mask) <= 2*P*Exp_msk1) + ? (0xffffffff & (0xffffffff << (2*P+1-(y>>Exp_shift)))) : +#endif + 0xffffffff)) { + /*boundary case -- increment exponent*/ + word0(rv) = (word0(rv) & Exp_mask) + + Exp_msk1 +#ifdef IBM + | Exp_msk1 >> 4 +#endif + ; + word1(rv) = 0; +#ifdef Avoid_Underflow + dsign = 0; +#endif + break; + } + } + else if (!(word0(rv) & Bndry_mask) && !word1(rv)) { + drop_down: + /* boundary case -- decrement exponent */ +#ifdef Sudden_Underflow /*{{*/ + L = word0(rv) & Exp_mask; +#ifdef IBM + if (L < Exp_msk1) +#else +#ifdef Avoid_Underflow + if (L <= (scale ? (2*P+1)*Exp_msk1 : Exp_msk1)) +#else + if (L <= Exp_msk1) +#endif /*Avoid_Underflow*/ +#endif /*IBM*/ + goto undfl; + L -= Exp_msk1; +#else /*Sudden_Underflow}{*/ +#ifdef Avoid_Underflow + if (scale) { + L = word0(rv) & Exp_mask; + if (L <= (2*P+1)*Exp_msk1) { + if (L > (P+2)*Exp_msk1) + /* round even ==> */ + /* accept rv */ + break; + /* rv = smallest denormal */ + goto undfl; + } + } +#endif /*Avoid_Underflow*/ + L = (word0(rv) & Exp_mask) - Exp_msk1; +#endif /*Sudden_Underflow}}*/ + word0(rv) = L | Bndry_mask1; + word1(rv) = 0xffffffff; +#ifdef IBM + goto cont; +#else + break; +#endif + } +#ifndef ROUND_BIASED + if (!(word1(rv) & LSB)) + break; +#endif + if (dsign) + dval(rv) += ulp(dval(rv)); +#ifndef ROUND_BIASED + else { + dval(rv) -= ulp(dval(rv)); +#ifndef Sudden_Underflow + if (!dval(rv)) + goto undfl; +#endif + } +#ifdef Avoid_Underflow + dsign = 1 - dsign; +#endif +#endif + break; + } + if ((aadj = ratio(delta, bs)) <= 2.) { + if (dsign) + aadj = aadj1 = 1.; + else if (word1(rv) || word0(rv) & Bndry_mask) { +#ifndef Sudden_Underflow + if (word1(rv) == Tiny1 && !word0(rv)) + goto undfl; +#endif + aadj = 1.; + aadj1 = -1.; + } + else { + /* special case -- power of FLT_RADIX to be */ + /* rounded down... */ + + if (aadj < 2./FLT_RADIX) + aadj = 1./FLT_RADIX; + else + aadj *= 0.5; + aadj1 = -aadj; + } + } + else { + aadj *= 0.5; + aadj1 = dsign ? aadj : -aadj; +#ifdef Check_FLT_ROUNDS + switch(Rounding) { + case 2: /* towards +infinity */ + aadj1 -= 0.5; + break; + case 0: /* towards 0 */ + case 3: /* towards -infinity */ + aadj1 += 0.5; + } +#else + if (Flt_Rounds == 0) + aadj1 += 0.5; +#endif /*Check_FLT_ROUNDS*/ + } + y = word0(rv) & Exp_mask; + + /* Check for overflow */ + + if (y == Exp_msk1*(DBL_MAX_EXP+Bias-1)) { + dval(rv0) = dval(rv); + word0(rv) -= P*Exp_msk1; + adj = aadj1 * ulp(dval(rv)); + dval(rv) += adj; + if ((word0(rv) & Exp_mask) >= + Exp_msk1*(DBL_MAX_EXP+Bias-P)) { + if (word0(rv0) == Big0 && word1(rv0) == Big1) + goto ovfl; + word0(rv) = Big0; + word1(rv) = Big1; + goto cont; + } + else + word0(rv) += P*Exp_msk1; + } + else { +#ifdef Avoid_Underflow + if (scale && y <= 2*P*Exp_msk1) { + if (aadj <= 0x7fffffff) { + if ((z = aadj) <= 0) + z = 1; + aadj = z; + aadj1 = dsign ? 
aadj : -aadj; + } + word0(aadj1) += (2*P+1)*Exp_msk1 - y; + } + adj = aadj1 * ulp(dval(rv)); + dval(rv) += adj; +#else +#ifdef Sudden_Underflow + if ((word0(rv) & Exp_mask) <= P*Exp_msk1) { + dval(rv0) = dval(rv); + word0(rv) += P*Exp_msk1; + adj = aadj1 * ulp(dval(rv)); + dval(rv) += adj; +#ifdef IBM + if ((word0(rv) & Exp_mask) < P*Exp_msk1) +#else + if ((word0(rv) & Exp_mask) <= P*Exp_msk1) +#endif + { + if (word0(rv0) == Tiny0 + && word1(rv0) == Tiny1) + goto undfl; + word0(rv) = Tiny0; + word1(rv) = Tiny1; + goto cont; + } + else + word0(rv) -= P*Exp_msk1; + } + else { + adj = aadj1 * ulp(dval(rv)); + dval(rv) += adj; + } +#else /*Sudden_Underflow*/ + /* Compute adj so that the IEEE rounding rules will + * correctly round rv + adj in some half-way cases. + * If rv * ulp(rv) is denormalized (i.e., + * y <= (P-1)*Exp_msk1), we must adjust aadj to avoid + * trouble from bits lost to denormalization; + * example: 1.2e-307 . + */ + if (y <= (P-1)*Exp_msk1 && aadj > 1.) { + aadj1 = (double)(int)(aadj + 0.5); + if (!dsign) + aadj1 = -aadj1; + } + adj = aadj1 * ulp(dval(rv)); + dval(rv) += adj; +#endif /*Sudden_Underflow*/ +#endif /*Avoid_Underflow*/ + } + z = word0(rv) & Exp_mask; +#ifndef SET_INEXACT +#ifdef Avoid_Underflow + if (!scale) +#endif + if (y == z) { + /* Can we stop now? */ + L = (Long)aadj; + aadj -= L; + /* The tolerances below are conservative. */ + if (dsign || word1(rv) || word0(rv) & Bndry_mask) { + if (aadj < .4999999 || aadj > .5000001) + break; + } + else if (aadj < .4999999/FLT_RADIX) + break; + } +#endif + cont: + Bfree(bb); + Bfree(bd); + Bfree(bs); + Bfree(delta); + } +#ifdef SET_INEXACT + if (inexact) { + if (!oldinexact) { + word0(rv0) = Exp_1 + (70 << Exp_shift); + word1(rv0) = 0; + dval(rv0) += 1.; + } + } + else if (!oldinexact) + clear_inexact(); +#endif +#ifdef Avoid_Underflow + if (scale) { + word0(rv0) = Exp_1 - 2*P*Exp_msk1; + word1(rv0) = 0; + dval(rv) *= dval(rv0); +#ifndef NO_ERRNO + /* try to avoid the bug of testing an 8087 register value */ + if (word0(rv) == 0 && word1(rv) == 0) + errno = ERANGE; +#endif + } +#endif /* Avoid_Underflow */ +#ifdef SET_INEXACT + if (inexact && !(word0(rv) & Exp_mask)) { + /* set underflow bit */ + dval(rv0) = 1e-300; + dval(rv0) *= dval(rv0); + } +#endif + retfree: + Bfree(bb); + Bfree(bd); + Bfree(bs); + Bfree(bd0); + Bfree(delta); + ret: + if (se) + *se = (char *)s; + return sign ? 
-dval(rv) : dval(rv); + } + + static int +quorem +#ifdef KR_headers + (b, S) Bigint *b, *S; +#else + (Bigint *b, Bigint *S) +#endif +{ + int n; + ULong *bx, *bxe, q, *sx, *sxe; +#ifdef ULLong + ULLong borrow, carry, y, ys; +#else + ULong borrow, carry, y, ys; +#ifdef Pack_32 + ULong si, z, zs; +#endif +#endif + + n = S->wds; +#ifdef DEBUG + /*debug*/ if (b->wds > n) + /*debug*/ Bug("oversize b in quorem"); +#endif + if (b->wds < n) + return 0; + sx = S->x; + sxe = sx + --n; + bx = b->x; + bxe = bx + n; + q = *bxe / (*sxe + 1); /* ensure q <= true quotient */ +#ifdef DEBUG + /*debug*/ if (q > 9) + /*debug*/ Bug("oversized quotient in quorem"); +#endif + if (q) { + borrow = 0; + carry = 0; + do { +#ifdef ULLong + ys = *sx++ * (ULLong)q + carry; + carry = ys >> 32; + y = *bx - (ys & FFFFFFFF) - borrow; + borrow = y >> 32 & (ULong)1; + *bx++ = y & FFFFFFFF; +#else +#ifdef Pack_32 + si = *sx++; + ys = (si & 0xffff) * q + carry; + zs = (si >> 16) * q + (ys >> 16); + carry = zs >> 16; + y = (*bx & 0xffff) - (ys & 0xffff) - borrow; + borrow = (y & 0x10000) >> 16; + z = (*bx >> 16) - (zs & 0xffff) - borrow; + borrow = (z & 0x10000) >> 16; + Storeinc(bx, z, y); +#else + ys = *sx++ * q + carry; + carry = ys >> 16; + y = *bx - (ys & 0xffff) - borrow; + borrow = (y & 0x10000) >> 16; + *bx++ = y & 0xffff; +#endif +#endif + } + while(sx <= sxe); + if (!*bxe) { + bx = b->x; + while(--bxe > bx && !*bxe) + --n; + b->wds = n; + } + } + if (cmp(b, S) >= 0) { + q++; + borrow = 0; + carry = 0; + bx = b->x; + sx = S->x; + do { +#ifdef ULLong + ys = *sx++ + carry; + carry = ys >> 32; + y = *bx - (ys & FFFFFFFF) - borrow; + borrow = y >> 32 & (ULong)1; + *bx++ = y & FFFFFFFF; +#else +#ifdef Pack_32 + si = *sx++; + ys = (si & 0xffff) + carry; + zs = (si >> 16) + (ys >> 16); + carry = zs >> 16; + y = (*bx & 0xffff) - (ys & 0xffff) - borrow; + borrow = (y & 0x10000) >> 16; + z = (*bx >> 16) - (zs & 0xffff) - borrow; + borrow = (z & 0x10000) >> 16; + Storeinc(bx, z, y); +#else + ys = *sx++ + carry; + carry = ys >> 16; + y = *bx - (ys & 0xffff) - borrow; + borrow = (y & 0x10000) >> 16; + *bx++ = y & 0xffff; +#endif +#endif + } + while(sx <= sxe); + bx = b->x; + bxe = bx + n; + if (!*bxe) { + while(--bxe > bx && !*bxe) + --n; + b->wds = n; + } + } + return q; + } + +#ifndef MULTIPLE_THREADS + static char *dtoa_result; +#endif + + static char * +#ifdef KR_headers +rv_alloc(i) int i; +#else +rv_alloc(int i) +#endif +{ + int j, k, *r; + + j = sizeof(ULong); + for(k = 0; + sizeof(Bigint) - sizeof(ULong) - sizeof(int) + j <= i; + j <<= 1) + k++; + r = (int*)Balloc(k); + *r = k; + return +#ifndef MULTIPLE_THREADS + dtoa_result = +#endif + (char *)(r+1); + } + + static char * +#ifdef KR_headers +nrv_alloc(s, rve, n) char *s, **rve; int n; +#else +nrv_alloc(char *s, char **rve, int n) +#endif +{ + char *rv, *t; + + t = rv = rv_alloc(n); + while(*t = *s++) t++; + if (rve) + *rve = t; + return rv; + } + +/* freedtoa(s) must be used to free values s returned by dtoa + * when MULTIPLE_THREADS is #defined. It should be used in all cases, + * but for consistency with earlier versions of dtoa, it is optional + * when MULTIPLE_THREADS is not defined. + */ + + void +#ifdef KR_headers +freedtoa(s) char *s; +#else +freedtoa(char *s) +#endif +{ + Bigint *b = (Bigint *)((int *)s - 1); + b->maxwds = 1 << (b->k = *(int*)b); + Bfree(b); +#ifndef MULTIPLE_THREADS + if (s == dtoa_result) + dtoa_result = 0; +#endif + } + +/* dtoa for IEEE arithmetic (dmg): convert double to ASCII string. 
+ * + * Inspired by "How to Print Floating-Point Numbers Accurately" by + * Guy L. Steele, Jr. and Jon L. White [Proc. ACM SIGPLAN '90, pp. 112-126]. + * + * Modifications: + * 1. Rather than iterating, we use a simple numeric overestimate + * to determine k = floor(log10(d)). We scale relevant + * quantities using O(log2(k)) rather than O(k) multiplications. + * 2. For some modes > 2 (corresponding to ecvt and fcvt), we don't + * try to generate digits strictly left to right. Instead, we + * compute with fewer bits and propagate the carry if necessary + * when rounding the final digit up. This is often faster. + * 3. Under the assumption that input will be rounded nearest, + * mode 0 renders 1e23 as 1e23 rather than 9.999999999999999e22. + * That is, we allow equality in stopping tests when the + * round-nearest rule will give the same floating-point value + * as would satisfaction of the stopping test with strict + * inequality. + * 4. We remove common factors of powers of 2 from relevant + * quantities. + * 5. When converting floating-point integers less than 1e16, + * we use floating-point arithmetic rather than resorting + * to multiple-precision integers. + * 6. When asked to produce fewer than 15 digits, we first try + * to get by with floating-point arithmetic; we resort to + * multiple-precision integer arithmetic only if we cannot + * guarantee that the floating-point calculation has given + * the correctly rounded result. For k requested digits and + * "uniformly" distributed input, the probability is + * something like 10^(k-15) that we must resort to the Long + * calculation. + */ + + char * +dtoa +#ifdef KR_headers + (d, mode, ndigits, decpt, sign, rve) + double d; int mode, ndigits, *decpt, *sign; char **rve; +#else + (double d, int mode, int ndigits, int *decpt, int *sign, char **rve) +#endif +{ + /* Arguments ndigits, decpt, sign are similar to those + of ecvt and fcvt; trailing zeros are suppressed from + the returned string. If not null, *rve is set to point + to the end of the return value. If d is +-Infinity or NaN, + then *decpt is set to 9999. + + mode: + 0 ==> shortest string that yields d when read in + and rounded to nearest. + 1 ==> like 0, but with Steele & White stopping rule; + e.g. with IEEE P754 arithmetic , mode 0 gives + 1e23 whereas mode 1 gives 9.999999999999999e22. + 2 ==> max(1,ndigits) significant digits. This gives a + return value similar to that of ecvt, except + that trailing zeros are suppressed. + 3 ==> through ndigits past the decimal point. This + gives a return value similar to that from fcvt, + except that trailing zeros are suppressed, and + ndigits can be negative. + 4,5 ==> similar to 2 and 3, respectively, but (in + round-nearest mode) with the tests of mode 0 to + possibly return a shorter string that rounds to d. + With IEEE arithmetic and compilation with + -DHonor_FLT_ROUNDS, modes 4 and 5 behave the same + as modes 2 and 3 when FLT_ROUNDS != 1. + 6-9 ==> Debugging modes similar to mode - 4: don't try + fast floating-point estimate (if applicable). + + Values of mode other than 0-9 are treated as mode 0. + + Sufficient space is allocated to the return value + to hold the suppressed trailing zeros. 
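+
+	Illustrative example (an addition, not part of the original
+	notes): for d = 3.14, mode = 0, ndigits = 0, dtoa returns the
+	digit string "314" with *decpt == 1 and *sign == 0; the caller
+	re-inserts the decimal point and sign, and the string may be
+	released with freedtoa().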
+ */ + + int bbits, b2, b5, be, dig, i, ieps, ilim, ilim0, ilim1, + j, j1, k, k0, k_check, leftright, m2, m5, s2, s5, + spec_case, try_quick; + Long L; +#ifndef Sudden_Underflow + int denorm; + ULong x; +#endif + Bigint *b, *b1, *delta, *mlo, *mhi, *S; + double d2, ds, eps; + char *s, *s0; +#ifdef Honor_FLT_ROUNDS + int rounding; +#endif +#ifdef SET_INEXACT + int inexact, oldinexact; +#endif + +#ifndef MULTIPLE_THREADS + if (dtoa_result) { + freedtoa(dtoa_result); + dtoa_result = 0; + } +#endif + + if (word0(d) & Sign_bit) { + /* set sign for everything, including 0's and NaNs */ + *sign = 1; + word0(d) &= ~Sign_bit; /* clear sign bit */ + } + else + *sign = 0; + +#if defined(IEEE_Arith) + defined(VAX) +#ifdef IEEE_Arith + if ((word0(d) & Exp_mask) == Exp_mask) +#else + if (word0(d) == 0x8000) +#endif + { + /* Infinity or NaN */ + *decpt = 9999; +#ifdef IEEE_Arith + if (!word1(d) && !(word0(d) & 0xfffff)) + return nrv_alloc("Infinity", rve, 8); +#endif + return nrv_alloc("NaN", rve, 3); + } +#endif +#ifdef IBM + dval(d) += 0; /* normalize */ +#endif + if (!dval(d)) { + *decpt = 1; + return nrv_alloc("0", rve, 1); + } + +#ifdef SET_INEXACT + try_quick = oldinexact = get_inexact(); + inexact = 1; +#endif +#ifdef Honor_FLT_ROUNDS + if ((rounding = Flt_Rounds) >= 2) { + if (*sign) + rounding = rounding == 2 ? 0 : 2; + else + if (rounding != 2) + rounding = 0; + } +#endif + + b = d2b(dval(d), &be, &bbits); +#ifdef Sudden_Underflow + i = (int)(word0(d) >> Exp_shift1 & (Exp_mask>>Exp_shift1)); +#else + if (i = (int)(word0(d) >> Exp_shift1 & (Exp_mask>>Exp_shift1))) { +#endif + dval(d2) = dval(d); + word0(d2) &= Frac_mask1; + word0(d2) |= Exp_11; +#ifdef IBM + if (j = 11 - hi0bits(word0(d2) & Frac_mask)) + dval(d2) /= 1 << j; +#endif + + /* log(x) ~=~ log(1.5) + (x-1.5)/1.5 + * log10(x) = log(x) / log(10) + * ~=~ log(1.5)/log(10) + (x-1.5)/(1.5*log(10)) + * log10(d) = (i-Bias)*log(2)/log(10) + log10(d2) + * + * This suggests computing an approximation k to log10(d) by + * + * k = (i - Bias)*0.301029995663981 + * + ( (d2-1.5)*0.289529654602168 + 0.176091259055681 ); + * + * We want k to be too large rather than too small. + * The error in the first-order Taylor series approximation + * is in our favor, so we just round up the constant enough + * to compensate for any error in the multiplication of + * (i - Bias) by 0.301029995663981; since |i - Bias| <= 1077, + * and 1077 * 0.30103 * 2^-52 ~=~ 7.2e-14, + * adding 1e-13 to the constant term more than suffices. + * Hence we adjust the constant term to 0.1760912590558. + * (We could get a more accurate k by invoking log10, + * but this is probably not worthwhile.) + */ + + i -= Bias; +#ifdef IBM + i <<= 2; + i += j; +#endif +#ifndef Sudden_Underflow + denorm = 0; + } + else { + /* d is denormalized */ + + i = bbits + be + (Bias + (P-1) - 1); + x = i > 32 ? word0(d) << 64 - i | word1(d) >> i - 32 + : word1(d) << 32 - i; + dval(d2) = x; + word0(d2) -= 31*Exp_msk1; /* adjust exponent */ + i -= (Bias + (P-1) - 1) + 1; + denorm = 1; + } +#endif + ds = (dval(d2)-1.5)*0.289529654602168 + 0.1760912590558 + i*0.301029995663981; + k = (int)ds; + if (ds < 0. 
&& ds != k) + k--; /* want k = floor(ds) */ + k_check = 1; + if (k >= 0 && k <= Ten_pmax) { + if (dval(d) < tens[k]) + k--; + k_check = 0; + } + j = bbits - i - 1; + if (j >= 0) { + b2 = 0; + s2 = j; + } + else { + b2 = -j; + s2 = 0; + } + if (k >= 0) { + b5 = 0; + s5 = k; + s2 += k; + } + else { + b2 -= k; + b5 = -k; + s5 = 0; + } + if (mode < 0 || mode > 9) + mode = 0; + +#ifndef SET_INEXACT +#ifdef Check_FLT_ROUNDS + try_quick = Rounding == 1; +#else + try_quick = 1; +#endif +#endif /*SET_INEXACT*/ + + if (mode > 5) { + mode -= 4; + try_quick = 0; + } + leftright = 1; + switch(mode) { + case 0: + case 1: + ilim = ilim1 = -1; + i = 18; + ndigits = 0; + break; + case 2: + leftright = 0; + /* no break */ + case 4: + if (ndigits <= 0) + ndigits = 1; + ilim = ilim1 = i = ndigits; + break; + case 3: + leftright = 0; + /* no break */ + case 5: + i = ndigits + k + 1; + ilim = i; + ilim1 = i - 1; + if (i <= 0) + i = 1; + } + s = s0 = rv_alloc(i); + +#ifdef Honor_FLT_ROUNDS + if (mode > 1 && rounding != 1) + leftright = 0; +#endif + + if (ilim >= 0 && ilim <= Quick_max && try_quick) { + + /* Try to get by with floating-point arithmetic. */ + + i = 0; + dval(d2) = dval(d); + k0 = k; + ilim0 = ilim; + ieps = 2; /* conservative */ + if (k > 0) { + ds = tens[k&0xf]; + j = k >> 4; + if (j & Bletch) { + /* prevent overflows */ + j &= Bletch - 1; + dval(d) /= bigtens[n_bigtens-1]; + ieps++; + } + for(; j; j >>= 1, i++) + if (j & 1) { + ieps++; + ds *= bigtens[i]; + } + dval(d) /= ds; + } + else if (j1 = -k) { + dval(d) *= tens[j1 & 0xf]; + for(j = j1 >> 4; j; j >>= 1, i++) + if (j & 1) { + ieps++; + dval(d) *= bigtens[i]; + } + } + if (k_check && dval(d) < 1. && ilim > 0) { + if (ilim1 <= 0) + goto fast_failed; + ilim = ilim1; + k--; + dval(d) *= 10.; + ieps++; + } + dval(eps) = ieps*dval(d) + 7.; + word0(eps) -= (P-1)*Exp_msk1; + if (ilim == 0) { + S = mhi = 0; + dval(d) -= 5.; + if (dval(d) > dval(eps)) + goto one_digit; + if (dval(d) < -dval(eps)) + goto no_digits; + goto fast_failed; + } +#ifndef No_leftright + if (leftright) { + /* Use Steele & White method of only + * generating digits needed. + */ + dval(eps) = 0.5/tens[ilim-1] - dval(eps); + for(i = 0;;) { + L = dval(d); + dval(d) -= L; + *s++ = '0' + (int)L; + if (dval(d) < dval(eps)) + goto ret1; + if (1. - dval(d) < dval(eps)) + goto bump_up; + if (++i >= ilim) + break; + dval(eps) *= 10.; + dval(d) *= 10.; + } + } + else { +#endif + /* Generate ilim digits, then fix them up. */ + dval(eps) *= tens[ilim-1]; + for(i = 1;; i++, dval(d) *= 10.) { + L = (Long)(dval(d)); + if (!(dval(d) -= L)) + ilim = i; + *s++ = '0' + (int)L; + if (i == ilim) { + if (dval(d) > 0.5 + dval(eps)) + goto bump_up; + else if (dval(d) < 0.5 - dval(eps)) { + while(*--s == '0'); + s++; + goto ret1; + } + break; + } + } +#ifndef No_leftright + } +#endif + fast_failed: + s = s0; + dval(d) = dval(d2); + k = k0; + ilim = ilim0; + } + + /* Do we have a "small" integer? */ + + if (be >= 0 && k <= Int_max) { + /* Yes. */ + ds = tens[k]; + if (ndigits < 0 && ilim <= 0) { + S = mhi = 0; + if (ilim < 0 || dval(d) <= 5*ds) + goto no_digits; + goto one_digit; + } + for(i = 1;; i++, dval(d) *= 10.) 
{ + L = (Long)(dval(d) / ds); + dval(d) -= L*ds; +#ifdef Check_FLT_ROUNDS + /* If FLT_ROUNDS == 2, L will usually be high by 1 */ + if (dval(d) < 0) { + L--; + dval(d) += ds; + } +#endif + *s++ = '0' + (int)L; + if (!dval(d)) { +#ifdef SET_INEXACT + inexact = 0; +#endif + break; + } + if (i == ilim) { +#ifdef Honor_FLT_ROUNDS + if (mode > 1) + switch(rounding) { + case 0: goto ret1; + case 2: goto bump_up; + } +#endif + dval(d) += dval(d); + if (dval(d) > ds || dval(d) == ds && L & 1) { + bump_up: + while(*--s == '9') + if (s == s0) { + k++; + *s = '0'; + break; + } + ++*s++; + } + break; + } + } + goto ret1; + } + + m2 = b2; + m5 = b5; + mhi = mlo = 0; + if (leftright) { + i = +#ifndef Sudden_Underflow + denorm ? be + (Bias + (P-1) - 1 + 1) : +#endif +#ifdef IBM + 1 + 4*P - 3 - bbits + ((bbits + be - 1) & 3); +#else + 1 + P - bbits; +#endif + b2 += i; + s2 += i; + mhi = i2b(1); + } + if (m2 > 0 && s2 > 0) { + i = m2 < s2 ? m2 : s2; + b2 -= i; + m2 -= i; + s2 -= i; + } + if (b5 > 0) { + if (leftright) { + if (m5 > 0) { + mhi = pow5mult(mhi, m5); + b1 = mult(mhi, b); + Bfree(b); + b = b1; + } + if (j = b5 - m5) + b = pow5mult(b, j); + } + else + b = pow5mult(b, b5); + } + S = i2b(1); + if (s5 > 0) + S = pow5mult(S, s5); + + /* Check for special case that d is a normalized power of 2. */ + + spec_case = 0; + if ((mode < 2 || leftright) +#ifdef Honor_FLT_ROUNDS + && rounding == 1 +#endif + ) { + if (!word1(d) && !(word0(d) & Bndry_mask) +#ifndef Sudden_Underflow + && word0(d) & (Exp_mask & ~Exp_msk1) +#endif + ) { + /* The special case */ + b2 += Log2P; + s2 += Log2P; + spec_case = 1; + } + } + + /* Arrange for convenient computation of quotients: + * shift left if necessary so divisor has 4 leading 0 bits. + * + * Perhaps we should just compute leading 28 bits of S once + * and for all and pass them and a shift to quorem, so it + * can do shifts and ors to compute the numerator for q. + */ +#ifdef Pack_32 + if (i = ((s5 ? 32 - hi0bits(S->x[S->wds-1]) : 1) + s2) & 0x1f) + i = 32 - i; +#else + if (i = ((s5 ? 32 - hi0bits(S->x[S->wds-1]) : 1) + s2) & 0xf) + i = 16 - i; +#endif + if (i > 4) { + i -= 4; + b2 += i; + m2 += i; + s2 += i; + } + else if (i < 4) { + i += 28; + b2 += i; + m2 += i; + s2 += i; + } + if (b2 > 0) + b = lshift(b, b2); + if (s2 > 0) + S = lshift(S, s2); + if (k_check) { + if (cmp(b,S) < 0) { + k--; + b = multadd(b, 10, 0); /* we botched the k estimate */ + if (leftright) + mhi = multadd(mhi, 10, 0); + ilim = ilim1; + } + } + if (ilim <= 0 && (mode == 3 || mode == 5)) { + if (ilim < 0 || cmp(b,S = multadd(S,5,0)) <= 0) { + /* no digits, fcvt style */ + no_digits: + k = -1 - ndigits; + goto ret; + } + one_digit: + *s++ = '1'; + k++; + goto ret; + } + if (leftright) { + if (m2 > 0) + mhi = lshift(mhi, m2); + + /* Compute mlo -- check for special case + * that d is a normalized power of 2. + */ + + mlo = mhi; + if (spec_case) { + mhi = Balloc(mhi->k); + Bcopy(mhi, mlo); + mhi = lshift(mhi, Log2P); + } + + for(i = 1;;i++) { + dig = quorem(b,S) + '0'; + /* Do we yet have the shortest decimal string + * that will round to d? + */ + j = cmp(b, mlo); + delta = diff(S, mhi); + j1 = delta->sign ? 
1 : cmp(b, delta); + Bfree(delta); +#ifndef ROUND_BIASED + if (j1 == 0 && mode != 1 && !(word1(d) & 1) +#ifdef Honor_FLT_ROUNDS + && rounding >= 1 +#endif + ) { + if (dig == '9') + goto round_9_up; + if (j > 0) + dig++; +#ifdef SET_INEXACT + else if (!b->x[0] && b->wds <= 1) + inexact = 0; +#endif + *s++ = dig; + goto ret; + } +#endif + if (j < 0 || j == 0 && mode != 1 +#ifndef ROUND_BIASED + && !(word1(d) & 1) +#endif + ) { + if (!b->x[0] && b->wds <= 1) { +#ifdef SET_INEXACT + inexact = 0; +#endif + goto accept_dig; + } +#ifdef Honor_FLT_ROUNDS + if (mode > 1) + switch(rounding) { + case 0: goto accept_dig; + case 2: goto keep_dig; + } +#endif /*Honor_FLT_ROUNDS*/ + if (j1 > 0) { + b = lshift(b, 1); + j1 = cmp(b, S); + if ((j1 > 0 || j1 == 0 && dig & 1) + && dig++ == '9') + goto round_9_up; + } + accept_dig: + *s++ = dig; + goto ret; + } + if (j1 > 0) { +#ifdef Honor_FLT_ROUNDS + if (!rounding) + goto accept_dig; +#endif + if (dig == '9') { /* possible if i == 1 */ + round_9_up: + *s++ = '9'; + goto roundoff; + } + *s++ = dig + 1; + goto ret; + } +#ifdef Honor_FLT_ROUNDS + keep_dig: +#endif + *s++ = dig; + if (i == ilim) + break; + b = multadd(b, 10, 0); + if (mlo == mhi) + mlo = mhi = multadd(mhi, 10, 0); + else { + mlo = multadd(mlo, 10, 0); + mhi = multadd(mhi, 10, 0); + } + } + } + else + for(i = 1;; i++) { + *s++ = dig = quorem(b,S) + '0'; + if (!b->x[0] && b->wds <= 1) { +#ifdef SET_INEXACT + inexact = 0; +#endif + goto ret; + } + if (i >= ilim) + break; + b = multadd(b, 10, 0); + } + + /* Round off last digit */ + +#ifdef Honor_FLT_ROUNDS + switch(rounding) { + case 0: goto trimzeros; + case 2: goto roundoff; + } +#endif + b = lshift(b, 1); + j = cmp(b, S); + if (j > 0 || j == 0 && dig & 1) { + roundoff: + while(*--s == '9') + if (s == s0) { + k++; + *s++ = '1'; + goto ret; + } + ++*s++; + } + else { + trimzeros: + while(*--s == '0'); + s++; + } + ret: + Bfree(S); + if (mhi) { + if (mlo && mlo != mhi) + Bfree(mlo); + Bfree(mhi); + } + ret1: +#ifdef SET_INEXACT + if (inexact) { + if (!oldinexact) { + word0(d) = Exp_1 + (70 << Exp_shift); + word1(d) = 0; + dval(d) += 1.; + } + } + else if (!oldinexact) + clear_inexact(); +#endif + Bfree(b); + *s = 0; + *decpt = k + 1; + if (rve) + *rve = s; + return s0; + } +#ifdef __cplusplus +} +#endif diff --git a/example/boot/tccargs b/example/boot/tccargs new file mode 100644 index 0000000..950c013 --- /dev/null +++ b/example/boot/tccargs @@ -0,0 +1,17 @@ +# This file contains the TinyCC command line arguments needed to +# compile the hello.c program. 
+
+# the output binary (DO NOT CHANGE IT)
+-o kernel
+# no default libraries
+-nostdlib
+# no default include paths
+-nostdinc
+# statically linked output
+-static
+# address of the start of the .text section
+-Wl,-Ttext,0x00100000
+# force binary output format
+-Wl,--oformat,binary
+# source files
+hello.c
diff --git a/example/hello.c b/example/hello.c
new file mode 100644
index 0000000..48b4666
--- /dev/null
+++ b/example/hello.c
@@ -0,0 +1,32 @@
+/* simple Hello World program on the QEMU serial port */
+
+void puts(const char *s);
+
+void _start(void)
+{
+    puts("Hello World\n");
+    while (1);
+}
+
+void outb(int port, int val)
+{
+    asm("outb %b1, %w0" : : "d" (port), "a" (val));
+}
+
+unsigned char inb(int port)
+{
+    int val;
+    asm("inb %w1, %b0" : "=a"(val) : "d" (port));
+    return val;
+}
+
+void puts(const char *s)
+{
+    while (*s) {
+        outb(0x3f8, *s++);
+        while ((inb(0x3f8 + 5) & 0x60) != 0x60);
+    }
+}
+
+
+
diff --git a/gunzip.c b/gunzip.c
new file mode 100644
index 0000000..5bef9db
--- /dev/null
+++ b/gunzip.c
@@ -0,0 +1,109 @@
+#include "tccboot.h"
+
+/*
+ * gzip declarations
+ */
+
+#define OF(args)  args
+#define STATIC static
+
+#define memzero(s, n)     memset ((s), 0, (n))
+
+typedef unsigned char  uch;
+typedef unsigned short ush;
+typedef unsigned long  ulg;
+
+#define WSIZE 0x8000            /* Window size must be at least 32k, */
+                                /* and a power of two */
+
+static uch *inbuf;              /* input buffer */
+static uch window[WSIZE];       /* Sliding window buffer */
+
+static unsigned insize = 0;     /* valid bytes in inbuf */
+static unsigned inptr = 0;      /* index of next byte to be processed in inbuf */
+static unsigned outcnt = 0;     /* bytes in output buffer */
+
+/* gzip flag byte */
+#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
+#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
+#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
+#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
+#define COMMENT      0x10 /* bit 4 set: file comment present */
+#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
+#define RESERVED     0xC0 /* bit 6,7:   reserved */
+
+#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
+
+/* Diagnostic functions */
+#ifdef DEBUG
+# define Assert(cond,msg) {if(!(cond)) error(msg);}
+# define Trace(x) fprintf x
+# define Tracev(x) {if (verbose) fprintf x ;}
+# define Tracevv(x) {if (verbose>1) fprintf x ;}
+# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+#else
+# define Assert(cond,msg)
+# define Trace(x)
+# define Tracev(x)
+# define Tracevv(x)
+# define Tracec(c,x)
+# define Tracecv(c,x)
+#endif
+
+static int fill_inbuf(void);
+static void flush_window(void);
+static void error(char *m);
+
+static void gzip_mark(void **ptr)
+{
+}
+static void gzip_release(void **ptr)
+{
+}
+
+static long bytes_out;
+static uch *output_data;
+static const uint8_t *input_data;
+static int input_len;
+
+#include "inflate.c"
+
+/* ===========================================================================
+ * Fill the input buffer.  This is called only when the buffer is empty
+ * and at least one byte is really needed.
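+ * (In TCCBOOT the whole gzipped initrd image is already in memory, so
+ * the buffer is handed over in one piece on the first call; a second
+ * call means the compressed stream ran past its end.)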
+ */
+static int fill_inbuf(void)
+{
+    if (insize != 0) {
+        error("ran out of input data\n");
+    }
+
+    inbuf = (uint8_t *)input_data;
+    insize = input_len;
+    inptr = 1;
+    return inbuf[0];
+}
+
+static void flush_window(void)
+{
+    memcpy(output_data, window, outcnt);
+    output_data += outcnt;
+    bytes_out += outcnt;
+    outcnt = 0;
+}
+
+static void error(char *x)
+{
+    fatal("%s", x);
+}
+
+int do_gunzip(uint8_t *dest, const uint8_t *src, int src_len)
+{
+    input_data = src;
+    input_len = src_len;
+    output_data = dest;
+    bytes_out = 0;
+    gunzip();
+    return bytes_out;
+}
diff --git a/head.S b/head.S
new file mode 100644
index 0000000..af34a44
--- /dev/null
+++ b/head.S
@@ -0,0 +1,128 @@
+/*
+ *  linux/boot/head.S
+ *
+ *  Copyright (C) 1991, 1992, 1993  Linus Torvalds
+ */
+
+/*
+ *  head.S contains the 32-bit startup code.
+ *
+ * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
+ * the page directory will exist. The startup code will be overwritten by
+ * the page directory. [According to comments etc elsewhere on a compressed
+ * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
+ *
+ * Page 0 is deliberately kept safe, since System Management Mode code in
+ * laptops may need to access the BIOS data stored there.  This is also
+ * useful for future device drivers that access the BIOS via VM86
+ * mode.
+ */
+
+/*
+ * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
+ */
+.text
+
+#include <linux/linkage.h>
+#include <asm/segment.h>
+
+	.globl startup_32
+
+startup_32:
+	cld
+	cli
+	movl $(__KERNEL_DS),%eax
+	movl %eax,%ds
+	movl %eax,%es
+	movl %eax,%fs
+	movl %eax,%gs
+
+	lss SYMBOL_NAME(stack_start),%esp
+	xorl %eax,%eax
+1:	incl %eax		# check that A20 really IS enabled
+	movl %eax,0x000000	# loop forever if it isn't
+	cmpl %eax,0x100000
+	je 1b
+
+/*
+ * Initialize eflags.  Some BIOS's leave bits like NT set.  This would
+ * confuse the debugger if this code is traced.
+ * XXX - best to initialize before switching to protected mode.
+ */
+	pushl $0
+	popfl
+/*
+ * Clear BSS
+ */
+	xorl %eax,%eax
+	movl $ SYMBOL_NAME(_edata),%edi
+	movl $ SYMBOL_NAME(_end),%ecx
+	subl %edi,%ecx
+	cld
+	rep
+	stosb
+/*
+ * Do the decompression, and jump to the new kernel..
+ */
+	subl $16,%esp	# place for structure on the stack
+	movl %esp,%eax
+	pushl %esi	# real mode pointer as second arg
+	pushl %eax	# address of structure as first arg
+	call SYMBOL_NAME(compile_kernel)
+	orl  %eax,%eax
+	jnz  3f
+	popl %esi	# discard address
+	popl %esi	# real mode pointer
+	xorl %ebx,%ebx
+	ljmp $(__KERNEL_CS), $0x100000
+
+/*
+ * We come here, if we were loaded high.
+ * We need to move the move-in-place routine down to 0x1000
+ * and then start it with the buffer addresses in registers,
+ * which we got from the stack.
+ */
+3:
+	movl $move_routine_start,%esi
+	movl $0x1000,%edi
+	movl $move_routine_end,%ecx
+	subl %esi,%ecx
+	addl $3,%ecx
+	shrl $2,%ecx
+	cld
+	rep
+	movsl
+
+	popl %esi	# discard the address
+	popl %ebx	# real mode pointer
+	popl %esi	# low_buffer_start
+	popl %ecx	# lcount
+	popl %edx	# high_buffer_start
+	popl %eax	# hcount
+	movl $0x100000,%edi
+	cli		# make sure we don't get interrupted
+	ljmp $(__KERNEL_CS), $0x1000	# and jump to the move routine
+
+/*
+ * Routine (template) for moving the decompressed kernel in place,
+ * if we were high loaded. This _must_ be PIC code!
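+ * (It is copied to 0x1000 at run time and executed from there, so it
+ * must not contain absolute references into its own text.)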
+ */
+move_routine_start:
+	movl %ecx,%ebp
+	shrl $2,%ecx
+	rep
+	movsl
+	movl %ebp,%ecx
+	andl $3,%ecx
+	rep
+	movsb
+	movl %edx,%esi
+	movl %eax,%ecx	# NOTE: rep movsb won't move if %ecx == 0
+	addl $3,%ecx
+	shrl $2,%ecx
+	rep
+	movsl
+	movl %ebx,%esi	# Restore setup pointer
+	xorl %ebx,%ebx
+	ljmp $(__KERNEL_CS), $0x100000
+move_routine_end:
diff --git a/inflate.c b/inflate.c
new file mode 100644
index 0000000..e1b05ab
--- /dev/null
+++ b/inflate.c
@@ -0,0 +1,1180 @@
+#define DEBG(x)
+#define DEBG1(x)
+/* inflate.c -- Not copyrighted 1992 by Mark Adler
+   version c10p1, 10 January 1993 */
+
+/*
+ * Adapted for booting Linux by Hannu Savolainen 1993
+ * based on gzip-1.0.3
+ *
+ * Nicolas Pitre <nico@cam.org>, 1999/04/14 :
+ *   Little mods for all variable to reside either into rodata or bss segments
+ *   by marking constant variables with 'const' and initializing all the others
+ *   at run-time only.  This allows for the kernel uncompressor to run
+ *   directly from Flash or ROM memory on embedded systems.
+ */
+
+/*
+   Inflate deflated (PKZIP's method 8 compressed) data.  The compression
+   method searches for as much of the current string of bytes (up to a
+   length of 258) in the previous 32 K bytes.  If it doesn't find any
+   matches (of at least length 3), it codes the next byte.  Otherwise, it
+   codes the length of the matched string and its distance backwards from
+   the current position.  There is a single Huffman code that codes both
+   single bytes (called "literals") and match lengths.  A second Huffman
+   code codes the distance information, which follows a length code.  Each
+   length or distance code actually represents a base value and a number
+   of "extra" (sometimes zero) bits to get to add to the base value.  At
+   the end of each deflated block is a special end-of-block (EOB) literal/
+   length code.  The decoding process is basically: get a literal/length
+   code; if EOB then done; if a literal, emit the decoded byte; if a
+   length then get the distance and emit the referred-to bytes from the
+   sliding window of previously emitted data.
+
+   There are (currently) three kinds of inflate blocks: stored, fixed, and
+   dynamic.  The compressor deals with some chunk of data at a time, and
+   decides which method to use on a chunk-by-chunk basis.  A chunk might
+   typically be 32 K or 64 K.  If the chunk is incompressible, then the
+   "stored" method is used.  In this case, the bytes are simply stored as
+   is, eight bits per byte, with none of the above coding.  The bytes are
+   preceded by a count, since there is no longer an EOB code.
+
+   If the data is compressible, then either the fixed or dynamic methods
+   are used.  In the dynamic method, the compressed data is preceded by
+   an encoding of the literal/length and distance Huffman codes that are
+   to be used to decode this block.  The representation is itself Huffman
+   coded, and so is preceded by a description of that code.  These code
+   descriptions take up a little space, and so for small blocks, there is
+   a predefined set of codes, called the fixed codes.  The fixed method is
+   used if the block codes up smaller that way (usually for quite small
+   chunks), otherwise the dynamic method is used.  In the latter case, the
+   codes are customized to the probabilities in the current block, and so
+   can code it much better than the pre-determined fixed codes.
+
+   The Huffman codes themselves are decoded using a multi-level table
+   lookup, in order to maximize the speed of decoding plus the speed of
+   building the decoding tables.
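+   (Concrete illustration, added for clarity: with lbits == 9 below, any
+   literal/length code of nine or fewer bits is resolved by one probe of
+   a 512-entry first-level table; longer codes follow sub-table pointers
+   for their remaining bits.)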
+   See the comments below that precede the
+   lbits and dbits tuning parameters.
+ */
+
+
+/*
+   Notes beyond the 1.93a appnote.txt:
+
+    1. Distance pointers never point before the beginning of the output
+       stream.
+    2. Distance pointers can point back across blocks, up to 32k away.
+    3. There is an implied maximum of 7 bits for the bit length table and
+       15 bits for the actual data.
+    4. If only one code exists, then it is encoded using one bit.  (Zero
+       would be more efficient, but perhaps a little confusing.)  If two
+       codes exist, they are coded using one bit each (0 and 1).
+    5. There is no way of sending zero distance codes--a dummy must be
+       sent if there are none.  (History: a pre 2.0 version of PKZIP would
+       store blocks with no distance codes, but this was discovered to be
+       too harsh a criterion.)  Valid only for 1.93a.  2.04c does allow
+       zero distance codes, which is sent as one code of zero bits in
+       length.
+    6. There are up to 286 literal/length codes.  Code 256 represents the
+       end-of-block.  Note however that the static length tree defines
+       288 codes just to fill out the Huffman codes.  Codes 286 and 287
+       cannot be used though, since there is no length base or extra bits
+       defined for them.  Similarly, there are up to 30 distance codes.
+       However, static trees define 32 codes (all 5 bits) to fill out the
+       Huffman codes, but the last two had better not show up in the data.
+    7. Unzip can check dynamic Huffman blocks for complete code sets.
+       The exception is that a single code would not be complete (see #4).
+    8. The five bits following the block type is really the number of
+       literal codes sent minus 257.
+    9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
+       (1+6+6).  Therefore, to output three times the length, you output
+       three codes (1+1+1), whereas to output four times the same length,
+       you only need two codes (1+3).  Hmm.
+   10. In the tree reconstruction algorithm, Code = Code + Increment
+       only if BitLength(i) is not zero.  (Pretty obvious.)
+   11. Correction: 4 Bits: # of Bit Length codes - 4     (4 - 19)
+   12. Note: length code 284 can represent 227-258, but length code 285
+       really is 258.  The last length deserves its own, short code
+       since it gets used a lot in very redundant files.  The length
+       258 is special since 258 - 3 (the min match length) is 255.
+   13. The literal/length and distance code bit lengths are read as a
+       single stream of lengths.  It is possible (and advantageous) for
+       a repeat code (16, 17, or 18) to go across the boundary between
+       the two sets of lengths.
+ */
+
+#ifdef RCSID
+static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #";
+#endif
+
+#ifndef STATIC
+
+#if defined(STDC_HEADERS) || defined(HAVE_STDLIB_H)
+# include <stdlib.h>
+# include <string.h>
+#endif
+
+#include "gzip.h"
+#define STATIC
+#endif /* !STATIC */
+
+#define slide window
+
+/* Huffman code lookup table entry--this entry is four bytes for machines
+   that have 16-bit pointers (e.g. PC's in the small or medium model).
+   Valid extra bits are 0..13.  e == 15 is EOB (end of block), e == 16
+   means that v is a literal, 16 < e < 32 means that v is a pointer to
+   the next table, which codes e - 16 bits, and lastly e == 99 indicates
+   an unused code.  If a code with e == 99 is looked up, this implies an
+   error in the data.
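+   (Example: an entry with e == 20 says: dump its b bits, then index the
+   sub-table at v.t with the next 20 - 16 = 4 bits of input.)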
*/ +struct huft { + uch e; /* number of extra bits or operation */ + uch b; /* number of bits in this code or subcode */ + union { + ush n; /* literal, length base, or distance base */ + struct huft *t; /* pointer to next level of table */ + } v; +}; + + +/* Function prototypes */ +STATIC int huft_build OF((unsigned *, unsigned, unsigned, + const ush *, const ush *, struct huft **, int *)); +STATIC int huft_free OF((struct huft *)); +STATIC int inflate_codes OF((struct huft *, struct huft *, int, int)); +STATIC int inflate_stored OF((void)); +STATIC int inflate_fixed OF((void)); +STATIC int inflate_dynamic OF((void)); +STATIC int inflate_block OF((int *)); +STATIC int inflate OF((void)); + + +/* The inflate algorithm uses a sliding 32 K byte window on the uncompressed + stream to find repeated byte strings. This is implemented here as a + circular buffer. The index is updated simply by incrementing and then + ANDing with 0x7fff (32K-1). */ +/* It is left to other modules to supply the 32 K area. It is assumed + to be usable as if it were declared "uch slide[32768];" or as just + "uch *slide;" and then malloc'ed in the latter case. The definition + must be in unzip.h, included above. */ +/* unsigned wp; current position in slide */ +#define wp outcnt +#define flush_output(w) (wp=(w),flush_window()) + +/* Tables for deflate from PKZIP's appnote.txt. */ +static const unsigned border[] = { /* Order of the bit length code lengths */ + 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; +static const ush cplens[] = { /* Copy lengths for literal codes 257..285 */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; + /* note: see note #13 above about the 258 in this list. */ +static const ush cplext[] = { /* Extra bits for literal codes 257..285 */ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, + 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */ +static const ush cpdist[] = { /* Copy offsets for distance codes 0..29 */ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, + 8193, 12289, 16385, 24577}; +static const ush cpdext[] = { /* Extra bits for distance codes */ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, + 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, + 12, 12, 13, 13}; + + + +/* Macros for inflate() bit peeking and grabbing. + The usage is: + + NEEDBITS(j) + x = b & mask_bits[j]; + DUMPBITS(j) + + where NEEDBITS makes sure that b has at least j bits in it, and + DUMPBITS removes the bits from b. The macros use the variable k + for the number of bits in b. Normally, b and k are register + variables for speed, and are initialized at the beginning of a + routine that uses these macros from a global bit buffer and count. + + If we assume that EOB will be the longest code, then we will never + ask for bits with NEEDBITS that are beyond the end of the stream. + So, NEEDBITS should not read any more bytes than are needed to + meet the request. Then no bytes need to be "returned" to the buffer + at the end of the last block. + + However, this assumption is not true for fixed blocks--the EOB code + is 7 bits, but the other literal/length codes can be 8 or 9 bits. + (The EOB code is shorter than other codes because fixed blocks are + generally short. So, while a block always has an EOB, many other + literal/length codes have a significantly lower probability of + showing up at all.) 
However, by making the first table have a
+   lookup of seven bits, the EOB code will be found in that first
+   lookup, and so will not require that too many bits be pulled from
+   the stream.
+ */
+
+STATIC ulg bb;                         /* bit buffer */
+STATIC unsigned bk;                    /* bits in bit buffer */
+
+STATIC const ush mask_bits[] = {
+    0x0000,
+    0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
+    0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
+};
+
+#define NEXTBYTE()  (uch)get_byte()
+#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
+#define DUMPBITS(n) {b>>=(n);k-=(n);}
+
+
+/*
+   Huffman code decoding is performed using a multi-level table lookup.
+   The fastest way to decode is to simply build a lookup table whose
+   size is determined by the longest code.  However, the time it takes
+   to build this table can also be a factor if the data being decoded
+   is not very long.  The most common codes are necessarily the
+   shortest codes, so those codes dominate the decoding time, and hence
+   the speed.  The idea is you can have a shorter table that decodes the
+   shorter, more probable codes, and then point to subsidiary tables for
+   the longer codes.  The time it costs to decode the longer codes is
+   then traded against the time it takes to make longer tables.
+
+   The results of this trade are in the variables lbits and dbits
+   below.  lbits is the number of bits the first level table for literal/
+   length codes can decode in one step, and dbits is the same thing for
+   the distance codes.  Subsequent tables are also less than or equal to
+   those sizes.  These values may be adjusted either when all of the
+   codes are shorter than that, in which case the longest code length in
+   bits is used, or when the shortest code is *longer* than the requested
+   table size, in which case the length of the shortest code in bits is
+   used.
+
+   There are two different values for the two tables, since they code a
+   different number of possibilities each.  The literal/length table
+   codes 286 possible values, or in a flat code, a little over eight
+   bits.  The distance table codes 30 possible values, or a little less
+   than five bits, flat.  The optimum values for speed end up being
+   about one bit more than those, so lbits is 8+1 and dbits is 5+1.
+   The optimum values may differ though from machine to machine, and
+   possibly even between compilers.  Your mileage may vary.
+ */
+
+
+STATIC const int lbits = 9;    /* bits in base literal/length lookup table */
+STATIC const int dbits = 6;    /* bits in base distance lookup table */
+
+
+/* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
+#define BMAX 16         /* maximum bit length of any code (16 for explode) */
+#define N_MAX 288       /* maximum number of codes in any set */
+
+
+STATIC unsigned hufts;  /* track memory usage */
+
+
+STATIC int huft_build(b, n, s, d, e, t, m)
+unsigned *b;            /* code lengths in bits (all assumed <= BMAX) */
+unsigned n;             /* number of codes (assumed <= N_MAX) */
+unsigned s;             /* number of simple-valued codes (0..s-1) */
+const ush *d;           /* list of base values for non-simple codes */
+const ush *e;           /* list of extra bits for non-simple codes */
+struct huft **t;        /* result: starting table */
+int *m;                 /* maximum lookup bits, returns actual */
+/* Given a list of code lengths and a maximum table size, make a set of
+   tables to decode that set of codes.
Return zero on success, one if + the given code set is incomplete (the tables are still built in this + case), two if the input is invalid (all zero length codes or an + oversubscribed set of lengths), and three if not enough memory. */ +{ + unsigned a; /* counter for codes of length k */ + unsigned c[BMAX+1]; /* bit length count table */ + unsigned f; /* i repeats in table every f entries */ + int g; /* maximum code length */ + int h; /* table level */ + register unsigned i; /* counter, current code */ + register unsigned j; /* counter */ + register int k; /* number of bits in current code */ + int l; /* bits per table (returned in m) */ + register unsigned *p; /* pointer into c[], b[], or v[] */ + register struct huft *q; /* points to current table */ + struct huft r; /* table entry for structure assignment */ + struct huft *u[BMAX]; /* table stack */ + unsigned v[N_MAX]; /* values in order of bit length */ + register int w; /* bits before this table == (l * h) */ + unsigned x[BMAX+1]; /* bit offsets, then code stack */ + unsigned *xp; /* pointer into x */ + int y; /* number of dummy codes added */ + unsigned z; /* number of entries in current table */ + +DEBG("huft1 "); + + /* Generate counts for each bit length */ + memzero(c, sizeof(c)); + p = b; i = n; + do { + Tracecv(*p, (stderr, (n-i >= ' ' && n-i <= '~' ? "%c %d\n" : "0x%x %d\n"), + n-i, *p)); + c[*p]++; /* assume all entries <= BMAX */ + p++; /* Can't combine with above line (Solaris bug) */ + } while (--i); + if (c[0] == n) /* null input--all zero length codes */ + { + *t = (struct huft *)NULL; + *m = 0; + return 0; + } + +DEBG("huft2 "); + + /* Find minimum and maximum length, bound *m by those */ + l = *m; + for (j = 1; j <= BMAX; j++) + if (c[j]) + break; + k = j; /* minimum code length */ + if ((unsigned)l < j) + l = j; + for (i = BMAX; i; i--) + if (c[i]) + break; + g = i; /* maximum code length */ + if ((unsigned)l > i) + l = i; + *m = l; + +DEBG("huft3 "); + + /* Adjust last length count to fill out codes, if needed */ + for (y = 1 << j; j < i; j++, y <<= 1) + if ((y -= c[j]) < 0) + return 2; /* bad input: more codes than bits */ + if ((y -= c[i]) < 0) + return 2; + c[i] += y; + +DEBG("huft4 "); + + /* Generate starting offsets into the value table for each length */ + x[1] = j = 0; + p = c + 1; xp = x + 2; + while (--i) { /* note that i == g from above */ + *xp++ = (j += *p++); + } + +DEBG("huft5 "); + + /* Make a table of values in order of bit lengths */ + p = b; i = 0; + do { + if ((j = *p++) != 0) + v[x[j]++] = i; + } while (++i < n); + +DEBG("h6 "); + + /* Generate the Huffman codes and for each, make the table entries */ + x[0] = i = 0; /* first Huffman code is zero */ + p = v; /* grab values in bit order */ + h = -1; /* no tables yet--level -1 */ + w = -l; /* bits decoded == (l * h) */ + u[0] = (struct huft *)NULL; /* just to keep compilers happy */ + q = (struct huft *)NULL; /* ditto */ + z = 0; /* ditto */ +DEBG("h6a "); + + /* go through the bit lengths (k already is bits in shortest code) */ + for (; k <= g; k++) + { +DEBG("h6b "); + a = c[k]; + while (a--) + { +DEBG("h6b1 "); + /* here i is the Huffman code of length k bits for value *p */ + /* make tables up to required level */ + while (k > w + l) + { +DEBG1("1 "); + h++; + w += l; /* previous table always l bits */ + + /* compute minimum size table less than or equal to l bits */ + z = (z = g - w) > (unsigned)l ? 
l : z; /* upper limit on table size */ + if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */ + { /* too few codes for k-w bit table */ +DEBG1("2 "); + f -= a + 1; /* deduct codes from patterns left */ + xp = c + k; + while (++j < z) /* try smaller tables up to z bits */ + { + if ((f <<= 1) <= *++xp) + break; /* enough codes to use up j bits */ + f -= *xp; /* else deduct codes from patterns */ + } + } +DEBG1("3 "); + z = 1 << j; /* table entries for j-bit table */ + + /* allocate and link in new table */ + if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) == + (struct huft *)NULL) + { + if (h) + huft_free(u[0]); + return 3; /* not enough memory */ + } +DEBG1("4 "); + hufts += z + 1; /* track memory usage */ + *t = q + 1; /* link to list for huft_free() */ + *(t = &(q->v.t)) = (struct huft *)NULL; + u[h] = ++q; /* table starts after link */ + +DEBG1("5 "); + /* connect to last table, if there is one */ + if (h) + { + x[h] = i; /* save pattern for backing up */ + r.b = (uch)l; /* bits to dump before this table */ + r.e = (uch)(16 + j); /* bits in this table */ + r.v.t = q; /* pointer to this table */ + j = i >> (w - l); /* (get around Turbo C bug) */ + u[h-1][j] = r; /* connect to last table */ + } +DEBG1("6 "); + } +DEBG("h6c "); + + /* set up table entry in r */ + r.b = (uch)(k - w); + if (p >= v + n) + r.e = 99; /* out of values--invalid code */ + else if (*p < s) + { + r.e = (uch)(*p < 256 ? 16 : 15); /* 256 is end-of-block code */ + r.v.n = (ush)(*p); /* simple code is just the value */ + p++; /* one compiler does not like *p++ */ + } + else + { + r.e = (uch)e[*p - s]; /* non-simple--look up in lists */ + r.v.n = d[*p++ - s]; + } +DEBG("h6d "); + + /* fill code-like entries with r */ + f = 1 << (k - w); + for (j = i >> w; j < z; j += f) + q[j] = r; + + /* backwards increment the k-bit code i */ + for (j = 1 << (k - 1); i & j; j >>= 1) + i ^= j; + i ^= j; + + /* backup over finished tables */ + while ((i & ((1 << w) - 1)) != x[h]) + { + h--; /* don't need to update q */ + w -= l; + } +DEBG("h6e "); + } +DEBG("h6f "); + } + +DEBG("huft7 "); + + /* Return true (1) if we were given an incomplete table */ + return y != 0 && g != 1; +} + + + +STATIC int huft_free(t) +struct huft *t; /* table to free */ +/* Free the malloc'ed tables built by huft_build(), which makes a linked + list of the tables it made, with the links in a dummy first entry of + each table. */ +{ + register struct huft *p, *q; + + + /* Go through linked list, freeing from the malloced (t[-1]) address. */ + p = t; + while (p != (struct huft *)NULL) + { + q = (--p)->v.t; + free((char*)p); + p = q; + } + return 0; +} + + +STATIC int inflate_codes(tl, td, bl, bd) +struct huft *tl, *td; /* literal/length and distance decoder tables */ +int bl, bd; /* number of bits decoded by tl[] and td[] */ +/* inflate (decompress) the codes in a deflated (compressed) block. + Return an error code or zero if it all goes ok. 
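+   (Note on the decode loops below, added for clarity: a first-level
+   entry with e > 16 is a link to a sub-table; its t->b bits are dumped
+   and e - 16 further bits index the next level, until e reaches 16
+   (literal), 15 (end-of-block) or the extra-bit count of a length or
+   distance code.)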
 */
+{
+  register unsigned e;  /* table entry flag/number of extra bits */
+  unsigned n, d;        /* length and index for copy */
+  unsigned w;           /* current window position */
+  struct huft *t;       /* pointer to table entry */
+  unsigned ml, md;      /* masks for bl and bd bits */
+  register ulg b;       /* bit buffer */
+  register unsigned k;  /* number of bits in bit buffer */
+
+
+  /* make local copies of globals */
+  b = bb;                       /* initialize bit buffer */
+  k = bk;
+  w = wp;                       /* initialize window position */
+
+  /* inflate the coded data */
+  ml = mask_bits[bl];           /* precompute masks for speed */
+  md = mask_bits[bd];
+  for (;;)                      /* do until end of block */
+  {
+    NEEDBITS((unsigned)bl)
+    if ((e = (t = tl + ((unsigned)b & ml))->e) > 16)
+      do {
+        if (e == 99)
+          return 1;
+        DUMPBITS(t->b)
+        e -= 16;
+        NEEDBITS(e)
+      } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
+    DUMPBITS(t->b)
+    if (e == 16)                /* then it's a literal */
+    {
+      slide[w++] = (uch)t->v.n;
+      Tracevv((stderr, "%c", slide[w-1]));
+      if (w == WSIZE)
+      {
+        flush_output(w);
+        w = 0;
+      }
+    }
+    else                        /* it's an EOB or a length */
+    {
+      /* exit if end of block */
+      if (e == 15)
+        break;
+
+      /* get length of block to copy */
+      NEEDBITS(e)
+      n = t->v.n + ((unsigned)b & mask_bits[e]);
+      DUMPBITS(e);
+
+      /* decode distance of block to copy */
+      NEEDBITS((unsigned)bd)
+      if ((e = (t = td + ((unsigned)b & md))->e) > 16)
+        do {
+          if (e == 99)
+            return 1;
+          DUMPBITS(t->b)
+          e -= 16;
+          NEEDBITS(e)
+        } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
+      DUMPBITS(t->b)
+      NEEDBITS(e)
+      d = w - t->v.n - ((unsigned)b & mask_bits[e]);
+      DUMPBITS(e)
+      Tracevv((stderr,"\\[%d,%d]", w-d, n));
+
+      /* do the copy */
+      do {
+        n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? n : e);
+#if !defined(NOMEMCPY) && !defined(DEBUG)
+        if (w - d >= e)         /* (this test assumes unsigned comparison) */
+        {
+          memcpy(slide + w, slide + d, e);
+          w += e;
+          d += e;
+        }
+        else                    /* do it slow to avoid memcpy() overlap */
+#endif /* !NOMEMCPY */
+          do {
+            slide[w++] = slide[d++];
+            Tracevv((stderr, "%c", slide[w-1]));
+          } while (--e);
+        if (w == WSIZE)
+        {
+          flush_output(w);
+          w = 0;
+        }
+      } while (n);
+    }
+  }
+
+
+  /* restore the globals from the locals */
+  wp = w;                       /* restore global window pointer */
+  bb = b;                       /* restore global bit buffer */
+  bk = k;
+
+  /* done */
+  return 0;
+}
+
+
+
+STATIC int inflate_stored()
+/* "decompress" an inflated type 0 (stored) block. */
+{
+  unsigned n;           /* number of bytes in block */
+  unsigned w;           /* current window position */
+  register ulg b;       /* bit buffer */
+  register unsigned k;  /* number of bits in bit buffer */
+
+DEBG("<stor");
+
+  /* make local copies of globals */
+  b = bb;                       /* initialize bit buffer */
+  k = bk;
+  w = wp;                       /* initialize window position */
+
+  /* go to byte boundary */
+  n = k & 7;
+  DUMPBITS(n);
+
+  /* get the length and its complement */
+  NEEDBITS(16)
+  n = ((unsigned)b & 0xffff);
+  DUMPBITS(16)
+  NEEDBITS(16)
+  if (n != (unsigned)((~b) & 0xffff))
+    return 1;                   /* error in compressed data */
+  DUMPBITS(16)
+
+  /* read and output the compressed data */
+  while (n--)
+  {
+    NEEDBITS(8)
+    slide[w++] = (uch)b;
+    if (w == WSIZE)
+    {
+      flush_output(w);
+      w = 0;
+    }
+    DUMPBITS(8)
+  }
+
+  /* restore the globals from the locals */
+  wp = w;                       /* restore global window pointer */
+  bb = b;                       /* restore global bit buffer */
+  bk = k;
+
+  DEBG(">");
+  return 0;
+}
+
+
+
+STATIC int inflate_fixed()
+/* decompress an inflated type 1 (fixed Huffman codes) block.  We should
+   either replace this with a custom decoder, or at least precompute the
+   Huffman tables. */
+{
+  int i;                /* temporary variable */
+  struct huft *tl;      /* literal/length code table */
+  struct huft *td;      /* distance code table */
+  int bl;               /* lookup bits for tl */
+  int bd;               /* lookup bits for td */
+  unsigned l[288];      /* length list for huft_build */
+
+DEBG("<fix");
+
+  /* set up literal table */
+  for (i = 0; i < 144; i++)
+    l[i] = 8;
+  for (; i < 256; i++)
+    l[i] = 9;
+  for (; i < 280; i++)
+    l[i] = 7;
+  for (; i < 288; i++)          /* make a complete, but wrong code set */
+    l[i] = 8;
+  bl = 7;
+  if ((i = huft_build(l, 288, 257, cplens, cplext, &tl, &bl)) != 0)
+    return i;
+
+  /* set up distance table */
+  for (i = 0; i < 30; i++)      /* make an incomplete code set */
+    l[i] = 5;
+  bd = 5;
+  if ((i = huft_build(l, 30, 0, cpdist, cpdext, &td, &bd)) > 1)
+  {
+    huft_free(tl);
+
+    DEBG(">");
+    return i;
+  }
+
+
+  /* decompress until an end-of-block code */
+  if (inflate_codes(tl, td, bl, bd))
+    return 1;
+
+
+  /* free the decoding tables, return */
+  huft_free(tl);
+  huft_free(td);
+  return 0;
+}
+
+
+
+STATIC int inflate_dynamic()
+/* decompress an inflated type 2 (dynamic Huffman codes) block. */
+{
+  int i;                /* temporary variables */
+  unsigned j;
+  unsigned l;           /* last length */
+  unsigned m;           /* mask for bit lengths table */
+  unsigned n;           /* number of lengths to get */
+  struct huft *tl;      /* literal/length code table */
+  struct huft *td;      /* distance code table */
+  int bl;               /* lookup bits for tl */
+  int bd;               /* lookup bits for td */
+  unsigned nb;          /* number of bit length codes */
+  unsigned nl;          /* number of literal/length codes */
+  unsigned nd;          /* number of distance codes */
+#ifdef PKZIP_BUG_WORKAROUND
+  unsigned ll[288+32];  /* literal/length and distance code lengths */
+#else
+  unsigned ll[286+30];  /* literal/length and distance code lengths */
+#endif
+  register ulg b;       /* bit buffer */
+  register unsigned k;  /* number of bits in bit buffer */
+
+DEBG("<dyn");
+
+  /* make local bit buffer */
+  b = bb;
+  k = bk;
+
+  /* read in table lengths */
+  NEEDBITS(5)
+  nl = 257 + ((unsigned)b & 0x1f);      /* number of literal/length codes */
+  DUMPBITS(5)
+  NEEDBITS(5)
+  nd = 1 + ((unsigned)b & 0x1f);        /* number of distance codes */
+  DUMPBITS(5)
+  NEEDBITS(4)
+  nb = 4 + ((unsigned)b & 0xf);         /* number of bit length codes */
+  DUMPBITS(4)
+#ifdef PKZIP_BUG_WORKAROUND
+  if (nl > 288 || nd > 32)
+#else
+  if (nl > 286 || nd > 30)
+#endif
+    return 1;                   /* bad lengths */
+
+DEBG("dyn1 ");
+
+  /* read in bit-length-code lengths */
+  for (j = 0; j < nb; j++)
+  {
+    NEEDBITS(3)
+    ll[border[j]] = (unsigned)b & 7;
+    DUMPBITS(3)
+  }
+  for (; j < 19; j++)
+    ll[border[j]] = 0;
+
+DEBG("dyn2 ");
+
+  /* build decoding table for trees--single level, 7 bit lookup */
+  bl = 7;
+  if ((i = huft_build(ll, 19, 19, NULL, NULL, &tl, &bl)) != 0)
+  {
+    if (i == 1)
+      huft_free(tl);
+    return i;                   /* incomplete code set */
+  }
+
+DEBG("dyn3 ");
+
+  /* read in literal and distance code lengths */
+  n = nl + nd;
+  m = mask_bits[bl];
+  i = l = 0;
+  while ((unsigned)i < n)
+  {
+    NEEDBITS((unsigned)bl)
+    j = (td = tl + ((unsigned)b & m))->b;
+    DUMPBITS(j)
+    j = td->v.n;
+    if (j < 16)                 /* length of code in bits (0..15) */
+      ll[i++] = l = j;          /* save last length in l */
+    else if (j == 16)           /* repeat last length 3 to 6 times */
+    {
+      NEEDBITS(2)
+      j = 3 + ((unsigned)b & 3);
+      DUMPBITS(2)
+      if ((unsigned)i + j > n)
+        return 1;
+      while (j--)
+        ll[i++] = l;
+    }
+    else if (j == 17)           /* 3 to 10 zero length codes */
+    {
+      NEEDBITS(3)
+      j = 3 + ((unsigned)b & 7);
+      DUMPBITS(3)
+      if ((unsigned)i + j > n)
+        return 1;
+      while (j--)
+        ll[i++] = 0;
+      l = 0;
+    }
+    else                        /* j == 18: 11 to 138 zero length codes */
+    {
+      NEEDBITS(7)
+      j = 11 + ((unsigned)b & 0x7f);
+      DUMPBITS(7)
+      if ((unsigned)i + j > n)
+        return 1;
+      while (j--)
+        ll[i++] = 0;
+      l = 0;
+    }
+  }
+
+DEBG("dyn4 ");
+
+  /* free decoding table for trees */
+  huft_free(tl);
+
+DEBG("dyn5 ");
+
+  /* restore the global bit buffer */
+  bb = b;
+  bk = k;
+
+DEBG("dyn5a ");
+
+  /* build the decoding tables for literal/length and distance codes */
+  bl = lbits;
+  if ((i = huft_build(ll, nl, 257, cplens, cplext, &tl, &bl)) != 0)
+  {
+DEBG("dyn5b ");
+    if (i == 1) {
+      error(" incomplete literal tree\n");
+      huft_free(tl);
+    }
+    return i;                   /* incomplete code set */
+  }
+DEBG("dyn5c ");
+  bd = dbits;
+  if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &td, &bd)) != 0)
+  {
+DEBG("dyn5d ");
+    if (i == 1) {
+      error(" incomplete distance tree\n");
+#ifdef PKZIP_BUG_WORKAROUND
+      i = 0;
+    }
+#else
+      huft_free(td);
+    }
+    huft_free(tl);
+    return i;                   /* incomplete code set */
+#endif
+  }
+
+DEBG("dyn6 ");
+
+  /* decompress until an end-of-block code */
+  if (inflate_codes(tl, td, bl, bd))
+    return 1;
+
+DEBG("dyn7 ");
+
+  /* free the decoding tables, return */
+  huft_free(tl);
+  huft_free(td);
+
+  DEBG(">");
+  return 0;
+}
+
+
+
+STATIC int inflate_block(e)
+int *e;                 /* last block flag */
+/* decompress an inflated block */
+{
+  unsigned t;           /* block type */
+  register ulg b;       /* bit buffer */
+  register unsigned k;  /* number of bits in bit buffer */
+
+  DEBG("<blk");
+
+  /* make local bit buffer */
+  b = bb;
+  k = bk;
+
+  /* read in last block bit */
+  NEEDBITS(1)
+  *e = (int)b & 1;
+  DUMPBITS(1)
+
+  /* read in block type */
+  NEEDBITS(2)
+  t = (unsigned)b & 3;
+  DUMPBITS(2)
+
+  /* restore the global bit buffer */
+  bb = b;
+  bk = k;
+
+  /* inflate that block type */
+  if (t == 2)
+    return inflate_dynamic();
+  if (t == 0)
+    return inflate_stored();
+  if (t == 1)
+    return inflate_fixed();
+
+  DEBG(">");
+
+  /* bad block type */
+  return 2;
+}
+
+
+
+STATIC int
inflate() +/* decompress an inflated entry */ +{ + int e; /* last block flag */ + int r; /* result code */ + unsigned h; /* maximum struct huft's malloc'ed */ + void *ptr; + + /* initialize window, bit buffer */ + wp = 0; + bk = 0; + bb = 0; + + + /* decompress until the last block */ + h = 0; + do { + hufts = 0; + gzip_mark(&ptr); + if ((r = inflate_block(&e)) != 0) { + gzip_release(&ptr); + return r; + } + gzip_release(&ptr); + if (hufts > h) + h = hufts; + } while (!e); + + /* Undo too much lookahead. The next read will be byte aligned so we + * can discard unused bits in the last meaningful byte. + */ + while (bk >= 8) { + bk -= 8; + inptr--; + } + + /* flush out slide */ + flush_output(wp); + + + /* return success */ +#ifdef DEBUG + fprintf(stderr, "<%u> ", h); +#endif /* DEBUG */ + return 0; +} + +#if 0 +/********************************************************************** + * + * The following are support routines for inflate.c + * + **********************************************************************/ + +static ulg crc_32_tab[256]; +static ulg crc; /* initialized in makecrc() so it'll reside in bss */ +#define CRC_VALUE (crc ^ 0xffffffffUL) + +/* + * Code to compute the CRC-32 table. Borrowed from + * gzip-1.0.3/makecrc.c. + */ + +static void +makecrc(void) +{ +/* Not copyrighted 1990 Mark Adler */ + + unsigned long c; /* crc shift register */ + unsigned long e; /* polynomial exclusive-or pattern */ + int i; /* counter for all possible eight bit values */ + int k; /* byte being shifted into crc apparatus */ + + /* terms of polynomial defining this crc (except x^32): */ + static const int p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26}; + + /* Make exclusive-or pattern from polynomial */ + e = 0; + for (i = 0; i < sizeof(p)/sizeof(int); i++) + e |= 1L << (31 - p[i]); + + crc_32_tab[0] = 0; + + for (i = 1; i < 256; i++) + { + c = 0; + for (k = i | 256; k != 1; k >>= 1) + { + c = c & 1 ? (c >> 1) ^ e : c >> 1; + if (k & 1) + c ^= e; + } + crc_32_tab[i] = c; + } + + /* this is initialized here so this code could reside in ROM */ + crc = (ulg)0xffffffffUL; /* shift register contents */ +} +#endif + +/* gzip flag byte */ +#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ +#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ +#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ +#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ +#define COMMENT 0x10 /* bit 4 set: file comment present */ +#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ +#define RESERVED 0xC0 /* bit 6,7: reserved */ + +/* + * Do the uncompression! 
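+ * (Layout reminder, matching the parsing below: a gzip member begins
+ * with a fixed header -- magic bytes 037 0213, method, flag byte,
+ * 4-byte timestamp, extra-flags and OS bytes -- then the optional
+ * fields selected by the flags, the deflate stream, and trailing
+ * 4-byte CRC and length fields.)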
+ */ +static int gunzip(void) +{ + uch flags; + unsigned char magic[2]; /* magic header */ + char method; + ulg orig_crc = 0; /* original crc */ + ulg orig_len = 0; /* original uncompressed length */ + int res; + + magic[0] = (unsigned char)get_byte(); + magic[1] = (unsigned char)get_byte(); + method = (unsigned char)get_byte(); + + if (magic[0] != 037 || + ((magic[1] != 0213) && (magic[1] != 0236))) { + error("bad gzip magic numbers"); + return -1; + } + + /* We only support method #8, DEFLATED */ + if (method != 8) { + error("internal error, invalid method"); + return -1; + } + + flags = (uch)get_byte(); + if ((flags & ENCRYPTED) != 0) { + error("Input is encrypted\n"); + return -1; + } + if ((flags & CONTINUATION) != 0) { + error("Multi part input\n"); + return -1; + } + if ((flags & RESERVED) != 0) { + error("Input has invalid flags\n"); + return -1; + } + (ulg)get_byte(); /* Get timestamp */ + ((ulg)get_byte()) << 8; + ((ulg)get_byte()) << 16; + ((ulg)get_byte()) << 24; + + (void)get_byte(); /* Ignore extra flags for the moment */ + (void)get_byte(); /* Ignore OS type for the moment */ + + if ((flags & EXTRA_FIELD) != 0) { + unsigned len = (unsigned)get_byte(); + len |= ((unsigned)get_byte())<<8; + while (len--) (void)get_byte(); + } + + /* Get original file name if it was truncated */ + if ((flags & ORIG_NAME) != 0) { + /* Discard the old name */ + while (get_byte() != 0) /* null */ ; + } + + /* Discard file comment if any */ + if ((flags & COMMENT) != 0) { + while (get_byte() != 0) /* null */ ; + } + + /* Decompress */ + if ((res = inflate())) { + switch (res) { + case 0: + break; + case 1: + error("invalid compressed format (err=1)"); + break; + case 2: + error("invalid compressed format (err=2)"); + break; + case 3: + error("out of memory"); + break; + default: + error("invalid compressed format (other)"); + } + return -1; + } + + /* Get the crc and original length */ + /* crc32 (see algorithm.doc) + * uncompressed input size modulo 2^32 + */ + orig_crc = (ulg) get_byte(); + orig_crc |= (ulg) get_byte() << 8; + orig_crc |= (ulg) get_byte() << 16; + orig_crc |= (ulg) get_byte() << 24; + + orig_len = (ulg) get_byte(); + orig_len |= (ulg) get_byte() << 8; + orig_len |= (ulg) get_byte() << 16; + orig_len |= (ulg) get_byte() << 24; + +#if 0 + /* Validate decompression */ + if (orig_crc != CRC_VALUE) { + error("crc error"); + return -1; + } +#endif + if (orig_len != bytes_out) { + error("length error"); + return -1; + } + return 0; +} + + diff --git a/initrd.img b/initrd.img new file mode 100644 index 0000000000000000000000000000000000000000..98f23167855999cf74634b57352623e5555745f8 GIT binary patch literal 642 zcmV-|0)71-iwFQO?s`E01MO4IZqq;z4i%@papX3*Dsoagb$|k*>H(FCR)|(eLA@Zs z#$Gq8u6M27b=wG4>H~m;#DNnM55SEV;1&7=JW83doi;RZ=F+j8%+EL9{K>I3#UOWa zr*pHcUVNXR2@eL3xDfa9P9xj+0x4(b=U@7rIeB*xKRZ9|&jA)D_;X6y*H0fmkBsqG z0N+{;;_v*0|K|)p%=32^n!g(#Zm)*7e428*?=FIiFdRz{C0A^N&(B6R-A`=;S!xjlrQaWXgg;J)ULTSyuVHjNcE5bDcRn9{7>v36;UZZUJOU!m&%Xk!=mTIqq4 zMTzU)q6q96tFr03!RWf}SDPG{AB9r;x?}{T9db40;;sk-g22*f5W&(> z62KFDf-HcLGP)529CgZRSS#)1zQ&`~txA2#J|%5ytX)<>6mnC6QC%Qd=Rr;RN2zuQ zD|}^j!*KHN4YkGEktv_sec = 0; + tv->tv_usec = 0; + return 0; +} + +time_t time(time_t *t) +{ + if (t) + *t = 0; + return 0; +} + +struct tm *localtime(const time_t *timep) +{ + static struct tm static_tm; + return &static_tm; +} + +int setjmp(jmp_buf buf) +{ + return 0; +} + +void longjmp(jmp_buf buf, int val) +{ + exit(1); +} + +/**********************************************************/ +#define MALLOC_MAX_SIZE (128 * 1024 * 1024) 
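+/* Note: sbrk() below is a trivial bump allocator.  malloc_ptr starts
+ * at _end (the first free byte after the loaded image) and only ever
+ * grows, up to MALLOC_MAX_SIZE; memory is never returned to the
+ * system.  The malloc package in malloc.c is layered on top of this
+ * sbrk(). */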
+extern uint8_t _end; +uint8_t *malloc_ptr = &_end; + +void *sbrk(int increment) +{ + uint8_t *ptr, *new_ptr; + + if (increment == 0) + return malloc_ptr; + ptr = malloc_ptr; + new_ptr = malloc_ptr + increment; + if (new_ptr > (&_end + MALLOC_MAX_SIZE)) { + errno = ENOMEM; + return (void *)-1; + } + malloc_ptr = new_ptr; + return ptr; +} + +#if 0 +#define MALLOC_ALIGN 4096 + +void free(void *ptr) +{ +} + +void *realloc(void *oldptr, size_t size) +{ + void *ptr; + if (size == 0) { + free(oldptr); + return NULL; + } else { + ptr = malloc(size); + /* XXX: incorrect */ + if (oldptr) { + memcpy(ptr, oldptr, size); + free(oldptr); + } + return ptr; + } +} +#endif +/**********************************************************/ + +uint8_t *romfs_base; + +/* The basic structures of the romfs filesystem */ + +#define ROMBSIZE BLOCK_SIZE +#define ROMBSBITS BLOCK_SIZE_BITS +#define ROMBMASK (ROMBSIZE-1) +#define ROMFS_MAGIC 0x7275 + +#define ROMFS_MAXFN 128 + +#define __mkw(h,l) (((h)&0x00ff)<< 8|((l)&0x00ff)) +#define __mkl(h,l) (((h)&0xffff)<<16|((l)&0xffff)) +#define __mk4(a,b,c,d) htonl(__mkl(__mkw(a,b),__mkw(c,d))) +#define ROMSB_WORD0 __mk4('-','r','o','m') +#define ROMSB_WORD1 __mk4('1','f','s','-') + +/* On-disk "super block" */ + +struct romfs_super_block { + uint32_t word0; + uint32_t word1; + uint32_t size; + uint32_t checksum; + char name[0]; /* volume name */ +}; + +/* On disk inode */ + +struct romfs_inode { + uint32_t next; /* low 4 bits see ROMFH_ */ + uint32_t spec; + uint32_t size; + uint32_t checksum; + char name[0]; +}; + +#define ROMFH_TYPE 7 +#define ROMFH_HRD 0 +#define ROMFH_DIR 1 +#define ROMFH_REG 2 +#define ROMFH_SYM 3 +#define ROMFH_BLK 4 +#define ROMFH_CHR 5 +#define ROMFH_SCK 6 +#define ROMFH_FIF 7 +#define ROMFH_EXEC 8 + +/* Alignment */ + +#define ROMFH_ALIGN 16 + +#define MAX_FILE_HANDLES 256 + +typedef struct FileHandle { + uint8_t *base; + unsigned long size, max_size; + unsigned long pos; + int is_rw; +} FileHandle; + +static FileHandle file_handles[MAX_FILE_HANDLES]; + +static uint8_t *output_base; +static size_t output_max_size, output_size; +static uint8_t output_filename[128]; + +void set_output_file(const char *filename, + uint8_t *base, size_t size) +{ + strcpy(output_filename, filename); + output_base = base; + output_max_size = size; +} + +long get_output_file_size(void) +{ + return output_size; +} + +static inline int get_file_handle(void) +{ + int i; + + for(i = 0; i < MAX_FILE_HANDLES; i++) { + if (!file_handles[i].base) + return i; + } + errno = ENOMEM; + return -1; +} + +int open(const char *filename, int access, ...) 
+{ + struct romfs_super_block *sb; + unsigned long addr, next; + struct romfs_inode *inode; + int type, fd, len; + char dir[1024]; + const char *p, *r; + + if (access & O_CREAT) { + /* specific case for file creation */ + if (strcmp(filename, output_filename) != 0) + return -EPERM; + fd = get_file_handle(); + if (fd < 0) + return fd; + file_handles[fd].base = output_base; + file_handles[fd].max_size = output_max_size; + file_handles[fd].is_rw = 1; + file_handles[fd].pos = 0; + file_handles[fd].size = 0; + return fd; + } + + show_filename(filename); + + sb = (void *)romfs_base; + if (sb->word0 != ROMSB_WORD0 || + sb->word1 != ROMSB_WORD1) + goto fail; + addr = ((unsigned long)sb->name + strlen(sb->name) + 1 + ROMFH_ALIGN - 1) & + ~(ROMFH_ALIGN - 1); + inode = (void *)addr; + + /* search the directory */ + p = filename; + while (*p == '/') + p++; + for(;;) { + r = strchr(p, '/'); + if (!r) + break; + len = r - p; + if (len > sizeof(dir) - 1) + goto fail; + memcpy(dir, p, len); + dir[len] = '\0'; + p = r + 1; +#ifdef ROMFS_DEBUG + printf("dir=%s\n", dir); +#endif + + for(;;) { + next = ntohl(inode->next); + type = next & 0xf; + next &= ~0xf; + if (!strcmp(dir, inode->name)) { +#ifdef ROMFS_DEBUG + printf("dirname=%s type=0x%x\n", inode->name, type); +#endif + if ((type & ROMFH_TYPE) == ROMFH_DIR) { + chdir: + addr = ((unsigned long)inode->name + strlen(inode->name) + + 1 + ROMFH_ALIGN - 1) & + ~(ROMFH_ALIGN - 1); + inode = (void *)addr; + break; + } else if ((type & ROMFH_TYPE) == ROMFH_HRD) { + addr = ntohl(inode->spec); + inode = (void *)(romfs_base + addr); + next = ntohl(inode->next); + type = next & 0xf; + if ((type & ROMFH_TYPE) != ROMFH_DIR) + goto fail; + goto chdir; + } + } + if (next == 0) + goto fail; + inode = (void *)(romfs_base + next); + } + } + for(;;) { + next = ntohl(inode->next); + type = next & 0xf; + next &= ~0xf; +#ifdef ROMFS_DEBUG + printf("name=%s type=0x%x\n", inode->name, type); +#endif + if ((type & ROMFH_TYPE) == ROMFH_REG) { + if (!strcmp(p, inode->name)) { + fd = get_file_handle(); + if (fd < 0) + return fd; + addr = ((unsigned long)inode->name + strlen(inode->name) + + 1 + ROMFH_ALIGN - 1) & + ~(ROMFH_ALIGN - 1); + file_handles[fd].base = (void *)addr; + file_handles[fd].is_rw = 0; + file_handles[fd].pos = 0; + file_handles[fd].size = ntohl(inode->size); + return fd; + } + } + if (next == 0) + break; + inode = (void *)(romfs_base + next); + } + fail: + errno = ENOENT; + return -1; +} + +int read(int fd, void *buf, size_t size) +{ + FileHandle *fh = &file_handles[fd]; + int len; + + len = fh->size - fh->pos; + if (len > (int)size) + len = size; + memcpy(buf, fh->base + fh->pos, len); + fh->pos += len; + return len; +} + +int write(int fd, const void *buf, size_t size) +{ + FileHandle *fh = &file_handles[fd]; + int len; + + if (!fh->is_rw) + return -EIO; + len = fh->max_size - fh->pos; + if ((int)size > len) { + errno = ENOSPC; + return -1; + } + memcpy(fh->base + fh->pos, buf, size); + fh->pos += size; + if (fh->pos > fh->size) { + fh->size = fh->pos; + output_size = fh->pos; + } + return size; +} + +long lseek(int fd, long offset, int whence) +{ + FileHandle *fh = &file_handles[fd]; + + switch(whence) { + case SEEK_SET: + break; + case SEEK_END: + offset += fh->size; + break; + case SEEK_CUR: + offset += fh->pos; + break; + default: + errno = EINVAL; + return -1; + } + if (offset < 0) { + errno = EINVAL; + return -1; + } + fh->pos = offset; + return offset; +} + +int close(int fd) +{ + FileHandle *fh = &file_handles[fd]; + fh->base = NULL; + return 0; +} + 
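+/*
+ * Illustrative sketch only (kept out of the build): one plausible way
+ * the file layer above is driven. The demo function, buffer and sizes
+ * are invented for this example; the real sequence lives in
+ * compile_kernel() in main.c.
+ */
+#if 0
+static void file_layer_demo(void)
+{
+    static uint8_t out_buf[1024];
+    char line[64];
+    int fd, len;
+
+    /* Read a file from the ROMFS image; lseek() to the end gives its size. */
+    fd = open("/boot/tccargs", O_RDONLY);
+    if (fd >= 0) {
+        len = (int)lseek(fd, 0, SEEK_END);
+        lseek(fd, 0, SEEK_SET);
+        if (len > (int)sizeof(line) - 1)
+            len = (int)sizeof(line) - 1;
+        len = read(fd, line, len);
+        line[len] = '\0';
+        close(fd);
+    }
+
+    /* Write side: only the registered output file may be created. */
+    set_output_file("kernel", out_buf, sizeof(out_buf));
+    fd = open("kernel", O_CREAT);
+    if (fd >= 0) {
+        write(fd, "\177ELF", 4);
+        close(fd);
+        printf("kernel size=%ld\n", get_output_file_size());
+    }
+}
+#endif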
+/**********************************************************/ + +float strtof(const char *nptr, char **endptr) +{ + fatal("unimplemented: %s", __func__ ); +} + +long double strtold(const char *nptr, char **endptr) +{ + fatal("unimplemented: %s", __func__ ); +} + +double ldexp(double x, int exp) +{ + fatal("unimplemented: %s", __func__ ); +} + +FILE *fopen(const char *path, const char *mode) +{ + fatal("unimplemented: %s", __func__ ); +} + +FILE *fdopen(int fildes, const char *mode) +{ + FILE *f; + f = malloc(sizeof(FILE)); + f->fd = fildes; + return f; +} + +int fclose(FILE *stream) +{ + close(stream->fd); + free(stream); +} + +size_t fwrite(const void *ptr, size_t size, size_t nmemb, FILE *stream) +{ + int ret; + if (nmemb == 1) { + ret = write(stream->fd, ptr, size); + if (ret < 0) + return ret; + } else { + ret = write(stream->fd, ptr, size * nmemb); + if (ret < 0) + return ret; + if (nmemb != 0) + ret /= nmemb; + } + return ret; +} + +int fputc(int c, FILE *stream) +{ + uint8_t ch = c; + write(stream->fd, &ch, 1); + return 0; +} diff --git a/linux-2.4.26-config b/linux-2.4.26-config new file mode 100644 index 0000000..9c76217 --- /dev/null +++ b/linux-2.4.26-config @@ -0,0 +1,551 @@ +# +# Automatically generated by make menuconfig: don't edit +# +CONFIG_X86=y +# CONFIG_SBUS is not set +CONFIG_UID16=y + +# +# Code maturity level options +# +# CONFIG_EXPERIMENTAL is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# Processor type and features +# +# CONFIG_M386 is not set +CONFIG_M486=y +# CONFIG_M586 is not set +# CONFIG_M586TSC is not set +# CONFIG_M586MMX is not set +# CONFIG_M686 is not set +# CONFIG_MPENTIUMIII is not set +# CONFIG_MPENTIUM4 is not set +# CONFIG_MK6 is not set +# CONFIG_MK7 is not set +# CONFIG_MK8 is not set +# CONFIG_MELAN is not set +# CONFIG_MCRUSOE is not set +# CONFIG_MWINCHIPC6 is not set +# CONFIG_MWINCHIP2 is not set +# CONFIG_MWINCHIP3D is not set +# CONFIG_MCYRIXIII is not set +# CONFIG_MVIAC3_2 is not set +CONFIG_X86_WP_WORKS_OK=y +CONFIG_X86_INVLPG=y +CONFIG_X86_CMPXCHG=y +CONFIG_X86_XADD=y +CONFIG_X86_BSWAP=y +CONFIG_X86_POPAD_OK=y +# CONFIG_RWSEM_GENERIC_SPINLOCK is not set +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_X86_L1_CACHE_SHIFT=4 +CONFIG_X86_USE_STRING_486=y +CONFIG_X86_ALIGNMENT_16=y +CONFIG_X86_PPRO_FENCE=y +# CONFIG_X86_F00F_WORKS_OK is not set +# CONFIG_X86_MCE is not set +# CONFIG_TOSHIBA is not set +# CONFIG_I8K is not set +# CONFIG_MICROCODE is not set +# CONFIG_X86_MSR is not set +# CONFIG_X86_CPUID is not set +CONFIG_NOHIGHMEM=y +# CONFIG_HIGHMEM4G is not set +# CONFIG_HIGHMEM64G is not set +# CONFIG_HIGHMEM is not set +# CONFIG_MATH_EMULATION is not set +# CONFIG_MTRR is not set +# CONFIG_SMP is not set +# CONFIG_X86_UP_APIC is not set +# CONFIG_X86_UP_IOAPIC is not set +# CONFIG_X86_TSC_DISABLE is not set + +# +# General setup +# +CONFIG_NET=y +# CONFIG_PCI is not set +# CONFIG_ISA is not set +# CONFIG_EISA is not set +# CONFIG_MCA is not set +# CONFIG_HOTPLUG is not set +# CONFIG_PCMCIA is not set +# CONFIG_HOTPLUG_PCI is not set +CONFIG_SYSVIPC=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +CONFIG_KCORE_ELF=y +# CONFIG_KCORE_AOUT is not set +# CONFIG_BINFMT_AOUT is not set +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set +# CONFIG_OOM_KILLER is not set +# CONFIG_PM is not set +# CONFIG_APM is not set + +# +# ACPI Support +# +# CONFIG_ACPI is not set + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# 
Plug and Play configuration +# +# CONFIG_PNP is not set +# CONFIG_ISAPNP is not set + +# +# Block devices +# +CONFIG_BLK_DEV_FD=y +# CONFIG_BLK_DEV_XD is not set +# CONFIG_PARIDE is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_CISS_SCSI_TAPE is not set +# CONFIG_CISS_MONITOR_THREAD is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_BLK_DEV_INITRD=y +# CONFIG_BLK_STATS is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set +# CONFIG_BLK_DEV_MD is not set +# CONFIG_MD_LINEAR is not set +# CONFIG_MD_RAID0 is not set +# CONFIG_MD_RAID1 is not set +# CONFIG_MD_RAID5 is not set +# CONFIG_MD_MULTIPATH is not set +# CONFIG_BLK_DEV_LVM is not set + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +# CONFIG_NETLINK_DEV is not set +# CONFIG_NETFILTER is not set +# CONFIG_FILTER is not set +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +# CONFIG_IP_ADVANCED_ROUTER is not set +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_IP_MROUTE is not set +# CONFIG_INET_ECN is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set + +# +# Appletalk devices +# +# CONFIG_DEV_APPLETALK is not set +# CONFIG_DECNET is not set +# CONFIG_BRIDGE is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set +# CONFIG_PHONE_IXJ is not set +# CONFIG_PHONE_IXJ_PCMCIA is not set + +# +# ATA/IDE/MFM/RLL support +# +CONFIG_IDE=y + +# +# IDE, ATA and ATAPI Block devices +# +CONFIG_BLK_DEV_IDE=y +# CONFIG_BLK_DEV_HD_IDE is not set +# CONFIG_BLK_DEV_HD is not set +CONFIG_BLK_DEV_IDEDISK=y +CONFIG_IDEDISK_MULTI_MODE=y +# CONFIG_IDEDISK_STROKE is not set +# CONFIG_BLK_DEV_IDECS is not set +CONFIG_BLK_DEV_IDECD=y +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_BLK_DEV_IDEFLOPPY is not set +# CONFIG_BLK_DEV_IDESCSI is not set +# CONFIG_IDE_TASK_IOCTL is not set +# CONFIG_BLK_DEV_CMD640 is not set +# CONFIG_BLK_DEV_CMD640_ENHANCED is not set +# CONFIG_BLK_DEV_ISAPNP is not set +# CONFIG_IDE_CHIPSETS is not set +# CONFIG_IDEDMA_AUTO is not set +# CONFIG_DMA_NONPCI is not set +# CONFIG_BLK_DEV_ATARAID is not set +# CONFIG_BLK_DEV_ATARAID_PDC is not set +# CONFIG_BLK_DEV_ATARAID_HPT is not set +# CONFIG_BLK_DEV_ATARAID_MEDLEY is not set +# CONFIG_BLK_DEV_ATARAID_SII is not set + +# +# SCSI support +# +# CONFIG_SCSI is not set + +# +# Fusion MPT device support +# +# CONFIG_FUSION is not set +# CONFIG_FUSION_BOOT is not set +# CONFIG_FUSION_ISENSE is not set +# CONFIG_FUSION_CTL is not set +# CONFIG_FUSION_LAN is not set + +# +# I2O device support +# +# CONFIG_I2O is not set +# CONFIG_I2O_BLOCK is not set +# CONFIG_I2O_LAN is not set +# CONFIG_I2O_SCSI is not set +# CONFIG_I2O_PROC is not set + +# +# Network device support +# +CONFIG_NETDEVICES=y + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +# CONFIG_SUNLANCE is not set +# CONFIG_SUNBMAC is not set +# CONFIG_SUNQE is not set +# CONFIG_SUNGEM is not set +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_LANCE is not set +# 
CONFIG_NET_VENDOR_SMC is not set +# CONFIG_NET_VENDOR_RACAL is not set +# CONFIG_NET_ISA is not set +# CONFIG_NET_PCI is not set +# CONFIG_NET_POCKET is not set + +# +# Ethernet (1000 Mbit) +# +# CONFIG_ACENIC is not set +# CONFIG_DL2K is not set +# CONFIG_E1000 is not set +# CONFIG_MYRI_SBUS is not set +# CONFIG_NS83820 is not set +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_R8169 is not set +# CONFIG_SK98LIN is not set +# CONFIG_TIGON3 is not set +# CONFIG_FDDI is not set +# CONFIG_PLIP is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Token Ring devices +# +# CONFIG_TR is not set +# CONFIG_NET_FC is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set + +# +# Amateur Radio support +# +# CONFIG_HAMRADIO is not set + +# +# IrDA (infrared) support +# +# CONFIG_IRDA is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Input core support +# +# CONFIG_INPUT is not set +# CONFIG_INPUT_KEYBDEV is not set +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_UINPUT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_SERIAL=y +CONFIG_SERIAL_CONSOLE=y +# CONFIG_SERIAL_EXTENDED is not set +# CONFIG_SERIAL_NONSTANDARD is not set +CONFIG_UNIX98_PTYS=y +CONFIG_UNIX98_PTY_COUNT=256 + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Mice +# +# CONFIG_BUSMOUSE is not set +CONFIG_MOUSE=y +CONFIG_PSMOUSE=y +# CONFIG_82C710_MOUSE is not set +# CONFIG_PC110_PAD is not set +# CONFIG_MK712_MOUSE is not set + +# +# Joysticks +# +# CONFIG_INPUT_GAMEPORT is not set +# CONFIG_QIC02_TAPE is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_IPMI_PANIC_EVENT is not set +# CONFIG_IPMI_DEVICE_INTERFACE is not set +# CONFIG_IPMI_KCS is not set +# CONFIG_IPMI_WATCHDOG is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_SCx200 is not set +# CONFIG_SCx200_GPIO is not set +# CONFIG_AMD_RNG is not set +# CONFIG_INTEL_RNG is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_AMD_PM768 is not set +# CONFIG_NVRAM is not set +# CONFIG_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set + +# +# Direct Rendering Manager (XFree86 DRI support) +# +# CONFIG_DRM is not set +# CONFIG_MWAVE is not set +# CONFIG_OBMOUSE is not set + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# File systems +# +# CONFIG_QUOTA is not set +# CONFIG_QFMT_V2 is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_REISERFS_CHECK is not set +# CONFIG_REISERFS_PROC_INFO is not set +# CONFIG_ADFS_FS is not set +# CONFIG_ADFS_FS_RW is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BEFS_DEBUG is not set +# CONFIG_BFS_FS is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_JBD_DEBUG is not set +# CONFIG_FAT_FS is not set +# CONFIG_MSDOS_FS is not set +# CONFIG_UMSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_CRAMFS is not set +CONFIG_TMPFS=y +CONFIG_RAMFS=y +CONFIG_ISO9660_FS=y +# CONFIG_JOLIET is not set +# CONFIG_ZISOFS is not set +# CONFIG_JFS_FS is not set +# 
CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_NTFS_FS is not set +# CONFIG_NTFS_RW is not set +# CONFIG_HPFS_FS is not set +CONFIG_PROC_FS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVFS_MOUNT is not set +# CONFIG_DEVFS_DEBUG is not set +CONFIG_DEVPTS_FS=y +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX4FS_RW is not set +CONFIG_ROMFS_FS=y +CONFIG_EXT2_FS=y +# CONFIG_SYSV_FS is not set +# CONFIG_UDF_FS is not set +# CONFIG_UDF_RW is not set +# CONFIG_UFS_FS is not set +# CONFIG_UFS_FS_WRITE is not set +# CONFIG_XFS_FS is not set +# CONFIG_XFS_QUOTA is not set +# CONFIG_XFS_RT is not set +# CONFIG_XFS_TRACE is not set +# CONFIG_XFS_DEBUG is not set + +# +# Network File Systems +# +# CONFIG_CODA_FS is not set +# CONFIG_INTERMEZZO_FS is not set +# CONFIG_NFS_FS is not set +# CONFIG_NFS_V3 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_ROOT_NFS is not set +# CONFIG_NFSD is not set +# CONFIG_NFSD_V3 is not set +# CONFIG_NFSD_TCP is not set +# CONFIG_SUNRPC is not set +# CONFIG_LOCKD is not set +# CONFIG_SMB_FS is not set +# CONFIG_NCP_FS is not set +# CONFIG_NCPFS_PACKET_SIGNING is not set +# CONFIG_NCPFS_IOCTL_LOCKING is not set +# CONFIG_NCPFS_STRONG is not set +# CONFIG_NCPFS_NFS_NS is not set +# CONFIG_NCPFS_OS2_NS is not set +# CONFIG_NCPFS_SMALLDOS is not set +# CONFIG_NCPFS_NLS is not set +# CONFIG_NCPFS_EXTRAS is not set +# CONFIG_ZISOFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_SMB_NLS is not set +# CONFIG_NLS is not set + +# +# Console drivers +# +CONFIG_VGA_CONSOLE=y +# CONFIG_VIDEO_SELECT is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# +# CONFIG_USB is not set + +# +# Support for USB gadgets +# +# CONFIG_USB_GADGET is not set + +# +# Bluetooth support +# +# CONFIG_BLUEZ is not set + +# +# Kernel hacking +# +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_STACKOVERFLOW is not set +# CONFIG_DEBUG_HIGHMEM is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_IOVIRT is not set +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_FRAME_POINTER is not set +CONFIG_LOG_BUF_SHIFT=0 + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_CRC32=y +# CONFIG_ZLIB_INFLATE is not set +# CONFIG_ZLIB_DEFLATE is not set diff --git a/linux-2.4.26-tcc.patch b/linux-2.4.26-tcc.patch new file mode 100644 index 0000000..85c84f5 --- /dev/null +++ b/linux-2.4.26-tcc.patch @@ -0,0 +1,427 @@ +diff -ruNw /tmp/linux-2.4.26/arch/i386/kernel/entry.S linux-2.4.26/arch/i386/kernel/entry.S +--- /tmp/linux-2.4.26/arch/i386/kernel/entry.S 2003-06-13 16:51:29.000000000 +0200 ++++ linux-2.4.26/arch/i386/kernel/entry.S 2004-10-23 22:19:02.000000000 +0200 +@@ -1,3 +1,4 @@ ++#define __ASSEMBLY__ + /* + * linux/arch/i386/entry.S + * +@@ -664,6 +665,21 @@ + .long SYMBOL_NAME(sys_ni_syscall) /* sys_remap_file_pages */ + .long SYMBOL_NAME(sys_ni_syscall) /* sys_set_tid_address */ + ++#if 0 + .rept NR_syscalls-(.-sys_call_table)/4 + .long SYMBOL_NAME(sys_ni_syscall) + .endr ++#else ++ .long SYMBOL_NAME(sys_ni_syscall) ++ .long SYMBOL_NAME(sys_ni_syscall) ++ .long SYMBOL_NAME(sys_ni_syscall) ++ .long SYMBOL_NAME(sys_ni_syscall) ++ .long SYMBOL_NAME(sys_ni_syscall) ++ .long SYMBOL_NAME(sys_ni_syscall) ++ .long SYMBOL_NAME(sys_ni_syscall) ++ .long SYMBOL_NAME(sys_ni_syscall) ++ .long SYMBOL_NAME(sys_ni_syscall) ++ .long SYMBOL_NAME(sys_ni_syscall) ++ .long 
SYMBOL_NAME(sys_ni_syscall) ++ .long SYMBOL_NAME(sys_ni_syscall) ++#endif +diff -ruNw /tmp/linux-2.4.26/arch/i386/kernel/head.S linux-2.4.26/arch/i386/kernel/head.S +--- /tmp/linux-2.4.26/arch/i386/kernel/head.S 2003-11-28 19:26:19.000000000 +0100 ++++ linux-2.4.26/arch/i386/kernel/head.S 2004-10-25 21:18:30.000000000 +0200 +@@ -1,3 +1,4 @@ ++#define __ASSEMBLY__ + /* + * linux/arch/i386/kernel/head.S -- the 32-bit startup code. + * +@@ -41,6 +42,8 @@ + * + * On entry, %esi points to the real-mode code as a 32-bit pointer. + */ ++.globl _start ++_start: + startup_32: + /* + * Set segments to known values +diff -ruNw /tmp/linux-2.4.26/arch/i386/kernel/i387.c linux-2.4.26/arch/i386/kernel/i387.c +--- /tmp/linux-2.4.26/arch/i386/kernel/i387.c 2003-08-25 13:44:39.000000000 +0200 ++++ linux-2.4.26/arch/i386/kernel/i387.c 2004-10-14 04:17:43.000000000 +0200 +@@ -25,7 +25,7 @@ + #define HAVE_HWFP 1 + #endif + +-static union i387_union empty_fpu_state; ++union i387_union empty_fpu_state; + + void __init boot_init_fpu(void) + { +diff -ruNw /tmp/linux-2.4.26/arch/i386/kernel/process.c linux-2.4.26/arch/i386/kernel/process.c +--- /tmp/linux-2.4.26/arch/i386/kernel/process.c 2004-02-18 14:36:30.000000000 +0100 ++++ linux-2.4.26/arch/i386/kernel/process.c 2004-10-25 21:30:36.000000000 +0200 +@@ -52,7 +52,7 @@ + + #include + +-asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); ++asmlinkage void ret_from_fork(void) /* __asm__("ret_from_fork") */ ; + + int hlt_counter; + +@@ -217,7 +217,7 @@ + 0x000092000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */ + }; + +-static struct ++struct + { + unsigned short size __attribute__ ((packed)); + unsigned long long * base __attribute__ ((packed)); +diff -ruNw /tmp/linux-2.4.26/arch/i386/kernel/setup.c linux-2.4.26/arch/i386/kernel/setup.c +--- /tmp/linux-2.4.26/arch/i386/kernel/setup.c 2004-04-14 15:05:25.000000000 +0200 ++++ linux-2.4.26/arch/i386/kernel/setup.c 2004-10-17 19:38:37.000000000 +0200 +@@ -1392,7 +1392,7 @@ + */ + + extern void vide(void); +-__asm__(".align 4\nvide: ret"); ++__asm__(".align 4\n.globl vide\nvide: ret"); + + static int __init init_amd(struct cpuinfo_x86 *c) + { +diff -ruNw /tmp/linux-2.4.26/arch/i386/mm/init.c linux-2.4.26/arch/i386/mm/init.c +--- /tmp/linux-2.4.26/arch/i386/mm/init.c 2004-04-14 15:05:25.000000000 +0200 ++++ linux-2.4.26/arch/i386/mm/init.c 2004-10-23 22:35:47.000000000 +0200 +@@ -36,6 +36,7 @@ + #include + #include + #include ++#include + #include + + mmu_gather_t mmu_gathers[NR_CPUS]; +diff -ruNw /tmp/linux-2.4.26/arch/i386/mm/pageattr.c linux-2.4.26/arch/i386/mm/pageattr.c +--- /tmp/linux-2.4.26/arch/i386/mm/pageattr.c 2002-11-29 00:53:09.000000000 +0100 ++++ linux-2.4.26/arch/i386/mm/pageattr.c 2004-10-14 00:43:58.000000000 +0200 +@@ -44,8 +44,12 @@ + addr = address & LARGE_PAGE_MASK; + pbase = (pte_t *)page_address(base); + for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { +- pbase[i] = mk_pte_phys(addr, +- addr == address ? 
prot : PAGE_KERNEL); ++ pgprot_t prot1; ++ if (addr == address) ++ prot1 = prot; ++ else ++ prot1 = PAGE_KERNEL; ++ pbase[i] = mk_pte_phys(addr, prot1); + } + return base; + } +diff -ruNw /tmp/linux-2.4.26/drivers/ide/ide-lib.c linux-2.4.26/drivers/ide/ide-lib.c +--- /tmp/linux-2.4.26/drivers/ide/ide-lib.c 2003-06-13 16:51:33.000000000 +0200 ++++ linux-2.4.26/drivers/ide/ide-lib.c 2004-10-23 23:00:51.000000000 +0200 +@@ -171,7 +171,7 @@ + BUG(); + return min(speed, speed_max[mode]); + #else /* !CONFIG_BLK_DEV_IDEDMA */ +- return min(speed, XFER_PIO_4); ++ return min((int)speed, XFER_PIO_4); + #endif /* CONFIG_BLK_DEV_IDEDMA */ + } + +diff -ruNw /tmp/linux-2.4.26/fs/partitions/efi.h linux-2.4.26/fs/partitions/efi.h +--- /tmp/linux-2.4.26/fs/partitions/efi.h 2003-08-25 13:44:43.000000000 +0200 ++++ linux-2.4.26/fs/partitions/efi.h 2004-10-25 21:32:29.000000000 +0200 +@@ -85,9 +85,13 @@ + } __attribute__ ((packed)) gpt_header; + + typedef struct _gpt_entry_attributes { ++#if 0 + u64 required_to_function:1; + u64 reserved:47; + u64 type_guid_specific:16; ++#else ++ u64 required; ++#endif + } __attribute__ ((packed)) gpt_entry_attributes; + + typedef struct _gpt_entry { +diff -ruNw /tmp/linux-2.4.26/include/asm-i386/bugs.h linux-2.4.26/include/asm-i386/bugs.h +--- /tmp/linux-2.4.26/include/asm-i386/bugs.h 2002-08-03 02:39:45.000000000 +0200 ++++ linux-2.4.26/include/asm-i386/bugs.h 2004-10-25 21:31:34.000000000 +0200 +@@ -50,8 +50,8 @@ + + __setup("no387", no_387); + +-static double __initdata x = 4195835.0; +-static double __initdata y = 3145727.0; ++double __initdata x = 4195835.0; ++double __initdata y = 3145727.0; + + /* + * This used to check for exceptions.. +diff -ruNw /tmp/linux-2.4.26/include/asm-i386/byteorder.h linux-2.4.26/include/asm-i386/byteorder.h +--- /tmp/linux-2.4.26/include/asm-i386/byteorder.h 2003-06-13 16:51:38.000000000 +0200 ++++ linux-2.4.26/include/asm-i386/byteorder.h 2004-10-23 23:08:08.000000000 +0200 +@@ -42,8 +42,10 @@ + __u64 u; + } v; + v.u = val; +-#ifdef CONFIG_X86_BSWAP +- asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" ++#if defined(CONFIG_X86_BSWAP) && 0 ++ /* XXX: constraint bug ++ bswap %eax ; bswap (%ecx) ; xchgl %eax,(%ecx) */ ++ asm("bswap %0 ; bswap %1 ; xchgl %0,%1" + : "=r" (v.s.a), "=r" (v.s.b) + : "0" (v.s.a), "1" (v.s.b)); + #else +diff -ruNw /tmp/linux-2.4.26/include/asm-i386/hw_irq.h linux-2.4.26/include/asm-i386/hw_irq.h +--- /tmp/linux-2.4.26/include/asm-i386/hw_irq.h 2003-08-25 13:44:43.000000000 +0200 ++++ linux-2.4.26/include/asm-i386/hw_irq.h 2004-10-23 23:08:08.000000000 +0200 +@@ -156,6 +156,7 @@ + asmlinkage void call_do_IRQ(void); \ + __asm__( \ + "\n" __ALIGN_STR"\n" \ ++ ".globl common_interrupt\n\t" \ + "common_interrupt:\n\t" \ + SAVE_ALL \ + SYMBOL_NAME_STR(call_do_IRQ)":\n\t" \ +@@ -176,6 +177,7 @@ + asmlinkage void IRQ_NAME(nr); \ + __asm__( \ + "\n"__ALIGN_STR"\n" \ ++".globl " SYMBOL_NAME_STR(IRQ) #nr "_interrupt\n\t"\ + SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \ + "pushl $"#nr"-256\n\t" \ + "jmp common_interrupt"); +diff -ruNw /tmp/linux-2.4.26/include/asm-i386/page.h linux-2.4.26/include/asm-i386/page.h +--- /tmp/linux-2.4.26/include/asm-i386/page.h 2002-08-03 02:39:45.000000000 +0200 ++++ linux-2.4.26/include/asm-i386/page.h 2004-10-23 23:08:08.000000000 +0200 +@@ -95,7 +95,7 @@ + * undefined" opcode for parsing in the trap handler. 
+ */ + +-#if 1 /* Set to zero for a slightly smaller kernel */ ++#if 0 /* Set to zero for a slightly smaller kernel */ + #define BUG() \ + __asm__ __volatile__( "ud2\n" \ + "\t.word %c0\n" \ +diff -ruNw /tmp/linux-2.4.26/include/asm-i386/processor.h linux-2.4.26/include/asm-i386/processor.h +--- /tmp/linux-2.4.26/include/asm-i386/processor.h 2004-02-18 14:36:32.000000000 +0100 ++++ linux-2.4.26/include/asm-i386/processor.h 2004-10-23 23:08:08.000000000 +0200 +@@ -300,6 +300,7 @@ + long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ + long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ + long padding[56]; ++ int dummy[0] __attribute__ ((aligned (16))); + } __attribute__ ((aligned (16))); + + struct i387_soft_struct { +diff -ruNw /tmp/linux-2.4.26/include/asm-i386/semaphore.h linux-2.4.26/include/asm-i386/semaphore.h +--- /tmp/linux-2.4.26/include/asm-i386/semaphore.h 2002-11-29 00:53:15.000000000 +0100 ++++ linux-2.4.26/include/asm-i386/semaphore.h 2004-10-25 21:31:34.000000000 +0200 +@@ -207,7 +207,7 @@ + "2:\tcall __up_wakeup\n\t" + "jmp 1b\n" + LOCK_SECTION_END +- ".subsection 0\n" ++ /* ".subsection 0\n" */ + :"=m" (sem->count) + :"c" (sem) + :"memory"); +diff -ruNw /tmp/linux-2.4.26/include/asm-i386/string.h linux-2.4.26/include/asm-i386/string.h +--- /tmp/linux-2.4.26/include/asm-i386/string.h 2001-11-22 20:46:18.000000000 +0100 ++++ linux-2.4.26/include/asm-i386/string.h 2004-10-23 23:08:08.000000000 +0200 +@@ -178,7 +178,7 @@ + "leal -1(%%esi),%0\n" + "2:\ttestb %%al,%%al\n\t" + "jne 1b" +- :"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c)); ++ :"=r" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c)); + return __res; + } + +diff -ruNw /tmp/linux-2.4.26/include/asm-i386/system.h linux-2.4.26/include/asm-i386/system.h +--- /tmp/linux-2.4.26/include/asm-i386/system.h 2004-04-14 15:05:40.000000000 +0200 ++++ linux-2.4.26/include/asm-i386/system.h 2004-10-25 21:30:22.000000000 +0200 +@@ -28,7 +28,7 @@ + "popl %%esi\n\t" \ + :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \ + "=b" (last) \ +- :"m" (next->thread.esp),"m" (next->thread.eip), \ ++ :"g" (next->thread.esp),"g" (next->thread.eip), \ + "a" (prev), "d" (next), \ + "b" (prev)); \ + } while (0) +@@ -313,7 +313,7 @@ + #define set_wmb(var, value) do { var = value; wmb(); } while (0) + + /* interrupt control.. 
*/ +-#define __save_flags(x) __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */) ++#define __save_flags(x) __asm__ __volatile__("pushfl ; popl %0" : "=g" (x) /* no input */) + #define __restore_flags(x) __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc") + #define __cli() __asm__ __volatile__("cli": : :"memory") + #define __sti() __asm__ __volatile__("sti": : :"memory") +diff -ruNw /tmp/linux-2.4.26/include/linux/byteorder/generic.h linux-2.4.26/include/linux/byteorder/generic.h +--- /tmp/linux-2.4.26/include/linux/byteorder/generic.h 2003-11-28 19:26:21.000000000 +0100 ++++ linux-2.4.26/include/linux/byteorder/generic.h 2004-10-17 22:09:20.000000000 +0200 +@@ -86,8 +86,8 @@ + */ + #define cpu_to_le64 __cpu_to_le64 + #define le64_to_cpu __le64_to_cpu +-#define cpu_to_le32 __cpu_to_le32 +-#define le32_to_cpu __le32_to_cpu ++#define cpu_to_le32(x) __cpu_to_le32(x) ++#define le32_to_cpu(x) __le32_to_cpu(x) + #define cpu_to_le16 __cpu_to_le16 + #define le16_to_cpu __le16_to_cpu + #define cpu_to_be64 __cpu_to_be64 +diff -ruNw /tmp/linux-2.4.26/include/linux/linkage.h linux-2.4.26/include/linux/linkage.h +--- /tmp/linux-2.4.26/include/linux/linkage.h 2000-12-11 21:49:54.000000000 +0100 ++++ linux-2.4.26/include/linux/linkage.h 2004-10-23 23:08:08.000000000 +0200 +@@ -19,11 +19,7 @@ + + #define SYMBOL_NAME_STR(X) #X + #define SYMBOL_NAME(X) X +-#ifdef __STDC__ +-#define SYMBOL_NAME_LABEL(X) X##: +-#else +-#define SYMBOL_NAME_LABEL(X) X/**/: +-#endif ++#define SYMBOL_NAME_LABEL(X) X: + + #ifdef __arm__ + #define __ALIGN .align 0 +diff -ruNw /tmp/linux-2.4.26/include/linux/spinlock.h linux-2.4.26/include/linux/spinlock.h +--- /tmp/linux-2.4.26/include/linux/spinlock.h 2004-02-18 14:36:32.000000000 +0100 ++++ linux-2.4.26/include/linux/spinlock.h 2004-10-25 21:31:34.000000000 +0200 +@@ -41,6 +41,7 @@ + + #include + ++#if 0 + #define LOCK_SECTION_NAME \ + ".text.lock." 
__stringify(KBUILD_BASENAME) + +@@ -51,6 +52,11 @@ + LOCK_SECTION_NAME ":\n\t" \ + ".endif\n\t" + ++#else ++#define LOCK_SECTION_NAME ".text.lock" ++#define LOCK_SECTION_START(extra) ".section " LOCK_SECTION_NAME "\n\t" ++#endif ++ + #define LOCK_SECTION_END \ + ".previous\n\t" + +diff -ruNw /tmp/linux-2.4.26/include/linux/wait.h linux-2.4.26/include/linux/wait.h +--- /tmp/linux-2.4.26/include/linux/wait.h 2003-08-25 13:44:44.000000000 +0200 ++++ linux-2.4.26/include/linux/wait.h 2004-10-25 21:31:34.000000000 +0200 +@@ -64,14 +64,14 @@ + # define wq_lock_t spinlock_t + # define WAITQUEUE_RW_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED + +-# define wq_read_lock spin_lock +-# define wq_read_lock_irqsave spin_lock_irqsave +-# define wq_read_unlock spin_unlock +-# define wq_read_unlock_irqrestore spin_unlock_irqrestore +-# define wq_write_lock_irq spin_lock_irq +-# define wq_write_lock_irqsave spin_lock_irqsave +-# define wq_write_unlock_irqrestore spin_unlock_irqrestore +-# define wq_write_unlock spin_unlock ++# define wq_read_lock(lock) spin_lock(lock) ++# define wq_read_lock_irqsave(lock, flags) spin_lock_irqsave(lock, flags) ++# define wq_read_unlock(lock) spin_unlock(lock) ++# define wq_read_unlock_irqrestore(lock, flags) spin_unlock_irqrestore(lock, flags) ++# define wq_write_lock_irq(lock) spin_lock_irq(lock) ++# define wq_write_lock_irqsave(lock, flags) spin_lock_irqsave(lock, flags) ++# define wq_write_unlock_irqrestore(lock, flags) spin_unlock_irqrestore(lock, flags) ++# define wq_write_unlock(lock) spin_unlock(lock) + #endif + + struct __wait_queue_head { +diff -ruNw /tmp/linux-2.4.26/net/core/dev.c linux-2.4.26/net/core/dev.c +--- /tmp/linux-2.4.26/net/core/dev.c 2004-04-14 15:05:41.000000000 +0200 ++++ linux-2.4.26/net/core/dev.c 2004-10-14 03:27:45.000000000 +0200 +@@ -2013,8 +2013,17 @@ + ret = 0; + if ((old_flags^flags)&IFF_UP) /* Bit is different ? */ + { +- ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev); ++ int (*dev_func)(struct net_device *); + ++#if 0 ++ ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev); ++#else ++ if (old_flags & IFF_UP) ++ dev_func = dev_close; ++ else ++ dev_func = dev_open; ++ ret = dev_func(dev); ++#endif + if (ret == 0) + dev_mc_upload(dev); + } +diff -ruNw /tmp/linux-2.4.26/net/ipv4/raw.c linux-2.4.26/net/ipv4/raw.c +--- /tmp/linux-2.4.26/net/ipv4/raw.c 2003-08-25 13:44:44.000000000 +0200 ++++ linux-2.4.26/net/ipv4/raw.c 2004-10-23 22:53:26.000000000 +0200 +@@ -311,6 +311,10 @@ + u32 daddr; + u8 tos; + int err; ++ int (*getfrag)(const void *, ++ char *, ++ unsigned int, ++ unsigned int); + + /* This check is ONLY to check for arithmetic overflow + on integer(!) len. Not more! Real check will be made +@@ -426,8 +430,11 @@ + rfh.dst = &rt->u.dst; + if (!ipc.addr) + ipc.addr = rt->rt_dst; +- err = ip_build_xmit(sk, sk->protinfo.af_inet.hdrincl ? raw_getrawfrag : +- raw_getfrag, &rfh, len, &ipc, rt, msg->msg_flags); ++ if (sk->protinfo.af_inet.hdrincl) ++ getfrag =raw_getrawfrag; ++ else ++ getfrag = raw_getfrag; ++ err = ip_build_xmit(sk, getfrag, &rfh, len, &ipc, rt, msg->msg_flags); + + done: + if (free) +diff -ruNw /tmp/linux-2.4.26/net/ipv4/udp.c linux-2.4.26/net/ipv4/udp.c +--- /tmp/linux-2.4.26/net/ipv4/udp.c 2004-04-14 15:05:41.000000000 +0200 ++++ linux-2.4.26/net/ipv4/udp.c 2004-10-23 22:54:30.000000000 +0200 +@@ -441,6 +441,10 @@ + u32 daddr; + u8 tos; + int err; ++ int (*getfrag)(const void *, ++ char *, ++ unsigned int, ++ unsigned int); + + /* This check is ONLY to check for arithmetic overflow + on integer(!) len. Not more! 
Real check will be made +@@ -560,11 +564,12 @@ + /* RFC1122: OK. Provides the checksumming facility (MUST) as per */ + /* 4.1.3.4. It's configurable by the application via setsockopt() */ + /* (MAY) and it defaults to on (MUST). */ +- ++ if (sk->no_check == UDP_CSUM_NOXMIT) ++ getfrag = udp_getfrag_nosum; ++ else ++ getfrag = udp_getfrag; + err = ip_build_xmit(sk, +- (sk->no_check == UDP_CSUM_NOXMIT ? +- udp_getfrag_nosum : +- udp_getfrag), ++ getfrag, + &ufh, ulen, &ipc, rt, msg->msg_flags); + + out: diff --git a/main.c b/main.c new file mode 100644 index 0000000..ad55b3f --- /dev/null +++ b/main.c @@ -0,0 +1,253 @@ +#include "tccboot.h" +#include +#include + +#define TCCARGS_FILE "/boot/tccargs" +#define KERNEL_MAX_SIZE (8 * 1024 * 1024) +#define INITRD_MAX_SIZE (20 * 1024 * 1024) +#define INITRD_MIN_ADDR 0x800000 +#define KERNEL_FILENAME "kernel" + +//#define DEBUG_INITRD_ADDR + +#define MAX_ARGS 1024 + +struct moveparams { + uint8_t *low_buffer_start; int lcount; + uint8_t *high_buffer_start; int hcount; +}; + +static unsigned char *real_mode; /* Pointer to real-mode data */ +#define EXT_MEM_K (*(unsigned short *)(real_mode + 0x2)) +#ifndef STANDARD_MEMORY_BIOS_CALL +#define ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0)) +#endif +#define SCREEN_INFO (*(struct screen_info *)(real_mode+0)) +#define INITRD_START (*(unsigned long *) (real_mode+0x218)) +#define INITRD_SIZE (*(unsigned long *) (real_mode+0x21c)) + +#define STACK_SIZE (256 * 1024) + +long user_stack [STACK_SIZE]; + +struct { + long * a; + short b; +} stack_start = { & user_stack [STACK_SIZE] , __KERNEL_DS }; + + +static char *vidmem = (char *)0xb8000; +static int vidport; +static int lines, cols; + +void video_init(void) +{ + if (SCREEN_INFO.orig_video_mode == 7) { + vidmem = (char *) 0xb0000; + vidport = 0x3b4; + } else { + vidmem = (char *) 0xb8000; + vidport = 0x3d4; + } + + lines = SCREEN_INFO.orig_video_lines; + cols = SCREEN_INFO.orig_video_cols; +} + +static void scroll(void) +{ + int i; + + memcpy ( vidmem, vidmem + cols * 2, ( lines - 1 ) * cols * 2 ); + for ( i = ( lines - 1 ) * cols * 2; i < lines * cols * 2; i += 2 ) + vidmem[i] = ' '; +} + +void putstr(const char *s) +{ + int x,y,pos; + char c; + + x = SCREEN_INFO.orig_x; + y = SCREEN_INFO.orig_y; + + while ( ( c = *s++ ) != '\0' ) { + if ( c == '\n' ) { + x = 0; + if ( ++y >= lines ) { + scroll(); + y--; + } + } else if (c == '\r') { + x = 0; + } else { + vidmem [ ( x + cols * y ) * 2 ] = c; + if ( ++x >= cols ) { + x = 0; + if ( ++y >= lines ) { + scroll(); + y--; + } + } + } + } + + SCREEN_INFO.orig_x = x; + SCREEN_INFO.orig_y = y; + + pos = (x + cols * y) * 2; /* Update cursor position */ + outb_p(14, vidport); + outb_p(0xff & (pos >> 9), vidport+1); + outb_p(15, vidport); + outb_p(0xff & (pos >> 1), vidport+1); +} + +void exit(int val) +{ + printf("\n\n -- System halted"); + while (1); +} + +char *tcc_args[MAX_ARGS]; + +static int expand_args(char ***pargv, const char *str) +{ + const char *s1; + char **argv, *arg; + int argc, len; + + argc = 0; + argv = tcc_args; + argv[argc++] = "tcc"; + for(;;) { + while (isspace(*str)) + str++; + if (*str == '\0') + break; + if (*str == '#') { + while (*str != '\n') + str++; + continue; + } + s1 = str; + while (*str != '\0' && !isspace(*str)) + str++; + len = str - s1; + arg = malloc(len + 1); + memcpy(arg, s1, len); + arg[len] = '\0'; + argv[argc++] = arg; + } + *pargv = argv; + return argc; +} + +void show_filename(const char *filename) +{ + int len; + static int counter; + char counter_ch[4] = "-\\|/"; + + len = 
strlen(filename); + if (len >= 2 && filename[len - 2] == '.' && filename[len - 1] == 'c') { + printf("%c %-50s\r", counter_ch[counter], filename); + counter = (counter + 1) & 3; + } +} + +int compile_kernel(struct moveparams *mv, void *rmode) +{ + int fd, len; + char *args_buf; + char **argv; + int argc, ret, romfs_len; + uint8_t *kernel_ptr, *initrd_ptr; + unsigned long romfs_base1; + + real_mode = rmode; + + video_init(); + + /* this is risky, but normally the initrd is not mapped inside the + malloc structures. However, it can overlap with its new + location */ + if (!INITRD_SIZE || !INITRD_START) + fatal("the kernel source must be in a ROMFS filesystem stored as Initial Ram Disk (INITRD)"); + len = INITRD_SIZE; + /* NOTE: it is very important to move initrd first to avoid + destroying it later */ + initrd_ptr = memalign(16, len); + memmove(initrd_ptr, (void *)INITRD_START, len); + if (initrd_ptr[0] == 037 && ((initrd_ptr[1] == 0213) || + (initrd_ptr[1] == 0236))) { + printf("Decompressing initrd...\n"); + romfs_base = memalign(16, INITRD_MAX_SIZE); + romfs_len = do_gunzip(romfs_base, initrd_ptr, len); + /* realloc it to minimize memory usage */ + romfs_base = realloc(romfs_base, romfs_len); + free(initrd_ptr); + } else { + romfs_base = initrd_ptr; + romfs_len = len; + } + + kernel_ptr = malloc(KERNEL_MAX_SIZE); + set_output_file("kernel", kernel_ptr, KERNEL_MAX_SIZE); + +#ifdef DEBUG_INITRD_ADDR + printf("romfs_base=%p romfs_len=%d kernel_ptr=%p\n", + romfs_base, romfs_len, kernel_ptr); +#endif + printf("Compiling kernel...\n"); + + fd = open("/boot/tccargs", O_RDONLY); + if (fd < 0) + fatal("Could not find '%s'", TCCARGS_FILE); + len = lseek(fd, 0, SEEK_END); + lseek(fd, 0, SEEK_SET); + args_buf = malloc(len + 1); + len = read(fd, args_buf, len); + close(fd); + + args_buf[len] = '\0'; + argc = expand_args(&argv, args_buf); + argv[argc] = NULL; + free(args_buf); +#if 0 + { + int i; + for(i=0;ilcount = 0; + mv->hcount = get_output_file_size(); + mv->high_buffer_start = kernel_ptr; + + /* relocate the uncompressed initrd so that the kernel cannot + overwrite it */ + romfs_base1 = ((unsigned long)(mv->high_buffer_start) + + mv->hcount + PAGE_SIZE - 1) & + ~(PAGE_SIZE - 1); + if (romfs_base1 < INITRD_MIN_ADDR) + romfs_base1 = INITRD_MIN_ADDR; + if (!(kernel_ptr >= romfs_base + romfs_len && + (unsigned long)romfs_base >= INITRD_MIN_ADDR) && + (unsigned long)romfs_base < romfs_base1) { + memmove((void *)romfs_base1, romfs_base, romfs_len); + romfs_base = (void *)romfs_base1; + } +#ifdef DEBUG_INITRD_ADDR + printf("initrd_start=%p initrd_size=%d\n", romfs_base, romfs_len); +#endif + INITRD_START = (unsigned long)romfs_base; + INITRD_SIZE = romfs_len; + return 1; +} diff --git a/malloc.c b/malloc.c new file mode 100644 index 0000000..3703b23 --- /dev/null +++ b/malloc.c @@ -0,0 +1,5482 @@ +/* + This is a version (aka dlmalloc) of malloc/free/realloc written by + Doug Lea and released to the public domain. Use, modify, and + redistribute this code without permission or acknowledgement in any + way you wish. Send questions, comments, complaints, performance + data, etc to dl@cs.oswego.edu + +* VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee) + + Note: There may be an updated version of this malloc obtainable at + ftp://gee.cs.oswego.edu/pub/misc/malloc.c + Check before installing! + +* Quickstart + + This library is all in one file to simplify the most common usage: + ftp it, compile it (-O), and link it into another program. 
All + of the compile-time options default to reasonable values for use on + most unix platforms. Compile -DWIN32 for reasonable defaults on windows. + You might later want to step through various compile-time and dynamic + tuning options. + + For convenience, an include file for code using this malloc is at: + ftp://gee.cs.oswego.edu/pub/misc/malloc-2.7.1.h + You don't really need this .h file unless you call functions not + defined in your system include files. The .h file contains only the + excerpts from this file needed for using this malloc on ANSI C/C++ + systems, so long as you haven't changed compile-time options about + naming and tuning parameters. If you do, then you can create your + own malloc.h that does include all settings by cutting at the point + indicated below. + +* Why use this malloc? + + This is not the fastest, most space-conserving, most portable, or + most tunable malloc ever written. However it is among the fastest + while also being among the most space-conserving, portable and tunable. + Consistent balance across these factors results in a good general-purpose + allocator for malloc-intensive programs. + + The main properties of the algorithms are: + * For large (>= 512 bytes) requests, it is a pure best-fit allocator, + with ties normally decided via FIFO (i.e. least recently used). + * For small (<= 64 bytes by default) requests, it is a caching + allocator, that maintains pools of quickly recycled chunks. + * In between, and for combinations of large and small requests, it does + the best it can trying to meet both goals at once. + * For very large requests (>= 128KB by default), it relies on system + memory mapping facilities, if supported. + + For a longer but slightly out of date high-level description, see + http://gee.cs.oswego.edu/dl/html/malloc.html + + You may already by default be using a C library containing a malloc + that is based on some version of this malloc (for example in + linux). You might still want to use the one in this file in order to + customize settings or to avoid overheads associated with library + versions. + +* Contents, described in more detail in "description of public routines" below. + + Standard (ANSI/SVID/...) functions: + malloc(size_t n); + calloc(size_t n_elements, size_t element_size); + free(Void_t* p); + realloc(Void_t* p, size_t n); + memalign(size_t alignment, size_t n); + valloc(size_t n); + mallinfo() + mallopt(int parameter_number, int parameter_value) + + Additional functions: + independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]); + independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]); + pvalloc(size_t n); + cfree(Void_t* p); + malloc_trim(size_t pad); + malloc_usable_size(Void_t* p); + malloc_stats(); + +* Vital statistics: + + Supported pointer representation: 4 or 8 bytes + Supported size_t representation: 4 or 8 bytes + Note that size_t is allowed to be 4 bytes even if pointers are 8. + You can adjust this by defining INTERNAL_SIZE_T + + Alignment: 2 * sizeof(size_t) (default) + (i.e., 8 byte alignment with 4byte size_t). This suffices for + nearly all current machines and C compilers. However, you can + define MALLOC_ALIGNMENT to be wider than this if necessary. + + Minimum overhead per allocated chunk: 4 or 8 bytes + Each malloced chunk has a hidden word of overhead holding size + and status information. 
+ + Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead) + 8-byte ptrs: 24/32 bytes (including, 4/8 overhead) + + When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte + ptrs but 4 byte size) or 24 (for 8/8) additional bytes are + needed; 4 (8) for a trailing size field and 8 (16) bytes for + free list pointers. Thus, the minimum allocatable size is + 16/24/32 bytes. + + Even a request for zero bytes (i.e., malloc(0)) returns a + pointer to something of the minimum allocatable size. + + The maximum overhead wastage (i.e., number of extra bytes + allocated than were requested in malloc) is less than or equal + to the minimum size, except for requests >= mmap_threshold that + are serviced via mmap(), where the worst case wastage is 2 * + sizeof(size_t) bytes plus the remainder from a system page (the + minimal mmap unit); typically 4096 or 8192 bytes. + + Maximum allocated size: 4-byte size_t: 2^32 minus about two pages + 8-byte size_t: 2^64 minus about two pages + + It is assumed that (possibly signed) size_t values suffice to + represent chunk sizes. `Possibly signed' is due to the fact + that `size_t' may be defined on a system as either a signed or + an unsigned type. The ISO C standard says that it must be + unsigned, but a few systems are known not to adhere to this. + Additionally, even when size_t is unsigned, sbrk (which is by + default used to obtain memory from system) accepts signed + arguments, and may not be able to handle size_t-wide arguments + with negative sign bit. Generally, values that would + appear as negative after accounting for overhead and alignment + are supported only via mmap(), which does not have this + limitation. + + Requests for sizes outside the allowed range will perform an optional + failure action and then return null. (Requests may also + also fail because a system is out of memory.) + + Thread-safety: NOT thread-safe unless USE_MALLOC_LOCK defined + + When USE_MALLOC_LOCK is defined, wrappers are created to + surround every public call with either a pthread mutex or + a win32 spinlock (depending on WIN32). This is not + especially fast, and can be a major bottleneck. + It is designed only to provide minimal protection + in concurrent environments, and to provide a basis for + extensions. If you are using malloc in a concurrent program, + you would be far better off obtaining ptmalloc, which is + derived from a version of this malloc, and is well-tuned for + concurrent programs. (See http://www.malloc.de) Note that + even when USE_MALLOC_LOCK is defined, you can can guarantee + full thread-safety only if no threads acquire memory through + direct calls to MORECORE or other system-level allocators. + + Compliance: I believe it is compliant with the 1997 Single Unix Specification + (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably + others as well. + +* Synopsis of compile-time options: + + People have reported using previous versions of this malloc on all + versions of Unix, sometimes by tweaking some of the defines + below. It has been tested most extensively on Solaris and + Linux. It is also reported to work on WIN32 platforms. + People also report using it in stand-alone embedded systems. + + The implementation is in straight, hand-tuned ANSI C. It is not + at all modular. (Sorry!) It uses a lot of macros. To be at all + usable, this code should be compiled using an optimizing compiler + (for example gcc -O3) that can simplify expressions and control + paths. 
(FAQ: some macros import variables as arguments rather than + declare locals because people reported that some debuggers + otherwise get confused.) + + OPTION DEFAULT VALUE + + Compilation Environment options: + + __STD_C derived from C compiler defines + WIN32 NOT defined + HAVE_MEMCPY defined + USE_MEMCPY 1 if HAVE_MEMCPY is defined + HAVE_MMAP defined as 1 + MMAP_CLEARS 1 + HAVE_MREMAP 0 unless linux defined + malloc_getpagesize derived from system #includes, or 4096 if not + HAVE_USR_INCLUDE_MALLOC_H NOT defined + LACKS_UNISTD_H NOT defined unless WIN32 + LACKS_SYS_PARAM_H NOT defined unless WIN32 + LACKS_SYS_MMAN_H NOT defined unless WIN32 + LACKS_FCNTL_H NOT defined + + Changing default word sizes: + + INTERNAL_SIZE_T size_t + MALLOC_ALIGNMENT 2 * sizeof(INTERNAL_SIZE_T) + PTR_UINT unsigned long + CHUNK_SIZE_T unsigned long + + Configuration and functionality options: + + USE_DL_PREFIX NOT defined + USE_PUBLIC_MALLOC_WRAPPERS NOT defined + USE_MALLOC_LOCK NOT defined + DEBUG NOT defined + REALLOC_ZERO_BYTES_FREES NOT defined + MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op + TRIM_FASTBINS 0 + FIRST_SORTED_BIN_SIZE 512 + + Options for customizing MORECORE: + + MORECORE sbrk + MORECORE_CONTIGUOUS 1 + MORECORE_CANNOT_TRIM NOT defined + MMAP_AS_MORECORE_SIZE (1024 * 1024) + + Tuning options that are also dynamically changeable via mallopt: + + DEFAULT_MXFAST 64 + DEFAULT_TRIM_THRESHOLD 256 * 1024 + DEFAULT_TOP_PAD 0 + DEFAULT_MMAP_THRESHOLD 256 * 1024 + DEFAULT_MMAP_MAX 65536 + + There are several other #defined constants and macros that you + probably don't want to touch unless you are extending or adapting malloc. +*/ + +/* + WIN32 sets up defaults for MS environment and compilers. + Otherwise defaults are for unix. +*/ + +/* #define WIN32 */ + +#ifdef WIN32 + +#define WIN32_LEAN_AND_MEAN +#include + +/* Win32 doesn't supply or need the following headers */ +#define LACKS_UNISTD_H +#define LACKS_SYS_PARAM_H +#define LACKS_SYS_MMAN_H + +/* Use the supplied emulation of sbrk */ +#define MORECORE sbrk +#define MORECORE_CONTIGUOUS 1 +#define MORECORE_FAILURE ((void*)(-1)) + +/* Use the supplied emulation of mmap and munmap */ +//#define HAVE_MMAP 1 +#define MUNMAP_FAILURE (-1) +#define MMAP_CLEARS 1 + +/* These values don't really matter in windows mmap emulation */ +#define MAP_PRIVATE 1 +#define MAP_ANONYMOUS 2 +#define PROT_READ 1 +#define PROT_WRITE 2 + +/* Emulation functions defined at the end of this file */ + +/* If USE_MALLOC_LOCK, use supplied critical-section-based lock functions */ +#ifdef USE_MALLOC_LOCK +static int slwait(int *sl); +static int slrelease(int *sl); +#endif + +static long getpagesize(void); +static long getregionsize(void); +static void *sbrk(long size); +static void *mmap(void *ptr, long size, long prot, long type, long handle, long arg); +static long munmap(void *ptr, long size); + +static void vminfo (unsigned long*free, unsigned long*reserved, unsigned long*committed); +static int cpuinfo (int whole, unsigned long*kernel, unsigned long*user); + +#endif + +/* + __STD_C should be nonzero if using ANSI-standard C compiler, a C++ + compiler, or a C compiler sufficiently close to ANSI to get away + with it. 
+*/ + +#ifndef __STD_C +#if defined(__STDC__) || defined(_cplusplus) +#define __STD_C 1 +#else +#define __STD_C 0 +#endif +#endif /*__STD_C*/ + + +/* + Void_t* is the pointer type that malloc should say it returns +*/ + +#ifndef Void_t +#if (__STD_C || defined(WIN32)) +#define Void_t void +#else +#define Void_t char +#endif +#endif /*Void_t*/ + +#ifdef __cplusplus +extern "C" { +#endif + +/* define LACKS_UNISTD_H if your system does not have a . */ + +/* #define LACKS_UNISTD_H */ + +#include "tccboot.h" + +/* + Debugging: + + Because freed chunks may be overwritten with bookkeeping fields, this + malloc will often die when freed memory is overwritten by user + programs. This can be very effective (albeit in an annoying way) + in helping track down dangling pointers. + + If you compile with -DDEBUG, a number of assertion checks are + enabled that will catch more memory errors. You probably won't be + able to make much sense of the actual assertion errors, but they + should help you locate incorrectly overwritten memory. The + checking is fairly extensive, and will slow down execution + noticeably. Calling malloc_stats or mallinfo with DEBUG set will + attempt to check every non-mmapped allocated and free chunk in the + course of computing the summmaries. (By nature, mmapped regions + cannot be checked very much automatically.) + + Setting DEBUG may also be helpful if you are trying to modify + this code. The assertions in the check routines spell out in more + detail the assumptions and invariants underlying the algorithms. + + Setting DEBUG does NOT provide an automated mechanism for checking + that all accesses to malloced memory stay within their + bounds. However, there are several add-ons and adaptations of this + or other mallocs available that do this. +*/ + +#if DEBUG +#include +#else +#define assert(x) ((void)0) +#endif + +/* + The unsigned integer type used for comparing any two chunk sizes. + This should be at least as wide as size_t, but should not be signed. +*/ + +#ifndef CHUNK_SIZE_T +#define CHUNK_SIZE_T unsigned long +#endif + +/* + The unsigned integer type used to hold addresses when they are are + manipulated as integers. Except that it is not defined on all + systems, intptr_t would suffice. +*/ +#ifndef PTR_UINT +#define PTR_UINT unsigned long +#endif + + +/* + INTERNAL_SIZE_T is the word-size used for internal bookkeeping + of chunk sizes. + + The default version is the same as size_t. + + While not strictly necessary, it is best to define this as an + unsigned type, even if size_t is a signed type. This may avoid some + artificial size limitations on some systems. + + On a 64-bit machine, you may be able to reduce malloc overhead by + defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the + expense of not being able to handle more than 2^32 of malloced + space. If this limitation is acceptable, you are encouraged to set + this unless you are on a platform requiring 16byte alignments. In + this case the alignment requirements turn out to negate any + potential advantages of decreasing size_t word size. 
+ + Implementors: Beware of the possible combinations of: + - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits, + and might be the same width as int or as long + - size_t might have different width and signedness as INTERNAL_SIZE_T + - int and long might be 32 or 64 bits, and might be the same width + To deal with this, most comparisons and difference computations + among INTERNAL_SIZE_Ts should cast them to CHUNK_SIZE_T, being + aware of the fact that casting an unsigned int to a wider long does + not sign-extend. (This also makes checking for negative numbers + awkward.) Some of these casts result in harmless compiler warnings + on some systems. +*/ + +#ifndef INTERNAL_SIZE_T +#define INTERNAL_SIZE_T size_t +#endif + +/* The corresponding word size */ +#define SIZE_SZ (sizeof(INTERNAL_SIZE_T)) + + + +/* + MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks. + It must be a power of two at least 2 * SIZE_SZ, even on machines + for which smaller alignments would suffice. It may be defined as + larger than this though. Note however that code and data structures + are optimized for the case of 8-byte alignment. +*/ + + +#ifndef MALLOC_ALIGNMENT +#define MALLOC_ALIGNMENT (2 * SIZE_SZ) +#endif + +/* The corresponding bit mask value */ +#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1) + + + +/* + REALLOC_ZERO_BYTES_FREES should be set if a call to + realloc with zero bytes should be the same as a call to free. + Some people think it should. Otherwise, since this malloc + returns a unique pointer for malloc(0), so does realloc(p, 0). +*/ + +/* #define REALLOC_ZERO_BYTES_FREES */ + +/* + TRIM_FASTBINS controls whether free() of a very small chunk can + immediately lead to trimming. Setting to true (1) can reduce memory + footprint, but will almost always slow down programs that use a lot + of small chunks. + + Define this only if you are willing to give up some speed to more + aggressively reduce system-level memory footprint when releasing + memory in programs that use many small chunks. You can get + essentially the same effect by setting MXFAST to 0, but this can + lead to even greater slowdowns in programs using many small chunks. + TRIM_FASTBINS is an in-between compile-time option, that disables + only those chunks bordering topmost memory from being placed in + fastbins. +*/ + +#ifndef TRIM_FASTBINS +#define TRIM_FASTBINS 0 +#endif + + +/* + USE_DL_PREFIX will prefix all public routines with the string 'dl'. + This is necessary when you only want to use this malloc in one part + of a program, using your regular system malloc elsewhere. +*/ + +/* #define USE_DL_PREFIX */ + + +/* + USE_MALLOC_LOCK causes wrapper functions to surround each + callable routine with pthread mutex lock/unlock. + + USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined +*/ + + +/* #define USE_MALLOC_LOCK */ + + +/* + If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is + actually a wrapper function that first calls MALLOC_PREACTION, then + calls the internal routine, and follows it with + MALLOC_POSTACTION. This is needed for locking, but you can also use + this, without USE_MALLOC_LOCK, for purposes of interception, + instrumentation, etc. It is a sad fact that using wrappers often + noticeably degrades performance of malloc-intensive programs. +*/ + +#ifdef USE_MALLOC_LOCK +#define USE_PUBLIC_MALLOC_WRAPPERS +#else +/* #define USE_PUBLIC_MALLOC_WRAPPERS */ +#endif + + +/* + Two-phase name translation. + All of the actual routines are given mangled names. 
+ When wrappers are used, they become the public callable versions. + When DL_PREFIX is used, the callable names are prefixed. +*/ + +#ifndef USE_PUBLIC_MALLOC_WRAPPERS +#define cALLOc public_cALLOc +#define fREe public_fREe +#define cFREe public_cFREe +#define mALLOc public_mALLOc +#define mEMALIGn public_mEMALIGn +#define rEALLOc public_rEALLOc +#define vALLOc public_vALLOc +#define pVALLOc public_pVALLOc +#define mALLINFo public_mALLINFo +#define mALLOPt public_mALLOPt +#define mTRIm public_mTRIm +#define mSTATs public_mSTATs +#define mUSABLe public_mUSABLe +#define iCALLOc public_iCALLOc +#define iCOMALLOc public_iCOMALLOc +#endif + +#ifdef USE_DL_PREFIX +#define public_cALLOc dlcalloc +#define public_fREe dlfree +#define public_cFREe dlcfree +#define public_mALLOc dlmalloc +#define public_mEMALIGn dlmemalign +#define public_rEALLOc dlrealloc +#define public_vALLOc dlvalloc +#define public_pVALLOc dlpvalloc +#define public_mALLINFo dlmallinfo +#define public_mALLOPt dlmallopt +#define public_mTRIm dlmalloc_trim +#define public_mSTATs dlmalloc_stats +#define public_mUSABLe dlmalloc_usable_size +#define public_iCALLOc dlindependent_calloc +#define public_iCOMALLOc dlindependent_comalloc +#else /* USE_DL_PREFIX */ +#define public_cALLOc calloc +#define public_fREe free +#define public_cFREe cfree +#define public_mALLOc malloc +#define public_mEMALIGn memalign +#define public_rEALLOc realloc +#define public_vALLOc valloc +#define public_pVALLOc pvalloc +#define public_mALLINFo mallinfo +#define public_mALLOPt mallopt +#define public_mTRIm malloc_trim +#define public_mSTATs malloc_stats +#define public_mUSABLe malloc_usable_size +#define public_iCALLOc independent_calloc +#define public_iCOMALLOc independent_comalloc +#endif /* USE_DL_PREFIX */ + + +/* + HAVE_MEMCPY should be defined if you are not otherwise using + ANSI STD C, but still have memcpy and memset in your C library + and want to use them in calloc and realloc. Otherwise simple + macro versions are defined below. + + USE_MEMCPY should be defined as 1 if you actually want to + have memset and memcpy called. People report that the macro + versions are faster than libc versions on some systems. + + Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks + (of <= 36 bytes) are manually unrolled in realloc and calloc. +*/ + +#define HAVE_MEMCPY + +#ifndef USE_MEMCPY +#ifdef HAVE_MEMCPY +#define USE_MEMCPY 1 +#else +#define USE_MEMCPY 0 +#endif +#endif + + +/* + MALLOC_FAILURE_ACTION is the action to take before "return 0" when + malloc fails to be able to return memory, either because memory is + exhausted or because of illegal arguments. + + By default, sets errno if running on STD_C platform, else does nothing. +*/ + +#ifndef MALLOC_FAILURE_ACTION +#if __STD_C +#define MALLOC_FAILURE_ACTION \ + errno = ENOMEM; + +#else +#define MALLOC_FAILURE_ACTION +#endif +#endif + +/* + MORECORE-related declarations. By default, rely on sbrk +*/ + + +#ifdef LACKS_UNISTD_H +#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) +#if __STD_C +extern Void_t* sbrk(ptrdiff_t); +#else +extern Void_t* sbrk(); +#endif +#endif +#endif + +/* + MORECORE is the name of the routine to call to obtain more memory + from the system. See below for general guidance on writing + alternative MORECORE functions, as well as a version for WIN32 and a + sample version for pre-OSX macos. +*/ + +#ifndef MORECORE +#define MORECORE sbrk +#endif + +/* + MORECORE_FAILURE is the value returned upon failure of MORECORE + as well as mmap. 
Since it cannot be an otherwise valid memory address, + and must reflect values of standard sys calls, you probably ought not + try to redefine it. +*/ + +#ifndef MORECORE_FAILURE +#define MORECORE_FAILURE (-1) +#endif + +/* + If MORECORE_CONTIGUOUS is true, take advantage of fact that + consecutive calls to MORECORE with positive arguments always return + contiguous increasing addresses. This is true of unix sbrk. Even + if not defined, when regions happen to be contiguous, malloc will + permit allocations spanning regions obtained from different + calls. But defining this when applicable enables some stronger + consistency checks and space efficiencies. +*/ + +#ifndef MORECORE_CONTIGUOUS +#define MORECORE_CONTIGUOUS 1 +#endif + +/* + Define MORECORE_CANNOT_TRIM if your version of MORECORE + cannot release space back to the system when given negative + arguments. This is generally necessary only if you are using + a hand-crafted MORECORE function that cannot handle negative arguments. +*/ + +/* #define MORECORE_CANNOT_TRIM */ + + +/* + Define HAVE_MMAP as true to optionally make malloc() use mmap() to + allocate very large blocks. These will be returned to the + operating system immediately after a free(). Also, if mmap + is available, it is used as a backup strategy in cases where + MORECORE fails to provide space from system. + + This malloc is best tuned to work with mmap for large requests. + If you do not have mmap, operations involving very large chunks (1MB + or so) may be slower than you'd like. +*/ + +#if HAVE_MMAP +/* + Standard unix mmap using /dev/zero clears memory so calloc doesn't + need to. +*/ + +#ifndef MMAP_CLEARS +#define MMAP_CLEARS 1 +#endif + +#else /* no mmap */ +#ifndef MMAP_CLEARS +#define MMAP_CLEARS 0 +#endif +#endif + + +/* + MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if + sbrk fails, and mmap is used as a backup (which is done only if + HAVE_MMAP). The value must be a multiple of page size. This + backup strategy generally applies only when systems have "holes" in + address space, so sbrk cannot perform contiguous expansion, but + there is still space available on system. On systems for which + this is known to be useful (i.e. most linux kernels), this occurs + only when programs allocate huge amounts of memory. Between this, + and the fact that mmap regions tend to be limited, the size should + be large, to avoid too many mmap calls and thus avoid running out + of kernel resources. +*/ + +#ifndef MMAP_AS_MORECORE_SIZE +#define MMAP_AS_MORECORE_SIZE (1024 * 1024) +#endif + +/* + Define HAVE_MREMAP to make realloc() use mremap() to re-allocate + large blocks. This is currently only possible on Linux with + kernel versions newer than 1.3.77. +*/ + +#ifndef HAVE_MREMAP +#ifdef linux +#define HAVE_MREMAP 1 +#else +#define HAVE_MREMAP 0 +#endif + +#endif /* HAVE_MMAP */ + + +/* + The system page size. To the extent possible, this malloc manages + memory from the system in page-size units. Note that this value is + cached during initialization into a field of malloc_state. So even + if malloc_getpagesize is a function, it is only called once. + + The following mechanics for getpagesize were adapted from bsd/gnu + getpagesize.h. If none of the system-probes here apply, a value of + 4096 is used, which should be OK: If they don't apply, then using + the actual value probably doesn't impact performance. 
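
+
+  For illustration only: on a hosted POSIX system the probe typically
+  reduces to a sketch like
+
+    #ifndef malloc_getpagesize
+    #include <unistd.h>
+    #define malloc_getpagesize sysconf(_SC_PAGESIZE)
+    #endif
+
+  whereas this kernel-hosted build sidesteps the probes entirely and
+  defines malloc_getpagesize as the kernel's PAGE_SIZE constant, just
+  below.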
+*/ + + +#define malloc_getpagesize PAGE_SIZE + +/* + This version of malloc supports the standard SVID/XPG mallinfo + routine that returns a struct containing usage properties and + statistics. It should work on any SVID/XPG compliant system that has + a /usr/include/malloc.h defining struct mallinfo. (If you'd like to + install such a thing yourself, cut out the preliminary declarations + as described above and below and save them in a malloc.h file. But + there's no compelling reason to bother to do this.) + + The main declaration needed is the mallinfo struct that is returned + (by-copy) by mallinfo(). The SVID/XPG malloinfo struct contains a + bunch of fields that are not even meaningful in this version of + malloc. These fields are are instead filled by mallinfo() with + other numbers that might be of interest. + + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a + /usr/include/malloc.h file that includes a declaration of struct + mallinfo. If so, it is included; else an SVID2/XPG2 compliant + version is declared below. These must be precisely the same for + mallinfo() to work. The original SVID version of this struct, + defined on most systems with mallinfo, declares all fields as + ints. But some others define as unsigned long. If your system + defines the fields using a type of different width than listed here, + you must #include your system version and #define + HAVE_USR_INCLUDE_MALLOC_H. +*/ + +/* #define HAVE_USR_INCLUDE_MALLOC_H */ + +#ifdef HAVE_USR_INCLUDE_MALLOC_H +#include "/usr/include/malloc.h" +#else + +/* SVID2/XPG mallinfo structure */ + +struct mallinfo { + int arena; /* non-mmapped space allocated from system */ + int ordblks; /* number of free chunks */ + int smblks; /* number of fastbin blocks */ + int hblks; /* number of mmapped regions */ + int hblkhd; /* space in mmapped regions */ + int usmblks; /* maximum total allocated space */ + int fsmblks; /* space available in freed fastbin blocks */ + int uordblks; /* total allocated space */ + int fordblks; /* total free space */ + int keepcost; /* top-most, releasable (via malloc_trim) space */ +}; + +/* + SVID/XPG defines four standard parameter numbers for mallopt, + normally defined in malloc.h. Only one of these (M_MXFAST) is used + in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply, + so setting them has no effect. But this malloc also supports other + options in mallopt described below. +*/ +#endif + + +/* ---------- description of public routines ------------ */ + +/* + malloc(size_t n) + Returns a pointer to a newly allocated chunk of at least n bytes, or null + if no space is available. Additionally, on failure, errno is + set to ENOMEM on ANSI C systems. + + If n is zero, malloc returns a minumum-sized chunk. (The minimum + size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit + systems.) On most systems, size_t is an unsigned type, so calls + with negative arguments are interpreted as requests for huge amounts + of space, which will often fail. The maximum supported value of n + differs across systems, but is in all cases less than the maximum + representable value of a size_t. +*/ +#if __STD_C +Void_t* public_mALLOc(size_t); +#else +Void_t* public_mALLOc(); +#endif + +/* + free(Void_t* p) + Releases the chunk of memory pointed to by p, that had been previously + allocated using malloc or a related routine such as realloc. + It has no effect if p is null. It can have arbitrary (i.e., bad!) + effects if p has already been freed. 
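+
+  A cheap defensive idiom (a sketch, not something this malloc
+  requires) is to clear the pointer as soon as it is released, so
+  that a stray second free degenerates into the harmless null case:
+
+    free(p);
+    p = 0;    // free(0) does nothing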
+ + Unless disabled (using mallopt), freeing very large spaces will + when possible, automatically trigger operations that give + back unused memory to the system, thus reducing program footprint. +*/ +#if __STD_C +void public_fREe(Void_t*); +#else +void public_fREe(); +#endif + +/* + calloc(size_t n_elements, size_t element_size); + Returns a pointer to n_elements * element_size bytes, with all locations + set to zero. +*/ +#if __STD_C +Void_t* public_cALLOc(size_t, size_t); +#else +Void_t* public_cALLOc(); +#endif + +/* + realloc(Void_t* p, size_t n) + Returns a pointer to a chunk of size n that contains the same data + as does chunk p up to the minimum of (n, p's size) bytes, or null + if no space is available. + + The returned pointer may or may not be the same as p. The algorithm + prefers extending p when possible, otherwise it employs the + equivalent of a malloc-copy-free sequence. + + If p is null, realloc is equivalent to malloc. + + If space is not available, realloc returns null, errno is set (if on + ANSI) and p is NOT freed. + + if n is for fewer bytes than already held by p, the newly unused + space is lopped off and freed if possible. Unless the #define + REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of + zero (re)allocates a minimum-sized chunk. + + Large chunks that were internally obtained via mmap will always + be reallocated using malloc-copy-free sequences unless + the system supports MREMAP (currently only linux). + + The old unix realloc convention of allowing the last-free'd chunk + to be used as an argument to realloc is not supported. +*/ +#if __STD_C +Void_t* public_rEALLOc(Void_t*, size_t); +#else +Void_t* public_rEALLOc(); +#endif + +/* + memalign(size_t alignment, size_t n); + Returns a pointer to a newly allocated chunk of n bytes, aligned + in accord with the alignment argument. + + The alignment argument should be a power of two. If the argument is + not a power of two, the nearest greater power is used. + 8-byte alignment is guaranteed by normal malloc calls, so don't + bother calling memalign with an argument of 8 or less. + + Overreliance on memalign is a sure way to fragment space. +*/ +#if __STD_C +Void_t* public_mEMALIGn(size_t, size_t); +#else +Void_t* public_mEMALIGn(); +#endif + +/* + valloc(size_t n); + Equivalent to memalign(pagesize, n), where pagesize is the page + size of the system. If the pagesize is unknown, 4096 is used. +*/ +#if __STD_C +Void_t* public_vALLOc(size_t); +#else +Void_t* public_vALLOc(); +#endif + + + +/* + mallopt(int parameter_number, int parameter_value) + Sets tunable parameters The format is to provide a + (parameter-number, parameter-value) pair. mallopt then sets the + corresponding parameter to the argument value if it can (i.e., so + long as the value is meaningful), and returns 1 if successful else + 0. SVID/XPG/ANSI defines four standard param numbers for mallopt, + normally defined in malloc.h. Only one of these (M_MXFAST) is used + in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply, + so setting them has no effect. But this malloc also supports four + other options in mallopt. See below for details. Briefly, supported + parameters are as follows (listed defaults are for "typical" + configurations). 
+
+  Symbol            param #   default    allowed param values
+  M_MXFAST          1         64         0-80  (0 disables fastbins)
+  M_TRIM_THRESHOLD  -1        256*1024   any   (-1U disables trimming)
+  M_TOP_PAD         -2        0          any
+  M_MMAP_THRESHOLD  -3        256*1024   any   (or 0 if no MMAP support)
+  M_MMAP_MAX        -4        65536      any   (0 disables use of mmap)
+*/
+#if __STD_C
+int public_mALLOPt(int, int);
+#else
+int public_mALLOPt();
+#endif
+
+
+/*
+  mallinfo()
+  Returns (by copy) a struct containing various summary statistics:
+
+  arena:     current total non-mmapped bytes allocated from system
+  ordblks:   the number of free chunks
+  smblks:    the number of fastbin blocks (i.e., small chunks that
+               have been freed but not yet reused or consolidated)
+  hblks:     current number of mmapped regions
+  hblkhd:    total bytes held in mmapped regions
+  usmblks:   the maximum total allocated space. This will be greater
+               than current total if trimming has occurred.
+  fsmblks:   total bytes held in fastbin blocks
+  uordblks:  current total allocated space (normal or mmapped)
+  fordblks:  total free space
+  keepcost:  the maximum number of bytes that could ideally be released
+               back to system via malloc_trim. ("ideally" means that
+               it ignores page restrictions etc.)
+
+  Because these fields are ints, but internal bookkeeping may
+  be kept as longs, the reported values may wrap around zero and
+  thus be inaccurate.
+*/
+#if __STD_C
+struct mallinfo public_mALLINFo(void);
+#else
+struct mallinfo public_mALLINFo();
+#endif
+
+/*
+  independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
+
+  independent_calloc is similar to calloc, but instead of returning a
+  single cleared space, it returns an array of pointers to n_elements
+  independent elements that can hold contents of size elem_size, each
+  of which starts out cleared, and can be independently freed,
+  realloc'ed etc. The elements are guaranteed to be adjacently
+  allocated (this is not guaranteed to occur with multiple callocs or
+  mallocs), which may also improve cache locality in some
+  applications.
+
+  The "chunks" argument is optional (i.e., may be null, which is
+  probably the most typical usage). If it is null, the returned array
+  is itself dynamically allocated and should also be freed when it is
+  no longer needed. Otherwise, the chunks array must be of at least
+  n_elements in length. It is filled in with the pointers to the
+  chunks.
+
+  In either case, independent_calloc returns this pointer array, or
+  null if the allocation failed. If n_elements is zero and "chunks"
+  is null, it returns a chunk representing an array with zero elements
+  (which should be freed if not wanted).
+
+  Each element must be individually freed when it is no longer
+  needed. If you'd like to instead be able to free all at once, you
+  should instead use regular calloc and assign pointers into this
+  space to represent elements. (In this case though, you cannot
+  independently free elements.)
+
+  independent_calloc simplifies and speeds up implementations of many
+  kinds of pools. It may also be useful when constructing large data
+  structures that initially have a fixed number of fixed-sized nodes,
+  but the number is not known at compile time, and some of the nodes
+  may later need to be freed. For example:
+
+  struct Node { int item; struct Node* next; };
+
+  struct Node* build_list() {
+    struct Node** pool;
+    int i, n;
+    n = read_number_of_nodes_needed();
+    if (n <= 0) return 0;
+    pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
+    if (pool == 0) die();
+    // organize into a linked list...
+ struct Node* first = pool[0]; + for (i = 0; i < n-1; ++i) + pool[i]->next = pool[i+1]; + free(pool); // Can now free the array (or not, if it is needed later) + return first; + } +*/ +#if __STD_C +Void_t** public_iCALLOc(size_t, size_t, Void_t**); +#else +Void_t** public_iCALLOc(); +#endif + +/* + independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]); + + independent_comalloc allocates, all at once, a set of n_elements + chunks with sizes indicated in the "sizes" array. It returns + an array of pointers to these elements, each of which can be + independently freed, realloc'ed etc. The elements are guaranteed to + be adjacently allocated (this is not guaranteed to occur with + multiple callocs or mallocs), which may also improve cache locality + in some applications. + + The "chunks" argument is optional (i.e., may be null). If it is null + the returned array is itself dynamically allocated and should also + be freed when it is no longer needed. Otherwise, the chunks array + must be of at least n_elements in length. It is filled in with the + pointers to the chunks. + + In either case, independent_comalloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and chunks is + null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be individually freed when it is no longer + needed. If you'd like to instead be able to free all at once, you + should instead use a single regular malloc, and assign pointers at + particular offsets in the aggregate space. (In this case though, you + cannot independently free elements.) + + independent_comallac differs from independent_calloc in that each + element may have a different size, and also that it does not + automatically clear elements. + + independent_comalloc can be used to speed up allocation in cases + where several structs or objects must always be allocated at the + same time. For example: + + struct Head { ... } + struct Foot { ... } + + void send_message(char* msg) { + int msglen = strlen(msg); + size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; + void* chunks[3]; + if (independent_comalloc(3, sizes, chunks) == 0) + die(); + struct Head* head = (struct Head*)(chunks[0]); + char* body = (char*)(chunks[1]); + struct Foot* foot = (struct Foot*)(chunks[2]); + // ... + } + + In general though, independent_comalloc is worth using only for + larger values of n_elements. For small values, you probably won't + detect enough difference from series of malloc calls to bother. + + Overuse of independent_comalloc can increase overall memory usage, + since it cannot reuse existing noncontiguous small chunks that + might be available for some of the elements. +*/ +#if __STD_C +Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**); +#else +Void_t** public_iCOMALLOc(); +#endif + + +/* + pvalloc(size_t n); + Equivalent to valloc(minimum-page-that-holds(n)), that is, + round up n to nearest pagesize. + */ +#if __STD_C +Void_t* public_pVALLOc(size_t); +#else +Void_t* public_pVALLOc(); +#endif + +/* + cfree(Void_t* p); + Equivalent to free(p). + + cfree is needed/defined on some systems that pair it with calloc, + for odd historical reasons (such as: cfree is used in example + code in the first edition of K&R). 
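+
+  Operationally it amounts to a sketch as simple as:
+
+    void cfree(Void_t* p) { free(p); }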
+*/ +#if __STD_C +void public_cFREe(Void_t*); +#else +void public_cFREe(); +#endif + +/* + malloc_trim(size_t pad); + + If possible, gives memory back to the system (via negative + arguments to sbrk) if there is unused memory at the `high' end of + the malloc pool. You can call this after freeing large blocks of + memory to potentially reduce the system-level memory requirements + of a program. However, it cannot guarantee to reduce memory. Under + some allocation patterns, some large free blocks of memory will be + locked between two used chunks, so they cannot be given back to + the system. + + The `pad' argument to malloc_trim represents the amount of free + trailing space to leave untrimmed. If this argument is zero, + only the minimum amount of memory to maintain internal data + structures will be left (one page or less). Non-zero arguments + can be supplied to maintain enough trailing space to service + future expected allocations without having to re-obtain memory + from the system. + + Malloc_trim returns 1 if it actually released any memory, else 0. + On systems that do not support "negative sbrks", it will always + rreturn 0. +*/ +#if __STD_C +int public_mTRIm(size_t); +#else +int public_mTRIm(); +#endif + +/* + malloc_usable_size(Void_t* p); + + Returns the number of bytes you can actually use in + an allocated chunk, which may be more than you requested (although + often not) due to alignment and minimum size constraints. + You can use this many bytes without worrying about + overwriting other allocated objects. This is not a particularly great + programming practice. malloc_usable_size can be more useful in + debugging and assertions, for example: + + p = malloc(n); + assert(malloc_usable_size(p) >= 256); + +*/ +#if __STD_C +size_t public_mUSABLe(Void_t*); +#else +size_t public_mUSABLe(); +#endif + +/* + malloc_stats(); + Prints on stderr the amount of space obtained from the system (both + via sbrk and mmap), the maximum amount (which may be more than + current if malloc_trim and/or munmap got called), and the current + number of bytes allocated via malloc (or realloc, etc) but not yet + freed. Note that this is the number of bytes allocated, not the + number requested. It will be larger than the number requested + because of alignment and bookkeeping overhead. Because it includes + alignment wastage as being in use, this figure may be greater than + zero even when no user-level chunks are allocated. + + The reported current and maximum system memory can be inaccurate if + a program makes other calls to system memory allocation functions + (normally sbrk) outside of malloc. + + malloc_stats prints only the most commonly interesting statistics. + More information can be obtained by calling mallinfo. + +*/ +#if __STD_C +void public_mSTATs(); +#else +void public_mSTATs(); +#endif + +/* mallopt tuning options */ + +/* + M_MXFAST is the maximum request size used for "fastbins", special bins + that hold returned chunks without consolidating their spaces. This + enables future requests for chunks of the same size to be handled + very quickly, but can increase fragmentation, and thus increase the + overall memory footprint of a program. + + This malloc manages fastbins very conservatively yet still + efficiently, so fragmentation is rarely a problem for values less + than or equal to the default. The maximum supported value of MXFAST + is 80. You wouldn't want it any higher than this anyway. 
Fastbins + are designed especially for use with many small structs, objects or + strings -- the default handles structs/objects/arrays with sizes up + to 16 4byte fields, or small strings representing words, tokens, + etc. Using fastbins for larger objects normally worsens + fragmentation without improving speed. + + M_MXFAST is set in REQUEST size units. It is internally used in + chunksize units, which adds padding and alignment. You can reduce + M_MXFAST to 0 to disable all use of fastbins. This causes the malloc + algorithm to be a closer approximation of fifo-best-fit in all cases, + not just for larger requests, but will generally cause it to be + slower. +*/ + + +/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */ +#ifndef M_MXFAST +#define M_MXFAST 1 +#endif + +#ifndef DEFAULT_MXFAST +#define DEFAULT_MXFAST 64 +#endif + + +/* + M_TRIM_THRESHOLD is the maximum amount of unused top-most memory + to keep before releasing via malloc_trim in free(). + + Automatic trimming is mainly useful in long-lived programs. + Because trimming via sbrk can be slow on some systems, and can + sometimes be wasteful (in cases where programs immediately + afterward allocate more large chunks) the value should be high + enough so that your overall system performance would improve by + releasing this much memory. + + The trim threshold and the mmap control parameters (see below) + can be traded off with one another. Trimming and mmapping are + two different ways of releasing unused memory back to the + system. Between these two, it is often possible to keep + system-level demands of a long-lived program down to a bare + minimum. For example, in one test suite of sessions measuring + the XF86 X server on Linux, using a trim threshold of 128K and a + mmap threshold of 192K led to near-minimal long term resource + consumption. + + If you are using this malloc in a long-lived program, it should + pay to experiment with these values. As a rough guide, you + might set to a value close to the average size of a process + (program) running on your system. Releasing this much memory + would allow such a process to run in memory. Generally, it's + worth it to tune for trimming rather tham memory mapping when a + program undergoes phases where several large chunks are + allocated and released in ways that can reuse each other's + storage, perhaps mixed with phases where there are no such + chunks at all. And in well-behaved long-lived programs, + controlling release of large blocks via trimming versus mapping + is usually faster. + + However, in most programs, these parameters serve mainly as + protection against the system-level effects of carrying around + massive amounts of unneeded memory. Since frequent calls to + sbrk, mmap, and munmap otherwise degrade performance, the default + parameters are set to relatively high values that serve only as + safeguards. + + The trim value must be greater than page size to have any useful + effect. To disable trimming completely, you can set to + (unsigned long)(-1) + + Trim settings interact with fastbin (MXFAST) settings: Unless + TRIM_FASTBINS is defined, automatic trimming never takes place upon + freeing a chunk with size less than or equal to MXFAST. Trimming is + instead delayed until subsequent freeing of larger chunks. However, + you can still force an attempted trim by calling malloc_trim. + + Also, trimming is not generally possible in cases where + the main arena is obtained via mmap. 
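+
+  For example, a long-lived server that prefers to return memory to
+  the system eagerly might shrink the threshold at startup (a sketch;
+  the value is arbitrary):
+
+    mallopt(M_TRIM_THRESHOLD, 64*1024);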
+ + Note that the trick some people use of mallocing a huge space and + then freeing it at program startup, in an attempt to reserve system + memory, doesn't have the intended effect under automatic trimming, + since that memory will immediately be returned to the system. +*/ + +#define M_TRIM_THRESHOLD -1 + +#ifndef DEFAULT_TRIM_THRESHOLD +#define DEFAULT_TRIM_THRESHOLD (256 * 1024) +#endif + +/* + M_TOP_PAD is the amount of extra `padding' space to allocate or + retain whenever sbrk is called. It is used in two ways internally: + + * When sbrk is called to extend the top of the arena to satisfy + a new malloc request, this much padding is added to the sbrk + request. + + * When malloc_trim is called automatically from free(), + it is used as the `pad' argument. + + In both cases, the actual amount of padding is rounded + so that the end of the arena is always a system page boundary. + + The main reason for using padding is to avoid calling sbrk so + often. Having even a small pad greatly reduces the likelihood + that nearly every malloc request during program start-up (or + after trimming) will invoke sbrk, which needlessly wastes + time. + + Automatic rounding-up to page-size units is normally sufficient + to avoid measurable overhead, so the default is 0. However, in + systems where sbrk is relatively slow, it can pay to increase + this value, at the expense of carrying around more memory than + the program needs. +*/ + +#define M_TOP_PAD -2 + +#ifndef DEFAULT_TOP_PAD +#define DEFAULT_TOP_PAD (0) +#endif + +/* + M_MMAP_THRESHOLD is the request size threshold for using mmap() + to service a request. Requests of at least this size that cannot + be allocated using already-existing space will be serviced via mmap. + (If enough normal freed space already exists it is used instead.) + + Using mmap segregates relatively large chunks of memory so that + they can be individually obtained and released from the host + system. A request serviced through mmap is never reused by any + other request (at least not directly; the system may just so + happen to remap successive requests to the same locations). + + Segregating space in this way has the benefits that: + + 1. Mmapped space can ALWAYS be individually released back + to the system, which helps keep the system level memory + demands of a long-lived program low. + 2. Mapped memory can never become `locked' between + other chunks, as can happen with normally allocated chunks, which + means that even trimming via malloc_trim would not release them. + 3. On some systems with "holes" in address spaces, mmap can obtain + memory that sbrk cannot. + + However, it has the disadvantages that: + + 1. The space cannot be reclaimed, consolidated, and then + used to service later requests, as happens with normal chunks. + 2. It can lead to more wastage because of mmap page alignment + requirements + 3. It causes malloc performance to be more dependent on host + system memory management support routines which may vary in + implementation quality and may impose arbitrary + limitations. Generally, servicing a request via normal + malloc steps is faster than going through a system's mmap. + + The advantages of mmap nearly always outweigh disadvantages for + "large" chunks, but the value of "large" varies across systems. The + default is an empirically derived value that works well in most + systems. 
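+
+  The threshold is likewise tunable at run time; for instance, a
+  program that allocates and promptly frees large buffers might lower
+  it (a sketch; the value is arbitrary):
+
+    mallopt(M_MMAP_THRESHOLD, 128*1024);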
+*/
+
+#define M_MMAP_THRESHOLD      -3
+
+#ifndef DEFAULT_MMAP_THRESHOLD
+#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
+#endif
+
+/*
+  M_MMAP_MAX is the maximum number of requests to simultaneously
+  service using mmap. This parameter exists because some systems
+  have a limited number of internal tables for use by mmap, and
+  using more than a few of them may degrade performance.
+
+  The default is set to a value that serves only as a safeguard.
+  Setting to 0 disables use of mmap for servicing large requests. If
+  HAVE_MMAP is not set, the default value is 0, and attempts to set it
+  to non-zero values in mallopt will fail.
+*/
+
+#define M_MMAP_MAX            -4
+
+#ifndef DEFAULT_MMAP_MAX
+#if HAVE_MMAP
+#define DEFAULT_MMAP_MAX      (65536)
+#else
+#define DEFAULT_MMAP_MAX      (0)
+#endif
+#endif
+
+#ifdef __cplusplus
+};  /* end of extern "C" */
+#endif
+
+/*
+  ========================================================================
+  To make a fully customizable malloc.h header file, cut everything
+  above this line, put into file malloc.h, edit to suit, and #include it
+  on the next line, as well as in programs that use this malloc.
+  ========================================================================
+*/
+
+/* #include "malloc.h" */
+
+/* --------------------- public wrappers ---------------------- */
+
+#ifdef USE_PUBLIC_MALLOC_WRAPPERS
+
+/* Declare all routines as internal */
+#if __STD_C
+static Void_t*  mALLOc(size_t);
+static void     fREe(Void_t*);
+static Void_t*  rEALLOc(Void_t*, size_t);
+static Void_t*  mEMALIGn(size_t, size_t);
+static Void_t*  vALLOc(size_t);
+static Void_t*  pVALLOc(size_t);
+static Void_t*  cALLOc(size_t, size_t);
+static Void_t** iCALLOc(size_t, size_t, Void_t**);
+static Void_t** iCOMALLOc(size_t, size_t*, Void_t**);
+static void     cFREe(Void_t*);
+static int      mTRIm(size_t);
+static size_t   mUSABLe(Void_t*);
+static void     mSTATs();
+static int      mALLOPt(int, int);
+static struct mallinfo mALLINFo(void);
+#else
+static Void_t*  mALLOc();
+static void     fREe();
+static Void_t*  rEALLOc();
+static Void_t*  mEMALIGn();
+static Void_t*  vALLOc();
+static Void_t*  pVALLOc();
+static Void_t*  cALLOc();
+static Void_t** iCALLOc();
+static Void_t** iCOMALLOc();
+static void     cFREe();
+static int      mTRIm();
+static size_t   mUSABLe();
+static void     mSTATs();
+static int      mALLOPt();
+static struct mallinfo mALLINFo();
+#endif
+
+/*
+  MALLOC_PREACTION and MALLOC_POSTACTION should be
+  defined to return 0 on success, and nonzero on failure.
+  The return value of MALLOC_POSTACTION is currently ignored
+  in wrapper functions since there is no reasonable default
+  action to take on failure.
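+
+  As an interception example, a call counter can be wired in without
+  USE_MALLOC_LOCK by defining trivial actions (a sketch; mall_count
+  is a made-up variable, and both actions report success by yielding
+  0):
+
+    static long mall_count = 0;
+    #define MALLOC_PREACTION   (mall_count++, 0)
+    #define MALLOC_POSTACTION  (0)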
+*/ + + +#ifdef USE_MALLOC_LOCK + +#ifdef WIN32 + +static int mALLOC_MUTEx; +#define MALLOC_PREACTION slwait(&mALLOC_MUTEx) +#define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx) + +#else + +#include + +static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER; + +#define MALLOC_PREACTION pthread_mutex_lock(&mALLOC_MUTEx) +#define MALLOC_POSTACTION pthread_mutex_unlock(&mALLOC_MUTEx) + +#endif /* USE_MALLOC_LOCK */ + +#else + +/* Substitute anything you like for these */ + +#define MALLOC_PREACTION (0) +#define MALLOC_POSTACTION (0) + +#endif + +Void_t* public_mALLOc(size_t bytes) { + Void_t* m; + if (MALLOC_PREACTION != 0) { + return 0; + } + m = mALLOc(bytes); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + +void public_fREe(Void_t* m) { + if (MALLOC_PREACTION != 0) { + return; + } + fREe(m); + if (MALLOC_POSTACTION != 0) { + } +} + +Void_t* public_rEALLOc(Void_t* m, size_t bytes) { + if (MALLOC_PREACTION != 0) { + return 0; + } + m = rEALLOc(m, bytes); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + +Void_t* public_mEMALIGn(size_t alignment, size_t bytes) { + Void_t* m; + if (MALLOC_PREACTION != 0) { + return 0; + } + m = mEMALIGn(alignment, bytes); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + +Void_t* public_vALLOc(size_t bytes) { + Void_t* m; + if (MALLOC_PREACTION != 0) { + return 0; + } + m = vALLOc(bytes); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + +Void_t* public_pVALLOc(size_t bytes) { + Void_t* m; + if (MALLOC_PREACTION != 0) { + return 0; + } + m = pVALLOc(bytes); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + +Void_t* public_cALLOc(size_t n, size_t elem_size) { + Void_t* m; + if (MALLOC_PREACTION != 0) { + return 0; + } + m = cALLOc(n, elem_size); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + + +Void_t** public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks) { + Void_t** m; + if (MALLOC_PREACTION != 0) { + return 0; + } + m = iCALLOc(n, elem_size, chunks); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + +Void_t** public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks) { + Void_t** m; + if (MALLOC_PREACTION != 0) { + return 0; + } + m = iCOMALLOc(n, sizes, chunks); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + +void public_cFREe(Void_t* m) { + if (MALLOC_PREACTION != 0) { + return; + } + cFREe(m); + if (MALLOC_POSTACTION != 0) { + } +} + +int public_mTRIm(size_t s) { + int result; + if (MALLOC_PREACTION != 0) { + return 0; + } + result = mTRIm(s); + if (MALLOC_POSTACTION != 0) { + } + return result; +} + +size_t public_mUSABLe(Void_t* m) { + size_t result; + if (MALLOC_PREACTION != 0) { + return 0; + } + result = mUSABLe(m); + if (MALLOC_POSTACTION != 0) { + } + return result; +} + +void public_mSTATs() { + if (MALLOC_PREACTION != 0) { + return; + } + mSTATs(); + if (MALLOC_POSTACTION != 0) { + } +} + +struct mallinfo public_mALLINFo() { + struct mallinfo m; + if (MALLOC_PREACTION != 0) { + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + return nm; + } + m = mALLINFo(); + if (MALLOC_POSTACTION != 0) { + } + return m; +} + +int public_mALLOPt(int p, int v) { + int result; + if (MALLOC_PREACTION != 0) { + return 0; + } + result = mALLOPt(p, v); + if (MALLOC_POSTACTION != 0) { + } + return result; +} + +#endif + + + +/* ------------- Optional versions of memcopy ---------------- */ + + +#if USE_MEMCPY + +/* + Note: memcpy is ONLY invoked with non-overlapping regions, + so the (usually slower) memmove is not needed. 
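+
+  (If you adapt this code so that the regions could ever overlap,
+  substitute the overlap-safe routine in the macro below, e.g. as a
+  sketch:
+
+    #define MALLOC_COPY(dest, src, nbytes)  memmove(dest, src, nbytes)
+
+  Here that is never necessary, since the regions are guaranteed not
+  to overlap.)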
+*/
+
+#define MALLOC_COPY(dest, src, nbytes)  memcpy(dest, src, nbytes)
+#define MALLOC_ZERO(dest, nbytes)       memset(dest, 0, nbytes)
+
+#else /* !USE_MEMCPY */
+
+/* Use Duff's device for good zeroing/copying performance. */
+
+#define MALLOC_ZERO(charp, nbytes)                                    \
+do {                                                                  \
+  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                   \
+  CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);              \
+  long mcn;                                                           \
+  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }     \
+  switch (mctmp) {                                                    \
+    case 0: for(;;) { *mzp++ = 0;                                     \
+    case 7:           *mzp++ = 0;                                     \
+    case 6:           *mzp++ = 0;                                     \
+    case 5:           *mzp++ = 0;                                     \
+    case 4:           *mzp++ = 0;                                     \
+    case 3:           *mzp++ = 0;                                     \
+    case 2:           *mzp++ = 0;                                     \
+    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }        \
+  }                                                                   \
+} while(0)
+
+#define MALLOC_COPY(dest,src,nbytes)                                  \
+do {                                                                  \
+  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                    \
+  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                   \
+  CHUNK_SIZE_T mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);              \
+  long mcn;                                                           \
+  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }     \
+  switch (mctmp) {                                                    \
+    case 0: for(;;) { *mcdst++ = *mcsrc++;                            \
+    case 7:           *mcdst++ = *mcsrc++;                            \
+    case 6:           *mcdst++ = *mcsrc++;                            \
+    case 5:           *mcdst++ = *mcsrc++;                            \
+    case 4:           *mcdst++ = *mcsrc++;                            \
+    case 3:           *mcdst++ = *mcsrc++;                            \
+    case 2:           *mcdst++ = *mcsrc++;                            \
+    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
+  }                                                                   \
+} while(0)
+
+#endif
+
+/* ------------------ MMAP support ------------------ */
+
+
+#if HAVE_MMAP
+
+#ifndef LACKS_FCNTL_H
+#include <fcntl.h>
+#endif
+
+#ifndef LACKS_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+
+#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+/*
+  Nearly all versions of mmap support MAP_ANONYMOUS,
+  so the following is unlikely to be needed, but is
+  supplied just in case.
+*/
+
+#ifndef MAP_ANONYMOUS
+
+static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
+
+#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
+ (dev_zero_fd = open("/dev/zero", O_RDWR), \
+  mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
+  mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
+
+#else
+
+#define MMAP(addr, size, prot, flags) \
+ (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
+
+#endif
+
+
+#endif /* HAVE_MMAP */
+
+
+/*
+  ----------------------- Chunk representations -----------------------
+*/
+
+
+/*
+  This struct declaration is misleading (but accurate and necessary).
+  It declares a "view" into memory allowing access to necessary
+  fields at known offsets from a given base. See explanation below.
+*/
+
+struct malloc_chunk {
+
+  INTERNAL_SIZE_T      prev_size;  /* Size of previous chunk (if free).  */
+  INTERNAL_SIZE_T      size;       /* Size in bytes, including overhead. */
+
+  struct malloc_chunk* fd;         /* double links -- used only if free. */
+  struct malloc_chunk* bk;
+};
+
+
+typedef struct malloc_chunk* mchunkptr;
+
+/*
+  malloc_chunk details:
+
+  (The following includes lightly edited explanations by Colin Plumb.)
+
+  Chunks of memory are maintained using a `boundary tag' method as
+  described in e.g., Knuth or Standish. (See the paper by Paul
+  Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
+  survey of such techniques.) Sizes of free chunks are stored both
+  in the front of each chunk and at the end. This makes
+  consolidating fragmented chunks into bigger chunks very fast. The
+  size fields also hold bits representing whether chunks are free or
+  in use.
+ + An allocated chunk looks like this: + + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk, if allocated | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | User data starts here... . + . . + . (malloc_usable_space() bytes) . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + + Where "chunk" is the front of the chunk for the purpose of most of + the malloc code, but "mem" is the pointer that is returned to the + user. "Nextchunk" is the beginning of the next contiguous chunk. + + Chunks always begin on even word boundries, so the mem portion + (which is returned to the user) is also on an even word boundary, and + thus at least double-word aligned. + + Free chunks are stored in circular doubly-linked lists, and look like this: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space (may be 0 bytes long) . + . . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + The P (PREV_INUSE) bit, stored in the unused low-order bit of the + chunk size (which is always a multiple of two words), is an in-use + bit for the *previous* chunk. If that bit is *clear*, then the + word before the current chunk size contains the previous chunk + size, and can be used to find the front of the previous chunk. + The very first chunk allocated always has this bit set, + preventing access to non-existent (or non-owned) memory. If + prev_inuse is set for any given chunk, then you CANNOT determine + the size of the previous chunk, and might even get a memory + addressing fault when trying to do so. + + Note that the `foot' of the current chunk is actually represented + as the prev_size of the NEXT chunk. This makes it easier to + deal with alignments etc but can be very confusing when trying + to extend or adapt this code. + + The two exceptions to all this are + + 1. The special chunk `top' doesn't bother using the + trailing size field since there is no next contiguous chunk + that would have to index off it. After initialization, `top' + is forced to always exist. If it would become less than + MINSIZE bytes long, it is replenished. + + 2. Chunks allocated via mmap, which have the second-lowest-order + bit (IS_MMAPPED) set in their size fields. Because they are + allocated one-by-one, each must contain its own trailing size field. 
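+
+  A concrete reading of the tags (a sketch, assuming 4-byte
+  INTERNAL_SIZE_T): let a free 24-byte chunk start at address A, so
+  its successor starts at A+24. The size word at A+4 then holds 24|1
+  (PREV_INUSE is set, because a free chunk's predecessor is always in
+  use after consolidation), the foot at A+24 holds 24 as the
+  successor's prev_size, and the successor's size word at A+28 has
+  PREV_INUSE clear -- which is how free() knows the chunk at A can be
+  consolidated with it.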
+ +*/ + +/* + ---------- Size and alignment checks and conversions ---------- +*/ + +/* conversion from malloc headers to user pointers, and back */ + +#define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ)) +#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ)) + +/* The smallest possible chunk */ +#define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk)) + +/* The smallest size we can malloc is an aligned minimal chunk */ + +#define MINSIZE \ + (CHUNK_SIZE_T)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)) + +/* Check if m has acceptable alignment */ + +#define aligned_OK(m) (((PTR_UINT)((m)) & (MALLOC_ALIGN_MASK)) == 0) + + +/* + Check if a request is so large that it would wrap around zero when + padded and aligned. To simplify some other code, the bound is made + low enough so that adding MINSIZE will also not wrap around sero. +*/ + +#define REQUEST_OUT_OF_RANGE(req) \ + ((CHUNK_SIZE_T)(req) >= \ + (CHUNK_SIZE_T)(INTERNAL_SIZE_T)(-2 * MINSIZE)) + +/* pad request bytes into a usable size -- internal version */ + +#define request2size(req) \ + (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \ + MINSIZE : \ + ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK) + +/* Same, except also perform argument check */ + +#define checked_request2size(req, sz) \ + if (REQUEST_OUT_OF_RANGE(req)) { \ + MALLOC_FAILURE_ACTION; \ + return 0; \ + } \ + (sz) = request2size(req); + +/* + --------------- Physical chunk operations --------------- +*/ + + +/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ +#define PREV_INUSE 0x1 + +/* extract inuse bit of previous chunk */ +#define prev_inuse(p) ((p)->size & PREV_INUSE) + + +/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */ +#define IS_MMAPPED 0x2 + +/* check for mmap()'ed chunk */ +#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED) + +/* + Bits to mask off when extracting size + + Note: IS_MMAPPED is intentionally not masked off from size field in + macros for which mmapped chunks should never be seen. This should + cause helpful core dumps to occur if it is tried by accident by + people extending or adapting this malloc. +*/ +#define SIZE_BITS (PREV_INUSE|IS_MMAPPED) + +/* Get size, ignoring use bits */ +#define chunksize(p) ((p)->size & ~(SIZE_BITS)) + + +/* Ptr to next physical malloc_chunk. 
*/ +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) )) + +/* Ptr to previous physical malloc_chunk */ +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) )) + +/* Treat space at ptr + offset as a chunk */ +#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) + +/* extract p's inuse bit */ +#define inuse(p)\ +((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE) + +/* set/clear chunk as being inuse without otherwise disturbing */ +#define set_inuse(p)\ +((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE + +#define clear_inuse(p)\ +((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE) + + +/* check/set/clear inuse bits in known places */ +#define inuse_bit_at_offset(p, s)\ + (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE) + +#define set_inuse_bit_at_offset(p, s)\ + (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE) + +#define clear_inuse_bit_at_offset(p, s)\ + (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE)) + + +/* Set size at head, without disturbing its use bit */ +#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s))) + +/* Set size/use field */ +#define set_head(p, s) ((p)->size = (s)) + +/* Set size at footer (only when chunk is not in use) */ +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s)) + + +/* + -------------------- Internal data structures -------------------- + + All internal state is held in an instance of malloc_state defined + below. There are no other static variables, except in two optional + cases: + * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above. + * If HAVE_MMAP is true, but mmap doesn't support + MAP_ANONYMOUS, a dummy file descriptor for mmap. + + Beware of lots of tricks that minimize the total bookkeeping space + requirements. The result is a little over 1K bytes (for 4byte + pointers and size_t.) +*/ + +/* + Bins + + An array of bin headers for free chunks. Each bin is doubly + linked. The bins are approximately proportionally (log) spaced. + There are a lot of these bins (128). This may look excessive, but + works very well in practice. Most bins hold sizes that are + unusual as malloc request sizes, but are more usual for fragments + and consolidated sets of chunks, which is what these bins hold, so + they can be found quickly. All procedures maintain the invariant + that no consolidated chunk physically borders another one, so each + chunk in a list is known to be preceeded and followed by either + inuse chunks or the ends of memory. + + Chunks in bins are kept in size order, with ties going to the + approximately least recently used chunk. Ordering isn't needed + for the small bins, which all contain the same-sized chunks, but + facilitates best-fit allocation for larger chunks. These lists + are just sequential. Keeping them in order almost never requires + enough traversal to warrant using fancier ordered data + structures. + + Chunks of the same size are linked with the most + recently freed at the front, and allocations are taken from the + back. This results in LRU (FIFO) allocation order, which tends + to give each chunk an equal opportunity to be consolidated with + adjacent freed chunks, resulting in larger free chunks and less + fragmentation. + + To simplify use in double-linked lists, each bin header acts + as a malloc_chunk. This avoids special-casing for headers. 
+ But to conserve space and improve locality, we allocate + only the fd/bk pointers of bins, and then use repositioning tricks + to treat these as the fields of a malloc_chunk*. +*/ + +typedef struct malloc_chunk* mbinptr; + +/* addressing -- note that bin_at(0) does not exist */ +#define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1))) + +/* analog of ++bin */ +#define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1))) + +/* Reminders about list directionality within bins */ +#define first(b) ((b)->fd) +#define last(b) ((b)->bk) + +/* Take a chunk off a bin list */ +#define unlink(P, BK, FD) { \ + FD = P->fd; \ + BK = P->bk; \ + FD->bk = BK; \ + BK->fd = FD; \ +} + +/* + Indexing + + Bins for sizes < 512 bytes contain chunks of all the same size, spaced + 8 bytes apart. Larger bins are approximately logarithmically spaced: + + 64 bins of size 8 + 32 bins of size 64 + 16 bins of size 512 + 8 bins of size 4096 + 4 bins of size 32768 + 2 bins of size 262144 + 1 bin of size what's left + + The bins top out around 1MB because we expect to service large + requests via mmap. +*/ + +#define NBINS 96 +#define NSMALLBINS 32 +#define SMALLBIN_WIDTH 8 +#define MIN_LARGE_SIZE 256 + +#define in_smallbin_range(sz) \ + ((CHUNK_SIZE_T)(sz) < (CHUNK_SIZE_T)MIN_LARGE_SIZE) + +#define smallbin_index(sz) (((unsigned)(sz)) >> 3) + +/* + Compute index for size. We expect this to be inlined when + compiled with optimization, else not, which works out well. +*/ +static int largebin_index(unsigned int sz) { + unsigned int x = sz >> SMALLBIN_WIDTH; + unsigned int m; /* bit position of highest set bit of m */ + + if (x >= 0x10000) return NBINS-1; + + /* On intel, use BSRL instruction to find highest bit */ +#if defined(__GNUC__) && defined(i386) + + __asm__("bsrl %1,%0\n\t" + : "=r" (m) + : "g" (x)); + +#else + { + /* + Based on branch-free nlz algorithm in chapter 5 of Henry + S. Warren Jr's book "Hacker's Delight". + */ + + unsigned int n = ((x - 0x100) >> 16) & 8; + x <<= n; + m = ((x - 0x1000) >> 16) & 4; + n += m; + x <<= m; + m = ((x - 0x4000) >> 16) & 2; + n += m; + x = (x << m) >> 14; + m = 13 - n + (x & ~(x>>1)); + } +#endif + + /* Use next 2 bits to create finer-granularity bins */ + return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3); +} + +#define bin_index(sz) \ + ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz)) + +/* + FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the + first bin that is maintained in sorted order. This must + be the smallest size corresponding to a given bin. + + Normally, this should be MIN_LARGE_SIZE. But you can weaken + best fit guarantees to sometimes speed up malloc by increasing value. + Doing this means that malloc may choose a chunk that is + non-best-fitting by up to the width of the bin. + + Some useful cutoff values: + 512 - all bins sorted + 2560 - leaves bins <= 64 bytes wide unsorted + 12288 - leaves bins <= 512 bytes wide unsorted + 65536 - leaves bins <= 4096 bytes wide unsorted + 262144 - leaves bins <= 32768 bytes wide unsorted + -1 - no bins sorted (not recommended!) +*/ + +#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE +/* #define FIRST_SORTED_BIN_SIZE 65536 */ + +/* + Unsorted chunks + + All remainders from chunk splits, as well as all returned chunks, + are first placed in the "unsorted" bin. They are then placed + in regular bins after malloc gives them ONE chance to be used before + binning. 
So, basically, the unsorted_chunks list acts as a queue, + with chunks being placed on it in free (and malloc_consolidate), + and taken off (to be either used or placed in bins) in malloc. +*/ + +/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */ +#define unsorted_chunks(M) (bin_at(M, 1)) + +/* + Top + + The top-most available chunk (i.e., the one bordering the end of + available memory) is treated specially. It is never included in + any bin, is used only if no other chunk is available, and is + released back to the system if it is very large (see + M_TRIM_THRESHOLD). Because top initially + points to its own bin with initial zero size, thus forcing + extension on the first malloc request, we avoid having any special + code in malloc to check whether it even exists yet. But we still + need to do so when getting memory from system, so we make + initial_top treat the bin as a legal but unusable chunk during the + interval between initialization and the first call to + sYSMALLOc. (This is somewhat delicate, since it relies on + the 2 preceding words to be zero during this interval as well.) +*/ + +/* Conveniently, the unsorted bin can be used as dummy top on first call */ +#define initial_top(M) (unsorted_chunks(M)) + +/* + Binmap + + To help compensate for the large number of bins, a one-level index + structure is used for bin-by-bin searching. `binmap' is a + bitvector recording whether bins are definitely empty so they can + be skipped over during during traversals. The bits are NOT always + cleared as soon as bins are empty, but instead only + when they are noticed to be empty during traversal in malloc. +*/ + +/* Conservatively use 32 bits per map word, even if on 64bit system */ +#define BINMAPSHIFT 5 +#define BITSPERMAP (1U << BINMAPSHIFT) +#define BINMAPSIZE (NBINS / BITSPERMAP) + +#define idx2block(i) ((i) >> BINMAPSHIFT) +#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1)))) + +#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i)) +#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i))) +#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i)) + +/* + Fastbins + + An array of lists holding recently freed small chunks. Fastbins + are not doubly linked. It is faster to single-link them, and + since chunks are never removed from the middles of these lists, + double linking is not necessary. Also, unlike regular bins, they + are not even processed in FIFO order (they use faster LIFO) since + ordering doesn't much matter in the transient contexts in which + fastbins are normally used. + + Chunks in fastbins keep their inuse bit set, so they cannot + be consolidated with other free chunks. malloc_consolidate + releases all chunks in fastbins and consolidates them with + other free chunks. +*/ + +typedef struct malloc_chunk* mfastbinptr; + +/* offset 2 to use otherwise unindexable first 2 bins */ +#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2) + +/* The maximum fastbin request size we support */ +#define MAX_FAST_SIZE 80 + +#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1) + +/* + FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free() + that triggers automatic consolidation of possibly-surrounding + fastbin chunks. This is a heuristic, so the exact value should not + matter too much. It is defined at half the default trim threshold as a + compromise heuristic to only attempt consolidation if it is likely + to lead to trimming. 
However, it is not dynamically tunable, since + consolidation reduces fragmentation surrounding loarge chunks even + if trimming is not used. +*/ + +#define FASTBIN_CONSOLIDATION_THRESHOLD \ + ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1) + +/* + Since the lowest 2 bits in max_fast don't matter in size comparisons, + they are used as flags. +*/ + +/* + ANYCHUNKS_BIT held in max_fast indicates that there may be any + freed chunks at all. It is set true when entering a chunk into any + bin. +*/ + +#define ANYCHUNKS_BIT (1U) + +#define have_anychunks(M) (((M)->max_fast & ANYCHUNKS_BIT)) +#define set_anychunks(M) ((M)->max_fast |= ANYCHUNKS_BIT) +#define clear_anychunks(M) ((M)->max_fast &= ~ANYCHUNKS_BIT) + +/* + FASTCHUNKS_BIT held in max_fast indicates that there are probably + some fastbin chunks. It is set true on entering a chunk into any + fastbin, and cleared only in malloc_consolidate. +*/ + +#define FASTCHUNKS_BIT (2U) + +#define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT)) +#define set_fastchunks(M) ((M)->max_fast |= (FASTCHUNKS_BIT|ANYCHUNKS_BIT)) +#define clear_fastchunks(M) ((M)->max_fast &= ~(FASTCHUNKS_BIT)) + +/* + Set value of max_fast. + Use impossibly small value if 0. +*/ + +#define set_max_fast(M, s) \ + (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \ + ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT)) + +#define get_max_fast(M) \ + ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT)) + + +/* + morecore_properties is a status word holding dynamically discovered + or controlled properties of the morecore function +*/ + +#define MORECORE_CONTIGUOUS_BIT (1U) + +#define contiguous(M) \ + (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT)) +#define noncontiguous(M) \ + (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT) == 0) +#define set_contiguous(M) \ + ((M)->morecore_properties |= MORECORE_CONTIGUOUS_BIT) +#define set_noncontiguous(M) \ + ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT) + + +/* + ----------- Internal state representation and initialization ----------- +*/ + +struct malloc_state { + + /* The maximum chunk size to be eligible for fastbin */ + INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */ + + /* Fastbins */ + mfastbinptr fastbins[NFASTBINS]; + + /* Base of the topmost chunk -- not otherwise kept in a bin */ + mchunkptr top; + + /* The remainder from the most recent split of a small request */ + mchunkptr last_remainder; + + /* Normal bins packed as described above */ + mchunkptr bins[NBINS * 2]; + + /* Bitmap of bins. Trailing zero map handles cases of largest binned size */ + unsigned int binmap[BINMAPSIZE+1]; + + /* Tunable parameters */ + CHUNK_SIZE_T trim_threshold; + INTERNAL_SIZE_T top_pad; + INTERNAL_SIZE_T mmap_threshold; + + /* Memory map support */ + int n_mmaps; + int n_mmaps_max; + int max_n_mmaps; + + /* Cache malloc_getpagesize */ + unsigned int pagesize; + + /* Track properties of MORECORE */ + unsigned int morecore_properties; + + /* Statistics */ + INTERNAL_SIZE_T mmapped_mem; + INTERNAL_SIZE_T sbrked_mem; + INTERNAL_SIZE_T max_sbrked_mem; + INTERNAL_SIZE_T max_mmapped_mem; + INTERNAL_SIZE_T max_total_mem; +}; + +typedef struct malloc_state *mstate; + +/* + There is exactly one instance of this struct in this malloc. + If you are adapting this malloc in a way that does NOT use a static + malloc_state, you MUST explicitly zero-fill it before using. This + malloc relies on the property that malloc_state is initialized to + all zeroes (as is true of C statics). 
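+
+  In outline, such an adaptation would do (a sketch using the mstate
+  machinery defined below):
+
+    struct malloc_state st;
+    memset(&st, 0, sizeof(st));   // stand in for static zero-fill
+    // ... then hand &st to the internal routines in place of
+    // get_malloc_state()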
+*/ + +static struct malloc_state av_; /* never directly referenced */ + +/* + All uses of av_ are via get_malloc_state(). + At most one "call" to get_malloc_state is made per invocation of + the public versions of malloc and free, but other routines + that in turn invoke malloc and/or free may call more then once. + Also, it is called in check* routines if DEBUG is set. +*/ + +#define get_malloc_state() (&(av_)) + +/* + Initialize a malloc_state struct. + + This is called only from within malloc_consolidate, which needs + be called in the same contexts anyway. It is never called directly + outside of malloc_consolidate because some optimizing compilers try + to inline it at all call points, which turns out not to be an + optimization at all. (Inlining it in malloc_consolidate is fine though.) +*/ + +#if __STD_C +static void malloc_init_state(mstate av) +#else +static void malloc_init_state(av) mstate av; +#endif +{ + int i; + mbinptr bin; + + /* Establish circular links for normal bins */ + for (i = 1; i < NBINS; ++i) { + bin = bin_at(av,i); + bin->fd = bin->bk = bin; + } + + av->top_pad = DEFAULT_TOP_PAD; + av->n_mmaps_max = DEFAULT_MMAP_MAX; + av->mmap_threshold = DEFAULT_MMAP_THRESHOLD; + av->trim_threshold = DEFAULT_TRIM_THRESHOLD; + +#if MORECORE_CONTIGUOUS + set_contiguous(av); +#else + set_noncontiguous(av); +#endif + + + set_max_fast(av, DEFAULT_MXFAST); + + av->top = initial_top(av); + av->pagesize = malloc_getpagesize; +} + +/* + Other internal utilities operating on mstates +*/ + +#if __STD_C +static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate); +static int sYSTRIm(size_t, mstate); +static void malloc_consolidate(mstate); +static Void_t** iALLOc(size_t, size_t*, int, Void_t**); +#else +static Void_t* sYSMALLOc(); +static int sYSTRIm(); +static void malloc_consolidate(); +static Void_t** iALLOc(); +#endif + +/* + Debugging support + + These routines make a number of assertions about the states + of data structures that should be true at all times. If any + are not true, it's very likely that a user program has somehow + trashed memory. (It's also possible that there is a coding error + in malloc. In which case, please report it!) +*/ + +#if ! DEBUG + +#define check_chunk(P) +#define check_free_chunk(P) +#define check_inuse_chunk(P) +#define check_remalloced_chunk(P,N) +#define check_malloced_chunk(P,N) +#define check_malloc_state() + +#else +#define check_chunk(P) do_check_chunk(P) +#define check_free_chunk(P) do_check_free_chunk(P) +#define check_inuse_chunk(P) do_check_inuse_chunk(P) +#define check_remalloced_chunk(P,N) do_check_remalloced_chunk(P,N) +#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N) +#define check_malloc_state() do_check_malloc_state() + +/* + Properties of all chunks +*/ + +#if __STD_C +static void do_check_chunk(mchunkptr p) +#else +static void do_check_chunk(p) mchunkptr p; +#endif +{ + mstate av = get_malloc_state(); + CHUNK_SIZE_T sz = chunksize(p); + /* min and max possible addresses assuming contiguous allocation */ + char* max_address = (char*)(av->top) + chunksize(av->top); + char* min_address = max_address - av->sbrked_mem; + + if (!chunk_is_mmapped(p)) { + + /* Has legal address ... 
*/ + if (p != av->top) { + if (contiguous(av)) { + assert(((char*)p) >= min_address); + assert(((char*)p + sz) <= ((char*)(av->top))); + } + } + else { + /* top size is always at least MINSIZE */ + assert((CHUNK_SIZE_T)(sz) >= MINSIZE); + /* top predecessor always marked inuse */ + assert(prev_inuse(p)); + } + + } + else { +#if HAVE_MMAP + /* address is outside main heap */ + if (contiguous(av) && av->top != initial_top(av)) { + assert(((char*)p) < min_address || ((char*)p) > max_address); + } + /* chunk is page-aligned */ + assert(((p->prev_size + sz) & (av->pagesize-1)) == 0); + /* mem is aligned */ + assert(aligned_OK(chunk2mem(p))); +#else + /* force an appropriate assert violation if debug set */ + assert(!chunk_is_mmapped(p)); +#endif + } +} + +/* + Properties of free chunks +*/ + +#if __STD_C +static void do_check_free_chunk(mchunkptr p) +#else +static void do_check_free_chunk(p) mchunkptr p; +#endif +{ + mstate av = get_malloc_state(); + + INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; + mchunkptr next = chunk_at_offset(p, sz); + + do_check_chunk(p); + + /* Chunk must claim to be free ... */ + assert(!inuse(p)); + assert (!chunk_is_mmapped(p)); + + /* Unless a special marker, must have OK fields */ + if ((CHUNK_SIZE_T)(sz) >= MINSIZE) + { + assert((sz & MALLOC_ALIGN_MASK) == 0); + assert(aligned_OK(chunk2mem(p))); + /* ... matching footer field */ + assert(next->prev_size == sz); + /* ... and is fully consolidated */ + assert(prev_inuse(p)); + assert (next == av->top || inuse(next)); + + /* ... and has minimally sane links */ + assert(p->fd->bk == p); + assert(p->bk->fd == p); + } + else /* markers are always of size SIZE_SZ */ + assert(sz == SIZE_SZ); +} + +/* + Properties of inuse chunks +*/ + +#if __STD_C +static void do_check_inuse_chunk(mchunkptr p) +#else +static void do_check_inuse_chunk(p) mchunkptr p; +#endif +{ + mstate av = get_malloc_state(); + mchunkptr next; + do_check_chunk(p); + + if (chunk_is_mmapped(p)) + return; /* mmapped chunks have no next/prev */ + + /* Check whether it claims to be in use ... */ + assert(inuse(p)); + + next = next_chunk(p); + + /* ... and is surrounded by OK chunks. + Since more things can be checked with free chunks than inuse ones, + if an inuse chunk borders them and debug is on, it's worth doing them. + */ + if (!prev_inuse(p)) { + /* Note that we cannot even look at prev unless it is not inuse */ + mchunkptr prv = prev_chunk(p); + assert(next_chunk(prv) == p); + do_check_free_chunk(prv); + } + + if (next == av->top) { + assert(prev_inuse(next)); + assert(chunksize(next) >= MINSIZE); + } + else if (!inuse(next)) + do_check_free_chunk(next); +} + +/* + Properties of chunks recycled from fastbins +*/ + +#if __STD_C +static void do_check_remalloced_chunk(mchunkptr p, INTERNAL_SIZE_T s) +#else +static void do_check_remalloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s; +#endif +{ + INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE; + + do_check_inuse_chunk(p); + + /* Legal size ... */ + assert((sz & MALLOC_ALIGN_MASK) == 0); + assert((CHUNK_SIZE_T)(sz) >= MINSIZE); + /* ... and alignment */ + assert(aligned_OK(chunk2mem(p))); + /* chunk is less than MINSIZE more than request */ + assert((long)(sz) - (long)(s) >= 0); + assert((long)(sz) - (long)(s + MINSIZE) < 0); +} + +/* + Properties of nonrecycled chunks at the point they are malloced +*/ + +#if __STD_C +static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s) +#else +static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s; +#endif +{ + /* same as recycled case ... 
*/
+ do_check_remalloced_chunk(p, s);
+
+ /*
+ ... plus, must obey implementation invariant that prev_inuse is
+ always true of any allocated chunk; i.e., that each allocated
+ chunk borders either a previously allocated and still in-use
+ chunk, or the base of its memory arena. This is ensured
+ by making all allocations from the `lowest' part of any found
+ chunk. This does not necessarily hold however for chunks
+ recycled via fastbins.
+ */
+
+ assert(prev_inuse(p));
+}
+
+
+/*
+ Properties of malloc_state.
+
+ This may be useful for debugging malloc, as well as detecting user
+ programmer errors that somehow write into malloc_state.
+
+ If you are extending or experimenting with this malloc, you can
+ probably figure out how to hack this routine to print out or
+ display chunk addresses, sizes, bins, and other instrumentation.
+*/
+
+static void do_check_malloc_state()
+{
+ mstate av = get_malloc_state();
+ int i;
+ mchunkptr p;
+ mchunkptr q;
+ mbinptr b;
+ unsigned int binbit;
+ int empty;
+ unsigned int idx;
+ INTERNAL_SIZE_T size;
+ CHUNK_SIZE_T total = 0;
+ int max_fast_bin;
+
+ /* internal size_t must be no wider than pointer type */
+ assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
+
+ /* alignment is a power of 2 */
+ assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
+
+ /* cannot run remaining checks until fully initialized */
+ if (av->top == 0 || av->top == initial_top(av))
+ return;
+
+ /* pagesize is a power of 2 */
+ assert((av->pagesize & (av->pagesize-1)) == 0);
+
+ /* properties of fastbins */
+
+ /* max_fast is in allowed range */
+ assert(get_max_fast(av) <= request2size(MAX_FAST_SIZE));
+
+ max_fast_bin = fastbin_index(av->max_fast);
+
+ for (i = 0; i < NFASTBINS; ++i) {
+ p = av->fastbins[i];
+
+ /* all bins past max_fast are empty */
+ if (i > max_fast_bin)
+ assert(p == 0);
+
+ while (p != 0) {
+ /* each chunk claims to be inuse */
+ do_check_inuse_chunk(p);
+ total += chunksize(p);
+ /* chunk belongs in this bin */
+ assert(fastbin_index(chunksize(p)) == i);
+ p = p->fd;
+ }
+ }
+
+ if (total != 0)
+ assert(have_fastchunks(av));
+ else if (!have_fastchunks(av))
+ assert(total == 0);
+
+ /* check normal bins */
+ for (i = 1; i < NBINS; ++i) {
+ b = bin_at(av,i);
+
+ /* binmap is accurate (except for bin 1 == unsorted_chunks) */
+ if (i >= 2) {
+ binbit = get_binmap(av,i);
+ empty = last(b) == b;
+ if (!binbit)
+ assert(empty);
+ else if (!empty)
+ assert(binbit);
+ }
+
+ for (p = last(b); p != b; p = p->bk) {
+ /* each chunk claims to be free */
+ do_check_free_chunk(p);
+ size = chunksize(p);
+ total += size;
+ if (i >= 2) {
+ /* chunk belongs in bin */
+ idx = bin_index(size);
+ assert(idx == i);
+ /* lists are sorted */
+ if ((CHUNK_SIZE_T) size >= (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
+ assert(p->bk == b ||
+ (CHUNK_SIZE_T)chunksize(p->bk) >=
+ (CHUNK_SIZE_T)chunksize(p));
+ }
+ }
+ /* chunk is followed by a legal chain of inuse chunks */
+ for (q = next_chunk(p);
+ (q != av->top && inuse(q) &&
+ (CHUNK_SIZE_T)(chunksize(q)) >= MINSIZE);
+ q = next_chunk(q))
+ do_check_inuse_chunk(q);
+ }
+ }
+
+ /* top chunk is OK */
+ check_chunk(av->top);
+
+ /* sanity checks for statistics */
+
+ assert(total <= (CHUNK_SIZE_T)(av->max_total_mem));
+ assert(av->n_mmaps >= 0);
+ assert(av->n_mmaps <= av->max_n_mmaps);
+
+ assert((CHUNK_SIZE_T)(av->sbrked_mem) <=
+ (CHUNK_SIZE_T)(av->max_sbrked_mem));
+
+ assert((CHUNK_SIZE_T)(av->mmapped_mem) <=
+ (CHUNK_SIZE_T)(av->max_mmapped_mem));
+
+ assert((CHUNK_SIZE_T)(av->max_total_mem) >=
+
(CHUNK_SIZE_T)(av->mmapped_mem) + (CHUNK_SIZE_T)(av->sbrked_mem)); +} +#endif + + +/* ----------- Routines dealing with system allocation -------------- */ + +/* + sysmalloc handles malloc cases requiring more memory from the system. + On entry, it is assumed that av->top does not have enough + space to service request for nb bytes, thus requiring that av->top + be extended or replaced. +*/ + +#if __STD_C +static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av) +#else +static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av; +#endif +{ + mchunkptr old_top; /* incoming value of av->top */ + INTERNAL_SIZE_T old_size; /* its size */ + char* old_end; /* its end address */ + + long size; /* arg to first MORECORE or mmap call */ + char* brk; /* return value from MORECORE */ + + long correction; /* arg to 2nd MORECORE call */ + char* snd_brk; /* 2nd return val */ + + INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */ + INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */ + char* aligned_brk; /* aligned offset into brk */ + + mchunkptr p; /* the allocated/returned chunk */ + mchunkptr remainder; /* remainder from allocation */ + CHUNK_SIZE_T remainder_size; /* its size */ + + CHUNK_SIZE_T sum; /* for updating stats */ + + size_t pagemask = av->pagesize - 1; + + /* + If there is space available in fastbins, consolidate and retry + malloc from scratch rather than getting memory from system. This + can occur only if nb is in smallbin range so we didn't consolidate + upon entry to malloc. It is much easier to handle this case here + than in malloc proper. + */ + + if (have_fastchunks(av)) { + assert(in_smallbin_range(nb)); + malloc_consolidate(av); + return mALLOc(nb - MALLOC_ALIGN_MASK); + } + + +#if HAVE_MMAP + + /* + If have mmap, and the request size meets the mmap threshold, and + the system supports mmap, and there are few enough currently + allocated mmapped regions, try to directly map this request + rather than expanding top. + */ + + if ((CHUNK_SIZE_T)(nb) >= (CHUNK_SIZE_T)(av->mmap_threshold) && + (av->n_mmaps < av->n_mmaps_max)) { + + char* mm; /* return value from mmap call*/ + + /* + Round up size to nearest page. For mmapped chunks, the overhead + is one SIZE_SZ unit larger than for normal chunks, because there + is no following chunk whose prev_size field could be used. + */ + size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask; + + /* Don't try if size wraps around 0 */ + if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) { + + mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE)); + + if (mm != (char*)(MORECORE_FAILURE)) { + + /* + The offset to the start of the mmapped region is stored + in the prev_size field of the chunk. This allows us to adjust + returned start address to meet alignment requirements here + and in memalign(), and still be able to compute proper + address argument for later munmap in free() and realloc(). 
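+
+ A worked instance of the adjustment below (hedged numbers: 4-byte
+ SIZE_SZ, 16-byte MALLOC_ALIGNMENT, and a page-aligned mm of
+ 0x10000):
+
+   // chunk2mem(mm) == 0x10008; 0x10008 & 15 == 8 == front_misalign
+   // correction == 16 - 8 == 8, so p = mm + 8 and p->prev_size = 8
+   // free() then recovers the mapping as munmap((char*)p - 8,
+   // size + 8), i.e. exactly the region mmap returned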
+ */ + + front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK; + if (front_misalign > 0) { + correction = MALLOC_ALIGNMENT - front_misalign; + p = (mchunkptr)(mm + correction); + p->prev_size = correction; + set_head(p, (size - correction) |IS_MMAPPED); + } + else { + p = (mchunkptr)mm; + p->prev_size = 0; + set_head(p, size|IS_MMAPPED); + } + + /* update statistics */ + + if (++av->n_mmaps > av->max_n_mmaps) + av->max_n_mmaps = av->n_mmaps; + + sum = av->mmapped_mem += size; + if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem)) + av->max_mmapped_mem = sum; + sum += av->sbrked_mem; + if (sum > (CHUNK_SIZE_T)(av->max_total_mem)) + av->max_total_mem = sum; + + check_chunk(p); + + return chunk2mem(p); + } + } + } +#endif + + /* Record incoming configuration of top */ + + old_top = av->top; + old_size = chunksize(old_top); + old_end = (char*)(chunk_at_offset(old_top, old_size)); + + brk = snd_brk = (char*)(MORECORE_FAILURE); + + /* + If not the first time through, we require old_size to be + at least MINSIZE and to have prev_inuse set. + */ + + assert((old_top == initial_top(av) && old_size == 0) || + ((CHUNK_SIZE_T) (old_size) >= MINSIZE && + prev_inuse(old_top))); + + /* Precondition: not enough current space to satisfy nb request */ + assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE)); + + /* Precondition: all fastbins are consolidated */ + assert(!have_fastchunks(av)); + + + /* Request enough space for nb + pad + overhead */ + + size = nb + av->top_pad + MINSIZE; + + /* + If contiguous, we can subtract out existing space that we hope to + combine with new space. We add it back later only if + we don't actually get contiguous space. + */ + + if (contiguous(av)) + size -= old_size; + + /* + Round to a multiple of page size. + If MORECORE is not contiguous, this ensures that we only call it + with whole-page arguments. And if MORECORE is contiguous and + this is not first time through, this preserves page-alignment of + previous calls. Otherwise, we correct to page-align below. + */ + + size = (size + pagemask) & ~pagemask; + + /* + Don't try to call MORECORE if argument is so big as to appear + negative. Note that since mmap takes size_t arg, it may succeed + below even if we cannot call MORECORE. + */ + + if (size > 0) + brk = (char*)(MORECORE(size)); + + /* + If have mmap, try using it as a backup when MORECORE fails or + cannot be used. This is worth doing on systems that have "holes" in + address space, so sbrk cannot extend to give contiguous space, but + space is available elsewhere. Note that we ignore mmap max count + and threshold limits, since the space will not be used as a + segregated mmap region. + */ + +#if HAVE_MMAP + if (brk == (char*)(MORECORE_FAILURE)) { + + /* Cannot merge with old top, so add its size back in */ + if (contiguous(av)) + size = (size + old_size + pagemask) & ~pagemask; + + /* If we are relying on mmap as backup, then use larger units */ + if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(MMAP_AS_MORECORE_SIZE)) + size = MMAP_AS_MORECORE_SIZE; + + /* Don't try if size wraps around 0 */ + if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb)) { + + brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE)); + + if (brk != (char*)(MORECORE_FAILURE)) { + + /* We do not need, and cannot use, another sbrk call to find end */ + snd_brk = brk + size; + + /* + Record that we no longer have a contiguous sbrk region. + After the first time mmap is used as backup, we do not + ever rely on contiguous space since this could incorrectly + bridge regions. 
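+
+ (Concretely: a chunk that straddled the old sbrk region and a new
+ mmap region would let coalescing or splitting walk into address
+ space this malloc does not own, so the contiguous assumption is
+ dropped permanently rather than re-probed.)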
+ */ + set_noncontiguous(av); + } + } + } +#endif + + if (brk != (char*)(MORECORE_FAILURE)) { + av->sbrked_mem += size; + + /* + If MORECORE extends previous space, we can likewise extend top size. + */ + + if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) { + set_head(old_top, (size + old_size) | PREV_INUSE); + } + + /* + Otherwise, make adjustments: + + * If the first time through or noncontiguous, we need to call sbrk + just to find out where the end of memory lies. + + * We need to ensure that all returned chunks from malloc will meet + MALLOC_ALIGNMENT + + * If there was an intervening foreign sbrk, we need to adjust sbrk + request size to account for fact that we will not be able to + combine new space with existing space in old_top. + + * Almost all systems internally allocate whole pages at a time, in + which case we might as well use the whole last page of request. + So we allocate enough more memory to hit a page boundary now, + which in turn causes future contiguous calls to page-align. + */ + + else { + front_misalign = 0; + end_misalign = 0; + correction = 0; + aligned_brk = brk; + + /* + If MORECORE returns an address lower than we have seen before, + we know it isn't really contiguous. This and some subsequent + checks help cope with non-conforming MORECORE functions and + the presence of "foreign" calls to MORECORE from outside of + malloc or by other threads. We cannot guarantee to detect + these in all cases, but cope with the ones we do detect. + */ + if (contiguous(av) && old_size != 0 && brk < old_end) { + set_noncontiguous(av); + } + + /* handle contiguous cases */ + if (contiguous(av)) { + + /* + We can tolerate forward non-contiguities here (usually due + to foreign calls) but treat them as part of our space for + stats reporting. + */ + if (old_size != 0) + av->sbrked_mem += brk - old_end; + + /* Guarantee alignment of first new chunk made from this space */ + + front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK; + if (front_misalign > 0) { + + /* + Skip over some bytes to arrive at an aligned position. + We don't need to specially mark these wasted front bytes. + They will never be accessed anyway because + prev_inuse of av->top (and any chunk created from its start) + is always true after initialization. + */ + + correction = MALLOC_ALIGNMENT - front_misalign; + aligned_brk += correction; + } + + /* + If this isn't adjacent to existing space, then we will not + be able to merge with old_top space, so must add to 2nd request. + */ + + correction += old_size; + + /* Extend the end address to hit a page boundary */ + end_misalign = (INTERNAL_SIZE_T)(brk + size + correction); + correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign; + + assert(correction >= 0); + snd_brk = (char*)(MORECORE(correction)); + + if (snd_brk == (char*)(MORECORE_FAILURE)) { + /* + If can't allocate correction, try to at least find out current + brk. It might be enough to proceed without failing. + */ + correction = 0; + snd_brk = (char*)(MORECORE(0)); + } + else if (snd_brk < brk) { + /* + If the second call gives noncontiguous space even though + it says it won't, the only course of action is to ignore + results of second call, and conservatively estimate where + the first call left us. Also set noncontiguous, so this + won't happen again, leaving at most one hole. + + Note that this check is intrinsically incomplete. 
Because + MORECORE is allowed to give more space than we ask for, + there is no reliable way to detect a noncontiguity + producing a forward gap for the second call. + */ + snd_brk = brk + size; + correction = 0; + set_noncontiguous(av); + } + + } + + /* handle non-contiguous cases */ + else { + /* MORECORE/mmap must correctly align */ + assert(aligned_OK(chunk2mem(brk))); + + /* Find out current end of memory */ + if (snd_brk == (char*)(MORECORE_FAILURE)) { + snd_brk = (char*)(MORECORE(0)); + av->sbrked_mem += snd_brk - brk - size; + } + } + + /* Adjust top based on results of second sbrk */ + if (snd_brk != (char*)(MORECORE_FAILURE)) { + av->top = (mchunkptr)aligned_brk; + set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE); + av->sbrked_mem += correction; + + /* + If not the first time through, we either have a + gap due to foreign sbrk or a non-contiguous region. Insert a + double fencepost at old_top to prevent consolidation with space + we don't own. These fenceposts are artificial chunks that are + marked as inuse and are in any case too small to use. We need + two to make sizes and alignments work out. + */ + + if (old_size != 0) { + /* + Shrink old_top to insert fenceposts, keeping size a + multiple of MALLOC_ALIGNMENT. We know there is at least + enough space in old_top to do this. + */ + old_size = (old_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK; + set_head(old_top, old_size | PREV_INUSE); + + /* + Note that the following assignments completely overwrite + old_top when old_size was previously MINSIZE. This is + intentional. We need the fencepost, even if old_top otherwise gets + lost. + */ + chunk_at_offset(old_top, old_size )->size = + SIZE_SZ|PREV_INUSE; + + chunk_at_offset(old_top, old_size + SIZE_SZ)->size = + SIZE_SZ|PREV_INUSE; + + /* + If possible, release the rest, suppressing trimming. + */ + if (old_size >= MINSIZE) { + INTERNAL_SIZE_T tt = av->trim_threshold; + av->trim_threshold = (INTERNAL_SIZE_T)(-1); + fREe(chunk2mem(old_top)); + av->trim_threshold = tt; + } + } + } + } + + /* Update statistics */ + sum = av->sbrked_mem; + if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem)) + av->max_sbrked_mem = sum; + + sum += av->mmapped_mem; + if (sum > (CHUNK_SIZE_T)(av->max_total_mem)) + av->max_total_mem = sum; + + check_malloc_state(); + + /* finally, do the allocation */ + + p = av->top; + size = chunksize(p); + + /* check that one of the above allocation paths succeeded */ + if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) { + remainder_size = size - nb; + remainder = chunk_at_offset(p, nb); + av->top = remainder; + set_head(p, nb | PREV_INUSE); + set_head(remainder, remainder_size | PREV_INUSE); + check_malloced_chunk(p, nb); + return chunk2mem(p); + } + + } + + /* catch all failure paths */ + MALLOC_FAILURE_ACTION; + return 0; +} + + + + +/* + sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back + to the system (via negative arguments to sbrk) if there is unused + memory at the `high' end of the malloc pool. It is called + automatically by free() when top space exceeds the trim + threshold. It is also called by the public malloc_trim routine. It + returns 1 if it actually released any memory, else 0. 
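+
+ A worked instance of the release computation below (hedged
+ arithmetic: pagesz = 4096, pad = 0, MINSIZE = 16):
+
+   // top_size = 147472 (36 pages plus MINSIZE)
+   // extra = ((147472 - 0 - 16 + 4095) / 4096 - 1) * 4096
+   //       = (36 - 1) * 4096 = 143360
+   // 35 pages go back to the system; one page plus MINSIZE bytes
+   // stay in av->top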
+*/
+
+#if __STD_C
+static int sYSTRIm(size_t pad, mstate av)
+#else
+static int sYSTRIm(pad, av) size_t pad; mstate av;
+#endif
+{
+ long top_size; /* Amount of top-most memory */
+ long extra; /* Amount to release */
+ long released; /* Amount actually released */
+ char* current_brk; /* address returned by pre-check sbrk call */
+ char* new_brk; /* address returned by post-check sbrk call */
+ size_t pagesz;
+
+ pagesz = av->pagesize;
+ top_size = chunksize(av->top);
+
+ /* Release in pagesize units, keeping at least one page */
+ extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
+
+ if (extra > 0) {
+
+ /*
+ Only proceed if end of memory is where we last set it.
+ This avoids problems if there were foreign sbrk calls.
+ */
+ current_brk = (char*)(MORECORE(0));
+ if (current_brk == (char*)(av->top) + top_size) {
+
+ /*
+ Attempt to release memory. We ignore MORECORE return value,
+ and instead call again to find out where new end of memory is.
+ This avoids problems if first call releases less than we asked,
+ or if failure somehow altered brk value. (We could still
+ encounter problems if it altered brk in some very bad way,
+ but the only thing we can do is adjust anyway, which will cause
+ some downstream failure.)
+ */
+
+ MORECORE(-extra);
+ new_brk = (char*)(MORECORE(0));
+
+ if (new_brk != (char*)MORECORE_FAILURE) {
+ released = (long)(current_brk - new_brk);
+
+ if (released != 0) {
+ /* Success. Adjust top. */
+ av->sbrked_mem -= released;
+ set_head(av->top, (top_size - released) | PREV_INUSE);
+ check_malloc_state();
+ return 1;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ ------------------------------ malloc ------------------------------
+*/
+
+
+#if __STD_C
+Void_t* mALLOc(size_t bytes)
+#else
+ Void_t* mALLOc(bytes) size_t bytes;
+#endif
+{
+ mstate av = get_malloc_state();
+
+ INTERNAL_SIZE_T nb; /* normalized request size */
+ unsigned int idx; /* associated bin index */
+ mbinptr bin; /* associated bin */
+ mfastbinptr* fb; /* associated fastbin */
+
+ mchunkptr victim; /* inspected/selected chunk */
+ INTERNAL_SIZE_T size; /* its size */
+ int victim_index; /* its bin index */
+
+ mchunkptr remainder; /* remainder from a split */
+ CHUNK_SIZE_T remainder_size; /* its size */
+
+ unsigned int block; /* bit map traverser */
+ unsigned int bit; /* bit map traverser */
+ unsigned int map; /* current word of binmap */
+
+ mchunkptr fwd; /* misc temp for linking */
+ mchunkptr bck; /* misc temp for linking */
+
+ /*
+ Convert request size to internal form by adding SIZE_SZ bytes
+ overhead plus possibly more to obtain necessary alignment and/or
+ to obtain a size of at least MINSIZE, the smallest allocatable
+ size. Also, checked_request2size traps (returning 0) request sizes
+ that are so large that they wrap around zero when padded and
+ aligned.
+ */
+
+ checked_request2size(bytes, nb);
+
+ /*
+ Bypass search if no frees yet
+ */
+ if (!have_anychunks(av)) {
+ if (av->max_fast == 0) /* initialization check */
+ malloc_consolidate(av);
+ goto use_top;
+ }
+
+ /*
+ If the size qualifies as a fastbin, first check corresponding bin.
+ */
+
+ if ((CHUNK_SIZE_T)(nb) <= (CHUNK_SIZE_T)(av->max_fast)) {
+ fb = &(av->fastbins[(fastbin_index(nb))]);
+ if ( (victim = *fb) != 0) {
+ *fb = victim->fd;
+ check_remalloced_chunk(victim, nb);
+ return chunk2mem(victim);
+ }
+ }
+
+ /*
+ If a small request, check regular bin. Since these "smallbins"
+ hold one size each, no searching within bins is necessary.
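+ (For instance, with 4-byte SIZE_SZ and 8-byte alignment -- hedged
+ numbers -- a request of 20 bytes normalizes to nb = 24, and the
+ smallbin for size 24 holds only 24-byte chunks, so any chunk taken
+ from it is an exact fit by construction.)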
+ (For a large request, we need to wait until unsorted chunks are
+ processed to find best fit. But for small ones, fits are exact
+ anyway, so we can check now, which is faster.)
+ */
+
+ if (in_smallbin_range(nb)) {
+ idx = smallbin_index(nb);
+ bin = bin_at(av,idx);
+
+ if ( (victim = last(bin)) != bin) {
+ bck = victim->bk;
+ set_inuse_bit_at_offset(victim, nb);
+ bin->bk = bck;
+ bck->fd = bin;
+
+ check_malloced_chunk(victim, nb);
+ return chunk2mem(victim);
+ }
+ }
+
+ /*
+ If this is a large request, consolidate fastbins before continuing.
+ While it might look excessive to kill all fastbins before
+ even seeing if there is space available, this avoids
+ fragmentation problems normally associated with fastbins.
+ Also, in practice, programs tend to have runs of either small or
+ large requests, but less often mixtures, so consolidation is not
+ invoked all that often in most programs. And the programs in which
+ it is called frequently otherwise tend to fragment.
+ */
+
+ else {
+ idx = largebin_index(nb);
+ if (have_fastchunks(av))
+ malloc_consolidate(av);
+ }
+
+ /*
+ Process recently freed or remaindered chunks, taking one only if
+ it is exact fit, or, if this is a small request, the chunk is remainder from
+ the most recent non-exact fit. Place other traversed chunks in
+ bins. Note that this step is the only place in any routine where
+ chunks are placed in bins.
+ */
+
+ while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
+ bck = victim->bk;
+ size = chunksize(victim);
+
+ /*
+ If a small request, try to use last remainder if it is the
+ only chunk in unsorted bin. This helps promote locality for
+ runs of consecutive small requests. This is the only
+ exception to best-fit, and applies only when there is
+ no exact fit for a small chunk.
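+
+ (A hedged user-level illustration:
+   // char *a = malloc(40), *b = malloc(40), *c = malloc(40);
+ if a's request splits one large free chunk, the leftover becomes
+ last_remainder, so b and c are carved from it in turn and the
+ three allocations end up adjacent in memory.)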
+ */ + + if (in_smallbin_range(nb) && + bck == unsorted_chunks(av) && + victim == av->last_remainder && + (CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) { + + /* split and reattach remainder */ + remainder_size = size - nb; + remainder = chunk_at_offset(victim, nb); + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; + av->last_remainder = remainder; + remainder->bk = remainder->fd = unsorted_chunks(av); + + set_head(victim, nb | PREV_INUSE); + set_head(remainder, remainder_size | PREV_INUSE); + set_foot(remainder, remainder_size); + + check_malloced_chunk(victim, nb); + return chunk2mem(victim); + } + + /* remove from unsorted list */ + unsorted_chunks(av)->bk = bck; + bck->fd = unsorted_chunks(av); + + /* Take now instead of binning if exact fit */ + + if (size == nb) { + set_inuse_bit_at_offset(victim, size); + check_malloced_chunk(victim, nb); + return chunk2mem(victim); + } + + /* place chunk in bin */ + + if (in_smallbin_range(size)) { + victim_index = smallbin_index(size); + bck = bin_at(av, victim_index); + fwd = bck->fd; + } + else { + victim_index = largebin_index(size); + bck = bin_at(av, victim_index); + fwd = bck->fd; + + if (fwd != bck) { + /* if smaller than smallest, place first */ + if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(bck->bk->size)) { + fwd = bck; + bck = bck->bk; + } + else if ((CHUNK_SIZE_T)(size) >= + (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) { + + /* maintain large bins in sorted order */ + size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */ + while ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(fwd->size)) + fwd = fwd->fd; + bck = fwd->bk; + } + } + } + + mark_bin(av, victim_index); + victim->bk = bck; + victim->fd = fwd; + fwd->bk = victim; + bck->fd = victim; + } + + /* + If a large request, scan through the chunks of current bin to + find one that fits. (This will be the smallest that fits unless + FIRST_SORTED_BIN_SIZE has been changed from default.) This is + the only step where an unbounded number of chunks might be + scanned without doing anything useful with them. However the + lists tend to be short. + */ + + if (!in_smallbin_range(nb)) { + bin = bin_at(av, idx); + + for (victim = last(bin); victim != bin; victim = victim->bk) { + size = chunksize(victim); + + if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb)) { + remainder_size = size - nb; + unlink(victim, bck, fwd); + + /* Exhaust */ + if (remainder_size < MINSIZE) { + set_inuse_bit_at_offset(victim, size); + check_malloced_chunk(victim, nb); + return chunk2mem(victim); + } + /* Split */ + else { + remainder = chunk_at_offset(victim, nb); + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder; + remainder->bk = remainder->fd = unsorted_chunks(av); + set_head(victim, nb | PREV_INUSE); + set_head(remainder, remainder_size | PREV_INUSE); + set_foot(remainder, remainder_size); + check_malloced_chunk(victim, nb); + return chunk2mem(victim); + } + } + } + } + + /* + Search for a chunk by scanning bins, starting with next largest + bin. This search is strictly by best-fit; i.e., the smallest + (with ties going to approximately the least recently used) chunk + that fits is selected. + + The bitmap avoids needing to check that most blocks are nonempty. + */ + + ++idx; + bin = bin_at(av,idx); + block = idx2block(idx); + map = av->binmap[block]; + bit = idx2bit(idx); + + for (;;) { + + /* Skip rest of block if there are no more set bits in this block. 
*/
+ if (bit > map || bit == 0) {
+ do {
+ if (++block >= BINMAPSIZE) /* out of bins */
+ goto use_top;
+ } while ( (map = av->binmap[block]) == 0);
+
+ bin = bin_at(av, (block << BINMAPSHIFT));
+ bit = 1;
+ }
+
+ /* Advance to bin with set bit. There must be one. */
+ while ((bit & map) == 0) {
+ bin = next_bin(bin);
+ bit <<= 1;
+ assert(bit != 0);
+ }
+
+ /* Inspect the bin. It is likely to be non-empty */
+ victim = last(bin);
+
+ /* If a false alarm (empty bin), clear the bit. */
+ if (victim == bin) {
+ av->binmap[block] = map &= ~bit; /* Write through */
+ bin = next_bin(bin);
+ bit <<= 1;
+ }
+
+ else {
+ size = chunksize(victim);
+
+ /* We know the first chunk in this bin is big enough to use. */
+ assert((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb));
+
+ remainder_size = size - nb;
+
+ /* unlink */
+ bck = victim->bk;
+ bin->bk = bck;
+ bck->fd = bin;
+
+ /* Exhaust */
+ if (remainder_size < MINSIZE) {
+ set_inuse_bit_at_offset(victim, size);
+ check_malloced_chunk(victim, nb);
+ return chunk2mem(victim);
+ }
+
+ /* Split */
+ else {
+ remainder = chunk_at_offset(victim, nb);
+
+ unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
+ remainder->bk = remainder->fd = unsorted_chunks(av);
+ /* advertise as last remainder */
+ if (in_smallbin_range(nb))
+ av->last_remainder = remainder;
+
+ set_head(victim, nb | PREV_INUSE);
+ set_head(remainder, remainder_size | PREV_INUSE);
+ set_foot(remainder, remainder_size);
+ check_malloced_chunk(victim, nb);
+ return chunk2mem(victim);
+ }
+ }
+ }
+
+ use_top:
+ /*
+ If large enough, split off the chunk bordering the end of memory
+ (held in av->top). Note that this is in accord with the best-fit
+ search rule. In effect, av->top is treated as larger (and thus
+ less well fitting) than any other available chunk since it can
+ be extended to be as large as necessary (up to system
+ limitations).
+
+ We require that av->top always exists (i.e., has size >=
+ MINSIZE) after initialization, so if it would otherwise be
+ exhausted by current request, it is replenished. (The main
+ reason for ensuring it exists is that we may need MINSIZE space
+ to put in fenceposts in sysmalloc.)
+ */
+
+ victim = av->top;
+ size = chunksize(victim);
+
+ if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
+ remainder_size = size - nb;
+ remainder = chunk_at_offset(victim, nb);
+ av->top = remainder;
+ set_head(victim, nb | PREV_INUSE);
+ set_head(remainder, remainder_size | PREV_INUSE);
+
+ check_malloced_chunk(victim, nb);
+ return chunk2mem(victim);
+ }
+
+ /*
+ If no space in top, relay to handle system-dependent cases
+ */
+ return sYSMALLOc(nb, av);
+}
+
+/*
+ ------------------------------ free ------------------------------
+*/
+
+#if __STD_C
+void fREe(Void_t* mem)
+#else
+void fREe(mem) Void_t* mem;
+#endif
+{
+ mstate av = get_malloc_state();
+
+ mchunkptr p; /* chunk corresponding to mem */
+ INTERNAL_SIZE_T size; /* its size */
+ mfastbinptr* fb; /* associated fastbin */
+ mchunkptr nextchunk; /* next contiguous chunk */
+ INTERNAL_SIZE_T nextsize; /* its size */
+ int nextinuse; /* true if nextchunk is used */
+ INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
+ mchunkptr bck; /* misc temp for linking */
+ mchunkptr fwd; /* misc temp for linking */
+
+ /* free(0) has no effect */
+ if (mem != 0) {
+ p = mem2chunk(mem);
+ size = chunksize(p);
+
+ check_inuse_chunk(p);
+
+ /*
+ If eligible, place chunk on a fastbin so it can be found
+ and used quickly in malloc.
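+
+ Fastbins are LIFO singly-linked lists, so the push below is just
+ p->fd = *fb; *fb = p. Note that neither p's header nor the next
+ chunk's prev_inuse bit is updated: fastbin chunks still look
+ allocated, which is what keeps neighbors from coalescing with them
+ before malloc_consolidate runs.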
+ */ + + if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast) + +#if TRIM_FASTBINS + /* + If TRIM_FASTBINS set, don't place chunks + bordering top into fastbins + */ + && (chunk_at_offset(p, size) != av->top) +#endif + ) { + + set_fastchunks(av); + fb = &(av->fastbins[fastbin_index(size)]); + p->fd = *fb; + *fb = p; + } + + /* + Consolidate other non-mmapped chunks as they arrive. + */ + + else if (!chunk_is_mmapped(p)) { + set_anychunks(av); + + nextchunk = chunk_at_offset(p, size); + nextsize = chunksize(nextchunk); + + /* consolidate backward */ + if (!prev_inuse(p)) { + prevsize = p->prev_size; + size += prevsize; + p = chunk_at_offset(p, -((long) prevsize)); + unlink(p, bck, fwd); + } + + if (nextchunk != av->top) { + /* get and clear inuse bit */ + nextinuse = inuse_bit_at_offset(nextchunk, nextsize); + set_head(nextchunk, nextsize); + + /* consolidate forward */ + if (!nextinuse) { + unlink(nextchunk, bck, fwd); + size += nextsize; + } + + /* + Place the chunk in unsorted chunk list. Chunks are + not placed into regular bins until after they have + been given one chance to be used in malloc. + */ + + bck = unsorted_chunks(av); + fwd = bck->fd; + p->bk = bck; + p->fd = fwd; + bck->fd = p; + fwd->bk = p; + + set_head(p, size | PREV_INUSE); + set_foot(p, size); + + check_free_chunk(p); + } + + /* + If the chunk borders the current high end of memory, + consolidate into top + */ + + else { + size += nextsize; + set_head(p, size | PREV_INUSE); + av->top = p; + check_chunk(p); + } + + /* + If freeing a large space, consolidate possibly-surrounding + chunks. Then, if the total unused topmost memory exceeds trim + threshold, ask malloc_trim to reduce top. + + Unless max_fast is 0, we don't know if there are fastbins + bordering top, so we cannot tell for sure whether threshold + has been reached unless fastbins are consolidated. But we + don't want to consolidate on each free. As a compromise, + consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD + is reached. + */ + + if ((CHUNK_SIZE_T)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) { + if (have_fastchunks(av)) + malloc_consolidate(av); + +#ifndef MORECORE_CANNOT_TRIM + if ((CHUNK_SIZE_T)(chunksize(av->top)) >= + (CHUNK_SIZE_T)(av->trim_threshold)) + sYSTRIm(av->top_pad, av); +#endif + } + + } + /* + If the chunk was allocated via mmap, release via munmap() + Note that if HAVE_MMAP is false but chunk_is_mmapped is + true, then user must have overwritten memory. There's nothing + we can do to catch this error unless DEBUG is set, in which case + check_inuse_chunk (above) will have triggered error. + */ + + else { +#if HAVE_MMAP + int ret; + INTERNAL_SIZE_T offset = p->prev_size; + av->n_mmaps--; + av->mmapped_mem -= (size + offset); + ret = munmap((char*)p - offset, size + offset); + /* munmap returns non-zero on failure */ + assert(ret == 0); +#endif + } + } +} + +/* + ------------------------- malloc_consolidate ------------------------- + + malloc_consolidate is a specialized version of free() that tears + down chunks held in fastbins. Free itself cannot be used for this + purpose since, among other things, it might place chunks back onto + fastbins. So, instead, we need to use a minor variant of the same + code. + + Also, because this routine needs to be called the first time through + malloc anyway, it turns out to be the perfect place to trigger + initialization code. 
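+
+ Sketch of the resulting control flow (hedged):
+
+   // first malloc(): no free chunks yet and av->max_fast == 0,
+   //   so malloc_consolidate() -> malloc_init_state() runs once
+   // later calls: av->max_fast != 0, so the body below drains the
+   //   fastbins, merging neighbors into the unsorted bin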
+*/ + +#if __STD_C +static void malloc_consolidate(mstate av) +#else +static void malloc_consolidate(av) mstate av; +#endif +{ + mfastbinptr* fb; /* current fastbin being consolidated */ + mfastbinptr* maxfb; /* last fastbin (for loop control) */ + mchunkptr p; /* current chunk being consolidated */ + mchunkptr nextp; /* next chunk to consolidate */ + mchunkptr unsorted_bin; /* bin header */ + mchunkptr first_unsorted; /* chunk to link to */ + + /* These have same use as in free() */ + mchunkptr nextchunk; + INTERNAL_SIZE_T size; + INTERNAL_SIZE_T nextsize; + INTERNAL_SIZE_T prevsize; + int nextinuse; + mchunkptr bck; + mchunkptr fwd; + + /* + If max_fast is 0, we know that av hasn't + yet been initialized, in which case do so below + */ + + if (av->max_fast != 0) { + clear_fastchunks(av); + + unsorted_bin = unsorted_chunks(av); + + /* + Remove each chunk from fast bin and consolidate it, placing it + then in unsorted bin. Among other reasons for doing this, + placing in unsorted bin avoids needing to calculate actual bins + until malloc is sure that chunks aren't immediately going to be + reused anyway. + */ + + maxfb = &(av->fastbins[fastbin_index(av->max_fast)]); + fb = &(av->fastbins[0]); + do { + if ( (p = *fb) != 0) { + *fb = 0; + + do { + check_inuse_chunk(p); + nextp = p->fd; + + /* Slightly streamlined version of consolidation code in free() */ + size = p->size & ~PREV_INUSE; + nextchunk = chunk_at_offset(p, size); + nextsize = chunksize(nextchunk); + + if (!prev_inuse(p)) { + prevsize = p->prev_size; + size += prevsize; + p = chunk_at_offset(p, -((long) prevsize)); + unlink(p, bck, fwd); + } + + if (nextchunk != av->top) { + nextinuse = inuse_bit_at_offset(nextchunk, nextsize); + set_head(nextchunk, nextsize); + + if (!nextinuse) { + size += nextsize; + unlink(nextchunk, bck, fwd); + } + + first_unsorted = unsorted_bin->fd; + unsorted_bin->fd = p; + first_unsorted->bk = p; + + set_head(p, size | PREV_INUSE); + p->bk = unsorted_bin; + p->fd = first_unsorted; + set_foot(p, size); + } + + else { + size += nextsize; + set_head(p, size | PREV_INUSE); + av->top = p; + } + + } while ( (p = nextp) != 0); + + } + } while (fb++ != maxfb); + } + else { + malloc_init_state(av); + check_malloc_state(); + } +} + +/* + ------------------------------ realloc ------------------------------ +*/ + + +#if __STD_C +Void_t* rEALLOc(Void_t* oldmem, size_t bytes) +#else +Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes; +#endif +{ + mstate av = get_malloc_state(); + + INTERNAL_SIZE_T nb; /* padded request size */ + + mchunkptr oldp; /* chunk corresponding to oldmem */ + INTERNAL_SIZE_T oldsize; /* its size */ + + mchunkptr newp; /* chunk to return */ + INTERNAL_SIZE_T newsize; /* its size */ + Void_t* newmem; /* corresponding user mem */ + + mchunkptr next; /* next contiguous chunk after oldp */ + + mchunkptr remainder; /* extra space at end of newp */ + CHUNK_SIZE_T remainder_size; /* its size */ + + mchunkptr bck; /* misc temp for linking */ + mchunkptr fwd; /* misc temp for linking */ + + CHUNK_SIZE_T copysize; /* bytes to copy */ + unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */ + INTERNAL_SIZE_T* s; /* copy source */ + INTERNAL_SIZE_T* d; /* copy destination */ + + +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + fREe(oldmem); + return 0; + } +#endif + + /* realloc of null is supposed to be same as malloc */ + if (oldmem == 0) return mALLOc(bytes); + + checked_request2size(bytes, nb); + + oldp = mem2chunk(oldmem); + oldsize = chunksize(oldp); + + check_inuse_chunk(oldp); 
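+
+ /* Dispatch: a normal chunk may be reused in place, grown into top
+ or a free successor, or moved with an unrolled copy; an mmapped
+ chunk is handled separately at the end of this function. */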
+ + if (!chunk_is_mmapped(oldp)) { + + if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb)) { + /* already big enough; split below */ + newp = oldp; + newsize = oldsize; + } + + else { + next = chunk_at_offset(oldp, oldsize); + + /* Try to expand forward into top */ + if (next == av->top && + (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >= + (CHUNK_SIZE_T)(nb + MINSIZE)) { + set_head_size(oldp, nb); + av->top = chunk_at_offset(oldp, nb); + set_head(av->top, (newsize - nb) | PREV_INUSE); + return chunk2mem(oldp); + } + + /* Try to expand forward into next chunk; split off remainder below */ + else if (next != av->top && + !inuse(next) && + (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >= + (CHUNK_SIZE_T)(nb)) { + newp = oldp; + unlink(next, bck, fwd); + } + + /* allocate, copy, free */ + else { + newmem = mALLOc(nb - MALLOC_ALIGN_MASK); + if (newmem == 0) + return 0; /* propagate failure */ + + newp = mem2chunk(newmem); + newsize = chunksize(newp); + + /* + Avoid copy if newp is next chunk after oldp. + */ + if (newp == next) { + newsize += oldsize; + newp = oldp; + } + else { + /* + Unroll copy of <= 36 bytes (72 if 8byte sizes) + We know that contents have an odd number of + INTERNAL_SIZE_T-sized words; minimally 3. + */ + + copysize = oldsize - SIZE_SZ; + s = (INTERNAL_SIZE_T*)(oldmem); + d = (INTERNAL_SIZE_T*)(newmem); + ncopies = copysize / sizeof(INTERNAL_SIZE_T); + assert(ncopies >= 3); + + if (ncopies > 9) + MALLOC_COPY(d, s, copysize); + + else { + *(d+0) = *(s+0); + *(d+1) = *(s+1); + *(d+2) = *(s+2); + if (ncopies > 4) { + *(d+3) = *(s+3); + *(d+4) = *(s+4); + if (ncopies > 6) { + *(d+5) = *(s+5); + *(d+6) = *(s+6); + if (ncopies > 8) { + *(d+7) = *(s+7); + *(d+8) = *(s+8); + } + } + } + } + + fREe(oldmem); + check_inuse_chunk(newp); + return chunk2mem(newp); + } + } + } + + /* If possible, free extra space in old or extended chunk */ + + assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb)); + + remainder_size = newsize - nb; + + if (remainder_size < MINSIZE) { /* not enough extra to split off */ + set_head_size(newp, newsize); + set_inuse_bit_at_offset(newp, newsize); + } + else { /* split remainder */ + remainder = chunk_at_offset(newp, nb); + set_head_size(newp, nb); + set_head(remainder, remainder_size | PREV_INUSE); + /* Mark remainder as inuse so free() won't complain */ + set_inuse_bit_at_offset(remainder, remainder_size); + fREe(chunk2mem(remainder)); + } + + check_inuse_chunk(newp); + return chunk2mem(newp); + } + + /* + Handle mmap cases + */ + + else { +#if HAVE_MMAP + +#if HAVE_MREMAP + INTERNAL_SIZE_T offset = oldp->prev_size; + size_t pagemask = av->pagesize - 1; + char *cp; + CHUNK_SIZE_T sum; + + /* Note the extra SIZE_SZ overhead */ + newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask; + + /* don't need to remap if still within same page */ + if (oldsize == newsize - offset) + return oldmem; + + cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1); + + if (cp != (char*)MORECORE_FAILURE) { + + newp = (mchunkptr)(cp + offset); + set_head(newp, (newsize - offset)|IS_MMAPPED); + + assert(aligned_OK(chunk2mem(newp))); + assert((newp->prev_size == offset)); + + /* update statistics */ + sum = av->mmapped_mem += newsize - oldsize; + if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem)) + av->max_mmapped_mem = sum; + sum += av->sbrked_mem; + if (sum > (CHUNK_SIZE_T)(av->max_total_mem)) + av->max_total_mem = sum; + + return chunk2mem(newp); + } +#endif + + /* Note the extra SIZE_SZ overhead. 
*/ + if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ)) + newmem = oldmem; /* do nothing */ + else { + /* Must alloc, copy, free. */ + newmem = mALLOc(nb - MALLOC_ALIGN_MASK); + if (newmem != 0) { + MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ); + fREe(oldmem); + } + } + return newmem; + +#else + /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */ + check_malloc_state(); + MALLOC_FAILURE_ACTION; + return 0; +#endif + } +} + +/* + ------------------------------ memalign ------------------------------ +*/ + +#if __STD_C +Void_t* mEMALIGn(size_t alignment, size_t bytes) +#else +Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes; +#endif +{ + INTERNAL_SIZE_T nb; /* padded request size */ + char* m; /* memory returned by malloc call */ + mchunkptr p; /* corresponding chunk */ + char* brk; /* alignment point within p */ + mchunkptr newp; /* chunk to return */ + INTERNAL_SIZE_T newsize; /* its size */ + INTERNAL_SIZE_T leadsize; /* leading space before alignment point */ + mchunkptr remainder; /* spare room at end to split off */ + CHUNK_SIZE_T remainder_size; /* its size */ + INTERNAL_SIZE_T size; + + /* If need less alignment than we give anyway, just relay to malloc */ + + if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes); + + /* Otherwise, ensure that it is at least a minimum chunk size */ + + if (alignment < MINSIZE) alignment = MINSIZE; + + /* Make sure alignment is power of 2 (in case MINSIZE is not). */ + if ((alignment & (alignment - 1)) != 0) { + size_t a = MALLOC_ALIGNMENT * 2; + while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1; + alignment = a; + } + + checked_request2size(bytes, nb); + + /* + Strategy: find a spot within that chunk that meets the alignment + request, and then possibly free the leading and trailing space. + */ + + + /* Call malloc with worst case padding to hit alignment. */ + + m = (char*)(mALLOc(nb + alignment + MINSIZE)); + + if (m == 0) return 0; /* propagate failure */ + + p = mem2chunk(m); + + if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */ + + /* + Find an aligned spot inside chunk. Since we need to give back + leading space in a chunk of at least MINSIZE, if the first + calculation places us at a spot with less than MINSIZE leader, + we can move to the next aligned spot -- we've allocated enough + total room so that this is always possible. 
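+
+ Worked instance (hedged numbers: alignment = 64, 4-byte SIZE_SZ, so
+ mem2chunk subtracts 8):
+
+   // malloc returns m = 0x8028; (0x8028 + 63) & -64 == 0x8040
+   // brk = mem2chunk(0x8040) == 0x8038; leader = 0x8038 - 0x8020 = 24
+   // 24 >= MINSIZE, so no extra step by `alignment' is needed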
+ */ + + brk = (char*)mem2chunk((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) & + -((signed long) alignment))); + if ((CHUNK_SIZE_T)(brk - (char*)(p)) < MINSIZE) + brk += alignment; + + newp = (mchunkptr)brk; + leadsize = brk - (char*)(p); + newsize = chunksize(p) - leadsize; + + /* For mmapped chunks, just adjust offset */ + if (chunk_is_mmapped(p)) { + newp->prev_size = p->prev_size + leadsize; + set_head(newp, newsize|IS_MMAPPED); + return chunk2mem(newp); + } + + /* Otherwise, give back leader, use the rest */ + set_head(newp, newsize | PREV_INUSE); + set_inuse_bit_at_offset(newp, newsize); + set_head_size(p, leadsize); + fREe(chunk2mem(p)); + p = newp; + + assert (newsize >= nb && + (((PTR_UINT)(chunk2mem(p))) % alignment) == 0); + } + + /* Also give back spare room at the end */ + if (!chunk_is_mmapped(p)) { + size = chunksize(p); + if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) { + remainder_size = size - nb; + remainder = chunk_at_offset(p, nb); + set_head(remainder, remainder_size | PREV_INUSE); + set_head_size(p, nb); + fREe(chunk2mem(remainder)); + } + } + + check_inuse_chunk(p); + return chunk2mem(p); +} + +/* + ------------------------------ calloc ------------------------------ +*/ + +#if __STD_C +Void_t* cALLOc(size_t n_elements, size_t elem_size) +#else +Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size; +#endif +{ + mchunkptr p; + CHUNK_SIZE_T clearsize; + CHUNK_SIZE_T nclears; + INTERNAL_SIZE_T* d; + + Void_t* mem = mALLOc(n_elements * elem_size); + + if (mem != 0) { + p = mem2chunk(mem); + + if (!chunk_is_mmapped(p)) + { + /* + Unroll clear of <= 36 bytes (72 if 8byte sizes) + We know that contents have an odd number of + INTERNAL_SIZE_T-sized words; minimally 3. + */ + + d = (INTERNAL_SIZE_T*)mem; + clearsize = chunksize(p) - SIZE_SZ; + nclears = clearsize / sizeof(INTERNAL_SIZE_T); + assert(nclears >= 3); + + if (nclears > 9) + MALLOC_ZERO(d, clearsize); + + else { + *(d+0) = 0; + *(d+1) = 0; + *(d+2) = 0; + if (nclears > 4) { + *(d+3) = 0; + *(d+4) = 0; + if (nclears > 6) { + *(d+5) = 0; + *(d+6) = 0; + if (nclears > 8) { + *(d+7) = 0; + *(d+8) = 0; + } + } + } + } + } +#if ! MMAP_CLEARS + else + { + d = (INTERNAL_SIZE_T*)mem; + /* + Note the additional SIZE_SZ + */ + clearsize = chunksize(p) - 2*SIZE_SZ; + MALLOC_ZERO(d, clearsize); + } +#endif + } + return mem; +} + +/* + ------------------------------ cfree ------------------------------ +*/ + +#if __STD_C +void cFREe(Void_t *mem) +#else +void cFREe(mem) Void_t *mem; +#endif +{ + fREe(mem); +} + +/* + ------------------------- independent_calloc ------------------------- +*/ + +#if __STD_C +Void_t** iCALLOc(size_t n_elements, size_t elem_size, Void_t* chunks[]) +#else +Void_t** iCALLOc(n_elements, elem_size, chunks) size_t n_elements; size_t elem_size; Void_t* chunks[]; +#endif +{ + size_t sz = elem_size; /* serves as 1-element array */ + /* opts arg of 3 means all elements are same size, and should be cleared */ + return iALLOc(n_elements, &sz, 3, chunks); +} + +/* + ------------------------- independent_comalloc ------------------------- +*/ + +#if __STD_C +Void_t** iCOMALLOc(size_t n_elements, size_t sizes[], Void_t* chunks[]) +#else +Void_t** iCOMALLOc(n_elements, sizes, chunks) size_t n_elements; size_t sizes[]; Void_t* chunks[]; +#endif +{ + return iALLOc(n_elements, sizes, 0, chunks); +} + + +/* + ------------------------------ ialloc ------------------------------ + ialloc provides common support for independent_X routines, handling all of + the combinations that can result. 
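+
+ A typical use, as a hedged user-level sketch (assuming the usual
+ public name independent_calloc for iCALLOc; struct node and the
+ count are made up):
+
+   // struct node *pool[100];
+   // if (independent_calloc(100, sizeof(struct node), (void**)pool))
+   //   ... pool[0..99] now hold 100 zeroed, individually freeable
+   //       chunks carved from one aggregate allocation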
+ + The opts arg has: + bit 0 set if all elements are same size (using sizes[0]) + bit 1 set if elements should be zeroed +*/ + + +#if __STD_C +static Void_t** iALLOc(size_t n_elements, + size_t* sizes, + int opts, + Void_t* chunks[]) +#else +static Void_t** iALLOc(n_elements, sizes, opts, chunks) size_t n_elements; size_t* sizes; int opts; Void_t* chunks[]; +#endif +{ + mstate av = get_malloc_state(); + INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */ + INTERNAL_SIZE_T contents_size; /* total size of elements */ + INTERNAL_SIZE_T array_size; /* request size of pointer array */ + Void_t* mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */ + Void_t** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + int mmx; /* to disable mmap */ + INTERNAL_SIZE_T size; + size_t i; + + /* Ensure initialization */ + if (av->max_fast == 0) malloc_consolidate(av); + + /* compute array length, if needed */ + if (chunks != 0) { + if (n_elements == 0) + return chunks; /* nothing to do */ + marray = chunks; + array_size = 0; + } + else { + /* if empty req, must still return chunk representing empty array */ + if (n_elements == 0) + return (Void_t**) mALLOc(0); + marray = 0; + array_size = request2size(n_elements * (sizeof(Void_t*))); + } + + /* compute total element size */ + if (opts & 0x1) { /* all-same-size */ + element_size = request2size(*sizes); + contents_size = n_elements * element_size; + } + else { /* add up all the sizes */ + element_size = 0; + contents_size = 0; + for (i = 0; i != n_elements; ++i) + contents_size += request2size(sizes[i]); + } + + /* subtract out alignment bytes from total to minimize overallocation */ + size = contents_size + array_size - MALLOC_ALIGN_MASK; + + /* + Allocate the aggregate chunk. + But first disable mmap so malloc won't use it, since + we would not be able to later free/realloc space internal + to a segregated mmap region. 
+ */ + mmx = av->n_mmaps_max; /* disable mmap */ + av->n_mmaps_max = 0; + mem = mALLOc(size); + av->n_mmaps_max = mmx; /* reset mmap */ + if (mem == 0) + return 0; + + p = mem2chunk(mem); + assert(!chunk_is_mmapped(p)); + remainder_size = chunksize(p); + + if (opts & 0x2) { /* optionally clear the elements */ + MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size); + } + + /* If not provided, allocate the pointer array as final part of chunk */ + if (marray == 0) { + array_chunk = chunk_at_offset(p, contents_size); + marray = (Void_t**) (chunk2mem(array_chunk)); + set_head(array_chunk, (remainder_size - contents_size) | PREV_INUSE); + remainder_size = contents_size; + } + + /* split out elements */ + for (i = 0; ; ++i) { + marray[i] = chunk2mem(p); + if (i != n_elements-1) { + if (element_size != 0) + size = element_size; + else + size = request2size(sizes[i]); + remainder_size -= size; + set_head(p, size | PREV_INUSE); + p = chunk_at_offset(p, size); + } + else { /* the final element absorbs any overallocation slop */ + set_head(p, remainder_size | PREV_INUSE); + break; + } + } + +#if DEBUG + if (marray != chunks) { + /* final element must have exactly exhausted chunk */ + if (element_size != 0) + assert(remainder_size == element_size); + else + assert(remainder_size == request2size(sizes[i])); + check_inuse_chunk(mem2chunk(marray)); + } + + for (i = 0; i != n_elements; ++i) + check_inuse_chunk(mem2chunk(marray[i])); +#endif + + return marray; +} + + +/* + ------------------------------ valloc ------------------------------ +*/ + +#if __STD_C +Void_t* vALLOc(size_t bytes) +#else +Void_t* vALLOc(bytes) size_t bytes; +#endif +{ + /* Ensure initialization */ + mstate av = get_malloc_state(); + if (av->max_fast == 0) malloc_consolidate(av); + return mEMALIGn(av->pagesize, bytes); +} + +/* + ------------------------------ pvalloc ------------------------------ +*/ + + +#if __STD_C +Void_t* pVALLOc(size_t bytes) +#else +Void_t* pVALLOc(bytes) size_t bytes; +#endif +{ + mstate av = get_malloc_state(); + size_t pagesz; + + /* Ensure initialization */ + if (av->max_fast == 0) malloc_consolidate(av); + pagesz = av->pagesize; + return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1)); +} + + +/* + ------------------------------ malloc_trim ------------------------------ +*/ + +#if __STD_C +int mTRIm(size_t pad) +#else +int mTRIm(pad) size_t pad; +#endif +{ + mstate av = get_malloc_state(); + /* Ensure initialization/consolidation */ + malloc_consolidate(av); + +#ifndef MORECORE_CANNOT_TRIM + return sYSTRIm(pad, av); +#else + return 0; +#endif +} + + +/* + ------------------------- malloc_usable_size ------------------------- +*/ + +#if __STD_C +size_t mUSABLe(Void_t* mem) +#else +size_t mUSABLe(mem) Void_t* mem; +#endif +{ + mchunkptr p; + if (mem != 0) { + p = mem2chunk(mem); + if (chunk_is_mmapped(p)) + return chunksize(p) - 2*SIZE_SZ; + else if (inuse(p)) + return chunksize(p) - SIZE_SZ; + } + return 0; +} + +/* + ------------------------------ mallinfo ------------------------------ +*/ + +struct mallinfo mALLINFo() +{ + mstate av = get_malloc_state(); + struct mallinfo mi; + int i; + mbinptr b; + mchunkptr p; + INTERNAL_SIZE_T avail; + INTERNAL_SIZE_T fastavail; + int nblocks; + int nfastblocks; + + /* Ensure initialization */ + if (av->top == 0) malloc_consolidate(av); + + check_malloc_state(); + + /* Account for top */ + avail = chunksize(av->top); + nblocks = 1; /* top always exists */ + + /* traverse fastbins */ + nfastblocks = 0; + fastavail = 0; + + for (i = 0; i < NFASTBINS; ++i) 
{ + for (p = av->fastbins[i]; p != 0; p = p->fd) { + ++nfastblocks; + fastavail += chunksize(p); + } + } + + avail += fastavail; + + /* traverse regular bins */ + for (i = 1; i < NBINS; ++i) { + b = bin_at(av, i); + for (p = last(b); p != b; p = p->bk) { + ++nblocks; + avail += chunksize(p); + } + } + + mi.smblks = nfastblocks; + mi.ordblks = nblocks; + mi.fordblks = avail; + mi.uordblks = av->sbrked_mem - avail; + mi.arena = av->sbrked_mem; + mi.hblks = av->n_mmaps; + mi.hblkhd = av->mmapped_mem; + mi.fsmblks = fastavail; + mi.keepcost = chunksize(av->top); + mi.usmblks = av->max_total_mem; + return mi; +} + +/* + ------------------------------ malloc_stats ------------------------------ +*/ + +void mSTATs() +{ + struct mallinfo mi = mALLINFo(); + +#ifdef WIN32 + { + CHUNK_SIZE_T free, reserved, committed; + vminfo (&free, &reserved, &committed); + fprintf(stderr, "free bytes = %10lu\n", + free); + fprintf(stderr, "reserved bytes = %10lu\n", + reserved); + fprintf(stderr, "committed bytes = %10lu\n", + committed); + } +#endif + + + fprintf(stderr, "max system bytes = %10lu\n", + (CHUNK_SIZE_T)(mi.usmblks)); + fprintf(stderr, "system bytes = %10lu\n", + (CHUNK_SIZE_T)(mi.arena + mi.hblkhd)); + fprintf(stderr, "in use bytes = %10lu\n", + (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd)); + +#ifdef WIN32 + { + CHUNK_SIZE_T kernel, user; + if (cpuinfo (TRUE, &kernel, &user)) { + fprintf(stderr, "kernel ms = %10lu\n", + kernel); + fprintf(stderr, "user ms = %10lu\n", + user); + } + } +#endif +} + + +/* + ------------------------------ mallopt ------------------------------ +*/ + +#if __STD_C +int mALLOPt(int param_number, int value) +#else +int mALLOPt(param_number, value) int param_number; int value; +#endif +{ + mstate av = get_malloc_state(); + /* Ensure initialization/consolidation */ + malloc_consolidate(av); + + switch(param_number) { + case M_MXFAST: + if (value >= 0 && value <= MAX_FAST_SIZE) { + set_max_fast(av, value); + return 1; + } + else + return 0; + + case M_TRIM_THRESHOLD: + av->trim_threshold = value; + return 1; + + case M_TOP_PAD: + av->top_pad = value; + return 1; + + case M_MMAP_THRESHOLD: + av->mmap_threshold = value; + return 1; + + case M_MMAP_MAX: +#if !HAVE_MMAP + if (value != 0) + return 0; +#endif + av->n_mmaps_max = value; + return 1; + + default: + return 0; + } +} + + +/* + -------------------- Alternative MORECORE functions -------------------- +*/ + + +/* + General Requirements for MORECORE. + + The MORECORE function must have the following properties: + + If MORECORE_CONTIGUOUS is false: + + * MORECORE must allocate in multiples of pagesize. It will + only be called with arguments that are multiples of pagesize. + + * MORECORE(0) must return an address that is at least + MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.) + + else (i.e. If MORECORE_CONTIGUOUS is true): + + * Consecutive calls to MORECORE with positive arguments + return increasing addresses, indicating that space has been + contiguously extended. + + * MORECORE need not allocate in multiples of pagesize. + Calls to MORECORE need not have args of multiples of pagesize. + + * MORECORE need not page-align. + + In either case: + + * MORECORE may allocate more memory than requested. (Or even less, + but this will generally result in a malloc failure.) + + * MORECORE must not allocate memory when given argument zero, but + instead return one past the end address of memory from previous + nonzero call. 
This malloc does NOT call MORECORE(0)
+ until at least one call with positive arguments is made, so
+ the initial value returned is not important.
+
+ * Even though consecutive calls to MORECORE need not return contiguous
+ addresses, it must be OK for malloc'ed chunks to span multiple
+ regions in those cases where they do happen to be contiguous.
+
+ * MORECORE need not handle negative arguments -- it may instead
+ just return MORECORE_FAILURE when given negative arguments.
+ Negative arguments are always multiples of pagesize. MORECORE
+ must not misinterpret negative args as large positive unsigned
+ args. You can suppress all such calls from even occurring by defining
+ MORECORE_CANNOT_TRIM.
+
+ There is some variation across systems about the type of the
+ argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
+ actually be size_t, because sbrk supports negative args, so it is
+ normally the signed type of the same width as size_t (sometimes
+ declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
+ matter though. Internally, we use "long" as arguments, which should
+ work across all reasonable possibilities.
+
+ Additionally, if MORECORE ever returns failure for a positive
+ request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
+ system allocator. This is a useful backup strategy for systems with
+ holes in address spaces -- in this case sbrk cannot contiguously
+ expand the heap, but mmap may be able to map noncontiguous space.
+
+ If you'd like mmap to ALWAYS be used, you can define MORECORE to be
+ a function that always returns MORECORE_FAILURE.
+
+ Malloc only has limited ability to detect failures of MORECORE
+ to supply contiguous space when it says it can. In particular,
+ multithreaded programs that do not use locks may result in
+ race conditions across calls to MORECORE that result in gaps
+ that cannot be detected as such, and subsequent corruption.
+
+ If you are using this malloc with something other than sbrk (or its
+ emulation) to supply memory regions, you probably want to set
+ MORECORE_CONTIGUOUS as false. As an example, here is a custom
+ allocator kindly contributed for pre-OSX macOS. It uses virtually
+ but not necessarily physically contiguous non-paged memory (locked
+ in, present and won't get swapped out). You can use it by
+ uncommenting this section, adding some #includes, and setting up the
+ appropriate defines above:
+
+ #define MORECORE osMoreCore
+ #define MORECORE_CONTIGUOUS 0
+
+ There is also a shutdown routine that should somehow be called for
+ cleanup upon program exit.
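+
+ (As an aside, a minimal sketch of the "always use mmap" setup
+ mentioned above. The name forceMmapMoreCore is purely illustrative
+ and not part of this file, and HAVE_MMAP must be nonzero for the
+ mmap fallback to engage:
+
+ static void *forceMmapMoreCore(long size)
+ {
+     /* refuse every sbrk-style request so mmap is always used */
+     return (void *) MORECORE_FAILURE;
+ }
+ #define MORECORE forceMmapMoreCore
+ #define MORECORE_CONTIGUOUS 0
+
+ With these defines, every positive request falls through to the
+ noncontiguous mmap path described above.)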
+ + #define MAX_POOL_ENTRIES 100 + #define MINIMUM_MORECORE_SIZE (64 * 1024) + static int next_os_pool; + void *our_os_pools[MAX_POOL_ENTRIES]; + + void *osMoreCore(int size) + { + void *ptr = 0; + static void *sbrk_top = 0; + + if (size > 0) + { + if (size < MINIMUM_MORECORE_SIZE) + size = MINIMUM_MORECORE_SIZE; + if (CurrentExecutionLevel() == kTaskLevel) + ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); + if (ptr == 0) + { + return (void *) MORECORE_FAILURE; + } + // save ptrs so they can be freed during cleanup + our_os_pools[next_os_pool] = ptr; + next_os_pool++; + ptr = (void *) ((((CHUNK_SIZE_T) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); + sbrk_top = (char *) ptr + size; + return ptr; + } + else if (size < 0) + { + // we don't currently support shrink behavior + return (void *) MORECORE_FAILURE; + } + else + { + return sbrk_top; + } + } + + // cleanup any allocated memory pools + // called as last thing before shutting down driver + + void osCleanupMem(void) + { + void **ptr; + + for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) + if (*ptr) + { + PoolDeallocate(*ptr); + *ptr = 0; + } + } + +*/ + + +/* + -------------------------------------------------------------- + + Emulation of sbrk for win32. + Donated by J. Walter . + For additional information about this code, and malloc on Win32, see + http://www.genesys-e.de/jwalter/ +*/ + + +#ifdef WIN32 + +#ifdef _DEBUG +/* #define TRACE */ +#endif + +/* Support for USE_MALLOC_LOCK */ +#ifdef USE_MALLOC_LOCK + +/* Wait for spin lock */ +static int slwait (int *sl) { + while (InterlockedCompareExchange ((void **) sl, (void *) 1, (void *) 0) != 0) + Sleep (0); + return 0; +} + +/* Release spin lock */ +static int slrelease (int *sl) { + InterlockedExchange (sl, 0); + return 0; +} + +#ifdef NEEDED +/* Spin lock for emulation code */ +static int g_sl; +#endif + +#endif /* USE_MALLOC_LOCK */ + +/* getpagesize for windows */ +static long getpagesize (void) { + static long g_pagesize = 0; + if (! g_pagesize) { + SYSTEM_INFO system_info; + GetSystemInfo (&system_info); + g_pagesize = system_info.dwPageSize; + } + return g_pagesize; +} +static long getregionsize (void) { + static long g_regionsize = 0; + if (! g_regionsize) { + SYSTEM_INFO system_info; + GetSystemInfo (&system_info); + g_regionsize = system_info.dwAllocationGranularity; + } + return g_regionsize; +} + +/* A region list entry */ +typedef struct _region_list_entry { + void *top_allocated; + void *top_committed; + void *top_reserved; + long reserve_size; + struct _region_list_entry *previous; +} region_list_entry; + +/* Allocate and link a region entry in the region list */ +static int region_list_append (region_list_entry **last, void *base_reserved, long reserve_size) { + region_list_entry *next = HeapAlloc (GetProcessHeap (), 0, sizeof (region_list_entry)); + if (! next) + return FALSE; + next->top_allocated = (char *) base_reserved; + next->top_committed = (char *) base_reserved; + next->top_reserved = (char *) base_reserved + reserve_size; + next->reserve_size = reserve_size; + next->previous = *last; + *last = next; + return TRUE; +} +/* Free and unlink the last region entry from the region list */ +static int region_list_remove (region_list_entry **last) { + region_list_entry *previous = (*last)->previous; + if (! 
HeapFree (GetProcessHeap (), sizeof (region_list_entry), *last)) + return FALSE; + *last = previous; + return TRUE; +} + +#define CEIL(size,to) (((size)+(to)-1)&~((to)-1)) +#define FLOOR(size,to) ((size)&~((to)-1)) + +#define SBRK_SCALE 0 +/* #define SBRK_SCALE 1 */ +/* #define SBRK_SCALE 2 */ +/* #define SBRK_SCALE 4 */ + +/* sbrk for windows */ +static void *sbrk (long size) { + static long g_pagesize, g_my_pagesize; + static long g_regionsize, g_my_regionsize; + static region_list_entry *g_last; + void *result = (void *) MORECORE_FAILURE; +#ifdef TRACE + printf ("sbrk %d\n", size); +#endif +#if defined (USE_MALLOC_LOCK) && defined (NEEDED) + /* Wait for spin lock */ + slwait (&g_sl); +#endif + /* First time initialization */ + if (! g_pagesize) { + g_pagesize = getpagesize (); + g_my_pagesize = g_pagesize << SBRK_SCALE; + } + if (! g_regionsize) { + g_regionsize = getregionsize (); + g_my_regionsize = g_regionsize << SBRK_SCALE; + } + if (! g_last) { + if (! region_list_append (&g_last, 0, 0)) + goto sbrk_exit; + } + /* Assert invariants */ + assert (g_last); + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated && + g_last->top_allocated <= g_last->top_committed); + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed && + g_last->top_committed <= g_last->top_reserved && + (unsigned) g_last->top_committed % g_pagesize == 0); + assert ((unsigned) g_last->top_reserved % g_regionsize == 0); + assert ((unsigned) g_last->reserve_size % g_regionsize == 0); + /* Allocation requested? */ + if (size >= 0) { + /* Allocation size is the requested size */ + long allocate_size = size; + /* Compute the size to commit */ + long to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed; + /* Do we reach the commit limit? */ + if (to_commit > 0) { + /* Round size to commit */ + long commit_size = CEIL (to_commit, g_my_pagesize); + /* Compute the size to reserve */ + long to_reserve = (char *) g_last->top_committed + commit_size - (char *) g_last->top_reserved; + /* Do we reach the reserve limit? */ + if (to_reserve > 0) { + /* Compute the remaining size to commit in the current region */ + long remaining_commit_size = (char *) g_last->top_reserved - (char *) g_last->top_committed; + if (remaining_commit_size > 0) { + /* Assert preconditions */ + assert ((unsigned) g_last->top_committed % g_pagesize == 0); + assert (0 < remaining_commit_size && remaining_commit_size % g_pagesize == 0); { + /* Commit this */ + void *base_committed = VirtualAlloc (g_last->top_committed, remaining_commit_size, + MEM_COMMIT, PAGE_READWRITE); + /* Check returned pointer for consistency */ + if (base_committed != g_last->top_committed) + goto sbrk_exit; + /* Assert postconditions */ + assert ((unsigned) base_committed % g_pagesize == 0); +#ifdef TRACE + printf ("Commit %p %d\n", base_committed, remaining_commit_size); +#endif + /* Adjust the regions commit top */ + g_last->top_committed = (char *) base_committed + remaining_commit_size; + } + } { + /* Now we are going to search and reserve. 
*/ + int contiguous = -1; + int found = FALSE; + MEMORY_BASIC_INFORMATION memory_info; + void *base_reserved; + long reserve_size; + do { + /* Assume contiguous memory */ + contiguous = TRUE; + /* Round size to reserve */ + reserve_size = CEIL (to_reserve, g_my_regionsize); + /* Start with the current region's top */ + memory_info.BaseAddress = g_last->top_reserved; + /* Assert preconditions */ + assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0); + assert (0 < reserve_size && reserve_size % g_regionsize == 0); + while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) { + /* Assert postconditions */ + assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0); +#ifdef TRACE + printf ("Query %p %d %s\n", memory_info.BaseAddress, memory_info.RegionSize, + memory_info.State == MEM_FREE ? "FREE": + (memory_info.State == MEM_RESERVE ? "RESERVED": + (memory_info.State == MEM_COMMIT ? "COMMITTED": "?"))); +#endif + /* Region is free, well aligned and big enough: we are done */ + if (memory_info.State == MEM_FREE && + (unsigned) memory_info.BaseAddress % g_regionsize == 0 && + memory_info.RegionSize >= (unsigned) reserve_size) { + found = TRUE; + break; + } + /* From now on we can't get contiguous memory! */ + contiguous = FALSE; + /* Recompute size to reserve */ + reserve_size = CEIL (allocate_size, g_my_regionsize); + memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize; + /* Assert preconditions */ + assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0); + assert (0 < reserve_size && reserve_size % g_regionsize == 0); + } + /* Search failed? */ + if (! found) + goto sbrk_exit; + /* Assert preconditions */ + assert ((unsigned) memory_info.BaseAddress % g_regionsize == 0); + assert (0 < reserve_size && reserve_size % g_regionsize == 0); + /* Try to reserve this */ + base_reserved = VirtualAlloc (memory_info.BaseAddress, reserve_size, + MEM_RESERVE, PAGE_NOACCESS); + if (! base_reserved) { + int rc = GetLastError (); + if (rc != ERROR_INVALID_ADDRESS) + goto sbrk_exit; + } + /* A null pointer signals (hopefully) a race condition with another thread. */ + /* In this case, we try again. */ + } while (! base_reserved); + /* Check returned pointer for consistency */ + if (memory_info.BaseAddress && base_reserved != memory_info.BaseAddress) + goto sbrk_exit; + /* Assert postconditions */ + assert ((unsigned) base_reserved % g_regionsize == 0); +#ifdef TRACE + printf ("Reserve %p %d\n", base_reserved, reserve_size); +#endif + /* Did we get contiguous memory? */ + if (contiguous) { + long start_size = (char *) g_last->top_committed - (char *) g_last->top_allocated; + /* Adjust allocation size */ + allocate_size -= start_size; + /* Adjust the regions allocation top */ + g_last->top_allocated = g_last->top_committed; + /* Recompute the size to commit */ + to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed; + /* Round size to commit */ + commit_size = CEIL (to_commit, g_my_pagesize); + } + /* Append the new region to the list */ + if (! region_list_append (&g_last, base_reserved, reserve_size)) + goto sbrk_exit; + /* Didn't we get contiguous memory? */ + if (! 
contiguous) { + /* Recompute the size to commit */ + to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed; + /* Round size to commit */ + commit_size = CEIL (to_commit, g_my_pagesize); + } + } + } + /* Assert preconditions */ + assert ((unsigned) g_last->top_committed % g_pagesize == 0); + assert (0 < commit_size && commit_size % g_pagesize == 0); { + /* Commit this */ + void *base_committed = VirtualAlloc (g_last->top_committed, commit_size, + MEM_COMMIT, PAGE_READWRITE); + /* Check returned pointer for consistency */ + if (base_committed != g_last->top_committed) + goto sbrk_exit; + /* Assert postconditions */ + assert ((unsigned) base_committed % g_pagesize == 0); +#ifdef TRACE + printf ("Commit %p %d\n", base_committed, commit_size); +#endif + /* Adjust the regions commit top */ + g_last->top_committed = (char *) base_committed + commit_size; + } + } + /* Adjust the regions allocation top */ + g_last->top_allocated = (char *) g_last->top_allocated + allocate_size; + result = (char *) g_last->top_allocated - size; + /* Deallocation requested? */ + } else if (size < 0) { + long deallocate_size = - size; + /* As long as we have a region to release */ + while ((char *) g_last->top_allocated - deallocate_size < (char *) g_last->top_reserved - g_last->reserve_size) { + /* Get the size to release */ + long release_size = g_last->reserve_size; + /* Get the base address */ + void *base_reserved = (char *) g_last->top_reserved - release_size; + /* Assert preconditions */ + assert ((unsigned) base_reserved % g_regionsize == 0); + assert (0 < release_size && release_size % g_regionsize == 0); { + /* Release this */ + int rc = VirtualFree (base_reserved, 0, + MEM_RELEASE); + /* Check returned code for consistency */ + if (! rc) + goto sbrk_exit; +#ifdef TRACE + printf ("Release %p %d\n", base_reserved, release_size); +#endif + } + /* Adjust deallocation size */ + deallocate_size -= (char *) g_last->top_allocated - (char *) base_reserved; + /* Remove the old region from the list */ + if (! region_list_remove (&g_last)) + goto sbrk_exit; + } { + /* Compute the size to decommit */ + long to_decommit = (char *) g_last->top_committed - ((char *) g_last->top_allocated - deallocate_size); + if (to_decommit >= g_my_pagesize) { + /* Compute the size to decommit */ + long decommit_size = FLOOR (to_decommit, g_my_pagesize); + /* Compute the base address */ + void *base_committed = (char *) g_last->top_committed - decommit_size; + /* Assert preconditions */ + assert ((unsigned) base_committed % g_pagesize == 0); + assert (0 < decommit_size && decommit_size % g_pagesize == 0); { + /* Decommit this */ + int rc = VirtualFree ((char *) base_committed, decommit_size, + MEM_DECOMMIT); + /* Check returned code for consistency */ + if (! 
rc) + goto sbrk_exit; +#ifdef TRACE + printf ("Decommit %p %d\n", base_committed, decommit_size); +#endif + } + /* Adjust deallocation size and regions commit and allocate top */ + deallocate_size -= (char *) g_last->top_allocated - (char *) base_committed; + g_last->top_committed = base_committed; + g_last->top_allocated = base_committed; + } + } + /* Adjust regions allocate top */ + g_last->top_allocated = (char *) g_last->top_allocated - deallocate_size; + /* Check for underflow */ + if ((char *) g_last->top_reserved - g_last->reserve_size > (char *) g_last->top_allocated || + g_last->top_allocated > g_last->top_committed) { + /* Adjust regions allocate top */ + g_last->top_allocated = (char *) g_last->top_reserved - g_last->reserve_size; + goto sbrk_exit; + } + result = g_last->top_allocated; + } + /* Assert invariants */ + assert (g_last); + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated && + g_last->top_allocated <= g_last->top_committed); + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed && + g_last->top_committed <= g_last->top_reserved && + (unsigned) g_last->top_committed % g_pagesize == 0); + assert ((unsigned) g_last->top_reserved % g_regionsize == 0); + assert ((unsigned) g_last->reserve_size % g_regionsize == 0); + +sbrk_exit: +#if defined (USE_MALLOC_LOCK) && defined (NEEDED) + /* Release spin lock */ + slrelease (&g_sl); +#endif + return result; +} + +/* mmap for windows */ +static void *mmap (void *ptr, long size, long prot, long type, long handle, long arg) { + static long g_pagesize; + static long g_regionsize; +#ifdef TRACE + printf ("mmap %d\n", size); +#endif +#if defined (USE_MALLOC_LOCK) && defined (NEEDED) + /* Wait for spin lock */ + slwait (&g_sl); +#endif + /* First time initialization */ + if (! g_pagesize) + g_pagesize = getpagesize (); + if (! g_regionsize) + g_regionsize = getregionsize (); + /* Assert preconditions */ + assert ((unsigned) ptr % g_regionsize == 0); + assert (size % g_pagesize == 0); + /* Allocate this */ + ptr = VirtualAlloc (ptr, size, + MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, PAGE_READWRITE); + if (! ptr) { + ptr = (void *) MORECORE_FAILURE; + goto mmap_exit; + } + /* Assert postconditions */ + assert ((unsigned) ptr % g_regionsize == 0); +#ifdef TRACE + printf ("Commit %p %d\n", ptr, size); +#endif +mmap_exit: +#if defined (USE_MALLOC_LOCK) && defined (NEEDED) + /* Release spin lock */ + slrelease (&g_sl); +#endif + return ptr; +} + +/* munmap for windows */ +static long munmap (void *ptr, long size) { + static long g_pagesize; + static long g_regionsize; + int rc = MUNMAP_FAILURE; +#ifdef TRACE + printf ("munmap %p %d\n", ptr, size); +#endif +#if defined (USE_MALLOC_LOCK) && defined (NEEDED) + /* Wait for spin lock */ + slwait (&g_sl); +#endif + /* First time initialization */ + if (! g_pagesize) + g_pagesize = getpagesize (); + if (! g_regionsize) + g_regionsize = getregionsize (); + /* Assert preconditions */ + assert ((unsigned) ptr % g_regionsize == 0); + assert (size % g_pagesize == 0); + /* Free this */ + if (! 
VirtualFree (ptr, 0,
+ MEM_RELEASE))
+ goto munmap_exit;
+ rc = 0;
+#ifdef TRACE
+ printf ("Release %p %d\n", ptr, size);
+#endif
+munmap_exit:
+#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
+ /* Release spin lock */
+ slrelease (&g_sl);
+#endif
+ return rc;
+}
+
+static void vminfo (CHUNK_SIZE_T *free, CHUNK_SIZE_T *reserved, CHUNK_SIZE_T *committed) {
+ MEMORY_BASIC_INFORMATION memory_info;
+ memory_info.BaseAddress = 0;
+ *free = *reserved = *committed = 0;
+ while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
+ switch (memory_info.State) {
+ case MEM_FREE:
+ *free += memory_info.RegionSize;
+ break;
+ case MEM_RESERVE:
+ *reserved += memory_info.RegionSize;
+ break;
+ case MEM_COMMIT:
+ *committed += memory_info.RegionSize;
+ break;
+ }
+ memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
+ }
+}
+
+static int cpuinfo (int whole, CHUNK_SIZE_T *kernel, CHUNK_SIZE_T *user) {
+ if (whole) {
+ __int64 creation64, exit64, kernel64, user64;
+ int rc = GetProcessTimes (GetCurrentProcess (),
+ (FILETIME *) &creation64,
+ (FILETIME *) &exit64,
+ (FILETIME *) &kernel64,
+ (FILETIME *) &user64);
+ if (! rc) {
+ *kernel = 0;
+ *user = 0;
+ return FALSE;
+ }
+ *kernel = (CHUNK_SIZE_T) (kernel64 / 10000);
+ *user = (CHUNK_SIZE_T) (user64 / 10000);
+ return TRUE;
+ } else {
+ __int64 creation64, exit64, kernel64, user64;
+ int rc = GetThreadTimes (GetCurrentThread (),
+ (FILETIME *) &creation64,
+ (FILETIME *) &exit64,
+ (FILETIME *) &kernel64,
+ (FILETIME *) &user64);
+ if (! rc) {
+ *kernel = 0;
+ *user = 0;
+ return FALSE;
+ }
+ *kernel = (CHUNK_SIZE_T) (kernel64 / 10000);
+ *user = (CHUNK_SIZE_T) (user64 / 10000);
+ return TRUE;
+ }
+}
+
+#endif /* WIN32 */
+
+/* ------------------------------------------------------------
+History:
+ V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+ * Fix malloc_state bitmap array misdeclaration
+
+ V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
+ * Allow tuning of FIRST_SORTED_BIN_SIZE
+ * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
+ * Better detection and support for non-contiguousness of MORECORE.
+ Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
+ * Bypass most of malloc if no frees. Thanks to Emery Berger.
+ * Fix freeing of old top non-contiguous chunk in sysmalloc.
+ * Raised default trim and map thresholds to 256K.
+ * Fix mmap-related #defines. Thanks to Lubos Lunak.
+ * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
+ * Branch-free bin calculation
+ * Default trim and mmap thresholds now 256K.
+
+ V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
+ * Introduce independent_comalloc and independent_calloc.
+ Thanks to Michael Pachos for motivation and help.
+ * Make optional .h file available
+ * Allow > 2GB requests on 32bit systems.
+ * new WIN32 sbrk, mmap, munmap, lock code from .
+ Thanks also to Andreas Mueller ,
+ and Anonymous.
+ * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
+ helping test this.)
+ * memalign: check alignment arg
+ * realloc: don't try to shift chunks backwards, since this
+ leads to more fragmentation in some programs and doesn't
+ seem to help in any others.
+ * Collect all cases in malloc requiring system memory into sYSMALLOc
+ * Use mmap as backup to sbrk
+ * Place all internal state in malloc_state
+ * Introduce fastbins (although similar to 2.5.1)
+ * Many minor tunings and cosmetic improvements
+ * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
+ * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
+ Thanks to Tony E. Bennett and others.
+ * Include errno.h to support default failure action.
+
+ V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
+ * return null for negative arguments
+ * Added several WIN32 cleanups from Martin C. Fong
+ * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
+ (e.g. WIN32 platforms)
+ * Cleanup header file inclusion for WIN32 platforms
+ * Cleanup code to avoid Microsoft Visual C++ compiler complaints
+ * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
+ memory allocation routines
+ * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
+ * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
+ usage of 'assert' in non-WIN32 code
+ * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
+ avoid infinite loop
+ * Always call 'fREe()' rather than 'free()'
+
+ V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
+ * Fixed ordering problem with boundary-stamping
+
+ V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
+ * Added pvalloc, as recommended by H.J. Liu
+ * Added 64bit pointer support mainly from Wolfram Gloger
+ * Added anonymously donated WIN32 sbrk emulation
+ * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
+ * malloc_extend_top: fix mask error that caused wastage after
+ foreign sbrks
+ * Add linux mremap support code from HJ Liu
+
+ V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
+ * Integrated most documentation with the code.
+ * Add support for mmap, with help from
+ Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
+ * Use last_remainder in more cases.
+ * Pack bins using idea from colin@nyx10.cs.du.edu
+ * Use ordered bins instead of best-fit threshold
+ * Eliminate block-local decls to simplify tracing and debugging.
+ * Support another case of realloc via move into top
+ * Fix error occurring when initial sbrk_base not word-aligned.
+ * Rely on page size for units instead of SBRK_UNIT to
+ avoid surprises about sbrk alignment conventions.
+ * Add mallinfo, mallopt. Thanks to Raymond Nijssen
+ (raymond@es.ele.tue.nl) for the suggestion.
+ * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
+ * More precautions for cases where other routines call sbrk,
+ courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
+ * Added macros etc., allowing use in linux libc from
+ H.J. Lu (hjl@gnu.ai.mit.edu)
+ * Inverted this history list
+
+ V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
+ * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
+ * Removed all preallocation code since under current scheme
+ the work required to undo bad preallocations exceeds
+ the work saved in good cases for most test programs.
+ * No longer use return list or unconsolidated bins since
+ no scheme using them consistently outperforms those that don't
+ given above changes.
+ * Use best fit for very large chunks to prevent some worst-cases.
+ * Added some support for debugging
+
+ V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
+ * Removed footers when chunks are in use. Thanks to
+ Paul Wilson (wilson@cs.texas.edu) for the suggestion.
+ + V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) + * Added malloc_trim, with help from Wolfram Gloger + (wmglo@Dent.MED.Uni-Muenchen.DE). + + V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) + + V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) + * realloc: try to expand in both directions + * malloc: swap order of clean-bin strategy; + * realloc: only conditionally expand backwards + * Try not to scavenge used bins + * Use bin counts as a guide to preallocation + * Occasionally bin return list chunks in first scan + * Add a few optimizations from colin@nyx10.cs.du.edu + + V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) + * faster bin computation & slightly different binning + * merged all consolidations to one part of malloc proper + (eliminating old malloc_find_space & malloc_clean_bin) + * Scan 2 returns chunks (not just 1) + * Propagate failure in realloc if malloc returns 0 + * Add stuff to allow compilation on non-ANSI compilers + from kpv@research.att.com + + V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) + * removed potential for odd address access in prev_chunk + * removed dependency on getpagesize.h + * misc cosmetics and a bit more internal documentation + * anticosmetics: mangled names in macros to evade debugger strangeness + * tested on sparc, hp-700, dec-mips, rs6000 + with gcc & native cc (hp, dec only) allowing + Detlefs & Zorn comparison study (in SIGPLAN Notices.) + + Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) + * Based loosely on libg++-1.2X malloc. (It retains some of the overall + structure of old version, but most details differ.) + +*/ diff --git a/qemu-tccboot b/qemu-tccboot new file mode 100755 index 0000000..0335770 --- /dev/null +++ b/qemu-tccboot @@ -0,0 +1,6 @@ +#!/bin/sh +~/qemu-current/i386-softmmu/qemu -snapshot -user-net -serial stdio \ +-kernel tccboot \ +-initrd initrd.img \ +-append "root=/dev/hda" \ +-hda example.romfs diff --git a/tccboot b/tccboot new file mode 100644 index 0000000000000000000000000000000000000000..f3c5a0a797e5925b07a89910b081166595e70a86 GIT binary patch literal 141792 zcmeFadq7mx`ZvA@1{i2?kBUZxie;slCZ!dbiqMc0+3r0~mP8LbF2gf=0HcO&t^uB6yhh^Q^T8sm}NPz5oBdlzXqe zF3)<_v!3;=XFcm#YpOyCYa6Si%C(_Ql4VrKsL0v&H-5N4k}9{JnzFWWZG(1p?a|H4 zq@fEwz3!j!1?_Ll8&>DEj#!{gS+M5jm@S`L-g@WvQi>SE5x{A5FCEUy`UKWQ0aF zRqrIYHX(n6KYu)$LPPeWl+Ov+C=LW0K|ln3*%&E;Q{bb(?K| ztyUdtofuzh@g<{K88^lFVk0-9Wtx>>?^=?$CRD!7u{M;^RO@1PX2X2-*|CT zoTPm>gAJ9+&%`Z}tDOBtZr|ss^or%O4{wH-DH*Ye5ty| zS|#^OZ?INgo|1L>ZpX2oY6T%{C27O}IWalu_J`*$xc{O1MC<>@XPiYkJe*P^5ld{| zo5t8_3l_U$V|aeY2MLlzDwiY&|D=b-mR|X7viOWdBG)m=+MD*jf&Znz|5D)pcND0S z2EALcd*GVwYa1&jA4$x6(|23NL{1t1 zF~N=Pi0?01l(A#WCquURF6%cecx_A^b484%g2fL%bYJ=d5S|McUzvX2{YxQ3(|wBw zo8pX6@%m=VpO~dMqiJhHOmS5@8ddy}YwC1&@un2?{LqBmgBGV3PofxK>TAq$V|rQt zo_J=_I@M5$R$r`kOTO)AGInI_t+FuWy+S=B`4ZJG$!FE7wFAW!$08ActUeZBtPO6q z6jvPO- zwG<|7t(4kdH+yJtT>05pWq=?-2r0;&5jxxcrb(2H(E8V;_d{oZ*6^Wg0Nl)}9p3ty zq}$c%81;-}aV$!;zdpzvsj%x z5?ypN^;Ei&wPyRI^hzbO3tX{gZ)VNn_=*~-a#W3}w2amJ4Uqn$eQ~GOQxX2k*8chr zU&W_aZlX|CdLSbXp~S?HlC)seWeL)PKlPJpH(XFY z`9Pwi#4mi!Z|w>3I7zCKq`WtailS)#*Z0}3s#GLkq=eQsuVwPPm%g7i08%1rJ!Q{~ zBX(0xmPjvWgtj&h86a(1weS*Y)ENj7wbh|D=eyVyw^<};=I70wo6!XI+kyYKJch?s z{F65Q12|SoSi*RUL>XSf_)6=@ z)4p@-b8KMdIG)kX{tT72+OQS+P8mP;rn{@VZEOuvctDK?9z{T#V^g=;sc`r=_zW(U z08`Ji732r@x^(0bvhZ*-g1rT$w6{F}f3IJA8JufTQm~9VHflhAT3Gh|0<@lq7YV!)OxiD#)iMX>DlVrNsEv z$6_b7;jNcB^f@WY^hqc2;hQXPsm-iaE~?lWz^69HuI1T+3r|r?!Pf6Ye&dHqb!Xg6HVbhcZH_}LvunQIoSA?^Gy^_r>j*SN{?KF7U3Ph6 z`)N!dKY!X z%I#`re%q5O88S0@&=KgJtFc6 
z%6h38MBOI=#Q2)Z37~E>SDrnxIMvOrM{;-tVD$2(+DZcj1@xhsvrYY0*7NOZw@u4y zl{ckk01+MOu-H}OD{>|etJG0LO2DT@SMe(DAtDk4NyC9fhvxG2+$^?aMwa4B(B zaUi4&iTGM0OH<_!YtTz=?0epxUM`qe)~2-Ud2K=eHzAbPoV_gj6kux|1kCtE>+r5Z zsAm2mw!dYCtc~y#tM+63y81^4@iLr#Co!tEP0I67?LG2}-t{8(G+~9i< z)(%uw9N4Q&E{R!3VkpZM`E9BxBxYfH!bn(0rS&Yk%B-&7Pt&+{*HSq9^Dd+5nhEoTdaXJO$A|>whOk zO)K(9na)-@_#6USoz|fb;)uoYp`Rf8r=evkvtfwKPn<%Yk{C80^$GMPAs`AjlTAaB zGnybIe_WuR3LG`QI)Yx~g1B z&-n#3e(+9+NlqHAUKhl%7b@rTh6itUxmYtKBH?AdW*qw;oOtkPA`?Zds zv&}V=oxGpAORbGnzddV`XbQF8!p2WWO1&6el_(i}q8UY9%#U!z7c{0yq=Luy4z2hA)YK~oZJJwes(>i*f-!mYY>}qF%Ht1XeL50ZcQ;VcrS&N&OB3-I=xi|W zdV*zCRdULzW2mWCb`|nRCQumANO;K`pCHz8v8jNn526&3y>}v8ecbc-;)HUx+L3oF z@R!0g$>gmgbtbEGfX*leP9~D7A*;|t){}(b zm`BD%CQuodw}ra+Xf81d7a%2;ESK99AQyrz==mrOV$lwSTT#MWimKQRH=|gMC>E3D znrRmMwm%gcQF7MHfCBrh1CrA<6Ir=B#M1HotE%2XnP@#PA_J8-t6D}?eL)!hx~gSV z%&cdDDCRBlAphonV*q-0Q2iSa)F)8K1Ufvv3@@z}*o%mqB^0n})~Z-UjpmH058@PS zKC4-iO!4Tgp#mi+5aCeRMF_4Vc-5UY7fZ$$_|tfobVZ-d8>ZZih}=Zy6Q(WZ*n zBPM`B!R&6A$oLL2Li;6U*lSS9EvGOkMCX8i;=|OZ^2ID5Z!EM`bdw^12|$PPHN8#t zCxJpi)zm6V?}e_A*{+N$U?o6VtLFkVvdGS^LS2||_-vwv(=;C7a!Bihva3(E2SUL@ zyD|ubCM{E6WDTZWt;YsJT8;1D+C<&irZ2M_+aTL@3&~?Htd~AqtWSm72^!M8q+~(7 z{0*W4^8~4xECX!_`kO)syVwYPKnL014l&a~$p!iBEyQW{q_NB=d^<8i>LYeZekPQO zrrm|80=5p)MynSRuM*-l`6fy7K10waLc-JPfh1LU2`>-$j{ttbxp~i#(dJ@Kvl=^B zjC$v%8JL#U?zyu6_h!|tnn$KG_klo+tw~=MfT_fQpqlNVh}*4-vXl7vkUMgn!d=cos_rKWY!fa zmv}3PT5p_;MDj(|iJi%!#xcA`yRpWrQkKV%P?~{z8(O`v4YC!a ztC-$K@Hh)xTjBsdq1*bj+505{Cm4xkZKR-Gh#(qj&W7Q!XegVDZbiZ0Oydo4;Vaq@ zZ#5M^ej-|2JwA*z;wzu6#nafRc3&z7O2H+l*~)UGH3wZSL~KDj=o8NXB*tn*cMZTP zMM2d?^zF2iPV#2#JuR^SfCV3KL!yg4g77@R^(YrqbpMm`_s@W^CPpjP9<_y+qqeQ! z2g&%s`@(5pH;otWq?l?iA|!S#k7-5_Yj1?j()HO%F_I@EQfO>6`6m1pfT`W1mEYj8 z@;0^m3OVpMQrC%hmSHe)I`GyA_7N@e%I~0-hBo4#o)D^m4uMiVIXD4jDLNZwOSt zPEvRS;wzC5Pao^?ac*5HIzi8FJ*N#rxAs$S6zka-d_asCQT9JCqFQ^G2?KZrbI~9N zJblwMI={C}Yv#*g5AD^{-!>?-QB-eJ)@#=Qqo zz}O9Lcr=GZD=kLdny>k1tI>=OR?dstjTa!3yoafhcYgxk)cki4-?kSKf{osYVO>WEDLLK`8l)25mO?_~~-P^svCm4`pTGpIfQpis0g2%T;i zUI$D9YZ2`|fXS4#{RsWxNHapm5h|?cei})0m3bB2Gy^~fRPW`5Eu~|;9ot~e+lp+P zDU*kjR%L1H`KYbWBFX8ohu;8_(7QXfIVEF{x4gwEF`X(YKb0l13WPjG#x@bZgvX!F zl8oct{ZJ>^J&2eWz6Uh|OWyMEbbO%A?A0lfw1rq8{0aEDl0AuU2%VSEV@yTZW70@R zD)J+#0!h(W8kCi6mMG(;GI#bVGeVTX7ywZ}r%V=>gG^%P)B9@;G$C)p>dCGwRgVo* zyBx~Z^2Vt)l@|nD^w_m{@icj3 zmH^_VqDQU-khv5>WfHp!AzB}9LVM&7r;4S>lwTDc(_8eC-l7-qqK@$IK)qg@k*y>~ zFSBJdVX*++Q(|{#OHv?$g&x*#`^SJ$u!?FnP&T-VJpur1H8MmOi7TiEs*Wk=>bjrmn)YAnIt2;E)m2uE z+~}8Y&^=mRFw5g&;}HWQ!{DK$2uf>H@Z?0*e!>}r*+jEo4j z?e8yb`YlpGGf}VWMouYxkv(>?)oj2F_SYMLyo)Bmz+SCZNs%|^TC0!Qf>wu?Yc=(G z*gEi^i;V+gkgDaj8I7A31Ge$TrX}=xWs^d$=QjE1Rp~A7=`XPua8$;QSw_XCrHBoe ziZLH_o&tsVGC2V2{amfu6P|`77-rI`O?zd320k}Y&qQsk=$nj?pi5Q8*8UiFy#EUF z^$AbY>a*h`dMWA9?%(={&*l&LIx|9s^@5Py0y~$i*4Xr`10BBRj1ZZq^=zwgn#K)FMuwZqE_Q;XP#Vq zqAo_#8p#wyW%BF=8KF(UI)b5i@*k)7+>*Ixc@iz~%dkq(IwC&DsGaznY1!^#&mkMA zZ8Y;Xpsag!j6VeXwLaaZBv)Q<@eR&i9Wxb;IED3kjCLNj%Oq*m4tT^EXLuPhUEA_m z*X=8u-lPuK8bBl=U-Ey^tf@2Dat@#n7-;oxy$LM4P-bqQGH&#OK8zYa zE|?PuH2N+HH1Q@Vu|exa)y*-ke70n!MGAz93bi_AFmh_+KyS_ZN{%)xI)95MYz4!4 zj6;NfzPlF}J14PPeftb{F=10X5BAdFvS|Vx>bkrXwevhJCq;9omfjH=5g31?gEq9B8#l7ct$TV z##8=BZQ6lRUD}?EQ1Hgtwos8WxNl&QZoyb|j9H-dZ7!MKzLiNt=pn;?ez!C!C z6A^^t^(=?x+|KI*sgzM#qdtgisn}4C&9>^(9eP=cp0{0_X4RJ2iZxfiD3f5HM9kuy z@m#S8WtBjoZ^#IpuA$iu#6)!DYR&l>`vB5dpFrlId4UeC#-rq5xapQjbCGZPb(7}d zwIXTE3cMg@OE23J!3YYL?Gr4eea#0qzO}}a|G3j+`@DVC3qP~Y)SaJ2eA{E44X~o8 zV5FON;Ius)0W8wBY+LwN;!|^v&aUU!AeM$kme|Y^KATx+D-|NkhSniuuA6-fibCcZ zd(3g{qW*VwLT>bqhfY2oM(vE9-l^rmzQM&#c;y5O@jUo2=u81_4zr6s1S-+-?S>H; zma}!IhC_;mFsADQr(m*z`f39ug0FD9t*=r+z^Oom)EwbzvLwV 
z1!}F8J;eu13i2;Lz#*c0AkNp4UOCe&aanH(##PX5DG<1U9lp7O5&{@Y0VMtL=TQzV z4}^Tz6nM|C>kk#V9Q(8EkCQBj#|8=bNlDTlUxj=K#YY||-6YBz1(A(ZaijrUzRx@M zT)hS-sND#fC|!RpsX@>>>_z>ewc+a#^iGOq5s?M#F9HVF`J*(pgT}TJAZdr4i@l5- ze4l{R-|nVFeF!C$NN&DgsH-nU8>sXvu>{rE@abY9v<`~Mq|p|M#xmK*a!@^cABgaI zPai~3^vBj9JM2u~DRI1>pi>}VnaPHuZsP}m7q}%e?ku`jNp^YfBtX9bAWzYb@Kxiu zZjdL`C$hkpYdnCAfo*LLWquTy3yl*d@8x3>8T40-%nEE9L}c2fn&Xh4q7GF?npul-jg)LUw zKM(RRxGb&x4KYjM1rhjq-A$=!G`h8zXm7)QZ~cTg5xND{$ogY)F*S34FePUBcW`$a21;0;38AjD$g4Y`Q{}ZO<1&jZo%P2)SJc33d@0iSe!0EG!CGO%rX3W_Rz9YuYD2ZT zP;Ef~U3H8AAZF@mP91o70) z>Gg38DdfCTAJ>YI1;dK_vJ}cskR1lf&jKhvk!!u>P@KJ#k^TM}EVgeCh3_(kKN zMLd=oJbpF8SYaBPKDp##5mid;SkX-ejUwe%J)xS#1Q$gznljs$UlPsZ?gN4hZ=fPy z!hufu&DO~H=Q*exmV;_Nne9c(!^SrKx;qF+*=FRBH(Fz{X#>o1*f1h$X1izk_r`0H zrB-140GcSM^1G`olJTd?if-s6(#o64CuKodkUpZP?C(KAtwY|J9h2FpB&#*CFy8g;6EDr^CuMv}IjQ$UO}k`!3J zI5NAGX`pe^>1t|%zBtu)rM9c$TiyiGacQCnM@18O*D9A#6VL$6!ELHM_t|rxlPysu-I}#&QyKHZ3Q$KF3b;Reg?_P8*|@=d+_L7yx=DH>h)Zr1VYO) zgDI(&I4b}KALB>^crPG>jD zgSN1V(a-QAgot;kk2uy7v#pCS(qdRD60~X&r=FRu$0%{ycIfBSAc=!wamXT2z&^Om zXokPwe`&b(9nY-{jy6u{g*(D^Twbtu@DivUCOqR5SWgW3h;XzihPT>;B`*KFnd9C_nF9!<_-6di|X zpUAn*j2bIikjgegDrz-qmx)KJl0cbXH`aMWWTCnn8=M~FnP^>CDF0Twu2LSMJdZ^| zkl&cV8p7=YO4=h(n2nu@t|aArwJTW;+<`8NMiY9zuR#jH1u0(FE-*G}7qF{-2FN&o zsI;F)ZOFmOmsWqgZm?Ro0~7)unX(D4i9TB{zU7TWC(g0U{uGRIQcY$;6H$h#-O0X- zbCE_-BPq&Wl3c(Bg5e8i8M~rD0}*923;<{OHoCDvv~wJ*gbap51Mli_Ms+lreA?g; z(`{0492^R}XUhRv7euRigsLh;?aH;trwoJj*-vAlWOf027PX+VC-I_rdkUhl4E0J8 zQ2!VN9N!j*0vbYq9NyJ5Of8tWJW-Ux2`HwGW33Rq=WyIpiKzX3qr!`k5A=b&%QZD= z8guz>(JUJ`z+`k(${Q^~*J-uOEk8ltW|0xkywucRf<8p8=b1qKsYc^eSR?Kfl(4ah za3B@qE`W)_ck4P@RZX&@3zP|yuxOK%v6Cj#>+(sl^cpg0BE1Grx{+S8NQ%%ifP3Y zwLOT8uuwq=W?R>#L#feXIPx%Ytdr<7+rG}q-;Q-Q{!Za*RxWS!Tde{aOp-C{sWf&T z*Ao@>Z2NjDu4g;eQ*k{zWjz%)%R>Lff=TPC)Z;OYPZ`5^4?8a^9S7yM^zFhLLcJETbiq)sRPi|*@uWn zp~&emn)Sn64Ml`2GVGTk4wmg)B!R5C#zC~ZZ@jS!bFHrpYFT6)+l-+H-%ic?T&~C1 zSm|aYXs}Q+{Dc#%bq>+jKrG8XF~_c?YB_ez*&1}Vy1XBOEN(`wbYN01^;={D^`e_y zi8wC6P&eR^g{?nK1<1eRqKSOjUpCNG%_>Rfiy8qiK(GyC+MKDc^&t(=>oU8QNtN7K zrY}kLW$Od~A7HuJx0yooYWHo^CJ~q@m9!2QU#EDC4@~^>Z0oo+;|H>QjG+hC`ZY_= z6F6Ec^=URf7o#otZZgQh8i4M_()Ewz8dFVH>#5eh$|h-{J^QFqmrbEgnVKesxwakJ zj=snS^L!Y3sP0J%G(j1KZ~lR(t$|bn=$@%)_VwuJ94jo5Xt?s=H02IX1>aS=Nxr8! 
z`ShGL3_LeG0qu))&2}GcGW5|kVU}HzB3#VeYza1?fF1?_C^pGVvn69`fn|4@81xElO^dk^?%k z`XSK7xU8H40xPAHI6tJ+bE^~$<@z|{A^Iy|D=;HQD=1WAvwWcvI5WKtUV(QddkJsd z*}`Q9dl+!pvHPJH*G7CT^>HLn=#TBhlz<&pDAP^Yo{#oE`yEz4Cg_paceo{3Nfs;4 z0ROe?tr<<&Q+uXX?HQ>II)|+`gs=_IKv^~tAdMC_JPO(Pd$cdr)>M5GH5&F4;3ThI zhPLxDRqt#jEOls+F&P~Y#fI186Jy}ILiZiFV_lZ>O3`H(5KaH1H7q>Ap(fWGo*1#dSG!}EZ5_Dv;He#I`Nrn`BqY!+3S zcTkH#Jvd6J@*-CPvQK7Tq8p%%XR;?TM2o_&Q%UNv0`Au_7YzR@O>7GR2W#p3N$!zd9&(uWV={K=p7hN9 zWkaw^gtZ8?h1E_^StW1DwoY`~mHx2xEpN@(TR2N>dBN_uI1XnV;8wJz zU{+ao6Of>}fg6FfKnrZ?^>Gx5ZBmIrwnMuhjtM^YA0VZ6PSw7VpLhc6D~SHjkjy?p z7WOi!j65GHpAM~qtw*#VV3O~o{tUs@1`*ue-kVB&8fZmAP|d<>5&O50a+m3!@ThF7 zc7v8f z4C8(8heYC-XqhA49rS&YnJ{*U$)#80sN~8_#$L5EU8|P;7h+jUzUuz|Ja8U1BX|G` zFS*TO*&+Ko5P%PV0y}}3%-E`)xv`$tO;#z{zm&3_xmfn!ivY{LAWGVbF0wRXj{~Cs z+1BBMs5qg|_+e|{>o9koODwK=P8UnXIaU@)MQ)5H&Z|kjUFP-+wgdo8>`0!mXxmpL zm-Cn#W&g9FwKqgud-{7cooh(g@DreUrULc}%p&Y{fHpf76?7$uwpo+yD3xqBTK0_NsGg%n1K|wkZ#Zzap zJ&1?f2a>3;o1KAUl?K-(Pm%AEnkeCVV<-q zH*KbuPI3fuhv1wJ0As!s+Bs}pgVG@#;KNPC*-J(fZbZ=9d>3)fQWbDwsUbXG5>zjSC!8;;+Pk)EfZ`7HJa2hgeLCRe52|bCmm!jvRVV$SZ z_6Of~)4tYuQu`Z!rM=)caLUv2dX!9@_&i%doqr_X!D9=Kz@gX8p26geHejQuP7aV| zHk^n@`NyN$LEy=9fQ~)|KlnF31hCAL!CQKkTm;8}*8%Dz;?X2xHyT;t-%)NQBN7zl z|I_~ZS%db+d5(*bgUyEn4Ij=EMQ?pT9jQ#DIq2Ffeib?}{-FxMV=4X6};WY|6u14yWdD=!zqEfA@?{L}uD zFRm<?XEjU`r}#!NgD~vO^t9)j1rDqJEq^G}sHdfc<|NQT{h+S;fbrCHxA)f)p4^ zQn(nM+E-d;>od=4&k!wdT{%jOLbyX$q7Zu(@>U2>oo9f$r@79Cy-4#8A!geM)1$}O z>Ai|R{ze}pjQ@f8Nyt*SZK5l6>CO5yhd7YwhI8jvu(cvq4t$1yDYmp}Ddr}d(vRJL z9{Af7XD7v=u!cWEX78<3O*X2T7X@#;4!=y<{{(@G443^Y5U7Z}hjz;T#R#9~6B@0a zMbMTK6s1?=2YI{}i=k;kMxcoB8 zz*mWk0^$GQ2#7$dcK{O|h>`dlBJ?OegXq8qo*=THMm0J5m60WI8J{^mgrGOzRFxDI zJUTW@8T~)Ar-t0-nwx7=UsXh=qL;y(FkA~+=Pf3x%|;``gQ@F}9CVqDsq zBJ7GL8vge14scx`JqI2Ya3W}f9Ur!kq0p24*mmQlq_kXpdK>(l&V$t^0|Nz%f^t0F$_=_3cx(fX8d9Px+o2?GzKG&iV{XeiqIS+H#Yw-riB|@pSvV{B5a{OimLmrHP#EM=cs61- zgL$OzObR7ai2TPkCsJrKg}~NQcsxS68GEUH!JJps&bZS4s!>3@x7J`vY_9P=cqW*$ zl@e^Kah>rU|3)hkwJ+r-PD4m)wMGN9KWr8Z#wHzLN_|WRpP-w69UDO`;0V31U`sm6 z&}w#$1M4CJFq+PFLZ5tM^>lcyeA%4X2x@EAC$%rN6A@?Imxfx_W?69%2g$Kx%livD zsn`oQ*UC{FDfCsf^+u%PTyBIsLYm3jWy^ro)0^?Bc8=BZ8doKU{)Bx&=e}zGY9Xr0 zehn(6+Br`4lYgE*y-^L%SI@+0(?jwTwMf#PjVdc3c+*=lr#F{fRZ+Gtgq6bL^SqA{ zmtJ{IO0}$UtnBw=c0*{P?7y3;D{FMZCMZIS z5E7{_*?&7ix^rJ0vpmKW-spAp$i_i)X*P0QO3`l5O?HR9Yy+w09} z67r~BcdwYI{!oAj-S?U9+;;l7miMtfXS@8)Va-{kJ3rRT8Z5A{Zj;|R5=@KLoy{-c zWVJTuW4H(Dc~zFY&-CdZ_n+R7S=Lbc%Hp_8-)Bpn^FBpQ&OD4J6OXE8%~>nr)d!oe zGhW1VoFLKiC%tb`wgEibV{jdZW8I!aXtKjer9vGI8GGSh$m5;~!R8*f0|;}=S4Kxh z6TuB#0J@jPsw`dYyj}K_E4SL2=bIU5T5+EDBZB_^At1rmMLCnW)VN|2K~c^}7l8i3 zsjrGI(3~OT9neUuSa%|eaY%%>Q+N*tAt0nrq9_Gbg8&WO0;dhKLi|Km4tm~a8ka}L zk7|!)<+NP2Ck7RArVF}wh0aDTFBG1L!PW=xKn~dVbsrpdiZG#UZ1KKEL;To5BJ1DP zvIeO%v7+oVz*xCdK8I`g2nM0}qy~q{JEwdSF~~SMa2T2hmdZ5Ym<%4?g(z^5LrKCj zMh?6#z9BU)?&Y$KreGPPO!CvU!k!SEeNl|bcf{6x_(~Z|JaxX{sfe!`V(=2EAP}d< z*FWQhZ;2O-Ls-po-iI}YIDV1RA5RzL^f%OhQ?hx94B*!rTL*BG3g-Jszbp>ceeCt}boZO+(>-Qbxnc$#AZ+-8GI z8gt3-%sGgN{X0grBkEvsxdRqDc)T92J_I9a{!ojkWImdsc3uF$;JzQy)vjC&AY~jl z{r0HUBLoZ%39HAqM0IYssYYbeByI}%yf=~@f%c1>n-J-3riNdKDaaEpMDL=pc+xf{ zgqG%PL<_)Ti&O>!sN* z305C>_xwwc38U#|PP$95lHkIZ6Dx!SRtsaZAS33rKZ22dmWAla-b%BWL^YazmP&+W zk0Mo2*Z9N~%CdSNC&0ed(;HXWvdAUMDOec%H3s~p+yy>U?(A)9>G*pNscgOOhOT4F$s2;qzQ~_v`KM@E8sbbDJ1ed6aoD9%|PzWvbfc zQEpr}j@y)QiV;c{I{qFKB#qf};2V5{I?>r_%K(8p#1G^OJUu9x4(yA`%bbrfv|DKV zJ^TWaNhiRn6rvR~)_3A^3!2Ab0wF#PD23P&A>)tO+o*x*_acqwFC79!Yg}`GD1lTQ z2dZkUrlh8Cp+%BK-ZxmNj#14~Bs}`HQIP_0Aa^b+GC1E4A8~eVIa)g1v7(r`)0BmU<9BEVFyRZcoB4r zMJDr9KUQ%#<6dir1)(=ytwkh*-vIo zu6PKRAgFcZ^EUnQJ6cbLM^h^FS7(b$#J`Jp;K9Wy>H>8jU_b6leNOsvwB+Zcht5Om 
zD+9!f_&z35w?ke>Nlp4e(6=q06*Yp#=fPztV@F`Ghs^tTpnftbk)Np8b8G#A2$u!uXHzVdn`nw*MP>*GFSb;{E~km89phR1_@q=mW#Eh+gP4G%@@c$rjYy;oxoe?|2Y!-_PF3?zrQ{yg?)B}QWQ`*n!{h&A{8)YjQTUm_!% z?DxPHGAWk_ryw|nPePp*`SF*y=0e_E@a%zt@u=ORIiFW%@Xe?^N91bkrn6+sbL4^l zyy@Yc0T9FjsqnVbb5enb7K8m!O!+u`=u3i!jk#}#BJ3qBS!Njq-_6BXIG=o0eZ{x~ z347o3&h4uFV!_>awfz(D=Bw@RcmS(y(rK{Tmi=Gy0PM9p5dhpcEVq3L>_&`c<70Bn z7+Hm$iO2!sakd{b65Ge8B`lc8J6h70!9n~b;%7 zuAuWJ7kdR`K|tM>3UK<$6y-v-^GYas+hJ>BH}AzVo+Sfdcq$6P-QpS{W|B>@BL_x_ z_=~Wi$hTutzXqSqa(P3f7Vp}F!BG_Py(Bu6!FvVAie3VC0f>X=l?(NGsgY6}`xa7H zyKw|-hgyI`K-v-CalZEvZU&pdmw~1epeK$~0Y5oFo4k4f`Q-bD00VyVRM>xz>j91v zW>I7UUc_6h{3|pCJ0_9^cwGa$uwzn=bYX+L5~1Ln*WB#8eim#kwc|Ng%tyq&TFpGg zgG)0OStE;V-Zp50CF#LAuVa4}M+>yW7|&bzNT0LK&CY&_VeV!x;6D=5~QAhSz;|0#S zIJ4l5-`tDCJdVTG&%qy==_$d`yBb~v3BlU?`)oj6IgV*mY;jnCkLP4b3CKIb5Y*=+?Ft=N@+ z1Z+Tl2EH5kX^4YjIO{`b^47SWPMbhsR@F@efls5(zcrpi_UEBKni7a^?6o7DqxqFL zSnFbF<3gQg_V!_LS9JXA@g&jUu?cCH%cWhNJ$xNO!8~ZD!IOfd-EhPT{|2=+h2PJj zClf9(72R)|Quq3Dyy_tkNRaWBsNFkJ;S)xi@(Z zEZ$ACF2}-9sV}jak7{U}6L$i6$w1{j1>_j9c+}1Wyaz2qdm$2_R%Kh((}y0POwJOx zFX^FukTtc0QjViO$4ZwLM9(zY-AvjER*D`?6!ampl_QAtsh9EsX0$ihHxMViW%C7| z{QQZQlWH;sAEUgt37KclbvV2>__4z|j^2KU^M(UA6Yf;#oS(T{i$)|Qm!qA~LEeWf z(P}DoaWrlwr)8~(HjLnzNc+LOmI4^>`V8ZI@(#h-%U@NpOUcAo4NkSYSjKP?97Tn6 zCNt_Jg!@+t*!TvB&$)27AX)nTE|T%wN2f%p_?&QnnhQtZXEAwbwH4Fsge8W!u?e_v zngMtCJ0p|u)v4>Hq1$9c~H=e|ufl4YyS<*zO4X1%?(=vB0ZRwSP&d=xyiwZObN07nQ zID(A(88Mv>&rk1-9A~t0;N`znIOg+@(({$6x}&sFmZ|=W@OgKxQRS+IfP~G`Fp| zK8J?-@9gHyJrh6$c<~IsMv{_gMO2JEd|YHk)yp5|B)L>)vz7h*9nNRZdJnvdcQzBy`l?27HtB3Hw%UxaI&{)O6BCI-g8OhA2?ATsHf zk^Ka;FSw>@yzOSR32v+h|3Fsbk`F+TMm6N)V6x7SrMpMfahA$#FCBz>xpby#R`e&% zXn7Z-*oUoaQ*`$t-+MrM19&(9_> z@VB5&^v*&E!}6{Z+IJ-Ol~~Ln;D@!ayzHdg*^0FpHp4K-<}WTgJRONpnLey^3po0b z-Yafi+lra&f_h>(N&80st(83nhQRd|28{&k%r9UM@ZSHflJuT6pt;VkyU{Py_5&UD zj9shOzV^ue0I-bSpFmYZCn^-kE}cR2g1H89+J0p$UzQeQj=fYu(2>i3!x4_2;!k@(ka(o^73;Ucv9QmjJDY)- zNdmdzsE|Tfm8*!xTs{L$ih|qoH>_6Lpqz;Q(U!G}?IHFtGL=}_rJZ<(Xl3G6H-#tsFV3m^Y4D7>Dbr_5RtB8O9|fx zfWQl51ODbT$__vw8~K89bRTg`(w0v!IIeGL6iNUcudl=8>)hV{hB6epbycKcSdL(C_|7zK z`Zn$JY^@p!N7**tPuh{7Z=WdE{)W2kM!sR%TiKbp>C0L85MqJyeeW&eF zt{g+UJ6%)U4>!<>YhAdG75S~5wY-j=5M_*`;1hMyDzK{L+a>1aNk}%1MEM8v2&XH~ zvo@OA!YpxjAYbB}(@Cjq3uG!zjW)t|hOwECVH~7RhH}{G$}iUDG@C{e_QyBS&zr!y zqPuXV4#q(5+{3;H@A7$oB@=tcvTTe{_FHr>M9Q@YyO|YFB#aWTiz)pmPIb5$elsA# zzD5vBdgHC$K9~(A!F^dcqym{jfhQj)FFxEl=l`u4c#;8q+z|qb$xo~T<;~HGDdF7Z zh`8S^M!~IeF)MGoO-TYTn6|5``%nP%a`D?W*;iP-0lh9DgVFikU-6PyYI~Hix24d9 zO%T#x>lj?#A+A#My<1fbD8}Zhh4gw~p9yPp14~3>*glL50L%nH(G|E3(uP*!4#PtH z2*`J+Pwg4DVm50>4z(wJ$I>@P?rj|`nlpRGi3lw<)vf< zWey}@2j5c^tx>^&;#W{oxkXf}+)&X?(}OZPpG^VW0zhkmCal#*e4nxD>&WK(Z=|!6 zoRX>GKZ4#{{(uK4Kzd@(|}A5*ahbH3)x{vtR)a24HG!k$OX>|Tsukb65`tROnmp2b)`N9{##^)9=D z%icOiZ1h2&T+6lx7rn*1L1lO0hX(4;Wn^Qijonwwr6Xgbm0XNVN`uFrQgOw#!7eVR!{|EiPld+GK?x)xS`x~5n7$xoBFNMJ8SDXx*woTp^}Gk`|-eih;^YSU-f5VUl!X91Fsc!qh)x$-8iD>+U0j@a6y8xZxp`2$iQP~ zA|9V+=G*0`s8-`B1^*B-+Ufmt$S~;rOvw0_-p_`NCVD>?GQOnuOCe(yy-Y(|>aKjQFxI*@q zAOQZ-ou^=rMPK-D;G-QWPM+l|*?%Jf0#n=z$-~4yvVR26fO=#ez+@TE0|5UYWZ-Pc zRSLQ{m$vAl{B?w<;(Fb$5ukf1arf>91c@^?b3VR8$v3(54wF{7l=uoy9IYhMj2xhQ zDZq2M6cj87287cLKl2Nz(7*S(W6KPC_+1Er(+BX24%k=0jXoZf!{%h5K@LnuunG;1 zWbNtlbwjkkLa7WDRfAjPjg7Zed`q^9idvd{!gS_$Ga4y{2O@;M4LNW#QqhL!^~zy# zYsWoidzDt+k*E_JrE=g)QC4^01oFRsL0#&nGW*KcVZex*6a3NglV z&Z!gsWrV56=yxK(@ybnD=Vj?rTlmjvBvVh&DTFgXw&^$aru*&tcO1%V+rLB4I|UD$ zHdj;GXH8`X*?a#KR{N$0Kl5F3sL|wK*qMGd2yyQ3AzBF<|2j0MEee5^_Le<};&7Wo zU24y0oWt%Jy>g`5bA=LrsPRyv@#f(j*h=jr>lJ?KBd+OpcH^}}v+Tx;`|)^I?HQvu z)t;-AM9Lg~6!hHx6OcW6r1{W(+|`LoI(@?_$sA^~zW`apc$)JejW6ng!)J^a4~LEC 
z4p$q0;BaWr9m=sEnraWn5Xc<+egw%g5^@amKTCHSvE}E11Fa)i6*P%+=QZj4(;Qnb z=`(hac!OO;_LH86f&aic(Z|zdKip#|!HtGC%lc&81WVq$F;HSP-$D6t2k#F6NT7Us zeW^%|#K@1+u)v*=vY%87FwI43nPZGH$XiZ!)-N`4*&o#z)lOVs`41|93&Z@6BY=}| z*z$fJLS-KIxh^9Ka~sOcnycqEn^uQdC8SS3D6a4Bj8)#l;k}twEsxRGiAA4*%@cks zk~SA0q$?J-{0lzM6$@MG#V!VQ$Q&#OmVtyEwKd)lp*9}Xc=+*W@P5RqoTo-)<3fD=i}Sr6k5zKB>`TyBV53mEfc9Sl zZy`y~$7X?D`zEno4!neKZrcbyg%EriqGjCJ6*SAtqcUy>j(dnQclMTXQW;_Uq5kF8 zGX0JgeX_O_hElB?KUJc3S+(h{T3MSmy;&<5SAcxLc5~OMmnnO{{c^YyvjfJh!4`+#HJro zNS%}Yl#cbsoEFA#k_mSSx3KfEl3@q%g-j&g=Hb2|)Tum#r$zDNTrC8vcCy!uJZtGR zvLjzd1Lt34o;ZhlVd)^Q0@VWyC}pm4Fdx!Xu8qSDORy=S*h|o+jC#6L(#9OnD(F0? zxym!1#Eqt;xQ1zbBTQ;*_%;zHO_tlA5KrU^UnAoGN#V;yn5K~MMIy|lwTv*1*??Uq zyhrT19vrX*Ix%0YfBnsn}`tF+lM^zF^%f#nGS@ zb}UqG++d43y}v<0<5MrGN5&SM6V0>j2tWL1jt^uAx)zoB=8KM^-z=f&?G~EeCV3z3 zt+NMBD)gYv;iAq1&+|H`^sO_xs^>LVdLN<+tGy4>>m$_bM7>}1)=P&x;dpY4eh&v) z1&;SNJA!G4NX#fBR_*8N>6gvZTgrI;LHpPHMrNEC=p~VCb-9$${Gu08Y9C8^&Mbrj6sn)NhAj z``})k8;G#ZEhOTIU-@}91k7PBhalyXBUfjh{ndhj+{R@8WFSJm9;<;5G#ls?Hf6Tj z0|Ki(NlGf(f+?Ec$pfy|EK!j|H!lyy?@RfPV&-qc#sTti1b{unI?u)+0H=1u43quWA}sbZX0pdIHjU?~ z+Op%ty7Tz^uwpw~_CMItRA}G-;Bm7dp8$PhV?I+bJUqq^z`}TeDoLVBE<+{07yk>0 zBV`8(g!3R&T;l@*(Fodb=}yAWrS_bsTm!Y341nn%eBfmu1BnP9IRuyrU6s`tSQx&I z&wTbUDi4Pdge-X&9xme7IbMeS4pL$@;-jWN{Gp;$-$oSon7l4E|!x5fO zNriT;&|W{4B&-ys`5Ij5f$Tx%xynfPH&hCHQxYn{NScTmOhe=>+I=b3vVRKS3mO8C z?~m}ZX(($nkq(X%8dL%A7FY`$$_zYK-b`}-I%1_O@K~7w&XNP~pgm2zzr&v)2p*FI zFN!!WhmaQj3g7um!$=DMi}G}*86Ogu>j_LM2jfLN{Mo14xj#Ex*NwM1DAzCJ!*~BG zhIZr7^f@G`L|2FDx7)CH7D1J?YZl31rxA>%K8jTIA;v2F954uNT$53|61b}Rdt%cr zE9P_@5}rrm@K)+*Csd^jwJQ!h2{C!O+7$y&d0MzhTzUYWx$v$cNl+ipB=tnt^{-JxL~UY{Ug7VW5ZsH_|2$ztZIzs1nz`+ zmQ@Ww@bdMKy9Bpz*x*U!1;})kjhPX5XB;ahgUC*+HV`Lbxiz13Ini#wguftuye8M~ zLWM3?f|5lUdjp|D+_0K!#Q|ZZU&dZ)m)0QeWz%o7sb^x88?i|ST}~OFu{X2YcNok3 z;W$&duNTm80OI}-)%^FT^rb1jBY=w^0;n-IaQ)Tzo2gTZZ4&)FeXzE@;u!skA^#~u zZ0gte#^A^UO%YaKNjG2B`mv*0~XtMwSWaP|yCqcdKSw^BcFQO19vhkmjVt|CI4 z3foZtuDO}Hv=0U`YWfvm9~}4Kq9DBSU~xYTuMhROjN9wbMRf_19Oy(EJ~6)Jz&?br zPa+4n&f1e)dQoJ=2iG6(FI9y;ru$j2D1sly@%7+tx}z2+Zg)6-nM@$i;;v`Rbm6ofn?ZBvY&Wl<)@%JxL*rLaQmXl0wf?$VZ`< zDD)c&1>qRTWo4!J&xqtL&NgwQBLlbxc0)A&3NwDNi2o=WKfsJX2`R(J8Rr`ZhBsy+ ztO~gg3pO#Yqjk`6`ESHNa^Om&LtK{L2J_q^u*om>IK;{r?w4RItbQ-0$q=hrH*pze zMgOf7Pb`#0T6p9y#PMABe+kQnDC^K#fX-Q~J@FV0vj01b0ky}9pE#6oOCTUb*+wP- zNy`Wk`|>G#;~YNM7SrAN-fFa!-HJ-k3&slt>{S58HIa4ZE!(k$$~^cw(m;$=@`UQ} z3?p&`TJY54an&o1F(Vk{Pgjt~yR>K85LfXAW#X&jHz*HZAX`v3Um*Kk=yiP{Zte!z zeDNK*7MpJ{cGmfzI}d4yamaoe^WJjMB23F(ZYuyXvY#eW?)TUVZEP0gqKo|*$OrJV z&9eXBNDSBG!38w`=^&jpmHBeH$PR`V%jMq@$ZWel)1iHtSStsJb;NR+rdjya)0N@0 zdRAyjlf*`l42NN00 zu~!}53xc}Ug7^3tubRI|94)jKgIYvxvwkAe4x;8XZBVJ|TqD?;5xa|xzj+T`GSS=Y zLgeN9v=*^rm}0cCRP$=`^yzLlzN$4XtkQ1KJ$f;-0^X>>G@>4+AIim(ws#U2zZs$w z@Bx?8jR!715{d>d&vXzpJ=&pq^rmUgZw+4atWS!L?B zzc8?~)N`C>a2l_c$*W5}#;dw(H@;*`I^Q{P^~mdAo^&I%EJW^1%EA*s!wbjQCHUkCXEPHQ1$aidoHbUYlek;X|F1#qY zS0kkOKuGI&lKcI@IG*Bfc@TswcB_Y|uWp>0NfywtInnlnV5Vx~Q{sJ`Hyi{e z{EX0bYU42(97C)2-gX{YqS^d0`u$@uv}bWAS*OJwRTom5`rT==uUKsj10sb4>7av! zLU*oMPY0=Vhy2F6+KW9&+NmNBuINvwuX(-pKEJAo+PJVONOWtgH!K8Pz5f)#)?OXv3Gd|2wH@$2vLW=;@2)S2tErP0C#||eIgJ`2??!EIp8vcJ%F=r34YBqbDR?2COe9Y(Kf z7dW$jk+?f>8zeXE)Lb2t4Ll-BdI{4 zMWE2i%BYHD%H@1g-aFp42S zHi@h=KiJ}crXj@&wqVwtU%r#a@|_rj!Q?X5##hLfJzEV29qp1$mJ2WgSaud?Jo$l) zLUa)(s7Iw&hAquk_w!<4T3_W23nN-rufinSB|Gi%mdNyBaqHD1ADq#}^jjE|UFNQe zrjX0_NiQPPlM&UlSfV{lg0ICDY{6_VTFdM(FxT$9E^bh{nu;uspIN_^?WebOUq8LPj^^e=F}Cb_Bp1pPJktOi&? 
zF3@K&HC&_GUr)fOAW$3+wj8rGkSaEULFl{7P7JUOEGGm{P|cLmHN|}`q2Bu79|7zH zT8PU1rV?8Q^|zCyiI&*!{pY+$@80+g%yF!_j(!WqvAZYXUA&}oj#aW(70`_3+w6GJ zprxpveZ+r-4C@{WVw&F-J&C0CV&D)dby?c%fe~0cVo!iu2|mnImoA`9R~I}ZhG;qX zNrPO&8#>P0l9-54?2WijLx>JK!9m!+2E6 zvHFGtI*hB8OZi}2tz0Be!W2d)@>Dk{x{>I7SWP%jqP}42*94B>qu9kcJ>rLoPg#1CeYg7hRSp~K6QEQTJ%u4BaAUMXD{$o~mk^=e`Nxs@mnme>y9SPwQt?2# z`rZh={(SE)pIKLpE(Mwhg-yARUh0SQ;i2imZN^cCWkEhwb9b| zEUOgrm65Tv{$ELd!@rjP;*V0zpk3{@u}k+3bnPAIIH5FLO{8ArnBv;wEH1ZjsxSa$m*$Gb!OKf)#+*>tFni&bHHw1KMc-O3?6XP2s% z9+Iv?V=RZB?hqG~SS6xwhfo?b#qcc*5}q$rx5Ms!2_xtSMi0deFhpme0+UuzyhMb+ zNtOmwE%h6y2td*>c(bNuMh^<9m_ZluUJ@9f`yRVX{}hOxS@3qFj5LNEu_jtgwY?{A5*;HFKN^NxJ*H^l#PDKyP5WJ z!FGMQmXIBYB9PBNkq%jM!KI;3 zU4d(n?iijPgsD(BFoAlQn%%9Ys(DKG`IA0&f*#LMc;f4 zVP=`o8!0J@((`zgbzi5e{fj&>Ea`vc5f3?HFL%8f^r$A6!KGm_@@FyxX z?*o2?=Dl5^k`Z&8ItHE=>N}?@us!+|fHbt5qq`&9bp=L;`UZ1{+?TvH=?n= zpxkVJjQNZ@mvD`wWrKSxZRWdyOwJLFI?B}j0Q6sMWilf*%7ij}h>En7c(|zt56ipR zi=EQV{$V?T_vd#D8h&W{W5%xyxrv6RSqka8`bGOOyEf!=q3%+%-i1H5+Hf;5hDxDvhv!`>oLKZ3#37J@^VqLeauLG(XMsmEs=|A!11Tbw z*5=RSGy{GXov4bv2Oc1}8|?p!bnM<=k_OHV;#)#;RTY>P$rL@hq;W>x)4D%-9PVvQ zY-2g-c%Sp_ij?of4yBqITxgfSp;rTz41<8O4tiXvetfeKJOWH`q?1m=y%aOCG8Qz+ z{~qyeF4a@@9IM41A{Kq)a16c~mcG@f-puxYMIQ__ES*8B`O2jl!BsQ4tXVisEZNyw z?P-~s9b*xR_m7VuVP)#1PssWobVM)Ov|4YtmQ2w){w!>~#uNW8Dmpv~3a*NGu=@YW zMvMgI;=MeNI|@CnQ6K&S{<13iGJsYmk)*yPK?T?h)rk~I5G&ot0Eph#0_dNR$02%P zfJQv-bo4GCK5Ts149-i#spe8*YzF_~cME6U+uUDh{XuZq9cBG|5b<)zor)nrOv7Hh4BRFhWEVjSw6$+0B$OOp-BF8(wm2GZPB4N-iyE zD^ovW2v)lw&3IjPnVRktzFNrb3|%K$ z*=?-;S$TqXrsfUbEBQ0Dj!A(WZM@+oNf{a2UY8!*+FUm%_DsR0LV}w9#9#fmvZ3Hy zo&zWAAMYJy6gXEDP=&d;-6t)Nqvgb)r$y?wSE}<_e;_q_GL`DUeumZ2cvNbsstyc^ z6Yi!>AbKSb4ab+KRK~vWuaglDUki9*J-UY1zk!%;XMGk_Q_kCTfZpoImA4d#`wnmT zRVH#9Rp2^zC)T_8%X?pYP>uw*S6HtVaaboC=gkA;;P_XvJ)3x^W$U$Zmqb~6hCY-T zV=pxr$t0oOu)!K>PnOywWa_`h3yg~2vV!@#=__ekUGOrH8>{x-N&Q;R-H|HNZr$iR z>=1?wu)Au_*y@};qP~dBIF-ghsBWC4%cut^UZ$P_g3--Po%-ubth(S6R2V0k}}_8L02{p5+bykaEgLzf?C{tl8^{a%%_& zp>y_U2ZlSczW}L6!u*?FUNPO_WXSLdJ5D_5~z?ajU*L6u^%O%kB1? 
zp_8l@QCL>L6(o_{qtN1XSM(}+!Nu0i>AK!jCKjT-P7V1668{SMa z>b@8GbBR3NdYC%SASYu+S@~W`yP2=(DO|$*sZ&Tm_v(5i($PtR%QaHeP3rMH4Vsk{ z7%CG!KK8bMpCGIM4uDNuMZCzUu@nMpvFtphN0^X#^Mg~(g199xOtr&fGG)(DAUa>K zY}F>z$|cmp{7=<_FyvWF9wg+^pGmjihPIYc=Ls;jd|RTwIlI^`0sL9PkyWbQd#F}| zx?mga+a4;o0~{1I(-bfAn^SuLWpH4rn(`0MxNX%Ud8km^9yX^d99qyo4H8m(YDd4J z!sdd_6!5Mc2rg(3kKV}VBskmv8<+FAR;+Cb*73(1E~H$-FHMtNAE*)A6QXNjXd3EG za1XWXH$vfX){W^%gFBh{xy8cxeJ@f=ue39VGSv)}HSF;~px$JGN4EeBa@gE5fQ z+K;IG3N7uaigk1pOZXfNukcCMUcG z)KE9lfEvydp*hs^ETZUnAWM955>Qm5nAYe<7EytmMg!>u&&kjJXes&iJ(i*5n^uvG z!a^F-BBol?pYrYMxeu8(#wF)rp+2g3y#w@Q&1%A*97Ituv~z59m2~C63;bEbEY(5F zfnQy1^xA)l9!u;#IX|eM@hNfED+o-C+&Z6*;tOf%n8v;jrDLNS`@WUm;f;Nb@;jul z?^gLu)dv7o2>^^VTP>%l(9r_UF9%ktPi1vp0cgwAweQLvD~f8bLN(!|en4{UbA5hM z%V;hksh5yJN6$y!7?X)KbG}e#O8@7O332{Tq=i}H1hW{^9&mAits^b~e!KEiP9u;|SU})n}(Sz+DU(vBXO_4hFvAZgAt5eQ; zXX)O?K3($N#y(wgS8S)zrBQ5bPA4Lv8+1Far3tvjw$j%dNi0(b-%ez!I+pLl*k2f{ zj@@f$%30c$Up*t`9;6(1fxoAq^RYVcHy!^|%D|91$+f1;N-6V4DO0PK^_Qug_g~Ab zFlA=-msz2T`pc}S`nNLXrfQu)Ovlwz5|d9oPFsFSR33^R%TpEjDE7YKl>*b9be2b= z?0Za%dRWRzqT2P=@p<~omZ@3&g{2J#({uKev6V~-K%b8_h)x4Cf|XM5Nfe6hEi(pt zQy~Co-L@MX7o0R@bY~1UNqtGGdr5zN^3BwDzSOtasZVcOU$P1k_35+Y6vtp9aHASR zMl(y79FyspeFnqOFW3{*htDRmTogE0-NDXmzE`EIH)YSbNZr7)P!CZPe%Jyp7)n`{ zP7F3Dn#rryA+1xW#vf}f_)8V0Vx74_9CHM+&1qfXQ9~~K?`{u@F7;Le8X&9626 zEywnjA41Zl!{jK$(zNE%-kg$w)%ArT zLBszkoq~q{Z*&R|vfellL4LW3*)>P)fC6zjLcLYSU_eX2mtCVp%*4)Sj1uO3r+V?{ zY@!Sc`$iT1aXkL5QNeL6P$R)z6>2GUm8r`qf~;_-ssv-rq?GRz=LK>HKZ}yGZ=cPm zIOGK%*v1JS@i$%4n(mBP>{8j}jq`Ci2>uwS5`iCyZs6siZ`}B8>KhQ47W!s@gak}M zFYucb^a2l@j;Y9E<>slq#}cCjrsydoKy?=cOeyx_DFxEj*(=6 zCr{LYqP#dHB#|8`Df)^`hT~4%+ccCn^*qd%X34%@q^Dhnd|Ws@~vc&SKWa0O?;*R3pG=QLfNu4<$iynPc* z(a2j(IHro6md3d7HDxQ)j2*u^`x&WMUs?CDW}0YUJ44(smgXH_puMLWcV*H!ZmbW)(Y3dVS!m@SfnQWyrBJjVN1`?3U2JoX)OiQdA#Qg*b`Xsj!<1SMj_Cfx?Aq zRuh{I#$=ZYR(`iqZz*@l-A#|-Zl6SgqD2LgwXg7%HW1kyXrieSi(j=%vs8azORvD>J@M|Y6B8=;7K5Vk-9^Yjn3=Y z{McE5aF~WG5vOrQjk~9I}`~HA)(|NIgvNt8(^KN5%QM z|86OD08y>3yTKG;E3}e9!wz^hbrF!v+f59f$Yns+zg!0WeStvqB6-(97%Fu}Q+QW* zQ3f%>6jD@CAI~ne8YX}ZI1JCjf%1ENsbkmdre$ISz{fHWpuj~R>Ki?xTPs&D&OM*n zM)TZ>ab7`Z$73+vw=W*#|2^uYt7$UWt)}rS60AD`x*8@uNOG`0VZ&rk7TV&p>V&jV z3#i*KltM-7DKl2x#t$2+RQ<&Qs=DSA*1 zv3ILOwK_?_u$jl``tP*N163e7HuHnW&!aFG+C8sY|AZdXWz>#_#bAM{B}nq(Eg(hg zE%E6r@=Gr_qBv%4)8#y|cK@#>(|tPAL3I_CBWWVeNU<7P*&t6^^Q+Dsdq>X0x*~h{-K1k$gCVT4-jpKu zk>D5aWMyi#tlhSQ{CP>9^8PG9$%x&rw*8)t?1qU&YsKs~lPO?NmN@c(pP=Ru_<Yb| z2IrdCmS%MUiH4MD8OP+hWJms>s~AERDnplZ!o5uh>;g1@xEV<4ZtL)mhJsc}V(g`6 z_0i*kum&O3t)waxwFS$U@~jENb#R*fsH4q>jcH(FB@NeO8e5@y7Ygcht4&ui7D1pT zJZPN>&ezLyJ1SdRINP+4nP?&9_!gi6>{+y@fvJjalXiqeYP~hdLeeCJ!zkl6o7?AisF1UuDpmpYb(ziPW%ZT#y{Ar$LmqYb& z-kLJiE%~e+4HwuoMCjSu8cV*^s@Y|g;2SihoRnSGd+4&&?EW`7bG)RB8&t=hgIlHE zCE!MN?05dFg-7^mne2FL{0k+h-5JaIq(c~~-#!MAW-{Z=hX-T_FS1Lr?33xCZy~!qPf4@?F_L}SzLbEH?_jvX` z+2aak2xUJ22@3TAQ#o>}^Jv%BVmEcBJQ}~01K8tVkV;olX*Ig-c6BC7J4s2|^l6B7 zIPZeM3EGs0%bi`4uTmP<^Ab#@@A$m^zx(a*JM)=+nQv={gnBzq>%KF7^wUX1Z3x<; z?sya&j~~)JU;A=`IpCTdD!AO6*S=z~|7~ggeRzM8B*e};JcY5^$n$vxP_hMQB+OnE)Eg<*#D4dq}plicNtz>b->kJw54}y_`&FJi> z-0TkRE5!4Fn2B*~P{i$EdpCNDXXPxF-P2d6bv%iBa--0dJW43t1VtvG!L{0gvTq24^4SgCV zHwPW5@>kZBdbG!(%nS7$7HM)@W9e+(Cb87??~^H90D+16&@k~FFYdr(PsW)^eh_y`>~^mf5Cjs>m=~IP}G~!ekTwiuz)wb zLQq>2PB2GVD~ZnW`xdO8eObg^ZV605(;NHBxajGO#U8njyMMJ}130ep#`evM^)wf8 zSDHYjP+xuEP^fQ0@YPs1cTM&BuBoszu@O|LwciR7WX9f5lc^>i%)*(ns3=vaa$eCc zeM4hOR_qUW!xbxKxh62Jq217jPTx!t!h0O8Yj61U{_mkaPRDYr<+Ntc%(O~&(Hnw? 
[GIT binary patch data for the prebuilt tccboot image (base85-encoded literal, not human-readable) omitted]

literal 0
HcmV?d00001

diff --git a/tccboot.h b/tccboot.h
new file mode 100644
index 0000000..f27961e
--- /dev/null
+++ b/tccboot.h
@@ -0,0 +1,90 @@
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#define SEEK_SET 0
+#define SEEK_CUR 1
+#define SEEK_END 2
+
+typedef struct FILE {
+    int fd;
+} FILE;
+
+struct tm {
+    int tm_sec;     /* Seconds.      [0-60] (1 leap second) */
+    int tm_min;     /* Minutes.      [0-59] */
+    int tm_hour;    /* Hours.        [0-23] */
+    int tm_mday;    /* Day.          [1-31] */
+    int tm_mon;     /* Month.        [0-11] */
+    int tm_year;    /* Year - 1900. */
+    int tm_wday;    /* Day of week.  [0-6] */
+    int tm_yday;    /* Days in year. [0-365] */
+    int tm_isdst;   /* DST.          [-1/0/1] */
+};
+
+void *sbrk(int increment);
+void *malloc(size_t size);
+void *memalign(size_t alignment, size_t n);
+void free(void *ptr);
+int printf(const char *fmt, ...);
+int fprintf(FILE *f, const char *fmt, ...);
+uint8_t *load_image(const char *filename);
+void *realloc(void *ptr, size_t size);
+
+int open(const char *filename, int access, ...);
+int read(int fd, void *buf, size_t size);
+int close(int fd);
+long lseek(int fd, long offset, int whence);
+int write(int fd, const void *buf, size_t size);
+
+FILE *fopen(const char *path, const char *mode);
+FILE *fdopen(int fildes, const char *mode);
+int fclose(FILE *stream);
+size_t fwrite(const void *ptr, size_t size, size_t nmemb, FILE *stream);
+int fputc(int c, FILE *stream);
+
+long strtol(const char *nptr, char **endptr, int base);
+long long strtoll(const char *nptr, char **endptr, int base);
+unsigned long strtoul(const char *nptr, char **endptr, int base);
+unsigned long long strtoull(const char *nptr, char **endptr, int base);
+int atoi(const char *s);
+float strtof(const char *nptr, char **endptr);
+double strtod(const char *nptr, char **endptr);
+long double strtold(const char *nptr, char **endptr);
+double ldexp(double x, int exp);
+
+int gettimeofday(struct timeval *tv, struct timezone *tz);
+time_t time(time_t *t);
+struct tm *localtime(const time_t *timep);
+
+void exit(int val);
+void getcwd(char *buf, size_t buf_size);
+
+typedef int jmp_buf[6];
+
+int setjmp(jmp_buf buf);
+void longjmp(jmp_buf buf, int val);
+
+int main(int argc, char **argv);
+void fatal(const char *fmt, ...) __attribute__((noreturn));
+void romfs_init(void);
+void show_filename(const char *filename);
+void set_output_file(const char *filename,
+                     uint8_t *base, size_t size);
+long get_output_file_size(void);
+void putstr(const char *s);
+int do_gunzip(uint8_t *dest, const uint8_t *src, int src_len);
+
+extern uint8_t *romfs_base;
+
+extern int errno;
+extern FILE *stderr;
+
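As a point of reference, the console half of this interface needs very little glue: printf() can be built directly on the vsnprintf() from vsprintf.c further below plus the low-level putstr() primitive. The following is only an illustrative sketch, not code taken from this patch; the 1024-byte buffer is an assumption:

/* sketch only: printf() layered on vsnprintf() and putstr(); not from the patch */
#include <stdarg.h>
#include <stddef.h>

int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
void putstr(const char *s);

int printf(const char *fmt, ...)
{
    char buf[1024];             /* assumed size for console messages */
    va_list ap;
    int len;

    va_start(ap, fmt);
    len = vsnprintf(buf, sizeof(buf), fmt, ap);
    va_end(ap);
    putstr(buf);                /* emit the formatted, NUL-terminated string */
    return len;
}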
diff --git a/test.c b/test.c
new file mode 100644
index 0000000..52c9fac
--- /dev/null
+++ b/test.c
@@ -0,0 +1,4 @@
+int _start(void)
+{
+    while(1);
+}
diff --git a/user.c b/user.c
new file mode 100644
index 0000000..aeb80eb
--- /dev/null
+++ b/user.c
@@ -0,0 +1,50 @@
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+
+#define __NR_linux_open __NR_open
+#define __NR_linux_lseek __NR_lseek
+#define __NR_linux_read __NR_read
+#define __NR_linux_write __NR_write
+#define __NR_linux_close __NR_close
+#define __NR_linux_exit __NR_exit
+
+_syscall3(int,linux_open,const char *,filename,int,access,int,mode)
+_syscall3(int,linux_lseek,int,fd,int,offset,int,whence)
+_syscall3(int,linux_read,int,fd,void *,buf,int,size)
+_syscall3(int,linux_write,int,fd,const void *,buf,int,size)
+_syscall1(int,linux_close,int,fd)
+_syscall1(int,linux_exit,int,val)
+
+void exit(int val)
+{
+    linux_exit(val);
+}
+
+void putstr(const char *s)
+{
+    linux_write(1, s, strlen(s));
+}
+
+uint8_t *load_image(const char *filename)
+{
+    int fd, size;
+    uint8_t *p;
+
+    fd = linux_open(filename, O_RDONLY, 0);
+    if (fd < 0) {
+        return NULL;
+    }
+    size = linux_lseek(fd, 0, SEEK_END);
+    linux_lseek(fd, 0, SEEK_SET);
+    p = malloc(size + 15);
+    /* align the load address up to the next 16-byte boundary;
+       the 15 extra bytes from malloc() leave room for this */
+    p = (void *)(((unsigned long)p + 15) & ~15);
+    linux_read(fd, p, size);
+    linux_close(fd);
+    return p;
+}
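user.c is a small userland harness: it maps the handful of primitives above onto Linux system calls so the loader pieces can be tested as an ordinary process, and load_image() pulls an entire file into a 16-byte-aligned buffer. A minimal driver could look like this (hypothetical, not part of the patch):

/* hypothetical driver for user.c's load_image(); not from the patch */
#include <stdint.h>

uint8_t *load_image(const char *filename);  /* from user.c */
void putstr(const char *s);                 /* from user.c */
void exit(int val);                         /* from user.c */

int main(int argc, char **argv)
{
    uint8_t *img;

    /* load the file named on the command line, or a default */
    img = load_image(argc > 1 ? argv[1] : "example/hello.c");
    putstr(img ? "image loaded\n" : "load failed\n");
    exit(img ? 0 : 1);
    return 0;
}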
diff --git a/vsprintf.c b/vsprintf.c
new file mode 100644
index 0000000..9deafb4
--- /dev/null
+++ b/vsprintf.c
@@ -0,0 +1,741 @@
+/*
+ * linux/lib/vsprintf.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */
+/*
+ * Wirzenius wrote this portably, Torvalds fucked it up :-)
+ */
+
+/*
+ * Fri Jul 13 2001 Crutcher Dunnavant
+ * - changed to provide snprintf and vsnprintf functions
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+
+/**
+ * simple_strtoul - convert a string to an unsigned long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base)
+{
+    unsigned long result = 0,value;
+
+    if (!base) {
+        base = 10;
+        if (*cp == '0') {
+            base = 8;
+            cp++;
+            if ((*cp == 'x') && isxdigit(cp[1])) {
+                cp++;
+                base = 16;
+            }
+        }
+    }
+    while (isxdigit(*cp) &&
+           (value = isdigit(*cp) ? *cp-'0' : toupper(*cp)-'A'+10) < base) {
+        result = result*base + value;
+        cp++;
+    }
+    if (endp)
+        *endp = (char *)cp;
+    return result;
+}
+
+/**
+ * simple_strtol - convert a string to a signed long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+long simple_strtol(const char *cp,char **endp,unsigned int base)
+{
+    if(*cp=='-')
+        return -simple_strtoul(cp+1,endp,base);
+    return simple_strtoul(cp,endp,base);
+}
+
+/**
+ * simple_strtoull - convert a string to an unsigned long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+unsigned long long simple_strtoull(const char *cp,char **endp,unsigned int base)
+{
+    unsigned long long result = 0,value;
+
+    if (!base) {
+        base = 10;
+        if (*cp == '0') {
+            base = 8;
+            cp++;
+            if ((*cp == 'x') && isxdigit(cp[1])) {
+                cp++;
+                base = 16;
+            }
+        }
+    }
+    while (isxdigit(*cp) && (value = isdigit(*cp) ? *cp-'0' : (islower(*cp)
+        ? toupper(*cp) : *cp)-'A'+10) < base) {
+        result = result*base + value;
+        cp++;
+    }
+    if (endp)
+        *endp = (char *)cp;
+    return result;
+}
+
+/**
+ * simple_strtoll - convert a string to a signed long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+long long simple_strtoll(const char *cp,char **endp,unsigned int base)
+{
+    if(*cp=='-')
+        return -simple_strtoull(cp+1,endp,base);
+    return simple_strtoull(cp,endp,base);
+}
+
+static int skip_atoi(const char **s)
+{
+    int i=0;
+
+    while (isdigit(**s))
+        i = i*10 + *((*s)++) - '0';
+    return i;
+}
+
+#define ZEROPAD 1               /* pad with zero */
+#define SIGN    2               /* unsigned/signed long */
+#define PLUS    4               /* show plus */
+#define SPACE   8               /* space if plus */
+#define LEFT    16              /* left justified */
+#define SPECIAL 32              /* 0x */
+#define LARGE   64              /* use 'ABCDEF' instead of 'abcdef' */
+
+static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
+{
+    char c,sign,tmp[66];
+    const char *digits;
+    static const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+    static const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+    int i;
+
+    digits = (type & LARGE) ? large_digits : small_digits;
+    if (type & LEFT)
+        type &= ~ZEROPAD;
+    if (base < 2 || base > 36)
+        return 0;
+    c = (type & ZEROPAD) ?
'0' : ' '; + sign = 0; + if (type & SIGN) { + if (num < 0) { + sign = '-'; + num = -num; + size--; + } else if (type & PLUS) { + sign = '+'; + size--; + } else if (type & SPACE) { + sign = ' '; + size--; + } + } + if (type & SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++]='0'; + else while (num != 0) + tmp[i++] = digits[do_div(num,base)]; + if (i > precision) + precision = i; + size -= precision; + if (!(type&(ZEROPAD+LEFT))) { + while(size-->0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + } + if (sign) { + if (buf <= end) + *buf = sign; + ++buf; + } + if (type & SPECIAL) { + if (base==8) { + if (buf <= end) + *buf = '0'; + ++buf; + } else if (base==16) { + if (buf <= end) + *buf = '0'; + ++buf; + if (buf <= end) + *buf = digits[33]; + ++buf; + } + } + if (!(type & LEFT)) { + while (size-- > 0) { + if (buf <= end) + *buf = c; + ++buf; + } + } + while (i < precision--) { + if (buf <= end) + *buf = '0'; + ++buf; + } + while (i-- > 0) { + if (buf <= end) + *buf = tmp[i]; + ++buf; + } + while (size-- > 0) { + if (buf <= end) + *buf = ' '; + ++buf; + } + return buf; +} + +/** +* vsnprintf - Format a string and place it in a buffer +* @buf: The buffer to place the result into +* @size: The size of the buffer, including the trailing null space +* @fmt: The format string to use +* @args: Arguments for the format string +* +* Call this function if you are already dealing with a va_list. +* You probably want snprintf instead. + */ +int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) +{ + int len; + unsigned long long num; + int i, base; + char *str, *end, c; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. */ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + + /* Reject out-of-range values early */ + if (unlikely((int) size < 0)) { +#if 0 + /* There can be only one.. 
*/ + static int warn = 1; + if (warn) { + printk(KERN_WARNING "improper call of vsnprintf!\n"); + dump_stack(); + warn = 0; + } +#endif + return 0; + } + + str = buf; + end = buf + size - 1; + + if (end < buf - 1) { + end = ((void *) -1); + size = end - buf + 1; + } + + for (; *fmt ; ++fmt) { + if (*fmt != '%') { + if (str <= end) + *str = *fmt; + ++str; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= LEFT; goto repeat; + case '+': flags |= PLUS; goto repeat; + case ' ': flags |= SPACE; goto repeat; + case '#': flags |= SPECIAL; goto repeat; + case '0': flags |= ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || + *fmt =='Z' || *fmt == 'z') { + qualifier = *fmt; + ++fmt; + if (qualifier == 'l' && *fmt == 'l') { + qualifier = 'L'; + ++fmt; + } + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & LEFT)) { + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + } + c = (unsigned char) va_arg(args, int); + if (str <= end) + *str = c; + ++str; + while (--field_width > 0) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 's': + s = va_arg(args, char *); + if ((unsigned long)s < PAGE_SIZE) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & LEFT)) { + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + } + for (i = 0; i < len; ++i) { + if (str <= end) + *str = *s; + ++str; ++s; + } + while (len < field_width--) { + if (str <= end) + *str = ' '; + ++str; + } + continue; + + case 'p': + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= ZEROPAD; + } + str = number(str, end, + (unsigned long) va_arg(args, void *), + 16, field_width, precision, flags); + continue; + + + case 'n': + /* FIXME: + * What does C99 say about the overflow case here? 
*/ + if (qualifier == 'l') { + long * ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z' || qualifier == 'z') { + size_t * ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int * ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + if (str <= end) + *str = '%'; + ++str; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= SIGN; + case 'u': + break; + + default: + if (str <= end) + *str = '%'; + ++str; + if (*fmt) { + if (str <= end) + *str = *fmt; + ++str; + } else { + --fmt; + } + continue; + } + if (qualifier == 'L') + num = va_arg(args, long long); + else if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & SIGN) + num = (signed long) num; + } else if (qualifier == 'Z' || qualifier == 'z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & SIGN) + num = (signed int) num; + } + str = number(str, end, num, base, + field_width, precision, flags); + } + if (str <= end) + *str = '\0'; + else if (size > 0) + /* don't write out a null byte if the buf size is zero */ + *end = '\0'; + /* the trailing null byte doesn't count towards the total + * ++str; + */ + return str-buf; +} + +/** + * snprintf - Format a string and place it in a buffer + * @buf: The buffer to place the result into + * @size: The size of the buffer, including the trailing null space + * @fmt: The format string to use + * @...: Arguments for the format string + */ +int snprintf(char * buf, size_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i=vsnprintf(buf,size,fmt,args); + va_end(args); + return i; +} + +/** + * vsprintf - Format a string and place it in a buffer + * @buf: The buffer to place the result into + * @fmt: The format string to use + * @args: Arguments for the format string + * + * Call this function if you are already dealing with a va_list. + * You probably want sprintf instead. + */ +int vsprintf(char *buf, const char *fmt, va_list args) +{ + return vsnprintf(buf, (~0U)>>1, fmt, args); +} + + +/** + * sprintf - Format a string and place it in a buffer + * @buf: The buffer to place the result into + * @fmt: The format string to use + * @...: Arguments for the format string + */ +int sprintf(char * buf, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i=vsprintf(buf,fmt,args); + va_end(args); + return i; +} + +/** + * vsscanf - Unformat a buffer into a list of arguments + * @buf: input buffer + * @fmt: format of buffer + * @args: arguments + */ +int vsscanf(const char * buf, const char * fmt, va_list args) +{ + const char *str = buf; + char *next; + char digit; + int num = 0; + int qualifier; + int base; + int field_width; + int is_sign = 0; + + while(*fmt && *str) { + /* skip any white space in format */ + /* white space in format matchs any amount of + * white space, including none, in the input. + */ + if (isspace(*fmt)) { + while (isspace(*fmt)) + ++fmt; + while (isspace(*str)) + ++str; + } + + /* anything that is not a conversion must match exactly */ + if (*fmt != '%' && *fmt) { + if (*fmt++ != *str++) + break; + continue; + } + + if (!*fmt) + break; + ++fmt; + + /* skip this conversion. 
+ * advance both strings to next white space + */ + if (*fmt == '*') { + while (!isspace(*fmt) && *fmt) + fmt++; + while (!isspace(*str) && *str) + str++; + continue; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + + /* get conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || + *fmt == 'Z' || *fmt == 'z') { + qualifier = *fmt; + fmt++; + } + base = 10; + is_sign = 0; + + if (!*fmt || !*str) + break; + + switch(*fmt++) { + case 'c': + { + char *s = (char *) va_arg(args,char*); + if (field_width == -1) + field_width = 1; + do { + *s++ = *str++; + } while (--field_width > 0 && *str); + num++; + } + continue; + case 's': + { + char *s = (char *) va_arg(args, char *); + if(field_width == -1) + field_width = INT_MAX; + /* first, skip leading white space in buffer */ + while (isspace(*str)) + str++; + + /* now copy until next white space */ + while (*str && !isspace(*str) && field_width--) { + *s++ = *str++; + } + *s = '\0'; + num++; + } + continue; + case 'n': + /* return number of characters read so far */ + { + int *i = (int *)va_arg(args,int*); + *i = str - buf; + } + continue; + case 'o': + base = 8; + break; + case 'x': + case 'X': + base = 16; + break; + case 'i': + base = 0; + case 'd': + is_sign = 1; + case 'u': + break; + case '%': + /* looking for '%' in str */ + if (*str++ != '%') + return num; + continue; + default: + /* invalid format; stop here */ + return num; + } + + /* have some sort of integer conversion. + * first, skip white space in buffer. + */ + while (isspace(*str)) + str++; + + digit = *str; + if (is_sign && digit == '-') + digit = *(str + 1); + + if (!digit + || (base == 16 && !isxdigit(digit)) + || (base == 10 && !isdigit(digit)) + || (base == 8 && (!isdigit(digit) || digit > '7')) + || (base == 0 && !isdigit(digit))) + break; + + switch(qualifier) { + case 'h': + if (is_sign) { + short *s = (short *) va_arg(args,short *); + *s = (short) simple_strtol(str,&next,base); + } else { + unsigned short *s = (unsigned short *) va_arg(args, unsigned short *); + *s = (unsigned short) simple_strtoul(str, &next, base); + } + break; + case 'l': + if (is_sign) { + long *l = (long *) va_arg(args,long *); + *l = simple_strtol(str,&next,base); + } else { + unsigned long *l = (unsigned long*) va_arg(args,unsigned long*); + *l = simple_strtoul(str,&next,base); + } + break; + case 'L': + if (is_sign) { + long long *l = (long long*) va_arg(args,long long *); + *l = simple_strtoll(str,&next,base); + } else { + unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*); + *l = simple_strtoull(str,&next,base); + } + break; + case 'Z': + case 'z': + { + size_t *s = (size_t*) va_arg(args,size_t*); + *s = (size_t) simple_strtoul(str,&next,base); + } + break; + default: + if (is_sign) { + int *i = (int *) va_arg(args, int*); + *i = (int) simple_strtol(str,&next,base); + } else { + unsigned int *i = (unsigned int*) va_arg(args, unsigned int*); + *i = (unsigned int) simple_strtoul(str,&next,base); + } + break; + } + num++; + + if (!next) + break; + str = next; + } + return num; +} + +/** + * sscanf - Unformat a buffer into a list of arguments + * @buf: input buffer + * @fmt: formatting of buffer + * @...: resulting arguments + */ +int sscanf(const char * buf, const char * fmt, ...) +{ + va_list args; + int i; + + va_start(args,fmt); + i = vsscanf(buf,fmt,args); + va_end(args); + return i; +}
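Two behaviours of this stock 2.4-series vsprintf.c are worth spelling out: vsnprintf() truncates its output to the buffer yet returns the length the untruncated string would have had, and vsscanf()'s %i auto-detects the number base via simple_strtol() with base 0. A small hosted smoke test follows (hypothetical, not part of the patch; it assumes vsprintf.c is compiled together with the kernel helpers it expects, such as do_div() and strnlen()):

/* hypothetical smoke test for the snprintf()/sscanf() implemented above */
#include <stddef.h>

int snprintf(char *buf, size_t size, const char *fmt, ...);
int sscanf(const char *buf, const char *fmt, ...);

int main(void)
{
    char buf[8];
    int len, val = 0;

    /* output is cut at the buffer, but the return value is the full
       length: "kernel-2.4.26" needs 13 chars, buf ends up as "kernel-" */
    len = snprintf(buf, sizeof(buf), "kernel-%d.%d.%d", 2, 4, 26);

    /* %i auto-detects the base, so "0x1a" parses as decimal 26 */
    sscanf("0x1a", "%i", &val);

    return (len == 13 && val == 26) ? 0 : 1;
}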