/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */
/* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */

#ifndef UC_PRIV_H
#define UC_PRIV_H

#include "unicorn/platform.h"
#include <stdio.h>

#include "qemu.h"
#include "unicorn/unicorn.h"
#include "list.h"

// These are masks of supported modes for each cpu/arch.
// They should be updated when changes are made to the uc_mode enum typedef.
#ifdef UNICORN_HAS_AFL
#define UC_MODE_ARM_MASK (UC_MODE_ARM|UC_MODE_THUMB|UC_MODE_LITTLE_ENDIAN|UC_MODE_MCLASS \
                          |UC_MODE_ARM926|UC_MODE_ARM946|UC_MODE_ARM1176|UC_MODE_BIG_ENDIAN|UC_MODE_AFL)
#define UC_MODE_X86_MASK (UC_MODE_16|UC_MODE_32|UC_MODE_64|UC_MODE_LITTLE_ENDIAN|UC_MODE_AFL)
#define UC_MODE_MIPS_MASK (UC_MODE_MIPS32|UC_MODE_MIPS64|UC_MODE_LITTLE_ENDIAN|UC_MODE_BIG_ENDIAN|UC_MODE_AFL)
#define UC_MODE_PPC_MASK (UC_MODE_PPC32|UC_MODE_PPC64|UC_MODE_BIG_ENDIAN|UC_MODE_AFL)
#define UC_MODE_SPARC_MASK (UC_MODE_SPARC32|UC_MODE_SPARC64|UC_MODE_BIG_ENDIAN|UC_MODE_AFL)
#define UC_MODE_M68K_MASK (UC_MODE_BIG_ENDIAN|UC_MODE_AFL)
#define UC_MODE_RISCV_MASK (UC_MODE_RISCV32|UC_MODE_RISCV64|UC_MODE_LITTLE_ENDIAN|UC_MODE_AFL)
#else
#define UC_MODE_ARM_MASK (UC_MODE_ARM|UC_MODE_THUMB|UC_MODE_LITTLE_ENDIAN|UC_MODE_MCLASS \
                          |UC_MODE_ARM926|UC_MODE_ARM946|UC_MODE_ARM1176|UC_MODE_BIG_ENDIAN)
#define UC_MODE_X86_MASK (UC_MODE_16|UC_MODE_32|UC_MODE_64|UC_MODE_LITTLE_ENDIAN)
#define UC_MODE_MIPS_MASK (UC_MODE_MIPS32|UC_MODE_MIPS64|UC_MODE_LITTLE_ENDIAN|UC_MODE_BIG_ENDIAN)
#define UC_MODE_PPC_MASK (UC_MODE_PPC32|UC_MODE_PPC64|UC_MODE_BIG_ENDIAN)
#define UC_MODE_SPARC_MASK (UC_MODE_SPARC32|UC_MODE_SPARC64|UC_MODE_BIG_ENDIAN)
#define UC_MODE_M68K_MASK (UC_MODE_BIG_ENDIAN)
#define UC_MODE_RISCV_MASK (UC_MODE_RISCV32|UC_MODE_RISCV64|UC_MODE_LITTLE_ENDIAN)
#endif
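
/*
 * Illustrative sketch (not part of the original header; a typical consumer is
 * assumed to be uc_open()-style mode validation): a requested uc_mode is
 * rejected when it sets any bit outside the mask for its architecture, e.g.
 *
 *     if (mode & ~UC_MODE_ARM_MASK) {
 *         return UC_ERR_MODE;
 *     }
 */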

#ifndef NDEBUG
#define UCLOG(...) fprintf(stderr, __VA_ARGS__)
#else
#define UCLOG(...)
#endif
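
/*
 * Usage sketch (illustrative only): debug-build logging that compiles away
 * when NDEBUG is defined, e.g.
 *
 *     UCLOG("unmapped access at 0x%llx\n", (unsigned long long)address);
 */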

#define ARR_SIZE(a) (sizeof(a)/sizeof(a[0]))

#define READ_QWORD(x) ((uint64_t)x)
#define READ_DWORD(x) (x & 0xffffffff)
#define READ_WORD(x) (x & 0xffff)
#define READ_BYTE_H(x) ((x & 0xffff) >> 8)
#define READ_BYTE_L(x) (x & 0xff)

#define WRITE_DWORD(x, w) (x = (x & ~0xffffffffLL) | (w & 0xffffffff))
#define WRITE_WORD(x, w) (x = (x & ~0xffff) | (w & 0xffff))
#define WRITE_BYTE_H(x, b) (x = (x & ~0xff00) | ((b & 0xff) << 8))
#define WRITE_BYTE_L(x, b) (x = (x & ~0xff) | (b & 0xff))
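
/*
 * Worked example (illustrative, not part of the original header): these
 * macros read and update sub-fields of a wider register value in place,
 * as used for x86 AL/AH-style accesses.
 *
 *     uint64_t rax = 0x1122334455667788ULL;
 *     WRITE_BYTE_L(rax, 0xAA);        // rax == 0x11223344556677AA
 *     WRITE_BYTE_H(rax, 0xBB);        // rax == 0x112233445566BBAA
 *     uint16_t ax = READ_WORD(rax);   // ax  == 0xBBAA
 *     uint8_t  ah = READ_BYTE_H(rax); // ah  == 0xBB
 */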

typedef uc_err (*query_t)(struct uc_struct *uc, uc_query_type type, size_t *result);

// return 0 on success, -1 on failure
typedef int (*reg_read_t)(struct uc_struct *uc, unsigned int *regs, void **vals, int count);
typedef int (*reg_write_t)(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count);

typedef int (*context_reg_read_t)(struct uc_context *ctx, unsigned int *regs, void **vals, int count);
typedef int (*context_reg_write_t)(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count);
typedef struct {
    context_reg_read_t context_reg_read;
    context_reg_write_t context_reg_write;
} context_reg_rw_t;

typedef void (*reg_reset_t)(struct uc_struct *uc);

typedef bool (*uc_write_mem_t)(AddressSpace *as, hwaddr addr, const uint8_t *buf, int len);

typedef bool (*uc_read_mem_t)(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);

typedef void (*uc_args_void_t)(void*);

typedef void (*uc_args_uc_t)(struct uc_struct*);
typedef void (*uc_args_int_uc_t)(struct uc_struct*);

typedef void (*uc_args_uc_long_t)(struct uc_struct*, unsigned long);

typedef void (*uc_args_uc_u64_t)(struct uc_struct *, uint64_t addr);

typedef MemoryRegion* (*uc_args_uc_ram_size_t)(struct uc_struct*, hwaddr begin, size_t size, uint32_t perms);

typedef MemoryRegion* (*uc_args_uc_ram_size_ptr_t)(struct uc_struct*, hwaddr begin, size_t size, uint32_t perms, void *ptr);

typedef void (*uc_mem_unmap_t)(struct uc_struct*, MemoryRegion *mr);

typedef void (*uc_readonly_mem_t)(MemoryRegion *mr, bool readonly);

typedef int (*uc_cpus_init)(struct uc_struct *, const char *);

typedef MemoryRegion* (*uc_memory_map_io_t)(struct uc_struct *uc, ram_addr_t begin, size_t size, uc_cb_mmio_read_t read_cb, uc_cb_mmio_write_t write_cb, void *user_data_read, void *user_data_write);

// which interrupt should make emulation stop?
typedef bool (*uc_args_int_t)(struct uc_struct *uc, int intno);

// some architectures redirect virtual memory to physical memory, e.g. MIPS
typedef uint64_t (*uc_mem_redirect_t)(uint64_t address);

// validate if Unicorn supports hooking a given instruction
typedef bool(*uc_insn_hook_validate)(uint32_t insn_enum);

// init target page
typedef void (*uc_target_page_init)(struct uc_struct *);

// soft float init
typedef void (*uc_softfloat_initialize)(void);

// tcg flush softmmu tlb
typedef void (*uc_tcg_flush_tlb)(struct uc_struct *uc);

typedef enum uc_afl_ret {
    UC_AFL_RET_ERROR = 0,    // Something went horribly wrong in the parent
    UC_AFL_RET_CHILD,        // Fork worked; we are a child
    UC_AFL_RET_NO_AFL,       // No AFL, no need to fork
    UC_AFL_RET_CALLED_TWICE, // AFL has already been started before
    UC_AFL_RET_FINISHED,     // We forked before, but now AFL is gone (parent)
} uc_afl_ret;
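
/*
 * Illustrative sketch (an assumption about how a caller might branch on a
 * uc_afl_ret value `ret`; the real dispatch lives in the AFL glue code, not
 * in this header):
 *
 *     switch (ret) {
 *     case UC_AFL_RET_CHILD:    // forked child: execute the next testcase
 *     case UC_AFL_RET_NO_AFL:   // no AFL around: just emulate once
 *         break;
 *     case UC_AFL_RET_FINISHED: // parent: the fuzzing session is over
 *     case UC_AFL_RET_CALLED_TWICE:
 *     case UC_AFL_RET_ERROR:
 *     default:
 *         break;                // stop / report the error
 *     }
 */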

// We use this as a shortcut deep inside uc_afl for the arch-specific uc_afl_next(uc, bool)
typedef uc_afl_ret(*uc_afl_ret_uc_bool_t)(struct uc_struct*, bool);

// afl_forkserver_start
typedef int (*uc_afl_forkserver_t)(struct uc_struct*);

struct hook {
    int type;             // UC_HOOK_*
    int insn;             // instruction for HOOK_INSN
    int refs;             // reference count to free hook stored in multiple lists
    bool to_delete;       // set to true when the hook is deleted by the user. The destruction of the hook is delayed.
    uint64_t begin, end;  // only trigger if PC or memory access is in this address range (depends on hook type)
    void *callback;       // a uc_cb_* type
    void *user_data;
};

// hook list offsets
//
// The lowest 6 bits are used for the hook type index while the others
// are used for hook flags.
//
// mirrors the order of uc_hook_type from include/unicorn/unicorn.h
typedef enum uc_hook_idx {
    UC_HOOK_INTR_IDX,
    UC_HOOK_INSN_IDX,
    UC_HOOK_CODE_IDX,
    UC_HOOK_BLOCK_IDX,
    UC_HOOK_MEM_READ_UNMAPPED_IDX,
    UC_HOOK_MEM_WRITE_UNMAPPED_IDX,
    UC_HOOK_MEM_FETCH_UNMAPPED_IDX,
    UC_HOOK_MEM_READ_PROT_IDX,
    UC_HOOK_MEM_WRITE_PROT_IDX,
    UC_HOOK_MEM_FETCH_PROT_IDX,
    UC_HOOK_MEM_READ_IDX,
    UC_HOOK_MEM_WRITE_IDX,
    UC_HOOK_MEM_FETCH_IDX,
    UC_HOOK_MEM_READ_AFTER_IDX,
    UC_HOOK_INSN_INVALID_IDX,

    UC_HOOK_MAX,
} uc_hook_idx;

// The lowest 6 bits are used for the hook type index.
#define UC_HOOK_IDX_MASK ((1<<6)-1)

// hook flags
#define UC_HOOK_FLAG_NO_STOP (1 << 6) // Don't stop emulation in this uc_tracecode.

// The rest of the bits are reserved for hook flags.
#define UC_HOOK_FLAG_MASK (~(UC_HOOK_IDX_MASK))
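
/*
 * Sketch (an assumption about how an encoded index-plus-flags value `packed`,
 * as passed to uc_tracecode-style helpers, is split back into its parts):
 *
 *     int idx      = packed & UC_HOOK_IDX_MASK;   // e.g. UC_HOOK_CODE_IDX
 *     int flags    = packed & UC_HOOK_FLAG_MASK;  // e.g. UC_HOOK_FLAG_NO_STOP
 *     bool no_stop = (flags & UC_HOOK_FLAG_NO_STOP) != 0;
 */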

#define HOOK_FOREACH_VAR_DECLARE \
    struct list_item *cur

// for loop macro to loop over hook lists
#define HOOK_FOREACH(uc, hh, idx) \
    for ( \
        cur = (uc)->hook[idx##_IDX].head; \
        cur != NULL && ((hh) = (struct hook *)cur->data); \
        cur = cur->next)

// if statement to check hook bounds
#define HOOK_BOUND_CHECK(hh, addr) \
    ((((addr) >= (hh)->begin && (addr) <= (hh)->end) \
        || (hh)->begin > (hh)->end) && !((hh)->to_delete))

#define HOOK_EXISTS(uc, idx) ((uc)->hook[idx##_IDX].head != NULL)
#define HOOK_EXISTS_BOUNDED(uc, idx, addr) _hook_exists_bounded((uc)->hook[idx##_IDX].head, addr)

static inline bool _hook_exists_bounded(struct list_item *cur, uint64_t addr)
{
    while (cur != NULL) {
        if (HOOK_BOUND_CHECK((struct hook *)cur->data, addr))
            return true;
        cur = cur->next;
    }
    return false;
}
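
/*
 * Usage sketch (illustrative; mirrors how uc.c-style callers are assumed to
 * walk a hook list, with `uc`, `address` and `size` in scope): invoke every
 * UC_HOOK_CODE callback whose range covers `address`.
 *
 *     struct hook *hh;
 *     HOOK_FOREACH_VAR_DECLARE;
 *     HOOK_FOREACH(uc, hh, UC_HOOK_CODE) {
 *         if (HOOK_BOUND_CHECK(hh, address)) {
 *             ((uc_cb_hookcode_t)hh->callback)(uc, address, size, hh->user_data);
 *         }
 *     }
 */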

// realloc increment, KEEP THIS A POWER OF 2!
#define MEM_BLOCK_INCR 32

typedef struct TargetPageBits TargetPageBits;
typedef struct TCGContext TCGContext;

struct uc_struct {
    uc_arch arch;
    uc_mode mode;
    uc_err errnum;  // qemu/cpu-exec.c
    AddressSpace address_space_memory;
    AddressSpace address_space_io;
    query_t query;
    reg_read_t reg_read;
    reg_write_t reg_write;
    reg_reset_t reg_reset;

    uc_write_mem_t write_mem;
    uc_read_mem_t read_mem;
    uc_args_void_t release;        // release resources on uc_close()
    uc_args_uc_u64_t set_pc;       // set PC for tracecode
    uc_args_int_t stop_interrupt;  // check if the interrupt should stop emulation
    uc_memory_map_io_t memory_map_io;

    uc_args_uc_t init_arch, cpu_exec_init_all;
    uc_args_int_uc_t vm_start;
    uc_args_uc_long_t tcg_exec_init;
    uc_args_uc_ram_size_t memory_map;
    uc_args_uc_ram_size_ptr_t memory_map_ptr;
    uc_mem_unmap_t memory_unmap;
    uc_readonly_mem_t readonly_mem;
    uc_mem_redirect_t mem_redirect;
    uc_cpus_init cpus_init;
    uc_target_page_init target_page;
    uc_softfloat_initialize softfloat_initialize;
    uc_tcg_flush_tlb tcg_flush_tlb;

    /* only 1 cpu in unicorn,
       so we do not need current_cpu to track the currently running cpu. */
    CPUState *cpu;

    uc_insn_hook_validate insn_hook_validate;

    MemoryRegion *system_memory;     // qemu/exec.c
    MemoryRegion *system_io;         // qemu/exec.c
    MemoryRegion io_mem_unassigned;  // qemu/exec.c
    RAMList ram_list;                // qemu/exec.c
    /* qemu/exec.c */
    unsigned int alloc_hint;
    /* qemu/exec-vary.c */
    TargetPageBits *init_target_page;
    BounceBuffer bounce;                 // qemu/cpu-exec.c
    volatile sig_atomic_t exit_request;  // qemu/cpu-exec.c
    /* qemu/accel/tcg/cpu-exec-common.c */
    /* always true after tcg_exec_init() has been called. */
    bool tcg_allowed;
    /* This is a multi-level map on the virtual address space.
       The bottom level has pointers to PageDesc. */
    void **l1_map;  // qemu/accel/tcg/translate-all.c
    size_t l1_map_size;
    /* qemu/accel/tcg/translate-all.c */
    int v_l1_size;
    int v_l1_shift;
    int v_l2_levels;
    /* code generation context */
    TCGContext *tcg_ctx;
    /* memory.c */
    QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners;
    QTAILQ_HEAD(, AddressSpace) address_spaces;
    GHashTable *flat_views;
    bool memory_region_update_pending;

    // linked lists containing hooks per type
    struct list hook[UC_HOOK_MAX];
    struct list hooks_to_del;

    // hook to count number of instructions for uc_emu_start()
    uc_hook count_hook;

    size_t emu_counter;  // current counter of uc_emu_start()
    size_t emu_count;    // save counter of uc_emu_start()

    int size_recur_mem;  // size for mem access when in a recursive call

    bool init_tcg;        // already initialized local TCGv variables?
    bool stop_request;    // request to immediately stop emulation - for uc_emu_stop()
    bool quit_request;    // request to quit the current TB, but continue to emulate - for uc_mem_protect()
    bool emulation_done;  // emulation is done by uc_emu_start()
    bool timed_out;       // emulation timed out; can be retrieved via uc_query(UC_QUERY_TIMEOUT)
    QemuThread timer;     // timer for emulation timeout
    uint64_t timeout;     // timeout for uc_emu_start()

    uint64_t invalid_addr;  // invalid address to be accessed
    int invalid_error;      // invalid memory code: 1 = READ, 2 = WRITE, 3 = CODE

    uint64_t addr_end;  // address where emulation stops (@end param of uc_emu_start())

    int thumb;  // thumb mode for ARM
    MemoryRegion **mapped_blocks;
    uint32_t mapped_block_count;
    uint32_t mapped_block_cache_index;
    void *qemu_thread_data;  // to support cross-compiling to Windows (qemu-thread-win32.c)
    uint32_t target_page_size;
    uint32_t target_page_align;
    uint64_t qemu_host_page_size;
    uint64_t qemu_real_host_page_size;
    int qemu_icache_linesize;
    /* ARCH_REGS_STORAGE_SIZE */
    int cpu_context_size;
    uint64_t next_pc;  // save next PC for some special cases
    bool hook_insert;  // insert new hook at the beginning of the hook list (append by default)
    bool first_tb;     // is this the first Translation-Block ever generated since uc_emu_start()?
    struct list saved_contexts;  // The contexts saved by this uc_struct.
    bool no_exit_request;  // Disable check_exit_request temporarily. A workaround to treat the IT block as a whole block.

#ifdef UNICORN_HAS_AFL
    uc_afl_forkserver_t afl_forkserver_start;     // function to start the afl forkserver
    uc_afl_ret_uc_bool_t afl_child_request_next;  // function from child to ask for a new testcase (if in child)
    int afl_child_pipe[2];   // pipe used to send information from child process to forkserver
    int afl_parent_pipe[2];  // pipe used to send information from parent to child in forkserver
    uint8_t *afl_area_ptr;   // map, shared with afl, to report coverage feedback etc. during runs
    uint64_t afl_prev_loc;   // previous location
    int afl_compcov_level;   // how much compcov we want
    unsigned int afl_inst_rms;
    size_t exit_count;  // number of exits set in afl_fuzz or afl_forkserver
    uint64_t *exits;    // pointer to the actual exits
    char *afl_testcase_ptr;         // map, shared with afl, to get testcases delivered for each run
    uint32_t *afl_testcase_size_p;  // size of the current testcase, if using shared-map fuzzing with afl
    void *afl_data_ptr;             // pointer for various (bindings-related) uses
#endif
};

// Metadata stub for the variable-size cpu context used with uc_context_*()
// We also save cpu->jmp_env, so emulation can be reentrant
struct uc_context {
    size_t context_size;   // size of the real internal context structure
    size_t jmp_env_size;   // size of cpu->jmp_env
    uc_mode mode;          // the mode of this context (uc may already be freed)
    uc_arch arch;          // the arch of this context (uc may already be freed)
    struct uc_struct *uc;  // the uc_struct which created this context
    char data[0];          // context + cpu->jmp_env
};
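
/*
 * Size sketch (an assumption about how the flexible `data` member is laid out
 * by the uc_context_* allocation path, for some `struct uc_context *ctx`):
 * the saved cpu context comes first, followed by the saved cpu->jmp_env.
 *
 *     size_t total = sizeof(struct uc_context)
 *                  + ctx->context_size
 *                  + ctx->jmp_env_size;
 */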

// check if this address is mapped in (via uc_mem_map())
MemoryRegion *memory_mapping(struct uc_struct* uc, uint64_t address);
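
/*
 * Usage sketch (illustrative, assuming `uc` and `address` are in scope):
 * bail out when an address has not been mapped in via uc_mem_map().
 *
 *     MemoryRegion *mr = memory_mapping(uc, address);
 *     if (mr == NULL) {
 *         return UC_ERR_READ_UNMAPPED;
 *     }
 */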

#endif
/* vim: set ts=4 noet: */