/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */
/* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */
#ifndef UC_PRIV_H
#define UC_PRIV_H
#include "unicorn/platform.h"
#include <stdio.h>
#include "qemu.h"
#include "unicorn/unicorn.h"
#include "list.h"
// These are masks of supported modes for each cpu/arch.
// They should be updated when changes are made to the uc_mode enum typedef.
#ifdef UNICORN_HAS_AFL
#define UC_MODE_ARM_MASK (UC_MODE_ARM|UC_MODE_THUMB|UC_MODE_LITTLE_ENDIAN|UC_MODE_MCLASS \
|UC_MODE_ARM926|UC_MODE_ARM946|UC_MODE_ARM1176|UC_MODE_BIG_ENDIAN|UC_MODE_AFL)
#define UC_MODE_X86_MASK (UC_MODE_16|UC_MODE_32|UC_MODE_64|UC_MODE_LITTLE_ENDIAN|UC_MODE_AFL)
#define UC_MODE_MIPS_MASK (UC_MODE_MIPS32|UC_MODE_MIPS64|UC_MODE_LITTLE_ENDIAN|UC_MODE_BIG_ENDIAN|UC_MODE_AFL)
#define UC_MODE_PPC_MASK (UC_MODE_PPC32|UC_MODE_PPC64|UC_MODE_BIG_ENDIAN|UC_MODE_AFL)
#define UC_MODE_SPARC_MASK (UC_MODE_SPARC32|UC_MODE_SPARC64|UC_MODE_BIG_ENDIAN|UC_MODE_AFL)
#define UC_MODE_M68K_MASK (UC_MODE_BIG_ENDIAN|UC_MODE_AFL)
#define UC_MODE_RISCV_MASK (UC_MODE_RISCV32|UC_MODE_RISCV64|UC_MODE_LITTLE_ENDIAN|UC_MODE_AFL)
#else
#define UC_MODE_ARM_MASK (UC_MODE_ARM|UC_MODE_THUMB|UC_MODE_LITTLE_ENDIAN|UC_MODE_MCLASS \
|UC_MODE_ARM926|UC_MODE_ARM946|UC_MODE_ARM1176|UC_MODE_BIG_ENDIAN)
#define UC_MODE_X86_MASK (UC_MODE_16|UC_MODE_32|UC_MODE_64|UC_MODE_LITTLE_ENDIAN)
#define UC_MODE_MIPS_MASK (UC_MODE_MIPS32|UC_MODE_MIPS64|UC_MODE_LITTLE_ENDIAN|UC_MODE_BIG_ENDIAN)
#define UC_MODE_PPC_MASK (UC_MODE_PPC32|UC_MODE_PPC64|UC_MODE_BIG_ENDIAN)
#define UC_MODE_SPARC_MASK (UC_MODE_SPARC32|UC_MODE_SPARC64|UC_MODE_BIG_ENDIAN)
#define UC_MODE_M68K_MASK (UC_MODE_BIG_ENDIAN)
#define UC_MODE_RISCV_MASK (UC_MODE_RISCV32|UC_MODE_RISCV64|UC_MODE_LITTLE_ENDIAN)
#endif
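
/*
 * A minimal sketch (illustrative, not part of this header) of how these
 * masks are used: a mode requested at uc_open() is acceptable only if it
 * sets no bits outside the mask for its arch. The helper name
 * uc_mode_is_supported() is hypothetical.
 *
 *     static bool uc_mode_is_supported(uc_arch arch, uc_mode mode)
 *     {
 *         switch (arch) {
 *         case UC_ARCH_ARM:  return (mode & ~UC_MODE_ARM_MASK) == 0;
 *         case UC_ARCH_X86:  return (mode & ~UC_MODE_X86_MASK) == 0;
 *         case UC_ARCH_MIPS: return (mode & ~UC_MODE_MIPS_MASK) == 0;
 *         default:           return false; // remaining arches omitted
 *         }
 *     }
 */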
#ifndef NDEBUG
#define UCLOG(...) fprintf(stderr, __VA_ARGS__)
#else
#define UCLOG(...)
#endif
#define ARR_SIZE(a) (sizeof(a)/sizeof(a[0]))
#define READ_QWORD(x) ((uint64_t)(x))
#define READ_DWORD(x) ((x) & 0xffffffff)
#define READ_WORD(x) ((x) & 0xffff)
#define READ_BYTE_H(x) (((x) & 0xffff) >> 8)
#define READ_BYTE_L(x) ((x) & 0xff)
#define WRITE_DWORD(x, w) ((x) = ((x) & ~0xffffffffLL) | ((w) & 0xffffffff))
#define WRITE_WORD(x, w) ((x) = ((x) & ~0xffff) | ((w) & 0xffff))
#define WRITE_BYTE_H(x, b) ((x) = ((x) & ~0xff00) | (((b) & 0xff) << 8))
#define WRITE_BYTE_L(x, b) ((x) = ((x) & ~0xff) | ((b) & 0xff))
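
/*
 * Usage sketch for the slice macros above: given a 64-bit x86-style
 * register value, the WRITE_* macros update only the addressed
 * sub-register, mirroring RAX/EAX/AX/AH/AL aliasing (values illustrative):
 *
 *     uint64_t rax = 0x1122334455667788ULL;
 *     WRITE_WORD(rax, 0xBEEF);      // rax == 0x112233445566BEEF
 *     WRITE_BYTE_H(rax, 0xAA);      // rax == 0x112233445566AAEF
 *     uint16_t ax = READ_WORD(rax); // ax  == 0xAAEF
 */
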
typedef uc_err (*query_t)(struct uc_struct *uc, uc_query_type type, size_t *result);
// return 0 on success, -1 on failure
typedef int (*reg_read_t)(struct uc_struct *uc, unsigned int *regs, void **vals, int count);
typedef int (*reg_write_t)(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count);
typedef int (*context_reg_read_t)(struct uc_context *ctx, unsigned int *regs, void **vals, int count);
typedef int (*context_reg_write_t)(struct uc_context *ctx, unsigned int *regs, void *const *vals, int count);
typedef struct {
context_reg_read_t context_reg_read;
context_reg_write_t context_reg_write;
} context_reg_rw_t;
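
/*
 * Sketch of how a single-register read funnels into the batch accessor
 * above (simplified; the UC_ERR_ARG mapping here is an assumption, not
 * the exact error handling in uc.c):
 *
 *     uc_err uc_reg_read(uc_engine *uc, int regid, void *value)
 *     {
 *         unsigned int regs[1] = { (unsigned int)regid };
 *         void *vals[1] = { value };
 *         // reg_read returns 0 on success, -1 on failure (see above)
 *         return uc->reg_read(uc, regs, vals, 1) == 0 ? UC_ERR_OK : UC_ERR_ARG;
 *     }
 */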
typedef void (*reg_reset_t)(struct uc_struct *uc);
typedef bool (*uc_write_mem_t)(AddressSpace *as, hwaddr addr, const uint8_t *buf, int len);
typedef bool (*uc_read_mem_t)(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);
typedef void (*uc_args_void_t)(void*);
typedef void (*uc_args_uc_t)(struct uc_struct*);
typedef void (*uc_args_int_uc_t)(struct uc_struct*); // note: takes only a uc_struct* despite the name
typedef void (*uc_args_uc_long_t)(struct uc_struct*, unsigned long);
typedef void (*uc_args_uc_u64_t)(struct uc_struct *, uint64_t addr);
typedef MemoryRegion* (*uc_args_uc_ram_size_t)(struct uc_struct*, hwaddr begin, size_t size, uint32_t perms);
typedef MemoryRegion* (*uc_args_uc_ram_size_ptr_t)(struct uc_struct*, hwaddr begin, size_t size, uint32_t perms, void *ptr);
typedef void (*uc_mem_unmap_t)(struct uc_struct*, MemoryRegion *mr);
typedef void (*uc_readonly_mem_t)(MemoryRegion *mr, bool readonly);
typedef int (*uc_cpus_init)(struct uc_struct *, const char *);
typedef MemoryRegion* (*uc_memory_map_io_t)(struct uc_struct *uc, ram_addr_t begin, size_t size, uc_cb_mmio_read_t read_cb, uc_cb_mmio_write_t write_cb, void *user_data_read, void *user_data_write);
// which interrupt should make emulation stop?
typedef bool (*uc_args_int_t)(struct uc_struct *uc, int intno);
// some architectures, such as MIPS, redirect virtual memory to physical memory
typedef uint64_t (*uc_mem_redirect_t)(uint64_t address);
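/*
 * For example, on MIPS the fixed kseg0/kseg1 windows both map onto low
 * physical addresses; an illustrative redirect (the real logic lives in
 * the per-arch code) could look like:
 *
 *     static uint64_t example_mips_redirect(uint64_t va)
 *     {
 *         if (va >= 0x80000000ULL && va < 0xA0000000ULL)
 *             return va - 0x80000000ULL; // kseg0 (cached)
 *         if (va >= 0xA0000000ULL && va < 0xC0000000ULL)
 *             return va - 0xA0000000ULL; // kseg1 (uncached)
 *         return va;
 *     }
 */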
// validate if Unicorn supports hooking a given instruction
typedef bool (*uc_insn_hook_validate)(uint32_t insn_enum);
// init target page
typedef void (*uc_target_page_init)(struct uc_struct *);
// soft float init
typedef void (*uc_softfloat_initialize)(void);
// tcg flush softmmu tlb
typedef void (*uc_tcg_flush_tlb)(struct uc_struct *uc);
typedef enum uc_afl_ret {
UC_AFL_RET_ERROR = 0, // Something went horribly wrong in the parent
UC_AFL_RET_CHILD, // Fork worked; we are the child
UC_AFL_RET_NO_AFL, // No AFL, no need to fork.
UC_AFL_RET_CALLED_TWICE, // AFL has already been started before.
UC_AFL_RET_FINISHED, // We forked before but now AFL is gone (parent)
} uc_afl_ret;

// used as a shortcut deep inside uc_afl for the arch-specific uc_afl_next(uc, bool)
typedef uc_afl_ret(*uc_afl_ret_uc_bool_t)(struct uc_struct*, bool);
// afl_forkserver_start
typedef int (*uc_afl_forkserver_t)(struct uc_struct*);
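
/*
 * Illustrative harness-side handling of uc_afl_ret (a sketch assuming the
 * unicornafl flow; afl_forkserver_start in uc_struct below stores the real
 * entry point):
 *
 *     switch ((uc_afl_ret)uc->afl_forkserver_start(uc)) {
 *     case UC_AFL_RET_CHILD:    // forked child: execute the testcase
 *     case UC_AFL_RET_NO_AFL:   // no AFL around: single standalone run
 *         break;                // proceed to uc_emu_start()
 *     case UC_AFL_RET_FINISHED: // parent, AFL is gone: tear down
 *     case UC_AFL_RET_ERROR:
 *     case UC_AFL_RET_CALLED_TWICE:
 *     default:
 *         return;               // hypothetical harness error/exit path
 *     }
 */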
struct hook {
int type; // UC_HOOK_*
int insn; // instruction for HOOK_INSN
int refs; // reference count to free hook stored in multiple lists
bool to_delete; // set to true when the hook is deleted by the user. The destruction of the hook is delayed.
uint64_t begin, end; // only trigger if PC or memory access is within this address range (depends on hook type)
void *callback; // a uc_cb_* type
void *user_data;
};
// hook list offsets
//
// The lowest 6 bits are used for hook type index while the others
// are used for hook flags.
//
// mirrors the order of uc_hook_type from include/unicorn/unicorn.h
typedef enum uc_hook_idx {
UC_HOOK_INTR_IDX,
UC_HOOK_INSN_IDX,
UC_HOOK_CODE_IDX,
UC_HOOK_BLOCK_IDX,
UC_HOOK_MEM_READ_UNMAPPED_IDX,
UC_HOOK_MEM_WRITE_UNMAPPED_IDX,
UC_HOOK_MEM_FETCH_UNMAPPED_IDX,
UC_HOOK_MEM_READ_PROT_IDX,
UC_HOOK_MEM_WRITE_PROT_IDX,
UC_HOOK_MEM_FETCH_PROT_IDX,
UC_HOOK_MEM_READ_IDX,
UC_HOOK_MEM_WRITE_IDX,
UC_HOOK_MEM_FETCH_IDX,
UC_HOOK_MEM_READ_AFTER_IDX,
UC_HOOK_INSN_INVALID_IDX,
UC_HOOK_MAX,
} uc_hook_idx;
// The lowest 6 bits are used for hook type index.
#define UC_HOOK_IDX_MASK ((1<<6)-1)
// hook flags
#define UC_HOOK_FLAG_NO_STOP (1 << 6) // Don't stop emulation in this uc_tracecode.
// The rest of bits are reserved for hook flags.
#define UC_HOOK_FLAG_MASK (~(UC_HOOK_IDX_MASK))
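
/*
 * A packed hook type therefore splits into an index that selects the
 * uc->hook[] list and flag bits, e.g. (illustrative):
 *
 *     int idx   = hh->type & UC_HOOK_IDX_MASK;  // e.g. UC_HOOK_CODE_IDX
 *     int flags = hh->type & UC_HOOK_FLAG_MASK; // e.g. UC_HOOK_FLAG_NO_STOP
 */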
#define HOOK_FOREACH_VAR_DECLARE \
struct list_item *cur
// for loop macro to loop over hook lists
#define HOOK_FOREACH(uc, hh, idx) \
for ( \
cur = (uc)->hook[idx##_IDX].head; \
cur != NULL && ((hh) = (struct hook *)cur->data); \
cur = cur->next)
// check hook bounds; a hook with begin > end matches any address, and deleted hooks never match
#define HOOK_BOUND_CHECK(hh, addr) \
((((addr) >= (hh)->begin && (addr) <= (hh)->end) \
|| (hh)->begin > (hh)->end) && !((hh)->to_delete))
#define HOOK_EXISTS(uc, idx) ((uc)->hook[idx##_IDX].head != NULL)
#define HOOK_EXISTS_BOUNDED(uc, idx, addr) _hook_exists_bounded((uc)->hook[idx##_IDX].head, addr)
static inline bool _hook_exists_bounded(struct list_item *cur, uint64_t addr)
{
while (cur != NULL) {
if (HOOK_BOUND_CHECK((struct hook *)cur->data, addr))
return true;
cur = cur->next;
}
return false;
}
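
/*
 * Typical dispatch over registered code hooks using the macros above
 * (sketch; the callback cast follows uc_cb_hookcode_t from unicorn.h):
 *
 *     struct hook *hh;
 *     HOOK_FOREACH_VAR_DECLARE;
 *     HOOK_FOREACH(uc, hh, UC_HOOK_CODE) {
 *         if (HOOK_BOUND_CHECK(hh, address))
 *             ((uc_cb_hookcode_t)hh->callback)(uc, address, size, hh->user_data);
 *     }
 */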
// realloc increment, KEEP THIS A POWER OF 2!
#define MEM_BLOCK_INCR 32
typedef struct TargetPageBits TargetPageBits;
typedef struct TCGContext TCGContext;
struct uc_struct {
uc_arch arch;
uc_mode mode;
uc_err errnum; // qemu/cpu-exec.c
AddressSpace address_space_memory;
AddressSpace address_space_io;
query_t query;
reg_read_t reg_read;
reg_write_t reg_write;
reg_reset_t reg_reset;
uc_write_mem_t write_mem;
uc_read_mem_t read_mem;
uc_args_void_t release; // release resource when uc_close()
uc_args_uc_u64_t set_pc; // set PC for tracecode
uc_args_int_t stop_interrupt; // check if the interrupt should stop emulation
uc_memory_map_io_t memory_map_io;
uc_args_uc_t init_arch, cpu_exec_init_all;
uc_args_int_uc_t vm_start;
uc_args_uc_long_t tcg_exec_init;
uc_args_uc_ram_size_t memory_map;
uc_args_uc_ram_size_ptr_t memory_map_ptr;
uc_mem_unmap_t memory_unmap;
uc_readonly_mem_t readonly_mem;
uc_mem_redirect_t mem_redirect;
uc_cpus_init cpus_init;
uc_target_page_init target_page;
uc_softfloat_initialize softfloat_initialize;
uc_tcg_flush_tlb tcg_flush_tlb;
/* Only one CPU in Unicorn; we do not need current_cpu to
   track the currently running CPU. */
CPUState *cpu;
uc_insn_hook_validate insn_hook_validate;
MemoryRegion *system_memory; // qemu/exec.c
MemoryRegion *system_io; // qemu/exec.c
MemoryRegion io_mem_unassigned; // qemu/exec.c
RAMList ram_list; // qemu/exec.c
/* qemu/exec.c */
unsigned int alloc_hint;
/* qemu/exec-vary.c */
TargetPageBits *init_target_page;
BounceBuffer bounce; // qemu/cpu-exec.c
volatile sig_atomic_t exit_request; // qemu/cpu-exec.c
/* qemu/accel/tcg/cpu-exec-common.c */
/* always true after tcg_exec_init() has been called. */
bool tcg_allowed;
/* This is a multi-level map on the virtual address space.
The bottom level has pointers to PageDesc. */
void **l1_map; // qemu/accel/tcg/translate-all.c
size_t l1_map_size;
/* qemu/accel/tcg/translate-all.c */
int v_l1_size;
int v_l1_shift;
int v_l2_levels;
/* code generation context */
TCGContext *tcg_ctx;
/* memory.c */
QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners;
QTAILQ_HEAD(, AddressSpace) address_spaces;
GHashTable *flat_views;
bool memory_region_update_pending;
// linked lists containing hooks per type
struct list hook[UC_HOOK_MAX];
struct list hooks_to_del;
// hook to count number of instructions for uc_emu_start()
uc_hook count_hook;
size_t emu_counter; // current counter of uc_emu_start()
size_t emu_count; // save counter of uc_emu_start()
int size_recur_mem; // size for mem access when in a recursive call
bool init_tcg; // already initialized local TCGv variables?
bool stop_request; // request to immediately stop emulation - for uc_emu_stop()
bool quit_request; // request to quit the current TB, but continue to emulate - for uc_mem_protect()
bool emulation_done; // emulation is done by uc_emu_start()
bool timed_out; // emulation timed out; retrievable via uc_query(UC_QUERY_TIMEOUT)
QemuThread timer; // timer for emulation timeout
uint64_t timeout; // timeout for uc_emu_start()
uint64_t invalid_addr; // invalid address to be accessed
int invalid_error; // invalid memory code: 1 = READ, 2 = WRITE, 3 = CODE
uint64_t addr_end; // address where emulation stops (@end param of uc_emu_start())
int thumb; // thumb mode for ARM
MemoryRegion **mapped_blocks;
uint32_t mapped_block_count;
uint32_t mapped_block_cache_index;
void *qemu_thread_data; // to support cross compile to Windows (qemu-thread-win32.c)
uint32_t target_page_size;
uint32_t target_page_align;
uint64_t qemu_host_page_size;
uint64_t qemu_real_host_page_size;
int qemu_icache_linesize;
/* ARCH_REGS_STORAGE_SIZE */
int cpu_context_size;
uint64_t next_pc; // save next PC for some special cases
bool hook_insert; // insert new hook at begin of the hook list (append by default)
bool first_tb; // is this the first Translation-Block ever generated since uc_emu_start()?
struct list saved_contexts; // The contexts saved by this uc_struct.
bool no_exit_request; // Disable check_exit_request temporarily. A workaround to treat the IT block as a whole block.
#ifdef UNICORN_HAS_AFL
uc_afl_forkserver_t afl_forkserver_start; // function to start afl forkserver
uc_afl_ret_uc_bool_t afl_child_request_next; // function from child to ask for new testcase (if in child)
int afl_child_pipe[2]; // pipe used to send information from child process to forkserver
int afl_parent_pipe[2]; // pipe used to send information from parent to child in forkserver
uint8_t *afl_area_ptr; // map, shared with afl, to report coverage feedback etc. during runs
uint64_t afl_prev_loc; // previous location
int afl_compcov_level; // how much compcov instrumentation we want
unsigned int afl_inst_rms; // instrumentation density ratio, as in AFL's qemu_mode (assumed)
size_t exit_count; // number of exits set in afl_fuzz or afl_forkserver
uint64_t *exits; // pointer to the actual exits
char *afl_testcase_ptr; // map, shared with afl, through which test cases are delivered for each run
uint32_t *afl_testcase_size_p; // size of the current testcase, if using shared map fuzzing with afl.
void *afl_data_ptr; // Pointer for various (bindings-related) uses.
#endif
};
// Metadata stub for the variable-size cpu context used with uc_context_*()
// We also save cpu->jmp_env, so emulation can be reentrant
struct uc_context {
size_t context_size; // size of the real internal context structure
size_t jmp_env_size; // size of cpu->jmp_env
uc_mode mode; // the mode of this context (uc may be free-ed already)
uc_arch arch; // the arch of this context (uc may be free-ed already)
struct uc_struct *uc; // the uc_struct which creates this context
char data[0]; // context + cpu->jmp_env
};
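
/*
 * Because data[] is a flexible tail, a context is allocated as one block
 * sized from the live uc_struct. A simplified sketch of what
 * uc_context_alloc() sets up (field sources assumed):
 *
 *     size_t total = uc->cpu_context_size + sizeof(uc->cpu->jmp_env);
 *     struct uc_context *ctx = malloc(sizeof(*ctx) + total);
 *     ctx->context_size = uc->cpu_context_size;
 *     ctx->jmp_env_size = sizeof(uc->cpu->jmp_env);
 *     ctx->arch = uc->arch;
 *     ctx->mode = uc->mode;
 *     ctx->uc = uc;
 */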
// check if this address is mapped in (via uc_mem_map())
MemoryRegion *memory_mapping(struct uc_struct* uc, uint64_t address);
#endif
/* vim: set ts=4 noet: */