/* Modified for Unicorn Engine by Chen Huitao<chenhuitao@hfmrit.com>, 2020 */

#ifndef UNICORN_COMMON_H
#define UNICORN_COMMON_H

#include "tcg/tcg.h"
#include "qemu-common.h"
#include "exec/memory.h"
|
2015-08-21 10:04:50 +03:00
|
|
|
|
|
|
|
// This header define common patterns/codes that will be included in all arch-sepcific
|
|
|
|
// codes for unicorns purposes.
|
|
|
|
|
2021-10-03 17:14:44 +03:00
|
|
|
void vm_start(struct uc_struct*);
void tcg_exec_init(struct uc_struct *uc, uint32_t tb_size);
bool unicorn_fill_tlb(CPUState *cs, vaddr address, int size,
                      MMUAccessType rw, int mmu_idx,
                      bool probe, uintptr_t retaddr);

// return true on success, false on failure
static inline bool cpu_physical_mem_read(AddressSpace *as, hwaddr addr,
                                         uint8_t *buf, int len)
{
    return cpu_physical_memory_rw(as, addr, (void *)buf, len, 0);
}

static inline bool cpu_physical_mem_write(AddressSpace *as, hwaddr addr,
                                          const uint8_t *buf, int len)
{
    return cpu_physical_memory_rw(as, addr, (void *)buf, len, 1);
}
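
/*
 * A minimal usage sketch (illustrative only; the address and buffer are
 * hypothetical). These helpers are installed as uc->read_mem / uc->write_mem
 * in uc_common_init() below, so core code can do, for example:
 *
 *     uint8_t buf[4];
 *     if (!uc->read_mem(&uc->address_space_memory, 0x1000, buf, sizeof(buf))) {
 *         // the physical range is unmapped or only partially mapped
 *     }
 */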

void tb_cleanup(struct uc_struct *uc);
void free_code_gen_buffer(struct uc_struct *uc);

/** Freeing common resources */
static void release_common(void *t)
{
    TCGPool *po, *to;
    TCGContext *s = (TCGContext *)t;
#if TCG_TARGET_REG_BITS == 32
    int i;
#endif

    // Clean TCG.
    TCGOpDef* def = s->tcg_op_defs;
    g_free(def->args_ct);
    g_free(def->sorted_args);
    g_free(s->tcg_op_defs);

    for (po = s->pool_first; po; po = to) {
        to = po->next;
        g_free(po);
    }
    tcg_pool_reset(s);
    g_hash_table_destroy(s->helper_table);
    g_hash_table_destroy(s->custom_helper_infos);
    g_free(s->indirect_reg_alloc_order);
    /* qemu/tcg/tcg.c:4018: img = g_malloc(img_size); */
    g_free((void *)(s->one_entry->symfile_addr));
    g_free(s->one_entry);
    /* qemu/tcg/tcg.c:574: tcg_ctx->tree = g_tree_new(tb_tc_cmp); */
    g_tree_destroy(s->tree);

    // These functions are not available outside of QEMU,
    // so we keep them here instead of calling them from uc_close.
    memory_free(s->uc);
    address_space_destroy(&s->uc->address_space_memory);
    address_space_destroy(&s->uc->address_space_io);
    /* clean up uc->l1_map. */
    tb_cleanup(s->uc);
    /* clean up tcg_ctx->code_gen_buffer. */
    free_code_gen_buffer(s->uc);
    /* qemu/util/qht.c:264: map = qht_map_create(n_buckets); */
    qht_destroy(&s->tb_ctx.htable);

    cpu_watchpoint_remove_all(CPU(s->uc->cpu), BP_CPU);
    cpu_breakpoint_remove_all(CPU(s->uc->cpu), BP_CPU);

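    /* On 32-bit TCG hosts, 64-bit globals are registered as two 32-bit halves
     * whose names were heap-allocated with a "_0"/"_1" suffix; free those
     * name strings here. */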
#if TCG_TARGET_REG_BITS == 32
    for (i = 0; i < s->nb_globals; i++) {
        TCGTemp *ts = &s->temps[i];
        if (ts->base_type == TCG_TYPE_I64) {
            if (ts->name && ((strcmp(ts->name + (strlen(ts->name) - 2), "_0") == 0) ||
                             (strcmp(ts->name + (strlen(ts->name) - 2), "_1") == 0))) {
                free((void *)ts->name);
            }
        }
    }
#endif
}

static inline void target_page_init(struct uc_struct* uc)
{
    uc->target_page_size = TARGET_PAGE_SIZE;
    uc->target_page_align = TARGET_PAGE_SIZE - 1;
}

static uc_err uc_set_tlb(struct uc_struct *uc, int mode)
{
    switch (mode) {
    case UC_TLB_VIRTUAL:
        uc->cpu->cc->tlb_fill = unicorn_fill_tlb;
        return UC_ERR_OK;
    case UC_TLB_CPU:
        uc->cpu->cc->tlb_fill = uc->cpu->cc->tlb_fill_cpu;
        return UC_ERR_OK;
    default:
        return UC_ERR_ARG;
    }
}
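
/*
 * A minimal sketch of how this mode is selected from the public API
 * (illustrative only, assuming the uc_ctl_tlb_mode() convenience macro from
 * <unicorn/unicorn.h>):
 *
 *     uc_engine *uc;
 *     uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc);
 *     // route TLB fills to unicorn_fill_tlb(), enabling UC_HOOK_TLB_FILL hooks
 *     uc_ctl_tlb_mode(uc, UC_TLB_VIRTUAL);
 */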

MemoryRegion *find_memory_mapping(struct uc_struct *uc, hwaddr address)
{
    hwaddr xlat = 0;
    hwaddr len = 1;
    MemoryRegion *mr = address_space_translate(&uc->address_space_memory,
                                               address, &xlat, &len, false,
                                               MEMTXATTRS_UNSPECIFIED);

    if (mr == &uc->io_mem_unassigned) {
        return NULL;
    }
    return mr;
}
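
/*
 * Illustrative only: a hypothetical helper using find_memory_mapping() to
 * check whether a guest physical address is backed by a mapped region:
 *
 *     static bool is_mapped(struct uc_struct *uc, hwaddr addr)
 *     {
 *         return find_memory_mapping(uc, addr) != NULL;
 *     }
 */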

void softfloat_init(void);

static inline void uc_common_init(struct uc_struct* uc)
{
    uc->write_mem = cpu_physical_mem_write;
    uc->read_mem = cpu_physical_mem_read;
    uc->tcg_exec_init = tcg_exec_init;
    uc->cpu_exec_init_all = cpu_exec_init_all;
    uc->vm_start = vm_start;
    uc->memory_map = memory_map;
    uc->memory_map_ptr = memory_map_ptr;
    uc->memory_unmap = memory_unmap;
    uc->memory_moveout = memory_moveout;
    uc->memory_movein = memory_movein;
    uc->readonly_mem = memory_region_set_readonly;
    uc->target_page = target_page_init;
    uc->softfloat_initialize = softfloat_init;
    uc->tcg_flush_tlb = tcg_flush_softmmu_tlb;
    uc->memory_map_io = memory_map_io;
    uc->set_tlb = uc_set_tlb;
    uc->memory_mapping = find_memory_mapping;
    uc->memory_filter_subregions = memory_region_filter_subregions;
    uc->memory_cow = memory_cow;

    if (!uc->release)
        uc->release = release_common;
}
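
/*
 * A hedged sketch of how a target wires itself up (the function and callback
 * names are hypothetical, not a real target): every arch-specific init is
 * expected to call uc_common_init() and then install its own callbacks.
 *
 *     static void some_arch_uc_init(struct uc_struct *uc)
 *     {
 *         uc_common_init(uc);
 *         uc->reg_read = some_arch_reg_read;    // hypothetical callbacks
 *         uc->reg_write = some_arch_reg_write;
 *         uc->release = some_arch_release;      // calls release_common() first
 *     }
 */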

#define CHECK_REG_TYPE(type) do {                \
    if (unlikely(*size < sizeof(type))) {        \
        return UC_ERR_OVERFLOW;                  \
    }                                            \
    *size = sizeof(type);                        \
    ret = UC_ERR_OK;                             \
} while (0)
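
/*
 * A sketch of how CHECK_REG_TYPE() is intended to be used (the register id
 * and CPU state field below are hypothetical). The macro assumes the
 * enclosing reg_read/reg_write implementation has a size_t *size parameter
 * and a local uc_err ret:
 *
 *     uc_err ret = UC_ERR_ARG;
 *     switch (regid) {
 *     case SOME_ARCH_REG_R0:            // hypothetical register id
 *         CHECK_REG_TYPE(uint32_t);     // validates and updates *size
 *         *(uint32_t *)value = env->r0; // hypothetical CPU state field
 *         break;
 *     }
 *     return ret;
 */
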
#endif