Merge pull request #1746 from PhilippTakacs/virtual_tlb

Virtual tlb
This commit is contained in:
lazymio 2023-03-28 21:17:24 +08:00 committed by GitHub
commit 51a5b68b50
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
56 changed files with 1406 additions and 316 deletions

View File

@ -446,6 +446,8 @@ set(UNICORN_ARCH_COMMON
qemu/accel/tcg/tcg-runtime-gvec.c
qemu/accel/tcg/translate-all.c
qemu/accel/tcg/translator.c
qemu/softmmu/unicorn_vtlb.c
)
if(UNICORN_HAS_X86)
@ -1188,7 +1190,7 @@ set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} unicorn-common)
if(UNICORN_HAS_X86)
set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_X86)
set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} x86_64-softmmu)
set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_x86 sample_x86_32_gdt_and_seg_regs sample_batch_reg mem_apis shellcode)
set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_x86 sample_x86_32_gdt_and_seg_regs sample_batch_reg mem_apis shellcode sample_mmu)
target_link_libraries(x86_64-softmmu PRIVATE unicorn-common)
set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_x86)
endif()

View File

@ -111,6 +111,7 @@ module Common =
let UC_HOOK_INSN_INVALID = 16384
let UC_HOOK_EDGE_GENERATED = 32768
let UC_HOOK_TCG_OPCODE = 65536
let UC_HOOK_TLB_FILL = 131072
let UC_HOOK_MEM_UNMAPPED = 112
let UC_HOOK_MEM_PROT = 896
let UC_HOOK_MEM_READ_INVALID = 144
@ -128,6 +129,9 @@ module Common =
let UC_CTL_IO_READ = 2
let UC_CTL_IO_READ_WRITE = 3
let UC_TLB_CPU = 0
let UC_TLB_VIRTUAL = 1
let UC_CTL_UC_MODE = 0
let UC_CTL_UC_PAGE_SIZE = 1
let UC_CTL_UC_ARCH = 2
@ -139,6 +143,8 @@ module Common =
let UC_CTL_TB_REQUEST_CACHE = 8
let UC_CTL_TB_REMOVE_CACHE = 9
let UC_CTL_TB_FLUSH = 10
let UC_CTL_TLB_FLUSH = 11
let UC_CTL_TLB_TYPE = 12
let UC_PROT_NONE = 0
let UC_PROT_READ = 1

View File

@ -106,6 +106,7 @@ const (
HOOK_INSN_INVALID = 16384
HOOK_EDGE_GENERATED = 32768
HOOK_TCG_OPCODE = 65536
HOOK_TLB_FILL = 131072
HOOK_MEM_UNMAPPED = 112
HOOK_MEM_PROT = 896
HOOK_MEM_READ_INVALID = 144
@ -123,6 +124,9 @@ const (
CTL_IO_READ = 2
CTL_IO_READ_WRITE = 3
TLB_CPU = 0
TLB_VIRTUAL = 1
CTL_UC_MODE = 0
CTL_UC_PAGE_SIZE = 1
CTL_UC_ARCH = 2
@ -134,6 +138,8 @@ const (
CTL_TB_REQUEST_CACHE = 8
CTL_TB_REMOVE_CACHE = 9
CTL_TB_FLUSH = 10
CTL_TLB_FLUSH = 11
CTL_TLB_TYPE = 12
PROT_NONE = 0
PROT_READ = 1

View File

@ -108,6 +108,7 @@ public interface UnicornConst {
public static final int UC_HOOK_INSN_INVALID = 16384;
public static final int UC_HOOK_EDGE_GENERATED = 32768;
public static final int UC_HOOK_TCG_OPCODE = 65536;
public static final int UC_HOOK_TLB_FILL = 131072;
public static final int UC_HOOK_MEM_UNMAPPED = 112;
public static final int UC_HOOK_MEM_PROT = 896;
public static final int UC_HOOK_MEM_READ_INVALID = 144;
@ -125,6 +126,9 @@ public interface UnicornConst {
public static final int UC_CTL_IO_READ = 2;
public static final int UC_CTL_IO_READ_WRITE = 3;
public static final int UC_TLB_CPU = 0;
public static final int UC_TLB_VIRTUAL = 1;
public static final int UC_CTL_UC_MODE = 0;
public static final int UC_CTL_UC_PAGE_SIZE = 1;
public static final int UC_CTL_UC_ARCH = 2;
@ -136,6 +140,8 @@ public interface UnicornConst {
public static final int UC_CTL_TB_REQUEST_CACHE = 8;
public static final int UC_CTL_TB_REMOVE_CACHE = 9;
public static final int UC_CTL_TB_FLUSH = 10;
public static final int UC_CTL_TLB_FLUSH = 11;
public static final int UC_CTL_TLB_TYPE = 12;
public static final int UC_PROT_NONE = 0;
public static final int UC_PROT_READ = 1;

View File

@ -109,6 +109,7 @@ const UC_API_MAJOR = 2;
UC_HOOK_INSN_INVALID = 16384;
UC_HOOK_EDGE_GENERATED = 32768;
UC_HOOK_TCG_OPCODE = 65536;
UC_HOOK_TLB_FILL = 131072;
UC_HOOK_MEM_UNMAPPED = 112;
UC_HOOK_MEM_PROT = 896;
UC_HOOK_MEM_READ_INVALID = 144;
@ -126,6 +127,9 @@ const UC_API_MAJOR = 2;
UC_CTL_IO_READ = 2;
UC_CTL_IO_READ_WRITE = 3;
UC_TLB_CPU = 0;
UC_TLB_VIRTUAL = 1;
UC_CTL_UC_MODE = 0;
UC_CTL_UC_PAGE_SIZE = 1;
UC_CTL_UC_ARCH = 2;
@ -137,6 +141,8 @@ const UC_API_MAJOR = 2;
UC_CTL_TB_REQUEST_CACHE = 8;
UC_CTL_TB_REMOVE_CACHE = 9;
UC_CTL_TB_FLUSH = 10;
UC_CTL_TLB_FLUSH = 11;
UC_CTL_TLB_TYPE = 12;
UC_PROT_NONE = 0;
UC_PROT_READ = 1;

View File

@ -806,6 +806,9 @@ class Uc(object):
def ctl_flush_tb(self):
self.ctl(self.__ctl_w(uc.UC_CTL_TB_FLUSH, 0))
def ctl_tlb_mode(self, mode: int):
self.__ctl_w_1_arg(uc.UC_CTL_TLB_TYPE, mode, ctypes.c_int)
# add a hook
def hook_add(self, htype: int, callback: UC_HOOK_CALLBACK_TYPE , user_data: Any=None, begin: int=1, end: int=0, arg1: int=0, arg2: int=0):
_h2 = uc_hook_h()

View File

@ -104,6 +104,7 @@ UC_HOOK_MEM_READ_AFTER = 8192
UC_HOOK_INSN_INVALID = 16384
UC_HOOK_EDGE_GENERATED = 32768
UC_HOOK_TCG_OPCODE = 65536
UC_HOOK_TLB_FILL = 131072
UC_HOOK_MEM_UNMAPPED = 112
UC_HOOK_MEM_PROT = 896
UC_HOOK_MEM_READ_INVALID = 144
@ -121,6 +122,9 @@ UC_CTL_IO_WRITE = 1
UC_CTL_IO_READ = 2
UC_CTL_IO_READ_WRITE = 3
UC_TLB_CPU = 0
UC_TLB_VIRTUAL = 1
UC_CTL_UC_MODE = 0
UC_CTL_UC_PAGE_SIZE = 1
UC_CTL_UC_ARCH = 2
@ -132,6 +136,8 @@ UC_CTL_CPU_MODEL = 7
UC_CTL_TB_REQUEST_CACHE = 8
UC_CTL_TB_REMOVE_CACHE = 9
UC_CTL_TB_FLUSH = 10
UC_CTL_TLB_FLUSH = 11
UC_CTL_TLB_TYPE = 12
UC_PROT_NONE = 0
UC_PROT_READ = 1

View File

@ -106,6 +106,7 @@ module UnicornEngine
UC_HOOK_INSN_INVALID = 16384
UC_HOOK_EDGE_GENERATED = 32768
UC_HOOK_TCG_OPCODE = 65536
UC_HOOK_TLB_FILL = 131072
UC_HOOK_MEM_UNMAPPED = 112
UC_HOOK_MEM_PROT = 896
UC_HOOK_MEM_READ_INVALID = 144
@ -123,6 +124,9 @@ module UnicornEngine
UC_CTL_IO_READ = 2
UC_CTL_IO_READ_WRITE = 3
UC_TLB_CPU = 0
UC_TLB_VIRTUAL = 1
UC_CTL_UC_MODE = 0
UC_CTL_UC_PAGE_SIZE = 1
UC_CTL_UC_ARCH = 2
@ -134,6 +138,8 @@ module UnicornEngine
UC_CTL_TB_REQUEST_CACHE = 8
UC_CTL_TB_REMOVE_CACHE = 9
UC_CTL_TB_FLUSH = 10
UC_CTL_TLB_FLUSH = 11
UC_CTL_TLB_TYPE = 12
UC_PROT_NONE = 0
UC_PROT_READ = 1

View File

@ -4,13 +4,13 @@ pub const unicornConst = enum(c_int) {
API_MAJOR = 2,
API_MINOR = 0,
API_PATCH = 1,
API_EXTRA = 255,
API_PATCH = 2,
API_EXTRA = 1,
VERSION_MAJOR = 2,
VERSION_MINOR = 0,
VERSION_PATCH = 1,
VERSION_EXTRA = 255,
VERSION_PATCH = 2,
VERSION_EXTRA = 1,
SECOND_SCALE = 1000000,
MILISECOND_SCALE = 1000,
ARCH_ARM = 1,
@ -106,6 +106,7 @@ pub const unicornConst = enum(c_int) {
HOOK_INSN_INVALID = 16384,
HOOK_EDGE_GENERATED = 32768,
HOOK_TCG_OPCODE = 65536,
HOOK_TLB_FILL = 131072,
HOOK_MEM_UNMAPPED = 112,
HOOK_MEM_PROT = 896,
HOOK_MEM_READ_INVALID = 144,
@ -123,6 +124,9 @@ pub const unicornConst = enum(c_int) {
CTL_IO_READ = 2,
CTL_IO_READ_WRITE = 3,
TLB_CPU = 0,
TLB_VIRTUAL = 1,
CTL_UC_MODE = 0,
CTL_UC_PAGE_SIZE = 1,
CTL_UC_ARCH = 2,
@ -134,6 +138,8 @@ pub const unicornConst = enum(c_int) {
CTL_TB_REQUEST_CACHE = 8,
CTL_TB_REMOVE_CACHE = 9,
CTL_TB_FLUSH = 10,
CTL_TLB_FLUSH = 11,
CTL_TLB_TYPE = 12,
PROT_NONE = 0,
PROT_READ = 1,

View File

@ -113,6 +113,16 @@ To provide end users with simple API, Unicorn does lots of dirty hacks within qe
Yes, it's possible but that is not Unicorn's goal and there is no simple switch in qemu to disable softmmu.
Starting from 2.0.2, Unicorn will emulate the MMU depending on the emulated architecture without further hacks. That said, Unicorn offers the full ability of the target MMU implementation. While this enables more possibilities of Unicorn, it has a few drawbacks:
- As previous question points out already, some memory regions are not writable/executable.
- You have to always check architecture-specific registers to confirm MMU status.
- `uc_mem_map` will always deal with physical addresses while `uc_emu_start` accepts virtual addresses.
Therefore, if you still prefer the previous `paddr = vaddr` simple mapping, we have a simple experimental MMU implementation that can be switched on by: `uc_ctl_tlb_mode(uc, UC_TLB_VIRTUAL)`. With this mode, you could also add a `UC_HOOK_TLB_FILL` hook to manage the TLB. When a virtual address is not cached, the hook will be called. Besides, users are allowed to flush the TLB with `uc_ctl_flush_tlb`.
In theory, `UC_TLB_VIRTUAL` will achieve better performance as it skips all MMU details, though not benchmarked.
## I'd like to make contributions, where do I start?
See [milestones](https://github.com/unicorn-engine/unicorn/milestones) and [coding convention](https://github.com/unicorn-engine/unicorn/wiki/Coding-Convention
@ -122,4 +132,4 @@ Be sure to send pull requests for our **dev** branch only.
## Which qemu version is Unicorn based on?
Prior to 2.0.0, Unicorn is based on qemu 2.2.1. After that, Unicorn is based on qemu 5.0.1.
Prior to 2.0.0, Unicorn is based on qemu 2.2.1. After that, Unicorn is based on qemu 5.0.1.

View File

@ -119,9 +119,6 @@ typedef MemoryRegion *(*uc_memory_map_io_t)(struct uc_struct *uc,
// which interrupt should make emulation stop?
typedef bool (*uc_args_int_t)(struct uc_struct *uc, int intno);
// some architecture redirect virtual memory to physical memory like Mips
typedef uint64_t (*uc_mem_redirect_t)(uint64_t address);
// validate if Unicorn supports hooking a given instruction
typedef bool (*uc_insn_hook_validate)(uint32_t insn_enum);
@ -146,6 +143,8 @@ typedef uc_err (*uc_gen_tb_t)(struct uc_struct *uc, uint64_t pc, uc_tb *out_tb);
// tb flush
typedef uc_tcg_flush_tlb uc_tb_flush_t;
typedef uc_err (*uc_set_tlb_t)(struct uc_struct *uc, int mode);
struct hook {
int type; // UC_HOOK_*
int insn; // instruction for HOOK_INSN
@ -202,6 +201,7 @@ typedef enum uc_hook_idx {
UC_HOOK_INSN_INVALID_IDX,
UC_HOOK_EDGE_GENERATED_IDX,
UC_HOOK_TCG_OPCODE_IDX,
UC_HOOK_TLB_FILL_IDX,
UC_HOOK_MAX,
} uc_hook_idx;
@ -284,7 +284,6 @@ struct uc_struct {
uc_args_uc_ram_size_ptr_t memory_map_ptr;
uc_mem_unmap_t memory_unmap;
uc_readonly_mem_t readonly_mem;
uc_mem_redirect_t mem_redirect;
uc_cpus_init cpus_init;
uc_target_page_init target_page;
uc_softfloat_initialize softfloat_initialize;
@ -337,6 +336,8 @@ struct uc_struct {
GHashTable *flat_views;
bool memory_region_update_pending;
uc_set_tlb_t set_tlb;
// linked lists containing hooks per type
struct list hook[UC_HOOK_MAX];
struct list hooks_to_del;

View File

@ -244,6 +244,22 @@ typedef uint32_t (*uc_cb_insn_in_t)(uc_engine *uc, uint32_t port, int size,
typedef void (*uc_cb_insn_out_t)(uc_engine *uc, uint32_t port, int size,
uint32_t value, void *user_data);
typedef struct uc_tlb_entry uc_tlb_entry;
typedef enum uc_mem_type uc_mem_type;
/*
Callback function for TLB lookups
@vaddr: virtual address for the lookup
@type: the access type (read, write or fetch)
@result: result entry, contains physical address (paddr) and permitted access type (perms) for the entry
@return: return true if the entry was found. If a callback is registered but
none returns true, a page fault is generated.
*/
typedef bool (*uc_cb_tlbevent_t)(uc_engine *uc, uint64_t vaddr, uc_mem_type type,
uc_tlb_entry *result, void *user_data);
// Represent a TranslationBlock.
typedef struct uc_tb {
uint64_t pc;
@ -295,7 +311,7 @@ typedef void (*uc_cb_mmio_write_t)(uc_engine *uc, uint64_t offset,
void *user_data);
// All type of memory accesses for UC_HOOK_MEM_*
typedef enum uc_mem_type {
enum uc_mem_type {
UC_MEM_READ = 16, // Memory is read from
UC_MEM_WRITE, // Memory is written to
UC_MEM_FETCH, // Memory is fetched
@ -306,7 +322,7 @@ typedef enum uc_mem_type {
UC_MEM_READ_PROT, // Read from read protected, but mapped, memory
UC_MEM_FETCH_PROT, // Fetch from non-executable, but mapped, memory
UC_MEM_READ_AFTER, // Memory is read from (successful access)
} uc_mem_type;
};
// These are all op codes we support to hook for UC_HOOK_TCG_OP_CODE.
// Be cautious since it may bring much more overhead than UC_HOOK_CODE without
@ -369,6 +385,10 @@ typedef enum uc_hook_type {
// Hook on specific tcg op code. The usage of this hook is similar to
// UC_HOOK_INSN.
UC_HOOK_TCG_OPCODE = 1 << 16,
// Hook on TLB fill requests.
// Register a TLB fill request hook on virtual addresses.
// The callback will be triggered if the TLB cache does not contain the address.
UC_HOOK_TLB_FILL = 1 << 17,
} uc_hook_type;
// Hook type for all events of unmapped memory access
@ -490,6 +510,16 @@ typedef enum uc_query_type {
#define UC_CTL_WRITE(type, nr) UC_CTL(type, nr, UC_CTL_IO_WRITE)
#define UC_CTL_READ_WRITE(type, nr) UC_CTL(type, nr, UC_CTL_IO_READ_WRITE)
// unicorn tlb type selection
typedef enum uc_tlb_type {
// The default unicorn virtual TLB implementation.
// The TLB implementation of the CPU, best to use for full system emulation.
UC_TLB_CPU = 0,
// This TLB defaults to virtual address == physical address.
// Also a hook is available to override the TLB entries (see uc_cb_tlbevent_t).
UC_TLB_VIRTUAL
} uc_tlb_type;
// All type of controls for uc_ctl API.
// The controls are organized in a tree level.
// If a control don't have `Set` or `Get` for @args, it means it's r/o or w/o.
@ -536,7 +566,14 @@ typedef enum uc_control_type {
UC_CTL_TB_REMOVE_CACHE,
// Invalidate all translation blocks.
// No arguments.
UC_CTL_TB_FLUSH
UC_CTL_TB_FLUSH,
// Invalidate all TLB cache entries and translation blocks.
// No arguments
UC_CTL_TLB_FLUSH,
// Change the tlb implementation
// see uc_tlb_type for current implemented types
// Write: @args = (int)
UC_CTL_TLB_TYPE
} uc_control_type;
@ -611,7 +648,9 @@ See sample_ctl.c for a detailed example.
uc_ctl(uc, UC_CTL_WRITE(UC_CTL_TB_REMOVE_CACHE, 2), (address), (end))
#define uc_ctl_request_cache(uc, address, tb) \
uc_ctl(uc, UC_CTL_READ_WRITE(UC_CTL_TB_REQUEST_CACHE, 2), (address), (tb))
#define uc_ctl_flush_tlb(uc) uc_ctl(uc, UC_CTL_WRITE(UC_CTL_TB_FLUSH, 0))
#define uc_ctl_flush_tb(uc) uc_ctl(uc, UC_CTL_WRITE(UC_CTL_TB_FLUSH, 0))
#define uc_ctl_flush_tlb(uc) uc_ctl(uc, UC_CTL_WRITE(UC_CTL_TLB_FLUSH, 0))
#define uc_ctl_tlb_mode(uc, mode) uc_ctl(uc, UC_CTL_WRITE(UC_CTL_TLB_TYPE, 1), (mode))
// Opaque storage for CPU context, used with uc_context_*()
struct uc_context;
typedef struct uc_context uc_context;
@ -898,6 +937,11 @@ typedef enum uc_prot {
UC_PROT_ALL = 7,
} uc_prot;
struct uc_tlb_entry {
uint64_t paddr;
uc_prot perms;
};
/*
Map memory in for emulation.
This API adds a memory region that can be used by emulation.

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _aarch64
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_aarch64
#define uc_add_inline_hook uc_add_inline_hook_aarch64
#define uc_del_inline_hook uc_del_inline_hook_aarch64
#define tb_invalidate_phys_range tb_invalidate_phys_range_aarch64

View File

@ -893,6 +893,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
/* Now calculate the new entry */
tn.addend = addend - vaddr_page;
tn.paddr = paddr_page;
if (prot & PAGE_READ) {
tn.addr_read = address;
if (wp_flags & BP_MEM_READ) {
@ -1423,6 +1424,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
target_ulong paddr;
const size_t tlb_off = code_read ?
offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
const MMUAccessType access_type =
@ -1436,146 +1438,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
bool handled;
HOOK_FOREACH_VAR_DECLARE;
struct uc_struct *uc = env->uc;
MemoryRegion *mr = find_memory_region(uc, addr);
// memory might be still unmapped while reading or fetching
if (mr == NULL) {
handled = false;
// if there is already an unhandled eror, skip callbacks.
if (uc->invalid_error == UC_ERR_OK) {
if (code_read) {
// code fetching
error_code = UC_ERR_FETCH_UNMAPPED;
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_UNMAPPED) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, addr))
continue;
if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_UNMAPPED, addr, size, 0, hook->user_data)))
break;
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
} else {
// data reading
error_code = UC_ERR_READ_UNMAPPED;
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_UNMAPPED) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, addr))
continue;
if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_UNMAPPED, addr, size, 0, hook->user_data)))
break;
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
}
} else {
error_code = uc->invalid_error;
}
if (handled) {
uc->invalid_error = UC_ERR_OK;
mr = find_memory_region(uc, addr);
if (mr == NULL) {
uc->invalid_error = UC_ERR_MAP;
cpu_exit(uc->cpu);
// XXX(@lazymio): We have to exit early so that the target register won't be overwritten
// because qemu might generate tcg code like:
// qemu_ld_i64 x0,x1,leq,8 sync: 0 dead: 0 1
// where we don't have a change to recover x0 value
cpu_loop_exit(uc->cpu);
return 0;
}
} else {
uc->invalid_addr = addr;
uc->invalid_error = error_code;
// printf("***** Invalid fetch (unmapped memory) at " TARGET_FMT_lx "\n", addr);
cpu_exit(uc->cpu);
// See comments above
cpu_loop_exit(uc->cpu);
return 0;
}
}
// now it is read on mapped memory
if (!code_read) {
// this is date reading
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, addr))
continue;
((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ, addr, size, 0, hook->user_data);
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
// callback on non-readable memory
if (mr != NULL && !(mr->perms & UC_PROT_READ)) { //non-readable
handled = false;
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_PROT) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, addr))
continue;
if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_PROT, addr, size, 0, hook->user_data)))
break;
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
if (handled) {
uc->invalid_error = UC_ERR_OK;
} else {
uc->invalid_addr = addr;
uc->invalid_error = UC_ERR_READ_PROT;
// printf("***** Invalid memory read (non-readable) at " TARGET_FMT_lx "\n", addr);
cpu_exit(uc->cpu);
// See comments above
cpu_loop_exit(uc->cpu);
return 0;
}
}
} else {
// code fetching
// Unicorn: callback on fetch from NX
if (mr != NULL && !(mr->perms & UC_PROT_EXEC)) { // non-executable
handled = false;
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_PROT) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, addr))
continue;
if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_PROT, addr, size, 0, hook->user_data)))
break;
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
if (handled) {
uc->invalid_error = UC_ERR_OK;
} else {
uc->invalid_addr = addr;
uc->invalid_error = UC_ERR_FETCH_PROT;
// printf("***** Invalid fetch (non-executable) at " TARGET_FMT_lx "\n", addr);
cpu_exit(uc->cpu);
// See comments above
cpu_loop_exit(uc->cpu);
return 0;
}
}
}
MemoryRegion *mr;
/* Handle CPU specific unaligned behaviour */
if (addr & ((1 << a_bits) - 1)) {
@ -1596,6 +1459,173 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
tlb_addr &= ~TLB_INVALID_MASK;
}
paddr = entry->paddr | (addr & ~TARGET_PAGE_MASK);
mr = find_memory_region(uc, paddr);
// memory might be still unmapped while reading or fetching
if (mr == NULL) {
handled = false;
// if there is already an unhandled eror, skip callbacks.
if (uc->invalid_error == UC_ERR_OK) {
if (code_read) {
// code fetching
error_code = UC_ERR_FETCH_UNMAPPED;
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_UNMAPPED) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_UNMAPPED, paddr, size, 0, hook->user_data)))
break;
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
} else {
// data reading
error_code = UC_ERR_READ_UNMAPPED;
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_UNMAPPED) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_UNMAPPED, paddr, size, 0, hook->user_data)))
break;
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
}
} else {
error_code = uc->invalid_error;
}
if (handled) {
uc->invalid_error = UC_ERR_OK;
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(env->uc, tlb_addr, addr)) {
if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
addr & TARGET_PAGE_MASK)) {
tlb_fill(env_cpu(env), addr, size,
access_type, mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
}
tlb_addr = code_read ? entry->addr_code : entry->addr_read;
tlb_addr &= ~TLB_INVALID_MASK;
}
paddr = entry->paddr | (addr & ~TARGET_PAGE_MASK);
mr = find_memory_region(uc, paddr);
if (mr == NULL) {
uc->invalid_error = UC_ERR_MAP;
cpu_exit(uc->cpu);
// XXX(@lazymio): We have to exit early so that the target register won't be overwritten
// because qemu might generate tcg code like:
// qemu_ld_i64 x0,x1,leq,8 sync: 0 dead: 0 1
// where we don't have a change to recover x0 value
cpu_loop_exit(uc->cpu);
return 0;
}
} else {
uc->invalid_addr = paddr;
uc->invalid_error = error_code;
// printf("***** Invalid fetch (unmapped memory) at " TARGET_FMT_lx "\n", addr);
cpu_exit(uc->cpu);
// See comments above
cpu_loop_exit(uc->cpu);
return 0;
}
}
// now it is read on mapped memory
if (!code_read) {
// this is date reading
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ, paddr, size, 0, hook->user_data);
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
// callback on non-readable memory
if (mr != NULL && !(mr->perms & UC_PROT_READ)) { //non-readable
handled = false;
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_PROT) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_READ_PROT, paddr, size, 0, hook->user_data)))
break;
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
if (handled) {
uc->invalid_error = UC_ERR_OK;
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(env->uc, tlb_addr, addr)) {
if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
addr & TARGET_PAGE_MASK)) {
tlb_fill(env_cpu(env), addr, size,
access_type, mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
}
tlb_addr = code_read ? entry->addr_code : entry->addr_read;
tlb_addr &= ~TLB_INVALID_MASK;
}
} else {
uc->invalid_addr = paddr;
uc->invalid_error = UC_ERR_READ_PROT;
// printf("***** Invalid memory read (non-readable) at " TARGET_FMT_lx "\n", addr);
cpu_exit(uc->cpu);
// See comments above
cpu_loop_exit(uc->cpu);
return 0;
}
}
} else {
// code fetching
// Unicorn: callback on fetch from NX
if (mr != NULL && !(mr->perms & UC_PROT_EXEC)) { // non-executable
handled = false;
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_FETCH_PROT) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_FETCH_PROT, paddr, size, 0, hook->user_data)))
break;
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
if (handled) {
uc->invalid_error = UC_ERR_OK;
} else {
uc->invalid_addr = paddr;
uc->invalid_error = UC_ERR_FETCH_PROT;
// printf("***** Invalid fetch (non-executable) at " TARGET_FMT_lx "\n", addr);
cpu_exit(uc->cpu);
// See comments above
cpu_loop_exit(uc->cpu);
return 0;
}
}
}
/* Handle anything that isn't just a straight memory access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
CPUIOTLBEntry *iotlbentry;
@ -1678,9 +1708,9 @@ _out:
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_READ_AFTER) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, addr))
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ_AFTER, addr, size, res, hook->user_data);
((uc_cb_hookmem_t)hook->callback)(env->uc, UC_MEM_READ_AFTER, paddr, size, res, hook->user_data);
// the last callback may already asked to stop emulation
if (uc->stop_request)
@ -1986,6 +2016,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
target_ulong tlb_addr = tlb_addr_write(entry);
target_ulong paddr;
const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
unsigned a_bits = get_alignment_bits(get_memop(oi));
void *haddr;
@ -1994,86 +2025,6 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
bool handled;
MemoryRegion *mr;
if (!uc->size_recur_mem) { // disabling write callback if in recursive call
// Unicorn: callback on memory write
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, addr))
continue;
((uc_cb_hookmem_t)hook->callback)(uc, UC_MEM_WRITE, addr, size, val, hook->user_data);
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
}
// Load the latest memory mapping.
mr = find_memory_region(uc, addr);
// Unicorn: callback on invalid memory
if (mr == NULL) {
handled = false;
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_UNMAPPED) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, addr))
continue;
if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_UNMAPPED, addr, size, val, hook->user_data)))
break;
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
if (!handled) {
// save error & quit
uc->invalid_addr = addr;
uc->invalid_error = UC_ERR_WRITE_UNMAPPED;
// printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr);
cpu_exit(uc->cpu);
return;
} else {
uc->invalid_error = UC_ERR_OK;
mr = find_memory_region(uc, addr);
if (mr == NULL) {
uc->invalid_error = UC_ERR_MAP;
cpu_exit(uc->cpu);
return;
}
}
}
// Unicorn: callback on non-writable memory
if (mr != NULL && !(mr->perms & UC_PROT_WRITE)) { //non-writable
// printf("not writable memory???\n");
handled = false;
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_PROT) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, addr))
continue;
if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_PROT, addr, size, val, hook->user_data)))
break;
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
if (handled) {
uc->invalid_error = UC_ERR_OK;
} else {
uc->invalid_addr = addr;
uc->invalid_error = UC_ERR_WRITE_PROT;
// printf("***** Invalid memory write (ro) at " TARGET_FMT_lx "\n", addr);
cpu_exit(uc->cpu);
return;
}
}
/* Handle CPU specific unaligned behaviour */
if (addr & ((1 << a_bits) - 1)) {
cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
@ -2092,6 +2043,110 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
}
// Load the latest memory mapping.
paddr = entry->paddr | (addr & ~TARGET_PAGE_MASK);
mr = find_memory_region(uc, paddr);
if (!uc->size_recur_mem) { // disabling write callback if in recursive call
// Unicorn: callback on memory write
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
((uc_cb_hookmem_t)hook->callback)(uc, UC_MEM_WRITE, paddr, size, val, hook->user_data);
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
}
// Unicorn: callback on invalid memory
if (mr == NULL) {
handled = false;
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_UNMAPPED) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_UNMAPPED, paddr, size, val, hook->user_data)))
break;
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
if (!handled) {
// save error & quit
uc->invalid_addr = paddr;
uc->invalid_error = UC_ERR_WRITE_UNMAPPED;
// printf("***** Invalid memory write at " TARGET_FMT_lx "\n", addr);
cpu_exit(uc->cpu);
return;
} else {
uc->invalid_error = UC_ERR_OK;
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(env->uc, tlb_addr, addr)) {
if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
addr & TARGET_PAGE_MASK)) {
tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
}
tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
}
paddr = entry->paddr | (addr & ~TARGET_PAGE_MASK);
mr = find_memory_region(uc, paddr);
if (mr == NULL) {
uc->invalid_error = UC_ERR_MAP;
cpu_exit(uc->cpu);
return;
}
}
}
// Unicorn: callback on non-writable memory
if (mr != NULL && !(mr->perms & UC_PROT_WRITE)) { //non-writable
// printf("not writable memory???\n");
handled = false;
HOOK_FOREACH(uc, hook, UC_HOOK_MEM_WRITE_PROT) {
if (hook->to_delete)
continue;
if (!HOOK_BOUND_CHECK(hook, paddr))
continue;
if ((handled = ((uc_cb_eventmem_t)hook->callback)(uc, UC_MEM_WRITE_PROT, addr, size, val, hook->user_data)))
break;
// the last callback may already asked to stop emulation
if (uc->stop_request)
break;
}
if (handled) {
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(env->uc, tlb_addr, addr)) {
if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
addr & TARGET_PAGE_MASK)) {
tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
mmu_idx, retaddr);
index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);
}
tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
}
uc->invalid_error = UC_ERR_OK;
} else {
uc->invalid_addr = addr;
uc->invalid_error = UC_ERR_WRITE_PROT;
// printf("***** Invalid memory write (ro) at " TARGET_FMT_lx "\n", addr);
cpu_exit(uc->cpu);
return;
}
}
/* Handle anything that isn't just a straight memory access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
CPUIOTLBEntry *iotlbentry;

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _arm
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_arm
#define uc_add_inline_hook uc_add_inline_hook_arm
#define uc_del_inline_hook uc_del_inline_hook_arm
#define tb_invalidate_phys_range tb_invalidate_phys_range_arm

View File

@ -77,9 +77,9 @@ typedef uint64_t target_ulong;
#define CPU_VTLB_SIZE 8
#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
#define CPU_TLB_ENTRY_BITS 4
#else
#define CPU_TLB_ENTRY_BITS 5
#else
#define CPU_TLB_ENTRY_BITS 6
#endif
#define CPU_TLB_DYN_MIN_BITS 6
@ -112,6 +112,7 @@ typedef struct CPUTLBEntry {
target_ulong addr_read;
target_ulong addr_write;
target_ulong addr_code;
target_ulong paddr;
/* Addend to virtual address to get host address. IO accesses
use the corresponding iotlb value. */
uintptr_t addend;

View File

@ -117,6 +117,9 @@ typedef struct CPUClass {
bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
bool (*tlb_fill_cpu)(CPUState *cpu, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
bool probe, uintptr_t retaddr);
hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
MemTxAttrs *attrs);

View File

@ -2,6 +2,7 @@
#define QEMU_CPUS_H
#include "qemu/timer.h"
#include "hw/core/cpu.h"
/* cpus.c */
bool qemu_in_vcpu_thread(void);

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _m68k
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_m68k
#define uc_add_inline_hook uc_add_inline_hook_m68k
#define uc_del_inline_hook uc_del_inline_hook_m68k
#define tb_invalidate_phys_range tb_invalidate_phys_range_m68k

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _mips
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_mips
#define uc_add_inline_hook uc_add_inline_hook_mips
#define uc_del_inline_hook uc_del_inline_hook_mips
#define tb_invalidate_phys_range tb_invalidate_phys_range_mips

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _mips64
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_mips64
#define uc_add_inline_hook uc_add_inline_hook_mips64
#define uc_del_inline_hook uc_del_inline_hook_mips64
#define tb_invalidate_phys_range tb_invalidate_phys_range_mips64

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _mips64el
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_mips64el
#define uc_add_inline_hook uc_add_inline_hook_mips64el
#define uc_del_inline_hook uc_del_inline_hook_mips64el
#define tb_invalidate_phys_range tb_invalidate_phys_range_mips64el

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _mipsel
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_mipsel
#define uc_add_inline_hook uc_add_inline_hook_mipsel
#define uc_del_inline_hook uc_del_inline_hook_mipsel
#define tb_invalidate_phys_range tb_invalidate_phys_range_mipsel

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _ppc
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_ppc
#define uc_add_inline_hook uc_add_inline_hook_ppc
#define uc_del_inline_hook uc_del_inline_hook_ppc
#define tb_invalidate_phys_range tb_invalidate_phys_range_ppc

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _ppc64
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_ppc64
#define uc_add_inline_hook uc_add_inline_hook_ppc64
#define uc_del_inline_hook uc_del_inline_hook_ppc64
#define tb_invalidate_phys_range tb_invalidate_phys_range_ppc64

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _riscv32
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_riscv32
#define uc_add_inline_hook uc_add_inline_hook_riscv32
#define uc_del_inline_hook uc_del_inline_hook_riscv32
#define tb_invalidate_phys_range tb_invalidate_phys_range_riscv32

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _riscv64
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_riscv64
#define uc_add_inline_hook uc_add_inline_hook_riscv64
#define uc_del_inline_hook uc_del_inline_hook_riscv64
#define tb_invalidate_phys_range tb_invalidate_phys_range_riscv64

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _s390x
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_s390x
#define uc_add_inline_hook uc_add_inline_hook_s390x
#define uc_del_inline_hook uc_del_inline_hook_s390x
#define tb_invalidate_phys_range tb_invalidate_phys_range_s390x

106
qemu/softmmu/unicorn_vtlb.c Normal file
View File

@ -0,0 +1,106 @@
#include <stdint.h>
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "exec/exec-all.h"
#include "uc_priv.h"
#include <stdio.h>
/* Abort the current guest memory access with UC_ERR_EXCEPTION.
 *
 * Records the faulting guest virtual address on the owning uc_struct,
 * requests the vCPU to stop, then unwinds back into the cpu-exec loop
 * via cpu_loop_exit_restore() (a longjmp) — this function never returns
 * to its caller.  The `rw` access-type argument is accepted for symmetry
 * with the tlb_fill signature but is not used here. */
static void raise_mmu_exception(CPUState *cs, target_ulong address,
int rw, uintptr_t retaddr)
{
cs->uc->invalid_error = UC_ERR_EXCEPTION;
cs->uc->invalid_addr = address;
/* Stop the vCPU so emulation ends once the longjmp below unwinds. */
cpu_exit(cs->uc->cpu);
/* Restores guest state from retaddr and longjmps out; does not return. */
cpu_loop_exit_restore(cs, retaddr);
}
/* Map a QEMU MMUAccessType value onto the uc_mem_type that is reported
 * to UC_HOOK_TLB_FILL callbacks.  Any unrecognized access kind falls
 * back to UC_MEM_READ, matching the original default branch. */
static uc_mem_type rw_to_mem_type(int rw)
{
    if (rw == MMU_DATA_STORE) {
        return UC_MEM_WRITE;
    }
    if (rw == MMU_INST_FETCH) {
        return UC_MEM_FETCH;
    }
    /* MMU_DATA_LOAD and anything unexpected are treated as a read. */
    return UC_MEM_READ;
}
/* Translate UC_PROT_* permission bits (as set in a uc_tlb_entry by the
 * user's TLB-fill callback) into the PAGE_* protection bits that
 * tlb_set_page() expects. */
static int perms_to_prot(int perms)
{
    int prot = 0;

    prot |= (perms & UC_PROT_READ) ? PAGE_READ : 0;
    prot |= (perms & UC_PROT_WRITE) ? PAGE_WRITE : 0;
    prot |= (perms & UC_PROT_EXEC) ? PAGE_EXEC : 0;

    return prot;
}
/* tlb_fill implementation installed when the engine runs in
 * UC_TLB_VIRTUAL mode (see uc_set_tlb()).
 *
 * Asks the registered UC_HOOK_TLB_FILL callbacks for a translation of
 * `address`.  If no callback covers the address, the page is identity
 * mapped with full permissions.  If a callback covered the address but
 * every one of them returned false, the access is treated as a TLB miss.
 * A successful translation is only accepted when the entry's permissions
 * allow the requested access type; otherwise the access faults.
 *
 * Returns true when a TLB entry was installed.  On a miss: returns false
 * when `probe` is set, otherwise raises UC_ERR_EXCEPTION via
 * raise_mmu_exception(), which does not return. */
bool unicorn_fill_tlb(CPUState *cs, vaddr address, int size,
MMUAccessType rw, int mmu_idx,
bool probe, uintptr_t retaddr)
{
/* handled: at least one hook's address range covered this address.
 * ret: the last-invoked hook produced a translation in `e`. */
bool handled = false;
bool ret = false;
struct uc_struct *uc = cs->uc;
uc_tlb_entry e;
struct hook *hook;
HOOK_FOREACH_VAR_DECLARE;
HOOK_FOREACH(uc, hook, UC_HOOK_TLB_FILL) {
if (hook->to_delete) {
continue;
}
if (!HOOK_BOUND_CHECK(hook, address)) {
continue;
}
handled = true;
/* First callback that returns true supplies the translation. */
if ((ret = ((uc_cb_tlbevent_t)hook->callback)(uc, address & TARGET_PAGE_MASK, rw_to_mem_type(rw), &e, hook->user_data))) {
break;
}
}
/* A hook was responsible for this range but declined to translate. */
if (handled && !ret) {
goto tlb_miss;
}
/* No hook covers this address: identity map with full permissions. */
if (!handled) {
e.paddr = address & TARGET_PAGE_MASK;
e.perms = UC_PROT_READ|UC_PROT_WRITE|UC_PROT_EXEC;
}
/* Check the entry's permissions against the requested access kind. */
switch (rw) {
case MMU_DATA_LOAD:
ret = e.perms & UC_PROT_READ;
break;
case MMU_DATA_STORE:
ret = e.perms & UC_PROT_WRITE;
break;
case MMU_INST_FETCH:
ret = e.perms & UC_PROT_EXEC;
break;
default:
ret = false;
break;
}
if (ret) {
tlb_set_page(cs, address & TARGET_PAGE_MASK, e.paddr & TARGET_PAGE_MASK, perms_to_prot(e.perms), mmu_idx, TARGET_PAGE_SIZE);
return true;
}
tlb_miss:
/* probe accesses report failure instead of raising an exception. */
if (probe) {
return false;
}
raise_mmu_exception(cs, address, rw, retaddr);
return false;
}

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _sparc
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_sparc
#define uc_add_inline_hook uc_add_inline_hook_sparc
#define uc_del_inline_hook uc_del_inline_hook_sparc
#define tb_invalidate_phys_range tb_invalidate_phys_range_sparc

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _sparc64
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_sparc64
#define uc_add_inline_hook uc_add_inline_hook_sparc64
#define uc_del_inline_hook uc_del_inline_hook_sparc64
#define tb_invalidate_phys_range tb_invalidate_phys_range_sparc64

View File

@ -2080,7 +2080,7 @@ void arm_cpu_class_init(struct uc_struct *uc, CPUClass *oc)
cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
cc->asidx_from_attrs = arm_asidx_from_attrs;
cc->tcg_initialize = arm_translate_init;
cc->tlb_fill = arm_cpu_tlb_fill;
cc->tlb_fill_cpu = arm_cpu_tlb_fill;
cc->debug_excp_handler = arm_debug_excp_handler;
cc->do_unaligned_access = arm_cpu_do_unaligned_access;
}

View File

@ -5066,7 +5066,7 @@ static void x86_cpu_common_class_init(struct uc_struct *uc, CPUClass *oc, void *
cc->cpu_exec_enter = x86_cpu_exec_enter;
cc->cpu_exec_exit = x86_cpu_exec_exit;
cc->tcg_initialize = tcg_x86_init;
cc->tlb_fill = x86_cpu_tlb_fill;
cc->tlb_fill_cpu = x86_cpu_tlb_fill;
}
X86CPU *cpu_x86_init(struct uc_struct *uc)

View File

@ -643,11 +643,6 @@ do_check_protect_pse36:
paddr &= TARGET_PAGE_MASK;
assert(prot & (1 << is_write1));
// Unicorn: indentity map guest virtual address to host virtual address
vaddr = addr & TARGET_PAGE_MASK;
paddr = vaddr;
//printf(">>> map address %"PRIx64" to %"PRIx64"\n", vaddr, paddr);
tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
prot, mmu_idx, page_size);
return 0;

View File

@ -977,10 +977,16 @@ static int reg_write(CPUX86State *env, unsigned int regid, const void *value,
default:
break;
case UC_X86_REG_CR0:
cpu_x86_update_cr0(env, *(uint32_t *)value);
goto write_cr;
case UC_X86_REG_CR1:
case UC_X86_REG_CR2:
case UC_X86_REG_CR3:
cpu_x86_update_cr3(env, *(uint32_t *)value);
goto write_cr;
case UC_X86_REG_CR4:
cpu_x86_update_cr4(env, *(uint32_t *)value);
write_cr:
env->cr[regid - UC_X86_REG_CR0] = *(uint32_t *)value;
break;
case UC_X86_REG_DR0:
@ -1163,10 +1169,16 @@ static int reg_write(CPUX86State *env, unsigned int regid, const void *value,
default:
break;
case UC_X86_REG_CR0:
cpu_x86_update_cr0(env, *(uint32_t *) value);
goto write_cr64;
case UC_X86_REG_CR1:
case UC_X86_REG_CR2:
case UC_X86_REG_CR3:
cpu_x86_update_cr3(env, *(uint32_t *) value);
goto write_cr64;
case UC_X86_REG_CR4:
cpu_x86_update_cr4(env, *(uint32_t *) value);
write_cr64:
env->cr[regid - UC_X86_REG_CR0] = *(uint64_t *)value;
break;
case UC_X86_REG_DR0:

View File

@ -231,7 +231,7 @@ static void m68k_cpu_class_init(CPUClass *c)
cc->do_interrupt = m68k_cpu_do_interrupt;
cc->cpu_exec_interrupt = m68k_cpu_exec_interrupt;
cc->set_pc = m68k_cpu_set_pc;
cc->tlb_fill = m68k_cpu_tlb_fill;
cc->tlb_fill_cpu = m68k_cpu_tlb_fill;
cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
cc->tcg_initialize = m68k_tcg_init;
}

View File

@ -147,7 +147,7 @@ static void mips_cpu_class_init(CPUClass *c)
cc->do_unaligned_access = mips_cpu_do_unaligned_access;
cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
cc->tcg_initialize = mips_tcg_init;
cc->tlb_fill = mips_cpu_tlb_fill;
cc->tlb_fill_cpu = mips_cpu_tlb_fill;
}
MIPSCPU *cpu_mips_init(struct uc_struct *uc)

View File

@ -17,21 +17,6 @@ typedef uint32_t mipsreg_t;
MIPSCPU *cpu_mips_init(struct uc_struct *uc);
static uint64_t mips_mem_redirect(uint64_t address)
{
// kseg0 range masks off high address bit
if (address >= 0x80000000 && address <= 0x9fffffff)
return address & 0x7fffffff;
// kseg1 range masks off top 3 address bits
if (address >= 0xa0000000 && address <= 0xbfffffff) {
return address & 0x1fffffff;
}
// no redirect
return address;
}
static void mips_set_pc(struct uc_struct *uc, uint64_t address)
{
((CPUMIPSState *)uc->cpu->env_ptr)->active_tc.PC = address;
@ -272,7 +257,6 @@ void mipsel_uc_init(struct uc_struct *uc)
uc->release = mips_release;
uc->set_pc = mips_set_pc;
uc->get_pc = mips_get_pc;
uc->mem_redirect = mips_mem_redirect;
uc->cpus_init = mips_cpus_init;
uc->cpu_context_size = offsetof(CPUMIPSState, end_reset_fields);
uc_common_init(uc);

View File

@ -10253,7 +10253,7 @@ static void ppc_cpu_class_init(struct uc_struct *uc, CPUClass *oc)
cc->do_unaligned_access = ppc_cpu_do_unaligned_access;
cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug;
cc->tcg_initialize = ppc_translate_init;
cc->tlb_fill = ppc_cpu_tlb_fill;
cc->tlb_fill_cpu = ppc_cpu_tlb_fill;
cc->cpu_exec_enter = ppc_cpu_exec_enter;
cc->cpu_exec_exit = ppc_cpu_exec_exit;
}

View File

@ -77,21 +77,6 @@ static inline int uc_ppc_store_msr(CPUPPCState *env, target_ulong value,
return 0;
}
static uint64_t ppc_mem_redirect(uint64_t address)
{
/* // kseg0 range masks off high address bit
if (address >= 0x80000000 && address <= 0x9fffffff)
return address & 0x7fffffff;
// kseg1 range masks off top 3 address bits
if (address >= 0xa0000000 && address <= 0xbfffffff) {
return address & 0x1fffffff;
}*/
// no redirect
return address;
}
static void ppc_set_pc(struct uc_struct *uc, uint64_t address)
{
((CPUPPCState *)uc->cpu->env_ptr)->nip = address;
@ -435,7 +420,6 @@ void ppc_uc_init(struct uc_struct *uc)
uc->release = ppc_release;
uc->set_pc = ppc_set_pc;
uc->get_pc = ppc_get_pc;
uc->mem_redirect = ppc_mem_redirect;
uc->cpus_init = ppc_cpus_init;
uc->cpu_context_size = offsetof(CPUPPCState, uc);
uc_common_init(uc);

View File

@ -307,7 +307,7 @@ static void riscv_cpu_class_init(struct uc_struct *uc, CPUClass *c, void *data)
cc->synchronize_from_tb = riscv_cpu_synchronize_from_tb;
cc->do_unaligned_access = riscv_cpu_do_unaligned_access;
cc->tcg_initialize = riscv_translate_init;
cc->tlb_fill = riscv_cpu_tlb_fill;
cc->tlb_fill_cpu = riscv_cpu_tlb_fill;
}
typedef struct CPUModelInfo {

View File

@ -233,7 +233,7 @@ static void s390_cpu_class_init(struct uc_struct *uc, CPUClass *oc)
cc->debug_excp_handler = s390x_cpu_debug_excp_handler;
cc->do_unaligned_access = s390x_cpu_do_unaligned_access;
cc->tcg_initialize = s390x_translate_init;
cc->tlb_fill = s390_cpu_tlb_fill;
cc->tlb_fill_cpu = s390_cpu_tlb_fill;
// s390_cpu_model_class_register_props(oc);
}

View File

@ -504,7 +504,7 @@ static void sparc_cpu_class_init(struct uc_struct *uc, CPUClass *oc)
cc->cpu_exec_interrupt = sparc_cpu_exec_interrupt;
cc->set_pc = sparc_cpu_set_pc;
cc->synchronize_from_tb = sparc_cpu_synchronize_from_tb;
cc->tlb_fill = sparc_cpu_tlb_fill;
cc->tlb_fill_cpu = sparc_cpu_tlb_fill;
cc->do_unaligned_access = sparc_cpu_do_unaligned_access;
cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
cc->tcg_initialize = sparc_tcg_init;

View File

@ -137,7 +137,7 @@ static void tricore_cpu_class_init(CPUClass *c)
cc->synchronize_from_tb = tricore_cpu_synchronize_from_tb;
cc->get_phys_page_debug = tricore_cpu_get_phys_page_debug;
cc->tlb_fill = tricore_cpu_tlb_fill;
cc->tlb_fill_cpu = tricore_cpu_tlb_fill;
cc->tcg_initialize = tricore_tcg_init;
}

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _tricore
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_tricore
#define uc_add_inline_hook uc_add_inline_hook_tricore
#define uc_del_inline_hook uc_del_inline_hook_tricore
#define tb_invalidate_phys_range tb_invalidate_phys_range_tricore

View File

@ -11,6 +11,9 @@
void vm_start(struct uc_struct*);
void tcg_exec_init(struct uc_struct *uc, unsigned long tb_size);
bool unicorn_fill_tlb(CPUState *cs, vaddr address, int size,
MMUAccessType rw, int mmu_idx,
bool probe, uintptr_t retaddr);
// return true on success, false on failure
static inline bool cpu_physical_mem_read(AddressSpace *as, hwaddr addr,
@ -91,6 +94,19 @@ static inline void target_page_init(struct uc_struct* uc)
uc->target_page_align = TARGET_PAGE_SIZE - 1;
}
/* Backend for the UC_CTL_TLB_TYPE control: selects which tlb_fill
 * implementation the current vCPU uses.
 *
 * UC_TLB_VIRTUAL installs unicorn_fill_tlb(), which consults
 * UC_HOOK_TLB_FILL callbacks; UC_TLB_CPU restores the architecture's
 * own page-table walker (saved in cc->tlb_fill_cpu by the per-arch
 * class init).  Returns UC_ERR_ARG for any other mode value. */
static uc_err uc_set_tlb(struct uc_struct *uc, int mode) {
switch (mode) {
case UC_TLB_VIRTUAL:
uc->cpu->cc->tlb_fill = unicorn_fill_tlb;
return UC_ERR_OK;
case UC_TLB_CPU:
uc->cpu->cc->tlb_fill = uc->cpu->cc->tlb_fill_cpu;
return UC_ERR_OK;
default:
return UC_ERR_ARG;
}
}
void softfloat_init(void);
static inline void uc_common_init(struct uc_struct* uc)
{
@ -107,6 +123,7 @@ static inline void uc_common_init(struct uc_struct* uc)
uc->softfloat_initialize = softfloat_init;
uc->tcg_flush_tlb = tcg_flush_softmmu_tlb;
uc->memory_map_io = memory_map_io;
uc->set_tlb = uc_set_tlb;
if (!uc->release)
uc->release = release_common;

View File

@ -4,6 +4,7 @@
#ifndef UNICORN_ARCH_POSTFIX
#define UNICORN_ARCH_POSTFIX _x86_64
#endif
#define unicorn_fill_tlb unicorn_fill_tlb_x86_64
#define uc_add_inline_hook uc_add_inline_hook_x86_64
#define uc_del_inline_hook uc_del_inline_hook_x86_64
#define tb_invalidate_phys_range tb_invalidate_phys_range_x86_64

View File

@ -85,6 +85,7 @@ SOURCES += shellcode.c
SOURCES += mem_apis.c
SOURCES += sample_x86_32_gdt_and_seg_regs.c
SOURCES += sample_batch_reg.c
SOURCES += sample_mmu.c
endif
ifneq (,$(findstring m68k,$(UNICORN_ARCHS)))
SOURCES += sample_m68k.c

View File

@ -57,3 +57,8 @@ if test -e $DIR/sample_x86_32_gdt_and_seg_regs; then
echo "=========================="
$DIR/sample_x86_32_gdt_and_seg_regs
fi
if test -e $DIR/sample_mmu; then
echo "=========================="
$DIR/sample_mmu
fi

430
samples/sample_mmu.c Normal file
View File

@ -0,0 +1,430 @@
#include <unicorn/unicorn.h>
#include <stdio.h>
/*
* mov rax, 57
* syscall
* test rax, rax
* jz child
* xor rax, rax
* mov rax, 60
* mov [0x4000], rax
* syscall
*
* child:
* xor rcx, rcx
* mov rcx, 42
* mov [0x4000], rcx
* mov rax, 60
* syscall
*/
char code[] = "\xB8\x39\x00\x00\x00\x0F\x05\x48\x85\xC0\x74\x0F\xB8\x3C\x00\x00\x00\x48\x89\x04\x25\x00\x40\x00\x00\x0F\x05\xB9\x2A\x00\x00\x00\x48\x89\x0C\x25\x00\x40\x00\x00\xB8\x3C\x00\x00\x00\x0F\x05";
/* UC_HOOK_MEM_WRITE callback: logs every guest write that lands inside
 * the hooked (physical) address range.  `user_data` is unused.
 *
 * Fix: the original used "%lx" for uint64_t/int64_t, which is undefined
 * on platforms where `long` is 32 bits (Windows, 32-bit hosts).  Cast to
 * unsigned long long and use "%llx" for portable formatting. */
static void mmu_write_callback(uc_engine *uc, uc_mem_type type, uint64_t address, int size, int64_t value, void *user_data)
{
    printf("write at 0x%llx: 0x%llx\n", (unsigned long long)address,
           (unsigned long long)value);
}
/* Build a minimal 4-level (long mode) page-table hierarchy at tlb_base
 * for `vaddr` and switch the vCPU into paged long mode.
 *
 * Table layout inside the guest-physical block at tlb_base:
 *   +0x0000  PML4  (entry for vaddr -> PDPT)
 *   +0x1000  PDPT  (entry for vaddr -> PD)
 *   +0x2000  PD    (entry for vaddr -> PT)
 *   +0x3000  PT    (entries added later via x86_mmu_pt_set())
 * Each entry is marked present (bit 0) and user-accessible (bit 2).
 * Afterwards CR3 points at the PML4 and CR0/CR4/EFER are updated to
 * enable protected mode, paging, PAE and long mode.
 *
 * Fixes vs. the original: the PML4E write used sizeof(pml4o) instead of
 * sizeof(pml4e) (same size only by coincidence), and the PDPE write's
 * error message was mislabelled "pml4e". */
static void x86_mmu_prepare_tlb(uc_engine *uc, uint64_t vaddr, uint64_t tlb_base)
{
    uc_err err;
    uint64_t cr0;
    uint64_t cr4;
    uc_x86_msr msr = {.rid = 0xC0000080, .value = 0}; /* IA32_EFER */
    /* Byte offsets of the entries selected by vaddr at each level. */
    uint64_t pml4o = ((vaddr & 0x00ff8000000000) >> 39)*8;
    uint64_t pdpo = ((vaddr & 0x00007fc0000000) >> 30)*8;
    uint64_t pdo = ((vaddr & 0x0000003fe00000) >> 21)*8;
    uint64_t pml4e = (tlb_base + 0x1000) | 1 | (1 << 2);
    uint64_t pdpe = (tlb_base + 0x2000) | 1 | (1 << 2);
    uint64_t pde = (tlb_base + 0x3000) | 1 | (1 << 2);
    err = uc_mem_write(uc, tlb_base + pml4o, &pml4e, sizeof(pml4e));
    if (err) {
        printf("failed to write pml4e\n");
        exit(1);
    }
    err = uc_mem_write(uc, tlb_base + 0x1000 + pdpo, &pdpe, sizeof(pdpe));
    if (err) {
        printf("failed to write pdpe\n");
        exit(1);
    }
    err = uc_mem_write(uc, tlb_base + 0x2000 + pdo, &pde, sizeof(pde));
    if (err) {
        printf("failed to write pde\n");
        exit(1);
    }
    err = uc_reg_write(uc, UC_X86_REG_CR3, &tlb_base);
    if (err) {
        printf("failed to write CR3\n");
        exit(1);
    }
    err = uc_reg_read(uc, UC_X86_REG_CR0, &cr0);
    if (err) {
        printf("failed to read CR0\n");
        exit(1);
    }
    err = uc_reg_read(uc, UC_X86_REG_CR4, &cr4);
    if (err) {
        printf("failed to read CR4\n");
        exit(1);
    }
    err = uc_reg_read(uc, UC_X86_REG_MSR, &msr);
    if (err) {
        printf("failed to read MSR\n");
        exit(1);
    }
    cr0 |= 1;           //enable protected mode
    cr0 |= 1l << 31;    //enable paging
    cr4 |= 1l << 5;     //enable physical address extension
    msr.value |= 1l << 8; //enable long mode
    err = uc_reg_write(uc, UC_X86_REG_CR0, &cr0);
    if (err) {
        printf("failed to write CR0\n");
        exit(1);
    }
    err = uc_reg_write(uc, UC_X86_REG_CR4, &cr4);
    if (err) {
        printf("failed to write CR4\n");
        exit(1);
    }
    err = uc_reg_write(uc, UC_X86_REG_MSR, &msr);
    if (err) {
        printf("failed to write MSR\n");
        exit(1);
    }
}
/* Install one page-table entry mapping `vaddr` -> `paddr` in the PT at
 * tlb_base + 0x3000 (see x86_mmu_prepare_tlb() for the table layout).
 * The entry is marked present (bit 0) and user-accessible (bit 2).
 *
 * Fixes vs. the original: a long-mode PTE is 64 bits wide — the old
 * uint32_t both truncated the entry value and wrote only 4 of the 8
 * bytes, leaving the high half of the slot stale.  The write's error
 * code was also silently discarded. */
static void x86_mmu_pt_set(uc_engine *uc, uint64_t vaddr, uint64_t paddr, uint64_t tlb_base)
{
    uint64_t pto = ((vaddr & 0x000000001ff000) >> 12)*8;
    uint64_t pte = paddr | 1 | (1 << 2);
    uc_err err = uc_mem_write(uc, tlb_base + 0x3000 + pto, &pte, sizeof(pte));
    if (err) {
        printf("failed to write pte\n");
        exit(1);
    }
}
/* UC_HOOK_INSN(SYSCALL) handler shared by both demos.  Emulates only the
 * two syscalls the sample code issues: fork (57) pauses emulation once so
 * the driver can run the "child", exit (60) marks the parent finished and
 * stops emulation.  `userdata` points at the driver's parent-done flag. */
static void x86_mmu_syscall_callback(uc_engine *uc, void *userdata)
{
    bool *done = userdata;
    uint64_t rax;
    uc_err err;

    err = uc_reg_read(uc, UC_X86_REG_RAX, &rax);
    if (err) {
        printf("failed to read rax\n");
        exit(1);
    }

    if (rax == 60) {
        /* exit */
        *done = true;
        uc_emu_stop(uc);
        return;
    }
    if (rax != 57) {
        printf("unknown syscall");
        exit(1);
    }

    /* fork: hand the not-yet-finished parent a fake child pid and pause
     * emulation so the driver can take over. */
    if (!(*done)) {
        rax = 27;
        err = uc_reg_write(uc, UC_X86_REG_RAX, &rax);
        if (err) {
            printf("failed to write rax\n");
            exit(1);
        }
        uc_emu_stop(uc);
    }
}
/* Demo 1: run the fork/exit sample with the CPU's real page-table walker
 * (UC_TLB_CPU).  Hand-built 4-level page tables at tlb_base map the code
 * page and a data page at virtual 0x4000; the mapping of 0x4000 is
 * switched from the parent's page (0x1000) to the child's (0x2000)
 * between the two emulation runs, then both result values are read back
 * through their physical addresses and printed. */
void cpu_tlb(void)
{
uint64_t tlb_base = 0x3000;
uint64_t rax, rip;
bool parrent_done = false;
uint64_t parrent, child;
uc_context *context;
uc_engine *uc;
uc_err err;
uc_hook h1, h2;
printf("Emulate x86 amd64 code with mmu enabled and switch mappings\n");
err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc);
if (err) {
printf("Failed on uc_open() with error returned: %u\n", err);
exit(1);
}
/* Use the architecture's own MMU / page-table walk. */
uc_ctl_tlb_mode(uc, UC_TLB_CPU);
err = uc_context_alloc(uc, &context);
if (err) {
printf("Failed on uc_context_alloc() with error returned: %u\n", err);
exit(1);
}
err = uc_hook_add(uc, &h1, UC_HOOK_INSN, &x86_mmu_syscall_callback, &parrent_done, 1, 0, UC_X86_INS_SYSCALL);
if (err) {
printf("Failed on uc_hook_add() with error returned: %u\n", err);
exit(1);
}
// Memory hooks are called after the mmu translation, so hook the physicall addresses
err = uc_hook_add(uc, &h2, UC_HOOK_MEM_WRITE, &mmu_write_callback, NULL, 0x1000, 0x3000);
if (err) {
printf("Faled on uc_hook_add() with error returned: %u\n", err);
}
printf("map code\n");
err = uc_mem_map(uc, 0x0, 0x1000, UC_PROT_ALL); //Code
if (err) {
printf("Failed on uc_mem_map() with error return: %u\n", err);
exit(1);
}
err = uc_mem_write(uc, 0x0, code, sizeof(code) - 1);
if (err) {
printf("Failed on uc_mem_wirte() with error return: %u\n", err);
exit(1);
}
printf("map parrent memory\n");
err = uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL); //Parrent
if (err) {
printf("Failed on uc_mem_map() with error return: %u\n", err);
exit(1);
}
printf("map child memory\n");
err = uc_mem_map(uc, 0x2000, 0x1000, UC_PROT_ALL); //Child
if (err) {
printf("failed to map child memory\n");
exit(1);
}
printf("map tlb memory\n");
err = uc_mem_map(uc, tlb_base, 0x4000, UC_PROT_ALL); //TLB
if (err) {
printf("failed to map memory for tlb\n");
exit(1);
}
/* Map virtual 0x2000 -> code (phys 0x0) and 0x4000 -> parent data. */
printf("set up the tlb\n");
x86_mmu_prepare_tlb(uc, 0x0, tlb_base);
x86_mmu_pt_set(uc, 0x2000, 0x0, tlb_base);
x86_mmu_pt_set(uc, 0x4000, 0x1000, tlb_base);
err = uc_ctl_flush_tlb(uc);
if (err) {
printf("failed to flush tlb\n");
exit(1);
}
/* Run until the fork syscall pauses emulation. */
printf("run the parrent\n");
err = uc_emu_start(uc, 0x2000, 0x0, 0, 0);
if (err) {
printf("failed to run parrent\n");
exit(1);
}
/* Snapshot the post-fork state so the child can resume from it. */
printf("save the context for the child\n");
err = uc_context_save(uc, context);
printf("finish the parrent\n");
err = uc_reg_read(uc, UC_X86_REG_RIP, &rip);
if (err) {
printf("failed to read rip\n");
exit(1);
}
err = uc_emu_start(uc, rip, 0x0, 0, 0);
if (err) {
printf("failed to flush tlb\n");
exit(1);
}
printf("restore the context for the child\n");
err = uc_context_restore(uc, context);
if (err) {
printf("failed to restore context\n");
exit(1);
}
/* Repoint virtual 0x4000 at the child's data page and flush. */
x86_mmu_prepare_tlb(uc, 0x0, tlb_base);
x86_mmu_pt_set(uc, 0x4000, 0x2000, tlb_base);
rax = 0;
err = uc_reg_write(uc, UC_X86_REG_RAX, &rax);
if (err) {
printf("failed to write rax\n");
exit(1);
}
err = uc_ctl_flush_tlb(uc);
if (err) {
printf("failed to flush tlb\n");
exit(1);
}
err = uc_emu_start(uc, rip, 0x0, 0, 0);
if (err) {
printf("failed to run child\n");
exit(1);
}
/* Read both results back through their physical addresses. */
err = uc_mem_read(uc, 0x1000, &parrent, sizeof(parrent));
if (err) {
printf("failed to read from parrent memory\n");
exit(1);
}
err = uc_mem_read(uc, 0x2000, &child, sizeof(child));
if (err) {
printf("failed to read from child memory\n");
exit(1);
}
printf("parrent result == %lu\n", parrent);
printf("child result == %lu\n", child);
uc_close(uc);
}
/* UC_HOOK_TLB_FILL callback for the virtual-TLB demo.  Translates the
 * two virtual pages the sample touches: 0x2000 maps to the code page at
 * physical 0x0 (execute-only), and 0x4000 maps to the parent's data page
 * (0x1000) until the parent is done, then to the child's (0x2000).  Any
 * other page returns false, making the access fault. */
static bool virtual_tlb_callback(uc_engine *uc, uint64_t addr, uc_mem_type type, uc_tlb_entry *result, void *user_data)
{
    bool *done = user_data;
    uint64_t page = addr & ~(0xfff);

    printf("tlb lookup for address: 0x%lX\n", addr);

    if (page == 0x2000) {
        result->paddr = 0x0;
        result->perms = UC_PROT_EXEC;
        return true;
    }
    if (page == 0x4000) {
        result->paddr = *done ? 0x2000 : 0x1000;
        result->perms = UC_PROT_READ | UC_PROT_WRITE;
        return true;
    }
    return false;
}
/* Demo 2: run the same fork/exit sample with the virtual TLB
 * (UC_TLB_VIRTUAL).  No guest page tables are built; instead
 * virtual_tlb_callback() supplies translations on demand, and flipping
 * parrent_done plus a TLB flush is enough to remap virtual 0x4000 from
 * the parent's data page to the child's between runs. */
void virtual_tlb(void)
{
uint64_t rax, rip;
bool parrent_done = false;
uint64_t parrent, child;
uc_context *context;
uc_engine *uc;
uc_err err;
uc_hook h1, h2, h3;
printf("Emulate x86 amd64 code with virtual mmu\n");
err = uc_open(UC_ARCH_X86, UC_MODE_64, &uc);
if (err) {
printf("Failed on uc_open() with error returned: %u\n", err);
exit(1);
}
/* Route every TLB fill through the UC_HOOK_TLB_FILL callback. */
uc_ctl_tlb_mode(uc, UC_TLB_VIRTUAL);
err = uc_context_alloc(uc, &context);
if (err) {
printf("Failed on uc_context_alloc() with error returned: %u\n", err);
exit(1);
}
err = uc_hook_add(uc, &h1, UC_HOOK_INSN, &x86_mmu_syscall_callback, &parrent_done, 1, 0, UC_X86_INS_SYSCALL);
if (err) {
printf("Failed on uc_hook_add() with error returned: %u\n", err);
exit(1);
}
// Memory hooks are called after the mmu translation, so hook the physicall addresses
err = uc_hook_add(uc, &h2, UC_HOOK_MEM_WRITE, &mmu_write_callback, NULL, 0x1000, 0x3000);
if (err) {
printf("Faled on uc_hook_add() with error returned: %u\n", err);
}
printf("map code\n");
err = uc_mem_map(uc, 0x0, 0x1000, UC_PROT_ALL); //Code
if (err) {
printf("Failed on uc_mem_map() with error return: %u\n", err);
exit(1);
}
err = uc_mem_write(uc, 0x0, code, sizeof(code) - 1);
if (err) {
printf("Failed on uc_mem_wirte() with error return: %u\n", err);
exit(1);
}
printf("map parrent memory\n");
err = uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL); //Parrent
if (err) {
printf("Failed on uc_mem_map() with error return: %u\n", err);
exit(1);
}
printf("map child memory\n");
err = uc_mem_map(uc, 0x2000, 0x1000, UC_PROT_ALL); //Child
if (err) {
printf("failed to map child memory\n");
exit(1);
}
err = uc_hook_add(uc, &h3, UC_HOOK_TLB_FILL, virtual_tlb_callback, &parrent_done, 1, 0);
/* Run until the fork syscall pauses emulation. */
printf("run the parrent\n");
err = uc_emu_start(uc, 0x2000, 0x0, 0, 0);
if (err) {
printf("failed to run parrent\n");
exit(1);
}
/* Snapshot the post-fork state so the child can resume from it. */
printf("save the context for the child\n");
err = uc_context_save(uc, context);
printf("finish the parrent\n");
err = uc_reg_read(uc, UC_X86_REG_RIP, &rip);
if (err) {
printf("failed to read rip\n");
exit(1);
}
err = uc_emu_start(uc, rip, 0x0, 0, 0);
if (err) {
printf("failed to flush tlb\n");
exit(1);
}
printf("restore the context for the child\n");
err = uc_context_restore(uc, context);
if (err) {
printf("failed to restore context\n");
exit(1);
}
/* Flip the flag so the callback now maps 0x4000 -> 0x2000, then flush
 * so stale parent translations are dropped. */
rax = 0;
parrent_done = true;
err = uc_reg_write(uc, UC_X86_REG_RAX, &rax);
if (err) {
printf("failed to write rax\n");
exit(1);
}
err = uc_ctl_flush_tlb(uc);
if (err) {
printf("failed to flush tlb\n");
exit(1);
}
err = uc_emu_start(uc, rip, 0x0, 0, 0);
if (err) {
printf("failed to run child\n");
exit(1);
}
/* Read both results back through their physical addresses. */
err = uc_mem_read(uc, 0x1000, &parrent, sizeof(parrent));
if (err) {
printf("failed to read from parrent memory\n");
exit(1);
}
err = uc_mem_read(uc, 0x2000, &child, sizeof(child));
if (err) {
printf("failed to read from child memory\n");
exit(1);
}
printf("parrent result == %lu\n", parrent);
printf("child result == %lu\n", child);
uc_close(uc);
}
/* Run both demo variants: the CPU's real page-table walk first, then
 * the UC_HOOK_TLB_FILL based virtual TLB. */
int main(void)
{
cpu_tlb();
virtual_tlb();
}

View File

@ -4,6 +4,7 @@ CMD_PATH=$(realpath $0)
SOURCE_DIR=$(dirname ${CMD_PATH})
COMMON_SYMBOLS="
unicorn_fill_tlb \
uc_add_inline_hook \
uc_del_inline_hook \
tb_invalidate_phys_range \

View File

@ -372,6 +372,107 @@ static void test_arm64_block_invalid_mem_read_write_sync(void)
OK(uc_close(uc));
}
/* ARM64 MMU test: emulated code enables the EL1 MMU with a hand-built
 * translation table, reads one page through its physical address
 * (0x40000000) into X1, then reads it again through the virtual mapping
 * at 0x80000000 into X2; both must see the 0x44 fill pattern.
 *
 * Fix vs. the original: the engine handle was never closed, leaking it
 * (every sibling test in this file calls uc_close()). */
static void test_arm64_mmu(void)
{
    uc_engine *uc;
    char *data;
    char tlbe[8];
    uint64_t x0, x1, x2;
    /*
     * Not exact the binary, but aarch64-linux-gnu-as generate this code and reference sometimes data after ttb0_base.
     * // Read data from physical address
     * ldr X0, =0x40000000
     * ldr X1, [X0]
     * // Initialize translation table control registers
     * ldr X0, =0x180803F20
     * msr TCR_EL1, X0
     * ldr X0, =0xFFFFFFFF
     * msr MAIR_EL1, X0
     * // Set translation table
     * adr X0, ttb0_base
     * msr TTBR0_EL1, X0
     * // Enable caches and the MMU
     * mrs X0, SCTLR_EL1
     * orr X0, X0, #(0x1 << 2) // The C bit (data cache).
     * orr X0, X0, #(0x1 << 12) // The I bit (instruction cache)
     * orr X0, X0, #0x1 // The M bit (MMU).
     * msr SCTLR_EL1, X0
     * dsb SY
     * isb
     * // Read the same memory area through virtual address
     * ldr X0, =0x80000000
     * ldr X2, [X0]
     *
     * // Stop
     * b .
     */
    char code[] = "\x00\x81\x00\x58\x01\x00\x40\xf9\x00\x81\x00\x58\x40\x20\x18\xd5\x00\x81\x00\x58\x00\xa2\x18\xd5\x40\x7f\x00\x10\x00\x20\x18\xd5\x00\x10\x38\xd5\x00\x00\x7e\xb2\x00\x00\x74\xb2\x00\x00\x40\xb2\x00\x10\x18\xd5\x9f\x3f\x03\xd5\xdf\x3f\x03\xd5\xe0\x7f\x00\x58\x02\x00\x40\xf9\x00\x00\x00\x14\x1f\x20\x03\xd5\x1f\x20\x03\xd5\x1F\x20\x03\xD5\x1F\x20\x03\xD5";
    data = malloc(0x1000);
    TEST_CHECK(data != NULL);
    OK(uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc));
    OK(uc_ctl_tlb_mode(uc, UC_TLB_CPU));
    OK(uc_mem_map(uc, 0, 0x2000, UC_PROT_ALL));
    OK(uc_mem_write(uc, 0, code, sizeof(code) - 1));
    // generate tlb entries
    tlbe[0] = 0x41;
    tlbe[1] = 0x07;
    tlbe[2] = 0;
    tlbe[3] = 0;
    tlbe[4] = 0;
    tlbe[5] = 0;
    tlbe[6] = 0;
    tlbe[7] = 0;
    OK(uc_mem_write(uc, 0x1000, tlbe, sizeof(tlbe)));
    tlbe[3] = 0x40;
    OK(uc_mem_write(uc, 0x1008, tlbe, sizeof(tlbe)));
    OK(uc_mem_write(uc, 0x1010, tlbe, sizeof(tlbe)));
    OK(uc_mem_write(uc, 0x1018, tlbe, sizeof(tlbe)));
    //mentioned data referenced by the asm generated my aarch64-linux-gnu-as
    tlbe[0] = 0;
    tlbe[1] = 0;
    OK(uc_mem_write(uc, 0x1020, tlbe, sizeof(tlbe)));
    tlbe[0] = 0x20;
    tlbe[1] = 0x3f;
    tlbe[2] = 0x80;
    tlbe[3] = 0x80;
    tlbe[4] = 0x1;
    OK(uc_mem_write(uc, 0x1028, tlbe, sizeof(tlbe)));
    tlbe[0] = 0xff;
    tlbe[1] = 0xff;
    tlbe[2] = 0xff;
    tlbe[3] = 0xff;
    tlbe[4] = 0x00;
    OK(uc_mem_write(uc, 0x1030, tlbe, sizeof(tlbe)));
    tlbe[0] = 0x00;
    tlbe[1] = 0x00;
    tlbe[2] = 0x00;
    tlbe[3] = 0x80;
    OK(uc_mem_write(uc, 0x1038, tlbe, sizeof(tlbe)));
    /* Fill the backing page so both reads can be checked for 0x44…44. */
    for (size_t i = 0; i < 0x1000; i++) {
        data[i] = 0x44;
    }
    OK(uc_mem_map_ptr(uc, 0x40000000, 0x1000, UC_PROT_READ, data));
    OK(uc_emu_start(uc, 0, 0x44, 0, 0));
    OK(uc_reg_read(uc, UC_ARM64_REG_X0, &x0));
    OK(uc_reg_read(uc, UC_ARM64_REG_X1, &x1));
    OK(uc_reg_read(uc, UC_ARM64_REG_X2, &x2));
    TEST_CHECK(x0 == 0x80000000);
    TEST_CHECK(x1 == 0x4444444444444444);
    TEST_CHECK(x2 == 0x4444444444444444);
    OK(uc_close(uc)); /* was missing: leaked the engine */
    free(data);
}
TEST_LIST = {{"test_arm64_until", test_arm64_until},
{"test_arm64_code_patching", test_arm64_code_patching},
{"test_arm64_code_patching_count", test_arm64_code_patching_count},
@ -385,4 +486,5 @@ TEST_LIST = {{"test_arm64_until", test_arm64_until},
{"test_arm64_block_sync_pc", test_arm64_block_sync_pc},
{"test_arm64_block_invalid_mem_read_write_sync",
test_arm64_block_invalid_mem_read_write_sync},
{"test_arm64_mmu", test_arm64_mmu},
{NULL, NULL}};

View File

@ -338,6 +338,42 @@ static void test_uc_emu_stop_set_ip(void)
OK(uc_close(uc));
}
/* TLB-fill hook for test_tlb_clear: identity-maps every page with full
 * permissions and counts the number of fills via user_data, so the test
 * can verify that uc_ctl_flush_tlb() forces refills. */
static bool test_tlb_clear_tlb(uc_engine *uc, uint64_t addr, uc_mem_type type, uc_tlb_entry *result, void *user_data)
{
    size_t *fill_count = user_data;

    result->paddr = addr;
    result->perms = UC_PROT_ALL;
    ++(*fill_count);
    return true;
}
/* SYSCALL hook for test_tlb_clear: flushes the TLB mid-run so the two
 * writes around the syscall each trigger fresh TLB fills. */
static void test_tlb_clear_syscall(uc_engine *uc, void *user_data)
{
OK(uc_ctl_flush_tlb(uc));
}
/* Verifies UC_CTL_TLB_FLUSH with the virtual TLB: the code performs a
 * write, a syscall (whose hook flushes the TLB), and the same write
 * again.  Code and data pages each need one fill before the flush and
 * one after, so the fill hook must fire exactly 4 times. */
static void test_tlb_clear(void)
{
uc_engine *uc;
uc_hook hook1, hook2;
size_t tlbcount = 0;
char code[] = "\xa3\x00\x00\x20\x00\x00\x00\x00\x00\x0f\x05\xa3\x00\x00\x20\x00\x00\x00\x00\x00"; //movabs dword ptr [0x200000], eax; syscall; movabs dword ptr [0x200000], eax
uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_64, code, sizeof(code) - 1);
OK(uc_mem_map(uc, 0x200000, 0x1000, UC_PROT_ALL));
OK(uc_ctl_tlb_mode(uc, UC_TLB_VIRTUAL));
OK(uc_hook_add(uc, &hook1, UC_HOOK_TLB_FILL, test_tlb_clear_tlb, &tlbcount, 1, 0));
OK(uc_hook_add(uc, &hook2, UC_HOOK_INSN, test_tlb_clear_syscall, NULL, 1, 0, UC_X86_INS_SYSCALL));
OK(uc_emu_start(uc, code_start, code_start + sizeof(code) - 1, 0, 0));
TEST_CHECK(tlbcount == 4);
OK(uc_close(uc));
}
TEST_LIST = {{"test_uc_ctl_mode", test_uc_ctl_mode},
{"test_uc_ctl_page_size", test_uc_ctl_page_size},
{"test_uc_ctl_arch", test_uc_ctl_arch},
@ -350,4 +386,5 @@ TEST_LIST = {{"test_uc_ctl_mode", test_uc_ctl_mode},
#endif
{"test_uc_hook_cached_uaf", test_uc_hook_cached_uaf},
{"test_uc_emu_stop_set_ip", test_uc_emu_stop_set_ip},
{"test_tlb_clear", test_tlb_clear},
{NULL, NULL}};

View File

@ -634,6 +634,77 @@ static void test_riscv_correct_address_in_long_jump_hook(void)
OK(uc_close(uc));
}
/* Builds a three-level RISC-V page table rooted at sptbr (0x2000):
 * level-0 and level-1 tables each hold one non-leaf entry (valid bit
 * set) chaining to the next table, and the last-level table gets leaf
 * entries (R/W/X bits 1-3 plus valid) for the code page at virtual
 * 0x15000 (slot 0x15) and the data page at 0x16000 (slot 0x16).
 * NOTE(review): the `addr >> 2` encoding presumably matches the Sv39
 * PTE PPN layout ((addr >> 12) << 10) — confirm against the privileged
 * spec if these constants are changed. */
static void test_riscv_mmu_prepare_tlb(uc_engine *uc, uint32_t data_address, uint32_t code_address)
{
uint64_t tlbe;
uint32_t sptbr = 0x2000;
OK(uc_mem_map(uc, sptbr, 0x3000, UC_PROT_ALL)); //tlb base
tlbe = ((sptbr + 0x1000) >> 2) | 1;
OK(uc_mem_write(uc, sptbr, &tlbe, sizeof(tlbe)));
tlbe = ((sptbr + 0x2000) >> 2) | 1;
OK(uc_mem_write(uc, sptbr + 0x1000, &tlbe, sizeof(tlbe)));
tlbe = (code_address >> 2) | (7 << 1) | 1;
OK(uc_mem_write(uc, sptbr + 0x2000 + 0x15*8, &tlbe, sizeof(tlbe)));
tlbe = (data_address >> 2) | (7 << 1) | 1;
OK(uc_mem_write(uc, sptbr + 0x2000 + 0x16*8, &tlbe, sizeof(tlbe)));
}
/* Code hook: stops emulation at virtual 0x15010, i.e. after the store
 * in the S-mode code has executed (the nop at the end of code_s). */
static void test_riscv_mmu_hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *userdata)
{
if (address == 0x15010) {
OK(uc_emu_stop(uc));
}
}
/* RISC-V MMU test: M-mode code at 0x1000 programs sptbr/mstatus/mepc
 * and mret's into S-mode code running at virtual 0x15000, which stores
 * 0x41414141 through the virtual data mapping at 0x16000.  The value is
 * then read back through the physical data page.
 *
 * Fix vs. the original: the engine handle was never closed, leaking it
 * (every sibling test in this file calls uc_close()). */
static void test_riscv_mmu(void)
{
    uc_engine *uc;
    uc_hook h;
    uint32_t code_address = 0x5000;
    uint32_t data_address = 0x6000;
    uint32_t data_value = 0x41414141;
    uint32_t data_result = 0;

    /*
    li t3, (8 << 60) | 2
    csrw sptbr, t3
    li t0, (1 << 11) | (1 << 5)
    csrw mstatus, t0
    la t1, 0x15000
    csrw mepc, t1
    mret
    */
    char code_m[] = "\x1b\x0e\xf0\xff" "\x13\x1e\xfe\x03" "\x13\x0e\x2e\x00" "\x73\x10\x0e\x18" "\xb7\x12\x00\x00" "\x9b\x82\x02\x82" "\x73\x90\x02\x30" "\x37\x53\x01\x00" "\x73\x10\x13\x34" "\x73\x00\x20\x30";
    /*
    li t0, 0x41414141
    li t1, 0x16000
    sw t0, 0(t1)
    nop
    */
    char code_s[] = "\xb7\x42\x41\x41" "\x9b\x82\x12\x14" "\x37\x63\x01\x00" "\x23\x20\x53\x00" "\x13\x00\x00\x00";

    OK(uc_open(UC_ARCH_RISCV, UC_MODE_RISCV64, &uc));
    OK(uc_ctl_tlb_mode(uc, UC_TLB_CPU));
    OK(uc_hook_add(uc, &h, UC_HOOK_CODE, test_riscv_mmu_hook_code, NULL, 1, 0));
    OK(uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL));
    OK(uc_mem_map(uc, code_address, 0x1000, UC_PROT_ALL));
    OK(uc_mem_map(uc, data_address, 0x1000, UC_PROT_ALL));
    OK(uc_mem_write(uc, code_address, &code_s, sizeof(code_s)));
    OK(uc_mem_write(uc, 0x1000, &code_m, sizeof(code_m)));
    test_riscv_mmu_prepare_tlb(uc, data_address, code_address);
    OK(uc_emu_start(uc, 0x1000, sizeof(code_m) - 1, 0, 0));
    OK(uc_mem_read(uc, data_address, &data_result, sizeof(data_result)));
    TEST_CHECK(data_value == data_result);
    OK(uc_close(uc)); /* was missing: leaked the engine */
}
TEST_LIST = {
{"test_riscv32_nop", test_riscv32_nop},
{"test_riscv64_nop", test_riscv64_nop},
@ -657,4 +728,5 @@ TEST_LIST = {
test_riscv_correct_address_in_small_jump_hook},
{"test_riscv_correct_address_in_long_jump_hook",
test_riscv_correct_address_in_long_jump_hook},
{"test_riscv_mmu", test_riscv_mmu},
{NULL, NULL}};

View File

@ -1090,6 +1090,7 @@ static void test_x86_correct_address_in_long_jump_hook(void)
uc_hook hook;
uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_64, code, sizeof(code) - 1);
OK(uc_ctl_tlb_mode(uc, UC_TLB_VIRTUAL));
OK(uc_hook_add(uc, &hook, UC_HOOK_MEM_UNMAPPED,
test_x86_correct_address_in_long_jump_hook_callback, NULL, 1,
0));
@ -1259,6 +1260,162 @@ static void test_x86_16_incorrect_ip(void)
OK(uc_close(uc));
}
/* Build a 4-level page-table hierarchy (PML4 -> PDPT -> PD) at tlb_base for
 * vaddr and switch the vCPU into 64-bit paging mode (CR0.PE|PG, CR4.PAE,
 * EFER.LME).  The tables live at tlb_base + 0x0/0x1000/0x2000/0x3000; the
 * leaf PTEs are installed separately via test_x86_mmu_pt_set(). */
static void test_x86_mmu_prepare_tlb(uc_engine *uc, uint64_t vaddr, uint64_t tlb_base)
{
    uint64_t cr0;
    uint64_t cr4;
    uc_x86_msr msr = {.rid = 0x0c0000080, .value = 0}; /* IA32_EFER */
    /* Byte offsets of the entries covering vaddr at each table level
     * (9 index bits per level, 8 bytes per entry). */
    uint64_t pml4o = ((vaddr & 0x00ff8000000000) >> 39)*8;
    uint64_t pdpo = ((vaddr & 0x00007fc0000000) >> 30)*8;
    uint64_t pdo = ((vaddr & 0x0000003fe00000) >> 21)*8;
    /* Each entry points at the next-level table, flags: present (bit 0)
     * and user-accessible (bit 2). */
    uint64_t pml4e = (tlb_base + 0x1000) | 1 | (1 << 2);
    uint64_t pdpe = (tlb_base + 0x2000) | 1 | (1 << 2);
    uint64_t pde = (tlb_base + 0x3000) | 1 | (1 << 2);

    /* BUG FIX: was sizeof(pml4o) — same byte count here since both are
     * uint64_t, but the size must follow the object actually written. */
    OK(uc_mem_write(uc, tlb_base + pml4o, &pml4e, sizeof(pml4e)));
    OK(uc_mem_write(uc, tlb_base + 0x1000 + pdpo, &pdpe, sizeof(pdpe)));
    OK(uc_mem_write(uc, tlb_base + 0x2000 + pdo, &pde, sizeof(pde)));

    /* Point CR3 at the PML4 and enable long-mode paging. */
    OK(uc_reg_write(uc, UC_X86_REG_CR3, &tlb_base));
    OK(uc_reg_read(uc, UC_X86_REG_CR0, &cr0));
    OK(uc_reg_read(uc, UC_X86_REG_CR4, &cr4));
    OK(uc_reg_read(uc, UC_X86_REG_MSR, &msr));
    cr0 |= 1;          /* PE: protected mode */
    cr0 |= 1l << 31;   /* PG: paging enabled */
    cr4 |= 1l << 5;    /* PAE */
    msr.value |= 1l << 8; /* EFER.LME: long mode enable */
    OK(uc_reg_write(uc, UC_X86_REG_CR0, &cr0));
    OK(uc_reg_write(uc, UC_X86_REG_CR4, &cr4));
    OK(uc_reg_write(uc, UC_X86_REG_MSR, &msr));
}
/* Install the leaf page-table entry mapping vaddr -> paddr in the page
 * table at tlb_base + 0x3000 (set up by test_x86_mmu_prepare_tlb).
 * The entry is marked present (bit 0) and user-accessible (bit 2). */
static void test_x86_mmu_pt_set(uc_engine *uc, uint64_t vaddr, uint64_t paddr, uint64_t tlb_base)
{
    uint64_t pto = ((vaddr & 0x000000001ff000) >> 12)*8;
    /* BUG FIX: an x86-64 PTE is 64 bits wide; the old uint32_t entry
     * truncated physical addresses above 4 GiB and wrote only the low
     * half of the entry.  Also check the write like every sibling call. */
    uint64_t pte = (paddr) | 1 | (1 << 2);

    OK(uc_mem_write(uc, tlb_base + 0x3000 + pto, &pte, sizeof(pte)));
}
/* UC_HOOK_INSN callback for SYSCALL in test_x86_mmu: emulates a minimal
 * fork()/exit() pair.  On the first fork (rax == 57) it stops emulation
 * with rax set to a fake child pid (27) so the test can snapshot the
 * "parent"; on exit (rax == 60) it simply stops emulation.  Any other
 * syscall number fails the test.  userdata points at the test's
 * parent_done flag. */
static void test_x86_mmu_callback(uc_engine *uc, void *userdata)
{
    bool *parent_done = userdata;
    uint64_t syscall_nr;

    OK(uc_reg_read(uc, UC_X86_REG_RAX, &syscall_nr));

    if (syscall_nr == 60) {
        /* exit: end this "process"'s run. */
        uc_emu_stop(uc);
        return;
    }
    if (syscall_nr != 57) {
        /* only fork and exit are expected here */
        TEST_CHECK(false);
    }

    if (!(*parent_done)) {
        /* first fork: report a child pid to the parent and pause so the
         * test can save the context before resuming. */
        *parent_done = true;
        syscall_nr = 27;
        OK(uc_reg_write(uc, UC_X86_REG_RAX, &syscall_nr));
        uc_emu_stop(uc);
    }
}
/* Exercise the real (UC_TLB_CPU) x86-64 MMU: run a fork/exit program once
 * as the "parent" and once as the "child", giving each its own mapping of
 * virtual page 0x4000 via separate page-table setups, then verify each
 * process wrote its value to its own physical page. */
static void test_x86_mmu(void)
{
    bool parent_done = false;
    uint64_t tlb_base = 0x3000;
    uint64_t parent, child;
    uint64_t rax, rip;
    uc_context *context;
    uc_engine *uc;
    uc_hook h1;
    /*
     * mov rax, 57
     * syscall
     * test rax, rax
     * jz child
     * xor rax, rax
     * mov rax, 60
     * mov [0x4000], rax
     * syscall
     *
     * child:
     * xor rcx, rcx
     * mov rcx, 42
     * mov [0x4000], rcx
     * mov rax, 60
     * syscall
     */
    char code[] = "\xB8\x39\x00\x00\x00\x0F\x05\x48\x85\xC0\x74\x0F\xB8\x3C\x00\x00\x00\x48\x89\x04\x25\x00\x40\x00\x00\x0F\x05\xB9\x2A\x00\x00\x00\x48\x89\x0C\x25\x00\x40\x00\x00\xB8\x3C\x00\x00\x00\x0F\x05";

    OK(uc_open(UC_ARCH_X86, UC_MODE_64, &uc));
    OK(uc_ctl_tlb_mode(uc, UC_TLB_CPU));
    OK(uc_hook_add(uc, &h1, UC_HOOK_INSN, &test_x86_mmu_callback, &parent_done, 1, 0, UC_X86_INS_SYSCALL));
    OK(uc_context_alloc(uc, &context));

    OK(uc_mem_map(uc, 0x0, 0x1000, UC_PROT_ALL));      // Code
    OK(uc_mem_write(uc, 0x0, code, sizeof(code) - 1));
    OK(uc_mem_map(uc, 0x1000, 0x1000, UC_PROT_ALL));   // Parent data page
    OK(uc_mem_map(uc, 0x2000, 0x1000, UC_PROT_ALL));   // Child data page
    OK(uc_mem_map(uc, tlb_base, 0x4000, UC_PROT_ALL)); // Page tables

    /* Parent mapping: code at 0x2000 -> phys 0x0, data 0x4000 -> phys 0x1000. */
    test_x86_mmu_prepare_tlb(uc, 0x0, tlb_base);
    test_x86_mmu_pt_set(uc, 0x2000, 0x0, tlb_base);
    test_x86_mmu_pt_set(uc, 0x4000, 0x1000, tlb_base);
    OK(uc_ctl_flush_tlb(uc));

    /* Run until the fork syscall stops emulation, snapshot the state. */
    OK(uc_emu_start(uc, 0x2000, 0x0, 0, 0));
    OK(uc_context_save(uc, context));
    OK(uc_reg_read(uc, UC_X86_REG_RIP, &rip));

    /* Parent path: rax == 27 (fake child pid), runs to exit. */
    OK(uc_emu_start(uc, rip, 0x0, 0, 0));

    /* Child path: restore the snapshot, remap 0x4000 -> phys 0x2000,
     * and return 0 from fork. */
    OK(uc_context_restore(uc, context));
    test_x86_mmu_prepare_tlb(uc, 0x0, tlb_base);
    test_x86_mmu_pt_set(uc, 0x4000, 0x2000, tlb_base);
    rax = 0;
    OK(uc_reg_write(uc, UC_X86_REG_RAX, &rax));
    OK(uc_ctl_flush_tlb(uc));
    OK(uc_emu_start(uc, rip, 0x0, 0, 0));

    OK(uc_mem_read(uc, 0x1000, &parent, sizeof(parent)));
    OK(uc_mem_read(uc, 0x2000, &child, sizeof(child)));
    TEST_CHECK(parent == 60);
    TEST_CHECK(child == 42);

    /* BUG FIX: the context and the engine were leaked; sibling tests
     * (e.g. test_x86_vtlb) release their resources. */
    OK(uc_context_free(context));
    OK(uc_close(uc));
}
/* UC_HOOK_TLB_FILL callback for test_x86_vtlb: identity-map every virtual
 * address (paddr == vaddr) with full permissions.  Returning true tells
 * the virtual TLB the entry in *result is valid. */
static bool test_x86_vtlb_callback(uc_engine *uc, uint64_t addr, uc_mem_type type, uc_tlb_entry *result, void *user_data)
{
    result->perms = UC_PROT_ALL;
    result->paddr = addr;
    return true;
}
/* Smoke-test the virtual TLB (UC_TLB_VIRTUAL): install an identity-mapping
 * UC_HOOK_TLB_FILL hook and check that a short jump executes to the
 * expected instruction pointer. */
static void test_x86_vtlb(void)
{
    // jmp 4; nop; nop; nop; nop; nop; nop
    char code[] = "\xeb\x02\x90\x90\x90\x90\x90\x90";
    uint64_t eip = 0;
    uc_hook hook;
    uc_engine *uc;

    uc_common_setup(&uc, UC_ARCH_X86, UC_MODE_32, code, sizeof(code) - 1);
    OK(uc_ctl_tlb_mode(uc, UC_TLB_VIRTUAL));
    OK(uc_hook_add(uc, &hook, UC_HOOK_TLB_FILL, test_x86_vtlb_callback, NULL, 1, 0));

    OK(uc_emu_start(uc, code_start, code_start + 4, 0, 0));

    OK(uc_reg_read(uc, UC_X86_REG_EIP, &eip));
    TEST_CHECK(eip == code_start + 4);

    OK(uc_close(uc));
}
TEST_LIST = {
{"test_x86_in", test_x86_in},
{"test_x86_out", test_x86_out},
@ -1302,4 +1459,6 @@ TEST_LIST = {
#endif
{"test_x86_lazy_mapping", test_x86_lazy_mapping},
{"test_x86_16_incorrect_ip", test_x86_16_incorrect_ip},
{"test_x86_mmu", test_x86_mmu},
{"test_x86_vtlb", test_x86_vtlb},
{NULL, NULL}};

71
uc.c
View File

@ -230,6 +230,11 @@ static uc_err uc_init(uc_engine *uc)
return UC_ERR_RESOURCE;
}
// init tlb function
if (!uc->cpu->cc->tlb_fill) {
uc->set_tlb(uc, UC_TLB_CPU);
}
// init fpu softfloat
uc->softfloat_initialize();
@ -577,10 +582,6 @@ uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *_bytes, size_t size)
if (size > INT_MAX)
return UC_ERR_ARG;
if (uc->mem_redirect) {
address = uc->mem_redirect(address);
}
if (!check_mem_area(uc, address, size)) {
return UC_ERR_READ_UNMAPPED;
}
@ -622,10 +623,6 @@ uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *_bytes,
if (size > INT_MAX)
return UC_ERR_ARG;
if (uc->mem_redirect) {
address = uc->mem_redirect(address);
}
if (!check_mem_area(uc, address, size)) {
return UC_ERR_WRITE_UNMAPPED;
}
@ -1039,10 +1036,6 @@ uc_err uc_mem_map(uc_engine *uc, uint64_t address, size_t size, uint32_t perms)
UC_INIT(uc);
if (uc->mem_redirect) {
address = uc->mem_redirect(address);
}
res = mem_map_check(uc, address, size, perms);
if (res) {
return res;
@ -1063,10 +1056,6 @@ uc_err uc_mem_map_ptr(uc_engine *uc, uint64_t address, size_t size,
return UC_ERR_ARG;
}
if (uc->mem_redirect) {
address = uc->mem_redirect(address);
}
res = mem_map_check(uc, address, size, perms);
if (res) {
return res;
@ -1084,10 +1073,6 @@ uc_err uc_mmio_map(uc_engine *uc, uint64_t address, size_t size,
UC_INIT(uc);
if (uc->mem_redirect) {
address = uc->mem_redirect(address);
}
res = mem_map_check(uc, address, size, UC_PROT_ALL);
if (res)
return res;
@ -1387,10 +1372,6 @@ uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, size_t size,
return UC_ERR_ARG;
}
if (uc->mem_redirect) {
address = uc->mem_redirect(address);
}
// check that user's entire requested block is mapped
if (!check_mem_area(uc, address, size)) {
return UC_ERR_NOMEM;
@ -1467,10 +1448,6 @@ uc_err uc_mem_unmap(struct uc_struct *uc, uint64_t address, size_t size)
return UC_ERR_ARG;
}
if (uc->mem_redirect) {
address = uc->mem_redirect(address);
}
// check that user's entire requested block is mapped
if (!check_mem_area(uc, address, size)) {
return UC_ERR_NOMEM;
@ -1515,10 +1492,6 @@ MemoryRegion *find_memory_region(struct uc_struct *uc, uint64_t address)
return NULL;
}
if (uc->mem_redirect) {
address = uc->mem_redirect(address);
}
// try with the cache index first
i = uc->mapped_block_cache_index;
@ -2377,6 +2350,30 @@ uc_err uc_ctl(uc_engine *uc, uc_control_type control, ...)
}
break;
case UC_CTL_TLB_FLUSH:
UC_INIT(uc);
if (rw == UC_CTL_IO_WRITE) {
uc->tcg_flush_tlb(uc);
} else {
err = UC_ERR_ARG;
}
break;
case UC_CTL_TLB_TYPE: {
UC_INIT(uc);
if (rw == UC_CTL_IO_WRITE) {
int mode = va_arg(args, int);
err = uc->set_tlb(uc, mode);
} else {
err = UC_ERR_ARG;
}
break;
}
default:
err = UC_ERR_ARG;
break;
@ -2387,6 +2384,16 @@ uc_err uc_ctl(uc_engine *uc, uc_control_type control, ...)
return err;
}
/* GTree comparator: orders two virtual addresses stored directly in the
 * key pointers, ascending.  Returns <0, 0 or >0 as lhs is below, equal
 * to, or above rhs; user_data is unused. */
gint cmp_vaddr(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint64_t lhs = (uint64_t)a;
    uint64_t rhs = (uint64_t)b;

    if (lhs < rhs) {
        return -1;
    }
    return (lhs > rhs) ? 1 : 0;
}
#ifdef UNICORN_TRACER
uc_tracer *get_tracer()
{