exec: Make ldq/ldub_*_phys input an AddressSpace
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
parent fdfba1a298
commit 2c17449b30
exec.c
@@ -1612,7 +1612,7 @@ static uint64_t watch_mem_read(void *opaque, hwaddr addr,
 {
     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
     switch (size) {
-    case 1: return ldub_phys(addr);
+    case 1: return ldub_phys(&address_space_memory, addr);
     case 2: return lduw_phys(addr);
     case 4: return ldl_phys(&address_space_memory, addr);
     default: abort();
@@ -2406,7 +2406,7 @@ uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
 }

 /* warning: addr must be aligned */
-static inline uint64_t ldq_phys_internal(hwaddr addr,
+static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
                                          enum device_endian endian)
 {
     uint8_t *ptr;
@@ -2415,7 +2415,7 @@ static inline uint64_t ldq_phys_internal(hwaddr addr,
     hwaddr l = 8;
     hwaddr addr1;

-    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
+    mr = address_space_translate(as, addr, &addr1, &l,
                                  false);
     if (l < 8 || !memory_access_is_direct(mr, false)) {
         /* I/O case */
@@ -2449,26 +2449,26 @@ static inline uint64_t ldq_phys_internal(hwaddr addr,
     return val;
 }

-uint64_t ldq_phys(hwaddr addr)
+uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
 {
-    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
+    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
 }

-uint64_t ldq_le_phys(hwaddr addr)
+uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
 {
-    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
+    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
 }

-uint64_t ldq_be_phys(hwaddr addr)
+uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
 {
-    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
+    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
 }

 /* XXX: optimize */
-uint32_t ldub_phys(hwaddr addr)
+uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
 {
     uint8_t val;
-    cpu_physical_memory_read(addr, &val, 1);
+    address_space_rw(as, addr, &val, 1, 0);
     return val;
 }

@@ -613,7 +613,7 @@ static bool make_iommu_tlbe(hwaddr taddr, hwaddr mask, IOMMUTLBEntry *ret)
    translation, given the address of the PTE.  */
 static bool pte_translate(hwaddr pte_addr, IOMMUTLBEntry *ret)
 {
-    uint64_t pte = ldq_phys(pte_addr);
+    uint64_t pte = ldq_phys(&address_space_memory, pte_addr);

     /* Check valid bit.  */
     if ((pte & 1) == 0) {
@@ -30,6 +30,7 @@
 #include "hw/sysbus.h"
 #include "qemu/range.h"
 #include "ui/pixel_ops.h"
+#include "exec/address-spaces.h"

 /*
  * Status: 2010/05/07
@@ -120,7 +120,7 @@ static void glue(draw_hwc_line_, PIXEL_NAME)(SM501State * s, int crt,

         /* get pixel value */
         if (i % 4 == 0) {
-            bitset = ldub_phys(cursor_addr);
+            bitset = ldub_phys(&address_space_memory, cursor_addr);
             cursor_addr++;
         }
         v = bitset & 3;
@@ -65,7 +65,7 @@ vmw_shmem_set(hwaddr addr, uint8 val, int len)
 static inline uint32_t
 vmw_shmem_ld8(hwaddr addr)
 {
-    uint8_t res = ldub_phys(addr);
+    uint8_t res = ldub_phys(&address_space_memory, addr);
     VMW_SHPRN("SHMEM load8: %" PRIx64 " (value 0x%X)", addr, res);
     return res;
 }
@@ -110,7 +110,7 @@ vmw_shmem_st32(hwaddr addr, uint32_t value)
 static inline uint64_t
 vmw_shmem_ld64(hwaddr addr)
 {
-    uint64_t res = ldq_le_phys(addr);
+    uint64_t res = ldq_le_phys(&address_space_memory, addr);
     VMW_SHPRN("SHMEM load64: %" PRIx64 " (value %" PRIx64 ")", addr, res);
     return res;
 }
@@ -341,6 +341,7 @@ static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPREnvironment *spapr,

 static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
 {
+    CPUState *cs = ENV_GET_CPU(env);
     uint16_t size;
     uint8_t tmp;

@@ -367,7 +368,7 @@ static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)

     env->vpa_addr = vpa;

-    tmp = ldub_phys(env->vpa_addr + VPA_SHARED_PROC_OFFSET);
+    tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
     tmp |= VPA_SHARED_PROC_VAL;
     stb_phys(env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

@@ -540,7 +541,7 @@ static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPREnvironment *spapr,

     switch (size) {
     case 1:
-        args[0] = ldub_phys(addr);
+        args[0] = ldub_phys(cs->as, addr);
         return H_SUCCESS;
     case 2:
         args[0] = lduw_phys(addr);
@@ -549,7 +550,7 @@ static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPREnvironment *spapr,
         args[0] = ldl_phys(cs->as, addr);
         return H_SUCCESS;
     case 8:
-        args[0] = ldq_phys(addr);
+        args[0] = ldq_phys(cs->as, addr);
         return H_SUCCESS;
     }
     return H_PARAMETER;
@@ -610,7 +611,7 @@ static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPREnvironment *spapr,
     while (count--) {
         switch (esize) {
         case 0:
-            tmp = ldub_phys(src);
+            tmp = ldub_phys(cs->as, src);
             break;
         case 1:
             tmp = lduw_phys(src);
@@ -619,7 +620,7 @@ static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPREnvironment *spapr,
             tmp = ldl_phys(cs->as, src);
             break;
         case 3:
-            tmp = ldq_phys(src);
+            tmp = ldq_phys(cs->as, src);
             break;
         default:
             return H_PARAMETER;
@@ -324,7 +324,7 @@ static uint64_t s390_virtio_device_vq_token(VirtIOS390Device *dev, int vq)
                         (vq * VIRTIO_VQCONFIG_LEN) +
                         VIRTIO_VQCONFIG_OFFS_TOKEN;

-    return ldq_be_phys(token_off);
+    return ldq_be_phys(&address_space_memory, token_off);
 }

 static ram_addr_t s390_virtio_device_num_vq(VirtIOS390Device *dev)
@@ -405,7 +405,8 @@ void s390_virtio_device_update_status(VirtIOS390Device *dev)
     VirtIODevice *vdev = dev->vdev;
     uint32_t features;

-    virtio_set_status(vdev, ldub_phys(dev->dev_offs + VIRTIO_DEV_OFFS_STATUS));
+    virtio_set_status(vdev, ldub_phys(&address_space_memory,
+                                      dev->dev_offs + VIRTIO_DEV_OFFS_STATUS));

     /* Update guest supported feature bitmap */

@@ -262,7 +262,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
         if (!ccw.cda) {
             ret = -EFAULT;
         } else {
-            info.queue = ldq_phys(ccw.cda);
+            info.queue = ldq_phys(&address_space_memory, ccw.cda);
             info.align = ldl_phys(&address_space_memory,
                                   ccw.cda + sizeof(info.queue));
             info.index = lduw_phys(ccw.cda + sizeof(info.queue)
@@ -294,7 +294,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
         if (!ccw.cda) {
             ret = -EFAULT;
         } else {
-            features.index = ldub_phys(ccw.cda + sizeof(features.features));
+            features.index = ldub_phys(&address_space_memory,
+                                       ccw.cda + sizeof(features.features));
             if (features.index < ARRAY_SIZE(dev->host_features)) {
                 features.features = dev->host_features[features.index];
             } else {
@@ -320,7 +321,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
         if (!ccw.cda) {
             ret = -EFAULT;
         } else {
-            features.index = ldub_phys(ccw.cda + sizeof(features.features));
+            features.index = ldub_phys(&address_space_memory,
+                                       ccw.cda + sizeof(features.features));
             features.features = ldl_le_phys(&address_space_memory, ccw.cda);
             if (features.index < ARRAY_SIZE(dev->host_features)) {
                 virtio_bus_set_vdev_features(&dev->bus, features.features);
@@ -398,7 +400,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
         if (!ccw.cda) {
             ret = -EFAULT;
         } else {
-            status = ldub_phys(ccw.cda);
+            status = ldub_phys(&address_space_memory, ccw.cda);
             if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                 virtio_ccw_stop_ioeventfd(dev);
             }
@@ -427,7 +429,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
         if (!ccw.cda) {
             ret = -EFAULT;
         } else {
-            indicators = ldq_phys(ccw.cda);
+            indicators = ldq_phys(&address_space_memory, ccw.cda);
             dev->indicators = indicators;
             sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
             ret = 0;
@@ -447,7 +449,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
         if (!ccw.cda) {
             ret = -EFAULT;
         } else {
-            indicators = ldq_phys(ccw.cda);
+            indicators = ldq_phys(&address_space_memory, ccw.cda);
             dev->indicators2 = indicators;
             sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
             ret = 0;
@@ -867,7 +869,7 @@ static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
         if (!dev->indicators) {
             return;
         }
-        indicators = ldq_phys(dev->indicators);
+        indicators = ldq_phys(&address_space_memory, dev->indicators);
         indicators |= 1ULL << vector;
         stq_phys(dev->indicators, indicators);
     } else {
@@ -875,7 +877,7 @@ static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
             return;
         }
         vector = 0;
-        indicators = ldq_phys(dev->indicators2);
+        indicators = ldq_phys(&address_space_memory, dev->indicators2);
         indicators |= 1ULL << vector;
         stq_phys(dev->indicators2, indicators);
     }
@@ -158,7 +158,8 @@ static void megasas_frame_set_scsi_status(unsigned long frame, uint8_t v)
  */
 static uint64_t megasas_frame_get_context(unsigned long frame)
 {
-    return ldq_le_phys(frame + offsetof(struct mfi_frame_header, context));
+    return ldq_le_phys(&address_space_memory,
+                       frame + offsetof(struct mfi_frame_header, context));
 }

 static bool megasas_frame_is_ieee_sgl(MegasasCmd *cmd)
@@ -105,7 +105,7 @@ static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i)
 {
     hwaddr pa;
     pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
-    return ldq_phys(pa);
+    return ldq_phys(&address_space_memory, pa);
 }

 static inline uint32_t vring_desc_len(hwaddr desc_pa, int i)
@@ -83,13 +83,13 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr);
  */
 void qemu_flush_coalesced_mmio_buffer(void);

-uint32_t ldub_phys(hwaddr addr);
+uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
 uint32_t lduw_le_phys(hwaddr addr);
 uint32_t lduw_be_phys(hwaddr addr);
 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
-uint64_t ldq_le_phys(hwaddr addr);
-uint64_t ldq_be_phys(hwaddr addr);
+uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
+uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
 void stb_phys(hwaddr addr, uint32_t val);
 void stw_le_phys(hwaddr addr, uint32_t val);
 void stw_be_phys(hwaddr addr, uint32_t val);
@@ -101,7 +101,7 @@ void stq_be_phys(hwaddr addr, uint64_t val);
 #ifdef NEED_CPU_H
 uint32_t lduw_phys(hwaddr addr);
 uint32_t ldl_phys(AddressSpace *as, hwaddr addr);
-uint64_t ldq_phys(hwaddr addr);
+uint64_t ldq_phys(AddressSpace *as, hwaddr addr);
 void stl_phys_notdirty(hwaddr addr, uint32_t val);
 void stw_phys(hwaddr addr, uint32_t val);
 void stl_phys(hwaddr addr, uint32_t val);
@@ -1445,7 +1445,7 @@ static void do_sum(Monitor *mon, const QDict *qdict)

     sum = 0;
     for(addr = start; addr < (start + size); addr++) {
-        uint8_t val = ldub_phys(addr);
+        uint8_t val = ldub_phys(&address_space_memory, addr);
         /* BSD sum algorithm ('sum' Unix command) */
         sum = (sum >> 1) | (sum << 15);
         sum += val;
@@ -213,6 +213,7 @@ static int get_physical_address(CPUAlphaState *env, target_ulong addr,
                                 int prot_need, int mmu_idx,
                                 target_ulong *pphys, int *pprot)
 {
+    CPUState *cs = ENV_GET_CPU(env);
     target_long saddr = addr;
     target_ulong phys = 0;
     target_ulong L1pte, L2pte, L3pte;
@@ -251,7 +252,7 @@ static int get_physical_address(CPUAlphaState *env, target_ulong addr,

     /* L1 page table read.  */
     index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
-    L1pte = ldq_phys(pt + index*8);
+    L1pte = ldq_phys(cs->as, pt + index*8);

     if (unlikely((L1pte & PTE_VALID) == 0)) {
         ret = MM_K_TNV;
@@ -264,7 +265,7 @@ static int get_physical_address(CPUAlphaState *env, target_ulong addr,

     /* L2 page table read.  */
     index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
-    L2pte = ldq_phys(pt + index*8);
+    L2pte = ldq_phys(cs->as, pt + index*8);

     if (unlikely((L2pte & PTE_VALID) == 0)) {
         ret = MM_K_TNV;
@@ -277,7 +278,7 @@ static int get_physical_address(CPUAlphaState *env, target_ulong addr,

     /* L3 page table read.  */
     index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
-    L3pte = ldq_phys(pt + index*8);
+    L3pte = ldq_phys(cs->as, pt + index*8);

     phys = L3pte >> 32 << TARGET_PAGE_BITS;
     if (unlikely((L3pte & PTE_VALID) == 0)) {
@@ -102,7 +102,7 @@ DEF_HELPER_2(hw_ret, void, env, i64)
 DEF_HELPER_3(call_pal, void, env, i64, i64)

 DEF_HELPER_2(ldl_phys, i64, env, i64)
-DEF_HELPER_1(ldq_phys, i64, i64)
+DEF_HELPER_2(ldq_phys, i64, env, i64)
 DEF_HELPER_2(ldl_l_phys, i64, env, i64)
 DEF_HELPER_2(ldq_l_phys, i64, env, i64)
 DEF_HELPER_2(stl_phys, void, i64, i64)
@@ -30,9 +30,10 @@ uint64_t helper_ldl_phys(CPUAlphaState *env, uint64_t p)
     return (int32_t)ldl_phys(cs->as, p);
 }

-uint64_t helper_ldq_phys(uint64_t p)
+uint64_t helper_ldq_phys(CPUAlphaState *env, uint64_t p)
 {
-    return ldq_phys(p);
+    CPUState *cs = ENV_GET_CPU(env);
+    return ldq_phys(cs->as, p);
 }

 uint64_t helper_ldl_l_phys(CPUAlphaState *env, uint64_t p)
@@ -44,8 +45,9 @@ uint64_t helper_ldl_l_phys(CPUAlphaState *env, uint64_t p)

 uint64_t helper_ldq_l_phys(CPUAlphaState *env, uint64_t p)
 {
+    CPUState *cs = ENV_GET_CPU(env);
     env->lock_addr = p;
-    return env->lock_value = ldq_phys(p);
+    return env->lock_value = ldq_phys(cs->as, p);
 }

 void helper_stl_phys(uint64_t p, uint64_t v)
@@ -77,10 +79,11 @@ uint64_t helper_stl_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)

 uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
 {
+    CPUState *cs = ENV_GET_CPU(env);
     uint64_t ret = 0;

     if (p == env->lock_addr) {
-        uint64_t old = ldq_phys(p);
+        uint64_t old = ldq_phys(cs->as, p);
         if (old == env->lock_value) {
             stq_phys(p, v);
             ret = 1;
@@ -2916,7 +2916,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
             break;
         case 0x1:
             /* Quadword physical access (hw_ldq/p) */
-            gen_helper_ldq_phys(cpu_ir[ra], addr);
+            gen_helper_ldq_phys(cpu_ir[ra], cpu_env, addr);
             break;
         case 0x2:
             /* Longword physical access with lock (hw_ldl_l/p) */
@@ -3036,6 +3036,7 @@ static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
                               hwaddr *phys_ptr, int *prot,
                               target_ulong *page_size_ptr)
 {
+    CPUState *cs = ENV_GET_CPU(env);
     /* Read an LPAE long-descriptor translation table. */
     MMUFaultType fault_type = translation_fault;
     uint32_t level = 1;
@@ -3124,7 +3125,7 @@ static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
         uint64_t descriptor;

         descaddr |= ((address >> (9 * (4 - level))) & 0xff8);
-        descriptor = ldq_phys(descaddr);
+        descriptor = ldq_phys(cs->as, descaddr);
         if (!(descriptor & 1) ||
             (!(descriptor & 2) && (level == 3))) {
             /* Invalid, or the Reserved level 3 encoding */
@@ -27,7 +27,7 @@ static void walk_pte(MemoryMappingList *list, AddressSpace *as,

     for (i = 0; i < 512; i++) {
         pte_addr = (pte_start_addr + i * 8) & a20_mask;
-        pte = ldq_phys(pte_addr);
+        pte = ldq_phys(as, pte_addr);
         if (!(pte & PG_PRESENT_MASK)) {
             /* not present */
             continue;
@@ -89,7 +89,7 @@ static void walk_pde(MemoryMappingList *list, AddressSpace *as,

     for (i = 0; i < 512; i++) {
         pde_addr = (pde_start_addr + i * 8) & a20_mask;
-        pde = ldq_phys(pde_addr);
+        pde = ldq_phys(as, pde_addr);
         if (!(pde & PG_PRESENT_MASK)) {
             /* not present */
             continue;
@@ -167,7 +167,7 @@ static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as,

     for (i = 0; i < 4; i++) {
         pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
-        pdpe = ldq_phys(pdpe_addr);
+        pdpe = ldq_phys(as, pdpe_addr);
         if (!(pdpe & PG_PRESENT_MASK)) {
             /* not present */
             continue;
@@ -192,7 +192,7 @@ static void walk_pdpe(MemoryMappingList *list, AddressSpace *as,

     for (i = 0; i < 512; i++) {
         pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
-        pdpe = ldq_phys(pdpe_addr);
+        pdpe = ldq_phys(as, pdpe_addr);
         if (!(pdpe & PG_PRESENT_MASK)) {
             /* not present */
             continue;
@@ -228,7 +228,7 @@ static void walk_pml4e(MemoryMappingList *list, AddressSpace *as,

     for (i = 0; i < 512; i++) {
         pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
-        pml4e = ldq_phys(pml4e_addr);
+        pml4e = ldq_phys(as, pml4e_addr);
         if (!(pml4e & PG_PRESENT_MASK)) {
             /* not present */
             continue;
@@ -563,7 +563,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,

         pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
             env->a20_mask;
-        pml4e = ldq_phys(pml4e_addr);
+        pml4e = ldq_phys(cs->as, pml4e_addr);
         if (!(pml4e & PG_PRESENT_MASK)) {
             error_code = 0;
             goto do_fault;
@@ -579,7 +579,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
         ptep = pml4e ^ PG_NX_MASK;
         pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
             env->a20_mask;
-        pdpe = ldq_phys(pdpe_addr);
+        pdpe = ldq_phys(cs->as, pdpe_addr);
         if (!(pdpe & PG_PRESENT_MASK)) {
             error_code = 0;
             goto do_fault;
@@ -599,7 +599,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
         /* XXX: load them when cr3 is loaded ? */
         pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
             env->a20_mask;
-        pdpe = ldq_phys(pdpe_addr);
+        pdpe = ldq_phys(cs->as, pdpe_addr);
         if (!(pdpe & PG_PRESENT_MASK)) {
             error_code = 0;
             goto do_fault;
@@ -609,7 +609,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,

     pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
         env->a20_mask;
-    pde = ldq_phys(pde_addr);
+    pde = ldq_phys(cs->as, pde_addr);
     if (!(pde & PG_PRESENT_MASK)) {
         error_code = 0;
         goto do_fault;
@@ -674,7 +674,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
         }
         pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
             env->a20_mask;
-        pte = ldq_phys(pte_addr);
+        pte = ldq_phys(cs->as, pte_addr);
         if (!(pte & PG_PRESENT_MASK)) {
             error_code = 0;
             goto do_fault;
@@ -920,13 +920,13 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)

         pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
             env->a20_mask;
-        pml4e = ldq_phys(pml4e_addr);
+        pml4e = ldq_phys(cs->as, pml4e_addr);
         if (!(pml4e & PG_PRESENT_MASK))
             return -1;

         pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                      (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
-        pdpe = ldq_phys(pdpe_addr);
+        pdpe = ldq_phys(cs->as, pdpe_addr);
         if (!(pdpe & PG_PRESENT_MASK))
             return -1;
     } else
@@ -934,14 +934,14 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     {
         pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
             env->a20_mask;
-        pdpe = ldq_phys(pdpe_addr);
+        pdpe = ldq_phys(cs->as, pdpe_addr);
         if (!(pdpe & PG_PRESENT_MASK))
             return -1;
     }

     pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                 (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
-    pde = ldq_phys(pde_addr);
+    pde = ldq_phys(cs->as, pde_addr);
     if (!(pde & PG_PRESENT_MASK)) {
         return -1;
     }
@@ -954,7 +954,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
         pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                     (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
         page_size = 4096;
-        pte = ldq_phys(pte_addr);
+        pte = ldq_phys(cs->as, pte_addr);
     }
     pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
     if (!(pte & PG_PRESENT_MASK))
@@ -188,46 +188,46 @@ void helper_rsm(CPUX86State *env)

     sm_state = env->smbase + 0x8000;
 #ifdef TARGET_X86_64
-    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
+    cpu_load_efer(env, ldq_phys(cs->as, sm_state + 0x7ed0));

     for (i = 0; i < 6; i++) {
         offset = 0x7e00 + i * 16;
         cpu_x86_load_seg_cache(env, i,
                                lduw_phys(sm_state + offset),
-                               ldq_phys(sm_state + offset + 8),
+                               ldq_phys(cs->as, sm_state + offset + 8),
                                ldl_phys(cs->as, sm_state + offset + 4),
                                (lduw_phys(sm_state + offset + 2) &
                                 0xf0ff) << 8);
     }

-    env->gdt.base = ldq_phys(sm_state + 0x7e68);
+    env->gdt.base = ldq_phys(cs->as, sm_state + 0x7e68);
     env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7e64);

     env->ldt.selector = lduw_phys(sm_state + 0x7e70);
-    env->ldt.base = ldq_phys(sm_state + 0x7e78);
+    env->ldt.base = ldq_phys(cs->as, sm_state + 0x7e78);
     env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7e74);
     env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

-    env->idt.base = ldq_phys(sm_state + 0x7e88);
+    env->idt.base = ldq_phys(cs->as, sm_state + 0x7e88);
     env->idt.limit = ldl_phys(cs->as, sm_state + 0x7e84);

     env->tr.selector = lduw_phys(sm_state + 0x7e90);
-    env->tr.base = ldq_phys(sm_state + 0x7e98);
+    env->tr.base = ldq_phys(cs->as, sm_state + 0x7e98);
     env->tr.limit = ldl_phys(cs->as, sm_state + 0x7e94);
     env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

-    env->regs[R_EAX] = ldq_phys(sm_state + 0x7ff8);
-    env->regs[R_ECX] = ldq_phys(sm_state + 0x7ff0);
-    env->regs[R_EDX] = ldq_phys(sm_state + 0x7fe8);
-    env->regs[R_EBX] = ldq_phys(sm_state + 0x7fe0);
-    env->regs[R_ESP] = ldq_phys(sm_state + 0x7fd8);
-    env->regs[R_EBP] = ldq_phys(sm_state + 0x7fd0);
-    env->regs[R_ESI] = ldq_phys(sm_state + 0x7fc8);
-    env->regs[R_EDI] = ldq_phys(sm_state + 0x7fc0);
+    env->regs[R_EAX] = ldq_phys(cs->as, sm_state + 0x7ff8);
+    env->regs[R_ECX] = ldq_phys(cs->as, sm_state + 0x7ff0);
+    env->regs[R_EDX] = ldq_phys(cs->as, sm_state + 0x7fe8);
+    env->regs[R_EBX] = ldq_phys(cs->as, sm_state + 0x7fe0);
+    env->regs[R_ESP] = ldq_phys(cs->as, sm_state + 0x7fd8);
+    env->regs[R_EBP] = ldq_phys(cs->as, sm_state + 0x7fd0);
+    env->regs[R_ESI] = ldq_phys(cs->as, sm_state + 0x7fc8);
+    env->regs[R_EDI] = ldq_phys(cs->as, sm_state + 0x7fc0);
     for (i = 8; i < 16; i++) {
-        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
+        env->regs[i] = ldq_phys(cs->as, sm_state + 0x7ff8 - i * 8);
     }
-    env->eip = ldq_phys(sm_state + 0x7f78);
+    env->eip = ldq_phys(cs->as, sm_state + 0x7f78);
     cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7f70),
                     ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
     env->dr[6] = ldl_phys(cs->as, sm_state + 0x7f68);
@@ -105,7 +105,7 @@ static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
     unsigned int flags;

     sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
-    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
+    sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base));
     sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit));
     flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
     sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
@@ -178,7 +178,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)

     /* load the interception bitmaps so we do not need to access the
        vmcb in svm mode */
-    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+    env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                       control.intercept));
     env->intercept_cr_read = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
@@ -200,15 +200,15 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     /* enable intercepts */
     env->hflags |= HF_SVMI_MASK;

-    env->tsc_offset = ldq_phys(env->vm_vmcb +
+    env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb +
                                offsetof(struct vmcb, control.tsc_offset));

-    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+    env->gdt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
     env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                               save.gdtr.limit));

-    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+    env->idt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
     env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                               save.idtr.limit));
@@ -216,13 +216,17 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     /* clear exit_info_2 so we behave like the real hardware */
     stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

-    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+    cpu_x86_update_cr0(env, ldq_phys(cs->as,
+                                     env->vm_vmcb + offsetof(struct vmcb,
                                                              save.cr0)));
-    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+    cpu_x86_update_cr4(env, ldq_phys(cs->as,
+                                     env->vm_vmcb + offsetof(struct vmcb,
                                                              save.cr4)));
-    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+    cpu_x86_update_cr3(env, ldq_phys(cs->as,
+                                     env->vm_vmcb + offsetof(struct vmcb,
                                                              save.cr3)));
-    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
+    env->cr[2] = ldq_phys(cs->as,
+                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
     int_ctl = ldl_phys(cs->as,
                        env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
     env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
@@ -235,9 +239,11 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     }

     cpu_load_efer(env,
-                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
+                  ldq_phys(cs->as,
+                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
     env->eflags = 0;
-    cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+    cpu_load_eflags(env, ldq_phys(cs->as,
+                                  env->vm_vmcb + offsetof(struct vmcb,
                                                           save.rflags)),
                     ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
     CC_OP = CC_OP_EFLAGS;
@@ -251,18 +257,25 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                        R_DS);

-    env->eip = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
+    env->eip = ldq_phys(cs->as,
+                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

-    env->regs[R_ESP] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
-    env->regs[R_EAX] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
-    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
-    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
-    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
+    env->regs[R_ESP] = ldq_phys(cs->as,
+                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
+    env->regs[R_EAX] = ldq_phys(cs->as,
+                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
+    env->dr[7] = ldq_phys(cs->as,
+                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
+    env->dr[6] = ldq_phys(cs->as,
+                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));
+    cpu_x86_set_cpl(env, ldub_phys(cs->as,
+                                   env->vm_vmcb + offsetof(struct vmcb,
                                                            save.cpl)));

     /* FIXME: guest state consistency checks */

-    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
+    switch (ldub_phys(cs->as,
+                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
     case TLB_CONTROL_DO_NOTHING:
         break;
     case TLB_CONTROL_FLUSH_ALL_ASID:
@@ -339,6 +352,7 @@ void helper_vmmcall(CPUX86State *env)

 void helper_vmload(CPUX86State *env, int aflag)
 {
+    CPUState *cs = ENV_GET_CPU(env);
     target_ulong addr;

     cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);
@@ -351,7 +365,7 @@ void helper_vmload(CPUX86State *env, int aflag)

     qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                   "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
-                  addr, ldq_phys(addr + offsetof(struct vmcb,
+                  addr, ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                  save.fs.base)),
                   env->segs[R_FS].base);

@@ -361,22 +375,24 @@ void helper_vmload(CPUX86State *env, int aflag)
     svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

 #ifdef TARGET_X86_64
-    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
+    env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                  save.kernel_gs_base));
-    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
-    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
-    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
+    env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar));
+    env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar));
+    env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask));
 #endif
-    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
-    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
-    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
+    env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star));
+    env->sysenter_cs = ldq_phys(cs->as,
+                                addr + offsetof(struct vmcb, save.sysenter_cs));
+    env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                  save.sysenter_esp));
-    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
+    env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                  save.sysenter_eip));
 }

 void helper_vmsave(CPUX86State *env, int aflag)
 {
+    CPUState *cs = ENV_GET_CPU(env);
     target_ulong addr;

     cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);
@@ -389,7 +405,8 @@ void helper_vmsave(CPUX86State *env, int aflag)

     qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                   "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
-                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
+                  addr, ldq_phys(cs->as,
+                                 addr + offsetof(struct vmcb, save.fs.base)),
                   env->segs[R_FS].base);

     svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
@@ -455,6 +472,8 @@ void helper_invlpga(CPUX86State *env, int aflag)
 void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                       uint64_t param)
 {
+    CPUState *cs = ENV_GET_CPU(env);
+
     if (likely(!(env->hflags & HF_SVMI_MASK))) {
         return;
     }
@@ -487,7 +506,7 @@ void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
     case SVM_EXIT_MSR:
         if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
             /* FIXME: this should be read in at vmrun (faster this way?) */
-            uint64_t addr = ldq_phys(env->vm_vmcb +
+            uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.msrpm_base_pa));
             uint32_t t0, t1;
@@ -513,7 +532,7 @@ void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                 t1 = 0;
                 break;
             }
-            if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
+            if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) {
                 helper_vmexit(env, type, param);
             }
         }
@@ -535,9 +554,10 @@ void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
 void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                          uint32_t next_eip_addend)
 {
+    CPUState *cs = ENV_GET_CPU(env);
     if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
         /* FIXME: this should be read in at vmrun (faster this way?) */
-        uint64_t addr = ldq_phys(env->vm_vmcb +
+        uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                  offsetof(struct vmcb, control.iopm_base_pa));
         uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

@@ -559,7 +579,7 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
     qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                   PRIx64 ", " TARGET_FMT_lx ")!\n",
                   exit_code, exit_info_1,
-                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+                  ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                    control.exit_info_2)),
                   env->eip);

@@ -625,29 +645,33 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
     cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
     env->tsc_offset = 0;

-    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+    env->gdt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
     env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                                save.gdtr.limit));

-    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+    env->idt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
     env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                                save.idtr.limit));

-    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+    cpu_x86_update_cr0(env, ldq_phys(cs->as,
+                                     env->vm_hsave + offsetof(struct vmcb,
                                                                save.cr0)) |
                        CR0_PE_MASK);
-    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+    cpu_x86_update_cr4(env, ldq_phys(cs->as,
+                                     env->vm_hsave + offsetof(struct vmcb,
                                                                save.cr4)));
-    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+    cpu_x86_update_cr3(env, ldq_phys(cs->as,
+                                     env->vm_hsave + offsetof(struct vmcb,
                                                                save.cr3)));
     /* we need to set the efer after the crs so the hidden flags get
        set properly */
-    cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+    cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                          save.efer)));
     env->eflags = 0;
-    cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+    cpu_load_eflags(env, ldq_phys(cs->as,
+                                  env->vm_hsave + offsetof(struct vmcb,
                                                            save.rflags)),
                     ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
     CC_OP = CC_OP_EFLAGS;
@@ -661,14 +685,17 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
     svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                        R_DS);

-    env->eip = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
-    env->regs[R_ESP] = ldq_phys(env->vm_hsave +
+    env->eip = ldq_phys(cs->as,
+                        env->vm_hsave + offsetof(struct vmcb, save.rip));
+    env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave +
                                 offsetof(struct vmcb, save.rsp));
-    env->regs[R_EAX] = ldq_phys(env->vm_hsave +
+    env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave +
                                 offsetof(struct vmcb, save.rax));

-    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
-    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
+    env->dr[6] = ldq_phys(cs->as,
+                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
+    env->dr[7] = ldq_phys(cs->as,
+                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

     /* other setups */
     cpu_x86_set_cpl(env, 0);
@@ -78,20 +78,23 @@ int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
 static inline target_ulong ppc_hash64_load_hpte0(CPUPPCState *env,
                                                  hwaddr pte_offset)
 {
+    CPUState *cs = ENV_GET_CPU(env);
     if (env->external_htab) {
         return ldq_p(env->external_htab + pte_offset);
     } else {
-        return ldq_phys(env->htab_base + pte_offset);
+        return ldq_phys(cs->as, env->htab_base + pte_offset);
     }
 }

 static inline target_ulong ppc_hash64_load_hpte1(CPUPPCState *env,
                                                  hwaddr pte_offset)
 {
+    CPUState *cs = ENV_GET_CPU(env);
     if (env->external_htab) {
         return ldq_p(env->external_htab + pte_offset + HASH_PTE_SIZE_64/2);
     } else {
-        return ldq_phys(env->htab_base + pte_offset + HASH_PTE_SIZE_64/2);
+        return ldq_phys(cs->as,
+                        env->htab_base + pte_offset + HASH_PTE_SIZE_64/2);
     }
 }

@@ -168,6 +168,7 @@ static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
                               uint64_t asc, uint64_t asce, int level,
                               target_ulong *raddr, int *flags, int rw)
 {
+    CPUState *cs = ENV_GET_CPU(env);
     uint64_t offs = 0;
     uint64_t origin;
     uint64_t new_asce;
@@ -218,7 +219,7 @@ static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
     /* XXX region protection flags */
     /* *flags &= ~PAGE_WRITE */

-    new_asce = ldq_phys(origin + offs);
+    new_asce = ldq_phys(cs->as, origin + offs);
     PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
                 __func__, origin, offs, new_asce);

@@ -955,6 +955,7 @@ uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
 static uint32_t mvc_asc(CPUS390XState *env, int64_t l, uint64_t a1,
                         uint64_t mode1, uint64_t a2, uint64_t mode2)
 {
+    CPUState *cs = ENV_GET_CPU(env);
     target_ulong src, dest;
     int flags, cc = 0, i;

@@ -984,7 +985,7 @@ static uint32_t mvc_asc(CPUS390XState *env, int64_t l, uint64_t a1,
             mvc_asc(env, l - i, a1 + i, mode1, a2 + i, mode2);
             break;
         }
-        stb_phys(dest + i, ldub_phys(src + i));
+        stb_phys(dest + i, ldub_phys(cs->as, src + i));
     }

     return cc;
@@ -609,7 +609,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
     case 0x1c: /* LEON MMU passthrough */
         switch (size) {
         case 1:
-            ret = ldub_phys(addr);
+            ret = ldub_phys(cs->as, addr);
             break;
         case 2:
             ret = lduw_phys(addr);
@@ -619,14 +619,14 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
             ret = ldl_phys(cs->as, addr);
             break;
         case 8:
-            ret = ldq_phys(addr);
+            ret = ldq_phys(cs->as, addr);
             break;
         }
         break;
     case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
         switch (size) {
         case 1:
-            ret = ldub_phys((hwaddr)addr
+            ret = ldub_phys(cs->as, (hwaddr)addr
                             | ((hwaddr)(asi & 0xf) << 32));
             break;
         case 2:
@@ -639,7 +639,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
                             | ((hwaddr)(asi & 0xf) << 32));
             break;
         case 8:
-            ret = ldq_phys((hwaddr)addr
+            ret = ldq_phys(cs->as, (hwaddr)addr
                            | ((hwaddr)(asi & 0xf) << 32));
             break;
         }
@@ -716,6 +716,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
 void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
                    int size)
 {
+    CPUState *cs = ENV_GET_CPU(env);
     helper_check_align(env, addr, size - 1);
     switch (asi) {
     case 2: /* SuperSparc MXCC registers and Leon3 cache control */
@@ -772,13 +773,17 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
                           "%08x: unimplemented access size: %d\n", addr,
                           size);
            }
-            env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
+            env->mxccdata[0] = ldq_phys(cs->as,
+                                        (env->mxccregs[0] & 0xffffffffULL) +
                                         0);
-            env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
+            env->mxccdata[1] = ldq_phys(cs->as,
+                                        (env->mxccregs[0] & 0xffffffffULL) +
                                         8);
-            env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
+            env->mxccdata[2] = ldq_phys(cs->as,
+                                        (env->mxccregs[0] & 0xffffffffULL) +
                                         16);
-            env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
+            env->mxccdata[3] = ldq_phys(cs->as,
+                                        (env->mxccregs[0] & 0xffffffffULL) +
                                         24);
             break;
         case 0x01c00200: /* MXCC stream destination */
@@ -1434,7 +1439,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
     {
         switch (size) {
         case 1:
-            ret = ldub_phys(addr);
+            ret = ldub_phys(cs->as, addr);
             break;
         case 2:
             ret = lduw_phys(addr);
@@ -1444,7 +1449,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
             break;
         default:
         case 8:
-            ret = ldq_phys(addr);
+            ret = ldq_phys(cs->as, addr);
             break;
         }
         break;