Merge remote-tracking branch 'afaerber/qom-cpu' into staging
* afaerber/qom-cpu:
  target-i386: Use switch in check_hw_breakpoints()
  target-i386: Avoid goto in hw_breakpoint_insert()
  target-i386: Introduce hw_{local,global}_breakpoint_enabled()
  target-i386: Define DR7 bit field constants
  target-i386: Move kvm_check_features_against_host() check to realize time
  target-i386: cpu_x86_register() consolidate freeing resources
  target-i386: Move setting defaults out of cpu_x86_parse_featurestr()
  target-i386: check/enforce: Check all feature words
  target-i386/cpu.c: Add feature name array for ext4_features
  target-i386: kvm_check_features_against_host(): Use feature_word_info
  target-i386/cpu: Introduce FeatureWord typedefs
  target-i386: Disable kvm_mmu by default
  kvm: Add fake KVM constants to avoid #ifdefs on KVM-specific code
  exec: Return CPUState from qemu_get_cpu()
  xen: Simplify halting of first CPU
  kvm: Pass CPUState to kvm_init_vcpu()
  cpu: Move cpu_index field to CPUState
  cpu: Move numa_node field to CPUState
  target-mips: Clean up mips_cpu_map_tc() documentation
  cpu: Move nr_{cores,threads} fields to CPUState

Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
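The common thread of this series is that per-CPU bookkeeping (cpu_index, numa_node, nr_cores/nr_threads) moves from CPUArchState into the QOM CPUState object, and lookups such as qemu_get_cpu() now return a CPUState. The sketch below is not QEMU code: the structure, list and function name (qemu_get_cpu_sketch) are minimal stand-ins, shown only to illustrate the lookup-by-index pattern the new qemu_get_cpu() signature enables.

/* Standalone illustrative sketch; simplified stand-ins, not QEMU's types. */
#include <stdio.h>

typedef struct CPUState {
    int cpu_index;              /* informative index, now kept on CPUState */
    int numa_node;
    struct CPUState *next_cpu;  /* stand-in for the global CPU list */
} CPUState;

static CPUState *first_cpu;

/* New-style lookup: walk the CPU list and return the CPUState itself,
 * or NULL when no CPU has the requested index. */
static CPUState *qemu_get_cpu_sketch(int index)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }
    return NULL;
}

int main(void)
{
    CPUState cpus[2] = {
        { .cpu_index = 0 },
        { .cpu_index = 1 },
    };

    cpus[0].next_cpu = &cpus[1];   /* cpus[1].next_cpu stays NULL */
    first_cpu = &cpus[0];

    CPUState *cpu = qemu_get_cpu_sketch(1);
    if (cpu != NULL) {
        printf("CPU #%d found\n", cpu->cpu_index);
    }
    return 0;
}

Callers that previously compared env->cpu_index against an index now compare cpu->cpu_index, which is what most of the hunks below do.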
commit c94bf1c107

 cpus.c | 24
@@ -390,13 +390,15 @@ void hw_error(const char *fmt, ...)
 {
     va_list ap;
     CPUArchState *env;
+    CPUState *cpu;
 
     va_start(ap, fmt);
     fprintf(stderr, "qemu: hardware error: ");
     vfprintf(stderr, fmt, ap);
     fprintf(stderr, "\n");
-    for(env = first_cpu; env != NULL; env = env->next_cpu) {
-        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
+    for (env = first_cpu; env != NULL; env = env->next_cpu) {
+        cpu = ENV_GET_CPU(env);
+        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
         cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU);
     }
     va_end(ap);
@@ -740,7 +742,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
     cpu->thread_id = qemu_get_thread_id();
     cpu_single_env = env;
 
-    r = kvm_init_vcpu(env);
+    r = kvm_init_vcpu(cpu);
     if (r < 0) {
         fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
         exit(1);
@@ -1041,8 +1043,8 @@ void qemu_init_vcpu(void *_env)
     CPUArchState *env = _env;
     CPUState *cpu = ENV_GET_CPU(env);
 
-    env->nr_cores = smp_cores;
-    env->nr_threads = smp_threads;
+    cpu->nr_cores = smp_cores;
+    cpu->nr_threads = smp_threads;
     cpu->stopped = true;
     if (kvm_enabled()) {
         qemu_kvm_start_vcpu(env);
@@ -1160,12 +1162,14 @@ static void tcg_exec_all(void)
 void set_numa_modes(void)
 {
     CPUArchState *env;
+    CPUState *cpu;
     int i;
 
     for (env = first_cpu; env != NULL; env = env->next_cpu) {
+        cpu = ENV_GET_CPU(env);
        for (i = 0; i < nb_numa_nodes; i++) {
-            if (test_bit(env->cpu_index, node_cpumask[i])) {
-                env->numa_node = i;
+            if (test_bit(cpu->cpu_index, node_cpumask[i])) {
+                cpu->numa_node = i;
             }
         }
     }
@@ -1213,7 +1217,7 @@ CpuInfoList *qmp_query_cpus(Error **errp)
 
         info = g_malloc0(sizeof(*info));
         info->value = g_malloc0(sizeof(*info->value));
-        info->value->CPU = env->cpu_index;
+        info->value->CPU = cpu->cpu_index;
         info->value->current = (env == first_cpu);
         info->value->halted = env->halted;
         info->value->thread_id = cpu->thread_id;
@@ -1251,6 +1255,7 @@ void qmp_memsave(int64_t addr, int64_t size, const char *filename,
     FILE *f;
     uint32_t l;
     CPUArchState *env;
+    CPUState *cpu;
     uint8_t buf[1024];
 
     if (!has_cpu) {
@@ -1258,7 +1263,8 @@ void qmp_memsave(int64_t addr, int64_t size, const char *filename,
     }
 
     for (env = first_cpu; env; env = env->next_cpu) {
-        if (cpu_index == env->cpu_index) {
+        cpu = ENV_GET_CPU(env);
+        if (cpu_index == cpu->cpu_index) {
             break;
         }
     }
 exec.c | 19
@@ -247,24 +247,25 @@ static const VMStateDescription vmstate_cpu_common = {
 };
 #endif
 
-CPUArchState *qemu_get_cpu(int cpu)
+CPUState *qemu_get_cpu(int index)
 {
     CPUArchState *env = first_cpu;
+    CPUState *cpu = NULL;
 
     while (env) {
-        if (env->cpu_index == cpu)
+        cpu = ENV_GET_CPU(env);
+        if (cpu->cpu_index == index) {
             break;
+        }
         env = env->next_cpu;
     }
 
-    return env;
+    return cpu;
 }
 
 void cpu_exec_init(CPUArchState *env)
 {
 #ifndef CONFIG_USER_ONLY
     CPUState *cpu = ENV_GET_CPU(env);
 #endif
     CPUArchState **penv;
     int cpu_index;
 
@@ -278,8 +279,8 @@ void cpu_exec_init(CPUArchState *env)
         penv = &(*penv)->next_cpu;
         cpu_index++;
     }
-    env->cpu_index = cpu_index;
-    env->numa_node = 0;
+    cpu->cpu_index = cpu_index;
+    cpu->numa_node = 0;
     QTAILQ_INIT(&env->breakpoints);
     QTAILQ_INIT(&env->watchpoints);
 #ifndef CONFIG_USER_ONLY
@@ -531,7 +532,6 @@ CPUArchState *cpu_copy(CPUArchState *env)
 {
     CPUArchState *new_env = cpu_init(env->cpu_model_str);
     CPUArchState *next_cpu = new_env->next_cpu;
-    int cpu_index = new_env->cpu_index;
 #if defined(TARGET_HAS_ICE)
     CPUBreakpoint *bp;
     CPUWatchpoint *wp;
@@ -539,9 +539,8 @@ CPUArchState *cpu_copy(CPUArchState *env)
 
     memcpy(new_env, env, sizeof(CPUArchState));
 
-    /* Preserve chaining and index. */
+    /* Preserve chaining. */
     new_env->next_cpu = next_cpu;
-    new_env->cpu_index = cpu_index;
 
     /* Clone all break/watchpoints.
        Note: Once we support ptrace with hw-debug register access, make sure
@@ -2401,9 +2401,10 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
         thread = strtoull(p+16, (char **)&p, 16);
         env = find_cpu(thread);
         if (env != NULL) {
+            CPUState *cpu = ENV_GET_CPU(env);
             cpu_synchronize_state(env);
             len = snprintf((char *)mem_buf, sizeof(mem_buf),
-                           "CPU#%d [%s]", env->cpu_index,
+                           "CPU#%d [%s]", cpu->cpu_index,
                            env->halted ? "halted " : "running");
             memtohex(buf, mem_buf, len);
             put_packet(s, buf);
@@ -75,6 +75,7 @@ static uint64_t cchip_read(void *opaque, hwaddr addr, unsigned size)
 {
     CPUAlphaState *env = cpu_single_env;
     TyphoonState *s = opaque;
+    CPUState *cpu;
     uint64_t ret = 0;
 
     if (addr & 4) {
@@ -95,7 +96,8 @@ static uint64_t cchip_read(void *opaque, hwaddr addr, unsigned size)
 
     case 0x0080:
         /* MISC: Miscellaneous Register. */
-        ret = s->cchip.misc | (env->cpu_index & 3);
+        cpu = ENV_GET_CPU(env);
+        ret = s->cchip.misc | (cpu->cpu_index & 3);
         break;
 
     case 0x00c0:
@@ -39,7 +39,8 @@ static const uint8_t gic_id[] = {
 static inline int gic_get_current_cpu(GICState *s)
 {
     if (s->num_cpu > 1) {
-        return cpu_single_env->cpu_index;
+        CPUState *cpu = ENV_GET_CPU(cpu_single_env);
+        return cpu->cpu_index;
     }
     return 0;
 }
@@ -49,11 +49,13 @@ typedef struct {
 
 static inline int get_current_cpu(arm_mptimer_state *s)
 {
-    if (cpu_single_env->cpu_index >= s->num_cpu) {
+    CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
+
+    if (cpu_single_cpu->cpu_index >= s->num_cpu) {
         hw_error("arm_mptimer: num-cpu %d but this cpu is %d!\n",
-                 s->num_cpu, cpu_single_env->cpu_index);
+                 s->num_cpu, cpu_single_cpu->cpu_index);
     }
-    return cpu_single_env->cpu_index;
+    return cpu_single_cpu->cpu_index;
 }
 
 static inline void timerblock_update_irq(timerblock *tb)
@@ -743,10 +743,13 @@ static int64_t load_kernel (void)
     return kernel_entry;
 }
 
-static void malta_mips_config(CPUMIPSState *env)
+static void malta_mips_config(MIPSCPU *cpu)
 {
+    CPUMIPSState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+
     env->mvp->CP0_MVPConf0 |= ((smp_cpus - 1) << CP0MVPC0_PVPE) |
-                         ((smp_cpus * env->nr_threads - 1) << CP0MVPC0_PTC);
+                         ((smp_cpus * cs->nr_threads - 1) << CP0MVPC0_PTC);
 }
 
 static void main_cpu_reset(void *opaque)
@@ -763,7 +766,7 @@ static void main_cpu_reset(void *opaque)
         env->CP0_Status &= ~((1 << CP0St_BEV) | (1 << CP0St_ERL));
     }
 
-    malta_mips_config(env);
+    malta_mips_config(cpu);
 }
 
 static void cpu_request_exit(void *opaque, int irq, int level)
|
@@ -153,11 +153,14 @@ static const int debug_openpic = 0;
 
 static int get_current_cpu(void)
 {
+    CPUState *cpu_single_cpu;
+
     if (!cpu_single_env) {
         return -1;
     }
 
-    return cpu_single_env->cpu_index;
+    cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
+    return cpu_single_cpu->cpu_index;
 }
 
 static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
|
@ -239,25 +239,28 @@ static int ppce500_load_device_tree(CPUPPCState *env,
|
||||
/* We need to generate the cpu nodes in reverse order, so Linux can pick
|
||||
the first node as boot node and be happy */
|
||||
for (i = smp_cpus - 1; i >= 0; i--) {
|
||||
CPUState *cpu = NULL;
|
||||
char cpu_name[128];
|
||||
uint64_t cpu_release_addr = MPC8544_SPIN_BASE + (i * 0x20);
|
||||
|
||||
for (env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
if (env->cpu_index == i) {
|
||||
cpu = ENV_GET_CPU(env);
|
||||
if (cpu->cpu_index == i) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!env) {
|
||||
if (cpu == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
snprintf(cpu_name, sizeof(cpu_name), "/cpus/PowerPC,8544@%x", env->cpu_index);
|
||||
snprintf(cpu_name, sizeof(cpu_name), "/cpus/PowerPC,8544@%x",
|
||||
cpu->cpu_index);
|
||||
qemu_devtree_add_subnode(fdt, cpu_name);
|
||||
qemu_devtree_setprop_cell(fdt, cpu_name, "clock-frequency", clock_freq);
|
||||
qemu_devtree_setprop_cell(fdt, cpu_name, "timebase-frequency", tb_freq);
|
||||
qemu_devtree_setprop_string(fdt, cpu_name, "device_type", "cpu");
|
||||
qemu_devtree_setprop_cell(fdt, cpu_name, "reg", env->cpu_index);
|
||||
qemu_devtree_setprop_cell(fdt, cpu_name, "reg", cpu->cpu_index);
|
||||
qemu_devtree_setprop_cell(fdt, cpu_name, "d-cache-line-size",
|
||||
env->dcache_line_size);
|
||||
qemu_devtree_setprop_cell(fdt, cpu_name, "i-cache-line-size",
|
||||
@ -265,7 +268,7 @@ static int ppce500_load_device_tree(CPUPPCState *env,
|
||||
qemu_devtree_setprop_cell(fdt, cpu_name, "d-cache-size", 0x8000);
|
||||
qemu_devtree_setprop_cell(fdt, cpu_name, "i-cache-size", 0x8000);
|
||||
qemu_devtree_setprop_cell(fdt, cpu_name, "bus-frequency", 0);
|
||||
if (env->cpu_index) {
|
||||
if (cpu->cpu_index) {
|
||||
qemu_devtree_setprop_string(fdt, cpu_name, "status", "disabled");
|
||||
qemu_devtree_setprop_string(fdt, cpu_name, "enable-method", "spin-table");
|
||||
qemu_devtree_setprop_u64(fdt, cpu_name, "cpu-release-addr",
|
||||
@ -479,6 +482,7 @@ void ppce500_init(PPCE500Params *params)
|
||||
irqs[0] = g_malloc0(smp_cpus * sizeof(qemu_irq) * OPENPIC_OUTPUT_NB);
|
||||
for (i = 0; i < smp_cpus; i++) {
|
||||
PowerPCCPU *cpu;
|
||||
CPUState *cs;
|
||||
qemu_irq *input;
|
||||
|
||||
cpu = cpu_ppc_init(params->cpu_model);
|
||||
@ -487,6 +491,7 @@ void ppce500_init(PPCE500Params *params)
|
||||
exit(1);
|
||||
}
|
||||
env = &cpu->env;
|
||||
cs = CPU(cpu);
|
||||
|
||||
if (!firstenv) {
|
||||
firstenv = env;
|
||||
@ -496,7 +501,7 @@ void ppce500_init(PPCE500Params *params)
|
||||
input = (qemu_irq *)env->irq_inputs;
|
||||
irqs[i][OPENPIC_OUTPUT_INT] = input[PPCE500_INPUT_INT];
|
||||
irqs[i][OPENPIC_OUTPUT_CINT] = input[PPCE500_INPUT_CINT];
|
||||
env->spr[SPR_BOOKE_PIR] = env->cpu_index = i;
|
||||
env->spr[SPR_BOOKE_PIR] = cs->cpu_index = i;
|
||||
env->mpic_iack = MPC8544_CCSRBAR_BASE +
|
||||
MPC8544_MPIC_REGS_OFFSET + 0x200A0;
|
||||
|
||||
|
@ -124,21 +124,23 @@ static void spin_write(void *opaque, hwaddr addr, uint64_t value,
|
||||
SpinState *s = opaque;
|
||||
int env_idx = addr / sizeof(SpinInfo);
|
||||
CPUPPCState *env;
|
||||
CPUState *cpu = NULL;
|
||||
SpinInfo *curspin = &s->spin[env_idx];
|
||||
uint8_t *curspin_p = (uint8_t*)curspin;
|
||||
|
||||
for (env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
if (env->cpu_index == env_idx) {
|
||||
cpu = CPU(ppc_env_get_cpu(env));
|
||||
if (cpu->cpu_index == env_idx) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!env) {
|
||||
if (cpu == NULL) {
|
||||
/* Unknown CPU */
|
||||
return;
|
||||
}
|
||||
|
||||
if (!env->cpu_index) {
|
||||
if (cpu->cpu_index == 0) {
|
||||
/* primary CPU doesn't spin */
|
||||
return;
|
||||
}
|
||||
|
 hw/pxa.h | 2
@ -69,7 +69,7 @@ DeviceState *pxa2xx_pic_init(hwaddr base, ARMCPU *cpu);
|
||||
|
||||
/* pxa2xx_gpio.c */
|
||||
DeviceState *pxa2xx_gpio_init(hwaddr base,
|
||||
CPUARMState *env, DeviceState *pic, int lines);
|
||||
ARMCPU *cpu, DeviceState *pic, int lines);
|
||||
void pxa2xx_gpio_read_notifier(DeviceState *dev, qemu_irq handler);
|
||||
|
||||
/* pxa2xx_dma.c */
|
||||
|
@ -2045,7 +2045,7 @@ PXA2xxState *pxa270_init(MemoryRegion *address_space,
|
||||
qdev_get_gpio_in(s->pic, PXA27X_PIC_OST_4_11),
|
||||
NULL);
|
||||
|
||||
s->gpio = pxa2xx_gpio_init(0x40e00000, &s->cpu->env, s->pic, 121);
|
||||
s->gpio = pxa2xx_gpio_init(0x40e00000, s->cpu, s->pic, 121);
|
||||
|
||||
dinfo = drive_get(IF_SD, 0, 0);
|
||||
if (!dinfo) {
|
||||
@ -2176,7 +2176,7 @@ PXA2xxState *pxa255_init(MemoryRegion *address_space, unsigned int sdram_size)
|
||||
qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 3),
|
||||
NULL);
|
||||
|
||||
s->gpio = pxa2xx_gpio_init(0x40e00000, &s->cpu->env, s->pic, 85);
|
||||
s->gpio = pxa2xx_gpio_init(0x40e00000, s->cpu, s->pic, 85);
|
||||
|
||||
dinfo = drive_get(IF_SD, 0, 0);
|
||||
if (!dinfo) {
|
||||
|
@ -250,13 +250,14 @@ static const MemoryRegionOps pxa_gpio_ops = {
|
||||
};
|
||||
|
||||
DeviceState *pxa2xx_gpio_init(hwaddr base,
|
||||
CPUARMState *env, DeviceState *pic, int lines)
|
||||
ARMCPU *cpu, DeviceState *pic, int lines)
|
||||
{
|
||||
CPUState *cs = CPU(cpu);
|
||||
DeviceState *dev;
|
||||
|
||||
dev = qdev_create(NULL, "pxa2xx-gpio");
|
||||
qdev_prop_set_int32(dev, "lines", lines);
|
||||
qdev_prop_set_int32(dev, "ncpu", env->cpu_index);
|
||||
qdev_prop_set_int32(dev, "ncpu", cs->cpu_index);
|
||||
qdev_init_nofail(dev);
|
||||
|
||||
sysbus_mmio_map(sysbus_from_qdev(dev), 0, base);
|
||||
@ -276,7 +277,7 @@ static int pxa2xx_gpio_initfn(SysBusDevice *dev)
|
||||
|
||||
s = FROM_SYSBUS(PXA2xxGPIOInfo, dev);
|
||||
|
||||
s->cpu = arm_env_get_cpu(qemu_get_cpu(s->ncpu));
|
||||
s->cpu = ARM_CPU(qemu_get_cpu(s->ncpu));
|
||||
|
||||
qdev_init_gpio_in(&dev->qdev, pxa2xx_gpio_set, s->lines);
|
||||
qdev_init_gpio_out(&dev->qdev, s->handler, s->lines);
|
||||
|
 hw/spapr.c | 13
@ -140,6 +140,7 @@ static int spapr_fixup_cpu_dt(void *fdt, sPAPREnvironment *spapr)
|
||||
{
|
||||
int ret = 0, offset;
|
||||
CPUPPCState *env;
|
||||
CPUState *cpu;
|
||||
char cpu_model[32];
|
||||
int smt = kvmppc_smt_threads();
|
||||
uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
|
||||
@ -147,19 +148,20 @@ static int spapr_fixup_cpu_dt(void *fdt, sPAPREnvironment *spapr)
|
||||
assert(spapr->cpu_model);
|
||||
|
||||
for (env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
cpu = CPU(ppc_env_get_cpu(env));
|
||||
uint32_t associativity[] = {cpu_to_be32(0x5),
|
||||
cpu_to_be32(0x0),
|
||||
cpu_to_be32(0x0),
|
||||
cpu_to_be32(0x0),
|
||||
cpu_to_be32(env->numa_node),
|
||||
cpu_to_be32(env->cpu_index)};
|
||||
cpu_to_be32(cpu->numa_node),
|
||||
cpu_to_be32(cpu->cpu_index)};
|
||||
|
||||
if ((env->cpu_index % smt) != 0) {
|
||||
if ((cpu->cpu_index % smt) != 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
snprintf(cpu_model, 32, "/cpus/%s@%x", spapr->cpu_model,
|
||||
env->cpu_index);
|
||||
cpu->cpu_index);
|
||||
|
||||
offset = fdt_path_offset(fdt, cpu_model);
|
||||
if (offset < 0) {
|
||||
@ -308,7 +310,8 @@ static void *spapr_create_fdt_skel(const char *cpu_model,
|
||||
spapr->cpu_model = g_strdup(modelname);
|
||||
|
||||
for (env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
int index = env->cpu_index;
|
||||
CPUState *cpu = CPU(ppc_env_get_cpu(env));
|
||||
int index = cpu->cpu_index;
|
||||
uint32_t servers_prop[smp_threads];
|
||||
uint32_t gservers_prop[smp_threads * 2];
|
||||
char *nodename;
|
||||
|
@ -467,9 +467,11 @@ static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPREnvironment *spapr,
|
||||
target_ulong vpa = args[2];
|
||||
target_ulong ret = H_PARAMETER;
|
||||
CPUPPCState *tenv;
|
||||
CPUState *tcpu;
|
||||
|
||||
for (tenv = first_cpu; tenv; tenv = tenv->next_cpu) {
|
||||
if (tenv->cpu_index == procno) {
|
||||
tcpu = CPU(ppc_env_get_cpu(tenv));
|
||||
if (tcpu->cpu_index == procno) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -131,6 +131,7 @@ static void rtas_query_cpu_stopped_state(sPAPREnvironment *spapr,
|
||||
{
|
||||
target_ulong id;
|
||||
CPUPPCState *env;
|
||||
CPUState *cpu;
|
||||
|
||||
if (nargs != 1 || nret != 2) {
|
||||
rtas_st(rets, 0, -3);
|
||||
@ -139,7 +140,8 @@ static void rtas_query_cpu_stopped_state(sPAPREnvironment *spapr,
|
||||
|
||||
id = rtas_ld(args, 0);
|
||||
for (env = first_cpu; env; env = env->next_cpu) {
|
||||
if (env->cpu_index != id) {
|
||||
cpu = CPU(ppc_env_get_cpu(env));
|
||||
if (cpu->cpu_index != id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -176,9 +178,9 @@ static void rtas_start_cpu(sPAPREnvironment *spapr,
|
||||
r3 = rtas_ld(args, 2);
|
||||
|
||||
for (env = first_cpu; env; env = env->next_cpu) {
|
||||
cpu = ENV_GET_CPU(env);
|
||||
cpu = CPU(ppc_env_get_cpu(env));
|
||||
|
||||
if (env->cpu_index != id) {
|
||||
if (cpu->cpu_index != id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
 hw/xics.c | 22
@ -357,10 +357,10 @@ void xics_set_irq_type(struct icp_state *icp, int irq, bool lsi)
|
||||
static target_ulong h_cppr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
|
||||
target_ulong opcode, target_ulong *args)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
CPUState *cs = CPU(cpu);
|
||||
target_ulong cppr = args[0];
|
||||
|
||||
icp_set_cppr(spapr->icp, env->cpu_index, cppr);
|
||||
icp_set_cppr(spapr->icp, cs->cpu_index, cppr);
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
@ -376,14 +376,13 @@ static target_ulong h_ipi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
|
||||
|
||||
icp_set_mfrr(spapr->icp, server, mfrr);
|
||||
return H_SUCCESS;
|
||||
|
||||
}
|
||||
|
||||
static target_ulong h_xirr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
|
||||
target_ulong opcode, target_ulong *args)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
uint32_t xirr = icp_accept(spapr->icp->ss + env->cpu_index);
|
||||
CPUState *cs = CPU(cpu);
|
||||
uint32_t xirr = icp_accept(spapr->icp->ss + cs->cpu_index);
|
||||
|
||||
args[0] = xirr;
|
||||
return H_SUCCESS;
|
||||
@ -392,10 +391,10 @@ static target_ulong h_xirr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
|
||||
static target_ulong h_eoi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
|
||||
target_ulong opcode, target_ulong *args)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
CPUState *cs = CPU(cpu);
|
||||
target_ulong xirr = args[0];
|
||||
|
||||
icp_eoi(spapr->icp, env->cpu_index, xirr);
|
||||
icp_eoi(spapr->icp, cs->cpu_index, xirr);
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
@ -525,14 +524,16 @@ static void xics_reset(void *opaque)
|
||||
struct icp_state *xics_system_init(int nr_irqs)
|
||||
{
|
||||
CPUPPCState *env;
|
||||
CPUState *cpu;
|
||||
int max_server_num;
|
||||
struct icp_state *icp;
|
||||
struct ics_state *ics;
|
||||
|
||||
max_server_num = -1;
|
||||
for (env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
if (env->cpu_index > max_server_num) {
|
||||
max_server_num = env->cpu_index;
|
||||
cpu = CPU(ppc_env_get_cpu(env));
|
||||
if (cpu->cpu_index > max_server_num) {
|
||||
max_server_num = cpu->cpu_index;
|
||||
}
|
||||
}
|
||||
|
||||
@ -541,7 +542,8 @@ struct icp_state *xics_system_init(int nr_irqs)
|
||||
icp->ss = g_malloc0(icp->nr_servers*sizeof(struct icp_server_state));
|
||||
|
||||
for (env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
struct icp_server_state *ss = &icp->ss[env->cpu_index];
|
||||
cpu = CPU(ppc_env_get_cpu(env));
|
||||
struct icp_server_state *ss = &icp->ss[cpu->cpu_index];
|
||||
|
||||
switch (PPC_INPUT(env)) {
|
||||
case PPC_FLAGS_INPUT_POWER7:
|
||||
|
@ -354,7 +354,6 @@ int page_check_range(target_ulong start, target_ulong len, int flags);
|
||||
#endif
|
||||
|
||||
CPUArchState *cpu_copy(CPUArchState *env);
|
||||
CPUArchState *qemu_get_cpu(int cpu);
|
||||
|
||||
#define CPU_DUMP_CODE 0x00010000
|
||||
#define CPU_DUMP_FPU 0x00020000 /* dump FPU register state, not just integer */
|
||||
|
@ -193,11 +193,7 @@ typedef struct CPUWatchpoint {
|
||||
int exception_index; \
|
||||
\
|
||||
CPUArchState *next_cpu; /* next CPU sharing TB cache */ \
|
||||
int cpu_index; /* CPU index (informative) */ \
|
||||
uint32_t host_tid; /* host thread ID */ \
|
||||
int numa_node; /* NUMA node this cpu is belonging to */ \
|
||||
int nr_cores; /* number of cores within this CPU package */ \
|
||||
int nr_threads;/* number of threads within this CPU */ \
|
||||
int running; /* Nonzero if cpu is currently running(usermode). */ \
|
||||
/* user data */ \
|
||||
void *opaque; \
|
||||
|
@ -35,7 +35,8 @@ static inline int cpu_index(CPUArchState *env)
|
||||
#if defined(CONFIG_USER_ONLY) && defined(CONFIG_USE_NPTL)
|
||||
return env->host_tid;
|
||||
#else
|
||||
return env->cpu_index + 1;
|
||||
CPUState *cpu = ENV_GET_CPU(env);
|
||||
return cpu->cpu_index + 1;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -57,6 +57,10 @@ struct kvm_run;
|
||||
|
||||
/**
|
||||
* CPUState:
|
||||
* @cpu_index: CPU index (informative).
|
||||
* @nr_cores: Number of cores within this CPU package.
|
||||
* @nr_threads: Number of threads within this CPU.
|
||||
* @numa_node: NUMA node this CPU is belonging to.
|
||||
* @created: Indicates whether the CPU thread has been successfully created.
|
||||
* @stop: Indicates a pending stop request.
|
||||
* @stopped: Indicates the CPU has been artificially stopped.
|
||||
@ -69,6 +73,10 @@ struct CPUState {
|
||||
DeviceState parent_obj;
|
||||
/*< public >*/
|
||||
|
||||
int nr_cores;
|
||||
int nr_threads;
|
||||
int numa_node;
|
||||
|
||||
struct QemuThread *thread;
|
||||
#ifdef _WIN32
|
||||
HANDLE hThread;
|
||||
@ -89,6 +97,7 @@ struct CPUState {
|
||||
struct kvm_run *kvm_run;
|
||||
|
||||
/* TODO Move common fields from CPUArchState here. */
|
||||
int cpu_index; /* used by alpha TCG */
|
||||
};
|
||||
|
||||
|
||||
@ -147,5 +156,15 @@ bool cpu_is_stopped(CPUState *cpu);
|
||||
*/
|
||||
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
|
||||
|
||||
/**
|
||||
* qemu_get_cpu:
|
||||
* @index: The CPUState@cpu_index value of the CPU to obtain.
|
||||
*
|
||||
* Gets a CPU matching @index.
|
||||
*
|
||||
* Returns: The CPU or %NULL if there is no matching CPU.
|
||||
*/
|
||||
CPUState *qemu_get_cpu(int index);
|
||||
|
||||
|
||||
#endif
|
||||
|
@ -17,10 +17,25 @@
|
||||
#include <errno.h>
|
||||
#include "config-host.h"
|
||||
#include "qemu/queue.h"
|
||||
#include "qom/cpu.h"
|
||||
|
||||
#ifdef CONFIG_KVM
|
||||
#include <linux/kvm.h>
|
||||
#include <linux/kvm_para.h>
|
||||
#else
|
||||
/* These constants must never be used at runtime if kvm_enabled() is false.
|
||||
* They exist so we don't need #ifdefs around KVM-specific code that already
|
||||
* checks kvm_enabled() properly.
|
||||
*/
|
||||
#define KVM_CPUID_SIGNATURE 0
|
||||
#define KVM_CPUID_FEATURES 0
|
||||
#define KVM_FEATURE_CLOCKSOURCE 0
|
||||
#define KVM_FEATURE_NOP_IO_DELAY 0
|
||||
#define KVM_FEATURE_MMU_OP 0
|
||||
#define KVM_FEATURE_CLOCKSOURCE2 0
|
||||
#define KVM_FEATURE_ASYNC_PF 0
|
||||
#define KVM_FEATURE_STEAL_TIME 0
|
||||
#define KVM_FEATURE_PV_EOI 0
|
||||
#endif
|
||||
|
||||
extern int kvm_allowed;
|
||||
@ -120,9 +135,9 @@ int kvm_has_many_ioeventfds(void);
|
||||
int kvm_has_gsi_routing(void);
|
||||
int kvm_has_intx_set_mask(void);
|
||||
|
||||
#ifdef NEED_CPU_H
|
||||
int kvm_init_vcpu(CPUArchState *env);
|
||||
int kvm_init_vcpu(CPUState *cpu);
|
||||
|
||||
#ifdef NEED_CPU_H
|
||||
int kvm_cpu_exec(CPUArchState *env);
|
||||
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
|
@ -214,16 +214,15 @@ static void kvm_reset_vcpu(void *opaque)
|
||||
kvm_arch_reset_vcpu(cpu);
|
||||
}
|
||||
|
||||
int kvm_init_vcpu(CPUArchState *env)
|
||||
int kvm_init_vcpu(CPUState *cpu)
|
||||
{
|
||||
CPUState *cpu = ENV_GET_CPU(env);
|
||||
KVMState *s = kvm_state;
|
||||
long mmap_size;
|
||||
int ret;
|
||||
|
||||
DPRINTF("kvm_init_vcpu\n");
|
||||
|
||||
ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
|
||||
ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, cpu->cpu_index);
|
||||
if (ret < 0) {
|
||||
DPRINTF("kvm_create_vcpu failed\n");
|
||||
goto err;
|
||||
|
@ -24,7 +24,7 @@ bool kvm_irqfds_allowed;
|
||||
bool kvm_msi_via_irqfd_allowed;
|
||||
bool kvm_gsi_routing_allowed;
|
||||
|
||||
int kvm_init_vcpu(CPUArchState *env)
|
||||
int kvm_init_vcpu(CPUState *cpu)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
 monitor.c | 19
@ -872,9 +872,11 @@ EventInfoList *qmp_query_events(Error **errp)
|
||||
int monitor_set_cpu(int cpu_index)
|
||||
{
|
||||
CPUArchState *env;
|
||||
CPUState *cpu;
|
||||
|
||||
for(env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
if (env->cpu_index == cpu_index) {
|
||||
for (env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
cpu = ENV_GET_CPU(env);
|
||||
if (cpu->cpu_index == cpu_index) {
|
||||
cur_mon->mon_cpu = env;
|
||||
return 0;
|
||||
}
|
||||
@ -893,7 +895,8 @@ static CPUArchState *mon_get_cpu(void)
|
||||
|
||||
int monitor_get_cpu_index(void)
|
||||
{
|
||||
return mon_get_cpu()->cpu_index;
|
||||
CPUState *cpu = ENV_GET_CPU(mon_get_cpu());
|
||||
return cpu->cpu_index;
|
||||
}
|
||||
|
||||
static void do_info_registers(Monitor *mon)
|
||||
@ -1783,13 +1786,15 @@ static void do_info_numa(Monitor *mon)
|
||||
{
|
||||
int i;
|
||||
CPUArchState *env;
|
||||
CPUState *cpu;
|
||||
|
||||
monitor_printf(mon, "%d nodes\n", nb_numa_nodes);
|
||||
for (i = 0; i < nb_numa_nodes; i++) {
|
||||
monitor_printf(mon, "node %d cpus:", i);
|
||||
for (env = first_cpu; env != NULL; env = env->next_cpu) {
|
||||
if (env->numa_node == i) {
|
||||
monitor_printf(mon, " %d", env->cpu_index);
|
||||
cpu = ENV_GET_CPU(env);
|
||||
if (cpu->numa_node == i) {
|
||||
monitor_printf(mon, " %d", cpu->cpu_index);
|
||||
}
|
||||
}
|
||||
monitor_printf(mon, "\n");
|
||||
@ -1991,6 +1996,7 @@ static void do_inject_mce(Monitor *mon, const QDict *qdict)
|
||||
{
|
||||
X86CPU *cpu;
|
||||
CPUX86State *cenv;
|
||||
CPUState *cs;
|
||||
int cpu_index = qdict_get_int(qdict, "cpu_index");
|
||||
int bank = qdict_get_int(qdict, "bank");
|
||||
uint64_t status = qdict_get_int(qdict, "status");
|
||||
@ -2004,7 +2010,8 @@ static void do_inject_mce(Monitor *mon, const QDict *qdict)
|
||||
}
|
||||
for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) {
|
||||
cpu = x86_env_get_cpu(cenv);
|
||||
if (cenv->cpu_index == cpu_index) {
|
||||
cs = CPU(cpu);
|
||||
if (cs->cpu_index == cpu_index) {
|
||||
cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
|
||||
flags);
|
||||
break;
|
||||
|
@ -1579,7 +1579,7 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
|
||||
case 0x3C:
|
||||
/* WHAMI */
|
||||
tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
|
||||
offsetof(CPUAlphaState, cpu_index));
|
||||
-offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -64,7 +64,7 @@ static void arm_cpu_reset(CPUState *s)
|
||||
CPUARMState *env = &cpu->env;
|
||||
|
||||
if (qemu_loglevel_mask(CPU_LOG_RESET)) {
|
||||
qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
|
||||
qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
|
||||
log_cpu_state(env, 0);
|
||||
}
|
||||
|
||||
|
@ -902,7 +902,8 @@ static const ARMCPRegInfo strongarm_cp_reginfo[] = {
|
||||
static int mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
uint64_t *value)
|
||||
{
|
||||
uint32_t mpidr = env->cpu_index;
|
||||
CPUState *cs = CPU(arm_env_get_cpu(env));
|
||||
uint32_t mpidr = cs->cpu_index;
|
||||
/* We don't support setting cluster ID ([8..11])
|
||||
* so these bits always RAZ.
|
||||
*/
|
||||
|
@ -35,7 +35,7 @@ static void cris_cpu_reset(CPUState *s)
|
||||
uint32_t vr;
|
||||
|
||||
if (qemu_loglevel_mask(CPU_LOG_RESET)) {
|
||||
qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
|
||||
qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
|
||||
log_cpu_state(env, 0);
|
||||
}
|
||||
|
||||
|
@ -95,6 +95,17 @@ static const char *ext3_feature_name[] = {
|
||||
NULL, NULL, NULL, NULL,
|
||||
};
|
||||
|
||||
static const char *ext4_feature_name[] = {
|
||||
NULL, NULL, "xstore", "xstore-en",
|
||||
NULL, NULL, "xcrypt", "xcrypt-en",
|
||||
"ace2", "ace2-en", "phe", "phe-en",
|
||||
"pmm", "pmm-en", NULL, NULL,
|
||||
NULL, NULL, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL,
|
||||
NULL, NULL, NULL, NULL,
|
||||
};
|
||||
|
||||
static const char *kvm_feature_name[] = {
|
||||
"kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
|
||||
"kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", NULL,
|
||||
@ -124,6 +135,47 @@ static const char *cpuid_7_0_ebx_feature_name[] = {
|
||||
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
||||
};
|
||||
|
||||
typedef struct FeatureWordInfo {
|
||||
const char **feat_names;
|
||||
uint32_t cpuid_eax; /* Input EAX for CPUID */
|
||||
int cpuid_reg; /* R_* register constant */
|
||||
} FeatureWordInfo;
|
||||
|
||||
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
|
||||
[FEAT_1_EDX] = {
|
||||
.feat_names = feature_name,
|
||||
.cpuid_eax = 1, .cpuid_reg = R_EDX,
|
||||
},
|
||||
[FEAT_1_ECX] = {
|
||||
.feat_names = ext_feature_name,
|
||||
.cpuid_eax = 1, .cpuid_reg = R_ECX,
|
||||
},
|
||||
[FEAT_8000_0001_EDX] = {
|
||||
.feat_names = ext2_feature_name,
|
||||
.cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
|
||||
},
|
||||
[FEAT_8000_0001_ECX] = {
|
||||
.feat_names = ext3_feature_name,
|
||||
.cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
|
||||
},
|
||||
[FEAT_C000_0001_EDX] = {
|
||||
.feat_names = ext4_feature_name,
|
||||
.cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
|
||||
},
|
||||
[FEAT_KVM] = {
|
||||
.feat_names = kvm_feature_name,
|
||||
.cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
|
||||
},
|
||||
[FEAT_SVM] = {
|
||||
.feat_names = svm_feature_name,
|
||||
.cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
|
||||
},
|
||||
[FEAT_7_0_EBX] = {
|
||||
.feat_names = cpuid_7_0_ebx_feature_name,
|
||||
.cpuid_eax = 7, .cpuid_reg = R_EBX,
|
||||
},
|
||||
};
|
||||
|
||||
const char *get_register_name_32(unsigned int reg)
|
||||
{
|
||||
static const char *reg_names[CPU_NB_REGS32] = {
|
||||
@ -148,9 +200,7 @@ const char *get_register_name_32(unsigned int reg)
|
||||
typedef struct model_features_t {
|
||||
uint32_t *guest_feat;
|
||||
uint32_t *host_feat;
|
||||
const char **flag_names;
|
||||
uint32_t cpuid;
|
||||
int reg;
|
||||
FeatureWord feat_word;
|
||||
} model_features_t;
|
||||
|
||||
int check_cpuid = 0;
|
||||
@ -159,7 +209,6 @@ int enforce_cpuid = 0;
|
||||
#if defined(CONFIG_KVM)
|
||||
static uint32_t kvm_default_features = (1 << KVM_FEATURE_CLOCKSOURCE) |
|
||||
(1 << KVM_FEATURE_NOP_IO_DELAY) |
|
||||
(1 << KVM_FEATURE_MMU_OP) |
|
||||
(1 << KVM_FEATURE_CLOCKSOURCE2) |
|
||||
(1 << KVM_FEATURE_ASYNC_PF) |
|
||||
(1 << KVM_FEATURE_STEAL_TIME) |
|
||||
@ -272,23 +321,20 @@ static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
|
||||
return found;
|
||||
}
|
||||
|
||||
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
|
||||
uint32_t *ext_features,
|
||||
uint32_t *ext2_features,
|
||||
uint32_t *ext3_features,
|
||||
uint32_t *kvm_features,
|
||||
uint32_t *svm_features,
|
||||
uint32_t *cpuid_7_0_ebx_features)
|
||||
static void add_flagname_to_bitmaps(const char *flagname,
|
||||
FeatureWordArray words)
|
||||
{
|
||||
if (!lookup_feature(features, flagname, NULL, feature_name) &&
|
||||
!lookup_feature(ext_features, flagname, NULL, ext_feature_name) &&
|
||||
!lookup_feature(ext2_features, flagname, NULL, ext2_feature_name) &&
|
||||
!lookup_feature(ext3_features, flagname, NULL, ext3_feature_name) &&
|
||||
!lookup_feature(kvm_features, flagname, NULL, kvm_feature_name) &&
|
||||
!lookup_feature(svm_features, flagname, NULL, svm_feature_name) &&
|
||||
!lookup_feature(cpuid_7_0_ebx_features, flagname, NULL,
|
||||
cpuid_7_0_ebx_feature_name))
|
||||
fprintf(stderr, "CPU feature %s not found\n", flagname);
|
||||
FeatureWord w;
|
||||
for (w = 0; w < FEATURE_WORDS; w++) {
|
||||
FeatureWordInfo *wi = &feature_word_info[w];
|
||||
if (wi->feat_names &&
|
||||
lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (w == FEATURE_WORDS) {
|
||||
fprintf(stderr, "CPU feature %s not found\n", flagname);
|
||||
}
|
||||
}
|
||||
|
||||
typedef struct x86_def_t {
|
||||
@ -952,55 +998,69 @@ static void kvm_cpu_fill_host(x86_def_t *x86_cpu_def)
|
||||
#endif /* CONFIG_KVM */
|
||||
}
|
||||
|
||||
static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
|
||||
static int unavailable_host_feature(FeatureWordInfo *f, uint32_t mask)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 32; ++i)
|
||||
if (1 << i & mask) {
|
||||
const char *reg = get_register_name_32(f->reg);
|
||||
const char *reg = get_register_name_32(f->cpuid_reg);
|
||||
assert(reg);
|
||||
fprintf(stderr, "warning: host doesn't support requested feature: "
|
||||
"CPUID.%02XH:%s%s%s [bit %d]\n",
|
||||
f->cpuid, reg,
|
||||
f->flag_names[i] ? "." : "",
|
||||
f->flag_names[i] ? f->flag_names[i] : "", i);
|
||||
f->cpuid_eax, reg,
|
||||
f->feat_names[i] ? "." : "",
|
||||
f->feat_names[i] ? f->feat_names[i] : "", i);
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* best effort attempt to inform user requested cpu flags aren't making
|
||||
* their way to the guest.
|
||||
/* Check if all requested cpu flags are making their way to the guest
|
||||
*
|
||||
* Returns 0 if all flags are supported by the host, non-zero otherwise.
|
||||
*
|
||||
* This function may be called only if KVM is enabled.
|
||||
*/
|
||||
static int kvm_check_features_against_host(x86_def_t *guest_def)
|
||||
static int kvm_check_features_against_host(X86CPU *cpu)
|
||||
{
|
||||
CPUX86State *env = &cpu->env;
|
||||
x86_def_t host_def;
|
||||
uint32_t mask;
|
||||
int rv, i;
|
||||
struct model_features_t ft[] = {
|
||||
{&guest_def->features, &host_def.features,
|
||||
feature_name, 0x00000001, R_EDX},
|
||||
{&guest_def->ext_features, &host_def.ext_features,
|
||||
ext_feature_name, 0x00000001, R_ECX},
|
||||
{&guest_def->ext2_features, &host_def.ext2_features,
|
||||
ext2_feature_name, 0x80000001, R_EDX},
|
||||
{&guest_def->ext3_features, &host_def.ext3_features,
|
||||
ext3_feature_name, 0x80000001, R_ECX}
|
||||
{&env->cpuid_features, &host_def.features,
|
||||
FEAT_1_EDX },
|
||||
{&env->cpuid_ext_features, &host_def.ext_features,
|
||||
FEAT_1_ECX },
|
||||
{&env->cpuid_ext2_features, &host_def.ext2_features,
|
||||
FEAT_8000_0001_EDX },
|
||||
{&env->cpuid_ext3_features, &host_def.ext3_features,
|
||||
FEAT_8000_0001_ECX },
|
||||
{&env->cpuid_ext4_features, &host_def.ext4_features,
|
||||
FEAT_C000_0001_EDX },
|
||||
{&env->cpuid_7_0_ebx_features, &host_def.cpuid_7_0_ebx_features,
|
||||
FEAT_7_0_EBX },
|
||||
{&env->cpuid_svm_features, &host_def.svm_features,
|
||||
FEAT_SVM },
|
||||
{&env->cpuid_kvm_features, &host_def.kvm_features,
|
||||
FEAT_KVM },
|
||||
};
|
||||
|
||||
assert(kvm_enabled());
|
||||
|
||||
kvm_cpu_fill_host(&host_def);
|
||||
for (rv = 0, i = 0; i < ARRAY_SIZE(ft); ++i)
|
||||
for (mask = 1; mask; mask <<= 1)
|
||||
for (rv = 0, i = 0; i < ARRAY_SIZE(ft); ++i) {
|
||||
FeatureWord w = ft[i].feat_word;
|
||||
FeatureWordInfo *wi = &feature_word_info[w];
|
||||
for (mask = 1; mask; mask <<= 1) {
|
||||
if (*ft[i].guest_feat & mask &&
|
||||
!(*ft[i].host_feat & mask)) {
|
||||
unavailable_host_feature(&ft[i], mask);
|
||||
rv = 1;
|
||||
}
|
||||
unavailable_host_feature(wi, mask);
|
||||
rv = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
return rv;
|
||||
}
|
||||
|
||||
@ -1284,35 +1344,19 @@ static int cpu_x86_parse_featurestr(x86_def_t *x86_cpu_def, char *features)
|
||||
unsigned int i;
|
||||
char *featurestr; /* Single 'key=value" string being parsed */
|
||||
/* Features to be added */
|
||||
uint32_t plus_features = 0, plus_ext_features = 0;
|
||||
uint32_t plus_ext2_features = 0, plus_ext3_features = 0;
|
||||
uint32_t plus_kvm_features = kvm_default_features, plus_svm_features = 0;
|
||||
uint32_t plus_7_0_ebx_features = 0;
|
||||
FeatureWordArray plus_features = { 0 };
|
||||
/* Features to be removed */
|
||||
uint32_t minus_features = 0, minus_ext_features = 0;
|
||||
uint32_t minus_ext2_features = 0, minus_ext3_features = 0;
|
||||
uint32_t minus_kvm_features = 0, minus_svm_features = 0;
|
||||
uint32_t minus_7_0_ebx_features = 0;
|
||||
FeatureWordArray minus_features = { 0 };
|
||||
uint32_t numvalue;
|
||||
|
||||
add_flagname_to_bitmaps("hypervisor", &plus_features,
|
||||
&plus_ext_features, &plus_ext2_features, &plus_ext3_features,
|
||||
&plus_kvm_features, &plus_svm_features, &plus_7_0_ebx_features);
|
||||
|
||||
featurestr = features ? strtok(features, ",") : NULL;
|
||||
|
||||
while (featurestr) {
|
||||
char *val;
|
||||
if (featurestr[0] == '+') {
|
||||
add_flagname_to_bitmaps(featurestr + 1, &plus_features,
|
||||
&plus_ext_features, &plus_ext2_features,
|
||||
&plus_ext3_features, &plus_kvm_features,
|
||||
&plus_svm_features, &plus_7_0_ebx_features);
|
||||
add_flagname_to_bitmaps(featurestr + 1, plus_features);
|
||||
} else if (featurestr[0] == '-') {
|
||||
add_flagname_to_bitmaps(featurestr + 1, &minus_features,
|
||||
&minus_ext_features, &minus_ext2_features,
|
||||
&minus_ext3_features, &minus_kvm_features,
|
||||
&minus_svm_features, &minus_7_0_ebx_features);
|
||||
add_flagname_to_bitmaps(featurestr + 1, minus_features);
|
||||
} else if ((val = strchr(featurestr, '='))) {
|
||||
*val = 0; val++;
|
||||
if (!strcmp(featurestr, "family")) {
|
||||
@ -1412,24 +1456,22 @@ static int cpu_x86_parse_featurestr(x86_def_t *x86_cpu_def, char *features)
|
||||
}
|
||||
featurestr = strtok(NULL, ",");
|
||||
}
|
||||
x86_cpu_def->features |= plus_features;
|
||||
x86_cpu_def->ext_features |= plus_ext_features;
|
||||
x86_cpu_def->ext2_features |= plus_ext2_features;
|
||||
x86_cpu_def->ext3_features |= plus_ext3_features;
|
||||
x86_cpu_def->kvm_features |= plus_kvm_features;
|
||||
x86_cpu_def->svm_features |= plus_svm_features;
|
||||
x86_cpu_def->cpuid_7_0_ebx_features |= plus_7_0_ebx_features;
|
||||
x86_cpu_def->features &= ~minus_features;
|
||||
x86_cpu_def->ext_features &= ~minus_ext_features;
|
||||
x86_cpu_def->ext2_features &= ~minus_ext2_features;
|
||||
x86_cpu_def->ext3_features &= ~minus_ext3_features;
|
||||
x86_cpu_def->kvm_features &= ~minus_kvm_features;
|
||||
x86_cpu_def->svm_features &= ~minus_svm_features;
|
||||
x86_cpu_def->cpuid_7_0_ebx_features &= ~minus_7_0_ebx_features;
|
||||
if (check_cpuid && kvm_enabled()) {
|
||||
if (kvm_check_features_against_host(x86_cpu_def) && enforce_cpuid)
|
||||
goto error;
|
||||
}
|
||||
x86_cpu_def->features |= plus_features[FEAT_1_EDX];
|
||||
x86_cpu_def->ext_features |= plus_features[FEAT_1_ECX];
|
||||
x86_cpu_def->ext2_features |= plus_features[FEAT_8000_0001_EDX];
|
||||
x86_cpu_def->ext3_features |= plus_features[FEAT_8000_0001_ECX];
|
||||
x86_cpu_def->ext4_features |= plus_features[FEAT_C000_0001_EDX];
|
||||
x86_cpu_def->kvm_features |= plus_features[FEAT_KVM];
|
||||
x86_cpu_def->svm_features |= plus_features[FEAT_SVM];
|
||||
x86_cpu_def->cpuid_7_0_ebx_features |= plus_features[FEAT_7_0_EBX];
|
||||
x86_cpu_def->features &= ~minus_features[FEAT_1_EDX];
|
||||
x86_cpu_def->ext_features &= ~minus_features[FEAT_1_ECX];
|
||||
x86_cpu_def->ext2_features &= ~minus_features[FEAT_8000_0001_EDX];
|
||||
x86_cpu_def->ext3_features &= ~minus_features[FEAT_8000_0001_ECX];
|
||||
x86_cpu_def->ext4_features &= ~minus_features[FEAT_C000_0001_EDX];
|
||||
x86_cpu_def->kvm_features &= ~minus_features[FEAT_KVM];
|
||||
x86_cpu_def->svm_features &= ~minus_features[FEAT_SVM];
|
||||
x86_cpu_def->cpuid_7_0_ebx_features &= ~minus_features[FEAT_7_0_EBX];
|
||||
return 0;
|
||||
|
||||
error:
|
||||
@ -1549,17 +1591,23 @@ int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
|
||||
|
||||
model_pieces = g_strsplit(cpu_model, ",", 2);
|
||||
if (!model_pieces[0]) {
|
||||
goto error;
|
||||
error_setg(&error, "Invalid/empty CPU model name");
|
||||
goto out;
|
||||
}
|
||||
name = model_pieces[0];
|
||||
features = model_pieces[1];
|
||||
|
||||
if (cpu_x86_find_by_name(def, name) < 0) {
|
||||
goto error;
|
||||
error_setg(&error, "Unable to find CPU definition: %s", name);
|
||||
goto out;
|
||||
}
|
||||
|
||||
def->kvm_features |= kvm_default_features;
|
||||
def->ext_features |= CPUID_EXT_HYPERVISOR;
|
||||
|
||||
if (cpu_x86_parse_featurestr(def, features) < 0) {
|
||||
goto error;
|
||||
error_setg(&error, "Invalid cpu_model string format: %s", cpu_model);
|
||||
goto out;
|
||||
}
|
||||
assert(def->vendor1);
|
||||
env->cpuid_vendor1 = def->vendor1;
|
||||
@ -1584,17 +1632,15 @@ int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
|
||||
"tsc-frequency", &error);
|
||||
|
||||
object_property_set_str(OBJECT(cpu), def->model_id, "model-id", &error);
|
||||
|
||||
out:
|
||||
g_strfreev(model_pieces);
|
||||
if (error) {
|
||||
fprintf(stderr, "%s\n", error_get_pretty(error));
|
||||
error_free(error);
|
||||
goto error;
|
||||
return -1;
|
||||
}
|
||||
|
||||
g_strfreev(model_pieces);
|
||||
return 0;
|
||||
error:
|
||||
g_strfreev(model_pieces);
|
||||
return -1;
|
||||
}
|
||||
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
@ -1691,8 +1737,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
||||
*ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
|
||||
*ecx = env->cpuid_ext_features;
|
||||
*edx = env->cpuid_features;
|
||||
if (env->nr_cores * env->nr_threads > 1) {
|
||||
*ebx |= (env->nr_cores * env->nr_threads) << 16;
|
||||
if (cs->nr_cores * cs->nr_threads > 1) {
|
||||
*ebx |= (cs->nr_cores * cs->nr_threads) << 16;
|
||||
*edx |= 1 << 28; /* HTT bit */
|
||||
}
|
||||
break;
|
||||
@ -1705,8 +1751,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
||||
break;
|
||||
case 4:
|
||||
/* cache info: needed for Core compatibility */
|
||||
if (env->nr_cores > 1) {
|
||||
*eax = (env->nr_cores - 1) << 26;
|
||||
if (cs->nr_cores > 1) {
|
||||
*eax = (cs->nr_cores - 1) << 26;
|
||||
} else {
|
||||
*eax = 0;
|
||||
}
|
||||
@ -1725,8 +1771,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
||||
break;
|
||||
case 2: /* L2 cache info */
|
||||
*eax |= 0x0000143;
|
||||
if (env->nr_threads > 1) {
|
||||
*eax |= (env->nr_threads - 1) << 14;
|
||||
if (cs->nr_threads > 1) {
|
||||
*eax |= (cs->nr_threads - 1) << 14;
|
||||
}
|
||||
*ebx = 0x3c0003f;
|
||||
*ecx = 0x0000fff;
|
||||
@ -1830,7 +1876,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
||||
* discards multiple thread information if it is set.
|
||||
* So dont set it here for Intel to make Linux guests happy.
|
||||
*/
|
||||
if (env->nr_cores * env->nr_threads > 1) {
|
||||
if (cs->nr_cores * cs->nr_threads > 1) {
|
||||
uint32_t tebx, tecx, tedx;
|
||||
get_cpuid_vendor(env, &tebx, &tecx, &tedx);
|
||||
if (tebx != CPUID_VENDOR_INTEL_1 ||
|
||||
@ -1878,8 +1924,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
||||
*ebx = 0;
|
||||
*ecx = 0;
|
||||
*edx = 0;
|
||||
if (env->nr_cores * env->nr_threads > 1) {
|
||||
*ecx |= (env->nr_cores * env->nr_threads) - 1;
|
||||
if (cs->nr_cores * cs->nr_threads > 1) {
|
||||
*ecx |= (cs->nr_cores * cs->nr_threads) - 1;
|
||||
}
|
||||
break;
|
||||
case 0x8000000A:
|
||||
@ -1936,7 +1982,7 @@ static void x86_cpu_reset(CPUState *s)
|
||||
int i;
|
||||
|
||||
if (qemu_loglevel_mask(CPU_LOG_RESET)) {
|
||||
qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
|
||||
qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
|
||||
log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
|
||||
}
|
||||
|
||||
@ -2010,7 +2056,7 @@ static void x86_cpu_reset(CPUState *s)
|
||||
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
/* We hard-wire the BSP to the first CPU. */
|
||||
if (env->cpu_index == 0) {
|
||||
if (s->cpu_index == 0) {
|
||||
apic_designate_bsp(env->apic_state);
|
||||
}
|
||||
|
||||
@ -2128,6 +2174,11 @@ void x86_cpu_realize(Object *obj, Error **errp)
|
||||
#ifdef CONFIG_KVM
|
||||
filter_features_for_kvm(cpu);
|
||||
#endif
|
||||
if (check_cpuid && kvm_check_features_against_host(cpu)
|
||||
&& enforce_cpuid) {
|
||||
error_setg(errp, "Host's CPU doesn't support requested features");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
@ -2148,6 +2199,7 @@ void x86_cpu_realize(Object *obj, Error **errp)
|
||||
|
||||
static void x86_cpu_initfn(Object *obj)
|
||||
{
|
||||
CPUState *cs = CPU(obj);
|
||||
X86CPU *cpu = X86_CPU(obj);
|
||||
CPUX86State *env = &cpu->env;
|
||||
static int inited;
|
||||
@ -2179,7 +2231,7 @@ static void x86_cpu_initfn(Object *obj)
|
||||
x86_cpuid_get_tsc_freq,
|
||||
x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
|
||||
|
||||
env->cpuid_apic_id = env->cpu_index;
|
||||
env->cpuid_apic_id = cs->cpu_index;
|
||||
|
||||
/* init various static tables used in TCG mode */
|
||||
if (tcg_enabled() && !inited) {
|
||||
|
@ -231,6 +231,12 @@
|
||||
#define DR7_TYPE_SHIFT 16
|
||||
#define DR7_LEN_SHIFT 18
|
||||
#define DR7_FIXED_1 0x00000400
|
||||
#define DR7_LOCAL_BP_MASK 0x55
|
||||
#define DR7_MAX_BP 4
|
||||
#define DR7_TYPE_BP_INST 0x0
|
||||
#define DR7_TYPE_DATA_WR 0x1
|
||||
#define DR7_TYPE_IO_RW 0x2
|
||||
#define DR7_TYPE_DATA_RW 0x3
|
||||
|
||||
#define PG_PRESENT_BIT 0
|
||||
#define PG_RW_BIT 1
|
||||
@ -361,6 +367,21 @@
|
||||
|
||||
#define MSR_VM_HSAVE_PA 0xc0010117
|
||||
|
||||
/* CPUID feature words */
|
||||
typedef enum FeatureWord {
|
||||
FEAT_1_EDX, /* CPUID[1].EDX */
|
||||
FEAT_1_ECX, /* CPUID[1].ECX */
|
||||
FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */
|
||||
FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
|
||||
FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
|
||||
FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
|
||||
FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
|
||||
FEAT_SVM, /* CPUID[8000_000A].EDX */
|
||||
FEATURE_WORDS,
|
||||
} FeatureWord;
|
||||
|
||||
typedef uint32_t FeatureWordArray[FEATURE_WORDS];
|
||||
|
||||
/* cpuid_features bits */
|
||||
#define CPUID_FP87 (1 << 0)
|
||||
#define CPUID_VME (1 << 1)
|
||||
@ -993,9 +1014,20 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
|
||||
#define cpu_handle_mmu_fault cpu_x86_handle_mmu_fault
|
||||
void cpu_x86_set_a20(CPUX86State *env, int a20_state);
|
||||
|
||||
static inline int hw_breakpoint_enabled(unsigned long dr7, int index)
|
||||
static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
|
||||
{
|
||||
return (dr7 >> (index * 2)) & 3;
|
||||
return (dr7 >> (index * 2)) & 1;
|
||||
}
|
||||
|
||||
static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index)
|
||||
{
|
||||
return (dr7 >> (index * 2)) & 2;
|
||||
|
||||
}
|
||||
static inline bool hw_breakpoint_enabled(unsigned long dr7, int index)
|
||||
{
|
||||
return hw_global_breakpoint_enabled(dr7, index) ||
|
||||
hw_local_breakpoint_enabled(dr7, index);
|
||||
}
|
||||
|
||||
static inline int hw_breakpoint_type(unsigned long dr7, int index)
|
||||
@ -1011,7 +1043,7 @@ static inline int hw_breakpoint_len(unsigned long dr7, int index)
|
||||
|
||||
void hw_breakpoint_insert(CPUX86State *env, int index);
|
||||
void hw_breakpoint_remove(CPUX86State *env, int index);
|
||||
int check_hw_breakpoints(CPUX86State *env, int force_dr6_update);
|
||||
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update);
|
||||
void breakpoint_handler(CPUX86State *env);
|
||||
|
||||
/* will be suppressed */
|
||||
|
@ -966,30 +966,35 @@ hwaddr cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
|
||||
|
||||
void hw_breakpoint_insert(CPUX86State *env, int index)
|
||||
{
|
||||
int type, err = 0;
|
||||
int type = 0, err = 0;
|
||||
|
||||
switch (hw_breakpoint_type(env->dr[7], index)) {
|
||||
case 0:
|
||||
if (hw_breakpoint_enabled(env->dr[7], index))
|
||||
case DR7_TYPE_BP_INST:
|
||||
if (hw_breakpoint_enabled(env->dr[7], index)) {
|
||||
err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
|
||||
&env->cpu_breakpoint[index]);
|
||||
}
|
||||
break;
|
||||
case 1:
|
||||
case DR7_TYPE_DATA_WR:
|
||||
type = BP_CPU | BP_MEM_WRITE;
|
||||
goto insert_wp;
|
||||
case 2:
|
||||
/* No support for I/O watchpoints yet */
|
||||
break;
|
||||
case 3:
|
||||
case DR7_TYPE_IO_RW:
|
||||
/* No support for I/O watchpoints yet */
|
||||
break;
|
||||
case DR7_TYPE_DATA_RW:
|
||||
type = BP_CPU | BP_MEM_ACCESS;
|
||||
insert_wp:
|
||||
break;
|
||||
}
|
||||
|
||||
if (type != 0) {
|
||||
err = cpu_watchpoint_insert(env, env->dr[index],
|
||||
hw_breakpoint_len(env->dr[7], index),
|
||||
type, &env->cpu_watchpoint[index]);
|
||||
break;
|
||||
}
|
||||
if (err)
|
||||
|
||||
if (err) {
|
||||
env->cpu_breakpoint[index] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void hw_breakpoint_remove(CPUX86State *env, int index)
|
||||
@ -997,39 +1002,60 @@ void hw_breakpoint_remove(CPUX86State *env, int index)
|
||||
if (!env->cpu_breakpoint[index])
|
||||
return;
|
||||
switch (hw_breakpoint_type(env->dr[7], index)) {
|
||||
case 0:
|
||||
if (hw_breakpoint_enabled(env->dr[7], index))
|
||||
case DR7_TYPE_BP_INST:
|
||||
if (hw_breakpoint_enabled(env->dr[7], index)) {
|
||||
cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
|
||||
}
|
||||
break;
|
||||
case 1:
|
||||
case 3:
|
||||
case DR7_TYPE_DATA_WR:
|
||||
case DR7_TYPE_DATA_RW:
|
||||
cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
|
||||
break;
|
||||
case 2:
|
||||
case DR7_TYPE_IO_RW:
|
||||
/* No support for I/O watchpoints yet */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int check_hw_breakpoints(CPUX86State *env, int force_dr6_update)
|
||||
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
|
||||
{
|
||||
target_ulong dr6;
|
||||
int reg, type;
|
||||
int hit_enabled = 0;
|
||||
int reg;
|
||||
bool hit_enabled = false;
|
||||
|
||||
dr6 = env->dr[6] & ~0xf;
|
||||
for (reg = 0; reg < 4; reg++) {
|
||||
type = hw_breakpoint_type(env->dr[7], reg);
|
||||
if ((type == 0 && env->dr[reg] == env->eip) ||
|
||||
((type & 1) && env->cpu_watchpoint[reg] &&
|
||||
(env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
|
||||
for (reg = 0; reg < DR7_MAX_BP; reg++) {
|
||||
bool bp_match = false;
|
||||
bool wp_match = false;
|
||||
|
||||
switch (hw_breakpoint_type(env->dr[7], reg)) {
|
||||
case DR7_TYPE_BP_INST:
|
||||
if (env->dr[reg] == env->eip) {
|
||||
bp_match = true;
|
||||
}
|
||||
break;
|
||||
case DR7_TYPE_DATA_WR:
|
||||
case DR7_TYPE_DATA_RW:
|
||||
if (env->cpu_watchpoint[reg] &&
|
||||
env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
|
||||
wp_match = true;
|
||||
}
|
||||
break;
|
||||
case DR7_TYPE_IO_RW:
|
||||
break;
|
||||
}
|
||||
if (bp_match || wp_match) {
|
||||
dr6 |= 1 << reg;
|
||||
if (hw_breakpoint_enabled(env->dr[7], reg))
|
||||
hit_enabled = 1;
|
||||
if (hw_breakpoint_enabled(env->dr[7], reg)) {
|
||||
hit_enabled = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (hit_enabled || force_dr6_update)
|
||||
|
||||
if (hit_enabled || force_dr6_update) {
|
||||
env->dr[6] = dr6;
|
||||
}
|
||||
|
||||
return hit_enabled;
|
||||
}
|
||||
|
||||
@ -1040,16 +1066,17 @@ void breakpoint_handler(CPUX86State *env)
|
||||
if (env->watchpoint_hit) {
|
||||
if (env->watchpoint_hit->flags & BP_CPU) {
|
||||
env->watchpoint_hit = NULL;
|
||||
if (check_hw_breakpoints(env, 0))
|
||||
if (check_hw_breakpoints(env, false)) {
|
||||
raise_exception(env, EXCP01_DB);
|
||||
else
|
||||
} else {
|
||||
cpu_resume_from_signal(env, NULL);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
QTAILQ_FOREACH(bp, &env->breakpoints, entry)
|
||||
if (bp->pc == env->eip) {
|
||||
if (bp->flags & BP_CPU) {
|
||||
check_hw_breakpoints(env, 1);
|
||||
check_hw_breakpoints(env, true);
|
||||
raise_exception(env, EXCP01_DB);
|
||||
}
|
||||
break;
|
||||
@ -1059,7 +1086,7 @@ void breakpoint_handler(CPUX86State *env)
|
||||
|
||||
typedef struct MCEInjectionParams {
|
||||
Monitor *mon;
|
||||
CPUX86State *env;
|
||||
X86CPU *cpu;
|
||||
int bank;
|
||||
uint64_t status;
|
||||
uint64_t mcg_status;
|
||||
@ -1071,7 +1098,8 @@ typedef struct MCEInjectionParams {
|
||||
static void do_inject_x86_mce(void *data)
|
||||
{
|
||||
MCEInjectionParams *params = data;
|
||||
CPUX86State *cenv = params->env;
|
||||
CPUX86State *cenv = ¶ms->cpu->env;
|
||||
CPUState *cpu = CPU(params->cpu);
|
||||
uint64_t *banks = cenv->mce_banks + 4 * params->bank;
|
||||
|
||||
cpu_synchronize_state(cenv);
|
||||
@ -1094,7 +1122,7 @@ static void do_inject_x86_mce(void *data)
|
||||
if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
|
||||
monitor_printf(params->mon,
|
||||
"CPU %d: Uncorrected error reporting disabled\n",
|
||||
cenv->cpu_index);
|
||||
cpu->cpu_index);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1106,7 +1134,7 @@ static void do_inject_x86_mce(void *data)
|
||||
monitor_printf(params->mon,
|
||||
"CPU %d: Uncorrected error reporting disabled for"
|
||||
" bank %d\n",
|
||||
cenv->cpu_index, params->bank);
|
||||
cpu->cpu_index, params->bank);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1115,7 +1143,7 @@ static void do_inject_x86_mce(void *data)
|
||||
monitor_printf(params->mon,
|
||||
"CPU %d: Previous MCE still in progress, raising"
|
||||
" triple fault\n",
|
||||
cenv->cpu_index);
|
||||
cpu->cpu_index);
|
||||
qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
|
||||
qemu_system_reset_request();
|
||||
return;
|
||||
@ -1148,7 +1176,7 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
|
||||
CPUX86State *cenv = &cpu->env;
|
||||
MCEInjectionParams params = {
|
||||
.mon = mon,
|
||||
.env = cenv,
|
||||
.cpu = cpu,
|
||||
.bank = bank,
|
||||
.status = status,
|
||||
.mcg_status = mcg_status,
|
||||
@ -1188,7 +1216,7 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
|
||||
if (cenv == env) {
|
||||
continue;
|
||||
}
|
||||
params.env = env;
|
||||
params.cpu = x86_env_get_cpu(env);
|
||||
run_on_cpu(CPU(cpu), do_inject_x86_mce, ¶ms);
|
||||
}
|
||||
}
|
||||
|
@ -265,10 +265,11 @@ static int cpu_post_load(void *opaque, int version_id)
|
||||
|
||||
cpu_breakpoint_remove_all(env, BP_CPU);
|
||||
cpu_watchpoint_remove_all(env, BP_CPU);
|
||||
for (i = 0; i < 4; i++)
|
||||
for (i = 0; i < DR7_MAX_BP; i++) {
|
||||
hw_breakpoint_insert(env, i);
|
||||
|
||||
}
|
||||
tlb_flush(env, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -110,7 +110,7 @@ void helper_into(CPUX86State *env, int next_eip_addend)
 void helper_single_step(CPUX86State *env)
 {
 #ifndef CONFIG_USER_ONLY
-    check_hw_breakpoints(env, 1);
+    check_hw_breakpoints(env, true);
     env->dr[6] |= DR6_BS;
 #endif
     raise_exception(env, EXCP01_DB);
@@ -197,11 +197,11 @@ void helper_movl_drN_T0(CPUX86State *env, int reg, target_ulong t0)
         env->dr[reg] = t0;
         hw_breakpoint_insert(env, reg);
     } else if (reg == 7) {
-        for (i = 0; i < 4; i++) {
+        for (i = 0; i < DR7_MAX_BP; i++) {
             hw_breakpoint_remove(env, i);
         }
         env->dr[7] = t0;
-        for (i = 0; i < 4; i++) {
+        for (i = 0; i < DR7_MAX_BP; i++) {
             hw_breakpoint_insert(env, i);
         }
     } else {
@@ -580,14 +580,17 @@ void helper_monitor(CPUX86State *env, target_ulong ptr)

 void helper_mwait(CPUX86State *env, int next_eip_addend)
 {
+    CPUState *cpu;
+
     if ((uint32_t)ECX != 0) {
         raise_exception(env, EXCP0D_GPF);
     }
     cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0);
     EIP += next_eip_addend;

+    cpu = CPU(x86_env_get_cpu(env));
     /* XXX: not complete but not completely erroneous */
-    if (env->cpu_index != 0 || env->next_cpu != NULL) {
+    if (cpu->cpu_index != 0 || env->next_cpu != NULL) {
         /* more than one CPU: do not sleep because another CPU may
            wake this one */
     } else {
@@ -465,13 +465,14 @@ static void switch_tss(CPUX86State *env, int tss_selector,

 #ifndef CONFIG_USER_ONLY
     /* reset local breakpoints */
-    if (env->dr[7] & 0x55) {
-        for (i = 0; i < 4; i++) {
-            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1) {
+    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
+        for (i = 0; i < DR7_MAX_BP; i++) {
+            if (hw_local_breakpoint_enabled(env->dr[7], i) &&
+                !hw_global_breakpoint_enabled(env->dr[7], i)) {
                 hw_breakpoint_remove(env, i);
             }
         }
-        env->dr[7] &= ~0x55;
+        env->dr[7] &= ~DR7_LOCAL_BP_MASK;
     }
 #endif
 }
@@ -30,7 +30,7 @@ static void lm32_cpu_reset(CPUState *s)
     CPULM32State *env = &cpu->env;

     if (qemu_loglevel_mask(CPU_LOG_RESET)) {
-        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
+        qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
         log_cpu_state(env, 0);
     }

@@ -35,7 +35,7 @@ static void m68k_cpu_reset(CPUState *s)
     CPUM68KState *env = &cpu->env;

     if (qemu_loglevel_mask(CPU_LOG_RESET)) {
-        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
+        qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
         log_cpu_state(env, 0);
     }

@@ -32,7 +32,7 @@ static void mb_cpu_reset(CPUState *s)
     CPUMBState *env = &cpu->env;

     if (qemu_loglevel_mask(CPU_LOG_RESET)) {
-        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
+        qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
         log_cpu_state(env, 0);
     }

@@ -29,8 +29,16 @@ static void mips_cpu_reset(CPUState *s)
     MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(cpu);
     CPUMIPSState *env = &cpu->env;

+    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
+        qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
+        log_cpu_state(env, 0);
+    }
+
     mcc->parent_reset(s);

+    memset(env, 0, offsetof(CPUMIPSState, breakpoints));
+    tlb_flush(env, 1);
+
     cpu_state_reset(env);
 }

@@ -572,17 +572,23 @@ static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
     }
 }

-/* tc should point to an int with the value of the global TC index.
-   This function will transform it into a local index within the
-   returned CPUMIPSState.
-
-   FIXME: This code assumes that all VPEs have the same number of TCs,
+/**
+ * mips_cpu_map_tc:
+ * @env: CPU from which mapping is performed.
+ * @tc: Should point to an int with the value of the global TC index.
+ *
+ * This function will transform @tc into a local index within the
+ * returned #CPUMIPSState.
+ */
+/* FIXME: This code assumes that all VPEs have the same number of TCs,
           which depends on runtime setup. Can probably be fixed by
           walking the list of CPUMIPSStates. */
 static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
 {
-    CPUMIPSState *other;
-    int vpe_idx, nr_threads = env->nr_threads;
+    MIPSCPU *cpu;
+    CPUState *cs;
+    CPUState *other_cs;
+    int vpe_idx;
     int tc_idx = *tc;

     if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
@@ -591,10 +597,15 @@ static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
         return env;
     }

-    vpe_idx = tc_idx / nr_threads;
-    *tc = tc_idx % nr_threads;
-    other = qemu_get_cpu(vpe_idx);
-    return other ? other : env;
+    cs = CPU(mips_env_get_cpu(env));
+    vpe_idx = tc_idx / cs->nr_threads;
+    *tc = tc_idx % cs->nr_threads;
+    other_cs = qemu_get_cpu(vpe_idx);
+    if (other_cs == NULL) {
+        return env;
+    }
+    cpu = MIPS_CPU(other_cs);
+    return &cpu->env;
 }

 /* The per VPE CP0_Status register shares some fields with the per TC
@@ -15878,13 +15878,10 @@ MIPSCPU *cpu_mips_init(const char *cpu_model)

 void cpu_state_reset(CPUMIPSState *env)
 {
-    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
-        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
-        log_cpu_state(env, 0);
-    }
-
-    memset(env, 0, offsetof(CPUMIPSState, breakpoints));
-    tlb_flush(env, 1);
+#ifndef CONFIG_USER_ONLY
+    MIPSCPU *cpu = mips_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+#endif

     /* Reset registers to their default values */
     env->CP0_PRid = env->cpu_model->CP0_PRid;
@@ -15953,7 +15950,7 @@ void cpu_state_reset(CPUMIPSState *env)
     env->CP0_Random = env->tlb->nb_tlb - 1;
     env->tlb->tlb_in_use = env->tlb->nb_tlb;
     env->CP0_Wired = 0;
-    env->CP0_EBase = 0x80000000 | (env->cpu_index & 0x3FF);
+    env->CP0_EBase = 0x80000000 | (cs->cpu_index & 0x3FF);
     env->CP0_Status = (1 << CP0St_BEV) | (1 << CP0St_ERL);
     /* vectored interrupts not implemented, timer on int 7,
        no performance counters. */
@@ -15976,13 +15973,13 @@ void cpu_state_reset(CPUMIPSState *env)

     /* Only TC0 on VPE 0 starts as active. */
     for (i = 0; i < ARRAY_SIZE(env->tcs); i++) {
-        env->tcs[i].CP0_TCBind = env->cpu_index << CP0TCBd_CurVPE;
+        env->tcs[i].CP0_TCBind = cs->cpu_index << CP0TCBd_CurVPE;
         env->tcs[i].CP0_TCHalt = 1;
     }
     env->active_tc.CP0_TCHalt = 1;
     env->halted = 1;

-    if (!env->cpu_index) {
+    if (cs->cpu_index == 0) {
         /* VPE0 starts up enabled. */
         env->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
         env->CP0_VPEConf0 |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
@@ -27,7 +27,7 @@ static void openrisc_cpu_reset(CPUState *s)
     OpenRISCCPUClass *occ = OPENRISC_CPU_GET_CLASS(cpu);

     if (qemu_loglevel_mask(CPU_LOG_RESET)) {
-        qemu_log("CPU Reset (CPU %d)\n", cpu->env.cpu_index);
+        qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
         log_cpu_state(&cpu->env, 0);
     }

@@ -766,8 +766,9 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)

         dprintf("injected interrupt %d\n", irq);
         r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
-        if (r < 0)
-            printf("cpu %d fail inject %x\n", env->cpu_index, irq);
+        if (r < 0) {
+            printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
+        }

         /* Always wake up soon in case the interrupt was level based */
         qemu_mod_timer(idle_timer, qemu_get_clock_ns(vm_clock) +
@@ -1275,14 +1276,15 @@ static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
     }
 }

-int kvmppc_fixup_cpu(CPUPPCState *env)
+int kvmppc_fixup_cpu(PowerPCCPU *cpu)
 {
+    CPUState *cs = CPU(cpu);
     int smt;

     /* Adjust cpu index for SMT */
     smt = kvmppc_smt_threads();
-    env->cpu_index = (env->cpu_index / smp_threads) * smt
-        + (env->cpu_index % smp_threads);
+    cs->cpu_index = (cs->cpu_index / smp_threads) * smt
+        + (cs->cpu_index % smp_threads);

     return 0;
 }
@@ -33,7 +33,7 @@ int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
 int kvmppc_reset_htab(int shift_hint);
 uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift);
 #endif /* !CONFIG_USER_ONLY */
-int kvmppc_fixup_cpu(CPUPPCState *env);
+int kvmppc_fixup_cpu(PowerPCCPU *cpu);

 #else

@@ -122,7 +122,7 @@ static inline int kvmppc_update_sdr1(CPUPPCState *env)

 #endif /* !CONFIG_USER_ONLY */

-static inline int kvmppc_fixup_cpu(CPUPPCState *env)
+static inline int kvmppc_fixup_cpu(PowerPCCPU *cpu)
 {
     return -1;
 }
@@ -10005,8 +10005,10 @@ static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
     return 0;
 }

-static int ppc_fixup_cpu(CPUPPCState *env)
+static int ppc_fixup_cpu(PowerPCCPU *cpu)
 {
+    CPUPPCState *env = &cpu->env;
+
     /* TCG doesn't (yet) emulate some groups of instructions that
      * are implemented on some otherwise supported CPUs (e.g. VSX
      * and decimal floating point instructions on POWER7). We
@@ -10036,12 +10038,12 @@ static void ppc_cpu_realize(Object *obj, Error **errp)
     Error *local_err = NULL;

     if (kvm_enabled()) {
-        if (kvmppc_fixup_cpu(env) != 0) {
+        if (kvmppc_fixup_cpu(cpu) != 0) {
             error_setg(errp, "Unable to virtualize selected CPU with KVM");
             return;
         }
     } else {
-        if (ppc_fixup_cpu(env) != 0) {
+        if (ppc_fixup_cpu(cpu) != 0) {
             error_setg(errp, "Unable to emulate selected CPU with TCG");
             return;
         }
@@ -10460,7 +10462,7 @@ static void ppc_cpu_reset(CPUState *s)
     target_ulong msr;

     if (qemu_loglevel_mask(CPU_LOG_RESET)) {
-        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
+        qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
         log_cpu_state(env, 0);
     }

@@ -33,7 +33,7 @@ static void s390_cpu_reset(CPUState *s)
     CPUS390XState *env = &cpu->env;

     if (qemu_loglevel_mask(CPU_LOG_RESET)) {
-        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
+        qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
         log_cpu_state(env, 0);
     }

@@ -31,7 +31,7 @@ static void superh_cpu_reset(CPUState *s)
     CPUSH4State *env = &cpu->env;

     if (qemu_loglevel_mask(CPU_LOG_RESET)) {
-        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
+        qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
         log_cpu_state(env, 0);
     }

@@ -31,7 +31,7 @@ static void sparc_cpu_reset(CPUState *s)
     CPUSPARCState *env = &cpu->env;

     if (qemu_loglevel_mask(CPU_LOG_RESET)) {
-        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
+        qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
         log_cpu_state(env, 0);
     }
