x86 and machine queue, 2020-09-02
Bug fixes:
* Revert EPYC topology patches that caused regressions (Babu Moger)
* Memory leak fixes (Pan Nengyuan)

QOM Cleanups:
* Fix typo in AARCH64_CPU_GET_CLASS
* Rename QOM macros for consistency and/or to avoid conflicts with other symbols
* Move typedefs to header files
* Correct instance/class sizes

-----BEGIN PGP SIGNATURE-----

iQJIBAABCAAyFiEEWjIv1avE09usz9GqKAeTb5hNxaYFAl9PhuUUHGVoYWJrb3N0
QHJlZGhhdC5jb20ACgkQKAeTb5hNxaa3bA//TxsYcYKfqPj6EyvW2FclxPA3Ziwz
0OOZFo+bZcx9OjLb+Zok6rBnV8j3T7pIvJqoaxsKAnIkF6SgxReu4UIJixQeTKB+
4GRNUAaCX99NmO+VhuX7GY5argN3m3bM8T6BYLvraYxHDKe+Azxv8JuLC/BE7U9Z
VKv1sm5ZSUnXlGvJ/Mdo9nZSZEW54pl6zNSpE7Lk/LLHzjlQswrOssSd0uQ7qfTt
O1k9duR4d6U/yqpuEbopyaJAlozgaBxT+G60jmR0AxrOZ6wm6Y/27MHN8LPmqgby
F+7Nc/P21gEVIziqxTJOIXsjd2OS+tuV2kPqpCTNuw23na1aBsS2ft0UY6MKTXcQ
17XoPgRowbHDKEPva/JoFL6NyaYDDBXVKssTXagCz3Na41OkXDRUEg4ofKKvFaOR
j+nmkJsY6m+sDqzcwiMsjLUsZrDlCCEwKnTOt16hM8VddQYvEPczHSWh3DpPlfLU
NmfqPD108qVDQi79oKFfPqN4J/a+6GM1NpsORS4f5RnRKpjp88LWMFp2BZOr13uN
pyFtm8ouYoXrKu3VkFvTIjAfObDygk/2O5SVB0kL6VjCnIHD25Qzvkk3ivKER7iq
DC3G3iSEkjBpwB6CaPpJHKCxCtwbj4nSEE7+cYBA13rkQ1rpOXCE/1r1TTLgFAkC
2yZTsa4P8YFN8mg=
=Zbyt
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/ehabkost/tags/machine-next-pull-request' into staging

x86 and machine queue, 2020-09-02

Bug fixes:
* Revert EPYC topology patches that caused regressions (Babu Moger)
* Memory leak fixes (Pan Nengyuan)

QOM Cleanups:
* Fix typo in AARCH64_CPU_GET_CLASS
* Rename QOM macros for consistency and/or to avoid conflicts with other symbols
* Move typedefs to header files
* Correct instance/class sizes

# gpg: Signature made Wed 02 Sep 2020 12:49:57 BST
# gpg: using RSA key 5A322FD5ABC4D3DBACCFD1AA2807936F984DC5A6
# gpg: issuer "ehabkost@redhat.com"
# gpg: Good signature from "Eduardo Habkost <ehabkost@redhat.com>" [full]
# Primary key fingerprint: 5A32 2FD5 ABC4 D3DB ACCF D1AA 2807 936F 984D C5A6

* remotes/ehabkost/tags/machine-next-pull-request:
  target/i386/sev: Plug memleak in sev_read_file_base64
  target/i386/cpu: Fix memleak in x86_cpu_class_check_missing_features
  virtio: add Virtio*BusClass sizes
  Revert "hw/i386: Update structures to save the number of nodes per package"
  Revert "hw/386: Add EPYC mode topology decoding functions"
  Revert "target/i386: Cleanup and use the EPYC mode topology functions"
  Revert "hw/i386: Introduce apicid functions inside X86MachineState"
  Revert "i386: Introduce use_epyc_apic_id_encoding in X86CPUDefinition"
  Revert "hw/i386: Move arch_id decode inside x86_cpus_init"
  Revert "target/i386: Enable new apic id encoding for EPYC based cpus models"
  Revert "i386: Fix pkg_id offset for EPYC cpu models"
  tls-cipher-suites: Correct instance_size
  hda-audio: Set instance_size at base class
  rx: Move typedef RXCPU to cpu-qom.h
  rx: Rename QOM type check macros
  arm: Fix typo in AARCH64_CPU_GET_CLASS definition
  rdma: Rename INTERFACE_RDMA_PROVIDER_CLASS macro
  x86-iommu: Rename QOM type macros
  mos6522: Rename QOM macros
  imx_ccm: Rename IMX_GET_CLASS macro

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
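Editor's note: the "Correct instance/class sizes" and "virtio: add Virtio*BusClass sizes" items all fix the same QOM pitfall, a TypeInfo whose .instance_size or .class_size names the parent type instead of the type being registered, so the object or class struct is under-allocated. A minimal sketch of the rule those patches enforce follows; FooDevice and the "foo-device" type are hypothetical names, not types from this series.

```c
/* Sketch only: .instance_size/.class_size must name the leaf structs
 * being registered, not the parent's, or fields added by the subclass
 * end up past the end of the allocation. Assumes QEMU's qdev headers. */
#include "qemu/osdep.h"
#include "hw/qdev-core.h"

typedef struct FooDevice {
    DeviceState parent_obj;   /* parent state embedded first */
    uint32_t foo_reg;         /* subclass field needing extra space */
} FooDevice;

typedef struct FooDeviceClass {
    DeviceClass parent_class;
    void (*foo_reset)(FooDevice *dev);
} FooDeviceClass;

static const TypeInfo foo_device_info = {
    .name          = "foo-device",
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(FooDevice),      /* not sizeof(DeviceState) */
    .class_size    = sizeof(FooDeviceClass), /* not sizeof(DeviceClass) */
};
```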
commit ed215cec0f
@@ -108,7 +108,7 @@ static void qcrypto_tls_cipher_suites_class_init(ObjectClass *oc, void *data)
 static const TypeInfo qcrypto_tls_cipher_suites_info = {
     .parent = TYPE_QCRYPTO_TLS_CREDS,
     .name = TYPE_QCRYPTO_TLS_CIPHER_SUITES,
-    .instance_size = sizeof(QCryptoTLSCreds),
+    .instance_size = sizeof(QCryptoTLSCipherSuites),
     .class_size = sizeof(QCryptoTLSCredsClass),
     .class_init = qcrypto_tls_cipher_suites_class_init,
     .interfaces = (InterfaceInfo[]) {
@@ -898,6 +898,7 @@ static void hda_audio_base_class_init(ObjectClass *klass, void *data)
 static const TypeInfo hda_audio_info = {
     .name = TYPE_HDA_AUDIO,
     .parent = TYPE_HDA_CODEC_DEVICE,
+    .instance_size = sizeof(HDAAudioState),
     .class_init = hda_audio_base_class_init,
     .abstract = true,
 };
@@ -914,7 +915,6 @@ static void hda_audio_output_class_init(ObjectClass *klass, void *data)
 static const TypeInfo hda_audio_output_info = {
     .name = "hda-output",
     .parent = TYPE_HDA_AUDIO,
-    .instance_size = sizeof(HDAAudioState),
     .class_init = hda_audio_output_class_init,
 };
 
@@ -930,7 +930,6 @@ static void hda_audio_duplex_class_init(ObjectClass *klass, void *data)
 static const TypeInfo hda_audio_duplex_info = {
     .name = "hda-duplex",
     .parent = TYPE_HDA_AUDIO,
-    .instance_size = sizeof(HDAAudioState),
     .class_init = hda_audio_duplex_class_init,
 };
 
@@ -946,7 +945,6 @@ static void hda_audio_micro_class_init(ObjectClass *klass, void *data)
 static const TypeInfo hda_audio_micro_info = {
     .name = "hda-micro",
     .parent = TYPE_HDA_AUDIO,
-    .instance_size = sizeof(HDAAudioState),
     .class_init = hda_audio_micro_class_init,
 };
 
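The hda-audio hunks above are the complementary case: when every subtype shares the same state struct, the abstract base TypeInfo can carry .instance_size once and the concrete "hda-output"/"hda-duplex"/"hda-micro" types inherit it. A hedged sketch of that inheritance, using made-up type names and assuming QEMU's qdev headers:

```c
/* Sketch: an abstract base type sets instance_size once; concrete
 * subtypes that add no state omit it and inherit the base's size.
 * "bar-codec"/"bar-output" and BarCodecState are illustrative only. */
typedef struct BarCodecState {
    DeviceState parent_obj;
    int volume;
} BarCodecState;

static void bar_output_class_init(ObjectClass *oc, void *data)
{
    /* per-subtype class setup would go here */
}

static const TypeInfo bar_codec_base_info = {
    .name          = "bar-codec",
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(BarCodecState), /* shared by all subtypes */
    .abstract      = true,                  /* never instantiated directly */
};

static const TypeInfo bar_output_info = {
    .name       = "bar-output",
    .parent     = "bar-codec",              /* instance_size inherited */
    .class_init = bar_output_class_init,
};
```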
@@ -1600,7 +1600,7 @@ static void amdvi_instance_init(Object *klass)
 static void amdvi_class_init(ObjectClass *klass, void* data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
-    X86IOMMUClass *dc_class = X86_IOMMU_CLASS(klass);
+    X86IOMMUClass *dc_class = X86_IOMMU_DEVICE_CLASS(klass);
 
     dc->reset = amdvi_reset;
     dc->vmsd = &vmstate_amdvi;
@@ -3853,7 +3853,7 @@ static void vtd_realize(DeviceState *dev, Error **errp)
 static void vtd_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
-    X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass);
+    X86IOMMUClass *x86_class = X86_IOMMU_DEVICE_CLASS(klass);
 
     dc->reset = vtd_reset;
     dc->vmsd = &vtd_vmstate;
@@ -1501,8 +1501,6 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
     init_topo_info(&topo_info, x86ms);
 
     env->nr_dies = x86ms->smp_dies;
-    env->nr_nodes = topo_info.nodes_per_pkg;
-    env->pkg_offset = x86ms->apicid_pkg_offset(&topo_info);
 
     /*
      * If APIC ID is not set,
@@ -1557,14 +1555,14 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
         topo_ids.die_id = cpu->die_id;
         topo_ids.core_id = cpu->core_id;
         topo_ids.smt_id = cpu->thread_id;
-        cpu->apic_id = x86ms->apicid_from_topo_ids(&topo_info, &topo_ids);
+        cpu->apic_id = x86_apicid_from_topo_ids(&topo_info, &topo_ids);
     }
 
     cpu_slot = pc_find_cpu_slot(MACHINE(pcms), cpu->apic_id, &idx);
     if (!cpu_slot) {
         MachineState *ms = MACHINE(pcms);
 
-        x86ms->topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
+        x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
         error_setg(errp,
             "Invalid CPU [socket: %u, die: %u, core: %u, thread: %u] with"
             " APIC ID %" PRIu32 ", valid index range 0:%d",
@@ -1585,7 +1583,7 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
     /* TODO: move socket_id/core_id/thread_id checks into x86_cpu_realizefn()
      * once -smp refactoring is complete and there will be CPU private
      * CPUState::nr_cores and CPUState::nr_threads fields instead of globals */
-    x86ms->topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
+    x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
     if (cpu->socket_id != -1 && cpu->socket_id != topo_ids.pkg_id) {
         error_setg(errp, "property socket-id: %u doesn't match set apic-id:"
             " 0x%x (socket-id: %u)", cpu->socket_id, cpu->apic_id,
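The pc.c hunks above replace the per-machine x86ms->topo_ids_from_apicid / apicid_from_topo_ids hooks with direct calls to the default encoders. For readers unfamiliar with that encoding, here is a small standalone sketch of how a default-style APIC ID packs SMT/core/die/package bit fields; the helper names and sample counts are illustrative, not taken from this series.

```c
/* Toy model of default APIC ID packing: contiguous bit fields
 * [pkg | die | core | smt], each just wide enough for its count.
 * Not QEMU code; it mimics what x86_apicid_from_topo_ids()-style
 * and x86_topo_ids_from_apicid()-style helpers compute. */
#include <stdio.h>
#include <stdint.h>

static unsigned bitwidth_for(unsigned count)
{
    unsigned w = 0;
    while ((1u << w) < count) {
        w++;
    }
    return w;
}

int main(void)
{
    unsigned threads = 2, cores = 6, dies = 1;
    unsigned smt_w  = bitwidth_for(threads);  /* 1 bit  */
    unsigned core_w = bitwidth_for(cores);    /* 3 bits */
    unsigned die_w  = bitwidth_for(dies);     /* 0 bits */

    unsigned core_off = smt_w;
    unsigned die_off  = core_off + core_w;
    unsigned pkg_off  = die_off + die_w;

    /* encode pkg 1, die 0, core 5, thread 1 */
    uint32_t apic_id = (1u << pkg_off) | (0u << die_off) |
                       (5u << core_off) | 1u;
    printf("apic_id = 0x%x\n", apic_id);          /* prints 0x1b */

    /* decode it back by masking each field out again */
    printf("thread=%u core=%u pkg=%u\n",
           apic_id & ((1u << smt_w) - 1),
           (apic_id >> core_off) & ((1u << core_w) - 1),
           apic_id >> pkg_off);
    return 0;
}
```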
@@ -107,7 +107,7 @@ IommuType x86_iommu_get_type(void)
 static void x86_iommu_realize(DeviceState *dev, Error **errp)
 {
     X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
-    X86IOMMUClass *x86_class = X86_IOMMU_GET_CLASS(dev);
+    X86IOMMUClass *x86_class = X86_IOMMU_DEVICE_GET_CLASS(dev);
     MachineState *ms = MACHINE(qdev_get_machine());
     MachineClass *mc = MACHINE_GET_CLASS(ms);
     PCMachineState *pcms =
@ -62,28 +62,11 @@ inline void init_topo_info(X86CPUTopoInfo *topo_info,
|
||||
{
|
||||
MachineState *ms = MACHINE(x86ms);
|
||||
|
||||
topo_info->nodes_per_pkg = ms->numa_state->num_nodes / ms->smp.sockets;
|
||||
topo_info->dies_per_pkg = x86ms->smp_dies;
|
||||
topo_info->cores_per_die = ms->smp.cores;
|
||||
topo_info->threads_per_core = ms->smp.threads;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up with the new EPYC topology handlers
|
||||
*
|
||||
* AMD uses different apic id encoding for EPYC based cpus. Override
|
||||
* the default topo handlers with EPYC encoding handlers.
|
||||
*/
|
||||
static void x86_set_epyc_topo_handlers(MachineState *machine)
|
||||
{
|
||||
X86MachineState *x86ms = X86_MACHINE(machine);
|
||||
|
||||
x86ms->apicid_from_cpu_idx = x86_apicid_from_cpu_idx_epyc;
|
||||
x86ms->topo_ids_from_apicid = x86_topo_ids_from_apicid_epyc;
|
||||
x86ms->apicid_from_topo_ids = x86_apicid_from_topo_ids_epyc;
|
||||
x86ms->apicid_pkg_offset = apicid_pkg_offset_epyc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculates initial APIC ID for a specific CPU index
|
||||
*
|
||||
@ -102,7 +85,7 @@ uint32_t x86_cpu_apic_id_from_index(X86MachineState *x86ms,
|
||||
|
||||
init_topo_info(&topo_info, x86ms);
|
||||
|
||||
correct_id = x86ms->apicid_from_cpu_idx(&topo_info, cpu_index);
|
||||
correct_id = x86_apicid_from_cpu_idx(&topo_info, cpu_index);
|
||||
if (x86mc->compat_apic_id_mode) {
|
||||
if (cpu_index != correct_id && !warned && !qtest_enabled()) {
|
||||
error_report("APIC IDs set in compatibility mode, "
|
||||
@ -136,11 +119,6 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version)
|
||||
MachineState *ms = MACHINE(x86ms);
|
||||
MachineClass *mc = MACHINE_GET_CLASS(x86ms);
|
||||
|
||||
/* Check for apicid encoding */
|
||||
if (cpu_x86_use_epyc_apic_id_encoding(ms->cpu_type)) {
|
||||
x86_set_epyc_topo_handlers(ms);
|
||||
}
|
||||
|
||||
x86_cpu_set_default_version(default_cpu_version);
|
||||
|
||||
/*
|
||||
@ -154,12 +132,6 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version)
|
||||
x86ms->apic_id_limit = x86_cpu_apic_id_from_index(x86ms,
|
||||
ms->smp.max_cpus - 1) + 1;
|
||||
possible_cpus = mc->possible_cpu_arch_ids(ms);
|
||||
|
||||
for (i = 0; i < ms->possible_cpus->len; i++) {
|
||||
ms->possible_cpus->cpus[i].arch_id =
|
||||
x86_cpu_apic_id_from_index(x86ms, i);
|
||||
}
|
||||
|
||||
for (i = 0; i < ms->smp.cpus; i++) {
|
||||
x86_cpu_new(x86ms, possible_cpus->cpus[i].arch_id, &error_fatal);
|
||||
}
|
||||
@ -184,7 +156,8 @@ int64_t x86_get_default_cpu_node_id(const MachineState *ms, int idx)
|
||||
init_topo_info(&topo_info, x86ms);
|
||||
|
||||
assert(idx < ms->possible_cpus->len);
|
||||
x86_topo_ids_from_idx(&topo_info, idx, &topo_ids);
|
||||
x86_topo_ids_from_apicid(ms->possible_cpus->cpus[idx].arch_id,
|
||||
&topo_info, &topo_ids);
|
||||
return topo_ids.pkg_id % ms->numa_state->num_nodes;
|
||||
}
|
||||
|
||||
@ -215,7 +188,10 @@ const CPUArchIdList *x86_possible_cpu_arch_ids(MachineState *ms)
|
||||
|
||||
ms->possible_cpus->cpus[i].type = ms->cpu_type;
|
||||
ms->possible_cpus->cpus[i].vcpus_count = 1;
|
||||
x86_topo_ids_from_idx(&topo_info, i, &topo_ids);
|
||||
ms->possible_cpus->cpus[i].arch_id =
|
||||
x86_cpu_apic_id_from_index(x86ms, i);
|
||||
x86_topo_ids_from_apicid(ms->possible_cpus->cpus[i].arch_id,
|
||||
&topo_info, &topo_ids);
|
||||
ms->possible_cpus->cpus[i].props.has_socket_id = true;
|
||||
ms->possible_cpus->cpus[i].props.socket_id = topo_ids.pkg_id;
|
||||
if (x86ms->smp_dies > 1) {
|
||||
@ -919,11 +895,6 @@ static void x86_machine_initfn(Object *obj)
|
||||
x86ms->smm = ON_OFF_AUTO_AUTO;
|
||||
x86ms->acpi = ON_OFF_AUTO_AUTO;
|
||||
x86ms->smp_dies = 1;
|
||||
|
||||
x86ms->apicid_from_cpu_idx = x86_apicid_from_cpu_idx;
|
||||
x86ms->topo_ids_from_apicid = x86_topo_ids_from_apicid;
|
||||
x86ms->apicid_from_topo_ids = x86_apicid_from_topo_ids;
|
||||
x86ms->apicid_pkg_offset = apicid_pkg_offset;
|
||||
}
|
||||
|
||||
static void x86_machine_class_init(ObjectClass *oc, void *data)
|
||||
|
@@ -32,7 +32,7 @@
 uint32_t imx_ccm_get_clock_frequency(IMXCCMState *dev, IMXClk clock)
 {
     uint32_t freq = 0;
-    IMXCCMClass *klass = IMX_GET_CLASS(dev);
+    IMXCCMClass *klass = IMX_CCM_GET_CLASS(dev);
 
     if (klass->get_clock_frequency) {
         freq = klass->get_clock_frequency(dev, clock);
@ -328,7 +328,7 @@ static void via1_VBL(void *opaque)
|
||||
{
|
||||
MOS6522Q800VIA1State *v1s = opaque;
|
||||
MOS6522State *s = MOS6522(v1s);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(s);
|
||||
MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
|
||||
|
||||
s->ifr |= VIA1_IRQ_VBLANK;
|
||||
mdc->update_irq(s);
|
||||
@ -340,7 +340,7 @@ static void via1_one_second(void *opaque)
|
||||
{
|
||||
MOS6522Q800VIA1State *v1s = opaque;
|
||||
MOS6522State *s = MOS6522(v1s);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(s);
|
||||
MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
|
||||
|
||||
s->ifr |= VIA1_IRQ_ONE_SECOND;
|
||||
mdc->update_irq(s);
|
||||
@ -352,7 +352,7 @@ static void via1_irq_request(void *opaque, int irq, int level)
|
||||
{
|
||||
MOS6522Q800VIA1State *v1s = opaque;
|
||||
MOS6522State *s = MOS6522(v1s);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(s);
|
||||
MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
|
||||
|
||||
if (level) {
|
||||
s->ifr |= 1 << irq;
|
||||
@ -367,7 +367,7 @@ static void via2_irq_request(void *opaque, int irq, int level)
|
||||
{
|
||||
MOS6522Q800VIA2State *v2s = opaque;
|
||||
MOS6522State *s = MOS6522(v2s);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(s);
|
||||
MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
|
||||
|
||||
if (level) {
|
||||
s->ifr |= 1 << irq;
|
||||
@ -1183,7 +1183,7 @@ static TypeInfo mac_via_info = {
|
||||
static void mos6522_q800_via1_reset(DeviceState *dev)
|
||||
{
|
||||
MOS6522State *ms = MOS6522(dev);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(ms);
|
||||
MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms);
|
||||
|
||||
mdc->parent_reset(dev);
|
||||
|
||||
@ -1226,7 +1226,7 @@ static void mos6522_q800_via2_portB_write(MOS6522State *s)
|
||||
static void mos6522_q800_via2_reset(DeviceState *dev)
|
||||
{
|
||||
MOS6522State *ms = MOS6522(dev);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(ms);
|
||||
MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms);
|
||||
|
||||
mdc->parent_reset(dev);
|
||||
|
||||
@ -1246,7 +1246,7 @@ static void mos6522_q800_via2_init(Object *obj)
|
||||
static void mos6522_q800_via2_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(oc);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_CLASS(oc);
|
||||
MOS6522DeviceClass *mdc = MOS6522_CLASS(oc);
|
||||
|
||||
dc->reset = mos6522_q800_via2_reset;
|
||||
mdc->portB_write = mos6522_q800_via2_portB_write;
|
||||
|
@ -96,7 +96,7 @@ static void cuda_set_sr_int(void *opaque)
|
||||
CUDAState *s = opaque;
|
||||
MOS6522CUDAState *mcs = &s->mos6522_cuda;
|
||||
MOS6522State *ms = MOS6522(mcs);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(ms);
|
||||
MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms);
|
||||
|
||||
mdc->set_sr_int(ms);
|
||||
}
|
||||
@ -592,7 +592,7 @@ static void mos6522_cuda_portB_write(MOS6522State *s)
|
||||
static void mos6522_cuda_reset(DeviceState *dev)
|
||||
{
|
||||
MOS6522State *ms = MOS6522(dev);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(ms);
|
||||
MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms);
|
||||
|
||||
mdc->parent_reset(dev);
|
||||
|
||||
@ -603,7 +603,7 @@ static void mos6522_cuda_reset(DeviceState *dev)
|
||||
static void mos6522_cuda_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(oc);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_CLASS(oc);
|
||||
MOS6522DeviceClass *mdc = MOS6522_CLASS(oc);
|
||||
|
||||
dc->reset = mos6522_cuda_reset;
|
||||
mdc->portB_write = mos6522_cuda_portB_write;
|
||||
|
@ -75,7 +75,7 @@ static void via_set_sr_int(void *opaque)
|
||||
PMUState *s = opaque;
|
||||
MOS6522PMUState *mps = MOS6522_PMU(&s->mos6522_pmu);
|
||||
MOS6522State *ms = MOS6522(mps);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(ms);
|
||||
MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms);
|
||||
|
||||
mdc->set_sr_int(ms);
|
||||
}
|
||||
@ -834,7 +834,7 @@ static void mos6522_pmu_reset(DeviceState *dev)
|
||||
MOS6522State *ms = MOS6522(dev);
|
||||
MOS6522PMUState *mps = container_of(ms, MOS6522PMUState, parent_obj);
|
||||
PMUState *s = container_of(mps, PMUState, mos6522_pmu);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(ms);
|
||||
MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(ms);
|
||||
|
||||
mdc->parent_reset(dev);
|
||||
|
||||
@ -847,7 +847,7 @@ static void mos6522_pmu_reset(DeviceState *dev)
|
||||
static void mos6522_pmu_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(oc);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_CLASS(oc);
|
||||
MOS6522DeviceClass *mdc = MOS6522_CLASS(oc);
|
||||
|
||||
dc->reset = mos6522_pmu_reset;
|
||||
mdc->portB_write = mos6522_pmu_portB_write;
|
||||
|
@ -54,7 +54,7 @@ static void mos6522_update_irq(MOS6522State *s)
|
||||
|
||||
static uint64_t get_counter_value(MOS6522State *s, MOS6522Timer *ti)
|
||||
{
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(s);
|
||||
MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
|
||||
|
||||
if (ti->index == 0) {
|
||||
return mdc->get_timer1_counter_value(s, ti);
|
||||
@ -65,7 +65,7 @@ static uint64_t get_counter_value(MOS6522State *s, MOS6522Timer *ti)
|
||||
|
||||
static uint64_t get_load_time(MOS6522State *s, MOS6522Timer *ti)
|
||||
{
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(s);
|
||||
MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
|
||||
|
||||
if (ti->index == 0) {
|
||||
return mdc->get_timer1_load_time(s, ti);
|
||||
@ -313,7 +313,7 @@ uint64_t mos6522_read(void *opaque, hwaddr addr, unsigned size)
|
||||
void mos6522_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
|
||||
{
|
||||
MOS6522State *s = opaque;
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_GET_CLASS(s);
|
||||
MOS6522DeviceClass *mdc = MOS6522_GET_CLASS(s);
|
||||
|
||||
trace_mos6522_write(addr, val);
|
||||
|
||||
@ -498,7 +498,7 @@ static Property mos6522_properties[] = {
|
||||
static void mos6522_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
DeviceClass *dc = DEVICE_CLASS(oc);
|
||||
MOS6522DeviceClass *mdc = MOS6522_DEVICE_CLASS(oc);
|
||||
MOS6522DeviceClass *mdc = MOS6522_CLASS(oc);
|
||||
|
||||
dc->reset = mos6522_reset;
|
||||
dc->vmsd = &vmstate_mos6522;
|
||||
|
@@ -681,7 +681,7 @@ static void pvrdma_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
     PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
-    RdmaProviderClass *ir = INTERFACE_RDMA_PROVIDER_CLASS(klass);
+    RdmaProviderClass *ir = RDMA_PROVIDER_CLASS(klass);
 
     k->realize = pvrdma_realize;
     k->vendor_id = PCI_VENDOR_ID_VMWARE;
@@ -118,7 +118,7 @@ static void rx_gdbsim_init(MachineState *machine)
      * the latter half of the SDRAM space.
      */
     kernel_offset = machine->ram_size / 2;
-    rx_load_image(RXCPU(first_cpu), kernel_filename,
+    rx_load_image(RX_CPU(first_cpu), kernel_filename,
                   SDRAM_BASE + kernel_offset, kernel_offset);
     if (dtb_filename) {
         ram_addr_t dtb_offset;
@@ -141,7 +141,7 @@ static void rx_gdbsim_init(MachineState *machine)
             rom_add_blob_fixed("dtb", dtb, dtb_size,
                                SDRAM_BASE + dtb_offset);
             /* Set dtb address to R1 */
-            RXCPU(first_cpu)->env.regs[1] = SDRAM_BASE + dtb_offset;
+            RX_CPU(first_cpu)->env.regs[1] = SDRAM_BASE + dtb_offset;
         }
     }
 }
@@ -1237,6 +1237,7 @@ static const TypeInfo virtio_ccw_bus_info = {
     .name = TYPE_VIRTIO_CCW_BUS,
     .parent = TYPE_VIRTIO_BUS,
     .instance_size = sizeof(VirtioCcwBusState),
+    .class_size = sizeof(VirtioCcwBusClass),
     .class_init = virtio_ccw_bus_class_init,
 };
 
@@ -2133,6 +2133,7 @@ static const TypeInfo virtio_pci_bus_info = {
     .name = TYPE_VIRTIO_PCI_BUS,
     .parent = TYPE_VIRTIO_BUS,
     .instance_size = sizeof(VirtioPCIBusState),
+    .class_size = sizeof(VirtioPCIBusClass),
     .class_init = virtio_pci_bus_class_init,
 };
 
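The two hunks above add the missing .class_size for the virtio bus types. The usual reason this matters: when class_init casts the ObjectClass to a larger subclass class struct and writes its extra members, the allocation must already be that large, which is exactly what .class_size controls. A hedged sketch with invented names (BazBusClass, "baz-bus"), assuming QEMU's qdev headers:

```c
/* Sketch: a class struct that extends its parent's must be declared
 * via .class_size, otherwise class_init writes past the allocation.
 * BazBusClass/baz_bus_class_init are illustrative, not QEMU types. */
typedef struct BazBusClass {
    BusClass parent_class;
    bool (*has_extra_feature)(DeviceState *dev); /* extra class member */
} BazBusClass;

static void baz_bus_class_init(ObjectClass *oc, void *data)
{
    BazBusClass *k = (BazBusClass *)oc; /* needs sizeof(BazBusClass) bytes */
    k->has_extra_feature = NULL;
}

static const TypeInfo baz_bus_info = {
    .name       = "baz-bus",
    .parent     = TYPE_BUS,
    .class_size = sizeof(BazBusClass),  /* analogous to the lines added above */
    .class_init = baz_bus_class_init,
};
```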
@ -47,14 +47,12 @@ typedef uint32_t apic_id_t;
|
||||
|
||||
typedef struct X86CPUTopoIDs {
|
||||
unsigned pkg_id;
|
||||
unsigned node_id;
|
||||
unsigned die_id;
|
||||
unsigned core_id;
|
||||
unsigned smt_id;
|
||||
} X86CPUTopoIDs;
|
||||
|
||||
typedef struct X86CPUTopoInfo {
|
||||
unsigned nodes_per_pkg;
|
||||
unsigned dies_per_pkg;
|
||||
unsigned cores_per_die;
|
||||
unsigned threads_per_core;
|
||||
@ -89,11 +87,6 @@ static inline unsigned apicid_die_width(X86CPUTopoInfo *topo_info)
|
||||
return apicid_bitwidth_for_count(topo_info->dies_per_pkg);
|
||||
}
|
||||
|
||||
/* Bit width of the node_id field per socket */
|
||||
static inline unsigned apicid_node_width_epyc(X86CPUTopoInfo *topo_info)
|
||||
{
|
||||
return apicid_bitwidth_for_count(MAX(topo_info->nodes_per_pkg, 1));
|
||||
}
|
||||
/* Bit offset of the Core_ID field
|
||||
*/
|
||||
static inline unsigned apicid_core_offset(X86CPUTopoInfo *topo_info)
|
||||
@ -114,100 +107,6 @@ static inline unsigned apicid_pkg_offset(X86CPUTopoInfo *topo_info)
|
||||
return apicid_die_offset(topo_info) + apicid_die_width(topo_info);
|
||||
}
|
||||
|
||||
#define NODE_ID_OFFSET 3 /* Minimum node_id offset if numa configured */
|
||||
|
||||
/*
|
||||
* Bit offset of the node_id field
|
||||
*
|
||||
* Make sure nodes_per_pkg > 0 if numa configured else zero.
|
||||
*/
|
||||
static inline unsigned apicid_node_offset_epyc(X86CPUTopoInfo *topo_info)
|
||||
{
|
||||
unsigned offset = apicid_die_offset(topo_info) +
|
||||
apicid_die_width(topo_info);
|
||||
|
||||
if (topo_info->nodes_per_pkg) {
|
||||
return MAX(NODE_ID_OFFSET, offset);
|
||||
} else {
|
||||
return offset;
|
||||
}
|
||||
}
|
||||
|
||||
/* Bit offset of the Pkg_ID (socket ID) field */
|
||||
static inline unsigned apicid_pkg_offset_epyc(X86CPUTopoInfo *topo_info)
|
||||
{
|
||||
return apicid_node_offset_epyc(topo_info) +
|
||||
apicid_node_width_epyc(topo_info);
|
||||
}
|
||||
|
||||
/*
|
||||
* Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID
|
||||
*
|
||||
* The caller must make sure core_id < nr_cores and smt_id < nr_threads.
|
||||
*/
|
||||
static inline apic_id_t
|
||||
x86_apicid_from_topo_ids_epyc(X86CPUTopoInfo *topo_info,
|
||||
const X86CPUTopoIDs *topo_ids)
|
||||
{
|
||||
return (topo_ids->pkg_id << apicid_pkg_offset_epyc(topo_info)) |
|
||||
(topo_ids->node_id << apicid_node_offset_epyc(topo_info)) |
|
||||
(topo_ids->die_id << apicid_die_offset(topo_info)) |
|
||||
(topo_ids->core_id << apicid_core_offset(topo_info)) |
|
||||
topo_ids->smt_id;
|
||||
}
|
||||
|
||||
static inline void x86_topo_ids_from_idx_epyc(X86CPUTopoInfo *topo_info,
|
||||
unsigned cpu_index,
|
||||
X86CPUTopoIDs *topo_ids)
|
||||
{
|
||||
unsigned nr_nodes = MAX(topo_info->nodes_per_pkg, 1);
|
||||
unsigned nr_dies = topo_info->dies_per_pkg;
|
||||
unsigned nr_cores = topo_info->cores_per_die;
|
||||
unsigned nr_threads = topo_info->threads_per_core;
|
||||
unsigned cores_per_node = DIV_ROUND_UP((nr_dies * nr_cores * nr_threads),
|
||||
nr_nodes);
|
||||
|
||||
topo_ids->pkg_id = cpu_index / (nr_dies * nr_cores * nr_threads);
|
||||
topo_ids->node_id = (cpu_index / cores_per_node) % nr_nodes;
|
||||
topo_ids->die_id = cpu_index / (nr_cores * nr_threads) % nr_dies;
|
||||
topo_ids->core_id = cpu_index / nr_threads % nr_cores;
|
||||
topo_ids->smt_id = cpu_index % nr_threads;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate thread/core/package IDs for a specific topology,
|
||||
* based on APIC ID
|
||||
*/
|
||||
static inline void x86_topo_ids_from_apicid_epyc(apic_id_t apicid,
|
||||
X86CPUTopoInfo *topo_info,
|
||||
X86CPUTopoIDs *topo_ids)
|
||||
{
|
||||
topo_ids->smt_id = apicid &
|
||||
~(0xFFFFFFFFUL << apicid_smt_width(topo_info));
|
||||
topo_ids->core_id =
|
||||
(apicid >> apicid_core_offset(topo_info)) &
|
||||
~(0xFFFFFFFFUL << apicid_core_width(topo_info));
|
||||
topo_ids->die_id =
|
||||
(apicid >> apicid_die_offset(topo_info)) &
|
||||
~(0xFFFFFFFFUL << apicid_die_width(topo_info));
|
||||
topo_ids->node_id =
|
||||
(apicid >> apicid_node_offset_epyc(topo_info)) &
|
||||
~(0xFFFFFFFFUL << apicid_node_width_epyc(topo_info));
|
||||
topo_ids->pkg_id = apicid >> apicid_pkg_offset_epyc(topo_info);
|
||||
}
|
||||
|
||||
/*
|
||||
* Make APIC ID for the CPU 'cpu_index'
|
||||
*
|
||||
* 'cpu_index' is a sequential, contiguous ID for the CPU.
|
||||
*/
|
||||
static inline apic_id_t x86_apicid_from_cpu_idx_epyc(X86CPUTopoInfo *topo_info,
|
||||
unsigned cpu_index)
|
||||
{
|
||||
X86CPUTopoIDs topo_ids;
|
||||
x86_topo_ids_from_idx_epyc(topo_info, cpu_index, &topo_ids);
|
||||
return x86_apicid_from_topo_ids_epyc(topo_info, &topo_ids);
|
||||
}
|
||||
/* Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID
|
||||
*
|
||||
* The caller must make sure core_id < nr_cores and smt_id < nr_threads.
|
||||
|
@@ -27,9 +27,9 @@
 #define TYPE_X86_IOMMU_DEVICE ("x86-iommu")
 #define X86_IOMMU_DEVICE(obj) \
     OBJECT_CHECK(X86IOMMUState, (obj), TYPE_X86_IOMMU_DEVICE)
-#define X86_IOMMU_CLASS(klass) \
+#define X86_IOMMU_DEVICE_CLASS(klass) \
     OBJECT_CLASS_CHECK(X86IOMMUClass, (klass), TYPE_X86_IOMMU_DEVICE)
-#define X86_IOMMU_GET_CLASS(obj) \
+#define X86_IOMMU_DEVICE_GET_CLASS(obj) \
     OBJECT_GET_CLASS(X86IOMMUClass, obj, TYPE_X86_IOMMU_DEVICE)
 
 #define X86_IOMMU_SID_INVALID (0xffff)
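Several of the renames in this queue (X86_IOMMU_DEVICE_*, MOS6522_*, RX_CPU_*, RDMA_PROVIDER_*, IMX_CCM_*) bring the per-type cast macros in line with the usual QOM pattern, where all three checkers share one prefix so the names can be predicted from the type. A hedged reminder of that pattern, using a hypothetical "quux" type; QuuxState and QuuxClass are placeholders, not types from this series.

```c
/* The conventional QOM macro trio for a hypothetical TYPE_QUUX device:
 * same prefix for the instance cast, class cast, and class lookup. */
#include "qom/object.h"

#define TYPE_QUUX "quux"
#define QUUX(obj) \
    OBJECT_CHECK(QuuxState, (obj), TYPE_QUUX)
#define QUUX_CLASS(klass) \
    OBJECT_CLASS_CHECK(QuuxClass, (klass), TYPE_QUUX)
#define QUUX_GET_CLASS(obj) \
    OBJECT_GET_CLASS(QuuxClass, (obj), TYPE_QUUX)
```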
@@ -63,15 +63,6 @@ typedef struct {
     OnOffAuto smm;
     OnOffAuto acpi;
 
-    /* Apic id specific handlers */
-    uint32_t (*apicid_from_cpu_idx)(X86CPUTopoInfo *topo_info,
-                                    unsigned cpu_index);
-    void (*topo_ids_from_apicid)(apic_id_t apicid, X86CPUTopoInfo *topo_info,
-                                 X86CPUTopoIDs *topo_ids);
-    apic_id_t (*apicid_from_topo_ids)(X86CPUTopoInfo *topo_info,
-                                      const X86CPUTopoIDs *topo_ids);
-    uint32_t (*apicid_pkg_offset)(X86CPUTopoInfo *topo_info);
-
     /*
      * Address space used by IOAPIC device. All IOAPIC interrupts
      * will be translated to MSI messages in the address space.
@@ -31,7 +31,7 @@
     OBJECT_CHECK(IMXCCMState, (obj), TYPE_IMX_CCM)
 #define IMX_CCM_CLASS(klass) \
     OBJECT_CLASS_CHECK(IMXCCMClass, (klass), TYPE_IMX_CCM)
-#define IMX_GET_CLASS(obj) \
+#define IMX_CCM_GET_CLASS(obj) \
     OBJECT_GET_CLASS(IMXCCMClass, (obj), TYPE_IMX_CCM)
 
 typedef struct IMXCCMState {
@@ -140,9 +140,9 @@ typedef struct MOS6522DeviceClass {
     uint64_t (*get_timer2_load_time)(MOS6522State *dev, MOS6522Timer *ti);
 } MOS6522DeviceClass;
 
-#define MOS6522_DEVICE_CLASS(cls) \
+#define MOS6522_CLASS(cls) \
     OBJECT_CLASS_CHECK(MOS6522DeviceClass, (cls), TYPE_MOS6522)
-#define MOS6522_DEVICE_GET_CLASS(obj) \
+#define MOS6522_GET_CLASS(obj) \
     OBJECT_GET_CLASS(MOS6522DeviceClass, (obj), TYPE_MOS6522)
 
 extern const VMStateDescription vmstate_mos6522;
@@ -19,7 +19,7 @@
 
 #define INTERFACE_RDMA_PROVIDER "rdma"
 
-#define INTERFACE_RDMA_PROVIDER_CLASS(klass) \
+#define RDMA_PROVIDER_CLASS(klass) \
     OBJECT_CLASS_CHECK(RdmaProviderClass, (klass), \
                        INTERFACE_RDMA_PROVIDER)
 #define RDMA_PROVIDER_GET_CLASS(obj) \
@@ -67,7 +67,7 @@ typedef struct ARMCPU ARMCPU;
 #define AARCH64_CPU_CLASS(klass) \
     OBJECT_CLASS_CHECK(AArch64CPUClass, (klass), TYPE_AARCH64_CPU)
 #define AARCH64_CPU_GET_CLASS(obj) \
-    OBJECT_GET_CLASS(AArch64CPUClass, (obj), TYPE_AArch64_CPU)
+    OBJECT_GET_CLASS(AArch64CPUClass, (obj), TYPE_AARCH64_CPU)
 
 typedef struct AArch64CPUClass {
     /*< private >*/
@ -338,15 +338,68 @@ static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
|
||||
* Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
|
||||
* Define the constants to build the cpu topology. Right now, TOPOEXT
|
||||
* feature is enabled only on EPYC. So, these constants are based on
|
||||
* EPYC supported configurations. We may need to handle the cases if
|
||||
* these values change in future.
|
||||
*/
|
||||
/* Maximum core complexes in a node */
|
||||
#define MAX_CCX 2
|
||||
/* Maximum cores in a core complex */
|
||||
#define MAX_CORES_IN_CCX 4
|
||||
/* Maximum cores in a node */
|
||||
#define MAX_CORES_IN_NODE 8
|
||||
/* Maximum nodes in a socket */
|
||||
#define MAX_NODES_PER_SOCKET 4
|
||||
|
||||
/*
|
||||
* Figure out the number of nodes required to build this config.
|
||||
* Max cores in a node is 8
|
||||
*/
|
||||
static int nodes_in_socket(int nr_cores)
|
||||
{
|
||||
int nodes;
|
||||
|
||||
nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
|
||||
|
||||
/* Hardware does not support config with 3 nodes, return 4 in that case */
|
||||
return (nodes == 3) ? 4 : nodes;
|
||||
}
|
||||
|
||||
/*
|
||||
* Decide the number of cores in a core complex with the given nr_cores using
|
||||
* following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
|
||||
* MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
|
||||
* L3 cache is shared across all cores in a core complex. So, this will also
|
||||
* tell us how many cores are sharing the L3 cache.
|
||||
*/
|
||||
static int cores_in_core_complex(int nr_cores)
|
||||
{
|
||||
int nodes;
|
||||
|
||||
/* Check if we can fit all the cores in one core complex */
|
||||
if (nr_cores <= MAX_CORES_IN_CCX) {
|
||||
return nr_cores;
|
||||
}
|
||||
/* Get the number of nodes required to build this config */
|
||||
nodes = nodes_in_socket(nr_cores);
|
||||
|
||||
/*
|
||||
* Divide the cores accros all the core complexes
|
||||
* Return rounded up value
|
||||
*/
|
||||
return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
|
||||
}
|
||||
|
||||
/* Encode cache info for CPUID[8000001D] */
|
||||
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache,
|
||||
X86CPUTopoInfo *topo_info,
|
||||
uint32_t *eax, uint32_t *ebx,
|
||||
uint32_t *ecx, uint32_t *edx)
|
||||
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
|
||||
uint32_t *eax, uint32_t *ebx,
|
||||
uint32_t *ecx, uint32_t *edx)
|
||||
{
|
||||
uint32_t l3_cores;
|
||||
unsigned nodes = MAX(topo_info->nodes_per_pkg, 1);
|
||||
|
||||
assert(cache->size == cache->line_size * cache->associativity *
|
||||
cache->partitions * cache->sets);
|
||||
|
||||
@ -355,13 +408,10 @@ static void encode_cache_cpuid8000001d(CPUCacheInfo *cache,
|
||||
|
||||
/* L3 is shared among multiple cores */
|
||||
if (cache->level == 3) {
|
||||
l3_cores = DIV_ROUND_UP((topo_info->dies_per_pkg *
|
||||
topo_info->cores_per_die *
|
||||
topo_info->threads_per_core),
|
||||
nodes);
|
||||
*eax |= (l3_cores - 1) << 14;
|
||||
l3_cores = cores_in_core_complex(cs->nr_cores);
|
||||
*eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
|
||||
} else {
|
||||
*eax |= ((topo_info->threads_per_core - 1) << 14);
|
||||
*eax |= ((cs->nr_threads - 1) << 14);
|
||||
}
|
||||
|
||||
assert(cache->line_size > 0);
|
||||
@ -381,17 +431,55 @@ static void encode_cache_cpuid8000001d(CPUCacheInfo *cache,
|
||||
(cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
|
||||
}
|
||||
|
||||
/* Data structure to hold the configuration info for a given core index */
|
||||
struct core_topology {
|
||||
/* core complex id of the current core index */
|
||||
int ccx_id;
|
||||
/*
|
||||
* Adjusted core index for this core in the topology
|
||||
* This can be 0,1,2,3 with max 4 cores in a core complex
|
||||
*/
|
||||
int core_id;
|
||||
/* Node id for this core index */
|
||||
int node_id;
|
||||
/* Number of nodes in this config */
|
||||
int num_nodes;
|
||||
};
|
||||
|
||||
/*
|
||||
* Build the configuration closely match the EPYC hardware. Using the EPYC
|
||||
* hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
|
||||
* right now. This could change in future.
|
||||
* nr_cores : Total number of cores in the config
|
||||
* core_id : Core index of the current CPU
|
||||
* topo : Data structure to hold all the config info for this core index
|
||||
*/
|
||||
static void build_core_topology(int nr_cores, int core_id,
|
||||
struct core_topology *topo)
|
||||
{
|
||||
int nodes, cores_in_ccx;
|
||||
|
||||
/* First get the number of nodes required */
|
||||
nodes = nodes_in_socket(nr_cores);
|
||||
|
||||
cores_in_ccx = cores_in_core_complex(nr_cores);
|
||||
|
||||
topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
|
||||
topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
|
||||
topo->core_id = core_id % cores_in_ccx;
|
||||
topo->num_nodes = nodes;
|
||||
}
|
||||
|
||||
/* Encode cache info for CPUID[8000001E] */
|
||||
static void encode_topo_cpuid8000001e(X86CPUTopoInfo *topo_info, X86CPU *cpu,
|
||||
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
|
||||
uint32_t *eax, uint32_t *ebx,
|
||||
uint32_t *ecx, uint32_t *edx)
|
||||
{
|
||||
X86CPUTopoIDs topo_ids = {0};
|
||||
unsigned long nodes = MAX(topo_info->nodes_per_pkg, 1);
|
||||
struct core_topology topo = {0};
|
||||
unsigned long nodes;
|
||||
int shift;
|
||||
|
||||
x86_topo_ids_from_apicid_epyc(cpu->apic_id, topo_info, &topo_ids);
|
||||
|
||||
build_core_topology(cs->nr_cores, cpu->core_id, &topo);
|
||||
*eax = cpu->apic_id;
|
||||
/*
|
||||
* CPUID_Fn8000001E_EBX
|
||||
@ -408,8 +496,12 @@ static void encode_topo_cpuid8000001e(X86CPUTopoInfo *topo_info, X86CPU *cpu,
|
||||
* 3 Core complex id
|
||||
* 1:0 Core id
|
||||
*/
|
||||
*ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.node_id << 3) |
|
||||
(topo_ids.core_id);
|
||||
if (cs->nr_threads - 1) {
|
||||
*ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
|
||||
(topo.ccx_id << 2) | topo.core_id;
|
||||
} else {
|
||||
*ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
|
||||
}
|
||||
/*
|
||||
* CPUID_Fn8000001E_ECX
|
||||
* 31:11 Reserved
|
||||
@ -418,8 +510,9 @@ static void encode_topo_cpuid8000001e(X86CPUTopoInfo *topo_info, X86CPU *cpu,
|
||||
* 2 Socket id
|
||||
* 1:0 Node id
|
||||
*/
|
||||
if (nodes <= 4) {
|
||||
*ecx = ((nodes - 1) << 8) | (topo_ids.pkg_id << 2) | topo_ids.node_id;
|
||||
if (topo.num_nodes <= 4) {
|
||||
*ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
|
||||
topo.node_id;
|
||||
} else {
|
||||
/*
|
||||
* Node id fix up. Actual hardware supports up to 4 nodes. But with
|
||||
@ -434,10 +527,10 @@ static void encode_topo_cpuid8000001e(X86CPUTopoInfo *topo_info, X86CPU *cpu,
|
||||
* number of nodes. find_last_bit returns last set bit(0 based). Left
|
||||
* shift(+1) the socket id to represent all the nodes.
|
||||
*/
|
||||
nodes -= 1;
|
||||
nodes = topo.num_nodes - 1;
|
||||
shift = find_last_bit(&nodes, 8);
|
||||
*ecx = (nodes << 8) | (topo_ids.pkg_id << (shift + 1)) |
|
||||
topo_ids.node_id;
|
||||
*ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
|
||||
topo.node_id;
|
||||
}
|
||||
*edx = 0;
|
||||
}
|
||||
@ -1638,10 +1731,6 @@ typedef struct X86CPUDefinition {
|
||||
FeatureWordArray features;
|
||||
const char *model_id;
|
||||
CPUCaches *cache_info;
|
||||
|
||||
/* Use AMD EPYC encoding for apic id */
|
||||
bool use_epyc_apic_id_encoding;
|
||||
|
||||
/*
|
||||
* Definitions for alternative versions of CPU model.
|
||||
* List is terminated by item with version == 0.
|
||||
@ -1683,18 +1772,6 @@ static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition
|
||||
return def->versions ?: default_version_list;
|
||||
}
|
||||
|
||||
bool cpu_x86_use_epyc_apic_id_encoding(const char *cpu_type)
|
||||
{
|
||||
X86CPUClass *xcc = X86_CPU_CLASS(object_class_by_name(cpu_type));
|
||||
|
||||
assert(xcc);
|
||||
if (xcc->model && xcc->model->cpudef) {
|
||||
return xcc->model->cpudef->use_epyc_apic_id_encoding;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static CPUCaches epyc_cache_info = {
|
||||
.l1d_cache = &(CPUCacheInfo) {
|
||||
.type = DATA_CACHE,
|
||||
@ -3995,7 +4072,6 @@ static X86CPUDefinition builtin_x86_defs[] = {
|
||||
.xlevel = 0x8000001E,
|
||||
.model_id = "AMD EPYC Processor",
|
||||
.cache_info = &epyc_cache_info,
|
||||
.use_epyc_apic_id_encoding = 1,
|
||||
.versions = (X86CPUVersionDefinition[]) {
|
||||
{ .version = 1 },
|
||||
{
|
||||
@ -4123,7 +4199,6 @@ static X86CPUDefinition builtin_x86_defs[] = {
|
||||
.xlevel = 0x8000001E,
|
||||
.model_id = "AMD EPYC-Rome Processor",
|
||||
.cache_info = &epyc_rome_cache_info,
|
||||
.use_epyc_apic_id_encoding = 1,
|
||||
},
|
||||
};
|
||||
|
||||
@ -4872,6 +4947,7 @@ static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
|
||||
new->value = g_strdup("type");
|
||||
*next = new;
|
||||
next = &new->next;
|
||||
error_free(err);
|
||||
}
|
||||
|
||||
x86_cpu_filter_features(xc, false);
|
||||
@ -5489,7 +5565,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
||||
uint32_t signature[3];
|
||||
X86CPUTopoInfo topo_info;
|
||||
|
||||
topo_info.nodes_per_pkg = env->nr_nodes;
|
||||
topo_info.dies_per_pkg = env->nr_dies;
|
||||
topo_info.cores_per_die = cs->nr_cores;
|
||||
topo_info.threads_per_core = cs->nr_threads;
|
||||
@ -5678,7 +5753,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
||||
*ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
|
||||
break;
|
||||
case 1:
|
||||
*eax = env->pkg_offset;
|
||||
*eax = apicid_pkg_offset(&topo_info);
|
||||
*ebx = cs->nr_cores * cs->nr_threads;
|
||||
*ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
|
||||
break;
|
||||
@ -5712,7 +5787,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
||||
*ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
|
||||
break;
|
||||
case 2:
|
||||
*eax = env->pkg_offset;
|
||||
*eax = apicid_pkg_offset(&topo_info);
|
||||
*ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
|
||||
*ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
|
||||
break;
|
||||
@ -5889,11 +5964,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
||||
/*
|
||||
* Bits 15:12 is "The number of bits in the initial
|
||||
* Core::X86::Apic::ApicId[ApicId] value that indicate
|
||||
* thread ID within a package". This is already stored at
|
||||
* CPUX86State::pkg_offset.
|
||||
* thread ID within a package".
|
||||
* Bits 7:0 is "The number of threads in the package is NC+1"
|
||||
*/
|
||||
*ecx = (env->pkg_offset << 12) |
|
||||
*ecx = (apicid_pkg_offset(&topo_info) << 12) |
|
||||
((cs->nr_cores * cs->nr_threads) - 1);
|
||||
} else {
|
||||
*ecx = 0;
|
||||
@ -5921,20 +5995,20 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
||||
}
|
||||
switch (count) {
|
||||
case 0: /* L1 dcache info */
|
||||
encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache,
|
||||
&topo_info, eax, ebx, ecx, edx);
|
||||
encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
|
||||
eax, ebx, ecx, edx);
|
||||
break;
|
||||
case 1: /* L1 icache info */
|
||||
encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache,
|
||||
&topo_info, eax, ebx, ecx, edx);
|
||||
encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
|
||||
eax, ebx, ecx, edx);
|
||||
break;
|
||||
case 2: /* L2 cache info */
|
||||
encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache,
|
||||
&topo_info, eax, ebx, ecx, edx);
|
||||
encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
|
||||
eax, ebx, ecx, edx);
|
||||
break;
|
||||
case 3: /* L3 cache info */
|
||||
encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache,
|
||||
&topo_info, eax, ebx, ecx, edx);
|
||||
encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
|
||||
eax, ebx, ecx, edx);
|
||||
break;
|
||||
default: /* end of info */
|
||||
*eax = *ebx = *ecx = *edx = 0;
|
||||
@ -5943,7 +6017,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
|
||||
break;
|
||||
case 0x8000001E:
|
||||
assert(cpu->core_id <= 255);
|
||||
encode_topo_cpuid8000001e(&topo_info, cpu, eax, ebx, ecx, edx);
|
||||
encode_topo_cpuid8000001e(cs, cpu,
|
||||
eax, ebx, ecx, edx);
|
||||
break;
|
||||
case 0xC0000000:
|
||||
*eax = env->cpuid_xlevel2;
|
||||
@ -6949,7 +7024,6 @@ static void x86_cpu_initfn(Object *obj)
|
||||
FeatureWord w;
|
||||
|
||||
env->nr_dies = 1;
|
||||
env->nr_nodes = 1;
|
||||
cpu_set_cpustate_pointers(cpu);
|
||||
|
||||
object_property_add(obj, "family", "int",
|
||||
|
@ -1629,8 +1629,6 @@ typedef struct CPUX86State {
|
||||
TPRAccess tpr_access_type;
|
||||
|
||||
unsigned nr_dies;
|
||||
unsigned nr_nodes;
|
||||
unsigned pkg_offset;
|
||||
} CPUX86State;
|
||||
|
||||
struct kvm_msrs;
|
||||
@ -1919,7 +1917,6 @@ void cpu_clear_apic_feature(CPUX86State *env);
|
||||
void host_cpuid(uint32_t function, uint32_t count,
|
||||
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
|
||||
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping);
|
||||
bool cpu_x86_use_epyc_apic_id_encoding(const char *cpu_type);
|
||||
|
||||
/* helper.c */
|
||||
bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
|
||||
|
@@ -4607,7 +4607,7 @@ int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
     if (iommu) {
         int ret;
         MSIMessage src, dst;
-        X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);
+        X86IOMMUClass *class = X86_IOMMU_DEVICE_GET_CLASS(iommu);
 
         if (!class->int_remap) {
             return 0;
@@ -500,6 +500,7 @@ sev_read_file_base64(const char *filename, guchar **data, gsize *len)
 
     if (!g_file_get_contents(filename, &base64, &sz, &error)) {
         error_report("failed to read '%s' (%s)", filename, error->message);
+        g_error_free(error);
         return -1;
     }
 
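The sev.c hunk above is one of the two memory-leak fixes in this queue: the GError allocated by g_file_get_contents() was reported but never released on the failure path. A minimal standalone sketch of the pattern, using GLib directly; read_b64_file is a made-up name, not the QEMU function.

```c
/* Sketch of the leak pattern: g_file_get_contents() allocates a GError
 * on failure, and the caller must g_error_free() it before bailing out.
 * Build against GLib, e.g. `pkg-config --cflags --libs glib-2.0`. */
#include <glib.h>
#include <stdio.h>

static int read_b64_file(const char *filename, gchar **out, gsize *len)
{
    GError *error = NULL;

    if (!g_file_get_contents(filename, out, len, &error)) {
        fprintf(stderr, "failed to read '%s' (%s)\n",
                filename, error->message);
        g_error_free(error);   /* mirrors the g_error_free() added above */
        return -1;
    }
    return 0;
}
```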
@@ -25,11 +25,12 @@
 
 #define TYPE_RX62N_CPU RX_CPU_TYPE_NAME("rx62n")
 
-#define RXCPU_CLASS(klass) \
+typedef struct RXCPU RXCPU;
+#define RX_CPU_CLASS(klass) \
     OBJECT_CLASS_CHECK(RXCPUClass, (klass), TYPE_RX_CPU)
-#define RXCPU(obj) \
+#define RX_CPU(obj) \
     OBJECT_CHECK(RXCPU, (obj), TYPE_RX_CPU)
-#define RXCPU_GET_CLASS(obj) \
+#define RX_CPU_GET_CLASS(obj) \
     OBJECT_GET_CLASS(RXCPUClass, (obj), TYPE_RX_CPU)
 
 /*
@@ -28,14 +28,14 @@
 
 static void rx_cpu_set_pc(CPUState *cs, vaddr value)
 {
-    RXCPU *cpu = RXCPU(cs);
+    RXCPU *cpu = RX_CPU(cs);
 
     cpu->env.pc = value;
 }
 
 static void rx_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
 {
-    RXCPU *cpu = RXCPU(cs);
+    RXCPU *cpu = RX_CPU(cs);
 
     cpu->env.pc = tb->pc;
 }
@@ -48,8 +48,8 @@ static bool rx_cpu_has_work(CPUState *cs)
 
 static void rx_cpu_reset(DeviceState *dev)
 {
-    RXCPU *cpu = RXCPU(dev);
-    RXCPUClass *rcc = RXCPU_GET_CLASS(cpu);
+    RXCPU *cpu = RX_CPU(dev);
+    RXCPUClass *rcc = RX_CPU_GET_CLASS(cpu);
     CPURXState *env = &cpu->env;
     uint32_t *resetvec;
 
@@ -108,7 +108,7 @@ static ObjectClass *rx_cpu_class_by_name(const char *cpu_model)
 static void rx_cpu_realize(DeviceState *dev, Error **errp)
 {
     CPUState *cs = CPU(dev);
-    RXCPUClass *rcc = RXCPU_GET_CLASS(dev);
+    RXCPUClass *rcc = RX_CPU_GET_CLASS(dev);
     Error *local_err = NULL;
 
     cpu_exec_realizefn(cs, &local_err);
@@ -164,7 +164,7 @@ static bool rx_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
 static void rx_cpu_init(Object *obj)
 {
     CPUState *cs = CPU(obj);
-    RXCPU *cpu = RXCPU(obj);
+    RXCPU *cpu = RX_CPU(obj);
     CPURXState *env = &cpu->env;
 
     cpu_set_cpustate_pointers(cpu);
@@ -176,7 +176,7 @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
     CPUClass *cc = CPU_CLASS(klass);
-    RXCPUClass *rcc = RXCPU_CLASS(klass);
+    RXCPUClass *rcc = RX_CPU_CLASS(klass);
 
     device_class_set_parent_realize(dc, rx_cpu_realize,
                                     &rcc->parent_realize);
@@ -115,7 +115,6 @@ struct RXCPU {
     CPURXState env;
 };
 
-typedef struct RXCPU RXCPU;
 typedef RXCPU ArchCPU;
 
 #define ENV_OFFSET offsetof(RXCPU, env)
@@ -22,7 +22,7 @@
 
 int rx_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
 {
-    RXCPU *cpu = RXCPU(cs);
+    RXCPU *cpu = RX_CPU(cs);
     CPURXState *env = &cpu->env;
 
     switch (n) {
@@ -54,7 +54,7 @@ int rx_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
 
 int rx_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
 {
-    RXCPU *cpu = RXCPU(cs);
+    RXCPU *cpu = RX_CPU(cs);
     CPURXState *env = &cpu->env;
     uint32_t psw;
     switch (n) {
@@ -44,7 +44,7 @@ void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte)
 #define INT_FLAGS (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR)
 void rx_cpu_do_interrupt(CPUState *cs)
 {
-    RXCPU *cpu = RXCPU(cs);
+    RXCPU *cpu = RX_CPU(cs);
     CPURXState *env = &cpu->env;
     int do_irq = cs->interrupt_request & INT_FLAGS;
     uint32_t save_psw;
@@ -121,7 +121,7 @@ void rx_cpu_do_interrupt(CPUState *cs)
 
 bool rx_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 {
-    RXCPU *cpu = RXCPU(cs);
+    RXCPU *cpu = RX_CPU(cs);
     CPURXState *env = &cpu->env;
     int accept = 0;
     /* hardware interrupt (Normal) */
@@ -128,7 +128,7 @@ static int bdsp_s(DisasContext *ctx, int d)
 
 void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
 {
-    RXCPU *cpu = RXCPU(cs);
+    RXCPU *cpu = RX_CPU(cs);
     CPURXState *env = &cpu->env;
     int i;
     uint32_t psw;
@ -31,12 +31,12 @@ static void test_topo_bits(void)
|
||||
X86CPUTopoInfo topo_info = {0};
|
||||
|
||||
/* simple tests for 1 thread per core, 1 core per die, 1 die per package */
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 1, 1};
|
||||
topo_info = (X86CPUTopoInfo) {1, 1, 1};
|
||||
g_assert_cmpuint(apicid_smt_width(&topo_info), ==, 0);
|
||||
g_assert_cmpuint(apicid_core_width(&topo_info), ==, 0);
|
||||
g_assert_cmpuint(apicid_die_width(&topo_info), ==, 0);
|
||||
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 1, 1};
|
||||
topo_info = (X86CPUTopoInfo) {1, 1, 1};
|
||||
g_assert_cmpuint(x86_apicid_from_cpu_idx(&topo_info, 0), ==, 0);
|
||||
g_assert_cmpuint(x86_apicid_from_cpu_idx(&topo_info, 1), ==, 1);
|
||||
g_assert_cmpuint(x86_apicid_from_cpu_idx(&topo_info, 2), ==, 2);
|
||||
@ -45,39 +45,39 @@ static void test_topo_bits(void)
|
||||
|
||||
/* Test field width calculation for multiple values
|
||||
*/
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 1, 2};
|
||||
topo_info = (X86CPUTopoInfo) {1, 1, 2};
|
||||
g_assert_cmpuint(apicid_smt_width(&topo_info), ==, 1);
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 1, 3};
|
||||
topo_info = (X86CPUTopoInfo) {1, 1, 3};
|
||||
g_assert_cmpuint(apicid_smt_width(&topo_info), ==, 2);
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 1, 4};
|
||||
topo_info = (X86CPUTopoInfo) {1, 1, 4};
|
||||
g_assert_cmpuint(apicid_smt_width(&topo_info), ==, 2);
|
||||
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 1, 14};
|
||||
topo_info = (X86CPUTopoInfo) {1, 1, 14};
|
||||
g_assert_cmpuint(apicid_smt_width(&topo_info), ==, 4);
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 1, 15};
|
||||
topo_info = (X86CPUTopoInfo) {1, 1, 15};
|
||||
g_assert_cmpuint(apicid_smt_width(&topo_info), ==, 4);
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 1, 16};
|
||||
topo_info = (X86CPUTopoInfo) {1, 1, 16};
|
||||
g_assert_cmpuint(apicid_smt_width(&topo_info), ==, 4);
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 1, 17};
|
||||
topo_info = (X86CPUTopoInfo) {1, 1, 17};
|
||||
g_assert_cmpuint(apicid_smt_width(&topo_info), ==, 5);
|
||||
|
||||
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 30, 2};
|
||||
topo_info = (X86CPUTopoInfo) {1, 30, 2};
|
||||
g_assert_cmpuint(apicid_core_width(&topo_info), ==, 5);
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 31, 2};
|
||||
topo_info = (X86CPUTopoInfo) {1, 31, 2};
|
||||
g_assert_cmpuint(apicid_core_width(&topo_info), ==, 5);
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 32, 2};
|
||||
topo_info = (X86CPUTopoInfo) {1, 32, 2};
|
||||
g_assert_cmpuint(apicid_core_width(&topo_info), ==, 5);
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 33, 2};
|
||||
topo_info = (X86CPUTopoInfo) {1, 33, 2};
|
||||
g_assert_cmpuint(apicid_core_width(&topo_info), ==, 6);
|
||||
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 30, 2};
|
||||
topo_info = (X86CPUTopoInfo) {1, 30, 2};
|
||||
g_assert_cmpuint(apicid_die_width(&topo_info), ==, 0);
|
||||
topo_info = (X86CPUTopoInfo) {0, 2, 30, 2};
|
||||
topo_info = (X86CPUTopoInfo) {2, 30, 2};
|
||||
g_assert_cmpuint(apicid_die_width(&topo_info), ==, 1);
|
||||
topo_info = (X86CPUTopoInfo) {0, 3, 30, 2};
|
||||
topo_info = (X86CPUTopoInfo) {3, 30, 2};
|
||||
g_assert_cmpuint(apicid_die_width(&topo_info), ==, 2);
|
||||
topo_info = (X86CPUTopoInfo) {0, 4, 30, 2};
|
||||
topo_info = (X86CPUTopoInfo) {4, 30, 2};
|
||||
g_assert_cmpuint(apicid_die_width(&topo_info), ==, 2);
|
||||
|
||||
/* build a weird topology and see if IDs are calculated correctly
|
||||
@ -85,18 +85,18 @@ static void test_topo_bits(void)
|
||||
|
||||
/* This will use 2 bits for thread ID and 3 bits for core ID
|
||||
*/
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 6, 3};
|
||||
topo_info = (X86CPUTopoInfo) {1, 6, 3};
|
||||
g_assert_cmpuint(apicid_smt_width(&topo_info), ==, 2);
|
||||
g_assert_cmpuint(apicid_core_offset(&topo_info), ==, 2);
|
||||
g_assert_cmpuint(apicid_die_offset(&topo_info), ==, 5);
|
||||
g_assert_cmpuint(apicid_pkg_offset(&topo_info), ==, 5);
|
||||
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 6, 3};
|
||||
topo_info = (X86CPUTopoInfo) {1, 6, 3};
|
||||
g_assert_cmpuint(x86_apicid_from_cpu_idx(&topo_info, 0), ==, 0);
|
||||
g_assert_cmpuint(x86_apicid_from_cpu_idx(&topo_info, 1), ==, 1);
|
||||
g_assert_cmpuint(x86_apicid_from_cpu_idx(&topo_info, 2), ==, 2);
|
||||
|
||||
topo_info = (X86CPUTopoInfo) {0, 1, 6, 3};
|
||||
topo_info = (X86CPUTopoInfo) {1, 6, 3};
|
||||
g_assert_cmpuint(x86_apicid_from_cpu_idx(&topo_info, 1 * 3 + 0), ==,
|
||||
(1 << 2) | 0);
|
||||
g_assert_cmpuint(x86_apicid_from_cpu_idx(&topo_info, 1 * 3 + 1), ==,
|
||||
|