Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-2.9-20170301' into staging

ppc patch queue for 2017-03-01

I was hoping to get this pull request squeezed in before the soft freeze, but I ran into some difficulties during testing. Everything here was at least posted before the soft freeze, so I'm hoping we can still merge it for 2.9.

The biggest things here are:
  * Cleanups to handling of hashed page tables, that will make adding support for the POWER9 MMU easier
  * Cleanups to the XICS interrupt controller that will make implementing the powernv machine easier
  * TCG implementation of extended overflow and carry handling for POWER9

It also includes:
  * Increasing the CPU limit for pseries to 1024 vCPUs
  * Generating proper OF node names in qemu (making hotplug and coldplug logic closer together)

# gpg: Signature made Wed 01 Mar 2017 04:43:06 GMT
# gpg:                using RSA key 0x6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>"
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>"
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>"
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>"
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-2.9-20170301: (50 commits)
  Add PowerPC 32-bit guest memory dump support
  ppc/xics: rename 'ICPState *' variables to 'icp'
  ppc/xics: move InterruptStatsProvider to the sPAPR machine
  ppc/xics: move ics-simple post_load under the machine
  ppc/xics: remove the XICSState classes
  ppc/xics: export the XICS init routines
  ppc/xics: move the ICP array under the sPAPR machine
  ppc/xics: register the reset handler of ICP objects
  ppc/xics: simplify spapr_dt_xics() interface
  ppc/xics: use the QOM interface to grab an ICP
  ppc/xics: move the cpu_setup() handler under the ICPState class
  ppc/xics: simplify the cpu_setup() handler
  ppc/xics: move kernel_xics_fd out of KVMXICSState
  ppc/xics: extend the QOM interface to handle ICPs
  ppc/xics: remove the XICS list of ICS
  ppc/xics: register the reset handler of ICS objects
  ppc/xics: remove xics_find_source()
  ppc/xics: use the QOM interface to resend irqs
  ppc/xics: use the QOM interface to get irqs
  ppc/xics: use the QOM interface under the sPAPR machine
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in: commit ab711e216b

hw/intc/xics.c | 475
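The central idea of the XICS cleanups in the diff below is that interrupt sources (ICS) and per-CPU presentation controllers (ICP) no longer hang off a monolithic XICSState; instead the machine implements a small XICSFabric QOM interface that resolves them on demand. The following is a minimal sketch of that lookup pattern using plain C function-pointer tables standing in for the QOM classes; the struct and function names here are illustrative stand-ins, not the QEMU API verbatim.

```c
/* Sketch: a machine-owned "fabric" that callers query instead of a
 * global XICSState.  Opaque forward declarations stand in for the
 * real ICP/ICS device types. */
typedef struct ICPState ICPState;
typedef struct ICSState ICSState;

typedef struct XICSFabricOps {
    ICSState *(*ics_get)(void *machine, int irq);     /* source owning an irq */
    void      (*ics_resend)(void *machine);           /* replay pending irqs  */
    ICPState *(*icp_get)(void *machine, int server);  /* per-CPU presenter    */
} XICSFabricOps;

/* Generic helper, analogous in spirit to xics_icp_get() added below:
 * ask the machine for the presenter of a given server number. */
static ICPState *fabric_icp_get(const XICSFabricOps *ops, void *machine,
                                int server)
{
    return ops->icp_get(machine, server);
}
```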
@@ -49,40 +49,41 @@ int xics_get_cpu_index_by_dt_id(int cpu_dt_id)
     return -1;
 }
 
-void xics_cpu_destroy(XICSState *xics, PowerPCCPU *cpu)
+void xics_cpu_destroy(XICSFabric *xi, PowerPCCPU *cpu)
 {
     CPUState *cs = CPU(cpu);
-    ICPState *ss = &xics->ss[cs->cpu_index];
+    ICPState *icp = xics_icp_get(xi, cs->cpu_index);
 
-    assert(cs->cpu_index < xics->nr_servers);
-    assert(cs == ss->cs);
+    assert(icp);
+    assert(cs == icp->cs);
 
-    ss->output = NULL;
-    ss->cs = NULL;
+    icp->output = NULL;
+    icp->cs = NULL;
 }
 
-void xics_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
+void xics_cpu_setup(XICSFabric *xi, PowerPCCPU *cpu)
 {
     CPUState *cs = CPU(cpu);
     CPUPPCState *env = &cpu->env;
-    ICPState *ss = &xics->ss[cs->cpu_index];
-    XICSStateClass *info = XICS_COMMON_GET_CLASS(xics);
+    ICPState *icp = xics_icp_get(xi, cs->cpu_index);
+    ICPStateClass *icpc;
 
-    assert(cs->cpu_index < xics->nr_servers);
+    assert(icp);
 
-    ss->cs = cs;
+    icp->cs = cs;
 
-    if (info->cpu_setup) {
-        info->cpu_setup(xics, cpu);
+    icpc = ICP_GET_CLASS(icp);
+    if (icpc->cpu_setup) {
+        icpc->cpu_setup(icp, cpu);
     }
 
     switch (PPC_INPUT(env)) {
     case PPC_FLAGS_INPUT_POWER7:
-        ss->output = env->irq_inputs[POWER7_INPUT_INT];
+        icp->output = env->irq_inputs[POWER7_INPUT_INT];
         break;
 
     case PPC_FLAGS_INPUT_970:
-        ss->output = env->irq_inputs[PPC970_INPUT_INT];
+        icp->output = env->irq_inputs[PPC970_INPUT_INT];
         break;
 
     default:
@@ -92,185 +93,43 @@ void xics_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
     }
 }
 
-static void xics_common_pic_print_info(InterruptStatsProvider *obj,
-                                       Monitor *mon)
+void icp_pic_print_info(ICPState *icp, Monitor *mon)
 {
-    XICSState *xics = XICS_COMMON(obj);
-    ICSState *ics;
-    uint32_t i;
+    int cpu_index = icp->cs ? icp->cs->cpu_index : -1;
 
-    for (i = 0; i < xics->nr_servers; i++) {
-        ICPState *icp = &xics->ss[i];
-
-        if (!icp->output) {
-            continue;
-        }
-        monitor_printf(mon, "CPU %d XIRR=%08x (%p) PP=%02x MFRR=%02x\n",
-                       i, icp->xirr, icp->xirr_owner,
-                       icp->pending_priority, icp->mfrr);
+    if (!icp->output) {
+        return;
     }
+    monitor_printf(mon, "CPU %d XIRR=%08x (%p) PP=%02x MFRR=%02x\n",
+                   cpu_index, icp->xirr, icp->xirr_owner,
+                   icp->pending_priority, icp->mfrr);
+}
 
-    QLIST_FOREACH(ics, &xics->ics, list) {
-        monitor_printf(mon, "ICS %4x..%4x %p\n",
-                       ics->offset, ics->offset + ics->nr_irqs - 1, ics);
+void ics_pic_print_info(ICSState *ics, Monitor *mon)
+{
+    uint32_t i;
 
-        if (!ics->irqs) {
-            continue;
-        }
+    monitor_printf(mon, "ICS %4x..%4x %p\n",
+                   ics->offset, ics->offset + ics->nr_irqs - 1, ics);
 
-        for (i = 0; i < ics->nr_irqs; i++) {
-            ICSIRQState *irq = ics->irqs + i;
+    if (!ics->irqs) {
+        return;
+    }
 
-            if (!(irq->flags & XICS_FLAGS_IRQ_MASK)) {
-                continue;
-            }
-            monitor_printf(mon, " %4x %s %02x %02x\n",
-                           ics->offset + i,
-                           (irq->flags & XICS_FLAGS_IRQ_LSI) ?
-                           "LSI" : "MSI",
-                           irq->priority, irq->status);
+    for (i = 0; i < ics->nr_irqs; i++) {
+        ICSIRQState *irq = ics->irqs + i;
+
+        if (!(irq->flags & XICS_FLAGS_IRQ_MASK)) {
+            continue;
         }
+        monitor_printf(mon, " %4x %s %02x %02x\n",
+                       ics->offset + i,
+                       (irq->flags & XICS_FLAGS_IRQ_LSI) ?
+                       "LSI" : "MSI",
+                       irq->priority, irq->status);
     }
 }
 
-/*
- * XICS Common class - parent for emulated XICS and KVM-XICS
- */
-static void xics_common_reset(DeviceState *d)
-{
-    XICSState *xics = XICS_COMMON(d);
-    ICSState *ics;
-    int i;
-
-    for (i = 0; i < xics->nr_servers; i++) {
-        device_reset(DEVICE(&xics->ss[i]));
-    }
-
-    QLIST_FOREACH(ics, &xics->ics, list) {
-        device_reset(DEVICE(ics));
-    }
-}
-
-static void xics_prop_get_nr_irqs(Object *obj, Visitor *v, const char *name,
-                                  void *opaque, Error **errp)
-{
-    XICSState *xics = XICS_COMMON(obj);
-    int64_t value = xics->nr_irqs;
-
-    visit_type_int(v, name, &value, errp);
-}
-
-static void xics_prop_set_nr_irqs(Object *obj, Visitor *v, const char *name,
-                                  void *opaque, Error **errp)
-{
-    XICSState *xics = XICS_COMMON(obj);
-    XICSStateClass *info = XICS_COMMON_GET_CLASS(xics);
-    Error *error = NULL;
-    int64_t value;
-
-    visit_type_int(v, name, &value, &error);
-    if (error) {
-        error_propagate(errp, error);
-        return;
-    }
-    if (xics->nr_irqs) {
-        error_setg(errp, "Number of interrupts is already set to %u",
-                   xics->nr_irqs);
-        return;
-    }
-
-    assert(info->set_nr_irqs);
-    info->set_nr_irqs(xics, value, errp);
-}
-
-void xics_set_nr_servers(XICSState *xics, uint32_t nr_servers,
-                         const char *typename, Error **errp)
-{
-    int i;
-
-    xics->nr_servers = nr_servers;
-
-    xics->ss = g_malloc0(xics->nr_servers * sizeof(ICPState));
-    for (i = 0; i < xics->nr_servers; i++) {
-        char name[32];
-        ICPState *icp = &xics->ss[i];
-
-        object_initialize(icp, sizeof(*icp), typename);
-        snprintf(name, sizeof(name), "icp[%d]", i);
-        object_property_add_child(OBJECT(xics), name, OBJECT(icp), errp);
-        icp->xics = xics;
-    }
-}
-
-static void xics_prop_get_nr_servers(Object *obj, Visitor *v,
-                                     const char *name, void *opaque,
-                                     Error **errp)
-{
-    XICSState *xics = XICS_COMMON(obj);
-    int64_t value = xics->nr_servers;
-
-    visit_type_int(v, name, &value, errp);
-}
-
-static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
-                                     const char *name, void *opaque,
-                                     Error **errp)
-{
-    XICSState *xics = XICS_COMMON(obj);
-    XICSStateClass *xsc = XICS_COMMON_GET_CLASS(xics);
-    Error *error = NULL;
-    int64_t value;
-
-    visit_type_int(v, name, &value, &error);
-    if (error) {
-        error_propagate(errp, error);
-        return;
-    }
-    if (xics->nr_servers) {
-        error_setg(errp, "Number of servers is already set to %u",
-                   xics->nr_servers);
-        return;
-    }
-
-    assert(xsc->set_nr_servers);
-    xsc->set_nr_servers(xics, value, errp);
-}
-
-static void xics_common_initfn(Object *obj)
-{
-    XICSState *xics = XICS_COMMON(obj);
-
-    QLIST_INIT(&xics->ics);
-    object_property_add(obj, "nr_irqs", "int",
-                        xics_prop_get_nr_irqs, xics_prop_set_nr_irqs,
-                        NULL, NULL, NULL);
-    object_property_add(obj, "nr_servers", "int",
-                        xics_prop_get_nr_servers, xics_prop_set_nr_servers,
-                        NULL, NULL, NULL);
-}
-
-static void xics_common_class_init(ObjectClass *oc, void *data)
-{
-    DeviceClass *dc = DEVICE_CLASS(oc);
-    InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(oc);
-
-    dc->reset = xics_common_reset;
-    ic->print_info = xics_common_pic_print_info;
-}
-
-static const TypeInfo xics_common_info = {
-    .name = TYPE_XICS_COMMON,
-    .parent = TYPE_SYS_BUS_DEVICE,
-    .instance_size = sizeof(XICSState),
-    .class_size = sizeof(XICSStateClass),
-    .instance_init = xics_common_initfn,
-    .class_init = xics_common_class_init,
-    .interfaces = (InterfaceInfo[]) {
-        { TYPE_INTERRUPT_STATS_PROVIDER },
-        { }
-    },
-};
-
 /*
  * ICP: Presentation layer
  */
@@ -278,8 +137,8 @@ static const TypeInfo xics_common_info = {
 #define XISR_MASK  0x00ffffff
 #define CPPR_MASK  0xff000000
 
-#define XISR(ss)   (((ss)->xirr) & XISR_MASK)
-#define CPPR(ss)   (((ss)->xirr) >> 24)
+#define XISR(icp)   (((icp)->xirr) & XISR_MASK)
+#define CPPR(icp)   (((icp)->xirr) >> 24)
 
 static void ics_reject(ICSState *ics, uint32_t nr)
 {
@@ -290,7 +149,7 @@ static void ics_reject(ICSState *ics, uint32_t nr)
     }
 }
 
-static void ics_resend(ICSState *ics)
+void ics_resend(ICSState *ics)
 {
     ICSStateClass *k = ICS_BASE_GET_CLASS(ics);
 
@@ -308,151 +167,152 @@ static void ics_eoi(ICSState *ics, int nr)
     }
 }
 
-static void icp_check_ipi(ICPState *ss)
+static void icp_check_ipi(ICPState *icp)
 {
-    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
+    if (XISR(icp) && (icp->pending_priority <= icp->mfrr)) {
         return;
     }
 
-    trace_xics_icp_check_ipi(ss->cs->cpu_index, ss->mfrr);
+    trace_xics_icp_check_ipi(icp->cs->cpu_index, icp->mfrr);
 
-    if (XISR(ss) && ss->xirr_owner) {
-        ics_reject(ss->xirr_owner, XISR(ss));
+    if (XISR(icp) && icp->xirr_owner) {
+        ics_reject(icp->xirr_owner, XISR(icp));
     }
 
-    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
-    ss->pending_priority = ss->mfrr;
-    ss->xirr_owner = NULL;
-    qemu_irq_raise(ss->output);
+    icp->xirr = (icp->xirr & ~XISR_MASK) | XICS_IPI;
+    icp->pending_priority = icp->mfrr;
+    icp->xirr_owner = NULL;
+    qemu_irq_raise(icp->output);
 }
 
-static void icp_resend(ICPState *ss)
+void icp_resend(ICPState *icp)
 {
-    ICSState *ics;
+    XICSFabric *xi = icp->xics;
+    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);
 
-    if (ss->mfrr < CPPR(ss)) {
-        icp_check_ipi(ss);
-    }
-    QLIST_FOREACH(ics, &ss->xics->ics, list) {
-        ics_resend(ics);
+    if (icp->mfrr < CPPR(icp)) {
+        icp_check_ipi(icp);
     }
+
+    xic->ics_resend(xi);
 }
 
-void icp_set_cppr(ICPState *ss, uint8_t cppr)
+void icp_set_cppr(ICPState *icp, uint8_t cppr)
 {
     uint8_t old_cppr;
     uint32_t old_xisr;
 
-    old_cppr = CPPR(ss);
-    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);
+    old_cppr = CPPR(icp);
+    icp->xirr = (icp->xirr & ~CPPR_MASK) | (cppr << 24);
 
     if (cppr < old_cppr) {
-        if (XISR(ss) && (cppr <= ss->pending_priority)) {
-            old_xisr = XISR(ss);
-            ss->xirr &= ~XISR_MASK; /* Clear XISR */
-            ss->pending_priority = 0xff;
-            qemu_irq_lower(ss->output);
-            if (ss->xirr_owner) {
-                ics_reject(ss->xirr_owner, old_xisr);
-                ss->xirr_owner = NULL;
+        if (XISR(icp) && (cppr <= icp->pending_priority)) {
+            old_xisr = XISR(icp);
+            icp->xirr &= ~XISR_MASK; /* Clear XISR */
+            icp->pending_priority = 0xff;
+            qemu_irq_lower(icp->output);
+            if (icp->xirr_owner) {
+                ics_reject(icp->xirr_owner, old_xisr);
+                icp->xirr_owner = NULL;
             }
         }
     } else {
-        if (!XISR(ss)) {
-            icp_resend(ss);
+        if (!XISR(icp)) {
+            icp_resend(icp);
         }
     }
 }
 
-void icp_set_mfrr(ICPState *ss, uint8_t mfrr)
+void icp_set_mfrr(ICPState *icp, uint8_t mfrr)
 {
-    ss->mfrr = mfrr;
-    if (mfrr < CPPR(ss)) {
-        icp_check_ipi(ss);
+    icp->mfrr = mfrr;
+    if (mfrr < CPPR(icp)) {
+        icp_check_ipi(icp);
     }
 }
 
-uint32_t icp_accept(ICPState *ss)
+uint32_t icp_accept(ICPState *icp)
 {
-    uint32_t xirr = ss->xirr;
+    uint32_t xirr = icp->xirr;
 
-    qemu_irq_lower(ss->output);
-    ss->xirr = ss->pending_priority << 24;
-    ss->pending_priority = 0xff;
-    ss->xirr_owner = NULL;
+    qemu_irq_lower(icp->output);
+    icp->xirr = icp->pending_priority << 24;
+    icp->pending_priority = 0xff;
+    icp->xirr_owner = NULL;
 
-    trace_xics_icp_accept(xirr, ss->xirr);
+    trace_xics_icp_accept(xirr, icp->xirr);
 
     return xirr;
 }
 
-uint32_t icp_ipoll(ICPState *ss, uint32_t *mfrr)
+uint32_t icp_ipoll(ICPState *icp, uint32_t *mfrr)
 {
     if (mfrr) {
-        *mfrr = ss->mfrr;
+        *mfrr = icp->mfrr;
     }
-    return ss->xirr;
+    return icp->xirr;
 }
 
-void icp_eoi(ICPState *ss, uint32_t xirr)
+void icp_eoi(ICPState *icp, uint32_t xirr)
 {
+    XICSFabric *xi = icp->xics;
+    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);
     ICSState *ics;
     uint32_t irq;
 
     /* Send EOI -> ICS */
-    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
-    trace_xics_icp_eoi(ss->cs->cpu_index, xirr, ss->xirr);
+    icp->xirr = (icp->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
+    trace_xics_icp_eoi(icp->cs->cpu_index, xirr, icp->xirr);
     irq = xirr & XISR_MASK;
-    QLIST_FOREACH(ics, &ss->xics->ics, list) {
-        if (ics_valid_irq(ics, irq)) {
-            ics_eoi(ics, irq);
-        }
+
+    ics = xic->ics_get(xi, irq);
+    if (ics) {
+        ics_eoi(ics, irq);
     }
-    if (!XISR(ss)) {
-        icp_resend(ss);
+    if (!XISR(icp)) {
+        icp_resend(icp);
     }
 }
 
 static void icp_irq(ICSState *ics, int server, int nr, uint8_t priority)
 {
-    XICSState *xics = ics->xics;
-    ICPState *ss = xics->ss + server;
+    ICPState *icp = xics_icp_get(ics->xics, server);
 
     trace_xics_icp_irq(server, nr, priority);
 
-    if ((priority >= CPPR(ss))
-        || (XISR(ss) && (ss->pending_priority <= priority))) {
+    if ((priority >= CPPR(icp))
+        || (XISR(icp) && (icp->pending_priority <= priority))) {
         ics_reject(ics, nr);
     } else {
-        if (XISR(ss) && ss->xirr_owner) {
-            ics_reject(ss->xirr_owner, XISR(ss));
-            ss->xirr_owner = NULL;
+        if (XISR(icp) && icp->xirr_owner) {
+            ics_reject(icp->xirr_owner, XISR(icp));
+            icp->xirr_owner = NULL;
         }
 
-        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
-        ss->xirr_owner = ics;
-        ss->pending_priority = priority;
-        trace_xics_icp_raise(ss->xirr, ss->pending_priority);
-        qemu_irq_raise(ss->output);
+        icp->xirr = (icp->xirr & ~XISR_MASK) | (nr & XISR_MASK);
+        icp->xirr_owner = ics;
+        icp->pending_priority = priority;
+        trace_xics_icp_raise(icp->xirr, icp->pending_priority);
+        qemu_irq_raise(icp->output);
     }
 }
 
 static void icp_dispatch_pre_save(void *opaque)
 {
-    ICPState *ss = opaque;
-    ICPStateClass *info = ICP_GET_CLASS(ss);
+    ICPState *icp = opaque;
+    ICPStateClass *info = ICP_GET_CLASS(icp);
 
     if (info->pre_save) {
-        info->pre_save(ss);
+        info->pre_save(icp);
     }
 }
 
 static int icp_dispatch_post_load(void *opaque, int version_id)
 {
-    ICPState *ss = opaque;
-    ICPStateClass *info = ICP_GET_CLASS(ss);
+    ICPState *icp = opaque;
+    ICPStateClass *info = ICP_GET_CLASS(icp);
 
     if (info->post_load) {
-        return info->post_load(ss, version_id);
+        return info->post_load(icp, version_id);
     }
 
     return 0;
@@ -485,12 +345,30 @@ static void icp_reset(DeviceState *dev)
     qemu_set_irq(icp->output, 0);
 }
 
+static void icp_realize(DeviceState *dev, Error **errp)
+{
+    ICPState *icp = ICP(dev);
+    Object *obj;
+    Error *err = NULL;
+
+    obj = object_property_get_link(OBJECT(dev), "xics", &err);
+    if (!obj) {
+        error_setg(errp, "%s: required link 'xics' not found: %s",
+                   __func__, error_get_pretty(err));
+        return;
+    }
+
+    icp->xics = XICS_FABRIC(obj);
+}
+
 static void icp_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
 
     dc->reset = icp_reset;
     dc->vmsd = &vmstate_icp_server;
+    dc->realize = icp_realize;
 }
 
 static const TypeInfo icp_info = {
@@ -663,17 +541,6 @@ static void ics_simple_reset(DeviceState *dev)
     }
 }
 
-static int ics_simple_post_load(ICSState *ics, int version_id)
-{
-    int i;
-
-    for (i = 0; i < ics->xics->nr_servers; i++) {
-        icp_resend(&ics->xics->ss[i]);
-    }
-
-    return 0;
-}
-
 static void ics_simple_dispatch_pre_save(void *opaque)
 {
     ICSState *ics = opaque;
@@ -746,15 +613,20 @@ static void ics_simple_realize(DeviceState *dev, Error **errp)
     ics->qirqs = qemu_allocate_irqs(ics_simple_set_irq, ics, ics->nr_irqs);
 }
 
+static Property ics_simple_properties[] = {
+    DEFINE_PROP_UINT32("nr-irqs", ICSState, nr_irqs, 0),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
 static void ics_simple_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
     ICSStateClass *isc = ICS_BASE_CLASS(klass);
 
-    dc->realize = ics_simple_realize;
+    isc->realize = ics_simple_realize;
+    dc->props = ics_simple_properties;
     dc->vmsd = &vmstate_ics_simple;
     dc->reset = ics_simple_reset;
-    isc->post_load = ics_simple_post_load;
     isc->reject = ics_simple_reject;
     isc->resend = ics_simple_resend;
     isc->eoi = ics_simple_eoi;
@@ -769,32 +641,56 @@ static const TypeInfo ics_simple_info = {
     .instance_init = ics_simple_initfn,
 };
 
+static void ics_base_realize(DeviceState *dev, Error **errp)
+{
+    ICSStateClass *icsc = ICS_BASE_GET_CLASS(dev);
+    ICSState *ics = ICS_BASE(dev);
+    Object *obj;
+    Error *err = NULL;
+
+    obj = object_property_get_link(OBJECT(dev), "xics", &err);
+    if (!obj) {
+        error_setg(errp, "%s: required link 'xics' not found: %s",
+                   __func__, error_get_pretty(err));
+        return;
+    }
+    ics->xics = XICS_FABRIC(obj);
+
+    if (icsc->realize) {
+        icsc->realize(dev, errp);
+    }
+}
+
+static void ics_base_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->realize = ics_base_realize;
+}
+
 static const TypeInfo ics_base_info = {
     .name = TYPE_ICS_BASE,
     .parent = TYPE_DEVICE,
     .abstract = true,
     .instance_size = sizeof(ICSState),
+    .class_init = ics_base_class_init,
     .class_size = sizeof(ICSStateClass),
 };
 
+static const TypeInfo xics_fabric_info = {
+    .name = TYPE_XICS_FABRIC,
+    .parent = TYPE_INTERFACE,
+    .class_size = sizeof(XICSFabricClass),
+};
+
 /*
  * Exported functions
  */
-ICSState *xics_find_source(XICSState *xics, int irq)
-{
-    ICSState *ics;
-
-    QLIST_FOREACH(ics, &xics->ics, list) {
-        if (ics_valid_irq(ics, irq)) {
-            return ics;
-        }
-    }
-    return NULL;
-}
-
-qemu_irq xics_get_qirq(XICSState *xics, int irq)
+qemu_irq xics_get_qirq(XICSFabric *xi, int irq)
 {
-    ICSState *ics = xics_find_source(xics, irq);
+    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);
+    ICSState *ics = xic->ics_get(xi, irq);
 
     if (ics) {
         return ics->qirqs[irq - ics->offset];
@@ -803,6 +699,13 @@ qemu_irq xics_get_qirq(XICSState *xics, int irq)
     return NULL;
 }
 
+ICPState *xics_icp_get(XICSFabric *xi, int server)
+{
+    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);
+
+    return xic->icp_get(xi, server);
+}
+
 void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
 {
     assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));
@@ -813,10 +716,10 @@ void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
 
 static void xics_register_types(void)
 {
-    type_register_static(&xics_common_info);
     type_register_static(&ics_simple_info);
     type_register_static(&ics_base_info);
     type_register_static(&icp_info);
+    type_register_static(&xics_fabric_info);
 }
 
 type_init(xics_register_types)
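With the fabric in place, callers no longer index a global server array; they ask the machine for the ICP and bail out cleanly when the server number is invalid, which is how the sPAPR H_IPI hypercall is rewritten later in this series. A hedged sketch of that calling pattern follows; the names ending in `_stub` are hypothetical stand-ins for the real helpers, not the QEMU API itself.

```c
/* Sketch: delivering an inter-processor interrupt the way h_ipi does
 * after this series -- resolve the per-CPU presenter via the fabric,
 * then program its MFRR.  Purely illustrative stand-ins. */
typedef struct ICPState ICPState;

/* Hypothetical prototypes mirroring xics_icp_get() and icp_set_mfrr(). */
ICPState *icp_get_stub(void *machine, int server);
void icp_set_mfrr_stub(ICPState *icp, unsigned char mfrr);

static int deliver_ipi(void *machine, int server, unsigned char mfrr)
{
    ICPState *icp = icp_get_stub(machine, server);

    if (!icp) {
        return -1;          /* corresponds to H_PARAMETER in the hypercall */
    }
    icp_set_mfrr_stub(icp, mfrr);
    return 0;
}
```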
@@ -40,16 +40,12 @@
 
 #include <sys/ioctl.h>
 
-typedef struct KVMXICSState {
-    XICSState parent_obj;
-
-    int kernel_xics_fd;
-} KVMXICSState;
+static int kernel_xics_fd = -1;
 
 /*
  * ICP-KVM
  */
-static void icp_get_kvm_state(ICPState *ss)
+static void icp_get_kvm_state(ICPState *icp)
 {
     uint64_t state;
     struct kvm_one_reg reg = {
@@ -59,25 +55,25 @@ static void icp_get_kvm_state(ICPState *ss)
     int ret;
 
     /* ICP for this CPU thread is not in use, exiting */
-    if (!ss->cs) {
+    if (!icp->cs) {
         return;
     }
 
-    ret = kvm_vcpu_ioctl(ss->cs, KVM_GET_ONE_REG, &reg);
+    ret = kvm_vcpu_ioctl(icp->cs, KVM_GET_ONE_REG, &reg);
     if (ret != 0) {
         error_report("Unable to retrieve KVM interrupt controller state"
-                " for CPU %ld: %s", kvm_arch_vcpu_id(ss->cs), strerror(errno));
+                " for CPU %ld: %s", kvm_arch_vcpu_id(icp->cs), strerror(errno));
         exit(1);
     }
 
-    ss->xirr = state >> KVM_REG_PPC_ICP_XISR_SHIFT;
-    ss->mfrr = (state >> KVM_REG_PPC_ICP_MFRR_SHIFT)
+    icp->xirr = state >> KVM_REG_PPC_ICP_XISR_SHIFT;
+    icp->mfrr = (state >> KVM_REG_PPC_ICP_MFRR_SHIFT)
         & KVM_REG_PPC_ICP_MFRR_MASK;
-    ss->pending_priority = (state >> KVM_REG_PPC_ICP_PPRI_SHIFT)
+    icp->pending_priority = (state >> KVM_REG_PPC_ICP_PPRI_SHIFT)
         & KVM_REG_PPC_ICP_PPRI_MASK;
 }
 
-static int icp_set_kvm_state(ICPState *ss, int version_id)
+static int icp_set_kvm_state(ICPState *icp, int version_id)
 {
     uint64_t state;
     struct kvm_one_reg reg = {
@@ -87,18 +83,18 @@ static int icp_set_kvm_state(ICPState *ss, int version_id)
     int ret;
 
     /* ICP for this CPU thread is not in use, exiting */
-    if (!ss->cs) {
+    if (!icp->cs) {
         return 0;
     }
 
-    state = ((uint64_t)ss->xirr << KVM_REG_PPC_ICP_XISR_SHIFT)
-        | ((uint64_t)ss->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT)
-        | ((uint64_t)ss->pending_priority << KVM_REG_PPC_ICP_PPRI_SHIFT);
+    state = ((uint64_t)icp->xirr << KVM_REG_PPC_ICP_XISR_SHIFT)
+        | ((uint64_t)icp->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT)
+        | ((uint64_t)icp->pending_priority << KVM_REG_PPC_ICP_PPRI_SHIFT);
 
-    ret = kvm_vcpu_ioctl(ss->cs, KVM_SET_ONE_REG, &reg);
+    ret = kvm_vcpu_ioctl(icp->cs, KVM_SET_ONE_REG, &reg);
     if (ret != 0) {
         error_report("Unable to restore KVM interrupt controller state (0x%"
-                PRIx64 ") for CPU %ld: %s", state, kvm_arch_vcpu_id(ss->cs),
+                PRIx64 ") for CPU %ld: %s", state, kvm_arch_vcpu_id(icp->cs),
                 strerror(errno));
         return ret;
     }
@@ -122,6 +118,34 @@ static void icp_kvm_reset(DeviceState *dev)
     icp_set_kvm_state(icp, 1);
 }
 
+static void icp_kvm_cpu_setup(ICPState *icp, PowerPCCPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    int ret;
+
+    if (kernel_xics_fd == -1) {
+        abort();
+    }
+
+    /*
+     * If we are reusing a parked vCPU fd corresponding to the CPU
+     * which was hot-removed earlier we don't have to renable
+     * KVM_CAP_IRQ_XICS capability again.
+     */
+    if (icp->cap_irq_xics_enabled) {
+        return;
+    }
+
+    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_IRQ_XICS, 0, kernel_xics_fd,
+                              kvm_arch_vcpu_id(cs));
+    if (ret < 0) {
+        error_report("Unable to connect CPU%ld to kernel XICS: %s",
+                     kvm_arch_vcpu_id(cs), strerror(errno));
+        exit(1);
+    }
+    icp->cap_irq_xics_enabled = true;
+}
+
 static void icp_kvm_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
@@ -130,6 +154,7 @@ static void icp_kvm_class_init(ObjectClass *klass, void *data)
     dc->reset = icp_kvm_reset;
     icpc->pre_save = icp_get_kvm_state;
     icpc->post_load = icp_set_kvm_state;
+    icpc->cpu_setup = icp_kvm_cpu_setup;
 }
 
 static const TypeInfo icp_kvm_info = {
@@ -145,7 +170,6 @@ static const TypeInfo icp_kvm_info = {
  */
 static void ics_get_kvm_state(ICSState *ics)
 {
-    KVMXICSState *xicskvm = XICS_SPAPR_KVM(ics->xics);
     uint64_t state;
     struct kvm_device_attr attr = {
         .flags = 0,
@@ -160,7 +184,7 @@ static void ics_get_kvm_state(ICSState *ics)
 
         attr.attr = i + ics->offset;
 
-        ret = ioctl(xicskvm->kernel_xics_fd, KVM_GET_DEVICE_ATTR, &attr);
+        ret = ioctl(kernel_xics_fd, KVM_GET_DEVICE_ATTR, &attr);
         if (ret != 0) {
             error_report("Unable to retrieve KVM interrupt controller state"
                     " for IRQ %d: %s", i + ics->offset, strerror(errno));
@@ -204,7 +228,6 @@ static void ics_get_kvm_state(ICSState *ics)
 
 static int ics_set_kvm_state(ICSState *ics, int version_id)
 {
-    KVMXICSState *xicskvm = XICS_SPAPR_KVM(ics->xics);
     uint64_t state;
     struct kvm_device_attr attr = {
         .flags = 0,
@@ -238,7 +261,7 @@ static int ics_set_kvm_state(ICSState *ics, int version_id)
             }
         }
 
-        ret = ioctl(xicskvm->kernel_xics_fd, KVM_SET_DEVICE_ATTR, &attr);
+        ret = ioctl(kernel_xics_fd, KVM_SET_DEVICE_ATTR, &attr);
         if (ret != 0) {
             error_report("Unable to restore KVM interrupt controller state"
                     " for IRQs %d: %s", i + ics->offset, strerror(errno));
@@ -308,7 +331,7 @@ static void ics_kvm_class_init(ObjectClass *klass, void *data)
     DeviceClass *dc = DEVICE_CLASS(klass);
     ICSStateClass *icsc = ICS_BASE_CLASS(klass);
 
-    dc->realize = ics_kvm_realize;
+    icsc->realize = ics_kvm_realize;
     dc->reset = ics_kvm_reset;
     icsc->pre_save = ics_get_kvm_state;
     icsc->post_load = ics_set_kvm_state;
@@ -324,57 +347,6 @@ static const TypeInfo ics_kvm_info = {
 /*
  * XICS-KVM
  */
-static void xics_kvm_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
-{
-    CPUState *cs;
-    ICPState *ss;
-    KVMXICSState *xicskvm = XICS_SPAPR_KVM(xics);
-    int ret;
-
-    cs = CPU(cpu);
-    ss = &xics->ss[cs->cpu_index];
-
-    assert(cs->cpu_index < xics->nr_servers);
-    if (xicskvm->kernel_xics_fd == -1) {
-        abort();
-    }
-
-    /*
-     * If we are reusing a parked vCPU fd corresponding to the CPU
-     * which was hot-removed earlier we don't have to renable
-     * KVM_CAP_IRQ_XICS capability again.
-     */
-    if (ss->cap_irq_xics_enabled) {
-        return;
-    }
-
-    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_IRQ_XICS, 0, xicskvm->kernel_xics_fd,
-                              kvm_arch_vcpu_id(cs));
-    if (ret < 0) {
-        error_report("Unable to connect CPU%ld to kernel XICS: %s",
-                     kvm_arch_vcpu_id(cs), strerror(errno));
-        exit(1);
-    }
-    ss->cap_irq_xics_enabled = true;
-}
-
-static void xics_kvm_set_nr_irqs(XICSState *xics, uint32_t nr_irqs,
-                                 Error **errp)
-{
-    ICSState *ics = QLIST_FIRST(&xics->ics);
-
-    /* This needs to be deprecated ... */
-    xics->nr_irqs = nr_irqs;
-    if (ics) {
-        ics->nr_irqs = nr_irqs;
-    }
-}
-
-static void xics_kvm_set_nr_servers(XICSState *xics, uint32_t nr_servers,
-                                    Error **errp)
-{
-    xics_set_nr_servers(xics, nr_servers, TYPE_KVM_ICP, errp);
-}
-
 static void rtas_dummy(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                        uint32_t token,
@@ -385,13 +357,9 @@ static void rtas_dummy(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                   __func__);
 }
 
-static void xics_kvm_realize(DeviceState *dev, Error **errp)
+int xics_kvm_init(sPAPRMachineState *spapr, Error **errp)
 {
-    KVMXICSState *xicskvm = XICS_SPAPR_KVM(dev);
-    XICSState *xics = XICS_COMMON(dev);
-    ICSState *ics;
-    int i, rc;
-    Error *error = NULL;
+    int rc;
     struct kvm_create_device xics_create_device = {
         .type = KVM_DEV_TYPE_XICS,
         .flags = 0,
@@ -439,72 +407,24 @@ static void xics_kvm_realize(DeviceState *dev, Error **errp)
         goto fail;
     }
 
-    xicskvm->kernel_xics_fd = xics_create_device.fd;
-
-    QLIST_FOREACH(ics, &xics->ics, list) {
-        object_property_set_bool(OBJECT(ics), true, "realized", &error);
-        if (error) {
-            error_propagate(errp, error);
-            goto fail;
-        }
-    }
-
-    assert(xics->nr_servers);
-    for (i = 0; i < xics->nr_servers; i++) {
-        object_property_set_bool(OBJECT(&xics->ss[i]), true, "realized",
-                                 &error);
-        if (error) {
-            error_propagate(errp, error);
-            goto fail;
-        }
-    }
+    kernel_xics_fd = xics_create_device.fd;
 
     kvm_kernel_irqchip = true;
     kvm_msi_via_irqfd_allowed = true;
     kvm_gsi_direct_mapping = true;
 
-    return;
+    return rc;
 
 fail:
     kvmppc_define_rtas_kernel_token(0, "ibm,set-xive");
     kvmppc_define_rtas_kernel_token(0, "ibm,get-xive");
     kvmppc_define_rtas_kernel_token(0, "ibm,int-on");
    kvmppc_define_rtas_kernel_token(0, "ibm,int-off");
     return -1;
 }
 
-static void xics_kvm_initfn(Object *obj)
-{
-    XICSState *xics = XICS_COMMON(obj);
-    ICSState *ics;
-
-    ics = ICS_SIMPLE(object_new(TYPE_ICS_KVM));
-    object_property_add_child(obj, "ics", OBJECT(ics), NULL);
-    ics->xics = xics;
-    QLIST_INSERT_HEAD(&xics->ics, ics, list);
-}
-
-static void xics_kvm_class_init(ObjectClass *oc, void *data)
-{
-    DeviceClass *dc = DEVICE_CLASS(oc);
-    XICSStateClass *xsc = XICS_COMMON_CLASS(oc);
-
-    dc->realize = xics_kvm_realize;
-    xsc->cpu_setup = xics_kvm_cpu_setup;
-    xsc->set_nr_irqs = xics_kvm_set_nr_irqs;
-    xsc->set_nr_servers = xics_kvm_set_nr_servers;
-}
-
-static const TypeInfo xics_spapr_kvm_info = {
-    .name = TYPE_XICS_SPAPR_KVM,
-    .parent = TYPE_XICS_COMMON,
-    .instance_size = sizeof(KVMXICSState),
-    .class_init = xics_kvm_class_init,
-    .instance_init = xics_kvm_initfn,
-};
-
 static void xics_kvm_register_types(void)
 {
-    type_register_static(&xics_spapr_kvm_info);
     type_register_static(&ics_kvm_info);
     type_register_static(&icp_kvm_info);
 }
@@ -44,7 +44,7 @@ static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
 {
     CPUState *cs = CPU(cpu);
-    ICPState *icp = &spapr->xics->ss[cs->cpu_index];
+    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), cs->cpu_index);
     target_ulong cppr = args[0];
 
     icp_set_cppr(icp, cppr);
@@ -56,12 +56,13 @@ static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
 {
     target_ulong server = xics_get_cpu_index_by_dt_id(args[0]);
     target_ulong mfrr = args[1];
+    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), server);
 
-    if (server >= spapr->xics->nr_servers) {
+    if (!icp) {
         return H_PARAMETER;
     }
 
-    icp_set_mfrr(spapr->xics->ss + server, mfrr);
+    icp_set_mfrr(icp, mfrr);
     return H_SUCCESS;
 }
 
@@ -69,7 +70,7 @@ static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
 {
     CPUState *cs = CPU(cpu);
-    ICPState *icp = &spapr->xics->ss[cs->cpu_index];
+    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), cs->cpu_index);
     uint32_t xirr = icp_accept(icp);
 
     args[0] = xirr;
@@ -80,7 +81,7 @@ static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
 {
     CPUState *cs = CPU(cpu);
-    ICPState *icp = &spapr->xics->ss[cs->cpu_index];
+    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), cs->cpu_index);
     uint32_t xirr = icp_accept(icp);
 
     args[0] = xirr;
@@ -92,7 +93,7 @@ static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
 {
     CPUState *cs = CPU(cpu);
-    ICPState *icp = &spapr->xics->ss[cs->cpu_index];
+    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), cs->cpu_index);
     target_ulong xirr = args[0];
 
     icp_eoi(icp, xirr);
@@ -103,7 +104,7 @@ static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
 {
     CPUState *cs = CPU(cpu);
-    ICPState *icp = &spapr->xics->ss[cs->cpu_index];
+    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), cs->cpu_index);
     uint32_t mfrr;
     uint32_t xirr = icp_ipoll(icp, &mfrr);
 
@@ -118,7 +119,7 @@ static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           uint32_t nargs, target_ulong args,
                           uint32_t nret, target_ulong rets)
 {
-    ICSState *ics = QLIST_FIRST(&spapr->xics->ics);
+    ICSState *ics = spapr->ics;
     uint32_t nr, srcno, server, priority;
 
     if ((nargs != 3) || (nret != 1)) {
@@ -134,7 +135,7 @@ static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     server = xics_get_cpu_index_by_dt_id(rtas_ld(args, 1));
     priority = rtas_ld(args, 2);
 
-    if (!ics_valid_irq(ics, nr) || (server >= ics->xics->nr_servers)
+    if (!ics_valid_irq(ics, nr) || !xics_icp_get(XICS_FABRIC(spapr), server)
         || (priority > 0xff)) {
         rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
         return;
@@ -151,7 +152,7 @@ static void rtas_get_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           uint32_t nargs, target_ulong args,
                           uint32_t nret, target_ulong rets)
 {
-    ICSState *ics = QLIST_FIRST(&spapr->xics->ics);
+    ICSState *ics = spapr->ics;
     uint32_t nr, srcno;
 
     if ((nargs != 1) || (nret != 3)) {
@@ -181,7 +182,7 @@ static void rtas_int_off(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
 {
-    ICSState *ics = QLIST_FIRST(&spapr->xics->ics);
+    ICSState *ics = spapr->ics;
     uint32_t nr, srcno;
 
     if ((nargs != 1) || (nret != 1)) {
@@ -212,7 +213,7 @@ static void rtas_int_on(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
 {
-    ICSState *ics = QLIST_FIRST(&spapr->xics->ics);
+    ICSState *ics = spapr->ics;
    uint32_t nr, srcno;
 
     if ((nargs != 1) || (nret != 1)) {
@@ -239,36 +240,8 @@ static void rtas_int_on(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     rtas_st(rets, 0, RTAS_OUT_SUCCESS);
 }
 
-static void xics_spapr_set_nr_irqs(XICSState *xics, uint32_t nr_irqs,
-                                   Error **errp)
+int xics_spapr_init(sPAPRMachineState *spapr, Error **errp)
 {
-    ICSState *ics = QLIST_FIRST(&xics->ics);
-
-    /* This needs to be deprecated ... */
-    xics->nr_irqs = nr_irqs;
-    if (ics) {
-        ics->nr_irqs = nr_irqs;
-    }
-}
-
-static void xics_spapr_set_nr_servers(XICSState *xics, uint32_t nr_servers,
-                                      Error **errp)
-{
-    xics_set_nr_servers(xics, nr_servers, TYPE_ICP, errp);
-}
-
-static void xics_spapr_realize(DeviceState *dev, Error **errp)
-{
-    XICSState *xics = XICS_SPAPR(dev);
-    ICSState *ics;
-    Error *error = NULL;
-    int i;
-
-    if (!xics->nr_servers) {
-        error_setg(errp, "Number of servers needs to be greater 0");
-        return;
-    }
-
     /* Registration of global state belongs into realize */
     spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive);
     spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive);
@@ -281,55 +254,9 @@ static void xics_spapr_realize(DeviceState *dev, Error **errp)
     spapr_register_hypercall(H_XIRR_X, h_xirr_x);
     spapr_register_hypercall(H_EOI, h_eoi);
     spapr_register_hypercall(H_IPOLL, h_ipoll);
 
-    QLIST_FOREACH(ics, &xics->ics, list) {
-        object_property_set_bool(OBJECT(ics), true, "realized", &error);
-        if (error) {
-            error_propagate(errp, error);
-            return;
-        }
-    }
-
-    for (i = 0; i < xics->nr_servers; i++) {
-        object_property_set_bool(OBJECT(&xics->ss[i]), true, "realized",
-                                 &error);
-        if (error) {
-            error_propagate(errp, error);
-            return;
-        }
-    }
+    return 0;
 }
 
-static void xics_spapr_initfn(Object *obj)
-{
-    XICSState *xics = XICS_SPAPR(obj);
-    ICSState *ics;
-
-    ics = ICS_SIMPLE(object_new(TYPE_ICS_SIMPLE));
-    object_property_add_child(obj, "ics", OBJECT(ics), NULL);
-    ics->xics = xics;
-    QLIST_INSERT_HEAD(&xics->ics, ics, list);
-}
-
-static void xics_spapr_class_init(ObjectClass *oc, void *data)
-{
-    DeviceClass *dc = DEVICE_CLASS(oc);
-    XICSStateClass *xsc = XICS_SPAPR_CLASS(oc);
-
-    dc->realize = xics_spapr_realize;
-    xsc->set_nr_irqs = xics_spapr_set_nr_irqs;
-    xsc->set_nr_servers = xics_spapr_set_nr_servers;
-}
-
-static const TypeInfo xics_spapr_info = {
-    .name = TYPE_XICS_SPAPR,
-    .parent = TYPE_XICS_COMMON,
-    .instance_size = sizeof(XICSState),
-    .class_size = sizeof(XICSStateClass),
-    .class_init = xics_spapr_class_init,
-    .instance_init = xics_spapr_initfn,
-};
-
 #define ICS_IRQ_FREE(ics, srcno)   \
     (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))
 
@@ -354,9 +281,8 @@ static int ics_find_free_block(ICSState *ics, int num, int alignnum)
     return -1;
 }
 
-int xics_spapr_alloc(XICSState *xics, int irq_hint, bool lsi, Error **errp)
+int spapr_ics_alloc(ICSState *ics, int irq_hint, bool lsi, Error **errp)
 {
-    ICSState *ics = QLIST_FIRST(&xics->ics);
     int irq;
 
     if (!ics) {
@@ -387,10 +313,9 @@ int xics_spapr_alloc(XICSState *xics, int irq_hint, bool lsi, Error **errp)
  * Allocate block of consecutive IRQs, and return the number of the first IRQ in
  * the block. If align==true, aligns the first IRQ number to num.
  */
-int xics_spapr_alloc_block(XICSState *xics, int num, bool lsi, bool align,
-                           Error **errp)
+int spapr_ics_alloc_block(ICSState *ics, int num, bool lsi,
                          bool align, Error **errp)
 {
-    ICSState *ics = QLIST_FIRST(&xics->ics);
     int i, first = -1;
 
     if (!ics) {
@@ -440,20 +365,18 @@ static void ics_free(ICSState *ics, int srcno, int num)
     }
 }
 
-void xics_spapr_free(XICSState *xics, int irq, int num)
+void spapr_ics_free(ICSState *ics, int irq, int num)
 {
-    ICSState *ics = xics_find_source(xics, irq);
-
-    if (ics) {
+    if (ics_valid_irq(ics, irq)) {
         trace_xics_ics_free(0, irq, num);
         ics_free(ics, irq - ics->offset, num);
     }
 }
 
-void spapr_dt_xics(XICSState *xics, void *fdt, uint32_t phandle)
+void spapr_dt_xics(int nr_servers, void *fdt, uint32_t phandle)
 {
     uint32_t interrupt_server_ranges_prop[] = {
-        0, cpu_to_be32(xics->nr_servers),
+        0, cpu_to_be32(nr_servers),
     };
     int node;
 
@@ -470,10 +393,3 @@ void spapr_dt_xics(XICSState *xics, void *fdt, uint32_t phandle)
     _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
     _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));
 }
-
-static void xics_spapr_register_types(void)
-{
-    type_register_static(&xics_spapr_info);
-}
-
-type_init(xics_spapr_register_types)
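The IRQ allocators above (spapr_ics_alloc / spapr_ics_alloc_block) both lean on ics_find_free_block(), which scans the ICS flag array for a run of free entries starting on an aligned boundary. A self-contained sketch of that search, under the assumption of a simple boolean "busy" array rather than the real ICSIRQState flags, is shown below for illustration only.

```c
#include <stdbool.h>

/* Sketch: aligned free-block search in the spirit of ics_find_free_block().
 * `busy` marks allocated IRQs, `nr` is the table size, `num` the block
 * length wanted, `alignnum` the required alignment of the first entry. */
static int find_free_block(const bool *busy, int nr, int num, int alignnum)
{
    for (int first = 0; first + num <= nr; first += alignnum) {
        int i;

        for (i = 0; i < num; i++) {
            if (busy[first + i]) {
                break;              /* block is not fully free, try next */
            }
        }
        if (i == num) {
            return first;           /* index of the first IRQ in the block */
        }
    }
    return -1;                      /* no suitable block found */
}
```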
hw/pci/pci.c | 28
@@ -1530,6 +1530,34 @@ static const pci_class_desc pci_class_descriptions[] =
     { 0, NULL}
 };
 
+static void pci_for_each_device_under_bus_reverse(PCIBus *bus,
+                                                  void (*fn)(PCIBus *b,
+                                                             PCIDevice *d,
+                                                             void *opaque),
+                                                  void *opaque)
+{
+    PCIDevice *d;
+    int devfn;
+
+    for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
+        d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn];
+        if (d) {
+            fn(bus, d, opaque);
+        }
+    }
+}
+
+void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
+                                 void (*fn)(PCIBus *b, PCIDevice *d, void *opaque),
+                                 void *opaque)
+{
+    bus = pci_find_bus_nr(bus, bus_num);
+
+    if (bus) {
+        pci_for_each_device_under_bus_reverse(bus, fn, opaque);
+    }
+}
+
 static void pci_for_each_device_under_bus(PCIBus *bus,
                                           void (*fn)(PCIBus *b, PCIDevice *d,
                                                      void *opaque),
hw/ppc/spapr.c | 196
@@ -63,6 +63,7 @@
 #include "qemu/error-report.h"
 #include "trace.h"
 #include "hw/nmi.h"
+#include "hw/intc/intc.h"
 
 #include "hw/compat.h"
 #include "qemu/cutils.h"
@@ -95,37 +96,68 @@
 
 #define HTAB_SIZE(spapr)        (1ULL << ((spapr)->htab_shift))
 
-static XICSState *try_create_xics(const char *type, int nr_servers,
-                                  int nr_irqs, Error **errp)
+static int try_create_xics(sPAPRMachineState *spapr, const char *type_ics,
+                           const char *type_icp, int nr_servers,
+                           int nr_irqs, Error **errp)
 {
-    Error *err = NULL;
-    DeviceState *dev;
+    XICSFabric *xi = XICS_FABRIC(spapr);
+    Error *err = NULL, *local_err = NULL;
+    ICSState *ics = NULL;
+    int i;
 
-    dev = qdev_create(NULL, type);
-    qdev_prop_set_uint32(dev, "nr_servers", nr_servers);
-    qdev_prop_set_uint32(dev, "nr_irqs", nr_irqs);
-    object_property_set_bool(OBJECT(dev), true, "realized", &err);
+    ics = ICS_SIMPLE(object_new(type_ics));
+    qdev_set_parent_bus(DEVICE(ics), sysbus_get_default());
+    object_property_add_child(OBJECT(spapr), "ics", OBJECT(ics), NULL);
+    object_property_set_int(OBJECT(ics), nr_irqs, "nr-irqs", &err);
+    object_property_add_const_link(OBJECT(ics), "xics", OBJECT(xi), NULL);
+    object_property_set_bool(OBJECT(ics), true, "realized", &local_err);
+    error_propagate(&err, local_err);
     if (err) {
-        error_propagate(errp, err);
-        object_unparent(OBJECT(dev));
-        return NULL;
+        goto error;
     }
-    return XICS_COMMON(dev);
+
+    spapr->icps = g_malloc0(nr_servers * sizeof(ICPState));
+    spapr->nr_servers = nr_servers;
+
+    for (i = 0; i < nr_servers; i++) {
+        ICPState *icp = &spapr->icps[i];
+
+        object_initialize(icp, sizeof(*icp), type_icp);
+        qdev_set_parent_bus(DEVICE(icp), sysbus_get_default());
+        object_property_add_child(OBJECT(spapr), "icp[*]", OBJECT(icp), NULL);
+        object_property_add_const_link(OBJECT(icp), "xics", OBJECT(xi), NULL);
+        object_property_set_bool(OBJECT(icp), true, "realized", &err);
+        if (err) {
+            goto error;
+        }
+        object_unref(OBJECT(icp));
+    }
+
+    spapr->ics = ics;
+    return 0;
+
+error:
+    error_propagate(errp, err);
+    if (ics) {
+        object_unparent(OBJECT(ics));
+    }
+    return -1;
 }
 
-static XICSState *xics_system_init(MachineState *machine,
-                                   int nr_servers, int nr_irqs, Error **errp)
+static int xics_system_init(MachineState *machine,
+                            int nr_servers, int nr_irqs, Error **errp)
 {
-    XICSState *xics = NULL;
+    int rc = -1;
 
     if (kvm_enabled()) {
         Error *err = NULL;
 
-        if (machine_kernel_irqchip_allowed(machine)) {
-            xics = try_create_xics(TYPE_XICS_SPAPR_KVM, nr_servers, nr_irqs,
-                                   &err);
+        if (machine_kernel_irqchip_allowed(machine) &&
+            !xics_kvm_init(SPAPR_MACHINE(machine), errp)) {
+            rc = try_create_xics(SPAPR_MACHINE(machine), TYPE_ICS_KVM,
+                                 TYPE_KVM_ICP, nr_servers, nr_irqs, &err);
         }
-        if (machine_kernel_irqchip_required(machine) && !xics) {
+        if (machine_kernel_irqchip_required(machine) && rc < 0) {
             error_reportf_err(err,
                               "kernel_irqchip requested but unavailable: ");
         } else {
@@ -133,11 +165,13 @@ static XICSState *xics_system_init(MachineState *machine,
         }
     }
 
-    if (!xics) {
-        xics = try_create_xics(TYPE_XICS_SPAPR, nr_servers, nr_irqs, errp);
+    if (rc < 0) {
+        xics_spapr_init(SPAPR_MACHINE(machine), errp);
+        rc = try_create_xics(SPAPR_MACHINE(machine), TYPE_ICS_SIMPLE,
+                             TYPE_ICP, nr_servers, nr_irqs, errp);
     }
 
-    return xics;
+    return rc;
 }
 
 static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
@@ -924,7 +958,7 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr,
     _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));
 
     /* /interrupt controller */
-    spapr_dt_xics(spapr->xics, fdt, PHANDLE_XICP);
+    spapr_dt_xics(spapr->nr_servers, fdt, PHANDLE_XICP);
 
     ret = spapr_populate_memory(spapr, fdt);
     if (ret < 0) {
@@ -1053,6 +1087,62 @@ static void close_htab_fd(sPAPRMachineState *spapr)
     spapr->htab_fd = -1;
 }
 
+static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
+
+    return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
+}
+
+static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
+                                               hwaddr ptex, int n)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
+    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
+
+    if (!spapr->htab) {
+        /*
+         * HTAB is controlled by KVM. Fetch into temporary buffer
+         */
+        ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
+        kvmppc_read_hptes(hptes, ptex, n);
+        return hptes;
+    }
+
+    /*
+     * HTAB is controlled by QEMU. Just point to the internally
+     * accessible PTEG.
+     */
+    return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
+}
+
+static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
+                              const ppc_hash_pte64_t *hptes,
+                              hwaddr ptex, int n)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
+
+    if (!spapr->htab) {
+        g_free((void *)hptes);
+    }
+
+    /* Nothing to do for qemu managed HPT */
+}
+
+static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex,
+                             uint64_t pte0, uint64_t pte1)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
+    hwaddr offset = ptex * HASH_PTE_SIZE_64;
+
+    if (!spapr->htab) {
+        kvmppc_write_hpte(ptex, pte0, pte1);
+    } else {
+        stq_p(spapr->htab + offset, pte0);
+        stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
+    }
+}
+
 static int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
 {
     int shift;
@@ -1252,6 +1342,13 @@ static int spapr_post_load(void *opaque, int version_id)
     sPAPRMachineState *spapr = (sPAPRMachineState *)opaque;
     int err = 0;
 
+    if (!object_dynamic_cast(OBJECT(spapr->ics), TYPE_ICS_KVM)) {
+        int i;
+        for (i = 0; i < spapr->nr_servers; i++) {
+            icp_resend(&spapr->icps[i]);
+        }
+    }
+
     /* In earlier versions, there was no separate qdev for the PAPR
      * RTC, so the RTC offset was stored directly in sPAPREnvironment.
      * So when migrating from those versions, poke the incoming offset
@@ -1902,9 +1999,8 @@ static void ppc_spapr_init(MachineState *machine)
     load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD;
 
     /* Set up Interrupt Controller before we create the VCPUs */
-    spapr->xics = xics_system_init(machine,
-                                   DIV_ROUND_UP(max_cpus * smt, smp_threads),
-                                   XICS_IRQS_SPAPR, &error_fatal);
+    xics_system_init(machine, DIV_ROUND_UP(max_cpus * smt, smp_threads),
+                     XICS_IRQS_SPAPR, &error_fatal);
 
     /* Set up containers for ibm,client-set-architecture negotiated options */
     spapr->ov5 = spapr_ovec_new();
@@ -2872,6 +2968,40 @@ static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index,
     *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
 }
 
+static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(dev);
+
+    return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL;
+}
+
+static void spapr_ics_resend(XICSFabric *dev)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(dev);
+
+    ics_resend(spapr->ics);
+}
+
+static ICPState *spapr_icp_get(XICSFabric *xi, int server)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(xi);
+
+    return (server < spapr->nr_servers) ? &spapr->icps[server] : NULL;
+}
+
+static void spapr_pic_print_info(InterruptStatsProvider *obj,
+                                 Monitor *mon)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
+    int i;
+
+    for (i = 0; i < spapr->nr_servers; i++) {
+        icp_pic_print_info(&spapr->icps[i], mon);
+    }
+
+    ics_pic_print_info(spapr->ics, mon);
+}
+
 static void spapr_machine_class_init(ObjectClass *oc, void *data)
 {
     MachineClass *mc = MACHINE_CLASS(oc);
@@ -2880,6 +3010,8 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
     NMIClass *nc = NMI_CLASS(oc);
     HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
     PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
+    XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
+    InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);
 
     mc->desc = "pSeries Logical Partition (PAPR compliant)";
 
@@ -2891,7 +3023,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
     mc->init = ppc_spapr_init;
     mc->reset = ppc_spapr_reset;
     mc->block_default_type = IF_SCSI;
-    mc->max_cpus = 255;
+    mc->max_cpus = 1024;
     mc->no_parallel = 1;
     mc->default_boot_order = "";
     mc->default_ram_size = 512 * M_BYTE;
@@ -2913,6 +3045,14 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
     nc->nmi_monitor_handler = spapr_nmi;
     smc->phb_placement = spapr_phb_placement;
     vhc->hypercall = emulate_spapr_hypercall;
+    vhc->hpt_mask = spapr_hpt_mask;
+    vhc->map_hptes = spapr_map_hptes;
+    vhc->unmap_hptes = spapr_unmap_hptes;
+    vhc->store_hpte = spapr_store_hpte;
+    xic->ics_get = spapr_ics_get;
+    xic->ics_resend = spapr_ics_resend;
+    xic->icp_get = spapr_icp_get;
+    ispc->print_info = spapr_pic_print_info;
 }
 
 static const TypeInfo spapr_machine_info = {
@@ -2929,6 +3069,8 @@ static const TypeInfo spapr_machine_info = {
         { TYPE_NMI },
         { TYPE_HOTPLUG_HANDLER },
         { TYPE_PPC_VIRTUAL_HYPERVISOR },
+        { TYPE_XICS_FABRIC },
+        { TYPE_INTERRUPT_STATS_PROVIDER },
         { }
     },
 };
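The new spapr_map_hptes()/spapr_unmap_hptes() pair above explains the other big theme of this queue: HPT accesses go through map/unmap hooks so that a KVM-managed hash table can be copied into a temporary buffer and released afterwards, while a QEMU-managed one is accessed in place. The snippet below is a self-contained sketch of that map/unmap discipline with generic stand-in names; it is not the QEMU code, just an illustration of why the pointer must be handed back.

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* A hash PTE is two 64-bit doublewords. */
typedef uint64_t hpte_t[2];

/* Sketch: when the table lives outside the caller's address space,
 * "mapping" really means copying into a temporary buffer... */
static const hpte_t *map_hptes_copy(const hpte_t *backing, size_t ptex, size_t n)
{
    hpte_t *tmp = malloc(n * sizeof(hpte_t));

    memcpy(tmp, backing + ptex, n * sizeof(hpte_t));
    return tmp;                  /* caller must hand this back to unmap */
}

/* ...so the matching unmap is where that buffer gets freed.  A
 * directly-mapped backend would make this a no-op instead. */
static void unmap_hptes_copy(const hpte_t *hptes)
{
    free((void *)hptes);
}
```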
@@ -13,10 +13,12 @@
 #include "hw/boards.h"
 #include "qapi/error.h"
 #include "sysemu/cpus.h"
+#include "sysemu/kvm.h"
+#include "target/ppc/kvm_ppc.h"
 #include "hw/ppc/ppc.h"
 #include "target/ppc/mmu-hash64.h"
 #include "sysemu/numa.h"
 #include "qemu/error-report.h"
 
 static void spapr_cpu_reset(void *opaque)
 {
@@ -34,15 +36,26 @@ static void spapr_cpu_reset(void *opaque)
 
     env->spr[SPR_HIOR] = 0;
 
-    ppc_hash64_set_external_hpt(cpu, spapr->htab, spapr->htab_shift,
-                                &error_fatal);
+    /*
+     * This is a hack for the benefit of KVM PR - it abuses the SDR1
+     * slot in kvm_sregs to communicate the userspace address of the
+     * HPT
+     */
+    if (kvm_enabled()) {
+        env->spr[SPR_SDR1] = (target_ulong)(uintptr_t)spapr->htab
+            | (spapr->htab_shift - 18);
+        if (kvmppc_put_books_sregs(cpu) < 0) {
+            error_report("Unable to update SDR1 in KVM");
+            exit(1);
+        }
+    }
 }
 
 static void spapr_cpu_destroy(PowerPCCPU *cpu)
 {
     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
 
-    xics_cpu_destroy(spapr->xics, cpu);
+    xics_cpu_destroy(XICS_FABRIC(spapr), cpu);
     qemu_unregister_reset(spapr_cpu_reset, cpu);
 }
 
@@ -57,8 +70,7 @@ static void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu,
     cpu_ppc_tb_init(env, SPAPR_TIMEBASE_FREQ);
 
     /* Enable PAPR mode in TCG or KVM */
-    cpu_ppc_set_vhyp(cpu, PPC_VIRTUAL_HYPERVISOR(spapr));
-    cpu_ppc_set_papr(cpu);
+    cpu_ppc_set_papr(cpu, PPC_VIRTUAL_HYPERVISOR(spapr));
 
     if (cpu->max_compat) {
         Error *local_err = NULL;
@@ -76,7 +88,7 @@ static void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu,
         cs->numa_node = i;
     }
 
-    xics_cpu_setup(spapr->xics, cpu);
+    xics_cpu_setup(XICS_FABRIC(spapr), cpu);
 
     qemu_register_reset(spapr_cpu_reset, cpu);
     spapr_cpu_reset(cpu);
@@ -481,7 +481,7 @@ static void spapr_powerdown_req(Notifier *n, void *opaque)

    rtas_event_log_queue(RTAS_LOG_TYPE_EPOW, new_epow, true);

    qemu_irq_pulse(xics_get_qirq(spapr->xics,
    qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr),
                                 rtas_event_log_to_irq(spapr,
                                                       RTAS_LOG_TYPE_EPOW)));
}
@@ -574,7 +574,7 @@ static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,

    rtas_event_log_queue(RTAS_LOG_TYPE_HOTPLUG, new_hp, true);

    qemu_irq_pulse(xics_get_qirq(spapr->xics,
    qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr),
                                 rtas_event_log_to_irq(spapr,
                                                       RTAS_LOG_TYPE_HOTPLUG)));
}
@@ -695,7 +695,7 @@ static void check_exception(PowerPCCPU *cpu, sPAPRMachineState *spapr,
            spapr_event_sources_get_source(spapr->event_sources, i);

        g_assert(source->enabled);
        qemu_irq_pulse(xics_get_qirq(spapr->xics, source->irq));
        qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr), source->irq));
    }
}

@@ -752,7 +752,7 @@ void spapr_events_init(sPAPRMachineState *spapr)
    spapr->event_sources = spapr_event_sources_new();

    spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_EPOW,
                                 xics_spapr_alloc(spapr->xics, 0, false,
                                 spapr_ics_alloc(spapr->ics, 0, false,
                                                 &error_fatal));

    /* NOTE: if machine supports modern/dedicated hotplug event source,
@@ -765,7 +765,7 @@ void spapr_events_init(sPAPRMachineState *spapr)
     */
    if (spapr->use_hotplug_event_source) {
        spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_HOT_PLUG,
                                     xics_spapr_alloc(spapr->xics, 0, false,
                                     spapr_ics_alloc(spapr->ics, 0, false,
                                                     &error_fatal));
    }

@@ -47,12 +47,12 @@ static bool has_spr(PowerPCCPU *cpu, int spr)
    return cpu->env.spr_cb[spr].name != NULL;
}

static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
{
    /*
     * hash value/pteg group index is normalized by htab_mask
     * hash value/pteg group index is normalized by HPT mask
     */
    if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) {
    if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
        return false;
    }
    return true;
@@ -77,15 +77,14 @@ static bool is_ram_address(sPAPRMachineState *spapr, hwaddr addr)
static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong ptex = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift;
    target_ulong raddr;
    target_ulong index;
    uint64_t token;
    target_ulong slot;
    const ppc_hash_pte64_t *hptes;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
    if (!apshift) {
@@ -116,36 +115,36 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,

    pteh &= ~0x60ULL;

    if (!valid_pte_index(env, pte_index)) {
    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    index = 0;
    slot = ptex & 7ULL;
    ptex = ptex & ~7ULL;

    if (likely((flags & H_EXACT) == 0)) {
        pte_index &= ~7ULL;
        token = ppc_hash64_start_access(cpu, pte_index);
        for (; index < 8; index++) {
            if (!(ppc_hash64_load_hpte0(cpu, token, index) & HPTE64_V_VALID)) {
        hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
        for (slot = 0; slot < 8; slot++) {
            if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_stop_access(cpu, token);
        if (index == 8) {
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
        if (slot == 8) {
            return H_PTEG_FULL;
        }
    } else {
        token = ppc_hash64_start_access(cpu, pte_index);
        if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) {
            ppc_hash64_stop_access(cpu, token);
        hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1);
        if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) {
            ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1);
            return H_PTEG_FULL;
        }
        ppc_hash64_stop_access(cpu, token);
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
    }

    ppc_hash64_store_hpte(cpu, pte_index + index,
                          pteh | HPTE64_V_HPTE_DIRTY, ptel);
    ppc_hash64_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = pte_index + index;
    args[0] = ptex + slot;
    return H_SUCCESS;
}

@ -161,18 +160,17 @@ static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
|
||||
target_ulong flags,
|
||||
target_ulong *vp, target_ulong *rp)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
uint64_t token;
|
||||
const ppc_hash_pte64_t *hptes;
|
||||
target_ulong v, r;
|
||||
|
||||
if (!valid_pte_index(env, ptex)) {
|
||||
if (!valid_ptex(cpu, ptex)) {
|
||||
return REMOVE_PARM;
|
||||
}
|
||||
|
||||
token = ppc_hash64_start_access(cpu, ptex);
|
||||
v = ppc_hash64_load_hpte0(cpu, token, 0);
|
||||
r = ppc_hash64_load_hpte1(cpu, token, 0);
|
||||
ppc_hash64_stop_access(cpu, token);
|
||||
hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
|
||||
v = ppc_hash64_hpte0(cpu, hptes, 0);
|
||||
r = ppc_hash64_hpte1(cpu, hptes, 0);
|
||||
ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
|
||||
|
||||
if ((v & HPTE64_V_VALID) == 0 ||
|
||||
((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
|
||||
@ -191,11 +189,11 @@ static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
target_ulong flags = args[0];
|
||||
target_ulong pte_index = args[1];
|
||||
target_ulong ptex = args[1];
|
||||
target_ulong avpn = args[2];
|
||||
RemoveResult ret;
|
||||
|
||||
ret = remove_hpte(cpu, pte_index, avpn, flags,
|
||||
ret = remove_hpte(cpu, ptex, avpn, flags,
|
||||
&args[0], &args[1]);
|
||||
|
||||
switch (ret) {
|
||||
@ -291,19 +289,19 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
target_ulong flags = args[0];
|
||||
target_ulong pte_index = args[1];
|
||||
target_ulong ptex = args[1];
|
||||
target_ulong avpn = args[2];
|
||||
uint64_t token;
|
||||
const ppc_hash_pte64_t *hptes;
|
||||
target_ulong v, r;
|
||||
|
||||
if (!valid_pte_index(env, pte_index)) {
|
||||
if (!valid_ptex(cpu, ptex)) {
|
||||
return H_PARAMETER;
|
||||
}
|
||||
|
||||
token = ppc_hash64_start_access(cpu, pte_index);
|
||||
v = ppc_hash64_load_hpte0(cpu, token, 0);
|
||||
r = ppc_hash64_load_hpte1(cpu, token, 0);
|
||||
ppc_hash64_stop_access(cpu, token);
|
||||
hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
|
||||
v = ppc_hash64_hpte0(cpu, hptes, 0);
|
||||
r = ppc_hash64_hpte1(cpu, hptes, 0);
|
||||
ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
|
||||
|
||||
if ((v & HPTE64_V_VALID) == 0 ||
|
||||
((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
|
||||
@ -315,36 +313,35 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
r |= (flags << 55) & HPTE64_R_PP0;
|
||||
r |= (flags << 48) & HPTE64_R_KEY_HI;
|
||||
r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
|
||||
ppc_hash64_store_hpte(cpu, pte_index,
|
||||
ppc_hash64_store_hpte(cpu, ptex,
|
||||
(v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
|
||||
ppc_hash64_tlb_flush_hpte(cpu, pte_index, v, r);
|
||||
ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
|
||||
/* Flush the tlb */
|
||||
check_tlb_flush(env, true);
|
||||
/* Don't need a memory barrier, due to qemu's global lock */
|
||||
ppc_hash64_store_hpte(cpu, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
|
||||
ppc_hash64_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
target_ulong opcode, target_ulong *args)
|
||||
{
|
||||
CPUPPCState *env = &cpu->env;
|
||||
target_ulong flags = args[0];
|
||||
target_ulong pte_index = args[1];
|
||||
target_ulong ptex = args[1];
|
||||
uint8_t *hpte;
|
||||
int i, ridx, n_entries = 1;
|
||||
|
||||
if (!valid_pte_index(env, pte_index)) {
|
||||
if (!valid_ptex(cpu, ptex)) {
|
||||
return H_PARAMETER;
|
||||
}
|
||||
|
||||
if (flags & H_READ_4) {
|
||||
/* Clear the two low order bits */
|
||||
pte_index &= ~(3ULL);
|
||||
ptex &= ~(3ULL);
|
||||
n_entries = 4;
|
||||
}
|
||||
|
||||
hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);
|
||||
hpte = spapr->htab + (ptex * HASH_PTE_SIZE_64);
|
||||
|
||||
for (i = 0, ridx = 0; i < n_entries; i++) {
|
||||
args[ridx++] = ldq_p(hpte);
|
||||
|
@ -43,6 +43,7 @@
|
||||
|
||||
#include "hw/pci/pci_bridge.h"
|
||||
#include "hw/pci/pci_bus.h"
|
||||
#include "hw/pci/pci_ids.h"
|
||||
#include "hw/ppc/spapr_drc.h"
|
||||
#include "sysemu/device_tree.h"
|
||||
#include "sysemu/kvm.h"
|
||||
@ -325,7 +326,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
return;
|
||||
}
|
||||
|
||||
xics_spapr_free(spapr->xics, msi->first_irq, msi->num);
|
||||
spapr_ics_free(spapr->ics, msi->first_irq, msi->num);
|
||||
if (msi_present(pdev)) {
|
||||
spapr_msi_setmsg(pdev, 0, false, 0, 0);
|
||||
}
|
||||
@ -363,7 +364,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
}
|
||||
|
||||
/* Allocate MSIs */
|
||||
irq = xics_spapr_alloc_block(spapr->xics, req_num, false,
|
||||
irq = spapr_ics_alloc_block(spapr->ics, req_num, false,
|
||||
ret_intr_type == RTAS_TYPE_MSI, &err);
|
||||
if (err) {
|
||||
error_reportf_err(err, "Can't allocate MSIs for device %x: ",
|
||||
@ -374,7 +375,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
|
||||
|
||||
/* Release previous MSIs */
|
||||
if (msi) {
|
||||
xics_spapr_free(spapr->xics, msi->first_irq, msi->num);
|
||||
spapr_ics_free(spapr->ics, msi->first_irq, msi->num);
|
||||
g_hash_table_remove(phb->msi, &config_addr);
|
||||
}
|
||||
|
||||
@ -736,7 +737,7 @@ static void spapr_msi_write(void *opaque, hwaddr addr,
|
||||
|
||||
trace_spapr_pci_msi_write(addr, data, irq);
|
||||
|
||||
qemu_irq_pulse(xics_get_qirq(spapr->xics, irq));
|
||||
qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr), irq));
|
||||
}
|
||||
|
||||
static const MemoryRegionOps spapr_msi_ops = {
|
||||
@ -946,6 +947,274 @@ static void populate_resource_props(PCIDevice *d, ResourceProps *rp)
|
||||
rp->assigned_len = assigned_idx * sizeof(ResourceFields);
|
||||
}
|
||||
|
||||
typedef struct PCIClass PCIClass;
|
||||
typedef struct PCISubClass PCISubClass;
|
||||
typedef struct PCIIFace PCIIFace;
|
||||
|
||||
struct PCIIFace {
|
||||
int iface;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
struct PCISubClass {
|
||||
int subclass;
|
||||
const char *name;
|
||||
const PCIIFace *iface;
|
||||
};
|
||||
|
||||
struct PCIClass {
|
||||
const char *name;
|
||||
const PCISubClass *subc;
|
||||
};
|
||||
|
||||
static const PCISubClass undef_subclass[] = {
|
||||
{ PCI_CLASS_NOT_DEFINED_VGA, "display", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass mass_subclass[] = {
|
||||
{ PCI_CLASS_STORAGE_SCSI, "scsi", NULL },
|
||||
{ PCI_CLASS_STORAGE_IDE, "ide", NULL },
|
||||
{ PCI_CLASS_STORAGE_FLOPPY, "fdc", NULL },
|
||||
{ PCI_CLASS_STORAGE_IPI, "ipi", NULL },
|
||||
{ PCI_CLASS_STORAGE_RAID, "raid", NULL },
|
||||
{ PCI_CLASS_STORAGE_ATA, "ata", NULL },
|
||||
{ PCI_CLASS_STORAGE_SATA, "sata", NULL },
|
||||
{ PCI_CLASS_STORAGE_SAS, "sas", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass net_subclass[] = {
|
||||
{ PCI_CLASS_NETWORK_ETHERNET, "ethernet", NULL },
|
||||
{ PCI_CLASS_NETWORK_TOKEN_RING, "token-ring", NULL },
|
||||
{ PCI_CLASS_NETWORK_FDDI, "fddi", NULL },
|
||||
{ PCI_CLASS_NETWORK_ATM, "atm", NULL },
|
||||
{ PCI_CLASS_NETWORK_ISDN, "isdn", NULL },
|
||||
{ PCI_CLASS_NETWORK_WORLDFIP, "worldfip", NULL },
|
||||
{ PCI_CLASS_NETWORK_PICMG214, "picmg", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass displ_subclass[] = {
|
||||
{ PCI_CLASS_DISPLAY_VGA, "vga", NULL },
|
||||
{ PCI_CLASS_DISPLAY_XGA, "xga", NULL },
|
||||
{ PCI_CLASS_DISPLAY_3D, "3d-controller", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass media_subclass[] = {
|
||||
{ PCI_CLASS_MULTIMEDIA_VIDEO, "video", NULL },
|
||||
{ PCI_CLASS_MULTIMEDIA_AUDIO, "sound", NULL },
|
||||
{ PCI_CLASS_MULTIMEDIA_PHONE, "telephony", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass mem_subclass[] = {
|
||||
{ PCI_CLASS_MEMORY_RAM, "memory", NULL },
|
||||
{ PCI_CLASS_MEMORY_FLASH, "flash", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass bridg_subclass[] = {
|
||||
{ PCI_CLASS_BRIDGE_HOST, "host", NULL },
|
||||
{ PCI_CLASS_BRIDGE_ISA, "isa", NULL },
|
||||
{ PCI_CLASS_BRIDGE_EISA, "eisa", NULL },
|
||||
{ PCI_CLASS_BRIDGE_MC, "mca", NULL },
|
||||
{ PCI_CLASS_BRIDGE_PCI, "pci", NULL },
|
||||
{ PCI_CLASS_BRIDGE_PCMCIA, "pcmcia", NULL },
|
||||
{ PCI_CLASS_BRIDGE_NUBUS, "nubus", NULL },
|
||||
{ PCI_CLASS_BRIDGE_CARDBUS, "cardbus", NULL },
|
||||
{ PCI_CLASS_BRIDGE_RACEWAY, "raceway", NULL },
|
||||
{ PCI_CLASS_BRIDGE_PCI_SEMITP, "semi-transparent-pci", NULL },
|
||||
{ PCI_CLASS_BRIDGE_IB_PCI, "infiniband", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass comm_subclass[] = {
|
||||
{ PCI_CLASS_COMMUNICATION_SERIAL, "serial", NULL },
|
||||
{ PCI_CLASS_COMMUNICATION_PARALLEL, "parallel", NULL },
|
||||
{ PCI_CLASS_COMMUNICATION_MULTISERIAL, "multiport-serial", NULL },
|
||||
{ PCI_CLASS_COMMUNICATION_MODEM, "modem", NULL },
|
||||
{ PCI_CLASS_COMMUNICATION_GPIB, "gpib", NULL },
|
||||
{ PCI_CLASS_COMMUNICATION_SC, "smart-card", NULL },
|
||||
{ 0xFF, NULL, NULL, },
|
||||
};
|
||||
|
||||
static const PCIIFace pic_iface[] = {
|
||||
{ PCI_CLASS_SYSTEM_PIC_IOAPIC, "io-apic" },
|
||||
{ PCI_CLASS_SYSTEM_PIC_IOXAPIC, "io-xapic" },
|
||||
{ 0xFF, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass sys_subclass[] = {
|
||||
{ PCI_CLASS_SYSTEM_PIC, "interrupt-controller", pic_iface },
|
||||
{ PCI_CLASS_SYSTEM_DMA, "dma-controller", NULL },
|
||||
{ PCI_CLASS_SYSTEM_TIMER, "timer", NULL },
|
||||
{ PCI_CLASS_SYSTEM_RTC, "rtc", NULL },
|
||||
{ PCI_CLASS_SYSTEM_PCI_HOTPLUG, "hot-plug-controller", NULL },
|
||||
{ PCI_CLASS_SYSTEM_SDHCI, "sd-host-controller", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass inp_subclass[] = {
|
||||
{ PCI_CLASS_INPUT_KEYBOARD, "keyboard", NULL },
|
||||
{ PCI_CLASS_INPUT_PEN, "pen", NULL },
|
||||
{ PCI_CLASS_INPUT_MOUSE, "mouse", NULL },
|
||||
{ PCI_CLASS_INPUT_SCANNER, "scanner", NULL },
|
||||
{ PCI_CLASS_INPUT_GAMEPORT, "gameport", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass dock_subclass[] = {
|
||||
{ PCI_CLASS_DOCKING_GENERIC, "dock", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass cpu_subclass[] = {
|
||||
{ PCI_CLASS_PROCESSOR_PENTIUM, "pentium", NULL },
|
||||
{ PCI_CLASS_PROCESSOR_POWERPC, "powerpc", NULL },
|
||||
{ PCI_CLASS_PROCESSOR_MIPS, "mips", NULL },
|
||||
{ PCI_CLASS_PROCESSOR_CO, "co-processor", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCIIFace usb_iface[] = {
|
||||
{ PCI_CLASS_SERIAL_USB_UHCI, "usb-uhci" },
|
||||
{ PCI_CLASS_SERIAL_USB_OHCI, "usb-ohci", },
|
||||
{ PCI_CLASS_SERIAL_USB_EHCI, "usb-ehci" },
|
||||
{ PCI_CLASS_SERIAL_USB_XHCI, "usb-xhci" },
|
||||
{ PCI_CLASS_SERIAL_USB_UNKNOWN, "usb-unknown" },
|
||||
{ PCI_CLASS_SERIAL_USB_DEVICE, "usb-device" },
|
||||
{ 0xFF, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass ser_subclass[] = {
|
||||
{ PCI_CLASS_SERIAL_FIREWIRE, "firewire", NULL },
|
||||
{ PCI_CLASS_SERIAL_ACCESS, "access-bus", NULL },
|
||||
{ PCI_CLASS_SERIAL_SSA, "ssa", NULL },
|
||||
{ PCI_CLASS_SERIAL_USB, "usb", usb_iface },
|
||||
{ PCI_CLASS_SERIAL_FIBER, "fibre-channel", NULL },
|
||||
{ PCI_CLASS_SERIAL_SMBUS, "smb", NULL },
|
||||
{ PCI_CLASS_SERIAL_IB, "infiniband", NULL },
|
||||
{ PCI_CLASS_SERIAL_IPMI, "ipmi", NULL },
|
||||
{ PCI_CLASS_SERIAL_SERCOS, "sercos", NULL },
|
||||
{ PCI_CLASS_SERIAL_CANBUS, "canbus", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass wrl_subclass[] = {
|
||||
{ PCI_CLASS_WIRELESS_IRDA, "irda", NULL },
|
||||
{ PCI_CLASS_WIRELESS_CIR, "consumer-ir", NULL },
|
||||
{ PCI_CLASS_WIRELESS_RF_CONTROLLER, "rf-controller", NULL },
|
||||
{ PCI_CLASS_WIRELESS_BLUETOOTH, "bluetooth", NULL },
|
||||
{ PCI_CLASS_WIRELESS_BROADBAND, "broadband", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass sat_subclass[] = {
|
||||
{ PCI_CLASS_SATELLITE_TV, "satellite-tv", NULL },
|
||||
{ PCI_CLASS_SATELLITE_AUDIO, "satellite-audio", NULL },
|
||||
{ PCI_CLASS_SATELLITE_VOICE, "satellite-voice", NULL },
|
||||
{ PCI_CLASS_SATELLITE_DATA, "satellite-data", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass crypt_subclass[] = {
|
||||
{ PCI_CLASS_CRYPT_NETWORK, "network-encryption", NULL },
|
||||
{ PCI_CLASS_CRYPT_ENTERTAINMENT,
|
||||
"entertainment-encryption", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCISubClass spc_subclass[] = {
|
||||
{ PCI_CLASS_SP_DPIO, "dpio", NULL },
|
||||
{ PCI_CLASS_SP_PERF, "counter", NULL },
|
||||
{ PCI_CLASS_SP_SYNCH, "measurement", NULL },
|
||||
{ PCI_CLASS_SP_MANAGEMENT, "management-card", NULL },
|
||||
{ 0xFF, NULL, NULL },
|
||||
};
|
||||
|
||||
static const PCIClass pci_classes[] = {
|
||||
{ "legacy-device", undef_subclass },
|
||||
{ "mass-storage", mass_subclass },
|
||||
{ "network", net_subclass },
|
||||
{ "display", displ_subclass, },
|
||||
{ "multimedia-device", media_subclass },
|
||||
{ "memory-controller", mem_subclass },
|
||||
{ "unknown-bridge", bridg_subclass },
|
||||
{ "communication-controller", comm_subclass},
|
||||
{ "system-peripheral", sys_subclass },
|
||||
{ "input-controller", inp_subclass },
|
||||
{ "docking-station", dock_subclass },
|
||||
{ "cpu", cpu_subclass },
|
||||
{ "serial-bus", ser_subclass },
|
||||
{ "wireless-controller", wrl_subclass },
|
||||
{ "intelligent-io", NULL },
|
||||
{ "satellite-device", sat_subclass },
|
||||
{ "encryption", crypt_subclass },
|
||||
{ "data-processing-controller", spc_subclass },
|
||||
};
|
||||
|
||||
static const char *pci_find_device_name(uint8_t class, uint8_t subclass,
|
||||
uint8_t iface)
|
||||
{
|
||||
const PCIClass *pclass;
|
||||
const PCISubClass *psubclass;
|
||||
const PCIIFace *piface;
|
||||
const char *name;
|
||||
|
||||
if (class >= ARRAY_SIZE(pci_classes)) {
|
||||
return "pci";
|
||||
}
|
||||
|
||||
pclass = pci_classes + class;
|
||||
name = pclass->name;
|
||||
|
||||
if (pclass->subc == NULL) {
|
||||
return name;
|
||||
}
|
||||
|
||||
psubclass = pclass->subc;
|
||||
while ((psubclass->subclass & 0xff) != 0xff) {
|
||||
if ((psubclass->subclass & 0xff) == subclass) {
|
||||
name = psubclass->name;
|
||||
break;
|
||||
}
|
||||
psubclass++;
|
||||
}
|
||||
|
||||
piface = psubclass->iface;
|
||||
if (piface == NULL) {
|
||||
return name;
|
||||
}
|
||||
while ((piface->iface & 0xff) != 0xff) {
|
||||
if ((piface->iface & 0xff) == iface) {
|
||||
name = piface->name;
|
||||
break;
|
||||
}
|
||||
piface++;
|
||||
}
|
||||
|
||||
return name;
|
||||
}
|
||||
|
||||
static void pci_get_node_name(char *nodename, int len, PCIDevice *dev)
|
||||
{
|
||||
int slot = PCI_SLOT(dev->devfn);
|
||||
int func = PCI_FUNC(dev->devfn);
|
||||
uint32_t ccode = pci_default_read_config(dev, PCI_CLASS_PROG, 3);
|
||||
const char *name;
|
||||
|
||||
name = pci_find_device_name((ccode >> 16) & 0xff, (ccode >> 8) & 0xff,
|
||||
ccode & 0xff);
|
||||
|
||||
if (func != 0) {
|
||||
snprintf(nodename, len, "%s@%x,%x", name, slot, func);
|
||||
} else {
|
||||
snprintf(nodename, len, "%s@%x", name, slot);
|
||||
}
|
||||
}
|
||||
|
||||
static uint32_t spapr_phb_get_pci_drc_index(sPAPRPHBState *phb,
|
||||
PCIDevice *pdev);
|
||||
|
||||
@ -957,6 +1226,7 @@ static int spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
|
||||
int pci_status, err;
|
||||
char *buf = NULL;
|
||||
uint32_t drc_index = spapr_phb_get_pci_drc_index(sphb, dev);
|
||||
uint32_t ccode = pci_default_read_config(dev, PCI_CLASS_PROG, 3);
|
||||
uint32_t max_msi, max_msix;
|
||||
|
||||
if (pci_default_read_config(dev, PCI_HEADER_TYPE, 1) ==
|
||||
@ -971,8 +1241,7 @@ static int spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
|
||||
pci_default_read_config(dev, PCI_DEVICE_ID, 2)));
|
||||
_FDT(fdt_setprop_cell(fdt, offset, "revision-id",
|
||||
pci_default_read_config(dev, PCI_REVISION_ID, 1)));
|
||||
_FDT(fdt_setprop_cell(fdt, offset, "class-code",
|
||||
pci_default_read_config(dev, PCI_CLASS_PROG, 3)));
|
||||
_FDT(fdt_setprop_cell(fdt, offset, "class-code", ccode));
|
||||
if (pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)) {
|
||||
_FDT(fdt_setprop_cell(fdt, offset, "interrupts",
|
||||
pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)));
|
||||
@ -1013,11 +1282,10 @@ static int spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
|
||||
_FDT(fdt_setprop(fdt, offset, "udf-supported", NULL, 0));
|
||||
}
|
||||
|
||||
/* NOTE: this is normally generated by firmware via path/unit name,
|
||||
* but in our case we must set it manually since it does not get
|
||||
* processed by OF beforehand
|
||||
*/
|
||||
_FDT(fdt_setprop_string(fdt, offset, "name", "pci"));
|
||||
_FDT(fdt_setprop_string(fdt, offset, "name",
|
||||
pci_find_device_name((ccode >> 16) & 0xff,
|
||||
(ccode >> 8) & 0xff,
|
||||
ccode & 0xff)));
|
||||
buf = spapr_phb_get_loc_code(sphb, dev);
|
||||
if (!buf) {
|
||||
error_report("Failed setting the ibm,loc-code");
|
||||
@ -1061,15 +1329,9 @@ static int spapr_create_pci_child_dt(sPAPRPHBState *phb, PCIDevice *dev,
|
||||
void *fdt, int node_offset)
|
||||
{
|
||||
int offset, ret;
|
||||
int slot = PCI_SLOT(dev->devfn);
|
||||
int func = PCI_FUNC(dev->devfn);
|
||||
char nodename[FDT_NAME_MAX];
|
||||
|
||||
if (func != 0) {
|
||||
snprintf(nodename, FDT_NAME_MAX, "pci@%x,%x", slot, func);
|
||||
} else {
|
||||
snprintf(nodename, FDT_NAME_MAX, "pci@%x", slot);
|
||||
}
|
||||
pci_get_node_name(nodename, FDT_NAME_MAX, dev);
|
||||
offset = fdt_add_subnode(fdt, node_offset, nodename);
|
||||
ret = spapr_populate_pci_child_dt(dev, fdt, offset, phb);
|
||||
|
||||
@ -1485,7 +1747,7 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
|
||||
uint32_t irq;
|
||||
Error *local_err = NULL;
|
||||
|
||||
irq = xics_spapr_alloc_block(spapr->xics, 1, true, false, &local_err);
|
||||
irq = spapr_ics_alloc_block(spapr->ics, 1, true, false, &local_err);
|
||||
if (local_err) {
|
||||
error_propagate(errp, local_err);
|
||||
error_prepend(errp, "can't allocate LSIs: ");
|
||||
@ -1782,9 +2044,9 @@ static void spapr_populate_pci_devices_dt(PCIBus *bus, PCIDevice *pdev,
|
||||
s_fdt.fdt = p->fdt;
|
||||
s_fdt.node_off = offset;
|
||||
s_fdt.sphb = p->sphb;
|
||||
pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
|
||||
spapr_populate_pci_devices_dt,
|
||||
&s_fdt);
|
||||
pci_for_each_device_reverse(sec_bus, pci_bus_num(sec_bus),
|
||||
spapr_populate_pci_devices_dt,
|
||||
&s_fdt);
|
||||
}
|
||||
|
||||
static void spapr_phb_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
|
||||
@ -1953,9 +2215,9 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
|
||||
s_fdt.fdt = fdt;
|
||||
s_fdt.node_off = bus_off;
|
||||
s_fdt.sphb = phb;
|
||||
pci_for_each_device(bus, pci_bus_num(bus),
|
||||
spapr_populate_pci_devices_dt,
|
||||
&s_fdt);
|
||||
pci_for_each_device_reverse(bus, pci_bus_num(bus),
|
||||
spapr_populate_pci_devices_dt,
|
||||
&s_fdt);
|
||||
|
||||
ret = spapr_drc_populate_dt(fdt, bus_off, OBJECT(phb),
|
||||
SPAPR_DR_CONNECTOR_TYPE_PCI);
|
||||
|
@@ -454,7 +454,7 @@ static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp)
        dev->qdev.id = id;
    }

    dev->irq = xics_spapr_alloc(spapr->xics, dev->irq, false, &local_err);
    dev->irq = spapr_ics_alloc(spapr->ics, dev->irq, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
@@ -106,7 +106,7 @@ static inline qemu_irq spapr_phb_lsi_qirq(struct sPAPRPHBState *phb, int pin)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    return xics_get_qirq(spapr->xics, phb->lsi_table[pin].irq);
    return xics_get_qirq(XICS_FABRIC(spapr), phb->lsi_table[pin].irq);
}

PCIHostState *spapr_create_phb(sPAPRMachineState *spapr, int index);
@@ -429,6 +429,10 @@ int pci_bus_numa_node(PCIBus *bus);
void pci_for_each_device(PCIBus *bus, int bus_num,
                         void (*fn)(PCIBus *bus, PCIDevice *d, void *opaque),
                         void *opaque);
void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
                                 void (*fn)(PCIBus *bus, PCIDevice *d,
                                            void *opaque),
                                 void *opaque);
void pci_for_each_bus_depth_first(PCIBus *bus,
                                  void *(*begin)(PCIBus *bus, void *parent_state),
                                  void (*end)(PCIBus *bus, void *state),
@ -13,41 +13,84 @@
|
||||
|
||||
/* Device classes and subclasses */
|
||||
|
||||
#define PCI_BASE_CLASS_STORAGE 0x01
|
||||
#define PCI_BASE_CLASS_NETWORK 0x02
|
||||
#define PCI_CLASS_NOT_DEFINED 0x0000
|
||||
#define PCI_CLASS_NOT_DEFINED_VGA 0x0001
|
||||
|
||||
#define PCI_BASE_CLASS_STORAGE 0x01
|
||||
#define PCI_CLASS_STORAGE_SCSI 0x0100
|
||||
#define PCI_CLASS_STORAGE_IDE 0x0101
|
||||
#define PCI_CLASS_STORAGE_FLOPPY 0x0102
|
||||
#define PCI_CLASS_STORAGE_IPI 0x0103
|
||||
#define PCI_CLASS_STORAGE_RAID 0x0104
|
||||
#define PCI_CLASS_STORAGE_ATA 0x0105
|
||||
#define PCI_CLASS_STORAGE_SATA 0x0106
|
||||
#define PCI_CLASS_STORAGE_SAS 0x0107
|
||||
#define PCI_CLASS_STORAGE_EXPRESS 0x0108
|
||||
#define PCI_CLASS_STORAGE_OTHER 0x0180
|
||||
|
||||
#define PCI_BASE_CLASS_NETWORK 0x02
|
||||
#define PCI_CLASS_NETWORK_ETHERNET 0x0200
|
||||
#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201
|
||||
#define PCI_CLASS_NETWORK_FDDI 0x0202
|
||||
#define PCI_CLASS_NETWORK_ATM 0x0203
|
||||
#define PCI_CLASS_NETWORK_ISDN 0x0204
|
||||
#define PCI_CLASS_NETWORK_WORLDFIP 0x0205
|
||||
#define PCI_CLASS_NETWORK_PICMG214 0x0206
|
||||
#define PCI_CLASS_NETWORK_OTHER 0x0280
|
||||
|
||||
#define PCI_BASE_CLASS_DISPLAY 0x03
|
||||
#define PCI_CLASS_DISPLAY_VGA 0x0300
|
||||
#define PCI_CLASS_DISPLAY_XGA 0x0301
|
||||
#define PCI_CLASS_DISPLAY_3D 0x0302
|
||||
#define PCI_CLASS_DISPLAY_OTHER 0x0380
|
||||
|
||||
#define PCI_BASE_CLASS_MULTIMEDIA 0x04
|
||||
#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
|
||||
#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
|
||||
#define PCI_CLASS_MULTIMEDIA_PHONE 0x0402
|
||||
#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
|
||||
|
||||
#define PCI_BASE_CLASS_MEMORY 0x05
|
||||
#define PCI_CLASS_MEMORY_RAM 0x0500
|
||||
#define PCI_CLASS_MEMORY_FLASH 0x0501
|
||||
#define PCI_CLASS_MEMORY_OTHER 0x0580
|
||||
|
||||
#define PCI_BASE_CLASS_BRIDGE 0x06
|
||||
#define PCI_CLASS_BRIDGE_HOST 0x0600
|
||||
#define PCI_CLASS_BRIDGE_ISA 0x0601
|
||||
#define PCI_CLASS_BRIDGE_EISA 0x0602
|
||||
#define PCI_CLASS_BRIDGE_MC 0x0603
|
||||
#define PCI_CLASS_BRIDGE_PCI 0x0604
|
||||
#define PCI_CLASS_BRIDGE_PCI_INF_SUB 0x01
|
||||
#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
|
||||
#define PCI_CLASS_BRIDGE_NUBUS 0x0606
|
||||
#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
|
||||
#define PCI_CLASS_BRIDGE_RACEWAY 0x0608
|
||||
#define PCI_CLASS_BRIDGE_PCI_SEMITP 0x0609
|
||||
#define PCI_CLASS_BRIDGE_IB_PCI 0x060a
|
||||
#define PCI_CLASS_BRIDGE_OTHER 0x0680
|
||||
|
||||
#define PCI_BASE_CLASS_COMMUNICATION 0x07
|
||||
#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700
|
||||
#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701
|
||||
#define PCI_CLASS_COMMUNICATION_MULTISERIAL 0x0702
|
||||
#define PCI_CLASS_COMMUNICATION_MODEM 0x0703
|
||||
#define PCI_CLASS_COMMUNICATION_GPIB 0x0704
|
||||
#define PCI_CLASS_COMMUNICATION_SC 0x0705
|
||||
#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
|
||||
|
||||
#define PCI_BASE_CLASS_SYSTEM 0x08
|
||||
#define PCI_CLASS_SYSTEM_PIC 0x0800
|
||||
#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010
|
||||
#define PCI_CLASS_SYSTEM_PIC_IOXAPIC 0x080020
|
||||
#define PCI_CLASS_SYSTEM_DMA 0x0801
|
||||
#define PCI_CLASS_SYSTEM_TIMER 0x0802
|
||||
#define PCI_CLASS_SYSTEM_RTC 0x0803
|
||||
#define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804
|
||||
#define PCI_CLASS_SYSTEM_SDHCI 0x0805
|
||||
#define PCI_CLASS_SYSTEM_OTHER 0x0880
|
||||
|
||||
#define PCI_CLASS_SERIAL_USB 0x0c03
|
||||
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
|
||||
|
||||
#define PCI_CLASS_BRIDGE_HOST 0x0600
|
||||
#define PCI_CLASS_BRIDGE_ISA 0x0601
|
||||
#define PCI_CLASS_BRIDGE_PCI 0x0604
|
||||
#define PCI_CLASS_BRIDGE_PCI_INF_SUB 0x01
|
||||
#define PCI_CLASS_BRIDGE_OTHER 0x0680
|
||||
|
||||
#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700
|
||||
#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
|
||||
|
||||
#define PCI_BASE_CLASS_INPUT 0x09
|
||||
#define PCI_CLASS_INPUT_KEYBOARD 0x0900
|
||||
#define PCI_CLASS_INPUT_PEN 0x0901
|
||||
#define PCI_CLASS_INPUT_MOUSE 0x0902
|
||||
@ -55,8 +98,59 @@
|
||||
#define PCI_CLASS_INPUT_GAMEPORT 0x0904
|
||||
#define PCI_CLASS_INPUT_OTHER 0x0980
|
||||
|
||||
#define PCI_CLASS_PROCESSOR_CO 0x0b40
|
||||
#define PCI_BASE_CLASS_DOCKING 0x0a
|
||||
#define PCI_CLASS_DOCKING_GENERIC 0x0a00
|
||||
#define PCI_CLASS_DOCKING_OTHER 0x0a80
|
||||
|
||||
#define PCI_BASE_CLASS_PROCESSOR 0x0b
|
||||
#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02
|
||||
#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20
|
||||
#define PCI_CLASS_PROCESSOR_MIPS 0x0b30
|
||||
#define PCI_CLASS_PROCESSOR_CO 0x0b40
|
||||
|
||||
#define PCI_BASE_CLASS_SERIAL 0x0c
|
||||
#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00
|
||||
#define PCI_CLASS_SERIAL_ACCESS 0x0c01
|
||||
#define PCI_CLASS_SERIAL_SSA 0x0c02
|
||||
#define PCI_CLASS_SERIAL_USB 0x0c03
|
||||
#define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300
|
||||
#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310
|
||||
#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320
|
||||
#define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330
|
||||
#define PCI_CLASS_SERIAL_USB_UNKNOWN 0x0c0380
|
||||
#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe
|
||||
#define PCI_CLASS_SERIAL_FIBER 0x0c04
|
||||
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
|
||||
#define PCI_CLASS_SERIAL_IB 0x0c06
|
||||
#define PCI_CLASS_SERIAL_IPMI 0x0c07
|
||||
#define PCI_CLASS_SERIAL_SERCOS 0x0c08
|
||||
#define PCI_CLASS_SERIAL_CANBUS 0x0c09
|
||||
|
||||
#define PCI_BASE_CLASS_WIRELESS 0x0d
|
||||
#define PCI_CLASS_WIRELESS_IRDA 0x0d00
|
||||
#define PCI_CLASS_WIRELESS_CIR 0x0d01
|
||||
#define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10
|
||||
#define PCI_CLASS_WIRELESS_BLUETOOTH 0x0d11
|
||||
#define PCI_CLASS_WIRELESS_BROADBAND 0x0d12
|
||||
#define PCI_CLASS_WIRELESS_OTHER 0x0d80
|
||||
|
||||
#define PCI_BASE_CLASS_SATELLITE 0x0f
|
||||
#define PCI_CLASS_SATELLITE_TV 0x0f00
|
||||
#define PCI_CLASS_SATELLITE_AUDIO 0x0f01
|
||||
#define PCI_CLASS_SATELLITE_VOICE 0x0f03
|
||||
#define PCI_CLASS_SATELLITE_DATA 0x0f04
|
||||
|
||||
#define PCI_BASE_CLASS_CRYPT 0x10
|
||||
#define PCI_CLASS_CRYPT_NETWORK 0x1000
|
||||
#define PCI_CLASS_CRYPT_ENTERTAINMENT 0x1001
|
||||
#define PCI_CLASS_CRYPT_OTHER 0x1080
|
||||
|
||||
#define PCI_BASE_CLASS_SIGNAL_PROCESSING 0x11
|
||||
#define PCI_CLASS_SP_DPIO 0x1100
|
||||
#define PCI_CLASS_SP_PERF 0x1101
|
||||
#define PCI_CLASS_SP_SYNCH 0x1110
|
||||
#define PCI_CLASS_SP_MANAGEMENT 0x1120
|
||||
#define PCI_CLASS_SP_OTHER 0x1180
|
||||
|
||||
#define PCI_CLASS_OTHERS 0xff
|
||||
|
||||
|
@@ -58,7 +58,7 @@ struct sPAPRMachineState {
    struct VIOsPAPRBus *vio_bus;
    QLIST_HEAD(, sPAPRPHBState) phbs;
    struct sPAPRNVRAM *nvram;
    XICSState *xics;
    ICSState *ics;
    DeviceState *rtc;

    void *htab;
@@ -94,6 +94,9 @@ struct sPAPRMachineState {
    /*< public >*/
    char *kvm_type;
    MemoryHotplugState hotplug_memory;

    uint32_t nr_servers;
    ICPState *icps;
};

#define H_SUCCESS 0
@@ -87,7 +87,7 @@ static inline qemu_irq spapr_vio_qirq(VIOsPAPRDevice *dev)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    return xics_get_qirq(spapr->xics, dev->irq);
    return xics_get_qirq(XICS_FABRIC(spapr), dev->irq);
}

static inline bool spapr_vio_dma_valid(VIOsPAPRDevice *dev, uint64_t taddr,
@ -30,29 +30,6 @@
|
||||
|
||||
#include "hw/sysbus.h"
|
||||
|
||||
#define TYPE_XICS_COMMON "xics-common"
|
||||
#define XICS_COMMON(obj) OBJECT_CHECK(XICSState, (obj), TYPE_XICS_COMMON)
|
||||
|
||||
/*
|
||||
* Retain xics as the type name to be compatible for migration. Rest all the
|
||||
* functions, class and variables are renamed as xics_spapr.
|
||||
*/
|
||||
#define TYPE_XICS_SPAPR "xics"
|
||||
#define XICS_SPAPR(obj) OBJECT_CHECK(XICSState, (obj), TYPE_XICS_SPAPR)
|
||||
|
||||
#define TYPE_XICS_SPAPR_KVM "xics-spapr-kvm"
|
||||
#define XICS_SPAPR_KVM(obj) \
|
||||
OBJECT_CHECK(KVMXICSState, (obj), TYPE_XICS_SPAPR_KVM)
|
||||
|
||||
#define XICS_COMMON_CLASS(klass) \
|
||||
OBJECT_CLASS_CHECK(XICSStateClass, (klass), TYPE_XICS_COMMON)
|
||||
#define XICS_SPAPR_CLASS(klass) \
|
||||
OBJECT_CLASS_CHECK(XICSStateClass, (klass), TYPE_XICS_SPAPR)
|
||||
#define XICS_COMMON_GET_CLASS(obj) \
|
||||
OBJECT_GET_CLASS(XICSStateClass, (obj), TYPE_XICS_COMMON)
|
||||
#define XICS_SPAPR_GET_CLASS(obj) \
|
||||
OBJECT_GET_CLASS(XICSStateClass, (obj), TYPE_XICS_SPAPR)
|
||||
|
||||
#define XICS_IPI 0x2
|
||||
#define XICS_BUID 0x1
|
||||
#define XICS_IRQ_BASE (XICS_BUID << 12)
|
||||
@ -62,31 +39,12 @@
|
||||
* (the kernel implementation supports more but we don't exploit
|
||||
* that yet)
|
||||
*/
|
||||
typedef struct XICSStateClass XICSStateClass;
|
||||
typedef struct XICSState XICSState;
|
||||
typedef struct ICPStateClass ICPStateClass;
|
||||
typedef struct ICPState ICPState;
|
||||
typedef struct ICSStateClass ICSStateClass;
|
||||
typedef struct ICSState ICSState;
|
||||
typedef struct ICSIRQState ICSIRQState;
|
||||
|
||||
struct XICSStateClass {
|
||||
DeviceClass parent_class;
|
||||
|
||||
void (*cpu_setup)(XICSState *icp, PowerPCCPU *cpu);
|
||||
void (*set_nr_irqs)(XICSState *icp, uint32_t nr_irqs, Error **errp);
|
||||
void (*set_nr_servers)(XICSState *icp, uint32_t nr_servers, Error **errp);
|
||||
};
|
||||
|
||||
struct XICSState {
|
||||
/*< private >*/
|
||||
SysBusDevice parent_obj;
|
||||
/*< public >*/
|
||||
uint32_t nr_servers;
|
||||
uint32_t nr_irqs;
|
||||
ICPState *ss;
|
||||
QLIST_HEAD(, ICSState) ics;
|
||||
};
|
||||
typedef struct XICSFabric XICSFabric;
|
||||
|
||||
#define TYPE_ICP "icp"
|
||||
#define ICP(obj) OBJECT_CHECK(ICPState, (obj), TYPE_ICP)
|
||||
@ -104,6 +62,7 @@ struct ICPStateClass {
|
||||
|
||||
void (*pre_save)(ICPState *s);
|
||||
int (*post_load)(ICPState *s, int version_id);
|
||||
void (*cpu_setup)(ICPState *icp, PowerPCCPU *cpu);
|
||||
};
|
||||
|
||||
struct ICPState {
|
||||
@ -118,7 +77,7 @@ struct ICPState {
|
||||
qemu_irq output;
|
||||
bool cap_irq_xics_enabled;
|
||||
|
||||
XICSState *xics;
|
||||
XICSFabric *xics;
|
||||
};
|
||||
|
||||
#define TYPE_ICS_BASE "ics-base"
|
||||
@ -139,6 +98,7 @@ struct ICPState {
|
||||
struct ICSStateClass {
|
||||
DeviceClass parent_class;
|
||||
|
||||
void (*realize)(DeviceState *dev, Error **errp);
|
||||
void (*pre_save)(ICSState *s);
|
||||
int (*post_load)(ICSState *s, int version_id);
|
||||
void (*reject)(ICSState *s, uint32_t irq);
|
||||
@ -154,8 +114,7 @@ struct ICSState {
|
||||
uint32_t offset;
|
||||
qemu_irq *qirqs;
|
||||
ICSIRQState *irqs;
|
||||
XICSState *xics;
|
||||
QLIST_ENTRY(ICSState) list;
|
||||
XICSFabric *xics;
|
||||
};
|
||||
|
||||
static inline bool ics_valid_irq(ICSState *ics, uint32_t nr)
|
||||
@ -180,19 +139,37 @@ struct ICSIRQState {
|
||||
uint8_t flags;
|
||||
};
|
||||
|
||||
typedef struct XICSFabric {
|
||||
Object parent;
|
||||
} XICSFabric;
|
||||
|
||||
#define TYPE_XICS_FABRIC "xics-fabric"
|
||||
#define XICS_FABRIC(obj) \
|
||||
OBJECT_CHECK(XICSFabric, (obj), TYPE_XICS_FABRIC)
|
||||
#define XICS_FABRIC_CLASS(klass) \
|
||||
OBJECT_CLASS_CHECK(XICSFabricClass, (klass), TYPE_XICS_FABRIC)
|
||||
#define XICS_FABRIC_GET_CLASS(obj) \
|
||||
OBJECT_GET_CLASS(XICSFabricClass, (obj), TYPE_XICS_FABRIC)
|
||||
|
||||
typedef struct XICSFabricClass {
|
||||
InterfaceClass parent;
|
||||
ICSState *(*ics_get)(XICSFabric *xi, int irq);
|
||||
void (*ics_resend)(XICSFabric *xi);
|
||||
ICPState *(*icp_get)(XICSFabric *xi, int server);
|
||||
} XICSFabricClass;
|
||||
|
||||
#define XICS_IRQS_SPAPR 1024
|
||||
|
||||
qemu_irq xics_get_qirq(XICSState *icp, int irq);
|
||||
int xics_spapr_alloc(XICSState *icp, int irq_hint, bool lsi, Error **errp);
|
||||
int xics_spapr_alloc_block(XICSState *icp, int num, bool lsi, bool align,
|
||||
int spapr_ics_alloc(ICSState *ics, int irq_hint, bool lsi, Error **errp);
|
||||
int spapr_ics_alloc_block(ICSState *ics, int num, bool lsi, bool align,
|
||||
Error **errp);
|
||||
void xics_spapr_free(XICSState *icp, int irq, int num);
|
||||
void spapr_dt_xics(XICSState *xics, void *fdt, uint32_t phandle);
|
||||
void spapr_ics_free(ICSState *ics, int irq, int num);
|
||||
void spapr_dt_xics(int nr_servers, void *fdt, uint32_t phandle);
|
||||
|
||||
void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu);
|
||||
void xics_cpu_destroy(XICSState *icp, PowerPCCPU *cpu);
|
||||
void xics_set_nr_servers(XICSState *xics, uint32_t nr_servers,
|
||||
const char *typename, Error **errp);
|
||||
qemu_irq xics_get_qirq(XICSFabric *xi, int irq);
|
||||
ICPState *xics_icp_get(XICSFabric *xi, int server);
|
||||
void xics_cpu_setup(XICSFabric *xi, PowerPCCPU *cpu);
|
||||
void xics_cpu_destroy(XICSFabric *xi, PowerPCCPU *cpu);
|
||||
|
||||
/* Internal XICS interfaces */
|
||||
int xics_get_cpu_index_by_dt_id(int cpu_dt_id);
|
||||
@ -207,7 +184,15 @@ void ics_simple_write_xive(ICSState *ics, int nr, int server,
|
||||
uint8_t priority, uint8_t saved_priority);
|
||||
|
||||
void ics_set_irq_type(ICSState *ics, int srcno, bool lsi);
|
||||
void icp_pic_print_info(ICPState *icp, Monitor *mon);
|
||||
void ics_pic_print_info(ICSState *ics, Monitor *mon);
|
||||
|
||||
ICSState *xics_find_source(XICSState *icp, int irq);
|
||||
void ics_resend(ICSState *ics);
|
||||
void icp_resend(ICPState *ss);
|
||||
|
||||
typedef struct sPAPRMachineState sPAPRMachineState;
|
||||
|
||||
int xics_kvm_init(sPAPRMachineState *spapr, Error **errp);
|
||||
int xics_spapr_init(sPAPRMachineState *spapr, Error **errp);
|
||||
|
||||
#endif /* XICS_H */
|
||||
|
@@ -1,8 +1,9 @@
obj-y += cpu-models.o
obj-y += cpu.o
obj-y += translate.o
ifeq ($(CONFIG_SOFTMMU),y)
obj-y += machine.o mmu_helper.o mmu-hash32.o monitor.o
obj-$(TARGET_PPC64) += mmu-hash64.o arch_dump.o compat.o
obj-y += machine.o mmu_helper.o mmu-hash32.o monitor.o arch_dump.o
obj-$(TARGET_PPC64) += mmu-hash64.o compat.o
endif
obj-$(CONFIG_KVM) += kvm.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* writing ELF notes for ppc64 arch
|
||||
* writing ELF notes for ppc{64,} arch
|
||||
*
|
||||
*
|
||||
* Copyright IBM, Corp. 2013
|
||||
@ -19,36 +19,48 @@
|
||||
#include "sysemu/dump.h"
|
||||
#include "sysemu/kvm.h"
|
||||
|
||||
struct PPC64UserRegStruct {
|
||||
uint64_t gpr[32];
|
||||
uint64_t nip;
|
||||
uint64_t msr;
|
||||
uint64_t orig_gpr3;
|
||||
uint64_t ctr;
|
||||
uint64_t link;
|
||||
uint64_t xer;
|
||||
uint64_t ccr;
|
||||
uint64_t softe;
|
||||
uint64_t trap;
|
||||
uint64_t dar;
|
||||
uint64_t dsisr;
|
||||
uint64_t result;
|
||||
#ifdef TARGET_PPC64
|
||||
#define ELFCLASS ELFCLASS64
|
||||
#define cpu_to_dump_reg cpu_to_dump64
|
||||
typedef uint64_t reg_t;
|
||||
typedef Elf64_Nhdr Elf_Nhdr;
|
||||
#else
|
||||
#define ELFCLASS ELFCLASS32
|
||||
#define cpu_to_dump_reg cpu_to_dump32
|
||||
typedef uint32_t reg_t;
|
||||
typedef Elf32_Nhdr Elf_Nhdr;
|
||||
#endif /* TARGET_PPC64 */
|
||||
|
||||
struct PPCUserRegStruct {
|
||||
reg_t gpr[32];
|
||||
reg_t nip;
|
||||
reg_t msr;
|
||||
reg_t orig_gpr3;
|
||||
reg_t ctr;
|
||||
reg_t link;
|
||||
reg_t xer;
|
||||
reg_t ccr;
|
||||
reg_t softe;
|
||||
reg_t trap;
|
||||
reg_t dar;
|
||||
reg_t dsisr;
|
||||
reg_t result;
|
||||
} QEMU_PACKED;
|
||||
|
||||
struct PPC64ElfPrstatus {
|
||||
struct PPCElfPrstatus {
|
||||
char pad1[112];
|
||||
struct PPC64UserRegStruct pr_reg;
|
||||
uint64_t pad2[4];
|
||||
struct PPCUserRegStruct pr_reg;
|
||||
reg_t pad2[4];
|
||||
} QEMU_PACKED;
|
||||
|
||||
|
||||
struct PPC64ElfFpregset {
|
||||
struct PPCElfFpregset {
|
||||
uint64_t fpr[32];
|
||||
uint64_t fpscr;
|
||||
reg_t fpscr;
|
||||
} QEMU_PACKED;
|
||||
|
||||
|
||||
struct PPC64ElfVmxregset {
|
||||
struct PPCElfVmxregset {
|
||||
ppc_avr_t avr[32];
|
||||
ppc_avr_t vscr;
|
||||
union {
|
||||
@ -57,26 +69,26 @@ struct PPC64ElfVmxregset {
|
||||
} vrsave;
|
||||
} QEMU_PACKED;
|
||||
|
||||
struct PPC64ElfVsxregset {
|
||||
struct PPCElfVsxregset {
|
||||
uint64_t vsr[32];
|
||||
} QEMU_PACKED;
|
||||
|
||||
struct PPC64ElfSperegset {
|
||||
struct PPCElfSperegset {
|
||||
uint32_t evr[32];
|
||||
uint64_t spe_acc;
|
||||
uint32_t spe_fscr;
|
||||
} QEMU_PACKED;
|
||||
|
||||
typedef struct noteStruct {
|
||||
Elf64_Nhdr hdr;
|
||||
Elf_Nhdr hdr;
|
||||
char name[5];
|
||||
char pad3[3];
|
||||
union {
|
||||
struct PPC64ElfPrstatus prstatus;
|
||||
struct PPC64ElfFpregset fpregset;
|
||||
struct PPC64ElfVmxregset vmxregset;
|
||||
struct PPC64ElfVsxregset vsxregset;
|
||||
struct PPC64ElfSperegset speregset;
|
||||
struct PPCElfPrstatus prstatus;
|
||||
struct PPCElfFpregset fpregset;
|
||||
struct PPCElfVmxregset vmxregset;
|
||||
struct PPCElfVsxregset vsxregset;
|
||||
struct PPCElfSperegset speregset;
|
||||
} contents;
|
||||
} QEMU_PACKED Note;
|
||||
|
||||
@ -85,12 +97,12 @@ typedef struct NoteFuncArg {
|
||||
DumpState *state;
|
||||
} NoteFuncArg;
|
||||
|
||||
static void ppc64_write_elf64_prstatus(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
static void ppc_write_elf_prstatus(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
{
|
||||
int i;
|
||||
uint64_t cr;
|
||||
struct PPC64ElfPrstatus *prstatus;
|
||||
struct PPC64UserRegStruct *reg;
|
||||
reg_t cr;
|
||||
struct PPCElfPrstatus *prstatus;
|
||||
struct PPCUserRegStruct *reg;
|
||||
Note *note = &arg->note;
|
||||
DumpState *s = arg->state;
|
||||
|
||||
@ -101,25 +113,25 @@ static void ppc64_write_elf64_prstatus(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
reg = &prstatus->pr_reg;
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
reg->gpr[i] = cpu_to_dump64(s, cpu->env.gpr[i]);
|
||||
reg->gpr[i] = cpu_to_dump_reg(s, cpu->env.gpr[i]);
|
||||
}
|
||||
reg->nip = cpu_to_dump64(s, cpu->env.nip);
|
||||
reg->msr = cpu_to_dump64(s, cpu->env.msr);
|
||||
reg->ctr = cpu_to_dump64(s, cpu->env.ctr);
|
||||
reg->link = cpu_to_dump64(s, cpu->env.lr);
|
||||
reg->xer = cpu_to_dump64(s, cpu_read_xer(&cpu->env));
|
||||
reg->nip = cpu_to_dump_reg(s, cpu->env.nip);
|
||||
reg->msr = cpu_to_dump_reg(s, cpu->env.msr);
|
||||
reg->ctr = cpu_to_dump_reg(s, cpu->env.ctr);
|
||||
reg->link = cpu_to_dump_reg(s, cpu->env.lr);
|
||||
reg->xer = cpu_to_dump_reg(s, cpu_read_xer(&cpu->env));
|
||||
|
||||
cr = 0;
|
||||
for (i = 0; i < 8; i++) {
|
||||
cr |= (cpu->env.crf[i] & 15) << (4 * (7 - i));
|
||||
}
|
||||
reg->ccr = cpu_to_dump64(s, cr);
|
||||
reg->ccr = cpu_to_dump_reg(s, cr);
|
||||
}
|
||||
|
||||
static void ppc64_write_elf64_fpregset(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
static void ppc_write_elf_fpregset(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
{
|
||||
int i;
|
||||
struct PPC64ElfFpregset *fpregset;
|
||||
struct PPCElfFpregset *fpregset;
|
||||
Note *note = &arg->note;
|
||||
DumpState *s = arg->state;
|
||||
|
||||
@ -131,13 +143,13 @@ static void ppc64_write_elf64_fpregset(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
for (i = 0; i < 32; i++) {
|
||||
fpregset->fpr[i] = cpu_to_dump64(s, cpu->env.fpr[i]);
|
||||
}
|
||||
fpregset->fpscr = cpu_to_dump64(s, cpu->env.fpscr);
|
||||
fpregset->fpscr = cpu_to_dump_reg(s, cpu->env.fpscr);
|
||||
}
|
||||
|
||||
static void ppc64_write_elf64_vmxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
static void ppc_write_elf_vmxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
{
|
||||
int i;
|
||||
struct PPC64ElfVmxregset *vmxregset;
|
||||
struct PPCElfVmxregset *vmxregset;
|
||||
Note *note = &arg->note;
|
||||
DumpState *s = arg->state;
|
||||
|
||||
@ -164,10 +176,11 @@ static void ppc64_write_elf64_vmxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
}
|
||||
vmxregset->vscr.u32[3] = cpu_to_dump32(s, cpu->env.vscr);
|
||||
}
|
||||
static void ppc64_write_elf64_vsxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
|
||||
static void ppc_write_elf_vsxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
{
|
||||
int i;
|
||||
struct PPC64ElfVsxregset *vsxregset;
|
||||
struct PPCElfVsxregset *vsxregset;
|
||||
Note *note = &arg->note;
|
||||
DumpState *s = arg->state;
|
||||
|
||||
@ -179,9 +192,10 @@ static void ppc64_write_elf64_vsxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
vsxregset->vsr[i] = cpu_to_dump64(s, cpu->env.vsr[i]);
|
||||
}
|
||||
}
|
||||
static void ppc64_write_elf64_speregset(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
|
||||
static void ppc_write_elf_speregset(NoteFuncArg *arg, PowerPCCPU *cpu)
|
||||
{
|
||||
struct PPC64ElfSperegset *speregset;
|
||||
struct PPCElfSperegset *speregset;
|
||||
Note *note = &arg->note;
|
||||
DumpState *s = arg->state;
|
||||
|
||||
@ -197,11 +211,11 @@ static const struct NoteFuncDescStruct {
|
||||
int contents_size;
|
||||
void (*note_contents_func)(NoteFuncArg *arg, PowerPCCPU *cpu);
|
||||
} note_func[] = {
|
||||
{sizeof(((Note *)0)->contents.prstatus), ppc64_write_elf64_prstatus},
|
||||
{sizeof(((Note *)0)->contents.fpregset), ppc64_write_elf64_fpregset},
|
||||
{sizeof(((Note *)0)->contents.vmxregset), ppc64_write_elf64_vmxregset},
|
||||
{sizeof(((Note *)0)->contents.vsxregset), ppc64_write_elf64_vsxregset},
|
||||
{sizeof(((Note *)0)->contents.speregset), ppc64_write_elf64_speregset},
|
||||
{sizeof(((Note *)0)->contents.prstatus), ppc_write_elf_prstatus},
|
||||
{sizeof(((Note *)0)->contents.fpregset), ppc_write_elf_fpregset},
|
||||
{sizeof(((Note *)0)->contents.vmxregset), ppc_write_elf_vmxregset},
|
||||
{sizeof(((Note *)0)->contents.vsxregset), ppc_write_elf_vsxregset},
|
||||
{sizeof(((Note *)0)->contents.speregset), ppc_write_elf_speregset},
|
||||
{ 0, NULL}
|
||||
};
|
||||
|
||||
@ -213,8 +227,9 @@ int cpu_get_dump_info(ArchDumpInfo *info,
|
||||
PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
|
||||
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
|
||||
|
||||
info->d_machine = EM_PPC64;
|
||||
info->d_class = ELFCLASS64;
|
||||
info->d_machine = PPC_ELF_MACHINE;
|
||||
info->d_class = ELFCLASS;
|
||||
|
||||
if ((*pcc->interrupts_big_endian)(cpu)) {
|
||||
info->d_endian = ELFDATA2MSB;
|
||||
} else {
|
||||
@ -236,25 +251,19 @@ ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
|
||||
int note_head_size;
|
||||
const NoteFuncDesc *nf;
|
||||
|
||||
if (class != ELFCLASS64) {
|
||||
return -1;
|
||||
}
|
||||
assert(machine == EM_PPC64);
|
||||
|
||||
note_head_size = sizeof(Elf64_Nhdr);
|
||||
|
||||
note_head_size = sizeof(Elf_Nhdr);
|
||||
for (nf = note_func; nf->note_contents_func; nf++) {
|
||||
elf_note_size = elf_note_size + note_head_size + name_size +
|
||||
nf->contents_size;
|
||||
nf->contents_size;
|
||||
}
|
||||
|
||||
return (elf_note_size) * nr_cpus;
|
||||
}
|
||||
|
||||
static int ppc64_write_all_elf64_notes(const char *note_name,
|
||||
WriteCoreDumpFunction f,
|
||||
PowerPCCPU *cpu, int id,
|
||||
void *opaque)
|
||||
static int ppc_write_all_elf_notes(const char *note_name,
|
||||
WriteCoreDumpFunction f,
|
||||
PowerPCCPU *cpu, int id,
|
||||
void *opaque)
|
||||
{
|
||||
NoteFuncArg arg = { .state = opaque };
|
||||
int ret = -1;
|
||||
@ -282,5 +291,12 @@ int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
|
||||
int cpuid, void *opaque)
|
||||
{
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
return ppc64_write_all_elf64_notes("CORE", f, cpu, cpuid, opaque);
|
||||
return ppc_write_all_elf_notes("CORE", f, cpu, cpuid, opaque);
|
||||
}
|
||||
|
||||
int ppc32_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
|
||||
int cpuid, void *opaque)
|
||||
{
|
||||
PowerPCCPU *cpu = POWERPC_CPU(cs);
|
||||
return ppc_write_all_elf_notes("CORE", f, cpu, cpuid, opaque);
|
||||
}
|
||||
|
target/ppc/cpu.c (new file, 47 lines)
@@ -0,0 +1,47 @@
/*
 * PowerPC CPU routines for qemu.
 *
 * Copyright (c) 2017 Nikunj A Dadhania, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "cpu-models.h"

target_ulong cpu_read_xer(CPUPPCState *env)
{
    if (is_isa300(env)) {
        return env->xer | (env->so << XER_SO) |
            (env->ov << XER_OV) | (env->ca << XER_CA) |
            (env->ov32 << XER_OV32) | (env->ca32 << XER_CA32);
    }

    return env->xer | (env->so << XER_SO) | (env->ov << XER_OV) |
        (env->ca << XER_CA);
}

void cpu_write_xer(CPUPPCState *env, target_ulong xer)
{
    env->so = (xer >> XER_SO) & 1;
    env->ov = (xer >> XER_OV) & 1;
    env->ca = (xer >> XER_CA) & 1;
    /* write all the flags, while reading back check of isa300 */
    env->ov32 = (xer >> XER_OV32) & 1;
    env->ca32 = (xer >> XER_CA32) & 1;
    env->xer = xer & ~((1ul << XER_SO) |
                       (1ul << XER_OV) | (1ul << XER_CA) |
                       (1ul << XER_OV32) | (1ul << XER_CA32));
}
@ -223,11 +223,12 @@ enum {
|
||||
typedef struct opc_handler_t opc_handler_t;
|
||||
|
||||
/*****************************************************************************/
|
||||
/* Types used to describe some PowerPC registers */
|
||||
/* Types used to describe some PowerPC registers etc. */
|
||||
typedef struct DisasContext DisasContext;
|
||||
typedef struct ppc_spr_t ppc_spr_t;
|
||||
typedef union ppc_avr_t ppc_avr_t;
|
||||
typedef union ppc_tlb_t ppc_tlb_t;
|
||||
typedef struct ppc_hash_pte64 ppc_hash_pte64_t;
|
||||
|
||||
/* SPR access micro-ops generations callbacks */
|
||||
struct ppc_spr_t {
|
||||
@ -305,14 +306,6 @@ union ppc_tlb_t {
|
||||
#define TLB_MAS 3
|
||||
#endif
|
||||
|
||||
#define SDR_32_HTABORG 0xFFFF0000UL
|
||||
#define SDR_32_HTABMASK 0x000001FFUL
|
||||
|
||||
#if defined(TARGET_PPC64)
|
||||
#define SDR_64_HTABORG 0xFFFFFFFFFFFC0000ULL
|
||||
#define SDR_64_HTABSIZE 0x000000000000001FULL
|
||||
#endif /* defined(TARGET_PPC64 */
|
||||
|
||||
typedef struct ppc_slb_t ppc_slb_t;
|
||||
struct ppc_slb_t {
|
||||
uint64_t esid;
|
||||
@@ -965,6 +958,8 @@ struct CPUPPCState {
target_ulong so;
target_ulong ov;
target_ulong ca;
target_ulong ov32;
target_ulong ca32;
/* Reservation address */
target_ulong reserve_addr;
/* Reservation value */
@@ -1005,12 +1000,7 @@ struct CPUPPCState {
/* tcg TLB needs flush (deferred slb inval instruction typically) */
#endif
/* segment registers */
hwaddr htab_base;
/* mask used to normalize hash value to PTEG index */
hwaddr htab_mask;
target_ulong sr[32];
/* externally stored hash table */
uint8_t *external_htab;
/* BATs */
uint32_t nb_BATs;
target_ulong DBAT[2][8];
@@ -1218,6 +1208,14 @@ struct PPCVirtualHypervisor {
struct PPCVirtualHypervisorClass {
InterfaceClass parent;
void (*hypercall)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
hwaddr (*hpt_mask)(PPCVirtualHypervisor *vhyp);
const ppc_hash_pte64_t *(*map_hptes)(PPCVirtualHypervisor *vhyp,
hwaddr ptex, int n);
void (*unmap_hptes)(PPCVirtualHypervisor *vhyp,
const ppc_hash_pte64_t *hptes,
hwaddr ptex, int n);
void (*store_hpte)(PPCVirtualHypervisor *vhyp, hwaddr ptex,
uint64_t pte0, uint64_t pte1);
};

#define TYPE_PPC_VIRTUAL_HYPERVISOR "ppc-virtual-hypervisor"
@@ -1243,6 +1241,8 @@ int ppc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int ppc_cpu_gdb_write_register_apple(CPUState *cpu, uint8_t *buf, int reg);
int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, void *opaque);
int ppc32_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, void *opaque);
#ifndef CONFIG_USER_ONLY
void ppc_cpu_do_system_reset(CPUState *cs);
extern const struct VMStateDescription vmstate_ppc_cpu;
@@ -1300,8 +1300,7 @@ void store_booke_tcr (CPUPPCState *env, target_ulong val);
void store_booke_tsr (CPUPPCState *env, target_ulong val);
void ppc_tlb_invalidate_all (CPUPPCState *env);
void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr);
void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp);
void cpu_ppc_set_papr(PowerPCCPU *cpu);
void cpu_ppc_set_papr(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp);
#endif
#endif

@@ -1372,11 +1371,15 @@ int ppc_compat_max_threads(PowerPCCPU *cpu);
#define XER_SO 31
#define XER_OV 30
#define XER_CA 29
#define XER_OV32 19
#define XER_CA32 18
#define XER_CMP 8
#define XER_BC 0
#define xer_so (env->so)
#define xer_ov (env->ov)
#define xer_ca (env->ca)
#define xer_ov32 (env->ov)
#define xer_ca32 (env->ca)
#define xer_cmp ((env->xer >> XER_CMP) & 0xFF)
#define xer_bc ((env->xer >> XER_BC) & 0x7F)

@@ -2343,18 +2346,9 @@ enum {

/*****************************************************************************/

static inline target_ulong cpu_read_xer(CPUPPCState *env)
{
return env->xer | (env->so << XER_SO) | (env->ov << XER_OV) | (env->ca << XER_CA);
}

static inline void cpu_write_xer(CPUPPCState *env, target_ulong xer)
{
env->so = (xer >> XER_SO) & 1;
env->ov = (xer >> XER_OV) & 1;
env->ca = (xer >> XER_CA) & 1;
env->xer = xer & ~((1u << XER_SO) | (1u << XER_OV) | (1u << XER_CA));
}
#define is_isa300(ctx) (!!(ctx->insns_flags2 & PPC2_ISA300))
target_ulong cpu_read_xer(CPUPPCState *env);
void cpu_write_xer(CPUPPCState *env, target_ulong xer);

static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags)
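With SO/OV/CA (and, on ISA 3.00, OV32/CA32) kept as separate fields, reassembling the architected XER value is just a matter of OR-ing each bit back into place using the XER_* positions above. The real cpu_read_xer()/cpu_write_xer() now live out of line and are not shown in this diff; the following is only a minimal sketch of what they have to cover, using the field names from the struct above:

    /* Sketch only -- not the actual cpu.c implementation. */
    target_ulong cpu_read_xer(CPUPPCState *env)
    {
        return env->xer | (env->so << XER_SO) | (env->ov << XER_OV) |
               (env->ca << XER_CA) | (env->ov32 << XER_OV32) |
               (env->ca32 << XER_CA32);
    }

    void cpu_write_xer(CPUPPCState *env, target_ulong xer)
    {
        env->so = (xer >> XER_SO) & 1;
        env->ov = (xer >> XER_OV) & 1;
        env->ca = (xer >> XER_CA) & 1;
        env->ov32 = (xer >> XER_OV32) & 1;
        env->ca32 = (xer >> XER_CA32) & 1;
        env->xer = xer & ~((1u << XER_SO) | (1u << XER_OV) | (1u << XER_CA) |
                           (1u << XER_OV32) | (1u << XER_CA32));
    }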
@@ -28,6 +28,15 @@
/*****************************************************************************/
/* Fixed point operations helpers */

static inline void helper_update_ov_legacy(CPUPPCState *env, int ov)
{
if (unlikely(ov)) {
env->so = env->ov = 1;
} else {
env->ov = 0;
}
}

target_ulong helper_divweu(CPUPPCState *env, target_ulong ra, target_ulong rb,
uint32_t oe)
{
@@ -49,11 +58,7 @@ target_ulong helper_divweu(CPUPPCState *env, target_ulong ra, target_ulong rb,
}

if (oe) {
if (unlikely(overflow)) {
env->so = env->ov = 1;
} else {
env->ov = 0;
}
helper_update_ov_legacy(env, overflow);
}

return (target_ulong)rt;
@@ -81,11 +86,7 @@ target_ulong helper_divwe(CPUPPCState *env, target_ulong ra, target_ulong rb,
}

if (oe) {
if (unlikely(overflow)) {
env->so = env->ov = 1;
} else {
env->ov = 0;
}
helper_update_ov_legacy(env, overflow);
}

return (target_ulong)rt;
@@ -105,11 +106,7 @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
}

if (oe) {
if (unlikely(overflow)) {
env->so = env->ov = 1;
} else {
env->ov = 0;
}
helper_update_ov_legacy(env, overflow);
}

return rt;
@@ -127,12 +124,7 @@ uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
}

if (oe) {

if (unlikely(overflow)) {
env->so = env->ov = 1;
} else {
env->ov = 0;
}
helper_update_ov_legacy(env, overflow);
}

return rt;
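The contract that helper_update_ov_legacy() factors out is that OV reflects only the most recent OE-form result while SO is sticky: an overflow sets both, a later non-overflowing operation clears OV but leaves SO alone. A tiny illustration (not part of the patch):

    /* Illustration only: SO stays set across a non-overflowing update. */
    env->so = env->ov = 0;
    helper_update_ov_legacy(env, 1);   /* overflow: SO = OV = 1 */
    helper_update_ov_legacy(env, 0);   /* no overflow: OV = 0, SO still 1 */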
target/ppc/kvm.c
@@ -1251,7 +1251,7 @@ static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
return ret;
}

if (!env->external_htab) {
if (!cpu->vhyp) {
ppc_store_sdr1(env, sregs.u.s.sdr1);
}

@@ -2596,89 +2596,85 @@ void kvm_arch_init_irq_routing(KVMState *s)
{
}

struct kvm_get_htab_buf {
struct kvm_get_htab_header header;
/*
* We require one extra byte for read
*/
target_ulong hpte[(HPTES_PER_GROUP * 2) + 1];
};

uint64_t kvmppc_hash64_read_pteg(PowerPCCPU *cpu, target_ulong pte_index)
void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
{
int htab_fd;
struct kvm_get_htab_fd ghf;
struct kvm_get_htab_buf *hpte_buf;
struct kvm_get_htab_fd ghf = {
.flags = 0,
.start_index = ptex,
};
int fd, rc;
int i;

ghf.flags = 0;
ghf.start_index = pte_index;
htab_fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
if (htab_fd < 0) {
goto error_out;
fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
if (fd < 0) {
hw_error("kvmppc_read_hptes: Unable to open HPT fd");
}

hpte_buf = g_malloc0(sizeof(*hpte_buf));
/*
* Read the hpte group
*/
if (read(htab_fd, hpte_buf, sizeof(*hpte_buf)) < 0) {
goto out_close;
i = 0;
while (i < n) {
struct kvm_get_htab_header *hdr;
int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];

rc = read(fd, buf, sizeof(buf));
if (rc < 0) {
hw_error("kvmppc_read_hptes: Unable to read HPTEs");
}

hdr = (struct kvm_get_htab_header *)buf;
while ((i < n) && ((char *)hdr < (buf + rc))) {
int invalid = hdr->n_invalid;

if (hdr->index != (ptex + i)) {
hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
" != (%"HWADDR_PRIu" + %d", hdr->index, ptex, i);
}

memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * hdr->n_valid);
i += hdr->n_valid;

if ((n - i) < invalid) {
invalid = n - i;
}
memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
i += hdr->n_invalid;

hdr = (struct kvm_get_htab_header *)
((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
}
}

close(htab_fd);
return (uint64_t)(uintptr_t) hpte_buf->hpte;

out_close:
g_free(hpte_buf);
close(htab_fd);
error_out:
return 0;
close(fd);
}

void kvmppc_hash64_free_pteg(uint64_t token)
void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
{
struct kvm_get_htab_buf *htab_buf;

htab_buf = container_of((void *)(uintptr_t) token, struct kvm_get_htab_buf,
hpte);
g_free(htab_buf);
return;
}

void kvmppc_hash64_write_pte(CPUPPCState *env, target_ulong pte_index,
target_ulong pte0, target_ulong pte1)
{
int htab_fd;
int fd, rc;
struct kvm_get_htab_fd ghf;
struct kvm_get_htab_buf hpte_buf;
struct {
struct kvm_get_htab_header hdr;
uint64_t pte0;
uint64_t pte1;
} buf;

ghf.flags = 0;
ghf.start_index = 0; /* Ignored */
htab_fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
if (htab_fd < 0) {
goto error_out;
fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
if (fd < 0) {
hw_error("kvmppc_write_hpte: Unable to open HPT fd");
}

hpte_buf.header.n_valid = 1;
hpte_buf.header.n_invalid = 0;
hpte_buf.header.index = pte_index;
hpte_buf.hpte[0] = pte0;
hpte_buf.hpte[1] = pte1;
/*
* Write the hpte entry.
* CAUTION: write() has the warn_unused_result attribute. Hence we
* need to check the return value, even though we do nothing.
*/
if (write(htab_fd, &hpte_buf, sizeof(hpte_buf)) < 0) {
goto out_close;
buf.hdr.n_valid = 1;
buf.hdr.n_invalid = 0;
buf.hdr.index = ptex;
buf.pte0 = cpu_to_be64(pte0);
buf.pte1 = cpu_to_be64(pte1);

rc = write(fd, &buf, sizeof(buf));
if (rc != sizeof(buf)) {
hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
}

out_close:
close(htab_fd);
return;

error_out:
return;
close(fd);
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
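kvmppc_read_hptes() walks the stream produced by the KVM HTAB file descriptor: each chunk starts with a kvm_get_htab_header giving the first PTE index, followed by n_valid HPTEs in-line, with n_invalid further entries simply absent and to be zero-filled by the reader. A rough sketch of that layout, with field widths quoted from the kernel uapi header from memory (treat as an assumption, not a definition):

    /* Sketch of one chunk in the KVM_PPC_GET_HTAB_FD read stream. */
    struct kvm_get_htab_header {
        uint32_t index;      /* ptex of the first HPTE in this chunk */
        uint16_t n_valid;    /* HPTEs present right after this header */
        uint16_t n_invalid;  /* HPTEs to treat as empty (not in the stream) */
    };
    /* next header = (char *)(hdr + 1) + n_valid * HASH_PTE_SIZE_64 */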
@@ -49,11 +49,8 @@ int kvmppc_get_htab_fd(bool write);
int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns);
int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
uint16_t n_valid, uint16_t n_invalid);
uint64_t kvmppc_hash64_read_pteg(PowerPCCPU *cpu, target_ulong pte_index);
void kvmppc_hash64_free_pteg(uint64_t token);

void kvmppc_hash64_write_pte(CPUPPCState *env, target_ulong pte_index,
target_ulong pte0, target_ulong pte1);
void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n);
void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1);
bool kvmppc_has_cap_fixup_hcalls(void);
bool kvmppc_has_cap_htm(void);
int kvmppc_enable_hwrng(void);
@@ -234,20 +231,13 @@ static inline int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
abort();
}

static inline uint64_t kvmppc_hash64_read_pteg(PowerPCCPU *cpu,
target_ulong pte_index)
static inline void kvmppc_read_hptes(ppc_hash_pte64_t *hptes,
hwaddr ptex, int n)
{
abort();
}

static inline void kvmppc_hash64_free_pteg(uint64_t token)
{
abort();
}

static inline void kvmppc_hash64_write_pte(CPUPPCState *env,
target_ulong pte_index,
target_ulong pte0, target_ulong pte1)
static inline void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
{
abort();
}
@@ -76,7 +76,7 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
qemu_get_betls(f, &env->pb[i]);
for (i = 0; i < 1024; i++)
qemu_get_betls(f, &env->spr[i]);
if (!env->external_htab) {
if (!cpu->vhyp) {
ppc_store_sdr1(env, sdr1);
}
qemu_get_be32s(f, &env->vscr);
@@ -228,8 +228,7 @@ static int cpu_post_load(void *opaque, int version_id)
env->IBAT[1][i+4] = env->spr[SPR_IBAT4U + 2*i + 1];
}

if (!env->external_htab) {
/* Restore htab_base and htab_mask variables */
if (!cpu->vhyp) {
ppc_store_sdr1(env, env->spr[SPR_SDR1]);
}

@@ -82,11 +82,9 @@ void helper_store_sdr1(CPUPPCState *env, target_ulong val)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);

if (!env->external_htab) {
if (env->spr[SPR_SDR1] != val) {
ppc_store_sdr1(env, val);
tlb_flush(CPU(cpu));
}
if (env->spr[SPR_SDR1] != val) {
ppc_store_sdr1(env, val);
tlb_flush(CPU(cpu));
}
}

@@ -304,9 +304,9 @@ static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,

hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash)
{
CPUPPCState *env = &cpu->env;
target_ulong mask = ppc_hash32_hpt_mask(cpu);

return (hash * HASH_PTEG_SIZE_32) & env->htab_mask;
return (hash * HASH_PTEG_SIZE_32) & mask;
}

static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
@@ -339,7 +339,6 @@ static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
target_ulong sr, target_ulong eaddr,
ppc_hash_pte32_t *pte)
{
CPUPPCState *env = &cpu->env;
hwaddr pteg_off, pte_offset;
hwaddr hash;
uint32_t vsid, pgidx, ptem;
@@ -353,21 +352,22 @@ static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
" htab_mask " TARGET_FMT_plx
" hash " TARGET_FMT_plx "\n",
env->htab_base, env->htab_mask, hash);
ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);

/* Primary PTEG lookup */
qemu_log_mask(CPU_LOG_MMU, "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
" vsid=%" PRIx32 " ptem=%" PRIx32
" hash=" TARGET_FMT_plx "\n",
env->htab_base, env->htab_mask, vsid, ptem, hash);
ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu),
vsid, ptem, hash);
pteg_off = get_pteg_offset32(cpu, hash);
pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 0, ptem, pte);
if (pte_offset == -1) {
/* Secondary PTEG lookup */
qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
" vsid=%" PRIx32 " api=%" PRIx32
" hash=" TARGET_FMT_plx "\n", env->htab_base,
env->htab_mask, vsid, ptem, ~hash);
" hash=" TARGET_FMT_plx "\n", ppc_hash32_hpt_base(cpu),
ppc_hash32_hpt_mask(cpu), vsid, ptem, ~hash);
pteg_off = get_pteg_offset32(cpu, ~hash);
pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 1, ptem, pte);
}
@@ -44,6 +44,8 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr address, int rw,
/*
* Hash page table definitions
*/
#define SDR_32_HTABORG 0xFFFF0000UL
#define SDR_32_HTABMASK 0x000001FFUL

#define HPTES_PER_GROUP 8
#define HASH_PTE_SIZE_32 8
@@ -65,42 +67,46 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr address, int rw,
#define HPTE32_R_WIMG 0x00000078
#define HPTE32_R_PP 0x00000003

static inline hwaddr ppc_hash32_hpt_base(PowerPCCPU *cpu)
{
return cpu->env.spr[SPR_SDR1] & SDR_32_HTABORG;
}

static inline hwaddr ppc_hash32_hpt_mask(PowerPCCPU *cpu)
{
return ((cpu->env.spr[SPR_SDR1] & SDR_32_HTABMASK) << 16) | 0xFFFF;
}

static inline target_ulong ppc_hash32_load_hpte0(PowerPCCPU *cpu,
hwaddr pte_offset)
{
CPUPPCState *env = &cpu->env;
target_ulong base = ppc_hash32_hpt_base(cpu);

assert(!env->external_htab); /* Not supported on 32-bit for now */
return ldl_phys(CPU(cpu)->as, env->htab_base + pte_offset);
return ldl_phys(CPU(cpu)->as, base + pte_offset);
}

static inline target_ulong ppc_hash32_load_hpte1(PowerPCCPU *cpu,
hwaddr pte_offset)
{
CPUPPCState *env = &cpu->env;
target_ulong base = ppc_hash32_hpt_base(cpu);

assert(!env->external_htab); /* Not supported on 32-bit for now */
return ldl_phys(CPU(cpu)->as,
env->htab_base + pte_offset + HASH_PTE_SIZE_32 / 2);
return ldl_phys(CPU(cpu)->as, base + pte_offset + HASH_PTE_SIZE_32 / 2);
}

static inline void ppc_hash32_store_hpte0(PowerPCCPU *cpu,
hwaddr pte_offset, target_ulong pte0)
{
CPUPPCState *env = &cpu->env;
target_ulong base = ppc_hash32_hpt_base(cpu);

assert(!env->external_htab); /* Not supported on 32-bit for now */
stl_phys(CPU(cpu)->as, env->htab_base + pte_offset, pte0);
stl_phys(CPU(cpu)->as, base + pte_offset, pte0);
}

static inline void ppc_hash32_store_hpte1(PowerPCCPU *cpu,
hwaddr pte_offset, target_ulong pte1)
{
CPUPPCState *env = &cpu->env;
target_ulong base = ppc_hash32_hpt_base(cpu);

assert(!env->external_htab); /* Not supported on 32-bit for now */
stl_phys(CPU(cpu)->as,
env->htab_base + pte_offset + HASH_PTE_SIZE_32 / 2, pte1);
stl_phys(CPU(cpu)->as, base + pte_offset + HASH_PTE_SIZE_32 / 2, pte1);
}

typedef struct {
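On the 32-bit hash MMU both values now come straight from SDR1: HTABORG gives the physical base of the table and HTABMASK widens into a byte mask applied to the hashed offset. A worked example using the inlines above (the SDR1 value is made up for illustration):

    /* SDR1 = 0x01000001: HTABORG = 0x0100xxxx, HTABMASK = 0x001 */
    hwaddr base = 0x01000001 & SDR_32_HTABORG;                      /* 0x01000000 */
    hwaddr mask = ((0x01000001 & SDR_32_HTABMASK) << 16) | 0xFFFF;  /* 0x1FFFF   */
    /* i.e. a 128KiB hash table starting at physical 16MiB */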
@@ -27,6 +27,7 @@
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"
#include "hw/hw.h"

//#define DEBUG_SLB

@@ -36,12 +37,6 @@
# define LOG_SLB(...) do { } while (0)
#endif

/*
* Used to indicate that a CPU has its hash page table (HPT) managed
* within the host kernel
*/
#define MMU_HASH64_KVM_MANAGED_HPT ((void *)-1)

/*
* SLB handling
*/
@@ -294,55 +289,6 @@ target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
return rt;
}

/*
* 64-bit hash table MMU handling
*/
void ppc_hash64_set_sdr1(PowerPCCPU *cpu, target_ulong value,
Error **errp)
{
CPUPPCState *env = &cpu->env;
target_ulong htabsize = value & SDR_64_HTABSIZE;

env->spr[SPR_SDR1] = value;
if (htabsize > 28) {
error_setg(errp,
"Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
htabsize);
htabsize = 28;
}
env->htab_mask = (1ULL << (htabsize + 18 - 7)) - 1;
env->htab_base = value & SDR_64_HTABORG;
}

void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift,
Error **errp)
{
CPUPPCState *env = &cpu->env;
Error *local_err = NULL;

if (hpt) {
env->external_htab = hpt;
} else {
env->external_htab = MMU_HASH64_KVM_MANAGED_HPT;
}
ppc_hash64_set_sdr1(cpu, (target_ulong)(uintptr_t)hpt | (shift - 18),
&local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}

/* Not strictly necessary, but makes it clearer that an external
* htab is in use when debugging */
env->htab_base = -1;

if (kvm_enabled()) {
if (kvmppc_put_books_sregs(cpu) < 0) {
error_setg(errp, "Unable to update SDR1 in KVM");
}
}
}

static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
@@ -431,34 +377,43 @@ static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
return prot;
}

uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
hwaddr ptex, int n)
{
uint64_t token = 0;
hwaddr pte_offset;
hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
hwaddr base = ppc_hash64_hpt_base(cpu);
hwaddr plen = n * HASH_PTE_SIZE_64;
const ppc_hash_pte64_t *hptes;

pte_offset = pte_index * HASH_PTE_SIZE_64;
if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
/*
* HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
*/
token = kvmppc_hash64_read_pteg(cpu, pte_index);
} else if (cpu->env.external_htab) {
/*
* HTAB is controlled by QEMU. Just point to the internally
* accessible PTEG.
*/
token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
} else if (cpu->env.htab_base) {
token = cpu->env.htab_base + pte_offset;
if (cpu->vhyp) {
PPCVirtualHypervisorClass *vhc =
PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
return vhc->map_hptes(cpu->vhyp, ptex, n);
}
return token;

if (!base) {
return NULL;
}

hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false);
if (plen < (n * HASH_PTE_SIZE_64)) {
hw_error("%s: Unable to map all requested HPTEs\n", __func__);
}
return hptes;
}

void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token)
void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
hwaddr ptex, int n)
{
if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
kvmppc_hash64_free_pteg(token);
if (cpu->vhyp) {
PPCVirtualHypervisorClass *vhc =
PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
return;
}

address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
false, n * HASH_PTE_SIZE_64);
}

static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
@@ -503,20 +458,19 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
target_ulong ptem,
ppc_hash_pte64_t *pte, unsigned *pshift)
{
CPUPPCState *env = &cpu->env;
int i;
uint64_t token;
const ppc_hash_pte64_t *pteg;
target_ulong pte0, pte1;
target_ulong pte_index;
target_ulong ptex;

pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
token = ppc_hash64_start_access(cpu, pte_index);
if (!token) {
ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
if (!pteg) {
return -1;
}
for (i = 0; i < HPTES_PER_GROUP; i++) {
pte0 = ppc_hash64_load_hpte0(cpu, token, i);
pte1 = ppc_hash64_load_hpte1(cpu, token, i);
pte0 = ppc_hash64_hpte0(cpu, pteg, i);
pte1 = ppc_hash64_hpte1(cpu, pteg, i);

/* This compares V, B, H (secondary) and the AVPN */
if (HPTE64_V_COMPARE(pte0, ptem)) {
@@ -536,11 +490,11 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
*/
pte->pte0 = pte0;
pte->pte1 = pte1;
ppc_hash64_stop_access(cpu, token);
return (pte_index + i) * HASH_PTE_SIZE_64;
ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
return ptex + i;
}
}
ppc_hash64_stop_access(cpu, token);
ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
/*
* We didn't find a valid entry.
*/
@@ -552,8 +506,7 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
ppc_hash_pte64_t *pte, unsigned *pshift)
{
CPUPPCState *env = &cpu->env;
hwaddr pte_offset;
hwaddr hash;
hwaddr hash, ptex;
uint64_t vsid, epnmask, epn, ptem;
const struct ppc_one_seg_page_size *sps = slb->sps;

@@ -588,29 +541,30 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
qemu_log_mask(CPU_LOG_MMU,
"htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
" hash " TARGET_FMT_plx "\n",
env->htab_base, env->htab_mask, hash);
ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);

/* Primary PTEG lookup */
qemu_log_mask(CPU_LOG_MMU,
"0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
" vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
" hash=" TARGET_FMT_plx "\n",
env->htab_base, env->htab_mask, vsid, ptem, hash);
pte_offset = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);
ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
vsid, ptem, hash);
ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);

if (pte_offset == -1) {
if (ptex == -1) {
/* Secondary PTEG lookup */
ptem |= HPTE64_V_SECONDARY;
qemu_log_mask(CPU_LOG_MMU,
"1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
" vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
" hash=" TARGET_FMT_plx "\n", env->htab_base,
env->htab_mask, vsid, ptem, ~hash);
" hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);

pte_offset = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
}

return pte_offset;
return ptex;
}

unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
@@ -708,7 +662,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
CPUPPCState *env = &cpu->env;
ppc_slb_t *slb;
unsigned apshift;
hwaddr pte_offset;
hwaddr ptex;
ppc_hash_pte64_t pte;
int pp_prot, amr_prot, prot;
uint64_t new_pte1, dsisr;
@@ -792,8 +746,8 @@ skip_slb_search:
}

/* 4. Locate the PTE in the hash table */
pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
if (pte_offset == -1) {
ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
if (ptex == -1) {
dsisr = 0x40000000;
if (rwx == 2) {
ppc_hash64_set_isi(cs, env, dsisr);
@@ -806,7 +760,7 @@ skip_slb_search:
return 1;
}
qemu_log_mask(CPU_LOG_MMU,
"found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);
"found PTE at index %08" HWADDR_PRIx "\n", ptex);

/* 5. Check access permissions */

@@ -849,8 +803,7 @@ skip_slb_search:
}

if (new_pte1 != pte.pte1) {
ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64,
pte.pte0, new_pte1);
ppc_hash64_store_hpte(cpu, ptex, pte.pte0, new_pte1);
}

/* 7. Determine the real address from the PTE */
@@ -867,7 +820,7 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
CPUPPCState *env = &cpu->env;
ppc_slb_t *slb;
hwaddr pte_offset, raddr;
hwaddr ptex, raddr;
ppc_hash_pte64_t pte;
unsigned apshift;

@@ -900,8 +853,8 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
}
}

pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
if (pte_offset == -1) {
ptex = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
if (ptex == -1) {
return -1;
}

@@ -909,30 +862,24 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
& TARGET_PAGE_MASK;
}

void ppc_hash64_store_hpte(PowerPCCPU *cpu,
target_ulong pte_index,
target_ulong pte0, target_ulong pte1)
void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
uint64_t pte0, uint64_t pte1)
{
CPUPPCState *env = &cpu->env;
hwaddr base = ppc_hash64_hpt_base(cpu);
hwaddr offset = ptex * HASH_PTE_SIZE_64;

if (env->external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
if (cpu->vhyp) {
PPCVirtualHypervisorClass *vhc =
PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
vhc->store_hpte(cpu->vhyp, ptex, pte0, pte1);
return;
}

pte_index *= HASH_PTE_SIZE_64;
if (env->external_htab) {
stq_p(env->external_htab + pte_index, pte0);
stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
} else {
stq_phys(CPU(cpu)->as, env->htab_base + pte_index, pte0);
stq_phys(CPU(cpu)->as,
env->htab_base + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
}
stq_phys(CPU(cpu)->as, base + offset, pte0);
stq_phys(CPU(cpu)->as, base + offset + HASH_PTE_SIZE_64 / 2, pte1);
}

void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
target_ulong pte_index,
void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
target_ulong pte0, target_ulong pte1)
{
/*
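With these changes the 64-bit hash code passes plain PTE indexes (ptex) around instead of byte offsets into the table; the conversion to an address happens only at the edges, in ppc_hash64_map_hptes() and ppc_hash64_store_hpte() above. A one-line reminder of the relation, as a sketch rather than new code in the tree:

    /* ptex <-> byte offset within the HPT (HASH_PTE_SIZE_64 == 16) */
    hwaddr offset = ptex * HASH_PTE_SIZE_64;
    /* and a primary PTEG starts at ptex = (hash & hpt_mask) * HPTES_PER_GROUP */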
@@ -10,8 +10,8 @@ int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr address, int rw,
int mmu_idx);
void ppc_hash64_store_hpte(PowerPCCPU *cpu, target_ulong index,
target_ulong pte0, target_ulong pte1);
void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
uint64_t pte0, uint64_t pte1);
void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
target_ulong pte_index,
target_ulong pte0, target_ulong pte1);
@@ -56,6 +56,9 @@ void ppc_hash64_update_rmls(CPUPPCState *env);
* Hash page table definitions
*/

#define SDR_64_HTABORG 0x0FFFFFFFFFFC0000ULL
#define SDR_64_HTABSIZE 0x000000000000001FULL

#define HPTES_PER_GROUP 8
#define HASH_PTE_SIZE_64 16
#define HASH_PTEG_SIZE_64 (HASH_PTE_SIZE_64 * HPTES_PER_GROUP)
@@ -91,45 +94,41 @@ void ppc_hash64_update_rmls(CPUPPCState *env);
#define HPTE64_V_1TB_SEG 0x4000000000000000ULL
#define HPTE64_V_VRMA_MASK 0x4001ffffff000000ULL

void ppc_hash64_set_sdr1(PowerPCCPU *cpu, target_ulong value,
Error **errp);
void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift,
Error **errp);

uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index);
void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token);

static inline target_ulong ppc_hash64_load_hpte0(PowerPCCPU *cpu,
uint64_t token, int index)
static inline hwaddr ppc_hash64_hpt_base(PowerPCCPU *cpu)
{
CPUPPCState *env = &cpu->env;
uint64_t addr;

addr = token + (index * HASH_PTE_SIZE_64);
if (env->external_htab) {
return ldq_p((const void *)(uintptr_t)addr);
} else {
return ldq_phys(CPU(cpu)->as, addr);
}
return cpu->env.spr[SPR_SDR1] & SDR_64_HTABORG;
}

static inline target_ulong ppc_hash64_load_hpte1(PowerPCCPU *cpu,
uint64_t token, int index)
static inline hwaddr ppc_hash64_hpt_mask(PowerPCCPU *cpu)
{
CPUPPCState *env = &cpu->env;
uint64_t addr;

addr = token + (index * HASH_PTE_SIZE_64) + HASH_PTE_SIZE_64/2;
if (env->external_htab) {
return ldq_p((const void *)(uintptr_t)addr);
} else {
return ldq_phys(CPU(cpu)->as, addr);
if (cpu->vhyp) {
PPCVirtualHypervisorClass *vhc =
PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
return vhc->hpt_mask(cpu->vhyp);
}
return (1ULL << ((cpu->env.spr[SPR_SDR1] & SDR_64_HTABSIZE) + 18 - 7)) - 1;
}

typedef struct {
struct ppc_hash_pte64 {
uint64_t pte0, pte1;
} ppc_hash_pte64_t;
};

const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
hwaddr ptex, int n);
void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
hwaddr ptex, int n);

static inline uint64_t ppc_hash64_hpte0(PowerPCCPU *cpu,
const ppc_hash_pte64_t *hptes, int i)
{
return ldq_p(&(hptes[i].pte0));
}

static inline uint64_t ppc_hash64_hpte1(PowerPCCPU *cpu,
const ppc_hash_pte64_t *hptes, int i)
{
return ldq_p(&(hptes[i].pte1));
}

#endif /* CONFIG_USER_ONLY */

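For the non-vhyp case the new ppc_hash64_hpt_mask() derives the PTEG-index mask directly from the HTABSIZE field of SDR1: the table holds 2^(HTABSIZE+11) PTEGs of HASH_PTEG_SIZE_64 (128) bytes each, i.e. 2^(HTABSIZE+18) bytes in total. A worked example based on the inline above, as a sketch only:

    /* HTABSIZE = 0, the minimum table: */
    hwaddr mask = (1ULL << (0 + 18 - 7)) - 1;   /* 0x7FF -> 2048 PTEGs */
    /* 2048 PTEGs * 128 bytes = 256KiB of hash page table */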
@@ -28,6 +28,7 @@
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"

//#define DEBUG_MMU
//#define DEBUG_BATS
@@ -466,6 +467,7 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong eaddr, int rw, int type)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
hwaddr hash;
target_ulong vsid;
int ds, pr, target_page_bits;
@@ -503,7 +505,7 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
" htab_mask " TARGET_FMT_plx
" hash " TARGET_FMT_plx "\n",
env->htab_base, env->htab_mask, hash);
ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
ctx->hash[0] = hash;
ctx->hash[1] = ~hash;

@@ -518,9 +520,11 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
uint32_t a0, a1, a2, a3;

qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
"\n", env->htab_base, env->htab_mask + 0x80);
for (curaddr = env->htab_base;
curaddr < (env->htab_base + env->htab_mask + 0x80);
"\n", ppc_hash32_hpt_base(cpu),
ppc_hash32_hpt_mask(env) + 0x80);
for (curaddr = ppc_hash32_hpt_base(cpu);
curaddr < (ppc_hash32_hpt_base(cpu)
+ ppc_hash32_hpt_mask(cpu) + 0x80);
curaddr += 16) {
a0 = ldl_phys(cs->as, curaddr);
a1 = ldl_phys(cs->as, curaddr + 4);
@@ -1205,12 +1209,13 @@ static void mmu6xx_dump_BATs(FILE *f, fprintf_function cpu_fprintf,
static void mmu6xx_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
CPUPPCState *env)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
ppc6xx_tlb_t *tlb;
target_ulong sr;
int type, way, entry, i;

cpu_fprintf(f, "HTAB base = 0x%"HWADDR_PRIx"\n", env->htab_base);
cpu_fprintf(f, "HTAB mask = 0x%"HWADDR_PRIx"\n", env->htab_mask);
cpu_fprintf(f, "HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
cpu_fprintf(f, "HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));

cpu_fprintf(f, "\nSegment registers:\n");
for (i = 0; i < 32; i++) {
@@ -1592,9 +1597,9 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
tlb_miss:
env->error_code |= ctx.key << 19;
env->spr[SPR_HASH1] = env->htab_base +
env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) +
get_pteg_offset32(cpu, ctx.hash[0]);
env->spr[SPR_HASH2] = env->htab_base +
env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
get_pteg_offset32(cpu, ctx.hash[1]);
break;
case POWERPC_MMU_SOFT_74xx:
@@ -1997,26 +2002,28 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
/* Special registers manipulation */
void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
assert(!env->external_htab);
env->spr[SPR_SDR1] = value;
assert(!cpu->vhyp);
#if defined(TARGET_PPC64)
if (env->mmu_model & POWERPC_MMU_64) {
PowerPCCPU *cpu = ppc_env_get_cpu(env);
Error *local_err = NULL;
target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE;
target_ulong htabsize = value & SDR_64_HTABSIZE;

ppc_hash64_set_sdr1(cpu, value, &local_err);
if (local_err) {
error_report_err(local_err);
error_free(local_err);
if (value & ~sdr_mask) {
error_report("Invalid bits 0x"TARGET_FMT_lx" set in SDR1",
value & ~sdr_mask);
value &= sdr_mask;
}
if (htabsize > 28) {
error_report("Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
htabsize);
return;
}
} else
#endif /* defined(TARGET_PPC64) */
{
/* FIXME: Should check for valid HTABMASK values */
env->htab_mask = ((value & SDR_32_HTABMASK) << 16) | 0xFFFF;
env->htab_base = value & SDR_32_HTABORG;
}
#endif /* defined(TARGET_PPC64) */
/* FIXME: Should check for valid HTABMASK values in 32-bit case */
env->spr[SPR_SDR1] = value;
}

/* Segment registers load and store */
@@ -71,7 +71,7 @@ static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca;
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
static TCGv cpu_reserve;
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;
@@ -173,6 +173,10 @@ void ppc_translate_init(void)
offsetof(CPUPPCState, ov), "OV");
cpu_ca = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, ca), "CA");
cpu_ov32 = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, ov32), "OV32");
cpu_ca32 = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, ca32), "CA32");

cpu_reserve = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, reserve_addr),
@@ -806,12 +810,40 @@ static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
}
tcg_temp_free(t0);
if (NARROW_MODE(ctx)) {
tcg_gen_ext32s_tl(cpu_ov, cpu_ov);
tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1);
if (is_isa300(ctx)) {
tcg_gen_mov_tl(cpu_ov32, cpu_ov);
}
} else {
if (is_isa300(ctx)) {
tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1);
}
tcg_gen_extract_tl(cpu_ov, cpu_ov, 63, 1);
}
tcg_gen_shri_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1);
tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}

static inline void gen_op_arith_compute_ca32(DisasContext *ctx,
TCGv res, TCGv arg0, TCGv arg1,
int sub)
{
TCGv t0;

if (!is_isa300(ctx)) {
return;
}

t0 = tcg_temp_new();
if (sub) {
tcg_gen_eqv_tl(t0, arg0, arg1);
} else {
tcg_gen_xor_tl(t0, arg0, arg1);
}
tcg_gen_xor_tl(t0, t0, res);
tcg_gen_extract_tl(cpu_ca32, t0, 32, 1);
tcg_temp_free(t0);
}

/* Common add function */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
TCGv arg2, bool add_ca, bool compute_ca,
@@ -838,6 +870,9 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
tcg_temp_free(t1);
tcg_gen_shri_tl(cpu_ca, cpu_ca, 32); /* extract bit 32 */
tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
if (is_isa300(ctx)) {
tcg_gen_mov_tl(cpu_ca32, cpu_ca);
}
} else {
TCGv zero = tcg_const_tl(0);
if (add_ca) {
@@ -846,6 +881,7 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
} else {
tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero);
}
gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, 0);
tcg_temp_free(zero);
}
} else {
@@ -985,6 +1021,9 @@ static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
}
if (compute_ov) {
tcg_gen_extu_i32_tl(cpu_ov, t2);
if (is_isa300(ctx)) {
tcg_gen_extu_i32_tl(cpu_ov32, t2);
}
tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
tcg_temp_free_i32(t0);
@@ -1056,6 +1095,9 @@ static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
}
if (compute_ov) {
tcg_gen_mov_tl(cpu_ov, t2);
if (is_isa300(ctx)) {
tcg_gen_mov_tl(cpu_ov32, t2);
}
tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
tcg_temp_free_i64(t0);
@@ -1074,10 +1116,10 @@ static void glue(gen_, name)(DisasContext *ctx)
cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
sign, compute_ov); \
}
/* divwu divwu. divwuo divwuo. */
/* divdu divdu. divduo divduo. */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divw divw. divwo divwo. */
/* divd divd. divdo divdo. */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

@@ -1249,6 +1291,9 @@ static void gen_mullwo(DisasContext *ctx)
tcg_gen_sari_i32(t0, t0, 31);
tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
tcg_gen_extu_i32_tl(cpu_ov, t0);
if (is_isa300(ctx)) {
tcg_gen_mov_tl(cpu_ov32, cpu_ov);
}
tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

tcg_temp_free_i32(t0);
@@ -1310,6 +1355,9 @@ static void gen_mulldo(DisasContext *ctx)

tcg_gen_sari_i64(t0, t0, 63);
tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
if (is_isa300(ctx)) {
tcg_gen_mov_tl(cpu_ov32, cpu_ov);
}
tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

tcg_temp_free_i64(t0);
@@ -1353,17 +1401,22 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
tcg_temp_free(t1);
tcg_gen_shri_tl(cpu_ca, cpu_ca, 32); /* extract bit 32 */
tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
if (is_isa300(ctx)) {
tcg_gen_mov_tl(cpu_ca32, cpu_ca);
}
} else if (add_ca) {
TCGv zero, inv1 = tcg_temp_new();
tcg_gen_not_tl(inv1, arg1);
zero = tcg_const_tl(0);
tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, 0);
tcg_temp_free(zero);
tcg_temp_free(inv1);
} else {
tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
tcg_gen_sub_tl(t0, arg2, arg1);
gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, 1);
}
} else if (add_ca) {
/* Since we're ignoring carry-out, we can simplify the
@@ -1442,7 +1495,10 @@ static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)

static void gen_neg(DisasContext *ctx)
{
gen_op_arith_neg(ctx, 0);
tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
if (unlikely(Rc(ctx->opcode))) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
}

static void gen_nego(DisasContext *ctx)
@@ -3703,7 +3759,7 @@ static void gen_tdi(DisasContext *ctx)

/*** Processor control ***/

static void gen_read_xer(TCGv dst)
static void gen_read_xer(DisasContext *ctx, TCGv dst)
{
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
@@ -3715,6 +3771,12 @@ static void gen_read_xer(TCGv dst)
tcg_gen_or_tl(t0, t0, t1);
tcg_gen_or_tl(dst, dst, t2);
tcg_gen_or_tl(dst, dst, t0);
if (is_isa300(ctx)) {
tcg_gen_shli_tl(t0, cpu_ov32, XER_OV32);
tcg_gen_or_tl(dst, dst, t0);
tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32);
tcg_gen_or_tl(dst, dst, t0);
}
tcg_temp_free(t0);
tcg_temp_free(t1);
tcg_temp_free(t2);
@@ -3722,14 +3784,16 @@ static void gen_read_xer(TCGv dst)

static void gen_write_xer(TCGv src)
{
/* Write all flags, while reading back check for isa300 */
tcg_gen_andi_tl(cpu_xer, src,
~((1u << XER_SO) | (1u << XER_OV) | (1u << XER_CA)));
tcg_gen_shri_tl(cpu_so, src, XER_SO);
tcg_gen_shri_tl(cpu_ov, src, XER_OV);
tcg_gen_shri_tl(cpu_ca, src, XER_CA);
tcg_gen_andi_tl(cpu_so, cpu_so, 1);
tcg_gen_andi_tl(cpu_ov, cpu_ov, 1);
tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
~((1u << XER_SO) |
(1u << XER_OV) | (1u << XER_OV32) |
(1u << XER_CA) | (1u << XER_CA32)));
tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1);
tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1);
tcg_gen_extract_tl(cpu_so, src, XER_SO, 1);
tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1);
tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1);
}

/* mcrxr */
@@ -3755,6 +3819,28 @@ static void gen_mcrxr(DisasContext *ctx)
tcg_gen_movi_tl(cpu_ca, 0);
}

#ifdef TARGET_PPC64
/* mcrxrx */
static void gen_mcrxrx(DisasContext *ctx)
{
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

/* copy OV and OV32 */
tcg_gen_shli_tl(t0, cpu_ov, 1);
tcg_gen_or_tl(t0, t0, cpu_ov32);
tcg_gen_shli_tl(t0, t0, 2);
/* copy CA and CA32 */
tcg_gen_shli_tl(t1, cpu_ca, 1);
tcg_gen_or_tl(t1, t1, cpu_ca32);
tcg_gen_or_tl(t0, t0, t1);
tcg_gen_trunc_tl_i32(dst, t0);
tcg_temp_free(t0);
tcg_temp_free(t1);
}
#endif

/* mfcr mfocrf */
static void gen_mfcr(DisasContext *ctx)
{
@@ -6424,6 +6510,7 @@ GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC),
#if defined(TARGET_PPC64)
GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B),
GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(mcrxrx, 0x1F, 0x00, 0x12, 0x007FF801, PPC_NONE, PPC2_ISA300),
#endif
GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC),
GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC),
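gen_op_arith_compute_ca32() relies on the usual carry-recovery identity: for r = a + b (+ carry-in), the per-bit carry-in vector is a ^ b ^ r, so the carry out of bit 31 is bit 32 of that value (for subtraction, arg1 is effectively inverted, hence the eqv). A small host-side sketch of the same computation, only to make the extract explicit; it is not part of the patch:

    /* CA32 for a 64-bit addition, mirroring the TCG sequence above. */
    static inline unsigned ca32_of_add(uint64_t a, uint64_t b, uint64_t r)
    {
        return (unsigned)((((a ^ b) ^ r) >> 32) & 1);
    }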
@@ -107,7 +107,7 @@ static void spr_access_nop(DisasContext *ctx, int sprn, int gprn)
/* XER */
static void spr_read_xer (DisasContext *ctx, int gprn, int sprn)
{
gen_read_xer(cpu_gpr[gprn]);
gen_read_xer(ctx, cpu_gpr[gprn]);
}

static void spr_write_xer (DisasContext *ctx, int sprn, int gprn)
@@ -740,10 +740,22 @@ static void gen_spr_ne_601 (CPUPPCState *env)
&spr_read_decr, &spr_write_decr,
0x00000000);
/* Memory management */
spr_register(env, SPR_SDR1, "SDR1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_sdr1,
0x00000000);
#ifndef CONFIG_USER_ONLY
if (env->has_hv_mode) {
/* SDR1 is a hypervisor resource on CPUs which have a
* hypervisor mode */
spr_register_hv(env, SPR_SDR1, "SDR1",
SPR_NOACCESS, SPR_NOACCESS,
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_sdr1,
0x00000000);
} else {
spr_register(env, SPR_SDR1, "SDR1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_sdr1,
0x00000000);
}
#endif
}

/* BATs 0-3 */
@@ -8835,18 +8847,14 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
}

#if !defined(CONFIG_USER_ONLY)

void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp)
{
cpu->vhyp = vhyp;
}

void cpu_ppc_set_papr(PowerPCCPU *cpu)
void cpu_ppc_set_papr(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp)
{
CPUPPCState *env = &cpu->env;
ppc_spr_t *lpcr = &env->spr_cb[SPR_LPCR];
ppc_spr_t *amor = &env->spr_cb[SPR_AMOR];

cpu->vhyp = vhyp;

/* PAPR always has exception vectors in RAM not ROM. To ensure this,
* MSR[IP] should never be set.
*
@@ -10489,11 +10497,12 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
#else
cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_ppc_cpu;
#if defined(TARGET_PPC64)
cc->write_elf64_note = ppc64_cpu_write_elf64_note;
#endif
#endif
cc->cpu_exec_enter = ppc_cpu_exec_enter;
#if defined(CONFIG_SOFTMMU)
cc->write_elf64_note = ppc64_cpu_write_elf64_note;
cc->write_elf32_note = ppc32_cpu_write_elf32_note;
#endif
|
||||
|
||||
cc->gdb_num_core_regs = 71;
|
||||
|
||||
|