/*
 * ARM Generic Interrupt Controller v3 (emulation)
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

/* This file contains implementation code for an interrupt controller
 * which implements the GICv3 architecture. Specifically this is where
 * the device class itself and the functions for handling interrupts
 * coming in and going out live.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "hw/intc/arm_gicv3.h"
#include "gicv3_internal.h"

static bool irqbetter(GICv3CPUState *cs, int irq, uint8_t prio, bool nmi)
{
    /* Return true if this IRQ at this priority should take
     * precedence over the current recorded highest priority
     * pending interrupt for this CPU. We also return true if
     * the current recorded highest priority pending interrupt
     * is the same as this one (a property which the calling code
     * relies on).
     */
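    /*
     * Illustrative example (hypothetical values, not from the spec): if
     * the recorded best is {irq = 40, prio = 0x80, nmi = false}, then a
     * new IRQ at prio 0x60 wins on priority alone; a new IRQ at prio
     * 0x80 with nmi = true wins via the NMI rule below; and a plain
     * prio 0x80 IRQ wins only if its interrupt number is <= 40.
     */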
    if (prio != cs->hppi.prio) {
        return prio < cs->hppi.prio;
    }

    /*
     * The same priority IRQ with the non-maskable property should be
     * signalled to the CPU, as it has a higher priority than the
     * labelled 0x80 or 0x00.
     */
    if (nmi != cs->hppi.nmi) {
        return nmi;
    }

    /* If multiple pending interrupts have the same priority then it is an
     * IMPDEF choice which of them to signal to the CPU. We choose to
     * signal the one with the lowest interrupt number.
     */
    if (irq <= cs->hppi.irq) {
        return true;
    }
    return false;
}

static uint32_t gicd_int_pending(GICv3State *s, int irq)
{
    /* Recalculate which distributor interrupts are actually pending
     * in the group of 32 interrupts starting at irq (which should be a multiple
     * of 32), and return a 32-bit integer which has a bit set for each
     * interrupt that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     * + the PENDING latch is set OR it is level triggered and the input is 1
     * + its ENABLE bit is set
     * + the GICD enable bit for its group is set
     * + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
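    /*
     * Illustrative example with hypothetical register values:
     *   pending      = 0x00000001
     *   edge_trigger = 0x00000001
     *   level        = 0x00000002
     *   enable       = 0x00000003
     *   active       = 0x00000000
     * The raw candidates are pending | (~edge_trigger & level) = 0x3
     * (bit 0 latched, bit 1 level triggered with input high); both are
     * enabled and inactive, so both survive to the group filtering below.
     */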
    uint32_t pend, grpmask;
    uint32_t pending = *gic_bmp_ptr32(s->pending, irq);
    uint32_t edge_trigger = *gic_bmp_ptr32(s->edge_trigger, irq);
    uint32_t level = *gic_bmp_ptr32(s->level, irq);
    uint32_t group = *gic_bmp_ptr32(s->group, irq);
    uint32_t grpmod = *gic_bmp_ptr32(s->grpmod, irq);
    uint32_t enable = *gic_bmp_ptr32(s->enabled, irq);
    uint32_t active = *gic_bmp_ptr32(s->active, irq);

    pend = pending | (~edge_trigger & level);
    pend &= enable;
    pend &= ~active;

    if (s->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    }

    grpmask = 0;
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= group;
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~group & grpmod);
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~group & ~grpmod);
    }
    pend &= grpmask;

    return pend;
}

static uint32_t gicr_int_pending(GICv3CPUState *cs)
{
    /* Recalculate which redistributor interrupts are actually pending,
     * and return a 32-bit integer which has a bit set for each interrupt
     * that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     * + the PENDING latch is set OR it is level triggered and the input is 1
     * + its ENABLE bit is set
     * + the GICD enable bit for its group is set
     * + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
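    /*
     * Unlike gicd_int_pending(), this covers only the 32 SGIs and PPIs
     * private to this CPU, whose state lives in the per-CPU GICR_*
     * registers rather than in the distributor bitmaps.
     */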
    uint32_t pend, grpmask, grpmod;

    pend = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
    pend &= cs->gicr_ienabler0;
    pend &= ~cs->gicr_iactiver0;

    if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    } else {
        grpmod = cs->gicr_igrpmodr0;
    }

    grpmask = 0;
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= cs->gicr_igroupr0;
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~cs->gicr_igroupr0 & grpmod);
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~cs->gicr_igroupr0 & ~grpmod);
    }
    pend &= grpmask;

    return pend;
}

static bool gicv3_get_priority(GICv3CPUState *cs, bool is_redist, int irq,
                               uint8_t *prio)
{
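    /*
     * Look up the effective priority of interrupt @irq for this CPU,
     * storing it in *@prio, and return true if the interrupt has the
     * non-maskable property. NMIs report a fixed priority label rather
     * than their programmed priority: 0x80 for a Group 1 NMI when
     * GICD_CTLR.DS is 0, and 0x0 otherwise.
     */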
    uint32_t nmi = 0x0;

    if (is_redist) {
        nmi = extract32(cs->gicr_inmir0, irq, 1);
    } else {
        nmi = *gic_bmp_ptr32(cs->gic->nmi, irq);
        nmi = nmi & (1 << (irq & 0x1f));
    }

    if (nmi) {
        /* DS = 0 & Non-secure NMI */
        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
            ((is_redist && extract32(cs->gicr_igroupr0, irq, 1)) ||
             (!is_redist && gicv3_gicd_group_test(cs->gic, irq)))) {
            *prio = 0x80;
        } else {
            *prio = 0x0;
        }

        return true;
    }

    if (is_redist) {
        *prio = cs->gicr_ipriorityr[irq];
    } else {
        *prio = cs->gic->gicd_ipriority[irq];
    }

    return false;
}

/* Update the interrupt status after state in a redistributor
 * or CPU interface has changed, but don't tell the CPU i/f.
 */
static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
{
    /* Find the highest priority pending interrupt among the
     * redistributor interrupts (SGIs and PPIs).
     */
    bool seenbetter = false;
    uint8_t prio;
    int i;
    uint32_t pend;
    bool nmi = false;

    /* Find out which redistributor interrupts are eligible to be
     * signaled to the CPU interface.
     */
    pend = gicr_int_pending(cs);

    if (pend) {
        for (i = 0; i < GIC_INTERNAL; i++) {
            if (!(pend & (1 << i))) {
                continue;
            }
            nmi = gicv3_get_priority(cs, true, i, &prio);
            if (irqbetter(cs, i, prio, nmi)) {
                cs->hppi.irq = i;
                cs->hppi.prio = prio;
                cs->hppi.nmi = nmi;
                seenbetter = true;
            }
        }
    }

    if (seenbetter) {
        cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
    }
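
    /*
     * LPIs are not scanned above: the redistributor LPI code maintains
     * a cached highest priority pending LPI in cs->hpplpi, so only that
     * single candidate needs comparing against the best SGI/PPI found
     * so far.
     */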
    if ((cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) && cs->gic->lpi_enable &&
        (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) &&
        (cs->hpplpi.prio != 0xff)) {
        if (irqbetter(cs, cs->hpplpi.irq, cs->hpplpi.prio, cs->hpplpi.nmi)) {
            cs->hppi.irq = cs->hpplpi.irq;
            cs->hppi.prio = cs->hpplpi.prio;
            cs->hppi.nmi = cs->hpplpi.nmi;
            cs->hppi.grp = cs->hpplpi.grp;
            seenbetter = true;
        }
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was no
     * previous pending interrupt at all), then that is still valid, and
     * we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
    if (!seenbetter && cs->hppi.prio != 0xff &&
        (cs->hppi.irq < GIC_INTERNAL ||
         cs->hppi.irq >= GICV3_LPI_INTID_START)) {
        gicv3_full_update_noirqset(cs->gic);
    }
}

/* Update the GIC status after state in a redistributor or
 * CPU interface has changed, and inform the CPU i/f of
 * its new highest priority pending interrupt.
 */
void gicv3_redist_update(GICv3CPUState *cs)
{
    gicv3_redist_update_noirqset(cs);
    gicv3_cpuif_update(cs);
}

/* Update the GIC status after state in the distributor has
 * changed affecting @len interrupts starting at @start,
 * but don't tell the CPU i/f.
 */
static void gicv3_update_noirqset(GICv3State *s, int start, int len)
{
    int i;
    uint8_t prio;
    uint32_t pend = 0;
    bool nmi = false;

    assert(start >= GIC_INTERNAL);
    assert(len > 0);

    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].seenbetter = false;
    }

    /* Find the highest priority pending interrupt in this range. */
    for (i = start; i < start + len; i++) {
        GICv3CPUState *cs;

        if (i == start || (i & 0x1f) == 0) {
            /* Calculate the next 32 bits worth of pending status */
            pend = gicd_int_pending(s, i & ~0x1f);
        }

        if (!(pend & (1 << (i & 0x1f)))) {
            continue;
        }
        cs = s->gicd_irouter_target[i];
        if (!cs) {
            /* Interrupts whose target is not an implemented CPU should
             * remain pending and not be forwarded to any CPU.
             */
            continue;
        }
        nmi = gicv3_get_priority(cs, false, i, &prio);
        if (irqbetter(cs, i, prio, nmi)) {
            cs->hppi.irq = i;
            cs->hppi.prio = prio;
            cs->hppi.nmi = nmi;
            cs->seenbetter = true;
        }
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was
     * no previous pending interrupt at all), then that
     * is still valid, and we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        if (cs->seenbetter) {
            cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
        }

        if (!cs->seenbetter && cs->hppi.prio != 0xff &&
            cs->hppi.irq >= start && cs->hppi.irq < start + len) {
            gicv3_full_update_noirqset(s);
            break;
        }
    }
}

void gicv3_update(GICv3State *s, int start, int len)
{
    int i;

    gicv3_update_noirqset(s, start, len);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}

void gicv3_full_update_noirqset(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, but
     * don't update any outbound IRQ lines.
     */
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].hppi.prio = 0xff;
        s->cpu[i].hppi.nmi = false;
    }

    /* Note that we can guarantee that these functions will not
     * recursively call back into gicv3_full_update(), because
     * at each point the "previous best" is always outside the
     * range we ask them to update.
     */
    gicv3_update_noirqset(s, GIC_INTERNAL, s->num_irq - GIC_INTERNAL);

    for (i = 0; i < s->num_cpu; i++) {
        gicv3_redist_update_noirqset(&s->cpu[i]);
    }
}

void gicv3_full_update(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, including
     * updating outbound IRQ lines.
     */
    int i;

    gicv3_full_update_noirqset(s);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}

/* Process a change in an external IRQ input. */
static void gicv3_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     * [0..N-1] : external interrupts
     * [N..N+31] : PPI (internal) interrupts for CPU 0
     * [N+32..N+63] : PPI (internal) interrupts for CPU 1
     * ...
     */
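    /*
     * Illustrative example with hypothetical sizes: if num_irq is 288
     * then N = 288 - GIC_INTERNAL = 256, so input line 5 is SPI 37
     * (5 + GIC_INTERNAL) and input line 260 is PPI 4 on CPU 0.
     */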
    GICv3State *s = opaque;

    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* external interrupt (SPI) */
        gicv3_dist_set_irq(s, irq + GIC_INTERNAL, level);
    } else {
        /* per-cpu interrupt (PPI) */
        int cpu;

        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        assert(cpu < s->num_cpu);
        /* Raising SGIs via this function would be a bug in how the board
         * model wires up interrupts.
         */
        assert(irq >= GIC_NR_SGIS);
        gicv3_redist_set_irq(&s->cpu[cpu], irq, level);
    }
}

static void arm_gicv3_post_load(GICv3State *s)
{
    int i;

    /* Recalculate our cached idea of the current highest priority
     * pending interrupt, but don't set IRQ or FIQ lines.
     */
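    /* Recalculate the cached highest priority pending LPI first,
     * since gicv3_full_update_noirqset() consumes cs->hpplpi.
     */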
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_redist_update_lpi_only(&s->cpu[i]);
    }
    gicv3_full_update_noirqset(s);
    /* Repopulate the cache of GICv3CPUState pointers for target CPUs */
    gicv3_cache_all_target_cpustates(s);
}
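
/* The two MemoryRegionOps here cover the distributor and redistributor
 * register frames respectively; gicv3_init_irqs_and_mmio() registers
 * them in that order.
 */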
static const MemoryRegionOps gic_ops[] = {
    {
        .read_with_attrs = gicv3_dist_read,
        .write_with_attrs = gicv3_dist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
        .valid.min_access_size = 1,
        .valid.max_access_size = 8,
        .impl.min_access_size = 1,
        .impl.max_access_size = 8,
    },
    {
        .read_with_attrs = gicv3_redist_read,
        .write_with_attrs = gicv3_redist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
        .valid.min_access_size = 1,
        .valid.max_access_size = 8,
        .impl.min_access_size = 1,
        .impl.max_access_size = 8,
    }
};

static void arm_gic_realize(DeviceState *dev, Error **errp)
{
    /* Device instance realize function for the GIC sysbus device */
    GICv3State *s = ARM_GICV3(dev);
    ARMGICv3Class *agc = ARM_GICV3_GET_CLASS(s);
    Error *local_err = NULL;

    agc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops);

    gicv3_init_cpuif(s);
}

static void arm_gicv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);
    ARMGICv3Class *agc = ARM_GICV3_CLASS(klass);

    agcc->post_load = arm_gicv3_post_load;
    device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize);
}

static const TypeInfo arm_gicv3_info = {
    .name = TYPE_ARM_GICV3,
    .parent = TYPE_ARM_GICV3_COMMON,
    .instance_size = sizeof(GICv3State),
    .class_init = arm_gicv3_class_init,
    .class_size = sizeof(ARMGICv3Class),
};

static void arm_gicv3_register_types(void)
{
    type_register_static(&arm_gicv3_info);
}

type_init(arm_gicv3_register_types)
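
/*
 * Hypothetical board wiring sketch (illustrative only, not part of this
 * file): a board model would typically create and connect the GIC like
 * so, where "uart" and "spi_num" are placeholders:
 *
 *   DeviceState *gicdev = qdev_new(TYPE_ARM_GICV3);
 *   qdev_prop_set_uint32(gicdev, "num-cpu", smp_cpus);
 *   sysbus_realize_and_unref(SYS_BUS_DEVICE(gicdev), &error_fatal);
 *   ...
 *   qdev_connect_gpio_out(uart, 0, qdev_get_gpio_in(gicdev, spi_num));
 */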