/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"
#include "smmu-internal.h"

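/*
 * Report a page-table-walk fault to the guest only if fault recording is
 * enabled for the stage that faulted: CD.R for stage 1, STE.S2R for stage 2.
 */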
#define PTW_RECORD_FAULT(ptw_info, cfg) (((ptw_info).stage == SMMU_STAGE_1 && \
                                         (cfg)->record_faults) || \
                                         ((ptw_info).stage == SMMU_STAGE_2 && \
                                         (cfg)->s2cfg.record_faults))

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{

    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

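/*
 * Queue entries (commands and events) are little-endian in guest memory:
 * each 32-bit word is byte-swapped after reading and before writing.
 */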
static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);
    MemTxResult ret;
    int i;

    ret = dma_memory_read(&address_space_memory, addr, cmd, sizeof(Cmd),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        return ret;
    }
    for (i = 0; i < ARRAY_SIZE(cmd->word); i++) {
        le32_to_cpus(&cmd->word[i]);
    }
    return ret;
}

static MemTxResult queue_write(SMMUQueue *q, Evt *evt_in)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;
    Evt evt = *evt_in;
    int i;

    for (i = 0; i < ARRAY_SIZE(evt.word); i++) {
        cpu_to_le32s(&evt.word[i]);
    }
    ret = dma_memory_write(&address_space_memory, addr, &evt, sizeof(Evt),
                           MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

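/*
 * Push one event record onto the event queue. Returns MEMTX_ERROR when the
 * queue is disabled or full; on success the EVTQ interrupt is pulsed once
 * the queue is non-empty.
 */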
static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

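/*
 * Build an event record from @info and queue it. If the event queue write
 * fails, a GERROR interrupt is raised with EVENTQ_ABT_ERR set.
 */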
void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

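/*
 * Reset the register state and advertise the implemented features in the ID
 * registers. Which translation stages are exposed depends on s->stage:
 * "2" (stage 2 only), "nested" (both), otherwise stage 1 only.
 */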
static void smmuv3_init_regs(SMMUv3State *s)
{
    /* Based on the sys property, the stages supported in smmu will be advertised. */
    if (s->stage && !strcmp("2", s->stage)) {
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S2P, 1);
    } else if (s->stage && !strcmp("nested", s->stage)) {
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1);
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S2P, 1);
    } else {
        s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1);
    }

    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, VMID16, 1); /* 16-bit VMID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
    if (FIELD_EX32(s->idr[0], IDR0, S2P)) {
        /* XNX is a stage-2-specific feature */
        s->idr[3] = FIELD_DP32(s->idr[3], IDR3, XNX, 1);
    }
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, BBML, 2);

    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */
    /* 4K, 16K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
    s->aidr = 0x1;
    s->cr[0] = 0;
    s->cr0ack = 0;
    s->irq_ctrl = 0;
    s->gerror = 0;
    s->gerrorn = 0;
    s->statusr = 0;
    s->gbpa = SMMU_GBPA_RESET_VAL;
}

static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret, i;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    for (i = 0; i < ARRAY_SIZE(buf->word); i++) {
        le32_to_cpus(&buf->word[i]);
    }
    return 0;
}

static SMMUTranslationStatus smmuv3_do_translate(SMMUv3State *s, hwaddr addr,
                                                 SMMUTransCfg *cfg,
                                                 SMMUEventInfo *event,
                                                 IOMMUAccessFlags flag,
                                                 SMMUTLBEntry **out_entry,
                                                 SMMUTranslationClass class);

/* @ssid > 0 not supported yet */
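/*
 * Fetch the CD referenced by the STE. With nested translation the
 * S1ContextPtr is an IPA, so it is first translated to a PA through stage 2.
 */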
static int smmu_get_cd(SMMUv3State *s, STE *ste, SMMUTransCfg *cfg,
                       uint32_t ssid, CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret, i;
    SMMUTranslationStatus status;
    SMMUTLBEntry *entry;

    trace_smmuv3_get_cd(addr);

    if (cfg->stage == SMMU_NESTED) {
        status = smmuv3_do_translate(s, addr, cfg, event,
                                     IOMMU_RO, &entry, SMMU_CLASS_CD);

        /* Same PTW faults are reported but with CLASS = CD. */
        if (status != SMMU_TRANS_SUCCESS) {
            return -EINVAL;
        }

        addr = CACHED_ENTRY_TO_ADDR(entry, addr);
    }

    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    for (i = 0; i < ARRAY_SIZE(buf->word); i++) {
        le32_to_cpus(&buf->word[i]);
    }
    return 0;
}

/*
 * Max valid value is 39 when SMMU_IDR3.STT == 0.
 * In architectures after SMMUv3.0:
 * - If STE.S2TG selects a 4KB or 16KB granule, the minimum valid value for this
 *   field is MAX(16, 64-IAS)
 * - If STE.S2TG selects a 64KB granule, the minimum valid value for this field
 *   is (64-IAS).
 * As we only support AA64, IAS = OAS.
 */
static bool s2t0sz_valid(SMMUTransCfg *cfg)
{
    if (cfg->s2cfg.tsz > 39) {
        return false;
    }

    if (cfg->s2cfg.granule_sz == 16) {
        return (cfg->s2cfg.tsz >= 64 - cfg->s2cfg.eff_ps);
    }

    return (cfg->s2cfg.tsz >= MAX(64 - cfg->s2cfg.eff_ps, 16));
}

/*
 * Return true if the s2 page table config is valid.
 * This checks whether, with the configured start level, ias_bits and
 * granularity, we can have a valid page table as described in
 * ARM ARM D8.2 Translation process.
 * The idea here is to see, for the highest possible number of IPA bits, how
 * many concatenated tables we would need; if it is more than 16, then this is
 * not possible.
 */
static bool s2_pgtable_config_valid(uint8_t sl0, uint8_t t0sz, uint8_t gran)
{
    int level = get_start_level(sl0, gran);
    uint64_t ipa_bits = 64 - t0sz;
    uint64_t max_ipa = (1ULL << ipa_bits) - 1;
    int nr_concat = pgd_concat_idx(level, gran, max_ipa) + 1;

    return nr_concat <= VMSA_MAX_S2_CONCAT;
}

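/*
 * Decode and validate the stage-2 fields of the STE (S2TG, S2PS, S2SL0,
 * S2T0SZ, S2TTB, ...) into cfg->s2cfg. Returns -EINVAL for any configuration
 * this model cannot handle, so the caller reports C_BAD_STE.
 */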
static int decode_ste_s2_cfg(SMMUv3State *s, SMMUTransCfg *cfg,
                             STE *ste)
{
    uint8_t oas = FIELD_EX32(s->idr[5], IDR5, OAS);

    if (STE_S2AA64(ste) == 0x0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 AArch32 tables not supported\n");
        g_assert_not_reached();
    }

    switch (STE_S2TG(ste)) {
    case 0x0: /* 4KB */
        cfg->s2cfg.granule_sz = 12;
        break;
    case 0x1: /* 64KB */
        cfg->s2cfg.granule_sz = 16;
        break;
    case 0x2: /* 16KB */
        cfg->s2cfg.granule_sz = 14;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 bad STE S2TG: %x\n", STE_S2TG(ste));
        goto bad_ste;
    }

    cfg->s2cfg.vttb = STE_S2TTB(ste);

    cfg->s2cfg.sl0 = STE_S2SL0(ste);
    /* FEAT_TTST not supported. */
    if (cfg->s2cfg.sl0 == 0x3) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 S2SL0 = 0x3 has no meaning!\n");
        goto bad_ste;
    }

    /* For AA64, the effective S2PS size is capped to the OAS. */
    cfg->s2cfg.eff_ps = oas2bits(MIN(STE_S2PS(ste), oas));
    /*
     * For SMMUv3.1 and later, when OAS == IAS == 52, the stage 2 input
     * range is further limited to 48 bits unless STE.S2TG indicates a
     * 64KB granule.
     */
    if (cfg->s2cfg.granule_sz != 16) {
        cfg->s2cfg.eff_ps = MIN(cfg->s2cfg.eff_ps, 48);
    }
    /*
     * It is ILLEGAL for the address in S2TTB to be outside the range
     * described by the effective S2PS value.
     */
    if (cfg->s2cfg.vttb & ~(MAKE_64BIT_MASK(0, cfg->s2cfg.eff_ps))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 S2TTB too large 0x%" PRIx64
                      ", effective PS %d bits\n",
                      cfg->s2cfg.vttb, cfg->s2cfg.eff_ps);
        goto bad_ste;
    }

    cfg->s2cfg.tsz = STE_S2T0SZ(ste);

    if (!s2t0sz_valid(cfg)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 bad STE S2T0SZ = %d\n",
                      cfg->s2cfg.tsz);
        goto bad_ste;
    }

    if (!s2_pgtable_config_valid(cfg->s2cfg.sl0, cfg->s2cfg.tsz,
                                 cfg->s2cfg.granule_sz)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 STE stage 2 config not valid!\n");
        goto bad_ste;
    }

    /* Only LE supported (IDR0.TTENDIAN). */
    if (STE_S2ENDI(ste)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SMMUv3 STE_S2ENDI only supports LE!\n");
        goto bad_ste;
    }

    cfg->s2cfg.affd = STE_S2AFFD(ste);

    cfg->s2cfg.record_faults = STE_S2R(ste);
    /* As stall is not supported. */
    if (STE_S2S(ste)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 Stall not implemented!\n");
        goto bad_ste;
    }

    return 0;

bad_ste:
    return -EINVAL;
}

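/* Decode STE.Config into the abort/bypass flags and the enabled stage(s). */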
static void decode_ste_config(SMMUTransCfg *cfg, uint32_t config)
{

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return;
    }
    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return;
    }

    if (STE_CFG_S1_ENABLED(config)) {
        cfg->stage = SMMU_STAGE_1;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        cfg->stage |= SMMU_STAGE_2;
    }
}

/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;
    uint8_t oas = FIELD_EX32(s->idr[5], IDR5, OAS);
    int ret;

    if (!STE_VALID(ste)) {
        if (!event->inval_ste_allowed) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
        }
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    decode_ste_config(cfg, config);

    if (cfg->aborted || cfg->bypassed) {
        return 0;
    }

    /*
     * If a stage is enabled in SW while not advertised, throw bad ste
     * according to user manual (IHI0070E) "5.2 Stream Table Entry".
     */
    if (!STAGE1_SUPPORTED(s) && STE_CFG_S1_ENABLED(config)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S1 used but not supported.\n");
        goto bad_ste;
    }
    if (!STAGE2_SUPPORTED(s) && STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_GUEST_ERROR, "SMMUv3 S2 used but not supported.\n");
        goto bad_ste;
    }

    if (STAGE2_SUPPORTED(s)) {
        /* VMID is considered even if s2 is disabled. */
        cfg->s2cfg.vmid = STE_S2VMID(ste);
    } else {
        /* Default to -1 */
        cfg->s2cfg.vmid = -1;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        /*
         * Stage-1 OAS defaults to OAS even if not enabled as it would be used
         * in input address check for stage-2.
         */
        cfg->oas = oas2bits(oas);
        ret = decode_ste_s2_cfg(s, cfg, ste);
        if (ret) {
            goto bad_ste;
        }
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}

/**
 * smmu_find_ste - Return the stream table entry associated
 * with the sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream table
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr, strtab_base;
    uint32_t log2size;
    int strtab_size_shift;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
    /*
     * Check SID range against both guest-configured and implementation limits
     */
    if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span, i;
        dma_addr_t l1ptr, l2ptr;
        STEDesc l1std;

        /*
         * Align strtab base address to table size. For this purpose, assume it
         * is not bounded by SMMU_IDR1_SIDSIZE.
         */
        strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
                              sizeof(l1std), MEMTXATTRS_UNSPECIFIED);
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }
        for (i = 0; i < ARRAY_SIZE(l1std.word); i++) {
            le32_to_cpus(&l1std.word[i]);
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            if (!event->inval_ste_allowed) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalid sid=%d (L1STD span=0)\n", sid);
            }
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        strtab_size_shift = log2size + 5;
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        addr = strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}

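/*
 * Decode the stage-1 Context Descriptor into @cfg. With nested translation
 * the TTBx base addresses are IPAs and are translated through stage 2 before
 * they are used for stage-1 walks.
 */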
static int decode_cd(SMMUv3State *s, SMMUTransCfg *cfg,
                     CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;
    SMMUTranslationStatus status;
    SMMUTLBEntry *entry;
    uint8_t oas = FIELD_EX32(s->idr[5], IDR5, OAS);

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(oas), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);
    cfg->affd = CD_AFFD(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 14 &&
             tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        /*
         * An address greater than 48 bits in size can only be output from a
         * TTD when, in SMMUv3.1 and later, the effective IPS is 52 and a 64KB
         * granule is in use for that translation table
         */
        if (tt->granule_sz != 16) {
            cfg->oas = MIN(cfg->oas, 48);
        }
        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);

        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }

        /* Translate the TTBx, from IPA to PA if nesting is enabled. */
        if (cfg->stage == SMMU_NESTED) {
            status = smmuv3_do_translate(s, tt->ttb, cfg, event, IOMMU_RO,
                                         &entry, SMMU_CLASS_TT);
            /*
             * Same PTW faults are reported but with CLASS = TT.
             * If TTBx is larger than the effective stage 1 output address
             * size, it reports C_BAD_CD, which is handled by the above case.
             */
            if (status != SMMU_TRANS_SUCCESS) {
                return -EINVAL;
            }
            tt->ttb = CACHED_ENTRY_TO_ADDR(entry, tt->ttb);
        }

        tt->had = CD_HAD(cd, i);
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
    }

    cfg->record_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}

/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * return < 0 in case of config decoding error (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    /* ASID defaults to -1 (if s1 is not supported). */
    cfg->asid = -1;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed || (cfg->stage == SMMU_STAGE_2)) {
        return 0;
    }
|
|
|
|
|
2024-07-15 11:45:08 +03:00
|
|
|
ret = smmu_get_cd(s, &ste, cfg, 0 /* ssid */, &cd, event);
|
2018-06-26 19:50:42 +03:00
|
|
|
if (ret) {
|
2018-05-04 20:05:51 +03:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2024-07-15 11:45:08 +03:00
|
|
|
return decode_cd(s, cfg, &cd, event);
|
2018-05-04 20:05:51 +03:00
|
|
|
}
|
|
|
|
|
2018-06-26 19:50:42 +03:00
|
|
|
/**
|
|
|
|
* smmuv3_get_config - Look up for a cached copy of configuration data for
|
|
|
|
* @sdev and on cache miss performs a configuration structure decoding from
|
|
|
|
* guest RAM.
|
|
|
|
*
|
|
|
|
* @sdev: SMMUDevice handle
|
|
|
|
* @event: output event info
|
|
|
|
*
|
|
|
|
* The configuration cache contains data resulting from both STE and CD
|
|
|
|
* decoding under the form of an SMMUTransCfg struct. The hash table is indexed
|
|
|
|
* by the SMMUDevice handle.
|
|
|
|
*/
|
|
|
|
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
|
|
|
|
{
|
|
|
|
SMMUv3State *s = sdev->smmu;
|
|
|
|
SMMUState *bc = &s->smmu_state;
|
|
|
|
SMMUTransCfg *cfg;
|
|
|
|
|
|
|
|
cfg = g_hash_table_lookup(bc->configs, sdev);
|
|
|
|
if (cfg) {
|
|
|
|
sdev->cfg_cache_hits++;
|
|
|
|
trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
|
|
|
|
sdev->cfg_cache_hits, sdev->cfg_cache_misses,
|
|
|
|
100 * sdev->cfg_cache_hits /
|
|
|
|
(sdev->cfg_cache_hits + sdev->cfg_cache_misses));
|
|
|
|
} else {
|
|
|
|
sdev->cfg_cache_misses++;
|
|
|
|
trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
|
|
|
|
sdev->cfg_cache_hits, sdev->cfg_cache_misses,
|
|
|
|
100 * sdev->cfg_cache_hits /
|
|
|
|
(sdev->cfg_cache_hits + sdev->cfg_cache_misses));
|
|
|
|
cfg = g_new0(SMMUTransCfg, 1);
|
|
|
|
|
|
|
|
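/* Decode STE/CD into the fresh config; cache it only if decoding succeeds. */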
if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
|
|
|
|
g_hash_table_insert(bc->configs, sdev, cfg);
|
|
|
|
} else {
|
|
|
|
g_free(cfg);
|
|
|
|
cfg = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return cfg;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void smmuv3_flush_config(SMMUDevice *sdev)
|
|
|
|
{
|
|
|
|
SMMUv3State *s = sdev->smmu;
|
|
|
|
SMMUState *bc = &s->smmu_state;
|
|
|
|
|
|
|
|
trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
|
|
|
|
g_hash_table_remove(bc->configs, sdev);
|
|
|
|
}
|
|
|
|
|
hw/arm/smmu: Split smmuv3_translate()
smmuv3_translate() does everything from STE/CD parsing to TLB lookup
and PTW.
Soon, when nesting is supported, stage-1 data (tt, CD) needs to be
translated using stage-2.
Split smmuv3_translate() into 3 functions:
- smmu_translate(): in smmu-common.c, does the TLB lookup, PTW and
TLB insertion; all the functions are already there, this just puts
them together.
This also simplifies the code as it consolidates event generation
in case of TLB lookup permission failure or in TT selection.
- smmuv3_do_translate(): in smmuv3.c, calls smmu_translate() and does
the event population in case of errors.
- smmuv3_translate(): now calls smmuv3_do_translate() for
translation while the rest is the same.
Also, add the stage in trace_smmuv3_translate_success()
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20240715084519.1189624-6-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:05 +03:00
|
|
|
/* Do translation with TLB lookup. */
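/*
 * Call chain after the split described above (sketch):
 *   smmuv3_translate()          - entry point, config lookup and epilogue
 *     -> smmuv3_do_translate()  - wraps smmu_translate(), fills the event
 *                                 record on error
 *       -> smmu_translate()     - TLB lookup, PTW and TLB insertion
 *                                 (smmu-common.c)
 */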
|
|
|
|
static SMMUTranslationStatus smmuv3_do_translate(SMMUv3State *s, hwaddr addr,
|
|
|
|
SMMUTransCfg *cfg,
|
|
|
|
SMMUEventInfo *event,
|
|
|
|
IOMMUAccessFlags flag,
|
2024-07-15 11:45:08 +03:00
|
|
|
SMMUTLBEntry **out_entry,
|
|
|
|
SMMUTranslationClass class)
|
2024-07-15 11:45:05 +03:00
|
|
|
{
|
|
|
|
SMMUPTWEventInfo ptw_info = {};
|
|
|
|
SMMUState *bs = ARM_SMMU(s);
|
|
|
|
SMMUTLBEntry *cached_entry = NULL;
|
2024-07-15 11:45:08 +03:00
|
|
|
int asid, stage;
|
|
|
|
bool desc_s2_translation = class != SMMU_CLASS_IN;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The function uses the argument class to identify which stage is used:
|
|
|
|
* - CLASS = IN: Means an input translation, determine the stage from STE.
|
|
|
|
* - CLASS = CD: Means the addr is an IPA of the CD, and it would be
|
|
|
|
* translated using the stage-2.
|
|
|
|
* - CLASS = TT: Means the addr is an IPA of the stage-1 translation table
|
|
|
|
* and it would be translated using the stage-2.
|
|
|
|
* For the last 2 cases instead of having intrusive changes in the common
|
|
|
|
* logic, we modify the cfg to be a stage-2 translation only in case of
|
|
|
|
* nested, and then restore it after.
|
|
|
|
*/
|
|
|
|
if (desc_s2_translation) {
|
|
|
|
asid = cfg->asid;
|
|
|
|
stage = cfg->stage;
|
|
|
|
cfg->asid = -1;
|
|
|
|
cfg->stage = SMMU_STAGE_2;
|
|
|
|
}
|
2024-07-15 11:45:05 +03:00
|
|
|
|
|
|
|
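/* TLB lookup; on a miss this performs the PTW and caches the result. */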
cached_entry = smmu_translate(bs, cfg, addr, flag, &ptw_info);
|
2024-07-15 11:45:08 +03:00
|
|
|
|
|
|
|
if (desc_s2_translation) {
|
|
|
|
cfg->asid = asid;
|
|
|
|
cfg->stage = stage;
|
|
|
|
}
|
|
|
|
|
2024-07-15 11:45:05 +03:00
|
|
|
if (!cached_entry) {
|
|
|
|
/* All fault records from the PTW have an S2 field. */
|
|
|
|
event->u.f_walk_eabt.s2 = (ptw_info.stage == SMMU_STAGE_2);
|
2024-07-15 11:45:11 +03:00
|
|
|
/*
|
|
|
|
* Fault class is set as follows based on "class" input to
|
|
|
|
* the function and on "ptw_info" from "smmu_translate()":
|
|
|
|
* For stage-1:
|
|
|
|
* - EABT => CLASS_TT (hardcoded)
|
|
|
|
* - other events => CLASS_IN (input to function)
|
|
|
|
* For stage-2 => CLASS_IN (input to function)
|
|
|
|
* For nested, for all events:
|
|
|
|
* - CD fetch => CLASS_CD (input to function)
|
|
|
|
* - walking stage 1 translation table => CLASS_TT (from
|
|
|
|
* is_ipa_descriptor or input in case of TTBx)
|
|
|
|
* - s2 translation => CLASS_IN (input to function)
|
|
|
|
*/
|
|
|
|
class = ptw_info.is_ipa_descriptor ? SMMU_CLASS_TT : class;
|
2024-07-15 11:45:05 +03:00
|
|
|
switch (ptw_info.type) {
|
|
|
|
case SMMU_PTW_ERR_WALK_EABT:
|
|
|
|
event->type = SMMU_EVT_F_WALK_EABT;
|
|
|
|
event->u.f_walk_eabt.rnw = flag & 0x1;
|
|
|
|
event->u.f_walk_eabt.class = (ptw_info.stage == SMMU_STAGE_2) ?
|
2024-07-15 11:45:08 +03:00
|
|
|
class : SMMU_CLASS_TT;
|
2024-07-15 11:45:05 +03:00
|
|
|
event->u.f_walk_eabt.addr2 = ptw_info.addr;
|
|
|
|
break;
|
|
|
|
case SMMU_PTW_ERR_TRANSLATION:
|
2024-07-15 11:45:16 +03:00
|
|
|
if (PTW_RECORD_FAULT(ptw_info, cfg)) {
|
2024-07-15 11:45:05 +03:00
|
|
|
event->type = SMMU_EVT_F_TRANSLATION;
|
|
|
|
event->u.f_translation.addr2 = ptw_info.addr;
|
2024-07-15 11:45:08 +03:00
|
|
|
event->u.f_translation.class = class;
|
2024-07-15 11:45:05 +03:00
|
|
|
event->u.f_translation.rnw = flag & 0x1;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case SMMU_PTW_ERR_ADDR_SIZE:
|
2024-07-15 11:45:16 +03:00
|
|
|
if (PTW_RECORD_FAULT(ptw_info, cfg)) {
|
2024-07-15 11:45:05 +03:00
|
|
|
event->type = SMMU_EVT_F_ADDR_SIZE;
|
|
|
|
event->u.f_addr_size.addr2 = ptw_info.addr;
|
2024-07-15 11:45:08 +03:00
|
|
|
event->u.f_addr_size.class = class;
|
2024-07-15 11:45:05 +03:00
|
|
|
event->u.f_addr_size.rnw = flag & 0x1;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case SMMU_PTW_ERR_ACCESS:
|
2024-07-15 11:45:16 +03:00
|
|
|
if (PTW_RECORD_FAULT(ptw_info, cfg)) {
|
2024-07-15 11:45:05 +03:00
|
|
|
event->type = SMMU_EVT_F_ACCESS;
|
|
|
|
event->u.f_access.addr2 = ptw_info.addr;
|
2024-07-15 11:45:08 +03:00
|
|
|
event->u.f_access.class = class;
|
2024-07-15 11:45:05 +03:00
|
|
|
event->u.f_access.rnw = flag & 0x1;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case SMMU_PTW_ERR_PERMISSION:
|
2024-07-15 11:45:16 +03:00
|
|
|
if (PTW_RECORD_FAULT(ptw_info, cfg)) {
|
2024-07-15 11:45:05 +03:00
|
|
|
event->type = SMMU_EVT_F_PERMISSION;
|
|
|
|
event->u.f_permission.addr2 = ptw_info.addr;
|
2024-07-15 11:45:08 +03:00
|
|
|
event->u.f_permission.class = class;
|
2024-07-15 11:45:05 +03:00
|
|
|
event->u.f_permission.rnw = flag & 0x1;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
g_assert_not_reached();
|
|
|
|
}
|
|
|
|
return SMMU_TRANS_ERROR;
|
|
|
|
}
|
|
|
|
*out_entry = cached_entry;
|
|
|
|
return SMMU_TRANS_SUCCESS;
|
|
|
|
}
|
|
|
|
|
2024-07-15 11:45:08 +03:00
|
|
|
/*
|
|
|
|
* Sets the InputAddr for an SMMU_TRANS_ERROR, as it can't be
|
|
|
|
* set from all contexts, as smmuv3_get_config() can return
|
|
|
|
* translation faults in case of nested translation (for CD
|
|
|
|
* and TTBx). But in that case the iova is not known.
|
|
|
|
*/
|
|
|
|
static void smmuv3_fixup_event(SMMUEventInfo *event, hwaddr iova)
|
|
|
|
{
|
|
|
|
switch (event->type) {
|
|
|
|
case SMMU_EVT_F_WALK_EABT:
|
|
|
|
case SMMU_EVT_F_TRANSLATION:
|
|
|
|
case SMMU_EVT_F_ADDR_SIZE:
|
|
|
|
case SMMU_EVT_F_ACCESS:
|
|
|
|
case SMMU_EVT_F_PERMISSION:
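/*
 * All these fault records are assumed to share the same layout for
 * the InputAddr field, so writing it through f_walk_eabt covers
 * every listed event type.
 */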
|
|
|
|
event->u.f_walk_eabt.addr = iova;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
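/*
 * Usage sketch: when smmuv3_do_translate() returns SMMU_TRANS_ERROR,
 * the caller, smmuv3_translate(), invokes smmuv3_fixup_event(&event, addr)
 * so the recorded event carries the faulting IOVA, which is unknown
 * inside the CD/TTBx translation paths.
 */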
|
|
|
|
|
2024-07-15 11:45:05 +03:00
|
|
|
/* Entry point to SMMU, does everything. */
|
2018-05-04 20:05:51 +03:00
|
|
|
static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
|
2018-06-15 16:57:16 +03:00
|
|
|
IOMMUAccessFlags flag, int iommu_idx)
|
2018-05-04 20:05:51 +03:00
|
|
|
{
|
|
|
|
SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
|
|
|
|
SMMUv3State *s = sdev->smmu;
|
|
|
|
uint32_t sid = smmu_get_sid(sdev);
|
2019-08-22 20:23:50 +03:00
|
|
|
SMMUEventInfo event = {.type = SMMU_EVT_NONE,
|
|
|
|
.sid = sid,
|
|
|
|
.inval_ste_allowed = false};
|
2018-06-26 19:50:42 +03:00
|
|
|
SMMUTranslationStatus status;
|
2018-06-26 19:50:42 +03:00
|
|
|
SMMUTransCfg *cfg = NULL;
|
2018-05-04 20:05:51 +03:00
|
|
|
IOMMUTLBEntry entry = {
|
|
|
|
.target_as = &address_space_memory,
|
|
|
|
.iova = addr,
|
|
|
|
.translated_addr = addr,
|
|
|
|
.addr_mask = ~(hwaddr)0,
|
|
|
|
.perm = IOMMU_NONE,
|
|
|
|
};
|
2024-07-15 11:45:05 +03:00
|
|
|
SMMUTLBEntry *cached_entry = NULL;
|
2018-05-04 20:05:51 +03:00
|
|
|
|
2018-06-26 19:50:42 +03:00
|
|
|
qemu_mutex_lock(&s->mutex);
|
|
|
|
|
2018-05-04 20:05:51 +03:00
|
|
|
if (!smmu_enabled(s)) {
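/* SMMU disabled: GBPA.ABORT selects whether transactions abort or bypass. */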
|
2023-02-14 12:40:09 +03:00
|
|
|
if (FIELD_EX32(s->gbpa, GBPA, ABORT)) {
|
|
|
|
status = SMMU_TRANS_ABORT;
|
|
|
|
} else {
|
|
|
|
status = SMMU_TRANS_DISABLE;
|
|
|
|
}
|
2018-06-26 19:50:42 +03:00
|
|
|
goto epilogue;
|
2018-05-04 20:05:51 +03:00
|
|
|
}
|
|
|
|
|
2018-06-26 19:50:42 +03:00
|
|
|
cfg = smmuv3_get_config(sdev, &event);
|
|
|
|
if (!cfg) {
|
2018-06-26 19:50:42 +03:00
|
|
|
status = SMMU_TRANS_ERROR;
|
|
|
|
goto epilogue;
|
2018-05-04 20:05:51 +03:00
|
|
|
}
|
|
|
|
|
2018-06-26 19:50:42 +03:00
|
|
|
if (cfg->aborted) {
|
2018-06-26 19:50:42 +03:00
|
|
|
status = SMMU_TRANS_ABORT;
|
|
|
|
goto epilogue;
|
2018-05-04 20:05:51 +03:00
|
|
|
}
|
|
|
|
|
2018-06-26 19:50:42 +03:00
|
|
|
if (cfg->bypassed) {
|
2018-06-26 19:50:42 +03:00
|
|
|
status = SMMU_TRANS_BYPASS;
|
|
|
|
goto epilogue;
|
|
|
|
}
|
|
|
|
|
2024-07-15 11:45:08 +03:00
|
|
|
status = smmuv3_do_translate(s, addr, cfg, &event, flag,
|
|
|
|
&cached_entry, SMMU_CLASS_IN);
|
2018-06-26 19:50:42 +03:00
|
|
|
|
|
|
|
epilogue:
|
2018-06-26 19:50:42 +03:00
|
|
|
qemu_mutex_unlock(&s->mutex);
|
2018-06-26 19:50:42 +03:00
|
|
|
switch (status) {
|
|
|
|
case SMMU_TRANS_SUCCESS:
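        /*
         * Illustration (assuming CACHED_ENTRY_TO_ADDR() resolves to
         * translated_addr + (addr & addr_mask)): a cached 4KB mapping of
         * IOVA 0x8000 to PA 0x40000 queried with addr == 0x8123 yields
         * translated_addr == 0x40123 and addr_mask == 0xFFF.
         */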
|
2022-04-16 10:38:15 +03:00
|
|
|
entry.perm = cached_entry->entry.perm;
|
2024-07-15 11:45:07 +03:00
|
|
|
entry.translated_addr = CACHED_ENTRY_TO_ADDR(cached_entry, addr);
|
2020-07-28 18:08:08 +03:00
|
|
|
entry.addr_mask = cached_entry->entry.addr_mask;
|
2018-06-26 19:50:42 +03:00
|
|
|
trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
|
2024-07-15 11:45:05 +03:00
|
|
|
entry.translated_addr, entry.perm,
|
|
|
|
cfg->stage);
|
2018-06-26 19:50:42 +03:00
|
|
|
break;
|
|
|
|
case SMMU_TRANS_DISABLE:
|
|
|
|
entry.perm = flag;
|
|
|
|
entry.addr_mask = ~TARGET_PAGE_MASK;
|
|
|
|
trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
|
|
|
|
entry.perm);
|
|
|
|
break;
|
|
|
|
case SMMU_TRANS_BYPASS:
|
|
|
|
entry.perm = flag;
|
|
|
|
entry.addr_mask = ~TARGET_PAGE_MASK;
|
|
|
|
trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
|
|
|
|
entry.perm);
|
|
|
|
break;
|
|
|
|
case SMMU_TRANS_ABORT:
|
|
|
|
/* no event is recorded on abort */
|
|
|
|
trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
|
|
|
|
entry.perm);
|
|
|
|
break;
|
|
|
|
case SMMU_TRANS_ERROR:
|
2024-07-15 11:45:08 +03:00
|
|
|
smmuv3_fixup_event(&event, addr);
|
2018-05-04 20:05:51 +03:00
|
|
|
qemu_log_mask(LOG_GUEST_ERROR,
|
2022-04-27 14:15:45 +03:00
|
|
|
"%s translation failed for iova=0x%"PRIx64" (%s)\n",
|
2018-06-26 19:50:42 +03:00
|
|
|
mr->parent_obj.name, addr, smmu_event_string(event.type));
|
2018-05-04 20:05:51 +03:00
|
|
|
smmuv3_record_event(s, &event);
|
2018-06-26 19:50:42 +03:00
|
|
|
break;
|
2018-05-04 20:05:51 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return entry;
|
|
|
|
}
|
|
|
|
|
2018-06-26 19:50:42 +03:00
|
|
|
/**
|
|
|
|
* smmuv3_notify_iova - call the notifier @n for a given
|
|
|
|
* @asid and @iova tuple.
|
|
|
|
*
|
|
|
|
* @mr: IOMMU mr region handle
|
|
|
|
* @n: notifier to be called
|
|
|
|
* @asid: address space ID or negative value if we don't care
|
2023-05-25 12:37:51 +03:00
|
|
|
* @vmid: virtual machine ID or negative value if we don't care
|
2018-06-26 19:50:42 +03:00
|
|
|
* @iova: iova
|
2020-07-28 18:08:11 +03:00
|
|
|
* @tg: translation granule (if communicated through range invalidation)
|
|
|
|
* @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
|
2024-07-15 11:45:15 +03:00
|
|
|
 * @stage: Which stage (1 or 2) is used
|
2018-06-26 19:50:42 +03:00
|
|
|
*/
|
|
|
|
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
|
|
|
|
IOMMUNotifier *n,
|
2023-05-25 12:37:51 +03:00
|
|
|
int asid, int vmid,
|
|
|
|
dma_addr_t iova, uint8_t tg,
|
2024-07-15 11:45:15 +03:00
|
|
|
uint64_t num_pages, int stage)
|
2018-06-26 19:50:42 +03:00
|
|
|
{
|
|
|
|
SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
|
2024-07-15 11:45:15 +03:00
|
|
|
SMMUEventInfo eventinfo = {.inval_ste_allowed = true};
|
|
|
|
SMMUTransCfg *cfg = smmuv3_get_config(sdev, &eventinfo);
|
2020-11-16 19:55:03 +03:00
|
|
|
IOMMUTLBEvent event;
|
2021-01-30 07:32:20 +03:00
|
|
|
uint8_t granule;
|
2024-07-15 11:45:15 +03:00
|
|
|
|
|
|
|
if (!cfg) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* stage is passed from TLB invalidation commands which can be either
|
|
|
|
* stage-1 or stage-2.
|
|
|
|
     * However, IOMMUTLBEvent only understands IOVA. For stage-1 or stage-2
|
|
|
|
* SMMU instances we consider the input address as the IOVA, but when
|
|
|
|
* nesting is used, we can't mix stage-1 and stage-2 addresses, so for
|
|
|
|
* nesting only stage-1 is considered the IOVA and would be notified.
|
|
|
|
*/
|
|
|
|
    if ((stage == SMMU_STAGE_2) && (cfg->stage == SMMU_NESTED)) {
        return;
    }
|
2018-06-26 19:50:42 +03:00
|
|
|
|
2020-07-28 18:08:11 +03:00
|
|
|
if (!tg) {
|
|
|
|
SMMUTransTableInfo *tt;
|
2018-06-26 19:50:42 +03:00
|
|
|
|
2020-07-28 18:08:11 +03:00
|
|
|
if (asid >= 0 && cfg->asid != asid) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2023-05-25 12:37:51 +03:00
|
|
|
if (vmid >= 0 && cfg->s2cfg.vmid != vmid) {
|
2020-07-28 18:08:11 +03:00
|
|
|
return;
|
|
|
|
}
|
2023-05-25 12:37:51 +03:00
|
|
|
|
2024-07-15 11:45:15 +03:00
|
|
|
if (stage == SMMU_STAGE_1) {
|
2023-05-25 12:37:51 +03:00
|
|
|
tt = select_tt(cfg, iova);
|
|
|
|
if (!tt) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
granule = tt->granule_sz;
|
|
|
|
} else {
|
|
|
|
granule = cfg->s2cfg.granule_sz;
|
|
|
|
}
|
|
|
|
|
2021-01-30 07:32:20 +03:00
|
|
|
} else {
|
|
|
|
granule = tg * 2 + 10;
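        /*
         * Illustration: with the usual range-invalidation TG encoding
         * (1 -> 4KB, 2 -> 16KB, 3 -> 64KB) this gives granule 12, 14 or 16.
         */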
|
2018-06-26 19:50:42 +03:00
|
|
|
}
|
|
|
|
|
2020-11-16 19:55:03 +03:00
|
|
|
event.type = IOMMU_NOTIFIER_UNMAP;
|
|
|
|
event.entry.target_as = &address_space_memory;
|
|
|
|
event.entry.iova = iova;
|
|
|
|
event.entry.addr_mask = num_pages * (1 << granule) - 1;
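    /* e.g. num_pages == 8 with a 4KB granule gives addr_mask == 0x7FFF (32KB) */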
|
|
|
|
event.entry.perm = IOMMU_NONE;
|
2018-06-26 19:50:42 +03:00
|
|
|
|
2020-11-16 19:55:03 +03:00
|
|
|
memory_region_notify_iommu_one(n, &event);
|
2018-06-26 19:50:42 +03:00
|
|
|
}
|
|
|
|
|
2023-05-25 12:37:51 +03:00
|
|
|
/* Invalidate an asid/vmid/iova range tuple in all IOMMU memory regions */
|
|
|
|
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, int vmid,
|
|
|
|
dma_addr_t iova, uint8_t tg,
|
2024-07-15 11:45:15 +03:00
|
|
|
uint64_t num_pages, int stage)
|
2018-06-26 19:50:42 +03:00
|
|
|
{
|
2019-04-29 19:35:57 +03:00
|
|
|
SMMUDevice *sdev;
|
2018-06-26 19:50:42 +03:00
|
|
|
|
2019-04-29 19:35:57 +03:00
|
|
|
QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
|
|
|
|
IOMMUMemoryRegion *mr = &sdev->iommu;
|
2018-06-26 19:50:42 +03:00
|
|
|
IOMMUNotifier *n;
|
|
|
|
|
2023-05-25 12:37:51 +03:00
|
|
|
trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, vmid,
|
2024-07-15 11:45:15 +03:00
|
|
|
iova, tg, num_pages, stage);
|
2018-06-26 19:50:42 +03:00
|
|
|
|
|
|
|
IOMMU_NOTIFIER_FOREACH(n, mr) {
|
2024-07-15 11:45:15 +03:00
|
|
|
smmuv3_notify_iova(mr, n, asid, vmid, iova, tg, num_pages, stage);
|
2018-06-26 19:50:42 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-07-15 11:45:12 +03:00
|
|
|
static void smmuv3_range_inval(SMMUState *s, Cmd *cmd, SMMUStage stage)
|
2020-07-28 18:08:10 +03:00
|
|
|
{
|
2021-05-10 15:47:47 +03:00
|
|
|
dma_addr_t end, addr = CMD_ADDR(cmd);
|
2020-07-28 18:08:10 +03:00
|
|
|
uint8_t type = CMD_TYPE(cmd);
|
2023-05-25 12:37:50 +03:00
|
|
|
int vmid = -1;
|
2021-05-10 15:47:47 +03:00
|
|
|
uint8_t scale = CMD_SCALE(cmd);
|
|
|
|
uint8_t num = CMD_NUM(cmd);
|
|
|
|
uint8_t ttl = CMD_TTL(cmd);
|
2020-07-28 18:08:10 +03:00
|
|
|
bool leaf = CMD_LEAF(cmd);
|
2020-07-28 18:08:11 +03:00
|
|
|
uint8_t tg = CMD_TG(cmd);
|
2021-05-10 15:47:47 +03:00
|
|
|
uint64_t num_pages;
|
|
|
|
uint8_t granule;
|
2020-07-28 18:08:10 +03:00
|
|
|
int asid = -1;
|
2023-05-25 12:37:50 +03:00
|
|
|
SMMUv3State *smmuv3 = ARM_SMMUV3(s);
|
|
|
|
|
|
|
|
/* Only consider VMID if stage-2 is supported. */
|
|
|
|
if (STAGE2_SUPPORTED(smmuv3)) {
|
|
|
|
vmid = CMD_VMID(cmd);
|
|
|
|
}
|
2020-07-28 18:08:10 +03:00
|
|
|
|
|
|
|
if (type == SMMU_CMD_TLBI_NH_VA) {
|
|
|
|
asid = CMD_ASID(cmd);
|
|
|
|
}
|
2021-03-09 13:27:40 +03:00
|
|
|
|
2021-05-10 15:47:47 +03:00
|
|
|
if (!tg) {
|
2024-07-15 11:45:12 +03:00
|
|
|
trace_smmuv3_range_inval(vmid, asid, addr, tg, 1, ttl, leaf, stage);
|
2024-07-15 11:45:15 +03:00
|
|
|
smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, 1, stage);
|
2024-07-15 11:45:12 +03:00
|
|
|
if (stage == SMMU_STAGE_1) {
|
|
|
|
smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, 1, ttl);
|
|
|
|
} else {
|
|
|
|
smmu_iotlb_inv_ipa(s, vmid, addr, tg, 1, ttl);
|
|
|
|
}
|
2021-05-10 15:47:47 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* RIL in use */
|
2021-03-09 13:27:40 +03:00
|
|
|
|
2021-05-10 15:47:47 +03:00
|
|
|
num_pages = (num + 1) * BIT_ULL(scale);
|
|
|
|
granule = tg * 2 + 10;
|
|
|
|
|
|
|
|
    /* Split invalidations into power-of-2 range invalidations */
|
|
|
|
end = addr + (num_pages << granule) - 1;
|
2021-03-09 13:27:40 +03:00
|
|
|
|
2021-05-10 15:47:47 +03:00
|
|
|
while (addr != end + 1) {
|
|
|
|
uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);
|
2021-03-09 13:27:40 +03:00
|
|
|
|
2021-05-10 15:47:47 +03:00
|
|
|
num_pages = (mask + 1) >> granule;
|
2024-07-15 11:45:12 +03:00
|
|
|
trace_smmuv3_range_inval(vmid, asid, addr, tg, num_pages,
|
|
|
|
ttl, leaf, stage);
|
2024-07-15 11:45:15 +03:00
|
|
|
smmuv3_inv_notifiers_iova(s, asid, vmid, addr, tg, num_pages, stage);
|
2024-07-15 11:45:12 +03:00
|
|
|
if (stage == SMMU_STAGE_1) {
|
|
|
|
smmu_iotlb_inv_iova(s, asid, vmid, addr, tg, num_pages, ttl);
|
|
|
|
} else {
|
|
|
|
smmu_iotlb_inv_ipa(s, vmid, addr, tg, num_pages, ttl);
|
|
|
|
}
|
2021-05-10 15:47:47 +03:00
|
|
|
addr += mask + 1;
|
2021-03-09 13:27:40 +03:00
|
|
|
}
|
2020-07-28 18:08:10 +03:00
|
|
|
}
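/*
 * Worked example (illustrative values only, 4KB granule i.e. tg == 1):
 * a range invalidation with addr == 0x5000, scale == 0 and num == 6 gives
 * num_pages == 7 and end == 0xBFFF. The loop above then emits three
 * power-of-2 aligned invalidations:
 *   0x5000..0x5FFF (1 page), 0x6000..0x7FFF (2 pages), 0x8000..0xBFFF (4 pages)
 * because dma_aligned_pow2_mask() never returns a chunk larger than the
 * alignment of the current address or the remaining range.
 */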
|
|
|
|
|
2021-03-09 13:27:41 +03:00
|
|
|
static gboolean
|
|
|
|
smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
|
|
|
|
{
|
|
|
|
SMMUDevice *sdev = (SMMUDevice *)key;
|
|
|
|
uint32_t sid = smmu_get_sid(sdev);
|
|
|
|
SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;
|
|
|
|
|
|
|
|
if (sid < sid_range->start || sid > sid_range->end) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
trace_smmuv3_config_cache_inv(sid);
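    /* Returning true tells g_hash_table_foreach_remove() to drop this cached config. */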
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-05-04 20:05:51 +03:00
|
|
|
static int smmuv3_cmdq_consume(SMMUv3State *s)
|
2018-05-04 20:05:51 +03:00
|
|
|
{
|
2018-06-26 19:50:42 +03:00
|
|
|
SMMUState *bs = ARM_SMMU(s);
|
2018-05-04 20:05:51 +03:00
|
|
|
SMMUCmdError cmd_error = SMMU_CERROR_NONE;
|
|
|
|
SMMUQueue *q = &s->cmdq;
|
|
|
|
SMMUCommandType type = 0;
|
|
|
|
|
|
|
|
if (!smmuv3_cmdq_enabled(s)) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
     * Some commands depend on register values, typically CR0. In case those
|
|
|
|
     * register values change while handling the command, the spec says it
|
|
|
|
* is UNPREDICTABLE whether the command is interpreted under the new
|
|
|
|
* or old value.
|
|
|
|
*/
|
|
|
|
|
|
|
|
while (!smmuv3_q_empty(q)) {
|
|
|
|
uint32_t pending = s->gerror ^ s->gerrorn;
|
|
|
|
Cmd cmd;
|
|
|
|
|
|
|
|
trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
|
|
|
|
Q_PROD_WRAP(q), Q_CONS_WRAP(q));
|
|
|
|
|
|
|
|
if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (queue_read(q, &cmd) != MEMTX_OK) {
|
|
|
|
cmd_error = SMMU_CERROR_ABT;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
type = CMD_TYPE(&cmd);
|
|
|
|
|
|
|
|
trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));
|
|
|
|
|
2018-06-26 19:50:42 +03:00
|
|
|
qemu_mutex_lock(&s->mutex);
|
2018-05-04 20:05:51 +03:00
|
|
|
switch (type) {
|
|
|
|
case SMMU_CMD_SYNC:
|
|
|
|
if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
|
|
|
|
smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case SMMU_CMD_PREFETCH_CONFIG:
|
|
|
|
case SMMU_CMD_PREFETCH_ADDR:
|
2018-06-26 19:50:42 +03:00
|
|
|
break;
|
2018-05-04 20:05:51 +03:00
|
|
|
case SMMU_CMD_CFGI_STE:
|
2018-06-26 19:50:42 +03:00
|
|
|
{
|
|
|
|
uint32_t sid = CMD_SID(&cmd);
|
2024-06-19 03:22:18 +03:00
|
|
|
SMMUDevice *sdev = smmu_find_sdev(bs, sid);
|
2018-06-26 19:50:42 +03:00
|
|
|
|
|
|
|
if (CMD_SSEC(&cmd)) {
|
|
|
|
cmd_error = SMMU_CERROR_ILL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2024-06-19 03:22:18 +03:00
|
|
|
if (!sdev) {
|
2018-06-26 19:50:42 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_smmuv3_cmdq_cfgi_ste(sid);
|
|
|
|
smmuv3_flush_config(sdev);
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
2018-05-04 20:05:51 +03:00
|
|
|
case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
|
2018-06-26 19:50:42 +03:00
|
|
|
{
|
2021-04-02 13:04:49 +03:00
|
|
|
uint32_t sid = CMD_SID(&cmd), mask;
|
2018-06-26 19:50:42 +03:00
|
|
|
uint8_t range = CMD_STE_RANGE(&cmd);
|
2021-04-02 13:04:49 +03:00
|
|
|
SMMUSIDRange sid_range;
|
2018-06-26 19:50:42 +03:00
|
|
|
|
|
|
|
if (CMD_SSEC(&cmd)) {
|
|
|
|
cmd_error = SMMU_CERROR_ILL;
|
|
|
|
break;
|
|
|
|
}
|
2021-04-02 13:04:49 +03:00
|
|
|
|
|
|
|
mask = (1ULL << (range + 1)) - 1;
|
|
|
|
sid_range.start = sid & ~mask;
|
|
|
|
sid_range.end = sid_range.start + mask;
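            /*
             * Illustration: for SID == 0x1234 and Range == 4, mask == 0x1F,
             * so STEs 0x1220..0x123F (32 entries) get invalidated.
             */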
|
|
|
|
|
|
|
|
trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
|
2021-03-09 13:27:41 +03:00
|
|
|
g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
|
|
|
|
&sid_range);
|
2018-06-26 19:50:42 +03:00
|
|
|
break;
|
|
|
|
}
|
2018-05-04 20:05:51 +03:00
|
|
|
case SMMU_CMD_CFGI_CD:
|
|
|
|
case SMMU_CMD_CFGI_CD_ALL:
|
2018-06-26 19:50:42 +03:00
|
|
|
{
|
|
|
|
uint32_t sid = CMD_SID(&cmd);
|
2024-06-19 03:22:18 +03:00
|
|
|
SMMUDevice *sdev = smmu_find_sdev(bs, sid);
|
2018-06-26 19:50:42 +03:00
|
|
|
|
|
|
|
if (CMD_SSEC(&cmd)) {
|
|
|
|
cmd_error = SMMU_CERROR_ILL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2024-06-19 03:22:18 +03:00
|
|
|
if (!sdev) {
|
2018-06-26 19:50:42 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_smmuv3_cmdq_cfgi_cd(sid);
|
|
|
|
smmuv3_flush_config(sdev);
|
|
|
|
break;
|
|
|
|
}
|
2018-05-04 20:05:51 +03:00
|
|
|
case SMMU_CMD_TLBI_NH_ASID:
|
2018-06-26 19:50:42 +03:00
|
|
|
{
|
2024-07-15 11:45:06 +03:00
|
|
|
int asid = CMD_ASID(&cmd);
|
2024-07-15 11:45:14 +03:00
|
|
|
int vmid = -1;
|
2018-06-26 19:50:42 +03:00
|
|
|
|
2023-05-25 12:37:51 +03:00
|
|
|
if (!STAGE1_SUPPORTED(s)) {
|
|
|
|
cmd_error = SMMU_CERROR_ILL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2024-07-15 11:45:14 +03:00
|
|
|
/*
|
|
|
|
* VMID is only matched when stage 2 is supported, otherwise set it
|
|
|
|
* to -1 as the value used for stage-1 only VMIDs.
|
|
|
|
*/
|
|
|
|
if (STAGE2_SUPPORTED(s)) {
|
|
|
|
vmid = CMD_VMID(&cmd);
|
|
|
|
}
|
|
|
|
|
2018-06-26 19:50:42 +03:00
|
|
|
trace_smmuv3_cmdq_tlbi_nh_asid(asid);
|
2018-06-26 19:50:42 +03:00
|
|
|
smmu_inv_notifiers_all(&s->smmu_state);
|
2024-07-15 11:45:14 +03:00
|
|
|
smmu_iotlb_inv_asid_vmid(bs, asid, vmid);
|
2018-06-26 19:50:42 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case SMMU_CMD_TLBI_NH_ALL:
|
2024-07-15 11:45:14 +03:00
|
|
|
{
|
|
|
|
int vmid = -1;
|
|
|
|
|
2023-05-25 12:37:51 +03:00
|
|
|
if (!STAGE1_SUPPORTED(s)) {
|
|
|
|
cmd_error = SMMU_CERROR_ILL;
|
|
|
|
break;
|
|
|
|
}
|
2024-07-15 11:45:14 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If stage-2 is supported, invalidate for this VMID only, otherwise
|
|
|
|
* invalidate the whole thing.
|
|
|
|
*/
|
|
|
|
if (STAGE2_SUPPORTED(s)) {
|
|
|
|
vmid = CMD_VMID(&cmd);
|
|
|
|
trace_smmuv3_cmdq_tlbi_nh(vmid);
|
|
|
|
smmu_iotlb_inv_vmid_s1(bs, vmid);
|
|
|
|
break;
|
|
|
|
}
|
2023-05-25 12:37:51 +03:00
|
|
|
QEMU_FALLTHROUGH;
|
2024-07-15 11:45:14 +03:00
|
|
|
}
|
2018-06-26 19:50:42 +03:00
|
|
|
case SMMU_CMD_TLBI_NSNH_ALL:
|
2024-07-15 11:45:14 +03:00
|
|
|
trace_smmuv3_cmdq_tlbi_nsnh();
|
2018-06-26 19:50:42 +03:00
|
|
|
smmu_inv_notifiers_all(&s->smmu_state);
|
2018-06-26 19:50:42 +03:00
|
|
|
smmu_iotlb_inv_all(bs);
|
|
|
|
break;
|
2018-05-04 20:05:51 +03:00
|
|
|
case SMMU_CMD_TLBI_NH_VAA:
|
2018-06-26 19:50:42 +03:00
|
|
|
case SMMU_CMD_TLBI_NH_VA:
|
2023-05-25 12:37:51 +03:00
|
|
|
if (!STAGE1_SUPPORTED(s)) {
|
|
|
|
cmd_error = SMMU_CERROR_ILL;
|
|
|
|
break;
|
|
|
|
}
|
2024-07-15 11:45:12 +03:00
|
|
|
smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1);
|
2023-05-25 12:37:51 +03:00
|
|
|
break;
|
|
|
|
case SMMU_CMD_TLBI_S12_VMALL:
|
|
|
|
{
|
2024-07-15 11:45:06 +03:00
|
|
|
int vmid = CMD_VMID(&cmd);
|
2023-05-25 12:37:51 +03:00
|
|
|
|
|
|
|
if (!STAGE2_SUPPORTED(s)) {
|
|
|
|
cmd_error = SMMU_CERROR_ILL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_smmuv3_cmdq_tlbi_s12_vmid(vmid);
|
|
|
|
smmu_inv_notifiers_all(&s->smmu_state);
|
|
|
|
smmu_iotlb_inv_vmid(bs, vmid);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case SMMU_CMD_TLBI_S2_IPA:
|
|
|
|
if (!STAGE2_SUPPORTED(s)) {
|
|
|
|
cmd_error = SMMU_CERROR_ILL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
             * As currently only either s1 or s2 is supported,
|
|
|
|
             * we can reuse the same function for s2.
|
|
|
|
*/
|
2024-07-15 11:45:12 +03:00
|
|
|
smmuv3_range_inval(bs, &cmd, SMMU_STAGE_2);
|
2018-06-26 19:50:42 +03:00
|
|
|
break;
|
2018-05-04 20:05:51 +03:00
|
|
|
case SMMU_CMD_TLBI_EL3_ALL:
|
|
|
|
case SMMU_CMD_TLBI_EL3_VA:
|
|
|
|
case SMMU_CMD_TLBI_EL2_ALL:
|
|
|
|
case SMMU_CMD_TLBI_EL2_ASID:
|
|
|
|
case SMMU_CMD_TLBI_EL2_VA:
|
|
|
|
case SMMU_CMD_TLBI_EL2_VAA:
|
|
|
|
case SMMU_CMD_ATC_INV:
|
|
|
|
case SMMU_CMD_PRI_RESP:
|
|
|
|
case SMMU_CMD_RESUME:
|
|
|
|
case SMMU_CMD_STALL_TERM:
|
|
|
|
trace_smmuv3_unhandled_cmd(type);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
cmd_error = SMMU_CERROR_ILL;
|
|
|
|
break;
|
|
|
|
}
|
2018-06-26 19:50:42 +03:00
|
|
|
qemu_mutex_unlock(&s->mutex);
|
2018-05-04 20:05:51 +03:00
|
|
|
if (cmd_error) {
|
2023-05-25 12:37:51 +03:00
|
|
|
if (cmd_error == SMMU_CERROR_ILL) {
|
|
|
|
qemu_log_mask(LOG_GUEST_ERROR,
|
|
|
|
"Illegal command type: %d\n", CMD_TYPE(&cmd));
|
|
|
|
}
|
2018-05-04 20:05:51 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* We only increment the cons index after the completion of
|
|
|
|
* the command. We do that because the SYNC returns immediately
|
|
|
|
         * and does not check the completion of previous commands.
|
|
|
|
*/
|
|
|
|
queue_cons_incr(q);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cmd_error) {
|
|
|
|
trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
|
|
|
|
smmu_write_cmdq_err(s, cmd_error);
|
|
|
|
smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
|
|
|
|
}
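    /*
     * Recovery sketch (based on the register handlers below): once
     * CMDQ_ERR is set, command processing stops until the guest fixes the
     * offending command and toggles the corresponding GERRORN bit; the
     * GERRORN write handler then calls smmuv3_cmdq_consume() again.
     */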
|
|
|
|
|
|
|
|
trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
|
|
|
|
Q_PROD_WRAP(q), Q_CONS_WRAP(q));
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-04 20:05:51 +03:00
|
|
|
static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
|
|
|
|
uint64_t data, MemTxAttrs attrs)
|
|
|
|
{
|
|
|
|
switch (offset) {
|
|
|
|
case A_GERROR_IRQ_CFG0:
|
|
|
|
s->gerror_irq_cfg0 = data;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_STRTAB_BASE:
|
|
|
|
s->strtab_base = data;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_CMDQ_BASE:
|
|
|
|
s->cmdq.base = data;
|
|
|
|
s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
|
|
|
|
if (s->cmdq.log2size > SMMU_CMDQS) {
|
|
|
|
s->cmdq.log2size = SMMU_CMDQS;
|
|
|
|
}
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_EVENTQ_BASE:
|
|
|
|
s->eventq.base = data;
|
|
|
|
s->eventq.log2size = extract64(s->eventq.base, 0, 5);
|
|
|
|
if (s->eventq.log2size > SMMU_EVENTQS) {
|
|
|
|
s->eventq.log2size = SMMU_EVENTQS;
|
|
|
|
}
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_EVENTQ_IRQ_CFG0:
|
|
|
|
s->eventq_irq_cfg0 = data;
|
|
|
|
return MEMTX_OK;
|
|
|
|
default:
|
|
|
|
qemu_log_mask(LOG_UNIMP,
|
|
|
|
"%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
|
|
|
|
__func__, offset);
|
|
|
|
return MEMTX_OK;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
|
|
|
|
uint64_t data, MemTxAttrs attrs)
|
|
|
|
{
|
|
|
|
switch (offset) {
|
|
|
|
case A_CR0:
|
|
|
|
s->cr[0] = data;
|
|
|
|
s->cr0ack = data & ~SMMU_CR0_RESERVED;
|
|
|
|
/* in case the command queue has been enabled */
|
|
|
|
smmuv3_cmdq_consume(s);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_CR1:
|
|
|
|
s->cr[1] = data;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_CR2:
|
|
|
|
s->cr[2] = data;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_IRQ_CTRL:
|
|
|
|
s->irq_ctrl = data;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_GERRORN:
|
|
|
|
smmuv3_write_gerrorn(s, data);
|
|
|
|
/*
|
|
|
|
         * By acknowledging the CMDQ_ERR, SW notifies the SMMU that commands can
|
|
|
|
         * be processed again.
|
|
|
|
*/
|
|
|
|
smmuv3_cmdq_consume(s);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_GERROR_IRQ_CFG0: /* 64b */
|
|
|
|
s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_GERROR_IRQ_CFG0 + 4:
|
|
|
|
s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_GERROR_IRQ_CFG1:
|
|
|
|
s->gerror_irq_cfg1 = data;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_GERROR_IRQ_CFG2:
|
|
|
|
s->gerror_irq_cfg2 = data;
|
|
|
|
return MEMTX_OK;
|
2023-02-14 12:40:09 +03:00
|
|
|
case A_GBPA:
|
|
|
|
/*
|
|
|
|
* If UPDATE is not set, the write is ignored. This is the only
|
|
|
|
* permitted behavior in SMMUv3.2 and later.
|
|
|
|
*/
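        /*
         * Example (assuming the architected layout, UPDATE == bit 31 and
         * ABORT == bit 20): a guest write of 0x80100000 latches ABORT, so
         * that while the SMMU is disabled transactions abort instead of
         * bypassing (see the GBPA check in smmuv3_translate()).
         */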
|
|
|
|
if (data & R_GBPA_UPDATE_MASK) {
|
|
|
|
/* Ignore update bit as write is synchronous. */
|
|
|
|
s->gbpa = data & ~R_GBPA_UPDATE_MASK;
|
|
|
|
}
|
|
|
|
return MEMTX_OK;
|
2018-05-04 20:05:51 +03:00
|
|
|
case A_STRTAB_BASE: /* 64b */
|
|
|
|
s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_STRTAB_BASE + 4:
|
|
|
|
s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_STRTAB_BASE_CFG:
|
|
|
|
s->strtab_base_cfg = data;
|
|
|
|
if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
|
|
|
|
s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
|
|
|
|
s->features |= SMMU_FEATURE_2LVL_STE;
|
|
|
|
}
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_CMDQ_BASE: /* 64b */
|
|
|
|
s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
|
|
|
|
s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
|
|
|
|
if (s->cmdq.log2size > SMMU_CMDQS) {
|
|
|
|
s->cmdq.log2size = SMMU_CMDQS;
|
|
|
|
}
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_CMDQ_BASE + 4: /* 64b */
|
|
|
|
s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_CMDQ_PROD:
|
|
|
|
s->cmdq.prod = data;
|
|
|
|
smmuv3_cmdq_consume(s);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_CMDQ_CONS:
|
|
|
|
s->cmdq.cons = data;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_EVENTQ_BASE: /* 64b */
|
|
|
|
s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
|
|
|
|
s->eventq.log2size = extract64(s->eventq.base, 0, 5);
|
|
|
|
if (s->eventq.log2size > SMMU_EVENTQS) {
|
|
|
|
s->eventq.log2size = SMMU_EVENTQS;
|
|
|
|
}
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_EVENTQ_BASE + 4:
|
|
|
|
s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_EVENTQ_PROD:
|
|
|
|
s->eventq.prod = data;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_EVENTQ_CONS:
|
|
|
|
s->eventq.cons = data;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_EVENTQ_IRQ_CFG0: /* 64b */
|
|
|
|
s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_EVENTQ_IRQ_CFG0 + 4:
|
|
|
|
s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_EVENTQ_IRQ_CFG1:
|
|
|
|
s->eventq_irq_cfg1 = data;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_EVENTQ_IRQ_CFG2:
|
|
|
|
s->eventq_irq_cfg2 = data;
|
|
|
|
return MEMTX_OK;
|
|
|
|
default:
|
|
|
|
qemu_log_mask(LOG_UNIMP,
|
|
|
|
"%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
|
|
|
|
__func__, offset);
|
|
|
|
return MEMTX_OK;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-04 20:05:51 +03:00
|
|
|
static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
|
|
|
|
unsigned size, MemTxAttrs attrs)
|
|
|
|
{
|
2018-05-04 20:05:51 +03:00
|
|
|
SMMUState *sys = opaque;
|
|
|
|
SMMUv3State *s = ARM_SMMUV3(sys);
|
|
|
|
MemTxResult r;
|
|
|
|
|
|
|
|
/* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
|
|
|
|
offset &= ~0x10000;
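    /*
     * e.g. an access through page 1 at offset 0x100AC (EVENTQ_CONS,
     * assuming the usual SMMUv3 register offsets) is handled as 0xAC.
     */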
|
|
|
|
|
|
|
|
switch (size) {
|
|
|
|
case 8:
|
|
|
|
r = smmu_writell(s, offset, data, attrs);
|
|
|
|
break;
|
|
|
|
case 4:
|
|
|
|
r = smmu_writel(s, offset, data, attrs);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
r = MEMTX_ERROR;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_smmuv3_write_mmio(offset, data, size, r);
|
|
|
|
return r;
|
2018-05-04 20:05:51 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
|
|
|
|
uint64_t *data, MemTxAttrs attrs)
|
|
|
|
{
|
|
|
|
switch (offset) {
|
|
|
|
case A_GERROR_IRQ_CFG0:
|
|
|
|
*data = s->gerror_irq_cfg0;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_STRTAB_BASE:
|
|
|
|
*data = s->strtab_base;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_CMDQ_BASE:
|
|
|
|
*data = s->cmdq.base;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_EVENTQ_BASE:
|
|
|
|
*data = s->eventq.base;
|
|
|
|
return MEMTX_OK;
|
|
|
|
default:
|
|
|
|
*data = 0;
|
|
|
|
qemu_log_mask(LOG_UNIMP,
|
|
|
|
"%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
|
|
|
|
__func__, offset);
|
|
|
|
return MEMTX_OK;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
|
|
|
|
uint64_t *data, MemTxAttrs attrs)
|
|
|
|
{
|
|
|
|
switch (offset) {
|
2019-05-24 15:48:29 +03:00
|
|
|
case A_IDREGS ... A_IDREGS + 0x2f:
|
2018-05-04 20:05:51 +03:00
|
|
|
*data = smmuv3_idreg(offset - A_IDREGS);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_IDR0 ... A_IDR5:
|
|
|
|
*data = s->idr[(offset - A_IDR0) / 4];
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_IIDR:
|
|
|
|
*data = s->iidr;
|
|
|
|
return MEMTX_OK;
|
2020-07-28 18:08:13 +03:00
|
|
|
case A_AIDR:
|
|
|
|
*data = s->aidr;
|
|
|
|
return MEMTX_OK;
|
2018-05-04 20:05:51 +03:00
|
|
|
case A_CR0:
|
|
|
|
*data = s->cr[0];
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_CR0ACK:
|
|
|
|
*data = s->cr0ack;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_CR1:
|
|
|
|
*data = s->cr[1];
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_CR2:
|
|
|
|
*data = s->cr[2];
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_STATUSR:
|
|
|
|
*data = s->statusr;
|
|
|
|
return MEMTX_OK;
|
2023-02-14 12:40:09 +03:00
|
|
|
case A_GBPA:
|
|
|
|
*data = s->gbpa;
|
|
|
|
return MEMTX_OK;
|
2018-05-04 20:05:51 +03:00
|
|
|
case A_IRQ_CTRL:
|
|
|
|
case A_IRQ_CTRL_ACK:
|
|
|
|
*data = s->irq_ctrl;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_GERROR:
|
|
|
|
*data = s->gerror;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_GERRORN:
|
|
|
|
*data = s->gerrorn;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_GERROR_IRQ_CFG0: /* 64b */
|
|
|
|
*data = extract64(s->gerror_irq_cfg0, 0, 32);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_GERROR_IRQ_CFG0 + 4:
|
|
|
|
*data = extract64(s->gerror_irq_cfg0, 32, 32);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_GERROR_IRQ_CFG1:
|
|
|
|
*data = s->gerror_irq_cfg1;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_GERROR_IRQ_CFG2:
|
|
|
|
*data = s->gerror_irq_cfg2;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_STRTAB_BASE: /* 64b */
|
|
|
|
*data = extract64(s->strtab_base, 0, 32);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_STRTAB_BASE + 4: /* 64b */
|
|
|
|
*data = extract64(s->strtab_base, 32, 32);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_STRTAB_BASE_CFG:
|
|
|
|
*data = s->strtab_base_cfg;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_CMDQ_BASE: /* 64b */
|
|
|
|
*data = extract64(s->cmdq.base, 0, 32);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_CMDQ_BASE + 4:
|
|
|
|
*data = extract64(s->cmdq.base, 32, 32);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_CMDQ_PROD:
|
|
|
|
*data = s->cmdq.prod;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_CMDQ_CONS:
|
|
|
|
*data = s->cmdq.cons;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_EVENTQ_BASE: /* 64b */
|
|
|
|
*data = extract64(s->eventq.base, 0, 32);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_EVENTQ_BASE + 4: /* 64b */
|
|
|
|
*data = extract64(s->eventq.base, 32, 32);
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_EVENTQ_PROD:
|
|
|
|
*data = s->eventq.prod;
|
|
|
|
return MEMTX_OK;
|
|
|
|
case A_EVENTQ_CONS:
|
|
|
|
*data = s->eventq.cons;
|
|
|
|
return MEMTX_OK;
|
|
|
|
default:
|
|
|
|
*data = 0;
|
|
|
|
qemu_log_mask(LOG_UNIMP,
|
|
|
|
"%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
|
|
|
|
__func__, offset);
|
|
|
|
return MEMTX_OK;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
|
|
|
|
unsigned size, MemTxAttrs attrs)
|
|
|
|
{
|
|
|
|
SMMUState *sys = opaque;
|
|
|
|
SMMUv3State *s = ARM_SMMUV3(sys);
|
|
|
|
MemTxResult r;
|
|
|
|
|
|
|
|
/* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
|
|
|
|
offset &= ~0x10000;
|
|
|
|
|
|
|
|
switch (size) {
|
|
|
|
case 8:
|
|
|
|
r = smmu_readll(s, offset, data, attrs);
|
|
|
|
break;
|
|
|
|
case 4:
|
|
|
|
r = smmu_readl(s, offset, data, attrs);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
r = MEMTX_ERROR;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_smmuv3_read_mmio(offset, *data, size, r);
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const MemoryRegionOps smmu_mem_ops = {
|
|
|
|
.read_with_attrs = smmu_read_mmio,
|
|
|
|
.write_with_attrs = smmu_write_mmio,
|
|
|
|
.endianness = DEVICE_LITTLE_ENDIAN,
|
|
|
|
.valid = {
|
|
|
|
.min_access_size = 4,
|
|
|
|
.max_access_size = 8,
|
|
|
|
},
|
|
|
|
.impl = {
|
|
|
|
.min_access_size = 4,
|
|
|
|
.max_access_size = 8,
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
|
|
|
|
sysbus_init_irq(dev, &s->irq[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-04-12 19:08:07 +03:00
|
|
|
static void smmu_reset_hold(Object *obj, ResetType type)
|
2018-05-04 20:05:51 +03:00
|
|
|
{
|
2022-12-14 17:27:10 +03:00
|
|
|
SMMUv3State *s = ARM_SMMUV3(obj);
|
2018-05-04 20:05:51 +03:00
|
|
|
SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
|
|
|
|
|
2022-12-14 17:27:10 +03:00
|
|
|
if (c->parent_phases.hold) {
|
2024-04-12 19:08:07 +03:00
|
|
|
c->parent_phases.hold(obj, type);
|
2022-12-14 17:27:10 +03:00
|
|
|
}
|
2018-05-04 20:05:51 +03:00
|
|
|
|
|
|
|
smmuv3_init_regs(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void smmu_realize(DeviceState *d, Error **errp)
|
|
|
|
{
|
|
|
|
SMMUState *sys = ARM_SMMU(d);
|
|
|
|
SMMUv3State *s = ARM_SMMUV3(sys);
|
|
|
|
SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
|
|
|
|
SysBusDevice *dev = SYS_BUS_DEVICE(d);
|
|
|
|
Error *local_err = NULL;
|
|
|
|
|
|
|
|
c->parent_realize(d, &local_err);
|
|
|
|
if (local_err) {
|
|
|
|
error_propagate(errp, local_err);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-06-26 19:50:42 +03:00
|
|
|
qemu_mutex_init(&s->mutex);
|
|
|
|
|
2018-05-04 20:05:51 +03:00
|
|
|
memory_region_init_io(&sys->iomem, OBJECT(s),
|
|
|
|
&smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);
|
|
|
|
|
|
|
|
sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;
|
|
|
|
|
|
|
|
sysbus_init_mmio(dev, &sys->iomem);
|
|
|
|
|
|
|
|
smmu_init_irq(s, dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const VMStateDescription vmstate_smmuv3_queue = {
|
|
|
|
.name = "smmuv3_queue",
|
|
|
|
.version_id = 1,
|
|
|
|
.minimum_version_id = 1,
|
2023-12-21 06:15:59 +03:00
|
|
|
.fields = (const VMStateField[]) {
|
2018-05-04 20:05:51 +03:00
|
|
|
VMSTATE_UINT64(base, SMMUQueue),
|
|
|
|
VMSTATE_UINT32(prod, SMMUQueue),
|
|
|
|
VMSTATE_UINT32(cons, SMMUQueue),
|
|
|
|
VMSTATE_UINT8(log2size, SMMUQueue),
|
2018-07-27 16:54:06 +03:00
|
|
|
VMSTATE_END_OF_LIST(),
|
2018-05-04 20:05:51 +03:00
|
|
|
},
|
|
|
|
};
|
|
|
|
|
2023-02-14 12:40:09 +03:00
|
|
|
static bool smmuv3_gbpa_needed(void *opaque)
|
|
|
|
{
|
|
|
|
SMMUv3State *s = opaque;
|
|
|
|
|
|
|
|
    /* Only migrate GBPA if it differs from its reset value. */
|
|
|
|
return s->gbpa != SMMU_GBPA_RESET_VAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const VMStateDescription vmstate_gbpa = {
|
|
|
|
.name = "smmuv3/gbpa",
|
|
|
|
.version_id = 1,
|
|
|
|
.minimum_version_id = 1,
|
|
|
|
.needed = smmuv3_gbpa_needed,
|
2023-12-21 06:15:59 +03:00
|
|
|
.fields = (const VMStateField[]) {
|
2023-02-14 12:40:09 +03:00
|
|
|
VMSTATE_UINT32(gbpa, SMMUv3State),
|
|
|
|
VMSTATE_END_OF_LIST()
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2018-05-04 20:05:51 +03:00
|
|
|
static const VMStateDescription vmstate_smmuv3 = {
|
|
|
|
.name = "smmuv3",
|
|
|
|
.version_id = 1,
|
|
|
|
.minimum_version_id = 1,
|
2020-10-19 12:15:08 +03:00
|
|
|
.priority = MIG_PRI_IOMMU,
|
2023-12-21 06:15:59 +03:00
|
|
|
.fields = (const VMStateField[]) {
|
2018-05-04 20:05:51 +03:00
|
|
|
VMSTATE_UINT32(features, SMMUv3State),
|
|
|
|
VMSTATE_UINT8(sid_size, SMMUv3State),
|
|
|
|
VMSTATE_UINT8(sid_split, SMMUv3State),
|
|
|
|
|
|
|
|
VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
|
|
|
|
VMSTATE_UINT32(cr0ack, SMMUv3State),
|
|
|
|
VMSTATE_UINT32(statusr, SMMUv3State),
|
|
|
|
VMSTATE_UINT32(irq_ctrl, SMMUv3State),
|
|
|
|
VMSTATE_UINT32(gerror, SMMUv3State),
|
|
|
|
VMSTATE_UINT32(gerrorn, SMMUv3State),
|
|
|
|
VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
|
|
|
|
VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
|
|
|
|
VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
|
|
|
|
VMSTATE_UINT64(strtab_base, SMMUv3State),
|
|
|
|
VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
|
|
|
|
VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
|
|
|
|
VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
|
|
|
|
VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),
|
|
|
|
|
|
|
|
VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
|
|
|
|
VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
|
|
|
|
|
|
|
|
VMSTATE_END_OF_LIST(),
|
|
|
|
},
|
2023-12-21 06:15:59 +03:00
|
|
|
.subsections = (const VMStateDescription * const []) {
|
2023-02-14 12:40:09 +03:00
|
|
|
&vmstate_gbpa,
|
|
|
|
NULL
|
|
|
|
}
|
2018-05-04 20:05:51 +03:00
|
|
|
};
|
|
|
|
|
2023-05-25 12:37:51 +03:00
|
|
|
static Property smmuv3_properties[] = {
|
|
|
|
/*
|
|
|
|
* Stages of translation advertised.
|
|
|
|
* "1": Stage 1
|
|
|
|
* "2": Stage 2
|
|
|
|
* Defaults to stage 1
|
|
|
|
*/
|
|
|
|
DEFINE_PROP_STRING("stage", SMMUv3State, stage),
|
|
|
|
DEFINE_PROP_END_OF_LIST()
|
|
|
|
};
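/*
 * Usage sketch (assumed, not part of this file): with the SMMU created by
 * the board, e.g. -machine virt,iommu=smmuv3, the advertised stage can
 * typically be overridden with a global property such as
 * -global arm-smmuv3.stage=2.
 */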
|
|
|
|
|
2018-05-04 20:05:51 +03:00
|
|
|
static void smmuv3_instance_init(Object *obj)
|
|
|
|
{
|
|
|
|
/* Nothing much to do here as of now */
|
|
|
|
}
|
|
|
|
|
|
|
|
static void smmuv3_class_init(ObjectClass *klass, void *data)
|
|
|
|
{
|
|
|
|
DeviceClass *dc = DEVICE_CLASS(klass);
|
2022-12-14 17:27:10 +03:00
|
|
|
ResettableClass *rc = RESETTABLE_CLASS(klass);
|
2018-05-04 20:05:51 +03:00
|
|
|
SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);
|
|
|
|
|
|
|
|
dc->vmsd = &vmstate_smmuv3;
|
2022-12-14 17:27:10 +03:00
|
|
|
resettable_class_set_parent_phases(rc, NULL, smmu_reset_hold, NULL,
|
|
|
|
&c->parent_phases);
|
2024-02-01 11:40:27 +03:00
|
|
|
device_class_set_parent_realize(dc, smmu_realize,
|
|
|
|
&c->parent_realize);
|
2023-05-25 12:37:51 +03:00
|
|
|
device_class_set_props(dc, smmuv3_properties);
|
2018-05-04 20:05:51 +03:00
|
|
|
}
|
|
|
|
|
2019-09-24 11:25:17 +03:00
|
|
|
static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
|
|
|
|
IOMMUNotifierFlag old,
|
|
|
|
IOMMUNotifierFlag new,
|
|
|
|
Error **errp)
|
2018-05-04 20:05:52 +03:00
|
|
|
{
|
2018-06-26 19:50:42 +03:00
|
|
|
SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
|
|
|
|
SMMUv3State *s3 = sdev->smmu;
|
|
|
|
SMMUState *s = &(s3->smmu_state);
|
|
|
|
|
2021-02-04 22:12:28 +03:00
|
|
|
if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
|
|
|
|
error_setg(errp, "SMMUv3 does not support dev-iotlb yet");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2018-06-26 19:50:42 +03:00
|
|
|
if (new & IOMMU_NOTIFIER_MAP) {
|
2019-09-24 11:25:17 +03:00
|
|
|
error_setg(errp,
|
|
|
|
"device %02x.%02x.%x requires iommu MAP notifier which is "
|
|
|
|
"not currently supported", pci_bus_num(sdev->bus),
|
|
|
|
PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
|
|
|
|
return -EINVAL;
|
2018-06-26 19:50:42 +03:00
|
|
|
}
|
|
|
|
|
2018-05-04 20:05:52 +03:00
|
|
|
if (old == IOMMU_NOTIFIER_NONE) {
|
2018-06-26 19:50:42 +03:00
|
|
|
trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
|
2019-04-29 19:35:57 +03:00
|
|
|
QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
|
|
|
|
} else if (new == IOMMU_NOTIFIER_NONE) {
|
|
|
|
trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
|
|
|
|
QLIST_REMOVE(sdev, next);
|
2018-05-04 20:05:52 +03:00
|
|
|
}
|
2019-09-24 11:25:17 +03:00
|
|
|
return 0;
|
2018-05-04 20:05:52 +03:00
|
|
|
}
|
|
|
|
|
2018-05-04 20:05:51 +03:00
|
|
|
static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
|
|
|
|
void *data)
|
|
|
|
{
|
2018-05-04 20:05:51 +03:00
|
|
|
IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
|
|
|
|
|
|
|
|
imrc->translate = smmuv3_translate;
|
2018-05-04 20:05:52 +03:00
|
|
|
imrc->notify_flag_changed = smmuv3_notify_flag_changed;
|
2018-05-04 20:05:51 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static const TypeInfo smmuv3_type_info = {
|
|
|
|
.name = TYPE_ARM_SMMUV3,
|
|
|
|
.parent = TYPE_ARM_SMMU,
|
|
|
|
.instance_size = sizeof(SMMUv3State),
|
|
|
|
.instance_init = smmuv3_instance_init,
|
|
|
|
.class_size = sizeof(SMMUv3Class),
|
|
|
|
.class_init = smmuv3_class_init,
|
|
|
|
};
|
|
|
|
|
|
|
|
static const TypeInfo smmuv3_iommu_memory_region_info = {
|
|
|
|
.parent = TYPE_IOMMU_MEMORY_REGION,
|
|
|
|
.name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
|
|
|
|
.class_init = smmuv3_iommu_memory_region_class_init,
|
|
|
|
};
|
|
|
|
|
|
|
|
static void smmuv3_register_types(void)
|
|
|
|
{
|
|
|
|
type_register(&smmuv3_type_info);
|
|
|
|
type_register(&smmuv3_iommu_memory_region_info);
|
|
|
|
}
|
|
|
|
|
|
|
|
type_init(smmuv3_register_types)
|
|
|
|
|