/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Prem Mallappa <pmallapp@broadcom.com>
 *
 */

#include "qemu/osdep.h"
|
|
|
|
#include "trace.h"
|
|
|
|
#include "exec/target_page.h"
|
2019-07-09 18:20:52 +03:00
|
|
|
#include "hw/core/cpu.h"
|
2018-05-04 20:05:51 +03:00
|
|
|
#include "hw/qdev-properties.h"
|
|
|
|
#include "qapi/error.h"
|
2018-06-26 19:50:42 +03:00
|
|
|
#include "qemu/jhash.h"
|
2019-05-23 17:35:07 +03:00
|
|
|
#include "qemu/module.h"
|
2018-05-04 20:05:51 +03:00
|
|
|
|
|
|
|
#include "qemu/error-report.h"
|
|
|
|
#include "hw/arm/smmu-common.h"
|
2018-05-04 20:05:51 +03:00
|
|
|
#include "smmu-internal.h"
|
|
|
|
|
2018-06-26 19:50:42 +03:00
|
|
|
/* IOTLB Management */
|
|
|
|
|
2020-07-28 18:08:07 +03:00
|
|
|
/* Hash an SMMUIOTLBKey for the IOTLB GHashTable (Jenkins hash). */
static guint smmu_iotlb_key_hash(gconstpointer v)
{
    const SMMUIOTLBKey *key = v;
    uint32_t a, b, c;

    /* Seed all three lanes with the standard Jenkins initializer. */
    a = b = c = JHASH_INITVAL + sizeof(*key);
    /* Fold the scalar fields into one lane, split the 64-bit iova in two. */
    a += key->tg + key->level + key->vmid + key->asid;
    b += extract64(key->iova, 0, 32);
    c += extract64(key->iova, 32, 32);

    __jhash_mix(a, b, c);
    __jhash_final(a, b, c);

    return c;
}
|
|
|
|
|
|
|
|
/* GHashTable equality callback: all key fields must match exactly. */
static gboolean smmu_iotlb_key_equal(gconstpointer v1, gconstpointer v2)
{
    const SMMUIOTLBKey *a = v1;
    const SMMUIOTLBKey *b = v2;

    if (a->asid != b->asid || a->vmid != b->vmid) {
        return false;
    }
    if (a->iova != b->iova) {
        return false;
    }
    return (a->tg == b->tg) && (a->level == b->level);
}
|
|
|
|
|
2024-07-15 11:45:06 +03:00
|
|
|
/* Build an IOTLB hash-table key from the given translation attributes. */
SMMUIOTLBKey smmu_get_iotlb_key(int asid, int vmid, uint64_t iova,
                                uint8_t tg, uint8_t level)
{
    SMMUIOTLBKey key = {
        .asid = asid,
        .vmid = vmid,
        .iova = iova,
        .tg = tg,
        .level = level,
    };

    return key;
}
|
|
|
|
|
hw/arm/smmu-common: Rework TLB lookup for nesting
In the next patch, combine_tlb() will be added which combines 2 TLB
entries into one for nested translations, which chooses the granule
and level from the smallest entry.
This means that with nested translation, an entry can be cached with
the granule of stage-2 and not stage-1.
However, currently, the lookup for an IOVA is done with input stage
granule, which is stage-1 for nested configuration, which will not
work with the above logic.
This patch reworks lookup in that case, so it falls back to stage-2
granule if no entry is found using stage-1 granule.
Also, drop aligning the iova to avoid over-aligning in case the iova
is cached with a smaller granule, the TLB lookup will align the iova
anyway for each granule and level, and the page table walker doesn't
consider the page offset bits.
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20240715084519.1189624-10-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:09 +03:00
|
|
|
static SMMUTLBEntry *smmu_iotlb_lookup_all_levels(SMMUState *bs,
|
|
|
|
SMMUTransCfg *cfg,
|
|
|
|
SMMUTransTableInfo *tt,
|
|
|
|
hwaddr iova)
|
2020-07-28 18:08:06 +03:00
|
|
|
{
|
2020-07-28 18:08:09 +03:00
|
|
|
uint8_t tg = (tt->granule_sz - 10) / 2;
|
|
|
|
uint8_t inputsize = 64 - tt->tsz;
|
|
|
|
uint8_t stride = tt->granule_sz - 3;
|
|
|
|
uint8_t level = 4 - (inputsize - 4) / stride;
|
|
|
|
SMMUTLBEntry *entry = NULL;
|
|
|
|
|
|
|
|
while (level <= 3) {
|
|
|
|
uint64_t subpage_size = 1ULL << level_shift(level, tt->granule_sz);
|
|
|
|
uint64_t mask = subpage_size - 1;
|
|
|
|
SMMUIOTLBKey key;
|
|
|
|
|
2023-05-25 12:37:50 +03:00
|
|
|
key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid,
|
|
|
|
iova & ~mask, tg, level);
|
2020-07-28 18:08:09 +03:00
|
|
|
entry = g_hash_table_lookup(bs->iotlb, &key);
|
|
|
|
if (entry) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
level++;
|
|
|
|
}
|
hw/arm/smmu-common: Rework TLB lookup for nesting
In the next patch, combine_tlb() will be added which combines 2 TLB
entries into one for nested translations, which chooses the granule
and level from the smallest entry.
This means that with nested translation, an entry can be cached with
the granule of stage-2 and not stage-1.
However, currently, the lookup for an IOVA is done with input stage
granule, which is stage-1 for nested configuration, which will not
work with the above logic.
This patch reworks lookup in that case, so it falls back to stage-2
granule if no entry is found using stage-1 granule.
Also, drop aligning the iova to avoid over-aligning in case the iova
is cached with a smaller granule, the TLB lookup will align the iova
anyway for each granule and level, and the page table walker doesn't
consider the page offset bits.
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20240715084519.1189624-10-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:09 +03:00
|
|
|
return entry;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* smmu_iotlb_lookup - Look up for a TLB entry.
|
|
|
|
* @bs: SMMU state which includes the TLB instance
|
|
|
|
* @cfg: Configuration of the translation
|
|
|
|
* @tt: Translation table info (granule and tsz)
|
|
|
|
* @iova: IOVA address to lookup
|
|
|
|
*
|
|
|
|
* returns a valid entry on success, otherwise NULL.
|
|
|
|
* In case of nested translation, tt can be updated to include
|
|
|
|
* the granule of the found entry as it might different from
|
|
|
|
* the IOVA granule.
|
|
|
|
*/
|
|
|
|
SMMUTLBEntry *smmu_iotlb_lookup(SMMUState *bs, SMMUTransCfg *cfg,
|
|
|
|
SMMUTransTableInfo *tt, hwaddr iova)
|
|
|
|
{
|
|
|
|
SMMUTLBEntry *entry = NULL;
|
|
|
|
|
|
|
|
entry = smmu_iotlb_lookup_all_levels(bs, cfg, tt, iova);
|
|
|
|
/*
|
|
|
|
* For nested translation also try the s2 granule, as the TLB will insert
|
|
|
|
* it if the size of s2 tlb entry was smaller.
|
|
|
|
*/
|
|
|
|
if (!entry && (cfg->stage == SMMU_NESTED) &&
|
|
|
|
(cfg->s2cfg.granule_sz != tt->granule_sz)) {
|
|
|
|
tt->granule_sz = cfg->s2cfg.granule_sz;
|
|
|
|
entry = smmu_iotlb_lookup_all_levels(bs, cfg, tt, iova);
|
|
|
|
}
|
2020-07-28 18:08:06 +03:00
|
|
|
|
|
|
|
if (entry) {
|
|
|
|
cfg->iotlb_hits++;
|
2023-05-25 12:37:50 +03:00
|
|
|
trace_smmu_iotlb_lookup_hit(cfg->asid, cfg->s2cfg.vmid, iova,
|
2020-07-28 18:08:06 +03:00
|
|
|
cfg->iotlb_hits, cfg->iotlb_misses,
|
|
|
|
100 * cfg->iotlb_hits /
|
|
|
|
(cfg->iotlb_hits + cfg->iotlb_misses));
|
|
|
|
} else {
|
|
|
|
cfg->iotlb_misses++;
|
2023-05-25 12:37:50 +03:00
|
|
|
trace_smmu_iotlb_lookup_miss(cfg->asid, cfg->s2cfg.vmid, iova,
|
2020-07-28 18:08:06 +03:00
|
|
|
cfg->iotlb_hits, cfg->iotlb_misses,
|
|
|
|
100 * cfg->iotlb_hits /
|
|
|
|
(cfg->iotlb_hits + cfg->iotlb_misses));
|
|
|
|
}
|
|
|
|
return entry;
|
|
|
|
}
|
|
|
|
|
2020-07-28 18:08:08 +03:00
|
|
|
/* Insert @new into the IOTLB, keyed by ASID/VMID/iova/granule/level. */
void smmu_iotlb_insert(SMMUState *bs, SMMUTransCfg *cfg, SMMUTLBEntry *new)
{
    SMMUIOTLBKey *key = g_new0(SMMUIOTLBKey, 1);
    /* Encode the granule size the same way the lookup path does. */
    uint8_t tg = (new->granule - 10) / 2;

    /* Crude capacity control: flush the whole cache when it is full. */
    if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
        smmu_iotlb_inv_all(bs);
    }

    *key = smmu_get_iotlb_key(cfg->asid, cfg->s2cfg.vmid, new->entry.iova,
                              tg, new->level);
    trace_smmu_iotlb_insert(cfg->asid, cfg->s2cfg.vmid, new->entry.iova,
                            tg, new->level);
    /* The table owns @key and @new from here on. */
    g_hash_table_insert(bs->iotlb, key, new);
}
|
|
|
|
|
2022-12-17 00:49:24 +03:00
|
|
|
/* Invalidate the entire IOTLB. */
void smmu_iotlb_inv_all(SMMUState *s)
{
    trace_smmu_iotlb_inv_all();
    g_hash_table_remove_all(s->iotlb);
}
|
|
|
|
|
2024-07-15 11:45:13 +03:00
|
|
|
/* foreach_remove predicate: match entries with both the given ASID and VMID. */
static gboolean smmu_hash_remove_by_asid_vmid(gpointer key, gpointer value,
                                              gpointer user_data)
{
    SMMUIOTLBPageInvInfo *info = user_data;
    SMMUIOTLBKey *iotlb_key = key;

    if (SMMU_IOTLB_ASID(*iotlb_key) != info->asid) {
        return false;
    }
    return SMMU_IOTLB_VMID(*iotlb_key) == info->vmid;
}
|
2023-05-25 12:37:51 +03:00
|
|
|
|
|
|
|
/* foreach_remove predicate: match every entry belonging to the given VMID. */
static gboolean smmu_hash_remove_by_vmid(gpointer key, gpointer value,
                                         gpointer user_data)
{
    SMMUIOTLBKey *iotlb_key = key;
    int vmid = *(int *)user_data;

    return SMMU_IOTLB_VMID(*iotlb_key) == vmid;
}
|
|
|
|
|
2024-07-15 11:45:14 +03:00
|
|
|
/*
 * foreach_remove predicate: match stage-1 entries (ASID >= 0) that belong
 * to the given VMID.
 */
static gboolean smmu_hash_remove_by_vmid_s1(gpointer key, gpointer value,
                                            gpointer user_data)
{
    SMMUIOTLBKey *iotlb_key = key;
    int vmid = *(int *)user_data;

    if (SMMU_IOTLB_VMID(*iotlb_key) != vmid) {
        return false;
    }
    return SMMU_IOTLB_ASID(*iotlb_key) >= 0;
}
|
|
|
|
|
2023-05-25 12:37:50 +03:00
|
|
|
/*
 * foreach_remove predicate: match entries overlapping the invalidation range
 * in @user_data. A negative asid/vmid in @info means "don't care".
 */
static gboolean smmu_hash_remove_by_asid_vmid_iova(gpointer key, gpointer value,
                                                   gpointer user_data)
{
    SMMUTLBEntry *iter = value;
    IOMMUTLBEntry *entry = &iter->entry;
    SMMUIOTLBPageInvInfo *info = user_data;
    SMMUIOTLBKey iotlb_key = *(SMMUIOTLBKey *)key;

    if (info->asid >= 0 && info->asid != SMMU_IOTLB_ASID(iotlb_key)) {
        return false;
    }
    if (info->vmid >= 0 && info->vmid != SMMU_IOTLB_VMID(iotlb_key)) {
        return false;
    }
    /* Remove when either range contains the base of the other. */
    return ((info->iova & ~entry->addr_mask) == entry->iova) ||
           ((entry->iova & ~info->mask) == info->iova);
}
|
|
|
|
|
2024-07-15 11:45:12 +03:00
|
|
|
/*
 * foreach_remove predicate for stage-2 (IPA) invalidation: match stage-2
 * entries (ASID < 0) of the given VMID that overlap the range in @user_data.
 */
static gboolean smmu_hash_remove_by_vmid_ipa(gpointer key, gpointer value,
                                             gpointer user_data)
{
    SMMUTLBEntry *iter = value;
    IOMMUTLBEntry *entry = &iter->entry;
    SMMUIOTLBPageInvInfo *info = user_data;
    SMMUIOTLBKey iotlb_key = *(SMMUIOTLBKey *)key;

    /* Skip stage-1 entries (ASID >= 0) and entries of other VMs. */
    if (SMMU_IOTLB_ASID(iotlb_key) >= 0 ||
        SMMU_IOTLB_VMID(iotlb_key) != info->vmid) {
        return false;
    }
    /* Remove when either range contains the base of the other. */
    return ((info->iova & ~entry->addr_mask) == entry->iova) ||
           ((entry->iova & ~info->mask) == info->iova);
}
|
|
|
|
|
2023-05-25 12:37:50 +03:00
|
|
|
/*
 * Invalidate cached translations covering [@iova, @iova + range) for the
 * given @asid/@vmid. A negative @asid means "don't care".
 */
void smmu_iotlb_inv_iova(SMMUState *s, int asid, int vmid, dma_addr_t iova,
                         uint8_t tg, uint64_t num_pages, uint8_t ttl)
{
    /* if tg is not set we use 4KB range invalidation */
    uint8_t granule = tg ? tg * 2 + 10 : 12;

    /*
     * Single page at a known level with a specific ASID: try a direct
     * hash-table removal before falling back to a full sweep.
     */
    if (ttl && (num_pages == 1) && (asid >= 0)) {
        SMMUIOTLBKey key = smmu_get_iotlb_key(asid, vmid, iova, tg, ttl);

        if (g_hash_table_remove(s->iotlb, &key)) {
            return;
        }
        /*
         * if the entry is not found, let's see if it does not
         * belong to a larger IOTLB entry
         */
    }

    SMMUIOTLBPageInvInfo info = {
        .asid = asid, .iova = iova,
        .vmid = vmid,
        /*
         * Same value as the previous "(num_pages * 1 << granule) - 1" (the
         * '*' bound tighter than '<<'), written to match smmu_iotlb_inv_ipa().
         */
        .mask = (num_pages << granule) - 1};

    g_hash_table_foreach_remove(s->iotlb,
                                smmu_hash_remove_by_asid_vmid_iova,
                                &info);
}
|
|
|
|
|
2024-07-15 11:45:12 +03:00
|
|
|
/*
|
|
|
|
* Similar to smmu_iotlb_inv_iova(), but for Stage-2, ASID is always -1,
|
|
|
|
* in Stage-1 invalidation ASID = -1, means don't care.
|
|
|
|
*/
|
|
|
|
void smmu_iotlb_inv_ipa(SMMUState *s, int vmid, dma_addr_t ipa, uint8_t tg,
|
|
|
|
uint64_t num_pages, uint8_t ttl)
|
|
|
|
{
|
|
|
|
uint8_t granule = tg ? tg * 2 + 10 : 12;
|
|
|
|
int asid = -1;
|
|
|
|
|
|
|
|
if (ttl && (num_pages == 1)) {
|
|
|
|
SMMUIOTLBKey key = smmu_get_iotlb_key(asid, vmid, ipa, tg, ttl);
|
|
|
|
|
|
|
|
if (g_hash_table_remove(s->iotlb, &key)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
SMMUIOTLBPageInvInfo info = {
|
|
|
|
.iova = ipa,
|
|
|
|
.vmid = vmid,
|
|
|
|
.mask = (num_pages << granule) - 1};
|
|
|
|
|
|
|
|
g_hash_table_foreach_remove(s->iotlb,
|
|
|
|
smmu_hash_remove_by_vmid_ipa,
|
|
|
|
&info);
|
|
|
|
}
|
|
|
|
|
2024-07-15 11:45:13 +03:00
|
|
|
/* Invalidate all cached entries matching both @asid and @vmid. */
void smmu_iotlb_inv_asid_vmid(SMMUState *s, int asid, int vmid)
{
    SMMUIOTLBPageInvInfo info = {
        .asid = asid,
        .vmid = vmid,
    };

    trace_smmu_iotlb_inv_asid_vmid(asid, vmid);
    g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_asid_vmid, &info);
}
|
|
|
|
|
2024-07-15 11:45:06 +03:00
|
|
|
/* Invalidate every cached entry belonging to @vmid. */
void smmu_iotlb_inv_vmid(SMMUState *s, int vmid)
{
    trace_smmu_iotlb_inv_vmid(vmid);
    g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid);
}
|
|
|
|
|
2024-07-15 11:45:14 +03:00
|
|
|
/*
 * Invalidate all cached stage-1 entries (ASID >= 0) belonging to @vmid.
 *
 * Note: this function has external linkage; a bare 'inline' definition in a
 * .c file does not guarantee an external definition under C99 inline
 * semantics, so the qualifier is dropped.
 */
void smmu_iotlb_inv_vmid_s1(SMMUState *s, int vmid)
{
    trace_smmu_iotlb_inv_vmid_s1(vmid);
    g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid_s1, &vmid);
}
|
|
|
|
|
2018-05-04 20:05:51 +03:00
|
|
|
/* VMSAv8-64 Translation */
|
|
|
|
|
|
|
|
/**
|
|
|
|
* get_pte - Get the content of a page table entry located at
|
|
|
|
* @base_addr[@index]
|
|
|
|
*/
|
|
|
|
static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte,
|
|
|
|
SMMUPTWEventInfo *info)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
dma_addr_t addr = baseaddr + index * sizeof(*pte);
|
|
|
|
|
|
|
|
/* TODO: guarantee 64-bit single-copy atomicity */
|
2023-07-25 12:56:51 +03:00
|
|
|
ret = ldq_le_dma(&address_space_memory, addr, pte, MEMTXATTRS_UNSPECIFIED);
|
2018-05-04 20:05:51 +03:00
|
|
|
|
|
|
|
if (ret != MEMTX_OK) {
|
|
|
|
info->type = SMMU_PTW_ERR_WALK_EABT;
|
|
|
|
info->addr = addr;
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
trace_smmu_get_pte(baseaddr, index, addr, *pte);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* VMSAv8-64 Translation Table Format Descriptor Decoding */
|
|
|
|
|
|
|
|
/**
 * get_page_pte_address - returns the L3 descriptor output address,
 * ie. the page frame
 * ARM ARM spec: Figure D4-17 VMSAv8-64 level 3 descriptor format
 */
static inline hwaddr get_page_pte_address(uint64_t pte, int granule_sz)
{
    /* Mask off descriptor attribute bits, keeping the output address. */
    return PTE_ADDRESS(pte, granule_sz);
}
|
|
|
|
|
|
|
|
/**
 * get_table_pte_address - return table descriptor output address,
 * ie. address of next level table
 * ARM ARM Figure D4-16 VMSAv8-64 level0, level1, and level 2 descriptor formats
 */
static inline hwaddr get_table_pte_address(uint64_t pte, int granule_sz)
{
    /* Mask off descriptor attribute bits, keeping the next-table address. */
    return PTE_ADDRESS(pte, granule_sz);
}
|
|
|
|
|
|
|
|
/**
 * get_block_pte_address - return block descriptor output address and block size
 * ARM ARM Figure D4-16 VMSAv8-64 level0, level1, and level 2 descriptor formats
 *
 * The block size (in bytes) is stored through @bsz; it is determined by the
 * level and the granule, not by the descriptor itself.
 */
static inline hwaddr get_block_pte_address(uint64_t pte, int level,
                                           int granule_sz, uint64_t *bsz)
{
    int n = level_shift(level, granule_sz);

    *bsz = 1ULL << n;
    return PTE_ADDRESS(pte, n);
}
|
|
|
|
|
|
|
|
/*
 * select_tt - pick the translation table (TTB0 or TTB1) covering @iova,
 * or NULL if the address falls in the gap between the two regions
 * (a Translation fault).
 */
SMMUTransTableInfo *select_tt(SMMUTransCfg *cfg, dma_addr_t iova)
{
    /* Top Byte Ignore: bit 55 selects which TBI flag applies. */
    bool tbi = extract64(iova, 55, 1) ? TBI1(cfg->tbi) : TBI0(cfg->tbi);
    /* When TBI is on, the top 8 address bits are excluded from the check. */
    uint8_t tbi_byte = tbi * 8;

    if (cfg->tt[0].tsz &&
        !extract64(iova, 64 - cfg->tt[0].tsz, cfg->tt[0].tsz - tbi_byte)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        return &cfg->tt[0];
    } else if (cfg->tt[1].tsz &&
        sextract64(iova, 64 - cfg->tt[1].tsz, cfg->tt[1].tsz - tbi_byte) == -1) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        return &cfg->tt[1];
    } else if (!cfg->tt[0].tsz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        return &cfg->tt[0];
    } else if (!cfg->tt[1].tsz) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        return &cfg->tt[1];
    }
    /* in the gap between the two regions, this is a Translation fault */
    return NULL;
}
|
|
|
|
|
2024-07-15 11:45:11 +03:00
|
|
|
/*
 * Translate stage-1 table address using stage-2 page table.
 *
 * On success, *@table_addr is rewritten to the stage-2-translated address
 * and 0 is returned. On failure, @info is filled as a stage-2 fault on the
 * descriptor fetch address and -EINVAL is returned.
 */
static inline int translate_table_addr_ipa(SMMUState *bs,
                                           dma_addr_t *table_addr,
                                           SMMUTransCfg *cfg,
                                           SMMUPTWEventInfo *info)
{
    dma_addr_t addr = *table_addr;
    SMMUTLBEntry *cached_entry;
    int asid;

    /*
     * The translation table walks performed from TTB0 or TTB1 are always
     * performed in IPA space if stage 2 translations are enabled.
     */
    asid = cfg->asid;
    /* Temporarily switch @cfg to a pure stage-2 lookup... */
    cfg->stage = SMMU_STAGE_2;
    cfg->asid = -1;
    cached_entry = smmu_translate(bs, cfg, addr, IOMMU_RO, info);
    /* ...then restore the caller's nested configuration. */
    cfg->asid = asid;
    cfg->stage = SMMU_NESTED;

    if (cached_entry) {
        *table_addr = CACHED_ENTRY_TO_ADDR(cached_entry, addr);
        return 0;
    }

    /* Report the failure as a stage-2 fault on the descriptor address. */
    info->stage = SMMU_STAGE_2;
    info->addr = addr;
    info->is_ipa_descriptor = true;
    return -EINVAL;
}
|
|
|
|
|
2018-05-04 20:05:51 +03:00
|
|
|
/**
 * smmu_ptw_64_s1 - VMSAv8-64 Walk of the page tables for a given IOVA
 * @bs: smmu state which includes TLB instance
 * @cfg: translation config
 * @iova: iova to translate
 * @perm: access type
 * @tlbe: SMMUTLBEntry (out)
 * @info: handle to an error info
 *
 * Return 0 on success, < 0 on error. In case of error, @info is filled
 * and tlbe->perm is set to IOMMU_NONE.
 * Upon success, @tlbe is filled with translated_addr and entry
 * permission rights.
 */
static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
                          dma_addr_t iova, IOMMUAccessFlags perm,
                          SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
    dma_addr_t baseaddr, indexmask;
    SMMUStage stage = cfg->stage;
    SMMUTransTableInfo *tt = select_tt(cfg, iova);
    uint8_t level, granule_sz, inputsize, stride;

    /* No table covers this iova, or the covering table is disabled. */
    if (!tt || tt->disabled) {
        info->type = SMMU_PTW_ERR_TRANSLATION;
        goto error;
    }

    granule_sz = tt->granule_sz;
    stride = VMSA_STRIDE(granule_sz);
    inputsize = 64 - tt->tsz;
    /* Starting level of the walk, derived from input size and stride. */
    level = 4 - (inputsize - 4) / stride;
    indexmask = VMSA_IDXMSK(inputsize, stride, level);

    /* Table base, clamped to the effective output address size. */
    baseaddr = extract64(tt->ttb, 0, cfg->oas);
    baseaddr &= ~indexmask;

    while (level < VMSA_LEVELS) {
        uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
        uint64_t mask = subpage_size - 1;
        uint32_t offset = iova_level_offset(iova, inputsize, level, granule_sz);
        uint64_t pte, gpa;
        dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
        uint8_t ap;

        if (get_pte(baseaddr, offset, &pte, info)) {
            goto error;
        }
        trace_smmu_ptw_level(stage, level, iova, subpage_size,
                             baseaddr, offset, pte);

        if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
            trace_smmu_ptw_invalid_pte(stage, level, baseaddr,
                                       pte_addr, offset, pte);
            /* Fall through to the Translation fault after the loop. */
            break;
        }

        if (is_table_pte(pte, level)) {
            ap = PTE_APTABLE(pte);

            /* Hierarchical permission checks, unless HAD disables them. */
            if (is_permission_fault(ap, perm) && !tt->had) {
                info->type = SMMU_PTW_ERR_PERMISSION;
                goto error;
            }
            baseaddr = get_table_pte_address(pte, granule_sz);
            /* For nested configs, the next-level table address is an IPA. */
            if (cfg->stage == SMMU_NESTED) {
                if (translate_table_addr_ipa(bs, &baseaddr, cfg, info)) {
                    goto error;
                }
            }
            level++;
            continue;
        } else if (is_page_pte(pte, level)) {
            gpa = get_page_pte_address(pte, granule_sz);
            trace_smmu_ptw_page_pte(stage, level, iova,
                                    baseaddr, pte_addr, pte, gpa);
        } else {
            uint64_t block_size;

            gpa = get_block_pte_address(pte, level, granule_sz,
                                        &block_size);
            trace_smmu_ptw_block_pte(stage, level, baseaddr,
                                     pte_addr, pte, iova, gpa,
                                     block_size >> 20);
        }

        /*
         * QEMU does not currently implement HTTU, so if AFFD and PTE.AF
         * are 0 we take an Access flag fault. (5.4. Context Descriptor)
         * An Access flag fault takes priority over a Permission fault.
         */
        if (!PTE_AF(pte) && !cfg->affd) {
            info->type = SMMU_PTW_ERR_ACCESS;
            goto error;
        }

        ap = PTE_AP(pte);
        if (is_permission_fault(ap, perm)) {
            info->type = SMMU_PTW_ERR_PERMISSION;
            goto error;
        }

        /*
         * The address output from the translation causes a stage 1 Address
         * Size fault if it exceeds the range of the effective IPA size for
         * the given CD.
         */
        if (gpa >= (1ULL << cfg->oas)) {
            info->type = SMMU_PTW_ERR_ADDR_SIZE;
            goto error;
        }

        /* Success: fill the output TLB entry. */
        tlbe->entry.translated_addr = gpa;
        tlbe->entry.iova = iova & ~mask;
        tlbe->entry.addr_mask = mask;
        tlbe->parent_perm = PTE_AP_TO_PERM(ap);
        tlbe->entry.perm = tlbe->parent_perm;
        tlbe->level = level;
        tlbe->granule = granule_sz;
        return 0;
    }
    info->type = SMMU_PTW_ERR_TRANSLATION;

error:
    info->stage = SMMU_STAGE_1;
    tlbe->entry.perm = IOMMU_NONE;
    return -EINVAL;
}
|
|
|
|
|
2023-05-25 12:37:50 +03:00
|
|
|
/**
|
|
|
|
* smmu_ptw_64_s2 - VMSAv8-64 Walk of the page tables for a given ipa
|
|
|
|
* for stage-2.
|
|
|
|
* @cfg: translation config
|
|
|
|
* @ipa: ipa to translate
|
|
|
|
* @perm: access type
|
|
|
|
* @tlbe: SMMUTLBEntry (out)
|
|
|
|
* @info: handle to an error info
|
|
|
|
*
|
|
|
|
* Return 0 on success, < 0 on error. In case of error, @info is filled
|
|
|
|
* and tlbe->perm is set to IOMMU_NONE.
|
|
|
|
* Upon success, @tlbe is filled with translated_addr and entry
|
|
|
|
* permission rights.
|
|
|
|
*/
|
|
|
|
static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
|
|
|
|
dma_addr_t ipa, IOMMUAccessFlags perm,
|
|
|
|
SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
|
|
|
|
{
|
2024-07-15 11:45:04 +03:00
|
|
|
const SMMUStage stage = SMMU_STAGE_2;
|
2023-05-25 12:37:50 +03:00
|
|
|
int granule_sz = cfg->s2cfg.granule_sz;
|
|
|
|
/* ARM DDI0487I.a: Table D8-7. */
|
|
|
|
int inputsize = 64 - cfg->s2cfg.tsz;
|
|
|
|
int level = get_start_level(cfg->s2cfg.sl0, granule_sz);
|
|
|
|
int stride = VMSA_STRIDE(granule_sz);
|
|
|
|
int idx = pgd_concat_idx(level, granule_sz, ipa);
|
|
|
|
/*
|
|
|
|
* Get the ttb from concatenated structure.
|
|
|
|
* The offset is the idx * size of each ttb(number of ptes * (sizeof(pte))
|
|
|
|
*/
|
2024-07-15 11:45:18 +03:00
|
|
|
uint64_t baseaddr = extract64(cfg->s2cfg.vttb, 0, cfg->s2cfg.eff_ps) +
|
|
|
|
(1 << stride) * idx * sizeof(uint64_t);
|
2023-05-25 12:37:50 +03:00
|
|
|
dma_addr_t indexmask = VMSA_IDXMSK(inputsize, stride, level);
|
|
|
|
|
|
|
|
baseaddr &= ~indexmask;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* On input, a stage 2 Translation fault occurs if the IPA is outside the
|
|
|
|
* range configured by the relevant S2T0SZ field of the STE.
|
|
|
|
*/
|
|
|
|
if (ipa >= (1ULL << inputsize)) {
|
|
|
|
info->type = SMMU_PTW_ERR_TRANSLATION;
|
hw/arm/smmu: Fix IPA for stage-2 events
For the following events (ARM IHI 0070 F.b - 7.3 Event records):
- F_TRANSLATION
- F_ACCESS
- F_PERMISSION
- F_ADDR_SIZE
If fault occurs at stage 2, S2 == 1 and:
- If translating an IPA for a transaction (whether by input to
stage 2-only configuration, or after successful stage 1 translation),
CLASS == IN, and IPA is provided.
At the moment only CLASS == IN is used which indicates input
translation.
However, this was not implemented correctly, as for stage 2, the code
only sets the S2 bit but not the IPA.
This field has the same bits as FetchAddr in F_WALK_EABT which is
populated correctly, so we don’t change that.
The setting of this field should be done from the walker as the IPA address
wouldn't be known in case of nesting.
For stage 1, the spec says:
If fault occurs at stage 1, S2 == 0 and:
CLASS == IN, IPA is UNKNOWN.
So, no need to set it to for stage 1, as ptw_info is initialised by zero in
smmuv3_translate().
Fixes: e703f7076a “hw/arm/smmuv3: Add page table walk for stage-2”
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Message-id: 20240715084519.1189624-3-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:02 +03:00
|
|
|
goto error_ipa;
|
2023-05-25 12:37:50 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
while (level < VMSA_LEVELS) {
|
|
|
|
uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
|
|
|
|
uint64_t mask = subpage_size - 1;
|
|
|
|
uint32_t offset = iova_level_offset(ipa, inputsize, level, granule_sz);
|
|
|
|
uint64_t pte, gpa;
|
|
|
|
dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
|
|
|
|
uint8_t s2ap;
|
|
|
|
|
|
|
|
if (get_pte(baseaddr, offset, &pte, info)) {
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
trace_smmu_ptw_level(stage, level, ipa, subpage_size,
|
|
|
|
baseaddr, offset, pte);
|
|
|
|
if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
|
|
|
|
trace_smmu_ptw_invalid_pte(stage, level, baseaddr,
|
|
|
|
pte_addr, offset, pte);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (is_table_pte(pte, level)) {
|
|
|
|
baseaddr = get_table_pte_address(pte, granule_sz);
|
|
|
|
level++;
|
|
|
|
continue;
|
|
|
|
} else if (is_page_pte(pte, level)) {
|
|
|
|
gpa = get_page_pte_address(pte, granule_sz);
|
|
|
|
trace_smmu_ptw_page_pte(stage, level, ipa,
|
|
|
|
baseaddr, pte_addr, pte, gpa);
|
|
|
|
} else {
|
|
|
|
uint64_t block_size;
|
|
|
|
|
|
|
|
gpa = get_block_pte_address(pte, level, granule_sz,
|
|
|
|
&block_size);
|
|
|
|
trace_smmu_ptw_block_pte(stage, level, baseaddr,
|
|
|
|
pte_addr, pte, ipa, gpa,
|
|
|
|
block_size >> 20);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If S2AFFD and PTE.AF are 0 => fault. (5.2. Stream Table Entry)
|
|
|
|
* An Access fault takes priority over a Permission fault.
|
|
|
|
*/
|
|
|
|
if (!PTE_AF(pte) && !cfg->s2cfg.affd) {
|
|
|
|
info->type = SMMU_PTW_ERR_ACCESS;
|
hw/arm/smmu: Fix IPA for stage-2 events
For the following events (ARM IHI 0070 F.b - 7.3 Event records):
- F_TRANSLATION
- F_ACCESS
- F_PERMISSION
- F_ADDR_SIZE
If fault occurs at stage 2, S2 == 1 and:
- If translating an IPA for a transaction (whether by input to
stage 2-only configuration, or after successful stage 1 translation),
CLASS == IN, and IPA is provided.
At the moment only CLASS == IN is used which indicates input
translation.
However, this was not implemented correctly, as for stage 2, the code
only sets the S2 bit but not the IPA.
This field has the same bits as FetchAddr in F_WALK_EABT which is
populated correctly, so we don’t change that.
The setting of this field should be done from the walker as the IPA address
wouldn't be known in case of nesting.
For stage 1, the spec says:
If fault occurs at stage 1, S2 == 0 and:
CLASS == IN, IPA is UNKNOWN.
So, no need to set it to for stage 1, as ptw_info is initialised by zero in
smmuv3_translate().
Fixes: e703f7076a “hw/arm/smmuv3: Add page table walk for stage-2”
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Message-id: 20240715084519.1189624-3-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:02 +03:00
|
|
|
goto error_ipa;
|
2023-05-25 12:37:50 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
s2ap = PTE_AP(pte);
|
|
|
|
if (is_permission_fault_s2(s2ap, perm)) {
|
|
|
|
info->type = SMMU_PTW_ERR_PERMISSION;
|
hw/arm/smmu: Fix IPA for stage-2 events
For the following events (ARM IHI 0070 F.b - 7.3 Event records):
- F_TRANSLATION
- F_ACCESS
- F_PERMISSION
- F_ADDR_SIZE
If fault occurs at stage 2, S2 == 1 and:
- If translating an IPA for a transaction (whether by input to
stage 2-only configuration, or after successful stage 1 translation),
CLASS == IN, and IPA is provided.
At the moment only CLASS == IN is used which indicates input
translation.
However, this was not implemented correctly, as for stage 2, the code
only sets the S2 bit but not the IPA.
This field has the same bits as FetchAddr in F_WALK_EABT which is
populated correctly, so we don’t change that.
The setting of this field should be done from the walker as the IPA address
wouldn't be known in case of nesting.
For stage 1, the spec says:
If fault occurs at stage 1, S2 == 0 and:
CLASS == IN, IPA is UNKNOWN.
So, no need to set it for stage 1, as ptw_info is initialised to zero in
smmuv3_translate().
Fixes: e703f7076a "hw/arm/smmuv3: Add page table walk for stage-2"
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Message-id: 20240715084519.1189624-3-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:02 +03:00
|
|
|
goto error_ipa;
|
2023-05-25 12:37:50 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The address output from the translation causes a stage 2 Address
|
|
|
|
* Size fault if it exceeds the effective PA output range.
|
|
|
|
*/
|
|
|
|
if (gpa >= (1ULL << cfg->s2cfg.eff_ps)) {
|
|
|
|
info->type = SMMU_PTW_ERR_ADDR_SIZE;
|
hw/arm/smmu: Fix IPA for stage-2 events
For the following events (ARM IHI 0070 F.b - 7.3 Event records):
- F_TRANSLATION
- F_ACCESS
- F_PERMISSION
- F_ADDR_SIZE
If fault occurs at stage 2, S2 == 1 and:
- If translating an IPA for a transaction (whether by input to
stage 2-only configuration, or after successful stage 1 translation),
CLASS == IN, and IPA is provided.
At the moment only CLASS == IN is used which indicates input
translation.
However, this was not implemented correctly, as for stage 2, the code
only sets the S2 bit but not the IPA.
This field has the same bits as FetchAddr in F_WALK_EABT which is
populated correctly, so we don’t change that.
The setting of this field should be done from the walker as the IPA address
wouldn't be known in case of nesting.
For stage 1, the spec says:
If fault occurs at stage 1, S2 == 0 and:
CLASS == IN, IPA is UNKNOWN.
So, no need to set it to for stage 1, as ptw_info is initialised by zero in
smmuv3_translate().
Fixes: e703f7076a “hw/arm/smmuv3: Add page table walk for stage-2”
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Message-id: 20240715084519.1189624-3-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:02 +03:00
|
|
|
goto error_ipa;
|
2023-05-25 12:37:50 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
tlbe->entry.translated_addr = gpa;
|
|
|
|
tlbe->entry.iova = ipa & ~mask;
|
|
|
|
tlbe->entry.addr_mask = mask;
|
2024-07-15 11:45:10 +03:00
|
|
|
tlbe->parent_perm = s2ap;
|
|
|
|
tlbe->entry.perm = tlbe->parent_perm;
|
2023-05-25 12:37:50 +03:00
|
|
|
tlbe->level = level;
|
|
|
|
tlbe->granule = granule_sz;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
info->type = SMMU_PTW_ERR_TRANSLATION;
|
|
|
|
|
hw/arm/smmu: Fix IPA for stage-2 events
For the following events (ARM IHI 0070 F.b - 7.3 Event records):
- F_TRANSLATION
- F_ACCESS
- F_PERMISSION
- F_ADDR_SIZE
If fault occurs at stage 2, S2 == 1 and:
- If translating an IPA for a transaction (whether by input to
stage 2-only configuration, or after successful stage 1 translation),
CLASS == IN, and IPA is provided.
At the moment only CLASS == IN is used which indicates input
translation.
However, this was not implemented correctly, as for stage 2, the code
only sets the S2 bit but not the IPA.
This field has the same bits as FetchAddr in F_WALK_EABT which is
populated correctly, so we don’t change that.
The setting of this field should be done from the walker as the IPA address
wouldn't be known in case of nesting.
For stage 1, the spec says:
If fault occurs at stage 1, S2 == 0 and:
CLASS == IN, IPA is UNKNOWN.
So, no need to set it to for stage 1, as ptw_info is initialised by zero in
smmuv3_translate().
Fixes: e703f7076a “hw/arm/smmuv3: Add page table walk for stage-2”
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Message-id: 20240715084519.1189624-3-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:02 +03:00
|
|
|
error_ipa:
|
|
|
|
info->addr = ipa;
|
2023-05-25 12:37:50 +03:00
|
|
|
error:
|
2024-07-15 11:45:04 +03:00
|
|
|
info->stage = SMMU_STAGE_2;
|
2023-05-25 12:37:50 +03:00
|
|
|
tlbe->entry.perm = IOMMU_NONE;
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2024-07-15 11:45:10 +03:00
|
|
|
/*
|
|
|
|
* combine S1 and S2 TLB entries into a single entry.
|
2024-08-13 23:23:27 +03:00
|
|
|
* As a result the S1 entry is overridden with combined data.
|
2024-07-15 11:45:10 +03:00
|
|
|
*/
|
2024-07-15 11:45:11 +03:00
|
|
|
static void combine_tlb(SMMUTLBEntry *tlbe, SMMUTLBEntry *tlbe_s2,
|
|
|
|
dma_addr_t iova, SMMUTransCfg *cfg)
|
2024-07-15 11:45:10 +03:00
|
|
|
{
|
|
|
|
if (tlbe_s2->entry.addr_mask < tlbe->entry.addr_mask) {
|
|
|
|
tlbe->entry.addr_mask = tlbe_s2->entry.addr_mask;
|
|
|
|
tlbe->granule = tlbe_s2->granule;
|
|
|
|
tlbe->level = tlbe_s2->level;
|
|
|
|
}
|
|
|
|
|
|
|
|
tlbe->entry.translated_addr = CACHED_ENTRY_TO_ADDR(tlbe_s2,
|
|
|
|
tlbe->entry.translated_addr);
|
|
|
|
|
|
|
|
tlbe->entry.iova = iova & ~tlbe->entry.addr_mask;
|
|
|
|
/* parent_perm has s2 perm while perm keeps s1 perm. */
|
|
|
|
tlbe->parent_perm = tlbe_s2->entry.perm;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-05-04 20:05:51 +03:00
|
|
|
/**
|
|
|
|
* smmu_ptw - Walk the page tables for an IOVA, according to @cfg
|
|
|
|
*
|
2024-07-15 11:45:11 +03:00
|
|
|
* @bs: smmu state which includes TLB instance
|
2018-05-04 20:05:51 +03:00
|
|
|
* @cfg: translation configuration
|
|
|
|
* @iova: iova to translate
|
|
|
|
* @perm: tentative access type
|
|
|
|
* @tlbe: returned entry
|
|
|
|
* @info: ptw event handle
|
|
|
|
*
|
|
|
|
* return 0 on success
|
|
|
|
*/
|
2024-07-15 11:45:11 +03:00
|
|
|
int smmu_ptw(SMMUState *bs, SMMUTransCfg *cfg, dma_addr_t iova,
|
|
|
|
IOMMUAccessFlags perm, SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
|
2018-05-04 20:05:51 +03:00
|
|
|
{
|
2024-07-15 11:45:11 +03:00
|
|
|
int ret;
|
|
|
|
SMMUTLBEntry tlbe_s2;
|
|
|
|
dma_addr_t ipa;
|
|
|
|
|
2024-07-15 11:45:04 +03:00
|
|
|
if (cfg->stage == SMMU_STAGE_1) {
|
2024-07-15 11:45:11 +03:00
|
|
|
return smmu_ptw_64_s1(bs, cfg, iova, perm, tlbe, info);
|
2024-07-15 11:45:04 +03:00
|
|
|
} else if (cfg->stage == SMMU_STAGE_2) {
|
2023-05-25 12:37:50 +03:00
|
|
|
/*
|
|
|
|
* If bypassing stage 1(or unimplemented), the input address is passed
|
|
|
|
* directly to stage 2 as IPA. If the input address of a transaction
|
|
|
|
* exceeds the size of the IAS, a stage 1 Address Size fault occurs.
|
|
|
|
* For AA64, IAS = OAS according to (IHI 0070.E.a) "3.4 Address sizes"
|
|
|
|
*/
|
|
|
|
if (iova >= (1ULL << cfg->oas)) {
|
|
|
|
info->type = SMMU_PTW_ERR_ADDR_SIZE;
|
2024-07-15 11:45:04 +03:00
|
|
|
info->stage = SMMU_STAGE_1;
|
2023-05-25 12:37:50 +03:00
|
|
|
tlbe->entry.perm = IOMMU_NONE;
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return smmu_ptw_64_s2(cfg, iova, perm, tlbe, info);
|
|
|
|
}
|
|
|
|
|
2024-07-15 11:45:11 +03:00
|
|
|
/* SMMU_NESTED. */
|
|
|
|
ret = smmu_ptw_64_s1(bs, cfg, iova, perm, tlbe, info);
|
|
|
|
if (ret) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ipa = CACHED_ENTRY_TO_ADDR(tlbe, iova);
|
|
|
|
ret = smmu_ptw_64_s2(cfg, ipa, perm, &tlbe_s2, info);
|
|
|
|
if (ret) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
combine_tlb(tlbe, &tlbe_s2, iova, cfg);
|
|
|
|
return 0;
|
2018-05-04 20:05:51 +03:00
|
|
|
}
|
2018-05-04 20:05:51 +03:00
|
|
|
|
hw/arm/smmu: Split smmuv3_translate()
smmuv3_translate() does everything from STE/CD parsing to TLB lookup
and PTW.
Soon, when nesting is supported, stage-1 data (tt, CD) needs to be
translated using stage-2.
Split smmuv3_translate() to 3 functions:
- smmu_translate(): in smmu-common.c, which does the TLB lookup, PTW,
TLB insertion, all the functions are already there, this just puts
them together.
This also simplifies the code as it consolidates event generation
in case of TLB lookup permission failure or in TT selection.
- smmuv3_do_translate(): in smmuv3.c, Calls smmu_translate() and does
the event population in case of errors.
- smmuv3_translate(), now calls smmuv3_do_translate() for
translation while the rest is the same.
Also, add stage in trace_smmuv3_translate_success()
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20240715084519.1189624-6-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:05 +03:00
|
|
|
SMMUTLBEntry *smmu_translate(SMMUState *bs, SMMUTransCfg *cfg, dma_addr_t addr,
|
|
|
|
IOMMUAccessFlags flag, SMMUPTWEventInfo *info)
|
|
|
|
{
|
|
|
|
SMMUTLBEntry *cached_entry = NULL;
|
|
|
|
SMMUTransTableInfo *tt;
|
|
|
|
int status;
|
|
|
|
|
|
|
|
/*
|
hw/arm/smmu-common: Rework TLB lookup for nesting
In the next patch, combine_tlb() will be added which combines 2 TLB
entries into one for nested translations, which chooses the granule
and level from the smallest entry.
This means that with nested translation, an entry can be cached with
the granule of stage-2 and not stage-1.
However, currently, the lookup for an IOVA is done with input stage
granule, which is stage-1 for nested configuration, which will not
work with the above logic.
This patch reworks lookup in that case, so it falls back to stage-2
granule if no entry is found using stage-1 granule.
Also, drop aligning the iova to avoid over-aligning in case the iova
is cached with a smaller granule, the TLB lookup will align the iova
anyway for each granule and level, and the page table walker doesn't
consider the page offset bits.
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20240715084519.1189624-10-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:09 +03:00
|
|
|
* Combined attributes used for TLB lookup, holds the attributes for
|
|
|
|
* the input stage.
|
hw/arm/smmu: Split smmuv3_translate()
smmuv3_translate() does everything from STE/CD parsing to TLB lookup
and PTW.
Soon, when nesting is supported, stage-1 data (tt, CD) needs to be
translated using stage-2.
Split smmuv3_translate() to 3 functions:
- smmu_translate(): in smmu-common.c, which does the TLB lookup, PTW,
TLB insertion, all the functions are already there, this just puts
them together.
This also simplifies the code as it consolidates event generation
in case of TLB lookup permission failure or in TT selection.
- smmuv3_do_translate(): in smmuv3.c, Calls smmu_translate() and does
the event population in case of errors.
- smmuv3_translate(), now calls smmuv3_do_translate() for
translation while the rest is the same.
Also, add stage in trace_smmuv3_translate_success()
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20240715084519.1189624-6-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:05 +03:00
|
|
|
*/
|
|
|
|
SMMUTransTableInfo tt_combined;
|
|
|
|
|
hw/arm/smmu-common: Rework TLB lookup for nesting
In the next patch, combine_tlb() will be added which combines 2 TLB
entries into one for nested translations, which chooses the granule
and level from the smallest entry.
This means that with nested translation, an entry can be cached with
the granule of stage-2 and not stage-1.
However, currently, the lookup for an IOVA is done with input stage
granule, which is stage-1 for nested configuration, which will not
work with the above logic.
This patch reworks lookup in that case, so it falls back to stage-2
granule if no entry is found using stage-1 granule.
Also, drop aligning the iova to avoid over-aligning in case the iova
is cached with a smaller granule, the TLB lookup will align the iova
anyway for each granule and level, and the page table walker doesn't
consider the page offset bits.
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20240715084519.1189624-10-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:09 +03:00
|
|
|
if (cfg->stage == SMMU_STAGE_2) {
|
|
|
|
/* Stage2. */
|
|
|
|
tt_combined.granule_sz = cfg->s2cfg.granule_sz;
|
|
|
|
tt_combined.tsz = cfg->s2cfg.tsz;
|
|
|
|
} else {
|
hw/arm/smmu: Split smmuv3_translate()
smmuv3_translate() does everything from STE/CD parsing to TLB lookup
and PTW.
Soon, when nesting is supported, stage-1 data (tt, CD) needs to be
translated using stage-2.
Split smmuv3_translate() to 3 functions:
- smmu_translate(): in smmu-common.c, which does the TLB lookup, PTW,
TLB insertion, all the functions are already there, this just puts
them together.
This also simplifies the code as it consolidates event generation
in case of TLB lookup permission failure or in TT selection.
- smmuv3_do_translate(): in smmuv3.c, Calls smmu_translate() and does
the event population in case of errors.
- smmuv3_translate(), now calls smmuv3_do_translate() for
translation while the rest is the same.
Also, add stage in trace_smmuv3_translate_success()
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20240715084519.1189624-6-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:05 +03:00
|
|
|
/* Select stage1 translation table. */
|
|
|
|
tt = select_tt(cfg, addr);
|
|
|
|
if (!tt) {
|
|
|
|
info->type = SMMU_PTW_ERR_TRANSLATION;
|
|
|
|
info->stage = SMMU_STAGE_1;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
tt_combined.granule_sz = tt->granule_sz;
|
|
|
|
tt_combined.tsz = tt->tsz;
|
|
|
|
}
|
|
|
|
|
hw/arm/smmu-common: Rework TLB lookup for nesting
In the next patch, combine_tlb() will be added which combines 2 TLB
entries into one for nested translations, which chooses the granule
and level from the smallest entry.
This means that with nested translation, an entry can be cached with
the granule of stage-2 and not stage-1.
However, currently, the lookup for an IOVA is done with input stage
granule, which is stage-1 for nested configuration, which will not
work with the above logic.
This patch reworks lookup in that case, so it falls back to stage-2
granule if no entry is found using stage-1 granule.
Also, drop aligning the iova to avoid over-aligning in case the iova
is cached with a smaller granule, the TLB lookup will align the iova
anyway for each granule and level, and the page table walker doesn't
consider the page offset bits.
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20240715084519.1189624-10-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:09 +03:00
|
|
|
cached_entry = smmu_iotlb_lookup(bs, cfg, &tt_combined, addr);
|
hw/arm/smmu: Split smmuv3_translate()
smmuv3_translate() does everything from STE/CD parsing to TLB lookup
and PTW.
Soon, when nesting is supported, stage-1 data (tt, CD) needs to be
translated using stage-2.
Split smmuv3_translate() to 3 functions:
- smmu_translate(): in smmu-common.c, which does the TLB lookup, PTW,
TLB insertion, all the functions are already there, this just puts
them together.
This also simplifies the code as it consolidates event generation
in case of TLB lookup permission failure or in TT selection.
- smmuv3_do_translate(): in smmuv3.c, Calls smmu_translate() and does
the event population in case of errors.
- smmuv3_translate(), now calls smmuv3_do_translate() for
translation while the rest is the same.
Also, add stage in trace_smmuv3_translate_success()
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20240715084519.1189624-6-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:05 +03:00
|
|
|
if (cached_entry) {
|
2024-07-15 11:45:10 +03:00
|
|
|
if ((flag & IOMMU_WO) && !(cached_entry->entry.perm &
|
|
|
|
cached_entry->parent_perm & IOMMU_WO)) {
|
hw/arm/smmu: Split smmuv3_translate()
smmuv3_translate() does everything from STE/CD parsing to TLB lookup
and PTW.
Soon, when nesting is supported, stage-1 data (tt, CD) needs to be
translated using stage-2.
Split smmuv3_translate() to 3 functions:
- smmu_translate(): in smmu-common.c, which does the TLB lookup, PTW,
TLB insertion, all the functions are already there, this just puts
them together.
This also simplifies the code as it consolidates event generation
in case of TLB lookup permission failure or in TT selection.
- smmuv3_do_translate(): in smmuv3.c, Calls smmu_translate() and does
the event population in case of errors.
- smmuv3_translate(), now calls smmuv3_do_translate() for
translation while the rest is the same.
Also, add stage in trace_smmuv3_translate_success()
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20240715084519.1189624-6-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:05 +03:00
|
|
|
info->type = SMMU_PTW_ERR_PERMISSION;
|
2024-07-15 11:45:10 +03:00
|
|
|
info->stage = !(cached_entry->entry.perm & IOMMU_WO) ?
|
|
|
|
SMMU_STAGE_1 :
|
|
|
|
SMMU_STAGE_2;
|
hw/arm/smmu: Split smmuv3_translate()
smmuv3_translate() does everything from STE/CD parsing to TLB lookup
and PTW.
Soon, when nesting is supported, stage-1 data (tt, CD) needs to be
translated using stage-2.
Split smmuv3_translate() to 3 functions:
- smmu_translate(): in smmu-common.c, which does the TLB lookup, PTW,
TLB insertion, all the functions are already there, this just puts
them together.
This also simplifies the code as it consolidates event generation
in case of TLB lookup permission failure or in TT selection.
- smmuv3_do_translate(): in smmuv3.c, Calls smmu_translate() and does
the event population in case of errors.
- smmuv3_translate(), now calls smmuv3_do_translate() for
translation while the rest is the same.
Also, add stage in trace_smmuv3_translate_success()
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20240715084519.1189624-6-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:05 +03:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
return cached_entry;
|
|
|
|
}
|
|
|
|
|
|
|
|
cached_entry = g_new0(SMMUTLBEntry, 1);
|
2024-07-15 11:45:11 +03:00
|
|
|
status = smmu_ptw(bs, cfg, addr, flag, cached_entry, info);
|
hw/arm/smmu: Split smmuv3_translate()
smmuv3_translate() does everything from STE/CD parsing to TLB lookup
and PTW.
Soon, when nesting is supported, stage-1 data (tt, CD) needs to be
translated using stage-2.
Split smmuv3_translate() to 3 functions:
- smmu_translate(): in smmu-common.c, which does the TLB lookup, PTW,
TLB insertion, all the functions are already there, this just puts
them together.
This also simplifies the code as it consolidates event generation
in case of TLB lookup permission failure or in TT selection.
- smmuv3_do_translate(): in smmuv3.c, Calls smmu_translate() and does
the event population in case of errors.
- smmuv3_translate(), now calls smmuv3_do_translate() for
translation while the rest is the same.
Also, add stage in trace_smmuv3_translate_success()
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20240715084519.1189624-6-smostafa@google.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
2024-07-15 11:45:05 +03:00
|
|
|
if (status) {
|
|
|
|
g_free(cached_entry);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
smmu_iotlb_insert(bs, cfg, cached_entry);
|
|
|
|
return cached_entry;
|
|
|
|
}
|
|
|
|
|
2018-05-04 20:05:51 +03:00
|
|
|
/**
|
|
|
|
* The bus number is used for lookup when SID based invalidation occurs.
|
|
|
|
* In that case we lazily populate the SMMUPciBus array from the bus hash
|
|
|
|
* table. At the time the SMMUPciBus is created (smmu_find_add_as), the bus
|
|
|
|
* numbers may not be always initialized yet.
|
|
|
|
*/
|
|
|
|
SMMUPciBus *smmu_find_smmu_pcibus(SMMUState *s, uint8_t bus_num)
|
|
|
|
{
|
|
|
|
SMMUPciBus *smmu_pci_bus = s->smmu_pcibus_by_bus_num[bus_num];
|
2020-03-05 19:09:14 +03:00
|
|
|
GHashTableIter iter;
|
2018-05-04 20:05:51 +03:00
|
|
|
|
2020-03-05 19:09:14 +03:00
|
|
|
if (smmu_pci_bus) {
|
|
|
|
return smmu_pci_bus;
|
|
|
|
}
|
2018-05-04 20:05:51 +03:00
|
|
|
|
2020-03-05 19:09:14 +03:00
|
|
|
g_hash_table_iter_init(&iter, s->smmu_pcibus_by_busptr);
|
|
|
|
while (g_hash_table_iter_next(&iter, NULL, (void **)&smmu_pci_bus)) {
|
|
|
|
if (pci_bus_num(smmu_pci_bus->bus) == bus_num) {
|
|
|
|
s->smmu_pcibus_by_bus_num[bus_num] = smmu_pci_bus;
|
|
|
|
return smmu_pci_bus;
|
2018-05-04 20:05:51 +03:00
|
|
|
}
|
|
|
|
}
|
2020-03-05 19:09:14 +03:00
|
|
|
|
|
|
|
return NULL;
|
2018-05-04 20:05:51 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Return (creating on first use) the IOMMU address space for the PCI
 * device at @devfn on @bus. Registered with pci_setup_iommu() below.
 */
static AddressSpace *smmu_find_add_as(PCIBus *bus, void *opaque, int devfn)
{
    SMMUState *s = opaque;
    SMMUPciBus *smmu_bus = g_hash_table_lookup(s->smmu_pcibus_by_busptr, bus);
    SMMUDevice *dev;
    static unsigned int index; /* makes each memory region name unique */

    /* Lazily create the per-bus bookkeeping structure. */
    if (!smmu_bus) {
        smmu_bus = g_malloc0(sizeof(SMMUPciBus) +
                             sizeof(SMMUDevice *) * SMMU_PCI_DEVFN_MAX);
        smmu_bus->bus = bus;
        g_hash_table_insert(s->smmu_pcibus_by_busptr, bus, smmu_bus);
    }

    dev = smmu_bus->pbdev[devfn];
    if (!dev) {
        char *name = g_strdup_printf("%s-%d-%d", s->mrtypename, devfn, index++);

        dev = smmu_bus->pbdev[devfn] = g_new0(SMMUDevice, 1);
        dev->smmu = s;
        dev->bus = bus;
        dev->devfn = devfn;

        /* Back the address space with an IOMMU memory region. */
        memory_region_init_iommu(&dev->iommu, sizeof(dev->iommu),
                                 s->mrtypename,
                                 OBJECT(s), name, UINT64_MAX);
        address_space_init(&dev->as,
                           MEMORY_REGION(&dev->iommu), name);
        trace_smmu_add_mr(name);
        g_free(name);
    }

    return &dev->as;
}
|
|
|
|
|
2023-10-17 19:14:04 +03:00
|
|
|
static const PCIIOMMUOps smmu_ops = {
|
|
|
|
.get_address_space = smmu_find_add_as,
|
|
|
|
};
|
|
|
|
|
2024-06-19 03:22:18 +03:00
|
|
|
/* Look up the SMMUDevice for stream id @sid, or NULL if unknown. */
SMMUDevice *smmu_find_sdev(SMMUState *s, uint32_t sid)
{
    SMMUPciBus *smmu_bus = smmu_find_smmu_pcibus(s, PCI_BUS_NUM(sid));

    if (!smmu_bus) {
        return NULL;
    }
    return smmu_bus->pbdev[SMMU_PCI_DEVFN(sid)];
}
|
|
|
|
|
2018-06-26 19:50:42 +03:00
|
|
|
/* Unmap all notifiers attached to @mr */
|
2022-12-17 00:49:23 +03:00
|
|
|
static void smmu_inv_notifiers_mr(IOMMUMemoryRegion *mr)
|
2018-06-26 19:50:42 +03:00
|
|
|
{
|
|
|
|
IOMMUNotifier *n;
|
|
|
|
|
|
|
|
trace_smmu_inv_notifiers_mr(mr->parent_obj.name);
|
|
|
|
IOMMU_NOTIFIER_FOREACH(n, mr) {
|
2023-02-23 09:59:23 +03:00
|
|
|
memory_region_unmap_iommu_notifier_range(n);
|
2018-06-26 19:50:42 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Unmap all notifiers of all mr's */
|
|
|
|
void smmu_inv_notifiers_all(SMMUState *s)
|
|
|
|
{
|
2019-04-29 19:35:57 +03:00
|
|
|
SMMUDevice *sdev;
|
2018-06-26 19:50:42 +03:00
|
|
|
|
2019-04-29 19:35:57 +03:00
|
|
|
QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
|
|
|
|
smmu_inv_notifiers_mr(&sdev->iommu);
|
2018-06-26 19:50:42 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-04 20:05:51 +03:00
|
|
|
/* Realize handler shared by all SMMU models; chains to the subclass. */
static void smmu_base_realize(DeviceState *dev, Error **errp)
{
    SMMUState *s = ARM_SMMU(dev);
    SMMUBaseClass *sbc = ARM_SMMU_GET_CLASS(dev);
    Error *local_err = NULL;

    /* Let the concrete SMMU model realize itself first. */
    sbc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Caches: per-device configs and the shared IOTLB. */
    s->configs = g_hash_table_new_full(NULL, NULL, NULL, g_free);
    s->iotlb = g_hash_table_new_full(smmu_iotlb_key_hash, smmu_iotlb_key_equal,
                                     g_free, g_free);
    s->smmu_pcibus_by_busptr = g_hash_table_new(NULL, NULL);

    if (s->primary_bus) {
        pci_setup_iommu(s->primary_bus, &smmu_ops, s);
    } else {
        error_setg(errp, "SMMU is not attached to any PCI bus!");
    }
}
|
|
|
|
|
2024-04-12 19:08:07 +03:00
|
|
|
/* Resettable "hold" phase: drop every cached translation structure. */
static void smmu_base_reset_hold(Object *obj, ResetType type)
{
    SMMUState *s = ARM_SMMU(obj);

    /* Invalidate the lazily-built bus-number cache. */
    memset(s->smmu_pcibus_by_bus_num, 0, sizeof(s->smmu_pcibus_by_bus_num));

    /* Flush cached configs and TLB entries. */
    g_hash_table_remove_all(s->configs);
    g_hash_table_remove_all(s->iotlb);
}
|
|
|
|
|
|
|
|
static Property smmu_dev_properties[] = {
|
|
|
|
DEFINE_PROP_UINT8("bus_num", SMMUState, bus_num, 0),
|
2023-01-17 22:30:14 +03:00
|
|
|
DEFINE_PROP_LINK("primary-bus", SMMUState, primary_bus,
|
|
|
|
TYPE_PCI_BUS, PCIBus *),
|
2018-05-04 20:05:51 +03:00
|
|
|
DEFINE_PROP_END_OF_LIST(),
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Class init for the abstract SMMU base type. */
static void smmu_base_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    SMMUBaseClass *sbc = ARM_SMMU_CLASS(klass);

    /* Reset is handled in the "hold" phase. */
    rc->phases.hold = smmu_base_reset_hold;

    device_class_set_props(dc, smmu_dev_properties);
    /* Save the subclass realize so smmu_base_realize() can chain to it. */
    device_class_set_parent_realize(dc, smmu_base_realize,
                                    &sbc->parent_realize);
}
|
|
|
|
|
|
|
|
static const TypeInfo smmu_base_info = {
|
|
|
|
.name = TYPE_ARM_SMMU,
|
|
|
|
.parent = TYPE_SYS_BUS_DEVICE,
|
|
|
|
.instance_size = sizeof(SMMUState),
|
|
|
|
.class_data = NULL,
|
|
|
|
.class_size = sizeof(SMMUBaseClass),
|
|
|
|
.class_init = smmu_base_class_init,
|
|
|
|
.abstract = true,
|
|
|
|
};
|
|
|
|
|
|
|
|
static void smmu_base_register_types(void)
|
|
|
|
{
|
|
|
|
type_register_static(&smmu_base_info);
|
|
|
|
}
|
|
|
|
|
|
|
|
type_init(smmu_base_register_types)
|
|
|
|
|