diff --git a/include/qemu/selfmap.h b/include/qemu/selfmap.h
index 7d938945cb..1690a74f4b 100644
--- a/include/qemu/selfmap.h
+++ b/include/qemu/selfmap.h
@@ -20,10 +20,10 @@ typedef struct {
bool is_exec;
bool is_priv;
+ dev_t dev;
+ ino_t inode;
uint64_t offset;
- uint64_t inode;
const char *path;
- char dev[];
} MapInfo;
/**
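Editor's note (not part of the patch): MapInfo now carries the device as a numeric dev_t plus an ino_t inode instead of the raw "major:minor" string, so consumers decode it with major()/minor(). A minimal standalone sketch of that round trip, assuming glibc's <sys/sysmacros.h>:

    #include <stdio.h>
    #include <sys/sysmacros.h>   /* makedev(), major(), minor() on glibc */
    #include <sys/types.h>

    int main(void)
    {
        /* Pack a sample "08:01" device the way read_self_maps() now does. */
        dev_t dev = makedev(0x08, 0x01);

        /* Decode it with the same "%02x:%02x" format later used for the
         * /proc/self/maps output in open_self_maps_4(). */
        printf("%02x:%02x\n", major(dev), minor(dev));
        return 0;
    }
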
diff --git a/linux-user/aarch64/target_proc.h b/linux-user/aarch64/target_proc.h
new file mode 100644
index 0000000000..907df4dcd2
--- /dev/null
+++ b/linux-user/aarch64/target_proc.h
@@ -0,0 +1 @@
+#include "../arm/target_proc.h"
diff --git a/linux-user/alpha/target_proc.h b/linux-user/alpha/target_proc.h
new file mode 100644
index 0000000000..dac37dffc9
--- /dev/null
+++ b/linux-user/alpha/target_proc.h
@@ -0,0 +1,67 @@
+/*
+ * Alpha specific proc functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef ALPHA_TARGET_PROC_H
+#define ALPHA_TARGET_PROC_H
+
+static int open_cpuinfo(CPUArchState *cpu_env, int fd)
+{
+ int max_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ unsigned long cpu_mask;
+ char model[32];
+ const char *p, *q;
+ int t;
+
+ p = object_class_get_name(OBJECT_CLASS(CPU_GET_CLASS(env_cpu(cpu_env))));
+ q = strchr(p, '-');
+ t = q - p;
+ assert(t < sizeof(model));
+ memcpy(model, p, t);
+ model[t] = 0;
+
+ t = sched_getaffinity(getpid(), sizeof(cpu_mask), (cpu_set_t *)&cpu_mask);
+ if (t < 0) {
+ if (num_cpus >= sizeof(cpu_mask) * 8) {
+ cpu_mask = -1;
+ } else {
+ cpu_mask = (1UL << num_cpus) - 1;
+ }
+ }
+
+ dprintf(fd,
+ "cpu\t\t\t: Alpha\n"
+ "cpu model\t\t: %s\n"
+ "cpu variation\t\t: 0\n"
+ "cpu revision\t\t: 0\n"
+ "cpu serial number\t: JA00000000\n"
+ "system type\t\t: QEMU\n"
+ "system variation\t: QEMU_v" QEMU_VERSION "\n"
+ "system revision\t\t: 0\n"
+ "system serial number\t: AY00000000\n"
+ "cycle frequency [Hz]\t: 250000000\n"
+ "timer frequency [Hz]\t: 250.00\n"
+ "page size [bytes]\t: %d\n"
+ "phys. address bits\t: %d\n"
+ "max. addr. space #\t: 255\n"
+ "BogoMIPS\t\t: 2500.00\n"
+ "kernel unaligned acc\t: 0 (pc=0,va=0)\n"
+ "user unaligned acc\t: 0 (pc=0,va=0)\n"
+ "platform string\t\t: AlphaServer QEMU user-mode VM\n"
+ "cpus detected\t\t: %d\n"
+ "cpus active\t\t: %d\n"
+ "cpu active mask\t\t: %016lx\n"
+ "L1 Icache\t\t: n/a\n"
+ "L1 Dcache\t\t: n/a\n"
+ "L2 cache\t\t: n/a\n"
+ "L3 cache\t\t: n/a\n",
+ model, TARGET_PAGE_SIZE, TARGET_PHYS_ADDR_SPACE_BITS,
+ max_cpus, num_cpus, cpu_mask);
+
+ return 0;
+}
+#define HAVE_ARCH_PROC_CPUINFO
+
+#endif /* ALPHA_TARGET_PROC_H */
diff --git a/linux-user/arm/target_proc.h b/linux-user/arm/target_proc.h
new file mode 100644
index 0000000000..ac75af9ca6
--- /dev/null
+++ b/linux-user/arm/target_proc.h
@@ -0,0 +1,101 @@
+/*
+ * Arm specific proc functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef ARM_TARGET_PROC_H
+#define ARM_TARGET_PROC_H
+
+static int open_cpuinfo(CPUArchState *cpu_env, int fd)
+{
+ ARMCPU *cpu = env_archcpu(cpu_env);
+ int arch, midr_rev, midr_part, midr_var, midr_impl;
+ target_ulong elf_hwcap = get_elf_hwcap();
+ target_ulong elf_hwcap2 = get_elf_hwcap2();
+ const char *elf_name;
+ int num_cpus, len_part, len_var;
+
+#if TARGET_BIG_ENDIAN
+# define END_SUFFIX "b"
+#else
+# define END_SUFFIX "l"
+#endif
+
+ arch = 8;
+ elf_name = "v8" END_SUFFIX;
+ midr_rev = FIELD_EX32(cpu->midr, MIDR_EL1, REVISION);
+ midr_part = FIELD_EX32(cpu->midr, MIDR_EL1, PARTNUM);
+ midr_var = FIELD_EX32(cpu->midr, MIDR_EL1, VARIANT);
+ midr_impl = FIELD_EX32(cpu->midr, MIDR_EL1, IMPLEMENTER);
+ len_part = 3;
+ len_var = 1;
+
+#ifndef TARGET_AARCH64
+ /* For simplicity, treat ARMv8 as an arm64 kernel with CONFIG_COMPAT. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ arch = 7;
+ midr_var = (cpu->midr >> 16) & 0x7f;
+ len_var = 2;
+ if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
+ elf_name = "armv7m" END_SUFFIX;
+ } else {
+ elf_name = "armv7" END_SUFFIX;
+ }
+ } else {
+ midr_part = cpu->midr >> 4;
+ len_part = 7;
+ if (arm_feature(&cpu->env, ARM_FEATURE_V6)) {
+ arch = 6;
+ elf_name = "armv6" END_SUFFIX;
+ } else if (arm_feature(&cpu->env, ARM_FEATURE_V5)) {
+ arch = 5;
+ elf_name = "armv5t" END_SUFFIX;
+ } else {
+ arch = 4;
+ elf_name = "armv4" END_SUFFIX;
+ }
+ }
+ }
+#endif
+
+#undef END_SUFFIX
+
+ num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ for (int i = 0; i < num_cpus; i++) {
+ dprintf(fd,
+ "processor\t: %d\n"
+ "model name\t: ARMv%d Processor rev %d (%s)\n"
+ "BogoMIPS\t: 100.00\n"
+ "Features\t:",
+ i, arch, midr_rev, elf_name);
+
+ for (target_ulong j = elf_hwcap; j ; j &= j - 1) {
+ dprintf(fd, " %s", elf_hwcap_str(ctz64(j)));
+ }
+ for (target_ulong j = elf_hwcap2; j ; j &= j - 1) {
+ dprintf(fd, " %s", elf_hwcap2_str(ctz64(j)));
+ }
+
+ dprintf(fd, "\n"
+ "CPU implementer\t: 0x%02x\n"
+ "CPU architecture: %d\n"
+ "CPU variant\t: 0x%0*x\n",
+ midr_impl, arch, len_var, midr_var);
+ if (arch >= 7) {
+ dprintf(fd, "CPU part\t: 0x%0*x\n", len_part, midr_part);
+ }
+ dprintf(fd, "CPU revision\t: %d\n\n", midr_rev);
+ }
+
+ if (arch < 8) {
+ dprintf(fd, "Hardware\t: QEMU v%s %s\n", QEMU_VERSION,
+ cpu->dtb_compatible ? : "");
+ dprintf(fd, "Revision\t: 0000\n");
+ dprintf(fd, "Serial\t\t: 0000000000000000\n");
+ }
+ return 0;
+}
+#define HAVE_ARCH_PROC_CPUINFO
+
+#endif /* ARM_TARGET_PROC_H */
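Editor's note (not part of the patch): the Features line above walks the hwcap mask by repeatedly clearing the lowest set bit with j &= j - 1 and naming each bit via its trailing-zero count. The same idiom in isolation, using the plain compiler builtin in place of QEMU's ctz64():

    #include <stdint.h>
    #include <stdio.h>

    /* Print the index of every set bit, lowest first, mirroring the
     * "for (j = hwcap; j; j &= j - 1)" loops in open_cpuinfo(). */
    static void print_set_bits(uint64_t mask)
    {
        for (uint64_t j = mask; j; j &= j - 1) {
            printf(" %d", __builtin_ctzll(j));
        }
        printf("\n");
    }

    int main(void)
    {
        print_set_bits(0x15);   /* prints " 0 2 4" */
        return 0;
    }
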
diff --git a/linux-user/cris/target_proc.h b/linux-user/cris/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/cris/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 92b981c445..a5b28fa3e7 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -143,8 +143,6 @@ static uint32_t get_elf_hwcap(void)
}
#ifdef TARGET_X86_64
-#define ELF_START_MMAP 0x2aaaaab000ULL
-
#define ELF_CLASS ELFCLASS64
#define ELF_ARCH EM_X86_64
@@ -221,8 +219,6 @@ static bool init_guest_commpage(void)
#endif
#else
-#define ELF_START_MMAP 0x80000000
-
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
@@ -308,8 +304,6 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
#ifndef TARGET_AARCH64
/* 32 bit ARM definitions */
-#define ELF_START_MMAP 0x80000000
-
#define ELF_ARCH EM_ARM
#define ELF_CLASS ELFCLASS32
#define EXSTACK_DEFAULT true
@@ -466,7 +460,7 @@ static bool init_guest_commpage(void)
#define ELF_HWCAP get_elf_hwcap()
#define ELF_HWCAP2 get_elf_hwcap2()
-static uint32_t get_elf_hwcap(void)
+uint32_t get_elf_hwcap(void)
{
ARMCPU *cpu = ARM_CPU(thread_cpu);
uint32_t hwcaps = 0;
@@ -508,7 +502,7 @@ static uint32_t get_elf_hwcap(void)
return hwcaps;
}
-static uint32_t get_elf_hwcap2(void)
+uint32_t get_elf_hwcap2(void)
{
ARMCPU *cpu = ARM_CPU(thread_cpu);
uint32_t hwcaps = 0;
@@ -521,6 +515,49 @@ static uint32_t get_elf_hwcap2(void)
return hwcaps;
}
+const char *elf_hwcap_str(uint32_t bit)
+{
+ static const char *hwcap_str[] = {
+ [__builtin_ctz(ARM_HWCAP_ARM_SWP )] = "swp",
+ [__builtin_ctz(ARM_HWCAP_ARM_HALF )] = "half",
+ [__builtin_ctz(ARM_HWCAP_ARM_THUMB )] = "thumb",
+ [__builtin_ctz(ARM_HWCAP_ARM_26BIT )] = "26bit",
+ [__builtin_ctz(ARM_HWCAP_ARM_FAST_MULT)] = "fast_mult",
+ [__builtin_ctz(ARM_HWCAP_ARM_FPA )] = "fpa",
+ [__builtin_ctz(ARM_HWCAP_ARM_VFP )] = "vfp",
+ [__builtin_ctz(ARM_HWCAP_ARM_EDSP )] = "edsp",
+ [__builtin_ctz(ARM_HWCAP_ARM_JAVA )] = "java",
+ [__builtin_ctz(ARM_HWCAP_ARM_IWMMXT )] = "iwmmxt",
+ [__builtin_ctz(ARM_HWCAP_ARM_CRUNCH )] = "crunch",
+ [__builtin_ctz(ARM_HWCAP_ARM_THUMBEE )] = "thumbee",
+ [__builtin_ctz(ARM_HWCAP_ARM_NEON )] = "neon",
+ [__builtin_ctz(ARM_HWCAP_ARM_VFPv3 )] = "vfpv3",
+ [__builtin_ctz(ARM_HWCAP_ARM_VFPv3D16 )] = "vfpv3d16",
+ [__builtin_ctz(ARM_HWCAP_ARM_TLS )] = "tls",
+ [__builtin_ctz(ARM_HWCAP_ARM_VFPv4 )] = "vfpv4",
+ [__builtin_ctz(ARM_HWCAP_ARM_IDIVA )] = "idiva",
+ [__builtin_ctz(ARM_HWCAP_ARM_IDIVT )] = "idivt",
+ [__builtin_ctz(ARM_HWCAP_ARM_VFPD32 )] = "vfpd32",
+ [__builtin_ctz(ARM_HWCAP_ARM_LPAE )] = "lpae",
+ [__builtin_ctz(ARM_HWCAP_ARM_EVTSTRM )] = "evtstrm",
+ };
+
+ return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
+}
+
+const char *elf_hwcap2_str(uint32_t bit)
+{
+ static const char *hwcap_str[] = {
+ [__builtin_ctz(ARM_HWCAP2_ARM_AES )] = "aes",
+ [__builtin_ctz(ARM_HWCAP2_ARM_PMULL)] = "pmull",
+ [__builtin_ctz(ARM_HWCAP2_ARM_SHA1 )] = "sha1",
+ [__builtin_ctz(ARM_HWCAP2_ARM_SHA2 )] = "sha2",
+ [__builtin_ctz(ARM_HWCAP2_ARM_CRC32)] = "crc32",
+ };
+
+ return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
+}
+
#undef GET_FEATURE
#undef GET_FEATURE_ID
@@ -557,7 +594,6 @@ static const char *get_elf_platform(void)
#else
/* 64 bit ARM definitions */
-#define ELF_START_MMAP 0x80000000
#define ELF_ARCH EM_AARCH64
#define ELF_CLASS ELFCLASS64
@@ -668,7 +704,7 @@ enum {
#define GET_FEATURE_ID(feat, hwcap) \
do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
-static uint32_t get_elf_hwcap(void)
+uint32_t get_elf_hwcap(void)
{
ARMCPU *cpu = ARM_CPU(thread_cpu);
uint32_t hwcaps = 0;
@@ -706,7 +742,7 @@ static uint32_t get_elf_hwcap(void)
return hwcaps;
}
-static uint32_t get_elf_hwcap2(void)
+uint32_t get_elf_hwcap2(void)
{
ARMCPU *cpu = ARM_CPU(thread_cpu);
uint32_t hwcaps = 0;
@@ -741,6 +777,85 @@ static uint32_t get_elf_hwcap2(void)
return hwcaps;
}
+const char *elf_hwcap_str(uint32_t bit)
+{
+ static const char *hwcap_str[] = {
+ [__builtin_ctz(ARM_HWCAP_A64_FP )] = "fp",
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMD )] = "asimd",
+ [__builtin_ctz(ARM_HWCAP_A64_EVTSTRM )] = "evtstrm",
+ [__builtin_ctz(ARM_HWCAP_A64_AES )] = "aes",
+ [__builtin_ctz(ARM_HWCAP_A64_PMULL )] = "pmull",
+ [__builtin_ctz(ARM_HWCAP_A64_SHA1 )] = "sha1",
+ [__builtin_ctz(ARM_HWCAP_A64_SHA2 )] = "sha2",
+ [__builtin_ctz(ARM_HWCAP_A64_CRC32 )] = "crc32",
+ [__builtin_ctz(ARM_HWCAP_A64_ATOMICS )] = "atomics",
+ [__builtin_ctz(ARM_HWCAP_A64_FPHP )] = "fphp",
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMDHP )] = "asimdhp",
+ [__builtin_ctz(ARM_HWCAP_A64_CPUID )] = "cpuid",
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMDRDM)] = "asimdrdm",
+ [__builtin_ctz(ARM_HWCAP_A64_JSCVT )] = "jscvt",
+ [__builtin_ctz(ARM_HWCAP_A64_FCMA )] = "fcma",
+ [__builtin_ctz(ARM_HWCAP_A64_LRCPC )] = "lrcpc",
+ [__builtin_ctz(ARM_HWCAP_A64_DCPOP )] = "dcpop",
+ [__builtin_ctz(ARM_HWCAP_A64_SHA3 )] = "sha3",
+ [__builtin_ctz(ARM_HWCAP_A64_SM3 )] = "sm3",
+ [__builtin_ctz(ARM_HWCAP_A64_SM4 )] = "sm4",
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMDDP )] = "asimddp",
+ [__builtin_ctz(ARM_HWCAP_A64_SHA512 )] = "sha512",
+ [__builtin_ctz(ARM_HWCAP_A64_SVE )] = "sve",
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMDFHM)] = "asimdfhm",
+ [__builtin_ctz(ARM_HWCAP_A64_DIT )] = "dit",
+ [__builtin_ctz(ARM_HWCAP_A64_USCAT )] = "uscat",
+ [__builtin_ctz(ARM_HWCAP_A64_ILRCPC )] = "ilrcpc",
+ [__builtin_ctz(ARM_HWCAP_A64_FLAGM )] = "flagm",
+ [__builtin_ctz(ARM_HWCAP_A64_SSBS )] = "ssbs",
+ [__builtin_ctz(ARM_HWCAP_A64_SB )] = "sb",
+ [__builtin_ctz(ARM_HWCAP_A64_PACA )] = "paca",
+ [__builtin_ctz(ARM_HWCAP_A64_PACG )] = "pacg",
+ };
+
+ return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
+}
+
+const char *elf_hwcap2_str(uint32_t bit)
+{
+ static const char *hwcap_str[] = {
+ [__builtin_ctz(ARM_HWCAP2_A64_DCPODP )] = "dcpodp",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVE2 )] = "sve2",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEAES )] = "sveaes",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEPMULL )] = "svepmull",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEBITPERM )] = "svebitperm",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVESHA3 )] = "svesha3",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVESM4 )] = "svesm4",
+ [__builtin_ctz(ARM_HWCAP2_A64_FLAGM2 )] = "flagm2",
+ [__builtin_ctz(ARM_HWCAP2_A64_FRINT )] = "frint",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEI8MM )] = "svei8mm",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEF32MM )] = "svef32mm",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEF64MM )] = "svef64mm",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEBF16 )] = "svebf16",
+ [__builtin_ctz(ARM_HWCAP2_A64_I8MM )] = "i8mm",
+ [__builtin_ctz(ARM_HWCAP2_A64_BF16 )] = "bf16",
+ [__builtin_ctz(ARM_HWCAP2_A64_DGH )] = "dgh",
+ [__builtin_ctz(ARM_HWCAP2_A64_RNG )] = "rng",
+ [__builtin_ctz(ARM_HWCAP2_A64_BTI )] = "bti",
+ [__builtin_ctz(ARM_HWCAP2_A64_MTE )] = "mte",
+ [__builtin_ctz(ARM_HWCAP2_A64_ECV )] = "ecv",
+ [__builtin_ctz(ARM_HWCAP2_A64_AFP )] = "afp",
+ [__builtin_ctz(ARM_HWCAP2_A64_RPRES )] = "rpres",
+ [__builtin_ctz(ARM_HWCAP2_A64_MTE3 )] = "mte3",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME )] = "sme",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64 )] = "sme_i16i64",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64 )] = "sme_f64f64",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32 )] = "sme_i8i32",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32 )] = "sme_f16f32",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32 )] = "sme_b16f32",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32 )] = "sme_f32f32",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64 )] = "sme_fa64",
+ };
+
+ return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
+}
+
#undef GET_FEATURE_ID
#endif /* not TARGET_AARCH64 */
@@ -749,7 +864,6 @@ static uint32_t get_elf_hwcap2(void)
#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64
-#define ELF_START_MMAP 0x80000000
#define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
| HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
#ifndef TARGET_ABI32
@@ -761,7 +875,6 @@ static uint32_t get_elf_hwcap2(void)
#define ELF_CLASS ELFCLASS64
#define ELF_ARCH EM_SPARCV9
#else
-#define ELF_START_MMAP 0x80000000
#define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
| HWCAP_SPARC_MULDIV)
#define ELF_CLASS ELFCLASS32
@@ -783,7 +896,6 @@ static inline void init_thread(struct target_pt_regs *regs,
#ifdef TARGET_PPC
#define ELF_MACHINE PPC_ELF_MACHINE
-#define ELF_START_MMAP 0x80000000
#if defined(TARGET_PPC64)
@@ -986,8 +1098,6 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en
#ifdef TARGET_LOONGARCH64
-#define ELF_START_MMAP 0x80000000
-
#define ELF_CLASS ELFCLASS64
#define ELF_ARCH EM_LOONGARCH
#define EXSTACK_DEFAULT true
@@ -1078,8 +1188,6 @@ static uint32_t get_elf_hwcap(void)
#ifdef TARGET_MIPS
-#define ELF_START_MMAP 0x80000000
-
#ifdef TARGET_MIPS64
#define ELF_CLASS ELFCLASS64
#else
@@ -1237,8 +1345,6 @@ static uint32_t get_elf_hwcap(void)
#ifdef TARGET_MICROBLAZE
-#define ELF_START_MMAP 0x80000000
-
#define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
#define ELF_CLASS ELFCLASS32
@@ -1279,8 +1385,6 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env
#ifdef TARGET_NIOS2
-#define ELF_START_MMAP 0x80000000
-
#define elf_check_arch(x) ((x) == EM_ALTERA_NIOS2)
#define ELF_CLASS ELFCLASS32
@@ -1376,8 +1480,6 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs,
#ifdef TARGET_OPENRISC
-#define ELF_START_MMAP 0x08000000
-
#define ELF_ARCH EM_OPENRISC
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
@@ -1414,8 +1516,6 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs,
#ifdef TARGET_SH4
-#define ELF_START_MMAP 0x80000000
-
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_SH
@@ -1496,8 +1596,6 @@ static uint32_t get_elf_hwcap(void)
#ifdef TARGET_CRIS
-#define ELF_START_MMAP 0x80000000
-
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_CRIS
@@ -1513,8 +1611,6 @@ static inline void init_thread(struct target_pt_regs *regs,
#ifdef TARGET_M68K
-#define ELF_START_MMAP 0x80000000
-
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_68K
@@ -1564,8 +1660,6 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *e
#ifdef TARGET_ALPHA
-#define ELF_START_MMAP (0x30000000000ULL)
-
#define ELF_CLASS ELFCLASS64
#define ELF_ARCH EM_ALPHA
@@ -1583,8 +1677,6 @@ static inline void init_thread(struct target_pt_regs *regs,
#ifdef TARGET_S390X
-#define ELF_START_MMAP (0x20000000000ULL)
-
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_S390
@@ -1695,7 +1787,6 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs,
#ifdef TARGET_RISCV
-#define ELF_START_MMAP 0x80000000
#define ELF_ARCH EM_RISCV
#ifdef TARGET_RISCV32
@@ -1731,7 +1822,6 @@ static inline void init_thread(struct target_pt_regs *regs,
#ifdef TARGET_HPPA
-#define ELF_START_MMAP 0x80000000
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_PARISC
#define ELF_PLATFORM "PARISC"
@@ -1783,8 +1873,6 @@ static bool init_guest_commpage(void)
#ifdef TARGET_XTENSA
-#define ELF_START_MMAP 0x20000000
-
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_XTENSA
@@ -1850,8 +1938,6 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs,
#ifdef TARGET_HEXAGON
-#define ELF_START_MMAP 0x20000000
-
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_HEXAGON
@@ -3205,7 +3291,7 @@ static void load_elf_image(const char *image_name, int image_fd,
info->start_data = -1;
info->end_data = 0;
/* Usual start for brk is after all sections of the main executable. */
- info->brk = TARGET_PAGE_ALIGN(hiaddr);
+ info->brk = TARGET_PAGE_ALIGN(hiaddr + load_bias);
info->elf_flags = ehdr->e_flags;
prot_exec = PROT_EXEC;
@@ -3568,8 +3654,6 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN;
#endif
- info->start_mmap = (abi_ulong)ELF_START_MMAP;
-
load_elf_image(bprm->filename, bprm->fd, info,
&elf_interpreter, bprm->buf);
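Editor's note on the elf_hwcap_str()/elf_hwcap2_str() tables added above (illustration only; HWCAP_FOO and HWCAP_BAR are made-up stand-ins for the real flags): each single-bit flag macro is turned into an array index at compile time with __builtin_ctz, so the string table stays in sync with the flag definitions automatically:

    #include <stdio.h>

    /* Hypothetical single-bit flags standing in for ARM_HWCAP_*. */
    #define HWCAP_FOO (1u << 0)
    #define HWCAP_BAR (1u << 3)

    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

    static const char *hwcap_str[] = {
        /* __builtin_ctz of a constant power of two is a constant index. */
        [__builtin_ctz(HWCAP_FOO)] = "foo",
        [__builtin_ctz(HWCAP_BAR)] = "bar",
    };

    static const char *hwcap_name(unsigned bit)
    {
        return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
    }

    int main(void)
    {
        printf("%s %s\n", hwcap_name(0), hwcap_name(3));   /* foo bar */
        return 0;
    }

Indices not covered by a flag are NULL, which is why show_cpu_summary() on s390x checks the return value before printing.
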
diff --git a/linux-user/hexagon/target_proc.h b/linux-user/hexagon/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/hexagon/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/hppa/target_proc.h b/linux-user/hppa/target_proc.h
new file mode 100644
index 0000000000..9340c3b6af
--- /dev/null
+++ b/linux-user/hppa/target_proc.h
@@ -0,0 +1,26 @@
+/*
+ * HPPA specific proc functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef HPPA_TARGET_PROC_H
+#define HPPA_TARGET_PROC_H
+
+static int open_cpuinfo(CPUArchState *cpu_env, int fd)
+{
+ int i, num_cpus;
+
+ num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ for (i = 0; i < num_cpus; i++) {
+ dprintf(fd, "processor\t: %d\n", i);
+ dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
+ dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
+ dprintf(fd, "capabilities\t: os32\n");
+ dprintf(fd, "model\t\t: 9000/778/B160L - "
+ "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
+ }
+ return 0;
+}
+#define HAVE_ARCH_PROC_CPUINFO
+
+#endif /* HPPA_TARGET_PROC_H */
diff --git a/linux-user/i386/target_proc.h b/linux-user/i386/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/i386/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/loader.h b/linux-user/loader.h
index 59cbeacf24..324e5c872a 100644
--- a/linux-user/loader.h
+++ b/linux-user/loader.h
@@ -56,9 +56,13 @@ abi_long memcpy_to_target(abi_ulong dest, const void *src,
extern unsigned long guest_stack_size;
-#ifdef TARGET_S390X
+#if defined(TARGET_S390X) || defined(TARGET_AARCH64) || defined(TARGET_ARM)
uint32_t get_elf_hwcap(void);
const char *elf_hwcap_str(uint32_t bit);
#endif
+#if defined(TARGET_AARCH64) || defined(TARGET_ARM)
+uint32_t get_elf_hwcap2(void);
+const char *elf_hwcap2_str(uint32_t bit);
+#endif
#endif /* LINUX_USER_LOADER_H */
diff --git a/linux-user/loongarch64/target_proc.h b/linux-user/loongarch64/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/loongarch64/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/m68k/target_proc.h b/linux-user/m68k/target_proc.h
new file mode 100644
index 0000000000..3df8f28e22
--- /dev/null
+++ b/linux-user/m68k/target_proc.h
@@ -0,0 +1,16 @@
+/*
+ * M68K specific proc functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef M68K_TARGET_PROC_H
+#define M68K_TARGET_PROC_H
+
+static int open_hardware(CPUArchState *cpu_env, int fd)
+{
+ dprintf(fd, "Model:\t\tqemu-m68k\n");
+ return 0;
+}
+#define HAVE_ARCH_PROC_HARDWARE
+
+#endif /* M68K_TARGET_PROC_H */
diff --git a/linux-user/microblaze/target_proc.h b/linux-user/microblaze/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/microblaze/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/mips/target_proc.h b/linux-user/mips/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/mips/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/mips64/target_proc.h b/linux-user/mips64/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/mips64/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 9aab48d4a3..8eaf57b208 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -17,12 +17,14 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
+#include <sys/shm.h>
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"
#include "user-internals.h"
#include "user-mmap.h"
#include "target_mman.h"
+#include "qemu/interval-tree.h"
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
@@ -64,6 +66,44 @@ void mmap_fork_end(int child)
}
}
+/* Protected by mmap_lock. */
+static IntervalTreeRoot shm_regions;
+
+static void shm_region_add(abi_ptr start, abi_ptr last)
+{
+ IntervalTreeNode *i = g_new0(IntervalTreeNode, 1);
+
+ i->start = start;
+ i->last = last;
+ interval_tree_insert(i, &shm_regions);
+}
+
+static abi_ptr shm_region_find(abi_ptr start)
+{
+ IntervalTreeNode *i;
+
+ for (i = interval_tree_iter_first(&shm_regions, start, start); i;
+ i = interval_tree_iter_next(i, start, start)) {
+ if (i->start == start) {
+ return i->last;
+ }
+ }
+ return 0;
+}
+
+static void shm_region_rm_complete(abi_ptr start, abi_ptr last)
+{
+ IntervalTreeNode *i, *n;
+
+ for (i = interval_tree_iter_first(&shm_regions, start, last); i; i = n) {
+ n = interval_tree_iter_next(i, start, last);
+ if (i->start >= start && i->last <= last) {
+ interval_tree_remove(i, &shm_regions);
+ g_free(i);
+ }
+ }
+}
+
/*
* Validate target prot bitmask.
* Return the prot bitmask for the host in *HOST_PROT.
@@ -720,6 +760,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
page_set_flags(passthrough_last + 1, last, page_flags);
}
}
+ shm_region_rm_complete(start, last);
the_end:
trace_target_mmap_complete(start);
if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
@@ -817,6 +858,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
mmap_lock();
mmap_reserve_or_unmap(start, len);
page_set_flags(start, start + len - 1, 0);
+ shm_region_rm_complete(start, start + len - 1);
mmap_unlock();
return 0;
@@ -906,8 +948,10 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
new_addr = h2g(host_addr);
prot = page_get_flags(old_addr);
page_set_flags(old_addr, old_addr + old_size - 1, 0);
+ shm_region_rm_complete(old_addr, old_addr + old_size - 1);
page_set_flags(new_addr, new_addr + new_size - 1,
prot | PAGE_VALID | PAGE_RESET);
+ shm_region_rm_complete(new_addr, new_addr + new_size - 1);
}
mmap_unlock();
return new_addr;
@@ -981,3 +1025,127 @@ abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
return ret;
}
+
+#ifndef TARGET_FORCE_SHMLBA
+/*
+ * For most architectures, SHMLBA is the same as the page size;
+ * some architectures have larger values, in which case they should
+ * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
+ * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
+ * and defining its own value for SHMLBA.
+ *
+ * The kernel also permits SHMLBA to be set by the architecture to a
+ * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
+ * this means that addresses are rounded to the large size if
+ * SHM_RND is set but addresses not aligned to that size are not rejected
+ * as long as they are at least page-aligned. Since the only architecture
+ * which uses this is ia64 this code doesn't provide for that oddity.
+ */
+static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
+{
+ return TARGET_PAGE_SIZE;
+}
+#endif
+
+abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
+ abi_ulong shmaddr, int shmflg)
+{
+ CPUState *cpu = env_cpu(cpu_env);
+ abi_ulong raddr;
+ struct shmid_ds shm_info;
+ int ret;
+ abi_ulong shmlba;
+
+ /* shmat pointers are always untagged */
+
+ /* find out the length of the shared memory segment */
+ ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
+ if (is_error(ret)) {
+ /* can't get length, bail out */
+ return ret;
+ }
+
+ shmlba = target_shmlba(cpu_env);
+
+ if (shmaddr & (shmlba - 1)) {
+ if (shmflg & SHM_RND) {
+ shmaddr &= ~(shmlba - 1);
+ } else {
+ return -TARGET_EINVAL;
+ }
+ }
+ if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
+ return -TARGET_EINVAL;
+ }
+
+ WITH_MMAP_LOCK_GUARD() {
+ void *host_raddr;
+ abi_ulong last;
+
+ if (shmaddr) {
+ host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
+ } else {
+ abi_ulong mmap_start;
+
+ /* In order to use the host shmat, we need to honor host SHMLBA. */
+ mmap_start = mmap_find_vma(0, shm_info.shm_segsz,
+ MAX(SHMLBA, shmlba));
+
+ if (mmap_start == -1) {
+ return -TARGET_ENOMEM;
+ }
+ host_raddr = shmat(shmid, g2h_untagged(mmap_start),
+ shmflg | SHM_REMAP);
+ }
+
+ if (host_raddr == (void *)-1) {
+ return get_errno(-1);
+ }
+ raddr = h2g(host_raddr);
+ last = raddr + shm_info.shm_segsz - 1;
+
+ page_set_flags(raddr, last,
+ PAGE_VALID | PAGE_RESET | PAGE_READ |
+ (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
+
+ shm_region_rm_complete(raddr, last);
+ shm_region_add(raddr, last);
+ }
+
+ /*
+ * We're mapping shared memory, so ensure we generate code for parallel
+ * execution and flush old translations. This will work up to the level
+ * supported by the host -- anything that requires EXCP_ATOMIC will not
+ * be atomic with respect to an external process.
+ */
+ if (!(cpu->tcg_cflags & CF_PARALLEL)) {
+ cpu->tcg_cflags |= CF_PARALLEL;
+ tb_flush(cpu);
+ }
+
+ return raddr;
+}
+
+abi_long target_shmdt(abi_ulong shmaddr)
+{
+ abi_long rv;
+
+ /* shmdt pointers are always untagged */
+
+ WITH_MMAP_LOCK_GUARD() {
+ abi_ulong last = shm_region_find(shmaddr);
+ if (last == 0) {
+ return -TARGET_EINVAL;
+ }
+
+ rv = get_errno(shmdt(g2h_untagged(shmaddr)));
+ if (rv == 0) {
+ abi_ulong size = last - shmaddr + 1;
+
+ page_set_flags(shmaddr, last, 0);
+ shm_region_rm_complete(shmaddr, last);
+ mmap_reserve_or_unmap(shmaddr, size);
+ }
+ }
+ return rv;
+}
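Editor's note (not part of the patch): the alignment handling in target_shmat() mirrors the kernel's shmat semantics: an attach address that is not SHMLBA-aligned is rejected unless SHM_RND is set, in which case it is rounded down to the SHMLBA boundary. The arithmetic in isolation, with an illustrative SHMLBA value and SHM_RND defined locally to keep the sketch self-contained:

    #include <stdio.h>

    #define EX_SHMLBA  0x4000u   /* illustrative; the patch uses target_shmlba() */
    #define EX_SHM_RND 020000    /* Linux SHM_RND value from <sys/shm.h> */

    /* Return the attach address to use, or 0 for the -TARGET_EINVAL case. */
    static unsigned long attach_addr(unsigned long shmaddr, int shmflg)
    {
        if (shmaddr & (EX_SHMLBA - 1)) {
            if (shmflg & EX_SHM_RND) {
                shmaddr &= ~(unsigned long)(EX_SHMLBA - 1);  /* round down */
            } else {
                return 0;                                    /* reject */
            }
        }
        return shmaddr;
    }

    int main(void)
    {
        printf("%#lx\n", attach_addr(0x12345678, EX_SHM_RND)); /* 0x12344000 */
        printf("%#lx\n", attach_addr(0x12345678, 0));          /* 0 (EINVAL) */
        return 0;
    }
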
diff --git a/linux-user/nios2/target_proc.h b/linux-user/nios2/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/nios2/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/openrisc/target_proc.h b/linux-user/openrisc/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/openrisc/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/ppc/target_proc.h b/linux-user/ppc/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/ppc/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index 4f8b55e2fb..12f638336a 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -30,7 +30,6 @@ struct image_info {
abi_ulong start_data;
abi_ulong end_data;
abi_ulong brk;
- abi_ulong start_mmap;
abi_ulong start_stack;
abi_ulong stack_limit;
abi_ulong entry;
diff --git a/linux-user/riscv/target_proc.h b/linux-user/riscv/target_proc.h
new file mode 100644
index 0000000000..c77c003d65
--- /dev/null
+++ b/linux-user/riscv/target_proc.h
@@ -0,0 +1,37 @@
+/*
+ * RISC-V specific proc functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef RISCV_TARGET_PROC_H
+#define RISCV_TARGET_PROC_H
+
+static int open_cpuinfo(CPUArchState *cpu_env, int fd)
+{
+ int i;
+ int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ RISCVCPU *cpu = env_archcpu(cpu_env);
+ const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
+ char *isa_string = riscv_isa_string(cpu);
+ const char *mmu;
+
+ if (cfg->mmu) {
+ mmu = (cpu_env->xl == MXL_RV32) ? "sv32" : "sv48";
+ } else {
+ mmu = "none";
+ }
+
+ for (i = 0; i < num_cpus; i++) {
+ dprintf(fd, "processor\t: %d\n", i);
+ dprintf(fd, "hart\t\t: %d\n", i);
+ dprintf(fd, "isa\t\t: %s\n", isa_string);
+ dprintf(fd, "mmu\t\t: %s\n", mmu);
+ dprintf(fd, "uarch\t\t: qemu\n\n");
+ }
+
+ g_free(isa_string);
+ return 0;
+}
+#define HAVE_ARCH_PROC_CPUINFO
+
+#endif /* RISCV_TARGET_PROC_H */
diff --git a/linux-user/s390x/target_proc.h b/linux-user/s390x/target_proc.h
new file mode 100644
index 0000000000..a4a4821ea5
--- /dev/null
+++ b/linux-user/s390x/target_proc.h
@@ -0,0 +1,109 @@
+/*
+ * S390X specific proc functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef S390X_TARGET_PROC_H
+#define S390X_TARGET_PROC_H
+
+/*
+ * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
+ * show in /proc/cpuinfo.
+ *
+ * Skip the following in order to match the missing support in op_ecag():
+ * - show_cacheinfo().
+ * - show_cpu_topology().
+ * - show_cpu_mhz().
+ *
+ * Use fixed values for certain fields:
+ * - bogomips per cpu - from a qemu-system-s390x run.
+ * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
+ *
+ * Keep the code structure close to arch/s390/kernel/processor.c.
+ */
+
+static void show_facilities(int fd)
+{
+ size_t sizeof_stfl_bytes = 2048;
+ g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
+ unsigned int bit;
+
+ dprintf(fd, "facilities :");
+ s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
+ for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
+ if (test_be_bit(bit, stfl_bytes)) {
+ dprintf(fd, " %d", bit);
+ }
+ }
+ dprintf(fd, "\n");
+}
+
+static int cpu_ident(unsigned long n)
+{
+ return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
+ n);
+}
+
+static void show_cpu_summary(CPUArchState *cpu_env, int fd)
+{
+ S390CPUModel *model = env_archcpu(cpu_env)->model;
+ int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ uint32_t elf_hwcap = get_elf_hwcap();
+ const char *hwcap_str;
+ int i;
+
+ dprintf(fd, "vendor_id : IBM/S390\n"
+ "# processors : %i\n"
+ "bogomips per cpu: 13370.00\n",
+ num_cpus);
+ dprintf(fd, "max thread id : 0\n");
+ dprintf(fd, "features\t: ");
+ for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
+ if (!(elf_hwcap & (1 << i))) {
+ continue;
+ }
+ hwcap_str = elf_hwcap_str(i);
+ if (hwcap_str) {
+ dprintf(fd, "%s ", hwcap_str);
+ }
+ }
+ dprintf(fd, "\n");
+ show_facilities(fd);
+ for (i = 0; i < num_cpus; i++) {
+ dprintf(fd, "processor %d: "
+ "version = %02X, "
+ "identification = %06X, "
+ "machine = %04X\n",
+ i, model->cpu_ver, cpu_ident(i), model->def->type);
+ }
+}
+
+static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
+{
+ S390CPUModel *model = env_archcpu(cpu_env)->model;
+
+ dprintf(fd, "version : %02X\n", model->cpu_ver);
+ dprintf(fd, "identification : %06X\n", cpu_ident(n));
+ dprintf(fd, "machine : %04X\n", model->def->type);
+}
+
+static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
+{
+ dprintf(fd, "\ncpu number : %ld\n", n);
+ show_cpu_ids(cpu_env, fd, n);
+}
+
+static int open_cpuinfo(CPUArchState *cpu_env, int fd)
+{
+ int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ int i;
+
+ show_cpu_summary(cpu_env, fd);
+ for (i = 0; i < num_cpus; i++) {
+ show_cpuinfo(cpu_env, fd, i);
+ }
+ return 0;
+}
+#define HAVE_ARCH_PROC_CPUINFO
+
+#endif /* S390X_TARGET_PROC_H */
diff --git a/linux-user/sh4/target_proc.h b/linux-user/sh4/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/sh4/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/sparc/target_proc.h b/linux-user/sparc/target_proc.h
new file mode 100644
index 0000000000..3bb3134a47
--- /dev/null
+++ b/linux-user/sparc/target_proc.h
@@ -0,0 +1,16 @@
+/*
+ * Sparc specific proc functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef SPARC_TARGET_PROC_H
+#define SPARC_TARGET_PROC_H
+
+static int open_cpuinfo(CPUArchState *cpu_env, int fd)
+{
+ dprintf(fd, "type\t\t: sun4u\n");
+ return 0;
+}
+#define HAVE_ARCH_PROC_CPUINFO
+
+#endif /* SPARC_TARGET_PROC_H */
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 7ccd3affbe..dac0641bab 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -3725,14 +3725,6 @@ static abi_long do_socketcall(int num, abi_ulong vptr)
}
#endif
-#define N_SHM_REGIONS 32
-
-static struct shm_region {
- abi_ulong start;
- abi_ulong size;
- bool in_use;
-} shm_regions[N_SHM_REGIONS];
-
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
@@ -4482,133 +4474,6 @@ static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
return ret;
}
-#ifndef TARGET_FORCE_SHMLBA
-/* For most architectures, SHMLBA is the same as the page size;
- * some architectures have larger values, in which case they should
- * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
- * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
- * and defining its own value for SHMLBA.
- *
- * The kernel also permits SHMLBA to be set by the architecture to a
- * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
- * this means that addresses are rounded to the large size if
- * SHM_RND is set but addresses not aligned to that size are not rejected
- * as long as they are at least page-aligned. Since the only architecture
- * which uses this is ia64 this code doesn't provide for that oddity.
- */
-static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
-{
- return TARGET_PAGE_SIZE;
-}
-#endif
-
-static abi_ulong do_shmat(CPUArchState *cpu_env, int shmid,
- abi_ulong shmaddr, int shmflg)
-{
- CPUState *cpu = env_cpu(cpu_env);
- abi_ulong raddr;
- void *host_raddr;
- struct shmid_ds shm_info;
- int i, ret;
- abi_ulong shmlba;
-
- /* shmat pointers are always untagged */
-
- /* find out the length of the shared memory segment */
- ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
- if (is_error(ret)) {
- /* can't get length, bail out */
- return ret;
- }
-
- shmlba = target_shmlba(cpu_env);
-
- if (shmaddr & (shmlba - 1)) {
- if (shmflg & SHM_RND) {
- shmaddr &= ~(shmlba - 1);
- } else {
- return -TARGET_EINVAL;
- }
- }
- if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
- return -TARGET_EINVAL;
- }
-
- mmap_lock();
-
- /*
- * We're mapping shared memory, so ensure we generate code for parallel
- * execution and flush old translations. This will work up to the level
- * supported by the host -- anything that requires EXCP_ATOMIC will not
- * be atomic with respect to an external process.
- */
- if (!(cpu->tcg_cflags & CF_PARALLEL)) {
- cpu->tcg_cflags |= CF_PARALLEL;
- tb_flush(cpu);
- }
-
- if (shmaddr)
- host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
- else {
- abi_ulong mmap_start;
-
- /* In order to use the host shmat, we need to honor host SHMLBA. */
- mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
-
- if (mmap_start == -1) {
- errno = ENOMEM;
- host_raddr = (void *)-1;
- } else
- host_raddr = shmat(shmid, g2h_untagged(mmap_start),
- shmflg | SHM_REMAP);
- }
-
- if (host_raddr == (void *)-1) {
- mmap_unlock();
- return get_errno((intptr_t)host_raddr);
- }
- raddr = h2g((uintptr_t)host_raddr);
-
- page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
- PAGE_VALID | PAGE_RESET | PAGE_READ |
- (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
-
- for (i = 0; i < N_SHM_REGIONS; i++) {
- if (!shm_regions[i].in_use) {
- shm_regions[i].in_use = true;
- shm_regions[i].start = raddr;
- shm_regions[i].size = shm_info.shm_segsz;
- break;
- }
- }
-
- mmap_unlock();
- return raddr;
-}
-
-static inline abi_long do_shmdt(abi_ulong shmaddr)
-{
- int i;
- abi_long rv;
-
- /* shmdt pointers are always untagged */
-
- mmap_lock();
-
- for (i = 0; i < N_SHM_REGIONS; ++i) {
- if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
- shm_regions[i].in_use = false;
- page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
- break;
- }
- }
- rv = get_errno(shmdt(g2h_untagged(shmaddr)));
-
- mmap_unlock();
-
- return rv;
-}
-
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
@@ -4695,7 +4560,7 @@ static abi_long do_ipc(CPUArchState *cpu_env,
default:
{
abi_ulong raddr;
- raddr = do_shmat(cpu_env, first, ptr, second);
+ raddr = target_shmat(cpu_env, first, ptr, second);
if (is_error(raddr))
return get_errno(raddr);
if (put_user_ual(raddr, third))
@@ -4708,7 +4573,7 @@ static abi_long do_ipc(CPUArchState *cpu_env,
}
break;
case IPCOP_shmdt:
- ret = do_shmdt(ptr);
+ ret = target_shmdt(ptr);
break;
case IPCOP_shmget:
@@ -8095,12 +7960,68 @@ static int open_self_cmdline(CPUArchState *cpu_env, int fd)
return 0;
}
-static void show_smaps(int fd, unsigned long size)
-{
- unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
- unsigned long size_kb = size >> 10;
+struct open_self_maps_data {
+ TaskState *ts;
+ IntervalTreeRoot *host_maps;
+ int fd;
+ bool smaps;
+};
- dprintf(fd, "Size: %lu kB\n"
+/*
+ * Subroutine to output one line of /proc/self/maps,
+ * or one region of /proc/self/smaps.
+ */
+
+#ifdef TARGET_HPPA
+# define test_stack(S, E, L) (E == L)
+#else
+# define test_stack(S, E, L) (S == L)
+#endif
+
+static void open_self_maps_4(const struct open_self_maps_data *d,
+ const MapInfo *mi, abi_ptr start,
+ abi_ptr end, unsigned flags)
+{
+ const struct image_info *info = d->ts->info;
+ const char *path = mi->path;
+ uint64_t offset;
+ int fd = d->fd;
+ int count;
+
+ if (test_stack(start, end, info->stack_limit)) {
+ path = "[stack]";
+ } else if (start == info->brk) {
+ path = "[heap]";
+ }
+
+ /* Except null device (MAP_ANON), adjust offset for this fragment. */
+ offset = mi->offset;
+ if (mi->dev) {
+ uintptr_t hstart = (uintptr_t)g2h_untagged(start);
+ offset += hstart - mi->itree.start;
+ }
+
+ count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
+ " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
+ start, end,
+ (flags & PAGE_READ) ? 'r' : '-',
+ (flags & PAGE_WRITE_ORG) ? 'w' : '-',
+ (flags & PAGE_EXEC) ? 'x' : '-',
+ mi->is_priv ? 'p' : 's',
+ offset, major(mi->dev), minor(mi->dev),
+ (uint64_t)mi->inode);
+ if (path) {
+ dprintf(fd, "%*s%s\n", 73 - count, "", path);
+ } else {
+ dprintf(fd, "\n");
+ }
+
+ if (d->smaps) {
+ unsigned long size = end - start;
+ unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
+ unsigned long size_kb = size >> 10;
+
+ dprintf(fd, "Size: %lu kB\n"
"KernelPageSize: %lu kB\n"
"MMUPageSize: %lu kB\n"
"Rss: 0 kB\n"
@@ -8111,7 +8032,7 @@ static void show_smaps(int fd, unsigned long size)
"Private_Clean: 0 kB\n"
"Private_Dirty: 0 kB\n"
"Referenced: 0 kB\n"
- "Anonymous: 0 kB\n"
+ "Anonymous: %lu kB\n"
"LazyFree: 0 kB\n"
"AnonHugePages: 0 kB\n"
"ShmemPmdMapped: 0 kB\n"
@@ -8121,89 +8042,76 @@ static void show_smaps(int fd, unsigned long size)
"Swap: 0 kB\n"
"SwapPss: 0 kB\n"
"Locked: 0 kB\n"
- "THPeligible: 0\n", size_kb, page_size_kb, page_size_kb);
+ "THPeligible: 0\n"
+ "VmFlags:%s%s%s%s%s%s%s%s\n",
+ size_kb, page_size_kb, page_size_kb,
+ (flags & PAGE_ANON ? size_kb : 0),
+ (flags & PAGE_READ) ? " rd" : "",
+ (flags & PAGE_WRITE_ORG) ? " wr" : "",
+ (flags & PAGE_EXEC) ? " ex" : "",
+ mi->is_priv ? "" : " sh",
+ (flags & PAGE_READ) ? " mr" : "",
+ (flags & PAGE_WRITE_ORG) ? " mw" : "",
+ (flags & PAGE_EXEC) ? " me" : "",
+ mi->is_priv ? "" : " ms");
+ }
}
-static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
+/*
+ * Callback for walk_memory_regions, when read_self_maps() fails.
+ * Proceed without the benefit of host /proc/self/maps cross-check.
+ */
+static int open_self_maps_3(void *opaque, target_ulong guest_start,
+ target_ulong guest_end, unsigned long flags)
{
- CPUState *cpu = env_cpu(cpu_env);
- TaskState *ts = cpu->opaque;
- IntervalTreeRoot *map_info = read_self_maps();
- IntervalTreeNode *s;
- int count;
+ static const MapInfo mi = { .is_priv = true };
- for (s = interval_tree_iter_first(map_info, 0, -1); s;
- s = interval_tree_iter_next(s, 0, -1)) {
- MapInfo *e = container_of(s, MapInfo, itree);
+ open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
+ return 0;
+}
- if (h2g_valid(e->itree.start)) {
- unsigned long min = e->itree.start;
- unsigned long max = e->itree.last + 1;
- int flags = page_get_flags(h2g(min));
- const char *path;
+/*
+ * Callback for walk_memory_regions, when read_self_maps() succeeds.
+ */
+static int open_self_maps_2(void *opaque, target_ulong guest_start,
+ target_ulong guest_end, unsigned long flags)
+{
+ const struct open_self_maps_data *d = opaque;
+ uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
+ uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
- max = h2g_valid(max - 1) ?
- max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
+ while (1) {
+ IntervalTreeNode *n =
+ interval_tree_iter_first(d->host_maps, host_start, host_start);
+ MapInfo *mi = container_of(n, MapInfo, itree);
+ uintptr_t this_hlast = MIN(host_last, n->last);
+ target_ulong this_gend = h2g(this_hlast) + 1;
- if (!page_check_range(h2g(min), max - min, flags)) {
- continue;
- }
+ open_self_maps_4(d, mi, guest_start, this_gend, flags);
-#ifdef TARGET_HPPA
- if (h2g(max) == ts->info->stack_limit) {
-#else
- if (h2g(min) == ts->info->stack_limit) {
-#endif
- path = "[stack]";
- } else {
- path = e->path;
- }
-
- count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
- " %c%c%c%c %08" PRIx64 " %s %"PRId64,
- h2g(min), h2g(max - 1) + 1,
- (flags & PAGE_READ) ? 'r' : '-',
- (flags & PAGE_WRITE_ORG) ? 'w' : '-',
- (flags & PAGE_EXEC) ? 'x' : '-',
- e->is_priv ? 'p' : 's',
- (uint64_t) e->offset, e->dev, e->inode);
- if (path) {
- dprintf(fd, "%*s%s\n", 73 - count, "", path);
- } else {
- dprintf(fd, "\n");
- }
- if (smaps) {
- show_smaps(fd, max - min);
- dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
- (flags & PAGE_READ) ? " rd" : "",
- (flags & PAGE_WRITE_ORG) ? " wr" : "",
- (flags & PAGE_EXEC) ? " ex" : "",
- e->is_priv ? "" : " sh",
- (flags & PAGE_READ) ? " mr" : "",
- (flags & PAGE_WRITE_ORG) ? " mw" : "",
- (flags & PAGE_EXEC) ? " me" : "",
- e->is_priv ? "" : " ms");
- }
+ if (this_hlast == host_last) {
+ return 0;
}
+ host_start = this_hlast + 1;
+ guest_start = h2g(host_start);
}
+}
- free_self_maps(map_info);
+static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
+{
+ struct open_self_maps_data d = {
+ .ts = env_cpu(env)->opaque,
+ .host_maps = read_self_maps(),
+ .fd = fd,
+ .smaps = smaps
+ };
-#ifdef TARGET_VSYSCALL_PAGE
- /*
- * We only support execution from the vsyscall page.
- * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
- */
- count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
- " --xp 00000000 00:00 0",
- TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
- dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
- if (smaps) {
- show_smaps(fd, TARGET_PAGE_SIZE);
- dprintf(fd, "VmFlags: ex\n");
+ if (d.host_maps) {
+ walk_memory_regions(&d, open_self_maps_2);
+ free_self_maps(d.host_maps);
+ } else {
+ walk_memory_regions(&d, open_self_maps_3);
}
-#endif
-
return 0;
}
@@ -8339,9 +8247,11 @@ void target_exception_dump(CPUArchState *env, const char *fmt, int code)
}
}
+#include "target_proc.h"
+
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
- defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
- defined(TARGET_RISCV) || defined(TARGET_S390X)
+ defined(HAVE_ARCH_PROC_CPUINFO) || \
+ defined(HAVE_ARCH_PROC_HARDWARE)
static int is_proc(const char *filename, const char *entry)
{
return strcmp(filename, entry) == 0;
@@ -8393,171 +8303,6 @@ static int open_net_route(CPUArchState *cpu_env, int fd)
}
#endif
-#if defined(TARGET_SPARC)
-static int open_cpuinfo(CPUArchState *cpu_env, int fd)
-{
- dprintf(fd, "type\t\t: sun4u\n");
- return 0;
-}
-#endif
-
-#if defined(TARGET_HPPA)
-static int open_cpuinfo(CPUArchState *cpu_env, int fd)
-{
- int i, num_cpus;
-
- num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
- for (i = 0; i < num_cpus; i++) {
- dprintf(fd, "processor\t: %d\n", i);
- dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
- dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
- dprintf(fd, "capabilities\t: os32\n");
- dprintf(fd, "model\t\t: 9000/778/B160L - "
- "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
- }
- return 0;
-}
-#endif
-
-#if defined(TARGET_RISCV)
-static int open_cpuinfo(CPUArchState *cpu_env, int fd)
-{
- int i;
- int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
- RISCVCPU *cpu = env_archcpu(cpu_env);
- const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
- char *isa_string = riscv_isa_string(cpu);
- const char *mmu;
-
- if (cfg->mmu) {
- mmu = (cpu_env->xl == MXL_RV32) ? "sv32" : "sv48";
- } else {
- mmu = "none";
- }
-
- for (i = 0; i < num_cpus; i++) {
- dprintf(fd, "processor\t: %d\n", i);
- dprintf(fd, "hart\t\t: %d\n", i);
- dprintf(fd, "isa\t\t: %s\n", isa_string);
- dprintf(fd, "mmu\t\t: %s\n", mmu);
- dprintf(fd, "uarch\t\t: qemu\n\n");
- }
-
- g_free(isa_string);
- return 0;
-}
-#endif
-
-#if defined(TARGET_S390X)
-/*
- * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
- * show in /proc/cpuinfo.
- *
- * Skip the following in order to match the missing support in op_ecag():
- * - show_cacheinfo().
- * - show_cpu_topology().
- * - show_cpu_mhz().
- *
- * Use fixed values for certain fields:
- * - bogomips per cpu - from a qemu-system-s390x run.
- * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
- *
- * Keep the code structure close to arch/s390/kernel/processor.c.
- */
-
-static void show_facilities(int fd)
-{
- size_t sizeof_stfl_bytes = 2048;
- g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
- unsigned int bit;
-
- dprintf(fd, "facilities :");
- s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
- for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
- if (test_be_bit(bit, stfl_bytes)) {
- dprintf(fd, " %d", bit);
- }
- }
- dprintf(fd, "\n");
-}
-
-static int cpu_ident(unsigned long n)
-{
- return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
- n);
-}
-
-static void show_cpu_summary(CPUArchState *cpu_env, int fd)
-{
- S390CPUModel *model = env_archcpu(cpu_env)->model;
- int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
- uint32_t elf_hwcap = get_elf_hwcap();
- const char *hwcap_str;
- int i;
-
- dprintf(fd, "vendor_id : IBM/S390\n"
- "# processors : %i\n"
- "bogomips per cpu: 13370.00\n",
- num_cpus);
- dprintf(fd, "max thread id : 0\n");
- dprintf(fd, "features\t: ");
- for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
- if (!(elf_hwcap & (1 << i))) {
- continue;
- }
- hwcap_str = elf_hwcap_str(i);
- if (hwcap_str) {
- dprintf(fd, "%s ", hwcap_str);
- }
- }
- dprintf(fd, "\n");
- show_facilities(fd);
- for (i = 0; i < num_cpus; i++) {
- dprintf(fd, "processor %d: "
- "version = %02X, "
- "identification = %06X, "
- "machine = %04X\n",
- i, model->cpu_ver, cpu_ident(i), model->def->type);
- }
-}
-
-static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
-{
- S390CPUModel *model = env_archcpu(cpu_env)->model;
-
- dprintf(fd, "version : %02X\n", model->cpu_ver);
- dprintf(fd, "identification : %06X\n", cpu_ident(n));
- dprintf(fd, "machine : %04X\n", model->def->type);
-}
-
-static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
-{
- dprintf(fd, "\ncpu number : %ld\n", n);
- show_cpu_ids(cpu_env, fd, n);
-}
-
-static int open_cpuinfo(CPUArchState *cpu_env, int fd)
-{
- int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
- int i;
-
- show_cpu_summary(cpu_env, fd);
- for (i = 0; i < num_cpus; i++) {
- show_cpuinfo(cpu_env, fd, i);
- }
- return 0;
-}
-#endif
-
-#if defined(TARGET_M68K)
-static int open_hardware(CPUArchState *cpu_env, int fd)
-{
- dprintf(fd, "Model:\t\tqemu-m68k\n");
- return 0;
-}
-#endif
-
-
int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *fname,
int flags, mode_t mode, bool safe)
{
@@ -8578,11 +8323,10 @@ int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *fname,
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
{ "/proc/net/route", open_net_route, is_proc },
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
- defined(TARGET_RISCV) || defined(TARGET_S390X)
+#if defined(HAVE_ARCH_PROC_CPUINFO)
{ "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
-#if defined(TARGET_M68K)
+#if defined(HAVE_ARCH_PROC_HARDWARE)
{ "/proc/hardware", open_hardware, is_proc },
#endif
{ NULL, NULL, NULL }
@@ -11129,11 +10873,11 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
#endif
#ifdef TARGET_NR_shmat
case TARGET_NR_shmat:
- return do_shmat(cpu_env, arg1, arg2, arg3);
+ return target_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
case TARGET_NR_shmdt:
- return do_shmdt(arg1);
+ return target_shmdt(arg1);
#endif
case TARGET_NR_fsync:
return get_errno(fsync(arg1));
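Editor's note on open_self_maps_4() above (illustration only; the field values below are made up): the pathname column is aligned by treating the return value of dprintf() as the current column and padding with "%*s" up to column 73, matching the kernel's /proc/self/maps layout. The same trick with printf:

    #include <inttypes.h>
    #include <stdio.h>

    /* Emit one /proc/self/maps style line, padding the path to column 73. */
    static void print_maps_line(uint64_t start, uint64_t end, const char *path)
    {
        int count = printf("%08" PRIx64 "-%08" PRIx64
                           " r-xp %08x %02x:%02x %" PRId64,
                           start, end, 0, 0x08, 0x01, (int64_t)12345);
        if (path) {
            printf("%*s%s\n", 73 - count, "", path);
        } else {
            printf("\n");
        }
    }

    int main(void)
    {
        print_maps_line(0x400000, 0x401000, "/usr/bin/true");
        return 0;
    }
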
diff --git a/linux-user/user-mmap.h b/linux-user/user-mmap.h
index 0f4883eb57..b94bcdcf83 100644
--- a/linux-user/user-mmap.h
+++ b/linux-user/user-mmap.h
@@ -58,4 +58,8 @@ abi_ulong mmap_find_vma(abi_ulong, abi_ulong, abi_ulong);
void mmap_fork_start(void);
void mmap_fork_end(int child);
+abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
+ abi_ulong shmaddr, int shmflg);
+abi_long target_shmdt(abi_ulong shmaddr);
+
#endif /* LINUX_USER_USER_MMAP_H */
diff --git a/linux-user/x86_64/target_proc.h b/linux-user/x86_64/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/x86_64/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/linux-user/xtensa/target_proc.h b/linux-user/xtensa/target_proc.h
new file mode 100644
index 0000000000..43fe29ca72
--- /dev/null
+++ b/linux-user/xtensa/target_proc.h
@@ -0,0 +1 @@
+/* No target-specific /proc support */
diff --git a/util/selfmap.c b/util/selfmap.c
index 4db5b42651..483cb617e2 100644
--- a/util/selfmap.c
+++ b/util/selfmap.c
@@ -30,19 +30,21 @@ IntervalTreeRoot *read_self_maps(void)
if (nfields > 4) {
uint64_t start, end, offset, inode;
+ unsigned dev_maj, dev_min;
int errors = 0;
const char *p;
errors |= qemu_strtou64(fields[0], &p, 16, &start);
errors |= qemu_strtou64(p + 1, NULL, 16, &end);
errors |= qemu_strtou64(fields[2], NULL, 16, &offset);
+ errors |= qemu_strtoui(fields[3], &p, 16, &dev_maj);
+ errors |= qemu_strtoui(p + 1, NULL, 16, &dev_min);
errors |= qemu_strtou64(fields[4], NULL, 10, &inode);
if (!errors) {
- size_t dev_len, path_len;
+ size_t path_len;
MapInfo *e;
- dev_len = strlen(fields[3]) + 1;
if (nfields == 6) {
p = fields[5];
p += strspn(p, " ");
@@ -52,11 +54,12 @@ IntervalTreeRoot *read_self_maps(void)
path_len = 0;
}
- e = g_malloc0(sizeof(*e) + dev_len + path_len);
+ e = g_malloc0(sizeof(*e) + path_len);
e->itree.start = start;
e->itree.last = end - 1;
e->offset = offset;
+ e->dev = makedev(dev_maj, dev_min);
e->inode = inode;
e->is_read = fields[1][0] == 'r';
@@ -64,9 +67,8 @@ IntervalTreeRoot *read_self_maps(void)
e->is_exec = fields[1][2] == 'x';
e->is_priv = fields[1][3] == 'p';
- memcpy(e->dev, fields[3], dev_len);
if (path_len) {
- e->path = memcpy(e->dev + dev_len, p, path_len);
+ e->path = memcpy(e + 1, p, path_len);
}
interval_tree_insert(&e->itree, root);
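
Editor's note (not part of the patch): the new parsing in read_self_maps() splits the hexadecimal "major:minor" device field numerically and packs it with makedev(). A standalone sketch of the same parsing, using strtoul in place of qemu_strtoui():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/sysmacros.h>
    #include <sys/types.h>

    int main(void)
    {
        const char *field = "103:05";   /* sample dev field from a maps line */
        char *p;
        unsigned dev_maj = strtoul(field, &p, 16);
        unsigned dev_min = strtoul(p + 1, NULL, 16);
        dev_t dev = makedev(dev_maj, dev_min);

        printf("major=%u minor=%u dev=%#jx\n",
               major(dev), minor(dev), (uintmax_t)dev);
        return 0;
    }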