#ifndef CPU_COMMON_H
#define CPU_COMMON_H 1

/* CPU interfaces that are target independent. */
#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) || defined(__ia64__)
#define WORDS_ALIGNED
#endif
#ifdef TARGET_PHYS_ADDR_BITS
#include "targphys.h"
#endif
#ifndef NEED_CPU_H
#include "poison.h"
#endif
#include "bswap.h"
#include "qemu-queue.h"
#if !defined(CONFIG_USER_ONLY)
enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};
/* address in the RAM (different from a physical address) */
typedef unsigned long ram_addr_t;
/* memory API */
typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset);
static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                                ram_addr_t size,
                                                ram_addr_t phys_offset)
{
    cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
}
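
/* Usage sketch (illustrative, not part of this header): a device model
 * typically allocates RAM with qemu_ram_alloc() below and then maps it at
 * a guest-physical address.  The name "mydev.ram" and the addresses are
 * hypothetical.
 *
 *     ram_addr_t off = qemu_ram_alloc(NULL, "mydev.ram", 0x10000);
 *     cpu_register_physical_memory(0x80000000, 0x10000, off | IO_MEM_RAM);
 */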

ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host);
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should only be used for ram local to a device. */
void *qemu_get_ram_ptr(ram_addr_t addr);
/* Same but slower, to use for migration, where the order of
 * RAMBlocks must not change. */
void *qemu_safe_ram_ptr(ram_addr_t addr);
/* This should not be used by devices. */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian);
void cpu_unregister_io_memory(int table_address);
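
/* Usage sketch (illustrative; the mydev_* handlers are hypothetical).
 * The callback tables hold one handler per access size, in the order
 * 8-bit, 16-bit, 32-bit; the returned index can then be mapped with
 * cpu_register_physical_memory():
 *
 *     static CPUReadMemoryFunc * const mydev_read[] = {
 *         mydev_readb, mydev_readw, mydev_readl,
 *     };
 *     static CPUWriteMemoryFunc * const mydev_write[] = {
 *         mydev_writeb, mydev_writew, mydev_writel,
 *     };
 *
 *     int idx = cpu_register_io_memory(mydev_read, mydev_write, opaque,
 *                                      DEVICE_NATIVE_ENDIAN);
 *     cpu_register_physical_memory(base, size, idx);
 */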
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            void *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const void *buf, int len)
{
    cpu_physical_memory_rw(addr, (void *)buf, len, 1);
}
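
/* Usage sketch (illustrative; GuestDesc and desc_addr are hypothetical):
 * copying a small guest structure out, modifying it and writing it back.
 *
 *     GuestDesc d;
 *     cpu_physical_memory_read(desc_addr, &d, sizeof(d));
 *     d.status |= 1;
 *     cpu_physical_memory_write(desc_addr, &d, sizeof(d));
 */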
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write);
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
void cpu_unregister_map_client(void *cookie);
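
/* Usage sketch (illustrative): zero-copy access to guest memory.  The
 * mapping may cover less than the requested length, and may fail while
 * bounce-buffer resources are in use, in which case a caller can register
 * a callback with cpu_register_map_client() and retry later:
 *
 *     target_phys_addr_t plen = len;
 *     void *p = cpu_physical_memory_map(addr, &plen, 1);
 *     if (p) {
 *         memcpy(p, data, plen);
 *         cpu_physical_memory_unmap(p, plen, 1, plen);
 *     }
 */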
struct CPUPhysMemoryClient;
typedef struct CPUPhysMemoryClient CPUPhysMemoryClient;
struct CPUPhysMemoryClient {
    void (*set_memory)(struct CPUPhysMemoryClient *client,
                       target_phys_addr_t start_addr,
                       ram_addr_t size,
                       ram_addr_t phys_offset);
    int (*sync_dirty_bitmap)(struct CPUPhysMemoryClient *client,
                             target_phys_addr_t start_addr,
                             target_phys_addr_t end_addr);
    int (*migration_log)(struct CPUPhysMemoryClient *client,
                         int enable);
    int (*log_start)(struct CPUPhysMemoryClient *client,
                     target_phys_addr_t phys_addr, ram_addr_t size);
    int (*log_stop)(struct CPUPhysMemoryClient *client,
                    target_phys_addr_t phys_addr, ram_addr_t size);
    QLIST_ENTRY(CPUPhysMemoryClient) list;
};
void cpu_register_phys_memory_client(CPUPhysMemoryClient *);
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *);
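
/* Usage sketch (illustrative; my_client and its callback are hypothetical).
 * A registered client is notified of changes to the physical memory map:
 *
 *     static void my_set_memory(CPUPhysMemoryClient *client,
 *                               target_phys_addr_t start_addr,
 *                               ram_addr_t size, ram_addr_t phys_offset)
 *     {
 *         ...
 *     }
 *
 *     static CPUPhysMemoryClient my_client = {
 *         .set_memory = my_set_memory,
 *     };
 *     cpu_register_phys_memory_client(&my_client);
 */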
/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free.  This
 * allows batching, which can make a major impact on performance when using
 * virtualization.
 */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
void qemu_flush_coalesced_mmio_buffer(void);
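
/* Usage sketch (illustrative; fb_base and fb_size are hypothetical): a
 * device with a side-effect-free write region, e.g. a framebuffer, can
 * mark it coalesced, and should flush pending writes before it reads the
 * region's backing state:
 *
 *     qemu_register_coalesced_mmio(fb_base, fb_size);
 *     ...
 *     qemu_flush_coalesced_mmio_buffer();
 */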
uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);
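
/* Usage sketch (illustrative; desc_addr is hypothetical): these helpers
 * access guest-physical memory in the target's byte order, e.g. a
 * read-modify-write of a 32-bit status word:
 *
 *     uint32_t status = ldl_phys(desc_addr + 4);
 *     stl_phys(desc_addr + 4, status | 0x1);
 */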
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);
#define IO_MEM_SHIFT 3
#define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY (3 << IO_MEM_SHIFT)

/* Acts like a ROM when read and like a device when written. */
#define IO_MEM_ROMD (1)
#define IO_MEM_SUBPAGE (2)
#endif
#endif /* !CPU_COMMON_H */