/*
 * Declarations for functions which are internal to the memory subsystem.
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */
|
|
|
|
|
|
|
|
/*
 * This header is for use by exec.c, memory.c and accel/tcg/cputlb.c ONLY,
 * for declarations which are shared between the memory subsystem's
 * internals and the TCG TLB code.  Do not include it from elsewhere.
 */
|
|
|
|
|
2012-09-20 17:02:51 +04:00
|
|
|
#ifndef MEMORY_INTERNAL_H
#define MEMORY_INTERNAL_H

/* Everything below is only meaningful for system emulation. */
#ifndef CONFIG_USER_ONLY
|
2018-03-05 02:31:20 +03:00
|
|
|
/* Return the AddressSpaceDispatch (radix-tree lookup structure) backing @fv. */
static inline AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
    return fv->dispatch;
}
|
|
|
|
|
|
|
|
/*
 * Return the dispatch structure for @as's current flat view.
 *
 * NOTE(review): address_space_to_flatview() is declared elsewhere; its
 * synchronization requirements (likely RCU) apply here too — confirm at
 * the call site.
 */
static inline AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
{
    return flatview_to_dispatch(address_space_to_flatview(as));
}
|
2012-10-03 18:22:53 +04:00
|
|
|
|
2018-03-18 20:26:36 +03:00
|
|
|
FlatView *address_space_get_flatview(AddressSpace *as);
|
|
|
|
void flatview_unref(FlatView *view);
|
|
|
|
|
2013-05-24 15:23:38 +04:00
|
|
|
extern const MemoryRegionOps unassigned_mem_ops;
|
|
|
|
|
2013-05-24 13:55:06 +04:00
|
|
|
bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
|
2018-05-31 16:50:52 +03:00
|
|
|
unsigned size, bool is_write,
|
|
|
|
MemTxAttrs attrs);
|
2013-05-24 13:55:06 +04:00
|
|
|
|
2017-09-21 11:51:00 +03:00
|
|
|
void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section);
|
|
|
|
AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv);
|
|
|
|
void address_space_dispatch_compact(AddressSpaceDispatch *d);
|
2017-09-21 11:50:56 +03:00
|
|
|
void address_space_dispatch_free(AddressSpaceDispatch *d);
|
2017-09-21 11:50:54 +03:00
|
|
|
|
2019-04-17 22:17:56 +03:00
|
|
|
void mtree_print_dispatch(struct AddressSpaceDispatch *d,
|
2017-09-21 11:51:06 +03:00
|
|
|
MemoryRegion *root);
|
|
|
|
|
2017-08-05 06:46:31 +03:00
|
|
|
struct page_collection;
|
|
|
|
|
2017-11-20 21:08:27 +03:00
|
|
|
/* Opaque struct for passing info from memory_notdirty_write_prepare()
|
|
|
|
* to memory_notdirty_write_complete(). Callers should treat all fields
|
|
|
|
* as private, with the exception of @active.
|
|
|
|
*
|
|
|
|
* @active is a field which is not touched by either the prepare or
|
|
|
|
* complete functions, but which the caller can use if it wishes to
|
|
|
|
* track whether it has called prepare for this struct and so needs
|
|
|
|
* to later call the complete function.
|
|
|
|
*/
|
|
|
|
typedef struct {
|
|
|
|
CPUState *cpu;
|
2017-08-05 06:46:31 +03:00
|
|
|
struct page_collection *pages;
|
2017-11-20 21:08:27 +03:00
|
|
|
ram_addr_t ram_addr;
|
|
|
|
vaddr mem_vaddr;
|
|
|
|
unsigned size;
|
|
|
|
bool active;
|
|
|
|
} NotDirtyInfo;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* memory_notdirty_write_prepare: call before writing to non-dirty memory
|
|
|
|
* @ndi: pointer to opaque NotDirtyInfo struct
|
|
|
|
* @cpu: CPU doing the write
|
|
|
|
* @mem_vaddr: virtual address of write
|
|
|
|
* @ram_addr: the ram address of the write
|
|
|
|
* @size: size of write in bytes
|
|
|
|
*
|
|
|
|
* Any code which writes to the host memory corresponding to
|
|
|
|
* guest RAM which has been marked as NOTDIRTY must wrap those
|
|
|
|
* writes in calls to memory_notdirty_write_prepare() and
|
|
|
|
* memory_notdirty_write_complete():
|
|
|
|
*
|
|
|
|
* NotDirtyInfo ndi;
|
|
|
|
* memory_notdirty_write_prepare(&ndi, ....);
|
|
|
|
* ... perform write here ...
|
|
|
|
* memory_notdirty_write_complete(&ndi);
|
|
|
|
*
|
|
|
|
* These calls will ensure that we flush any TCG translated code for
|
|
|
|
* the memory being written, update the dirty bits and (if possible)
|
|
|
|
* remove the slowpath callback for writing to the memory.
|
|
|
|
*
|
|
|
|
* This must only be called if we are using TCG; it will assert otherwise.
|
|
|
|
*
|
2017-08-05 06:46:31 +03:00
|
|
|
* We may take locks in the prepare call, so callers must ensure that
|
2017-11-20 21:08:27 +03:00
|
|
|
* they don't exit (via longjump or otherwise) without calling complete.
|
|
|
|
*
|
|
|
|
* This call must only be made inside an RCU critical section.
|
|
|
|
* (Note that while we're executing a TCG TB we're always in an
|
|
|
|
* RCU critical section, which is likely to be the case for callers
|
|
|
|
* of these functions.)
|
|
|
|
*/
|
|
|
|
void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
|
|
|
|
CPUState *cpu,
|
|
|
|
vaddr mem_vaddr,
|
|
|
|
ram_addr_t ram_addr,
|
|
|
|
unsigned size);
|
|
|
|
/**
|
|
|
|
* memory_notdirty_write_complete: finish write to non-dirty memory
|
|
|
|
* @ndi: pointer to the opaque NotDirtyInfo struct which was initialized
|
|
|
|
* by memory_not_dirty_write_prepare().
|
|
|
|
*/
|
|
|
|
void memory_notdirty_write_complete(NotDirtyInfo *ndi);
|
|
|
|
|
2011-12-15 17:25:22 +04:00
|
|
|
#endif /* !CONFIG_USER_ONLY */
#endif /* MEMORY_INTERNAL_H */
|