#ifndef RAMLIST_H
#define RAMLIST_H
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
typedef struct RAMBlockNotifier RAMBlockNotifier;
#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 2
#define DIRTY_MEMORY_NUM       3        /* num of dirty bits */
/* The dirty memory bitmap is split into fixed-size blocks to allow growth
 * under RCU.  The bitmap for a block can be accessed as follows:
 *
 *   rcu_read_lock();
 *
 *   DirtyMemoryBlocks *blocks =
 *       qatomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
 *
 *   ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
 *   unsigned long *block = blocks->blocks[idx];
 *   ...access block bitmap...
 *
 *   rcu_read_unlock();
 *
 * Remember to check for the end of the block when accessing a range of
 * addresses.  Move on to the next block if you reach the end.
 *
 * Organization into blocks allows dirty memory to grow (but not shrink) under
 * RCU.  When adding new RAMBlocks requires the dirty memory to grow, a new
 * DirtyMemoryBlocks array is allocated with pointers to existing blocks kept
 * the same.  Other threads can safely access existing blocks while dirty
 * memory is being grown.  When no threads are using the old DirtyMemoryBlocks
 * anymore it is freed by RCU (but the underlying blocks stay because they are
 * pointed to from the new DirtyMemoryBlocks).
 */
#define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8)
typedef struct {
    struct rcu_head rcu;
    unsigned long *blocks[];
} DirtyMemoryBlocks;
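
/*
 * Illustrative sketch (not part of the API): testing whether a single page is
 * dirty for migration.  The page index splits into a block index and a bit
 * offset within that block, so a range that crosses a DIRTY_MEMORY_BLOCK_SIZE
 * boundary has to continue in the next block.  Assumes test_bit() from
 * "qemu/bitops.h":
 *
 *   rcu_read_lock();
 *   DirtyMemoryBlocks *blocks =
 *       qatomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
 *   ram_addr_t page = addr >> TARGET_PAGE_BITS;
 *   bool dirty = test_bit(page % DIRTY_MEMORY_BLOCK_SIZE,
 *                         blocks->blocks[page / DIRTY_MEMORY_BLOCK_SIZE]);
 *   rcu_read_unlock();
 */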
typedef struct RAMList {
    QemuMutex mutex;
    RAMBlock *mru_block;
    /* RCU-enabled, writes protected by the ramlist lock. */
    QLIST_HEAD(, RAMBlock) blocks;
    DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM];
    uint32_t version;
    QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
} RAMList;
extern RAMList ram_list;
/* Should be holding either ram_list.mutex, or the RCU lock. */
#define INTERNAL_RAMBLOCK_FOREACH(block) \
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
/* Never use the INTERNAL_ version except for defining other macros */
#define RAMBLOCK_FOREACH(block) INTERNAL_RAMBLOCK_FOREACH(block)
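
/*
 * Minimal usage sketch: walking the block list read-only from within an RCU
 * read-side critical section (writers hold the ramlist mutex instead):
 *
 *   RAMBlock *block;
 *
 *   rcu_read_lock();
 *   RAMBLOCK_FOREACH(block) {
 *       ...inspect *block...
 *   }
 *   rcu_read_unlock();
 */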
void qemu_mutex_lock_ramlist(void);
void qemu_mutex_unlock_ramlist(void);
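
/*
 * Rough sketch of the write-side locking pattern (illustrative only, not the
 * actual insertion code):
 *
 *   qemu_mutex_lock_ramlist();
 *   ...add or remove a RAMBlock, bump ram_list.version...
 *   qemu_mutex_unlock_ramlist();
 */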
struct RAMBlockNotifier {
    void (*ram_block_added)(RAMBlockNotifier *n, void *host, size_t size,
                            size_t max_size);
    void (*ram_block_removed)(RAMBlockNotifier *n, void *host, size_t size,
                              size_t max_size);
    void (*ram_block_resized)(RAMBlockNotifier *n, void *host, size_t old_size,
                              size_t new_size);
    QLIST_ENTRY(RAMBlockNotifier) next;
};
void ram_block_notifier_add(RAMBlockNotifier *n);
void ram_block_notifier_remove(RAMBlockNotifier *n);
void ram_block_notify_add(void *host, size_t size, size_t max_size);
void ram_block_notify_remove(void *host, size_t size, size_t max_size);
void ram_block_notify_resize(void *host, size_t old_size, size_t new_size);
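
/*
 * Hypothetical consumer sketch (names are illustrative, not QEMU API); it
 * assumes callbacks that are not needed may be left NULL:
 *
 *   static void my_ram_added(RAMBlockNotifier *n, void *host, size_t size,
 *                            size_t max_size)
 *   {
 *       ...register [host, host + max_size) with a device or allocator...
 *   }
 *
 *   static RAMBlockNotifier my_notifier = {
 *       .ram_block_added = my_ram_added,
 *   };
 *
 *   ram_block_notifier_add(&my_notifier);
 *
 * ram_block_notifier_remove(&my_notifier) detaches it again.
 */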
GString *ram_block_format(void);
#endif /* RAMLIST_H */