ea089eebbd
Originally CPU threads were by default assigned in round-robin fashion. However it was causing issues in guests since CPU threads from the same socket/core could be placed on different NUMA nodes. Commit fb43b73b
(pc: fix default VCPU to NUMA node mapping) fixed it by grouping threads within a socket on the same node, introducing the cpu_index_to_socket_id() callback, and commit 20bb648d
(spapr: Fix default NUMA node allocation for threads) reused the callback to fix similar issues for the SPAPR machine, even though a socket doesn't make much sense there. As a result, QEMU ended up having 3 default distribution rules used by 3 targets /virt-arm, spapr, pc/. In an effort to move NUMA mapping for CPUs into possible_cpus, generalize the default mapping in numa.c by making boards decide on the default mapping and letting them explicitly tell the generic NUMA code to which node a CPU thread belongs, by replacing cpu_index_to_socket_id() with @cpu_index_to_instance_props(), which provides the default node_id assigned by the board to the specified cpu_index. Signed-off-by: Igor Mammedov <imammedo@redhat.com> Reviewed-by: Eduardo Habkost <ehabkost@redhat.com> Message-Id: <1494415802-227633-2-git-send-email-imammedo@redhat.com> Reviewed-by: David Gibson <david@gibson.dropbear.id.au> Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
46 lines
1.4 KiB
C
46 lines
1.4 KiB
C
#ifndef SYSEMU_NUMA_H
|
|
#define SYSEMU_NUMA_H
|
|
|
|
#include "qemu/bitmap.h"
|
|
#include "qemu/option.h"
|
|
#include "sysemu/sysemu.h"
|
|
#include "sysemu/hostmem.h"
|
|
#include "hw/boards.h"
|
|
|
|
extern int nb_numa_nodes; /* Number of NUMA nodes */
|
|
extern bool have_numa_distance;
|
|
|
|
struct numa_addr_range {
|
|
ram_addr_t mem_start;
|
|
ram_addr_t mem_end;
|
|
QLIST_ENTRY(numa_addr_range) entry;
|
|
};
|
|
|
|
struct node_info {
|
|
uint64_t node_mem;
|
|
unsigned long *node_cpu;
|
|
struct HostMemoryBackend *node_memdev;
|
|
bool present;
|
|
QLIST_HEAD(, numa_addr_range) addr; /* List to store address ranges */
|
|
uint8_t distance[MAX_NODES];
|
|
};
|
|
|
|
extern NodeInfo numa_info[MAX_NODES];
|
|
void parse_numa_opts(MachineState *ms);
|
|
void numa_post_machine_init(void);
|
|
void query_numa_node_mem(uint64_t node_mem[]);
|
|
extern QemuOptsList qemu_numa_opts;
|
|
void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node);
|
|
void numa_unset_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node);
|
|
uint32_t numa_get_node(ram_addr_t addr, Error **errp);
|
|
void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
|
|
int nb_nodes, ram_addr_t size);
|
|
void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
|
|
int nb_nodes, ram_addr_t size);
|
|
|
|
|
|
/* on success returns node index in numa_info,
|
|
* on failure returns nb_numa_nodes */
|
|
int numa_get_node_for_cpu(int idx);
|
|
#endif
|