project('qemu', ['c'], meson_version: '>=1.5.0',
        default_options: ['warning_level=1', 'c_std=gnu11', 'cpp_std=gnu++11', 'b_colorout=auto',
                          'b_staticpic=false', 'stdsplit=false', 'optimization=2', 'b_pie=true'],
        version: files('VERSION'))

add_test_setup('quick', exclude_suites: ['slow', 'thorough'], is_default: true)
add_test_setup('slow', exclude_suites: ['thorough'], env: ['G_TEST_SLOW=1', 'SPEED=slow'])
add_test_setup('thorough', env: ['G_TEST_SLOW=1', 'SPEED=thorough'])
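
# The setups above are picked at test time with Meson's --setup flag; a
# minimal usage sketch, assuming an already configured build directory:
#   meson test --setup slow
# 'quick' is the default and excludes the 'slow' and 'thorough' suites.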

meson.add_postconf_script(find_program('scripts/symlink-install-tree.py'))

####################
# Global variables #
####################

not_found = dependency('', required: false)
keyval = import('keyval')
ss = import('sourceset')
fs = import('fs')

host_os = host_machine.system()
config_host = keyval.load(meson.current_build_dir() / 'config-host.mak')

# Temporary directory used for files created while
# configure runs. Since it is in the build directory
# we can safely blow away any previous version of it
# (and we need not jump through hoops to try to delete
# it when configure exits.)
tmpdir = meson.current_build_dir() / 'meson-private/temp'

if get_option('qemu_suffix').startswith('/')
  error('qemu_suffix cannot start with a /')
endif

qemu_confdir = get_option('sysconfdir') / get_option('qemu_suffix')
qemu_datadir = get_option('datadir') / get_option('qemu_suffix')
qemu_docdir = get_option('docdir') / get_option('qemu_suffix')
qemu_moddir = get_option('libdir') / get_option('qemu_suffix')

qemu_desktopdir = get_option('datadir') / 'applications'
qemu_icondir = get_option('datadir') / 'icons'
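
# As an illustration only (values assumed, not fixed by this file): with
# datadir='share' and qemu_suffix='qemu', firmware and keymaps land under
# <prefix>/share/qemu, while modules go under <prefix>/<libdir>/qemu.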

genh = []
qapi_trace_events = []

bsd_oses = ['gnu/kfreebsd', 'freebsd', 'netbsd', 'openbsd', 'dragonfly', 'darwin']
supported_oses = ['windows', 'freebsd', 'netbsd', 'openbsd', 'darwin', 'sunos', 'linux']
supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv32', 'riscv64', 'x86', 'x86_64',
                  'arm', 'aarch64', 'loongarch64', 'mips', 'mips64', 'sparc64']

cpu = host_machine.cpu_family()

target_dirs = config_host['TARGET_DIRS'].split()

############
# Programs #
############

sh = find_program('sh')
python = import('python').find_installation()

cc = meson.get_compiler('c')
all_languages = ['c']
if host_os == 'windows' and add_languages('cpp', required: false, native: false)
  all_languages += ['cpp']
  cxx = meson.get_compiler('cpp')
endif
if host_os == 'darwin' and \
   add_languages('objc', required: true, native: false)
  all_languages += ['objc']
  objc = meson.get_compiler('objc')
endif

have_rust = false
if not get_option('rust').disabled() and add_languages('rust', required: get_option('rust'), native: false) \
    and add_languages('rust', required: get_option('rust'), native: true)
  rustc = meson.get_compiler('rust')
  have_rust = true
  if rustc.version().version_compare('<1.80.0')
    if get_option('rust').enabled()
      error('rustc version ' + rustc.version() + ' is unsupported: Please upgrade to at least 1.80.0')
    else
      warning('rustc version ' + rustc.version() + ' is unsupported: Disabling Rust compilation. Please upgrade to at least 1.80.0 to use Rust.')
      have_rust = false
    endif
  endif
endif
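
# A configuration sketch for the Rust bindings (values illustrative): both a
# cross and a native rustc of at least 1.80.0 must be found, e.g.
#   meson setup build -Drust=enabled
# With -Drust=auto, a missing or too-old toolchain merely disables Rust
# (with the warning above when the version is too old).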

dtrace = not_found
stap = not_found
if 'dtrace' in get_option('trace_backends')
  dtrace = find_program('dtrace', required: true)
  stap = find_program('stap', required: false)
  if stap.found()
    # Workaround to avoid dtrace(1) producing a file with 'hidden' symbol
    # visibility. Define STAP_SDT_V2 to produce 'default' symbol visibility
    # instead. QEMU --enable-modules depends on this because the SystemTap
    # semaphores are linked into the main binary and not the module's shared
    # object.
    add_global_arguments('-DSTAP_SDT_V2',
                         native: false, language: all_languages)
  endif
endif
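
# A usage sketch (the option name comes from this file, the value is
# illustrative): trace backends are chosen at configure time, e.g.
#   meson setup build -Dtrace_backends=dtrace
# and only the 'dtrace' backend triggers the dtrace(1)/stap probes above.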

if get_option('iasl') == ''
  iasl = find_program('iasl', required: false)
else
  iasl = find_program(get_option('iasl'), required: true)
endif

edk2_targets = [ 'arm-softmmu', 'aarch64-softmmu', 'i386-softmmu', 'x86_64-softmmu', 'riscv64-softmmu', 'loongarch64-softmmu' ]
unpack_edk2_blobs = false
foreach target : edk2_targets
  if target in target_dirs
    bzip2 = find_program('bzip2', required: get_option('install_blobs'))
    unpack_edk2_blobs = bzip2.found()
    break
  endif
endforeach

#####################
# Option validation #
#####################

# Fuzzing
if get_option('fuzzing') and get_option('fuzzing_engine') == '' and \
    not cc.links('''
          #include <stdint.h>
          #include <sys/types.h>
          int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size);
          int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { return 0; }
        ''',
        args: ['-Werror', '-fsanitize=fuzzer'])
  error('Your compiler does not support -fsanitize=fuzzer')
endif

# Tracing backends
if 'ftrace' in get_option('trace_backends') and host_os != 'linux'
  error('ftrace is supported only on Linux')
endif
if 'syslog' in get_option('trace_backends') and not cc.compiles('''
    #include <syslog.h>
    int main(void) {
        openlog("qemu", LOG_PID, LOG_DAEMON);
        syslog(LOG_INFO, "configure");
        return 0;
    }''')
  error('syslog is not supported on this system')
endif

# Miscellaneous Linux-only features
get_option('mpath') \
  .require(host_os == 'linux', error_message: 'Multipath is supported only on Linux')
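
# The chains used throughout this section are Meson's feature-option API:
# .disable_auto_if() only downgrades a feature that is still 'auto',
# .require() turns an unmet condition into a hard error when the feature was
# explicitly enabled, and .allowed() collapses the result to a boolean.
# A minimal sketch with a hypothetical option 'foo':
#   have_foo = get_option('foo') \
#     .disable_auto_if(host_os != 'linux') \
#     .require(have_system, error_message: 'foo needs a system emulator') \
#     .allowed()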

multiprocess_allowed = get_option('multiprocess') \
  .require(host_os == 'linux', error_message: 'Multiprocess QEMU is supported only on Linux') \
  .allowed()

vfio_user_server_allowed = get_option('vfio_user_server') \
  .require(host_os == 'linux', error_message: 'vfio-user server is supported only on Linux') \
  .allowed()

have_tpm = get_option('tpm') \
  .require(host_os != 'windows', error_message: 'TPM emulation only available on POSIX systems') \
  .allowed()

# vhost
have_vhost_user = get_option('vhost_user') \
  .disable_auto_if(host_os != 'linux') \
  .require(host_os != 'windows',
           error_message: 'vhost-user is not available on Windows').allowed()
have_vhost_vdpa = get_option('vhost_vdpa') \
  .require(host_os == 'linux',
           error_message: 'vhost-vdpa is only available on Linux').allowed()
have_vhost_kernel = get_option('vhost_kernel') \
  .require(host_os == 'linux',
           error_message: 'vhost-kernel is only available on Linux').allowed()
have_vhost_user_crypto = get_option('vhost_crypto') \
  .require(have_vhost_user,
           error_message: 'vhost-crypto requires vhost-user to be enabled').allowed()

have_vhost = have_vhost_user or have_vhost_vdpa or have_vhost_kernel

have_vhost_net_user = have_vhost_user and get_option('vhost_net').allowed()
have_vhost_net_vdpa = have_vhost_vdpa and get_option('vhost_net').allowed()
have_vhost_net_kernel = have_vhost_kernel and get_option('vhost_net').allowed()
have_vhost_net = have_vhost_net_kernel or have_vhost_net_user or have_vhost_net_vdpa

# type of binaries to build
have_linux_user = false
have_bsd_user = false
have_system = false
foreach target : target_dirs
  have_linux_user = have_linux_user or target.endswith('linux-user')
  have_bsd_user = have_bsd_user or target.endswith('bsd-user')
  have_system = have_system or target.endswith('-softmmu')
endforeach
have_user = have_linux_user or have_bsd_user

have_tools = get_option('tools') \
  .disable_auto_if(not have_system) \
  .allowed()
have_ga = get_option('guest_agent') \
  .disable_auto_if(not have_system and not have_tools) \
  .require(host_os in ['sunos', 'linux', 'windows', 'freebsd', 'netbsd', 'openbsd'],
           error_message: 'unsupported OS for QEMU guest agent') \
  .allowed()
have_block = have_system or have_tools

enable_modules = get_option('modules') \
  .require(host_os != 'windows',
           error_message: 'Modules are not available for Windows') \
  .require(not get_option('prefer_static'),
           error_message: 'Modules are incompatible with static linking') \
  .allowed()
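
# A sketch of the consequence (options as defined in this file): a modular
# build such as
#   meson setup build -Dmodules=enabled
# is rejected on Windows and whenever -Dprefer_static=true is in effect.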

#######################################
# Variables for host and accelerators #
#######################################

if cpu not in supported_cpus
  host_arch = 'unknown'
elif cpu == 'x86'
  host_arch = 'i386'
elif cpu == 'mips64'
  host_arch = 'mips'
elif cpu in ['riscv32', 'riscv64']
  host_arch = 'riscv'
else
  host_arch = cpu
endif

if cpu in ['x86', 'x86_64']
  kvm_targets = ['i386-softmmu', 'x86_64-softmmu']
elif cpu == 'aarch64'
  kvm_targets = ['aarch64-softmmu']
elif cpu == 's390x'
  kvm_targets = ['s390x-softmmu']
elif cpu in ['ppc', 'ppc64']
  kvm_targets = ['ppc-softmmu', 'ppc64-softmmu']
elif cpu in ['mips', 'mips64']
  kvm_targets = ['mips-softmmu', 'mipsel-softmmu', 'mips64-softmmu', 'mips64el-softmmu']
elif cpu in ['riscv32']
  kvm_targets = ['riscv32-softmmu']
elif cpu in ['riscv64']
  kvm_targets = ['riscv64-softmmu']
elif cpu in ['loongarch64']
  kvm_targets = ['loongarch64-softmmu']
else
  kvm_targets = []
endif
accelerator_targets = { 'CONFIG_KVM': kvm_targets }

if cpu in ['x86', 'x86_64']
  xen_targets = ['i386-softmmu', 'x86_64-softmmu']
elif cpu in ['arm', 'aarch64']
  # i386 emulator provides xenpv machine type for multiple architectures
  xen_targets = ['i386-softmmu', 'x86_64-softmmu', 'aarch64-softmmu']
else
  xen_targets = []
endif
accelerator_targets += { 'CONFIG_XEN': xen_targets }

if cpu in ['aarch64']
  accelerator_targets += {
    'CONFIG_HVF': ['aarch64-softmmu']
  }
endif

if cpu in ['x86', 'x86_64']
  accelerator_targets += {
    'CONFIG_HVF': ['x86_64-softmmu'],
    'CONFIG_NVMM': ['i386-softmmu', 'x86_64-softmmu'],
    'CONFIG_WHPX': ['i386-softmmu', 'x86_64-softmmu'],
  }
endif

modular_tcg = []
# Darwin does not support references to thread-local variables in modules
if host_os != 'darwin'
  modular_tcg = ['i386-softmmu', 'x86_64-softmmu']
endif

##################
# Compiler flags #
##################

foreach lang : all_languages
  compiler = meson.get_compiler(lang)
  if compiler.get_id() == 'gcc' and compiler.version().version_compare('>=7.4')
    # ok
  elif compiler.get_id() == 'clang' and compiler.compiles('''
      #ifdef __apple_build_version__
      # if __clang_major__ < 12 || (__clang_major__ == 12 && __clang_minor__ < 0)
      #  error You need at least XCode Clang v12.0 to compile QEMU
      # endif
      #else
      # if __clang_major__ < 10 || (__clang_major__ == 10 && __clang_minor__ < 0)
      #  error You need at least Clang v10.0 to compile QEMU
      # endif
      #endif''')
    # ok
  else
    error('You either need GCC v7.4 or Clang v10.0 (or XCode Clang v12.0) to compile QEMU')
  endif
endforeach

# default flags for all hosts
# We use -fwrapv to tell the compiler that we require a C dialect where
# left shift of signed integers is well defined and has the expected
# 2s-complement style results. (Both clang and gcc agree that it
# provides these semantics.)

qemu_common_flags = [
  '-D_GNU_SOURCE', '-D_FILE_OFFSET_BITS=64', '-D_LARGEFILE_SOURCE',
  '-fno-strict-aliasing', '-fno-common', '-fwrapv' ]
qemu_cflags = []
qemu_ldflags = []

if host_os == 'darwin'
  # Disable attempts to use ObjectiveC features in os/object.h since they
  # won't work when we're compiling with gcc as a C compiler.
  if compiler.get_id() == 'gcc'
    qemu_common_flags += '-DOS_OBJECT_USE_OBJC=0'
  endif
elif host_os == 'sunos'
  # needed for CMSG_ macros in sys/socket.h
  qemu_common_flags += '-D_XOPEN_SOURCE=600'
  # needed for TIOCWIN* defines in termios.h
  qemu_common_flags += '-D__EXTENSIONS__'
elif host_os == 'haiku'
  qemu_common_flags += ['-DB_USE_POSITIVE_POSIX_ERRORS', '-D_BSD_SOURCE', '-fPIC']
elif host_os == 'windows'
  if not compiler.compiles('struct x { int y; } __attribute__((gcc_struct));',
                           args: '-Werror')
    error('Your compiler does not support __attribute__((gcc_struct)) - please use GCC instead of Clang')
  endif
endif

# Choose instruction set (currently x86-only)

qemu_isa_flags = []

# __sync_fetch_and_and requires at least -march=i486. Many toolchains
# use i686 as default anyway, but for those that don't, an explicit
# specification is necessary
if host_arch == 'i386' and not cc.links('''
  static int sfaa(int *ptr)
  {
    return __sync_fetch_and_and(ptr, 0);
  }

  int main(void)
  {
    int val = 42;
    val = __sync_val_compare_and_swap(&val, 0, 1);
    sfaa(&val);
    return val;
  }''')
  qemu_isa_flags += ['-march=i486']
endif

# Pick x86-64 baseline version
if host_arch in ['i386', 'x86_64']
  if get_option('x86_version') == '0' and host_arch == 'x86_64'
    error('x86_64-v1 required for x86-64 hosts')
  endif

  # add flags for individual instruction set extensions
  if get_option('x86_version') >= '1'
    if host_arch == 'i386'
      qemu_common_flags = ['-mfpmath=sse'] + qemu_common_flags
    else
      # present on basically all processors but technically not part of
      # x86-64-v1, so only include -mneeded for x86-64 version 2 and above
      qemu_isa_flags += ['-mcx16']
    endif
  endif
  if get_option('x86_version') >= '2'
    qemu_isa_flags += ['-mpopcnt']
    qemu_isa_flags += cc.get_supported_arguments('-mneeded')
  endif
  if get_option('x86_version') >= '3'
    qemu_isa_flags += ['-mmovbe', '-mabm', '-mbmi', '-mbmi2', '-mfma', '-mf16c']
  endif

  # add required vector instruction set (each level implies those below)
  if get_option('x86_version') == '1'
    qemu_isa_flags += ['-msse2']
  elif get_option('x86_version') == '2'
    qemu_isa_flags += ['-msse4.2']
  elif get_option('x86_version') == '3'
    qemu_isa_flags += ['-mavx2']
  elif get_option('x86_version') == '4'
    qemu_isa_flags += ['-mavx512f', '-mavx512bw', '-mavx512cd', '-mavx512dq', '-mavx512vl']
  endif
endif
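
# A configuration sketch (option name from this file, values are the x86-64
# microarchitecture levels): an already configured tree can be switched to
# the x86-64-v2 baseline with
#   meson configure build -Dx86_version=2
# which selects -mpopcnt and -msse4.2 (plus -mneeded when supported) above.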

qemu_common_flags = qemu_isa_flags + qemu_common_flags

if get_option('prefer_static')
  qemu_ldflags += get_option('b_pie') ? '-static-pie' : '-static'
endif

# Meson currently only handles pie as a boolean, so if the user
# has explicitly disabled PIE we need to extend our cflags.
#
# -no-pie is supposedly a linker flag that has no effect on the compiler
# command line, but some distros, that didn't quite know what they were
# doing, made local changes to gcc's specs file that turned it into
# a compiler command-line flag.
#
# What about linker flags? For a static build, no PIE is implied by -static
# which we added above (and if it's not because of the same specs patching,
# there's nothing we can do: compilation will fail, report a bug to your
# distro and do not use --disable-pie in the meanwhile). For dynamic linking,
# instead, we can't add -no-pie because it overrides -shared: the linker then
# tries to build an executable instead of a shared library and fails. So
# don't add -no-pie anywhere and cross fingers. :(
if not get_option('b_pie')
  qemu_common_flags += cc.get_supported_arguments('-fno-pie', '-no-pie')
endif

if not get_option('stack_protector').disabled()
  stack_protector_probe = '''
    int main(int argc, char *argv[])
    {
      char arr[64], *p = arr, *c = argv[argc - 1];
      while (*c) {
          *p++ = *c++;
      }
      return 0;
    }'''
  have_stack_protector = false
  foreach arg : ['-fstack-protector-strong', '-fstack-protector-all']
    # We need to check both a compile and a link, since some compiler
    # setups fail only on a .c->.o compile and some only at link time
    if cc.compiles(stack_protector_probe, args: ['-Werror', arg]) and \
       cc.links(stack_protector_probe, args: ['-Werror', arg])
      have_stack_protector = true
      qemu_cflags += arg
      qemu_ldflags += arg
      break
    endif
  endforeach
  get_option('stack_protector') \
    .require(have_stack_protector, error_message: 'Stack protector not supported')
endif

coroutine_backend = get_option('coroutine_backend')
ucontext_probe = '''
  #include <ucontext.h>
  #ifdef __stub_makecontext
  #error Ignoring glibc stub makecontext which will always fail
  #endif
  int main(void) { makecontext(0, 0, 0); return 0; }'''

# On Windows the only valid backend is the Windows specific one.
# For POSIX prefer ucontext, but it's not always possible. The fallback
# is sigaltstack.
supported_backends = []
if host_os == 'windows'
  supported_backends += ['windows']
else
  if host_os != 'darwin' and cc.links(ucontext_probe)
    supported_backends += ['ucontext']
  endif
  supported_backends += ['sigaltstack']
endif

if coroutine_backend == 'auto'
  coroutine_backend = supported_backends[0]
elif coroutine_backend not in supported_backends
  error('"@0@" backend requested but not available. Available backends: @1@' \
        .format(coroutine_backend, ', '.join(supported_backends)))
endif
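
# A usage sketch (option and values as listed above): the backend can be
# forced at configure time, e.g.
#   meson setup build -Dcoroutine_backend=sigaltstack
# 'auto' takes the first supported backend: 'windows' on Windows, otherwise
# 'ucontext' when the probe above links, falling back to 'sigaltstack'.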

# Compiles if SafeStack *not* enabled
safe_stack_probe = '''
  int main(void)
  {
  #if defined(__has_feature)
  #if __has_feature(safe_stack)
  #error SafeStack Enabled
  #endif
  #endif
      return 0;
  }'''
if get_option('safe_stack') != not cc.compiles(safe_stack_probe)
  safe_stack_arg = get_option('safe_stack') ? '-fsanitize=safe-stack' : '-fno-sanitize=safe-stack'
  if get_option('safe_stack') != not cc.compiles(safe_stack_probe, args: safe_stack_arg)
    error(get_option('safe_stack') \
          ? 'SafeStack not supported by your compiler' \
          : 'Cannot disable SafeStack')
  endif
  qemu_cflags += safe_stack_arg
  qemu_ldflags += safe_stack_arg
endif
if get_option('safe_stack') and coroutine_backend != 'ucontext'
  error('SafeStack is only supported with the ucontext coroutine backend')
endif
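
# In practice a SafeStack build therefore has to pin the coroutine backend,
# e.g. (a sketch using the options defined in this file):
#   meson setup build -Dsafe_stack=true -Dcoroutine_backend=ucontext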

if get_option('asan')
  if cc.has_argument('-fsanitize=address')
    qemu_cflags = ['-fsanitize=address'] + qemu_cflags
    qemu_ldflags = ['-fsanitize=address'] + qemu_ldflags
  else
    error('Your compiler does not support -fsanitize=address')
  endif
endif

if get_option('ubsan')
  # Detect static linking issue with ubsan:
  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84285
  if cc.links('int main(int argc, char **argv) { return argc + 1; }',
              args: [qemu_ldflags, '-fsanitize=undefined'])
    qemu_cflags += ['-fsanitize=undefined']
    qemu_ldflags += ['-fsanitize=undefined']

    # Suppress undefined behaviour from function call to mismatched type.
    # In addition, tcg prologue does not emit function type prefix
    # required by function call sanitizer.
    if cc.has_argument('-fno-sanitize=function')
      qemu_cflags += ['-fno-sanitize=function']
    endif
  else
    error('Your compiler does not support -fsanitize=undefined')
  endif
endif

# Thread sanitizer is, for now, much noisier than the other sanitizers;
# keep it separate until that is not the case.
if get_option('tsan')
  if get_option('asan') or get_option('ubsan')
    error('TSAN is not supported with other sanitizers')
  endif
  if not cc.has_function('__tsan_create_fiber',
                         args: '-fsanitize=thread',
                         prefix: '#include <sanitizer/tsan_interface.h>')
    error('Cannot enable TSAN due to missing fiber annotation interface')
  endif
  tsan_warn_suppress = []
  # gcc (>=11) will report constructions not supported by tsan:
  # "error: ‘atomic_thread_fence’ is not supported with ‘-fsanitize=thread’"
  # https://gcc.gnu.org/gcc-11/changes.html
  # However, clang does not support this warning and this triggers an error.
  if cc.has_argument('-Wno-tsan')
    tsan_warn_suppress = ['-Wno-tsan']
  endif
  qemu_cflags = ['-fsanitize=thread'] + tsan_warn_suppress + qemu_cflags
  qemu_ldflags = ['-fsanitize=thread'] + qemu_ldflags
endif

# Detect support for PT_GNU_RELRO + DT_BIND_NOW.
# The combination is known as "full relro", because .got.plt is read-only too.
qemu_ldflags += cc.get_supported_link_arguments('-Wl,-z,relro', '-Wl,-z,now')

if host_os == 'windows'
  qemu_ldflags += cc.get_supported_link_arguments('-Wl,--no-seh', '-Wl,--nxcompat')
  qemu_ldflags += cc.get_supported_link_arguments('-Wl,--dynamicbase', '-Wl,--high-entropy-va')
endif

if get_option('fuzzing')
  # Specify a filter to only instrument code that is directly related to
  # virtual-devices.
  configure_file(output: 'instrumentation-filter',
                 input: 'scripts/oss-fuzz/instrumentation-filter-template',
                 copy: true)

  if cc.compiles('int main () { return 0; }',
                 name: '-fsanitize-coverage-allowlist=/dev/null',
                 args: ['-fsanitize-coverage-allowlist=/dev/null',
                        '-fsanitize-coverage=trace-pc'] )
    qemu_common_flags += ['-fsanitize-coverage-allowlist=instrumentation-filter']
  endif

  if get_option('fuzzing_engine') == ''
    # Add CFLAGS to tell clang to add fuzzer-related instrumentation to all the
    # compiled code. To build non-fuzzer binaries with --enable-fuzzing, link
    # everything with -fsanitize=fuzzer-no-link. Otherwise, the linker will be
    # unable to bind the fuzzer-related callbacks added by instrumentation.
    qemu_common_flags += ['-fsanitize=fuzzer-no-link']
    qemu_ldflags += ['-fsanitize=fuzzer-no-link']
    # For the actual fuzzer binaries, we need to link against the libfuzzer
    # library. They need to be configurable, to support OSS-Fuzz
    fuzz_exe_ldflags = ['-fsanitize=fuzzer']
  else
    # LIB_FUZZING_ENGINE was set; assume we are running on OSS-Fuzz, and
    # the needed CFLAGS have already been provided
    fuzz_exe_ldflags = get_option('fuzzing_engine').split()
  endif
endif

if get_option('cfi')
  cfi_flags=[]
  # Check for dependency on LTO
  if not get_option('b_lto')
    error('Selected Control-Flow Integrity but LTO is disabled')
  endif
  if enable_modules
    error('Selected Control-Flow Integrity is not compatible with modules')
  endif
  # Check for cfi flags. CFI requires LTO so we can't use
  # get_supported_arguments, but need a more complex "compiles" which allows
  # custom arguments
  if cc.compiles('int main () { return 0; }', name: '-fsanitize=cfi-icall',
                 args: ['-flto', '-fsanitize=cfi-icall'] )
    cfi_flags += '-fsanitize=cfi-icall'
  else
    error('-fsanitize=cfi-icall is not supported by the compiler')
  endif
  if cc.compiles('int main () { return 0; }',
                 name: '-fsanitize-cfi-icall-generalize-pointers',
                 args: ['-flto', '-fsanitize=cfi-icall',
                        '-fsanitize-cfi-icall-generalize-pointers'] )
    cfi_flags += '-fsanitize-cfi-icall-generalize-pointers'
  else
    error('-fsanitize-cfi-icall-generalize-pointers is not supported by the compiler')
  endif
  if get_option('cfi_debug')
    if cc.compiles('int main () { return 0; }',
                   name: '-fno-sanitize-trap=cfi-icall',
                   args: ['-flto', '-fsanitize=cfi-icall',
                          '-fno-sanitize-trap=cfi-icall'] )
      cfi_flags += '-fno-sanitize-trap=cfi-icall'
    else
      error('-fno-sanitize-trap=cfi-icall is not supported by the compiler')
    endif
  endif
  add_global_arguments(cfi_flags, native: false, language: all_languages)
  add_global_link_arguments(cfi_flags, native: false, language: all_languages)
endif

# Check further flags that make QEMU more robust against malicious parties

hardening_flags = [
    # Initialize all stack variables to zero. This makes
    # it harder to take advantage of uninitialized stack
    # data to drive exploits
    '-ftrivial-auto-var-init=zero',
]

# Zero out registers used during a function call
# upon its return. This makes it harder to assemble
# ROP gadgets into something usable
#
# NB: Clang 17 is broken and SEGVs
# https://github.com/llvm/llvm-project/issues/75168
#
# NB2: This clashes with the "retguard" extension of OpenBSD's Clang
# https://gitlab.com/qemu-project/qemu/-/issues/2278
if host_os != 'openbsd' and \
   cc.compiles('extern struct { void (*cb)(void); } s; void f(void) { s.cb(); }',
               name: '-fzero-call-used-regs=used-gpr',
               args: ['-O2', '-fzero-call-used-regs=used-gpr'])
  hardening_flags += '-fzero-call-used-regs=used-gpr'
endif
qemu_common_flags += cc.get_supported_arguments(hardening_flags)

add_global_arguments(qemu_common_flags, native: false, language: all_languages)
add_global_link_arguments(qemu_ldflags, native: false, language: all_languages)

# Collect warning flags we want to set, sorted alphabetically
warn_flags = [
  # First enable interesting warnings
  '-Wempty-body',
  '-Wendif-labels',
  '-Wexpansion-to-defined',
  '-Wformat-security',
  '-Wformat-y2k',
  '-Wignored-qualifiers',
  '-Wimplicit-fallthrough=2',
  '-Winit-self',
  '-Wmissing-format-attribute',
  '-Wmissing-prototypes',
  '-Wnested-externs',
  '-Wold-style-declaration',
  '-Wold-style-definition',
  '-Wredundant-decls',
  '-Wshadow=local',
  '-Wstrict-prototypes',
  '-Wtype-limits',
  '-Wundef',
  '-Wvla',
  '-Wwrite-strings',

  # Then disable some undesirable warnings
  '-Wno-gnu-variable-sized-type-not-at-end',
  '-Wno-initializer-overrides',
  '-Wno-missing-include-dirs',
  '-Wno-psabi',
  '-Wno-shift-negative-value',
  '-Wno-string-plus-int',
  '-Wno-tautological-type-limit-compare',
  '-Wno-typedef-redefinition',
]

if host_os != 'darwin'
  tsa_has_cleanup = cc.compiles('''
    struct __attribute__((capability("mutex"))) mutex {};
    void lock(struct mutex *m) __attribute__((acquire_capability(m)));
    void unlock(struct mutex *m) __attribute__((release_capability(m)));

    void test(void) {
      struct mutex __attribute__((cleanup(unlock))) m;
      lock(&m);
    }
  ''', args: ['-Wthread-safety', '-Werror'])
  if tsa_has_cleanup
    warn_flags += ['-Wthread-safety']
  endif
endif

# Set up C++ compiler flags
qemu_cxxflags = []
if 'cpp' in all_languages
  qemu_cxxflags = ['-D__STDC_LIMIT_MACROS', '-D__STDC_CONSTANT_MACROS', '-D__STDC_FORMAT_MACROS'] + qemu_cflags
endif

add_project_arguments(qemu_cflags, native: false, language: 'c')
add_project_arguments(cc.get_supported_arguments(warn_flags), native: false, language: 'c')
if 'cpp' in all_languages
  add_project_arguments(qemu_cxxflags, native: false, language: 'cpp')
  add_project_arguments(cxx.get_supported_arguments(warn_flags), native: false, language: 'cpp')
endif
if 'objc' in all_languages
  # Note sanitizer flags are not applied to Objective-C sources!
  add_project_arguments(objc.get_supported_arguments(warn_flags), native: false, language: 'objc')
endif
if host_os == 'linux'
  add_project_arguments('-isystem', meson.current_source_dir() / 'linux-headers',
                        '-isystem', 'linux-headers',
                        language: all_languages)
endif

add_project_arguments('-iquote', '.',
                      '-iquote', meson.current_source_dir(),
                      '-iquote', meson.current_source_dir() / 'include',
                      language: all_languages)

# If a host-specific include directory exists, list that first...
host_include = meson.current_source_dir() / 'host/include/'
if fs.is_dir(host_include / host_arch)
  add_project_arguments('-iquote', host_include / host_arch,
                        language: all_languages)
endif
# ... followed by the generic fallback.
add_project_arguments('-iquote', host_include / 'generic',
                      language: all_languages)

sparse = find_program('cgcc', required: get_option('sparse'))
if sparse.found()
  run_target('sparse',
             command: [find_program('scripts/check_sparse.py'),
                       'compile_commands.json', sparse.full_path(), '-Wbitwise',
                       '-Wno-transparent-union', '-Wno-old-initializer',
                       '-Wno-non-pointer-null'])
endif

#####################################
# Host-specific libraries and flags #
#####################################

libm = cc.find_library('m', required: false)
threads = dependency('threads')
util = cc.find_library('util', required: false)
winmm = []
socket = []
version_res = []
coref = []
iokit = []
emulator_link_args = []
midl = not_found
widl = not_found
pathcch = not_found
host_dsosuf = '.so'
if host_os == 'windows'
  midl = find_program('midl', required: false)
  widl = find_program('widl', required: false)
  pathcch = cc.find_library('pathcch')
  socket = cc.find_library('ws2_32')
  winmm = cc.find_library('winmm')

  win = import('windows')
  version_res = win.compile_resources('version.rc',
                                      depend_files: files('pc-bios/qemu-nsis.ico'),
                                      include_directories: include_directories('.'))
  host_dsosuf = '.dll'
elif host_os == 'darwin'
  coref = dependency('appleframeworks', modules: 'CoreFoundation')
  iokit = dependency('appleframeworks', modules: 'IOKit', required: false)
  host_dsosuf = '.dylib'
elif host_os == 'sunos'
  socket = [cc.find_library('socket'),
            cc.find_library('nsl'),
            cc.find_library('resolv')]
elif host_os == 'haiku'
  socket = [cc.find_library('posix_error_mapper'),
            cc.find_library('network'),
            cc.find_library('bsd')]
elif host_os == 'openbsd'
  if get_option('tcg').allowed() and target_dirs.length() > 0
    # Disable OpenBSD W^X if available
    emulator_link_args = cc.get_supported_link_arguments('-Wl,-z,wxneeded')
  endif
endif

###############################################
# Host-specific configuration of accelerators #
###############################################

accelerators = []
if get_option('kvm').allowed() and host_os == 'linux'
  accelerators += 'CONFIG_KVM'
endif
if get_option('whpx').allowed() and host_os == 'windows'
  if get_option('whpx').enabled() and host_machine.cpu() != 'x86_64'
    error('WHPX requires 64-bit host')
  elif cc.has_header('winhvplatform.h', required: get_option('whpx')) and \
       cc.has_header('winhvemulation.h', required: get_option('whpx'))
    accelerators += 'CONFIG_WHPX'
  endif
endif

hvf = not_found
if get_option('hvf').allowed()
  hvf = dependency('appleframeworks', modules: 'Hypervisor',
                   required: get_option('hvf'))
  if hvf.found()
    accelerators += 'CONFIG_HVF'
  endif
endif

nvmm = not_found
if host_os == 'netbsd'
  nvmm = cc.find_library('nvmm', required: get_option('nvmm'))
  if nvmm.found()
    accelerators += 'CONFIG_NVMM'
  endif
endif

tcg_arch = host_arch
if get_option('tcg').allowed()
  if host_arch == 'unknown'
    if not get_option('tcg_interpreter')
      error('Unsupported CPU @0@, try --enable-tcg-interpreter'.format(cpu))
    endif
  elif get_option('tcg_interpreter')
    warning('Use of the TCG interpreter is not recommended on this host')
    warning('architecture. There is a native TCG execution backend available')
    warning('which provides substantially better performance and reliability.')
    warning('It is strongly recommended to remove the --enable-tcg-interpreter')
    warning('configuration option on this architecture to use the native')
    warning('backend.')
  endif
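  # Pick the TCG backend directory.  The 64-bit x86 and ppc hosts reuse the
  # tcg/i386 and tcg/ppc backends, which support both register widths; with
  # --enable-tcg-interpreter the portable TCI backend is used instead.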
  if get_option('tcg_interpreter')
    tcg_arch = 'tci'
  elif host_arch == 'x86_64'
    tcg_arch = 'i386'
  elif host_arch == 'ppc64'
    tcg_arch = 'ppc'
  endif
  add_project_arguments('-iquote', meson.current_source_dir() / 'tcg' / tcg_arch,
                        language: all_languages)

  accelerators += 'CONFIG_TCG'
endif

if 'CONFIG_KVM' not in accelerators and get_option('kvm').enabled()
  error('KVM not available on this platform')
endif
if 'CONFIG_HVF' not in accelerators and get_option('hvf').enabled()
  error('HVF not available on this platform')
endif
if 'CONFIG_NVMM' not in accelerators and get_option('nvmm').enabled()
  error('NVMM not available on this platform')
endif
if 'CONFIG_WHPX' not in accelerators and get_option('whpx').enabled()
  error('WHPX not available on this platform')
endif

xen = not_found
if get_option('xen').enabled() or (get_option('xen').auto() and have_system)
  xencontrol = dependency('xencontrol', required: false,
                          method: 'pkg-config')
  if xencontrol.found()
    xen_pc = declare_dependency(version: xencontrol.version(),
      dependencies: [
        xencontrol,
        # disabler: true makes xen_pc.found() return false if any is not found
        dependency('xenstore', required: false,
                   method: 'pkg-config',
                   disabler: true),
        dependency('xenforeignmemory', required: false,
                   method: 'pkg-config',
                   disabler: true),
        dependency('xengnttab', required: false,
                   method: 'pkg-config',
                   disabler: true),
        dependency('xenevtchn', required: false,
                   method: 'pkg-config',
                   disabler: true),
        dependency('xendevicemodel', required: false,
                   method: 'pkg-config',
                   disabler: true),
        # optional, no "disabler: true"
        dependency('xentoolcore', required: false,
                   method: 'pkg-config')])
    if xen_pc.found()
      xen = xen_pc
    endif
  endif
  if not xen.found()
    xen_tests = [ '4.11.0', '4.10.0', '4.9.0', '4.8.0', '4.7.1' ]
    xen_libs = {
      '4.11.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn', 'xentoolcore' ],
      '4.10.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn', 'xentoolcore' ],
      '4.9.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ],
      '4.8.0': [ 'xenstore', 'xenctrl', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ],
      '4.7.1': [ 'xenstore', 'xenctrl', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ],
    }
    xen_deps = {}
    foreach ver: xen_tests
      # cache the various library tests to avoid polluting the logs
      xen_test_deps = []
      foreach l: xen_libs[ver]
        if l not in xen_deps
          xen_deps += { l: cc.find_library(l, required: false) }
        endif
        xen_test_deps += xen_deps[l]
      endforeach

      # Use -D to pick just one of the test programs in scripts/xen-detect.c
      xen_version = ver.split('.')
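      # Encode the version as a five-digit string, e.g. '4.11.0' -> '41100',
      # matching the CONFIG_XEN_CTRL_INTERFACE_VERSION convention used below.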
      xen_ctrl_version = xen_version[0] + \
                         ('0' + xen_version[1]).substring(-2) + \
                         ('0' + xen_version[2]).substring(-2)
      if cc.links(files('scripts/xen-detect.c'),
                  args: '-DCONFIG_XEN_CTRL_INTERFACE_VERSION=' + xen_ctrl_version,
                  dependencies: xen_test_deps)
        xen = declare_dependency(version: ver, dependencies: xen_test_deps)
        break
      endif
    endforeach
  endif
  if xen.found()
    accelerators += 'CONFIG_XEN'
  elif get_option('xen').enabled()
    error('could not compile and link Xen test program')
  endif
endif
have_xen_pci_passthrough = get_option('xen_pci_passthrough') \
  .require(xen.found(),
           error_message: 'Xen PCI passthrough requested but Xen not enabled') \
  .require(host_os == 'linux',
           error_message: 'Xen PCI passthrough not available on this platform') \
  .require(cpu == 'x86' or cpu == 'x86_64',
           error_message: 'Xen PCI passthrough not available on this platform') \
  .allowed()

################
# Dependencies #
################

# When bumping glib minimum version, please check also whether to increase
# the _WIN32_WINNT setting in osdep.h according to the value from glib.
# You should also check if any of the glib.version() checks
# below can also be removed.
glib_req_ver = '>=2.66.0'
glib_pc = dependency('glib-2.0', version: glib_req_ver, required: true,
                     method: 'pkg-config')
glib_cflags = []
if enable_modules
  gmodule = dependency('gmodule-export-2.0', version: glib_req_ver, required: true,
                       method: 'pkg-config')
elif get_option('plugins')
  gmodule = dependency('gmodule-no-export-2.0', version: glib_req_ver, required: true,
                       method: 'pkg-config')
else
  gmodule = not_found
endif

# This workaround is required due to a bug in the pkg-config file for glib, as it
# doesn't define GLIB_STATIC_COMPILATION for pkg-config --static
if host_os == 'windows' and get_option('prefer_static')
  glib_cflags += ['-DGLIB_STATIC_COMPILATION']
endif

# Sanity check that the current size_t matches the
# size that glib thinks it should be. This catches
# problems on multi-arch where people try to build
# 32-bit QEMU while pointing at 64-bit glib headers

if not cc.compiles('''
  #include <glib.h>
  #include <unistd.h>

  #define QEMU_BUILD_BUG_ON(x) \
  typedef char qemu_build_bug_on[(x)?-1:1] __attribute__((unused));

  int main(void) {
     QEMU_BUILD_BUG_ON(sizeof(size_t) != GLIB_SIZEOF_SIZE_T);
     return 0;
  }''', dependencies: glib_pc, args: glib_cflags)
  error('''sizeof(size_t) doesn't match GLIB_SIZEOF_SIZE_T.
        You probably need to set PKG_CONFIG_LIBDIR to point
        to the right pkg-config files for your build target.''')
endif

glib = declare_dependency(dependencies: [glib_pc, gmodule],
                          compile_args: glib_cflags,
                          version: glib_pc.version())

# Check whether glib has gslice, which we have to avoid for correctness.
# TODO: remove this check and the corresponding workaround (qtree) when
# the minimum supported glib is >= 2.75.3
glib_has_gslice = glib.version().version_compare('<2.75.3')
# Check whether glib has the aligned_alloc family of functions.
# <https://docs.gtk.org/glib/func.aligned_alloc.html>
glib_has_aligned_alloc = glib.version().version_compare('>=2.72.0')

# override glib dep to include the above refinements
meson.override_dependency('glib-2.0', glib)

# The path to glib.h is added to all compilation commands.
add_project_dependencies(glib.partial_dependency(compile_args: true, includes: true),
                         native: false, language: all_languages)

gio = not_found
gdbus_codegen = not_found
gdbus_codegen_error = '@0@ requires gdbus-codegen, please install libgio'
if not get_option('gio').auto() or have_system
  gio = dependency('gio-2.0', required: get_option('gio'),
                   method: 'pkg-config')
  if gio.found() and not cc.links('''
    #include <gio/gio.h>
    int main(void)
    {
      g_dbus_proxy_new_sync(0, 0, 0, 0, 0, 0, 0, 0);
      return 0;
    }''', dependencies: [glib, gio])
    if get_option('gio').enabled()
      error('The installed libgio is broken for static linking')
    endif
    gio = not_found
  endif
  if gio.found()
    gdbus_codegen = find_program('gdbus-codegen',
                                 required: get_option('gio'))
    gio_unix = dependency('gio-unix-2.0', required: get_option('gio'),
                          method: 'pkg-config')
    gio = declare_dependency(dependencies: [gio, gio_unix],
                             version: gio.version())
  endif
endif
if gdbus_codegen.found() and get_option('cfi')
  gdbus_codegen = not_found
  gdbus_codegen_error = '@0@ uses gdbus-codegen, which does not support control flow integrity'
endif

xml_pp = find_program('scripts/xml-preprocess.py')

lttng = not_found
if 'ust' in get_option('trace_backends')
  lttng = dependency('lttng-ust', required: true, version: '>= 2.1',
                     method: 'pkg-config')
endif
pixman = not_found
if not get_option('pixman').auto() or have_system or have_tools
  pixman = dependency('pixman-1', required: get_option('pixman'), version:'>=0.21.8',
                      method: 'pkg-config')
endif

zlib = dependency('zlib', required: true)

libaio = not_found
if not get_option('linux_aio').auto() or have_block
  libaio = cc.find_library('aio', has_headers: ['libaio.h'],
                           required: get_option('linux_aio'))
endif
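
# The probe below makes sure that liburing's headers can be included together
# with <linux/errqueue.h>; if the two clash on this system, io_uring support
# is disabled.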
linux_io_uring_test = '''
  #include <liburing.h>
  #include <linux/errqueue.h>

  int main(void) { return 0; }'''

linux_io_uring = not_found
if not get_option('linux_io_uring').auto() or have_block
  linux_io_uring = dependency('liburing', version: '>=0.3',
                              required: get_option('linux_io_uring'),
                              method: 'pkg-config')
  if not cc.links(linux_io_uring_test)
    linux_io_uring = not_found
  endif
endif

libnfs = not_found
if not get_option('libnfs').auto() or have_block
  libnfs = dependency('libnfs', version: '>=1.9.3',
                      required: get_option('libnfs'),
                      method: 'pkg-config')
endif
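
# xattr support: modern libcs provide getxattr()/setxattr() in <sys/xattr.h>;
# older systems need libattr and <attr/xattr.h>.  The probes below try the
# libc variant first and fall back to linking against libattr.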
libattr_test = '''
  #include <stddef.h>
  #include <sys/types.h>
  #ifdef CONFIG_LIBATTR
  #include <attr/xattr.h>
  #else
  #include <sys/xattr.h>
  #endif
  int main(void) { getxattr(NULL, NULL, NULL, 0); setxattr(NULL, NULL, NULL, 0, 0); return 0; }'''

libattr = not_found
have_old_libattr = false
if get_option('attr').allowed()
  if cc.links(libattr_test)
    libattr = declare_dependency()
  else
    libattr = cc.find_library('attr', has_headers: ['attr/xattr.h'],
                              required: get_option('attr'))
    if libattr.found() and not \
      cc.links(libattr_test, dependencies: libattr, args: '-DCONFIG_LIBATTR')
      libattr = not_found
      if get_option('attr').enabled()
        error('could not link libattr')
      else
        warning('could not link libattr, disabling')
      endif
    else
      have_old_libattr = libattr.found()
    endif
  endif
endif

cocoa = dependency('appleframeworks',
                   modules: ['Cocoa', 'CoreVideo', 'QuartzCore'],
                   required: get_option('cocoa'))

vmnet = dependency('appleframeworks', modules: 'vmnet', required: get_option('vmnet'))
if vmnet.found() and not cc.has_header_symbol('vmnet/vmnet.h',
                                              'VMNET_BRIDGED_MODE',
                                              dependencies: vmnet)
  vmnet = not_found
  if get_option('vmnet').enabled()
    error('vmnet.framework API is outdated')
  else
    warning('vmnet.framework API is outdated, disabling')
  endif
endif

seccomp = not_found
seccomp_has_sysrawrc = false
if not get_option('seccomp').auto() or have_system or have_tools
  seccomp = dependency('libseccomp', version: '>=2.3.0',
                       required: get_option('seccomp'),
                       method: 'pkg-config')
  if seccomp.found()
    seccomp_has_sysrawrc = cc.has_header_symbol('seccomp.h',
                                                'SCMP_FLTATR_API_SYSRAWRC',
                                                dependencies: seccomp)
  endif
endif

libcap_ng = not_found
if not get_option('cap_ng').auto() or have_system or have_tools
  libcap_ng = cc.find_library('cap-ng', has_headers: ['cap-ng.h'],
                              required: get_option('cap_ng'))
endif
if libcap_ng.found() and not cc.links('''
  #include <cap-ng.h>
  int main(void)
  {
    capng_capability_to_name(CAPNG_EFFECTIVE);
    return 0;
  }''', dependencies: libcap_ng)
  libcap_ng = not_found
  if get_option('cap_ng').enabled()
    error('could not link libcap-ng')
  else
    warning('could not link libcap-ng, disabling')
  endif
endif

if get_option('xkbcommon').auto() and not have_system and not have_tools
  xkbcommon = not_found
else
  xkbcommon = dependency('xkbcommon', required: get_option('xkbcommon'),
                         method: 'pkg-config')
endif

slirp = not_found
if not get_option('slirp').auto() or have_system
  slirp = dependency('slirp', required: get_option('slirp'),
                     method: 'pkg-config')
  # slirp < 4.7 is incompatible with CFI support in QEMU. This is because
  # it passes function pointers within libslirp as callbacks for timers.
  # When using a system-wide shared libslirp, the type information for the
  # callback is missing and the timer call produces a false positive with CFI.
  # Do not use the "version" keyword argument here: checking the version by
  # hand lets us print a clearer error about the incompatibility
  # with control-flow integrity.
  if get_option('cfi') and slirp.found() and slirp.version().version_compare('<4.7')
    if get_option('slirp').enabled()
      error('Control-Flow Integrity requires libslirp 4.7.')
    else
      warning('Cannot use libslirp since Control-Flow Integrity requires libslirp >= 4.7.')
      slirp = not_found
    endif
  endif
endif

vde = not_found
if not get_option('vde').auto() or have_system or have_tools
  vde = cc.find_library('vdeplug', has_headers: ['libvdeplug.h'],
                        required: get_option('vde'))
endif
if vde.found() and not cc.links('''
  #include <libvdeplug.h>
  int main(void)
  {
    struct vde_open_args a = {0, 0, 0};
    char s[] = "";
    vde_open(s, s, &a);
    return 0;
  }''', dependencies: vde)
  vde = not_found
  if get_option('vde').enabled()
    error('could not link libvdeplug')
  else
    warning('could not link libvdeplug, disabling')
  endif
endif

pulse = not_found
if not get_option('pa').auto() or (host_os == 'linux' and have_system)
  pulse = dependency('libpulse', required: get_option('pa'),
                     method: 'pkg-config')
endif
alsa = not_found
if not get_option('alsa').auto() or (host_os == 'linux' and have_system)
  alsa = dependency('alsa', required: get_option('alsa'),
                    method: 'pkg-config')
endif
jack = not_found
if not get_option('jack').auto() or have_system
  jack = dependency('jack', required: get_option('jack'),
                    method: 'pkg-config')
endif
pipewire = not_found
if not get_option('pipewire').auto() or (host_os == 'linux' and have_system)
  pipewire = dependency('libpipewire-0.3', version: '>=0.3.60',
                        required: get_option('pipewire'),
                        method: 'pkg-config')
endif
sndio = not_found
if not get_option('sndio').auto() or have_system
  sndio = dependency('sndio', required: get_option('sndio'),
                     method: 'pkg-config')
endif

spice_protocol = not_found
if not get_option('spice_protocol').auto() or have_system
  spice_protocol = dependency('spice-protocol', version: '>=0.14.0',
                              required: get_option('spice_protocol'),
                              method: 'pkg-config')
endif
spice = not_found
if get_option('spice') \
    .disable_auto_if(not have_system) \
    .require(pixman.found(),
             error_message: 'cannot enable SPICE if pixman is not available') \
    .allowed()
  spice = dependency('spice-server', version: '>=0.14.0',
                     required: get_option('spice'),
                     method: 'pkg-config')
endif
spice_headers = spice.partial_dependency(compile_args: true, includes: true)

rt = cc.find_library('rt', required: false)

libiscsi = not_found
if not get_option('libiscsi').auto() or have_block
  libiscsi = dependency('libiscsi', version: '>=1.9.0',
                        required: get_option('libiscsi'),
                        method: 'pkg-config')
endif
zstd = not_found
if not get_option('zstd').auto() or have_block
  zstd = dependency('libzstd', version: '>=1.4.0',
                    required: get_option('zstd'),
                    method: 'pkg-config')
endif
qpl = not_found
if not get_option('qpl').auto() or have_system
  qpl = dependency('qpl', version: '>=1.5.0',
                   required: get_option('qpl'),
                   method: 'pkg-config')
endif
uadk = not_found
if not get_option('uadk').auto() or have_system
  libwd = dependency('libwd', version: '>=2.6',
                     required: get_option('uadk'),
                     method: 'pkg-config')
  libwd_comp = dependency('libwd_comp', version: '>=2.6',
                          required: get_option('uadk'),
                          method: 'pkg-config')
  if libwd.found() and libwd_comp.found()
    uadk = declare_dependency(dependencies: [libwd, libwd_comp])
  endif
endif

qatzip = not_found
if not get_option('qatzip').auto() or have_system
  qatzip = dependency('qatzip', version: '>=1.1.2',
                      required: get_option('qatzip'),
                      method: 'pkg-config')
endif

virgl = not_found

have_vhost_user_gpu = have_tools and host_os == 'linux' and pixman.found()
if not get_option('virglrenderer').auto() or have_system or have_vhost_user_gpu
  virgl = dependency('virglrenderer',
                     method: 'pkg-config',
                     required: get_option('virglrenderer'))
endif
rutabaga = not_found
if not get_option('rutabaga_gfx').auto() or have_system or have_vhost_user_gpu
  rutabaga = dependency('rutabaga_gfx_ffi',
                        method: 'pkg-config',
                        required: get_option('rutabaga_gfx'))
endif
blkio = not_found
if not get_option('blkio').auto() or have_block
  blkio = dependency('blkio',
                     method: 'pkg-config',
                     required: get_option('blkio'))
endif

curl = not_found
if not get_option('curl').auto() or have_block
  curl = dependency('libcurl', version: '>=7.29.0',
                    method: 'pkg-config',
                    required: get_option('curl'))
endif
libudev = not_found
if host_os == 'linux' and (have_system or have_tools)
  libudev = dependency('libudev',
                       method: 'pkg-config',
                       required: get_option('libudev'))
endif

mpathlibs = [libudev]
mpathpersist = not_found
if host_os == 'linux' and have_tools and get_option('mpath').allowed()
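  # Link-only probe: make sure the installed multipath-tools headers and
  # libraries provide the API variant that the code below expects.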
  mpath_test_source = '''
    #include <libudev.h>
    #include <mpath_persist.h>
    unsigned mpath_mx_alloc_len = 1024;
    int logsink;
    static struct config *multipath_conf;
    extern struct udev *udev;
    extern struct config *get_multipath_config(void);
    extern void put_multipath_config(struct config *conf);
    struct udev *udev;
    struct config *get_multipath_config(void) { return multipath_conf; }
    void put_multipath_config(struct config *conf) { }
    int main(void) {
        udev = udev_new();
        multipath_conf = mpath_lib_init();
        return 0;
    }'''
  libmpathpersist = cc.find_library('mpathpersist',
                                    required: get_option('mpath'))
  if libmpathpersist.found()
    mpathlibs += libmpathpersist
    if get_option('prefer_static')
      mpathlibs += cc.find_library('devmapper',
                                   required: get_option('mpath'))
    endif
    mpathlibs += cc.find_library('multipath',
                                 required: get_option('mpath'))
    foreach lib: mpathlibs
      if not lib.found()
        mpathlibs = []
        break
      endif
    endforeach
    if mpathlibs.length() == 0
      msg = 'Dependencies missing for libmpathpersist'
    elif cc.links(mpath_test_source, dependencies: mpathlibs)
      mpathpersist = declare_dependency(dependencies: mpathlibs)
    else
      msg = 'Cannot detect libmpathpersist API'
    endif
    if not mpathpersist.found()
      if get_option('mpath').enabled()
        error(msg)
      else
        warning(msg + ', disabling')
      endif
    endif
  endif
endif

iconv = not_found
curses = not_found
if have_system and get_option('curses').allowed()
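  # Probe both the headers and the wide-character entry points; on macOS the
  # wide-character curses API needs _XOPEN_SOURCE_EXTENDED to be defined.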
  curses_test = '''
    #ifdef __APPLE__
    #define _XOPEN_SOURCE_EXTENDED 1
    #endif
    #include <locale.h>
    #include <curses.h>
    #include <wchar.h>
    int main(void) {
      wchar_t wch = L'w';
      setlocale(LC_ALL, "");
      resize_term(0, 0);
      addwstr(L"wide chars\n");
      addnwstr(&wch, 1);
      add_wch(WACS_DEGREE);
      return 0;
    }'''

  curses_dep_list = host_os == 'windows' ? ['ncurses', 'ncursesw'] : ['ncursesw']
  curses = dependency(curses_dep_list,
                      required: false,
                      method: 'pkg-config')
  msg = get_option('curses').enabled() ? 'curses library not found' : ''
  curses_compile_args = ['-DNCURSES_WIDECHAR=1']
  if curses.found()
    if cc.links(curses_test, args: curses_compile_args, dependencies: [curses])
      curses = declare_dependency(compile_args: curses_compile_args, dependencies: [curses],
                                  version: curses.version())
    else
      msg = 'curses package not usable'
      curses = not_found
    endif
  endif
  if not curses.found()
    has_curses_h = cc.has_header('curses.h', args: curses_compile_args)
    if host_os != 'windows' and not has_curses_h
      message('Trying with /usr/include/ncursesw')
      curses_compile_args += ['-I/usr/include/ncursesw']
      has_curses_h = cc.has_header('curses.h', args: curses_compile_args)
    endif
    if has_curses_h
      curses_libname_list = (host_os == 'windows' ? ['pdcurses'] : ['ncursesw', 'cursesw'])
      foreach curses_libname : curses_libname_list
        libcurses = cc.find_library(curses_libname,
                                    required: false)
        if libcurses.found()
          if cc.links(curses_test, args: curses_compile_args, dependencies: libcurses)
            curses = declare_dependency(compile_args: curses_compile_args,
                                        dependencies: [libcurses])
            break
          else
            msg = 'curses library not usable'
          endif
        endif
      endforeach
    endif
  endif
  if get_option('iconv').allowed()
    foreach link_args : [ ['-liconv'], [] ]
      # Programs will be linked with glib and this will bring in libiconv on FreeBSD.
      # We need to use libiconv if available because mixing libiconv's headers with
      # the system libc does not work.
      # However, without adding glib to the dependencies -L/usr/local/lib will not be
      # included in the command line and libiconv will not be found.
      if cc.links('''
        #include <iconv.h>
        int main(void) {
          iconv_t conv = iconv_open("WCHAR_T", "UCS-2");
          return conv != (iconv_t) -1;
        }''', args: link_args, dependencies: glib)
        iconv = declare_dependency(link_args: link_args, dependencies: glib)
        break
      endif
    endforeach
  endif
  if curses.found() and not iconv.found()
    if get_option('iconv').enabled()
      error('iconv not available')
    endif
    msg = 'iconv required for curses UI but not available'
    curses = not_found
  endif
  if not curses.found() and msg != ''
    if get_option('curses').enabled()
      error(msg)
    else
      warning(msg + ', disabling')
    endif
  endif
endif

brlapi = not_found
if not get_option('brlapi').auto() or have_system
  brlapi = cc.find_library('brlapi', has_headers: ['brlapi.h'],
                           required: get_option('brlapi'))
  if brlapi.found() and not cc.links('''
    #include <brlapi.h>
    #include <stddef.h>
    int main(void) { return brlapi__openConnection (NULL, NULL, NULL); }''', dependencies: brlapi)
    brlapi = not_found
    if get_option('brlapi').enabled()
      error('could not link brlapi')
    else
      warning('could not link brlapi, disabling')
    endif
  endif
endif

sdl = not_found
if not get_option('sdl').auto() or have_system
  sdl = dependency('sdl2', required: get_option('sdl'))
  sdl_image = not_found
endif
if sdl.found()
  # Some versions of SDL have problems with -Wundef
  if not cc.compiles('''
    #include <SDL.h>
    #include <SDL_syswm.h>
    int main(int argc, char *argv[]) { return 0; }
    ''', dependencies: sdl, args: '-Werror=undef')
    sdl = declare_dependency(compile_args: '-Wno-undef',
                             dependencies: sdl,
                             version: sdl.version())
  endif
  sdl_image = dependency('SDL2_image', required: get_option('sdl_image'),
                         method: 'pkg-config')
else
  if get_option('sdl_image').enabled()
    error('sdl-image required, but SDL was @0@'.format(
          get_option('sdl').disabled() ? 'disabled' : 'not found'))
  endif
  sdl_image = not_found
endif

rbd = not_found
if not get_option('rbd').auto() or have_block
  librados = cc.find_library('rados', required: get_option('rbd'))
  librbd = cc.find_library('rbd', has_headers: ['rbd/librbd.h'],
                           required: get_option('rbd'))
  if librados.found() and librbd.found()
    if cc.links('''
      #include <stdio.h>
      #include <rbd/librbd.h>
      int main(void) {
        rados_t cluster;
        rados_create(&cluster, NULL);
        #if LIBRBD_VERSION_CODE < LIBRBD_VERSION(1, 12, 0)
        #error
        #endif
        return 0;
      }''', dependencies: [librbd, librados])
      rbd = declare_dependency(dependencies: [librbd, librados])
    elif get_option('rbd').enabled()
      error('librbd >= 1.12.0 required')
    else
      warning('librbd >= 1.12.0 not found, disabling')
    endif
  endif
endif

glusterfs = not_found
glusterfs_ftruncate_has_stat = false
glusterfs_iocb_has_stat = false
if not get_option('glusterfs').auto() or have_block
  glusterfs = dependency('glusterfs-api', version: '>=3',
                         required: get_option('glusterfs'),
                         method: 'pkg-config')
  if glusterfs.found()
    glusterfs_ftruncate_has_stat = cc.links('''
      #include <glusterfs/api/glfs.h>

      int
      main(void)
      {
          /* new glfs_ftruncate() passes two additional args */
          return glfs_ftruncate(NULL, 0, NULL, NULL);
      }
    ''', dependencies: glusterfs)
    glusterfs_iocb_has_stat = cc.links('''
      #include <glusterfs/api/glfs.h>

      /* new glfs_io_cbk() passes two additional glfs_stat structs */
      static void
      glusterfs_iocb(glfs_fd_t *fd, ssize_t ret, struct glfs_stat *prestat, struct glfs_stat *poststat, void *data)
      {}

      int
      main(void)
      {
          glfs_io_cbk iocb = &glusterfs_iocb;
          iocb(NULL, 0, NULL, NULL, NULL);
          return 0;
      }
    ''', dependencies: glusterfs)
  endif
endif

hv_balloon = false
if get_option('hv_balloon').allowed() and have_system
  if cc.links('''
    #include <string.h>
    #include <gmodule.h>
    int main(void) {
        GTree *tree;

        tree = g_tree_new((GCompareFunc)strcmp);
        (void)g_tree_node_first(tree);
        g_tree_destroy(tree);
        return 0;
    }
  ''', dependencies: glib)
    hv_balloon = true
  else
    if get_option('hv_balloon').enabled()
      error('could not enable hv-balloon, update your glib')
    else
      warning('could not find glib support for hv-balloon, disabling')
    endif
  endif
endif

libssh = not_found
if not get_option('libssh').auto() or have_block
  libssh = dependency('libssh', version: '>=0.8.7',
                      method: 'pkg-config',
                      required: get_option('libssh'))
endif

libbzip2 = not_found
if not get_option('bzip2').auto() or have_block
  libbzip2 = cc.find_library('bz2', has_headers: ['bzlib.h'],
                             required: get_option('bzip2'))
  if libbzip2.found() and not cc.links('''
    #include <bzlib.h>
    int main(void) { BZ2_bzlibVersion(); return 0; }''', dependencies: libbzip2)
    libbzip2 = not_found
    if get_option('bzip2').enabled()
      error('could not link libbzip2')
    else
      warning('could not link libbzip2, disabling')
    endif
  endif
endif

liblzfse = not_found
if not get_option('lzfse').auto() or have_block
  liblzfse = cc.find_library('lzfse', has_headers: ['lzfse.h'],
                             required: get_option('lzfse'))
endif
if liblzfse.found() and not cc.links('''
  #include <lzfse.h>
  int main(void) { lzfse_decode_scratch_size(); return 0; }''', dependencies: liblzfse)
  liblzfse = not_found
  if get_option('lzfse').enabled()
    error('could not link liblzfse')
  else
    warning('could not link liblzfse, disabling')
  endif
endif

oss = not_found
if get_option('oss').allowed() and have_system
  if not cc.has_header('sys/soundcard.h')
    # not found
  elif host_os == 'netbsd'
    oss = cc.find_library('ossaudio', required: get_option('oss'))
  else
    oss = declare_dependency()
  endif

  if not oss.found()
    if get_option('oss').enabled()
      error('OSS not found')
    endif
  endif
endif
dsound = not_found
if not get_option('dsound').auto() or (host_os == 'windows' and have_system)
  if cc.has_header('dsound.h')
    dsound = declare_dependency(link_args: ['-lole32', '-ldxguid'])
  endif

  if not dsound.found()
    if get_option('dsound').enabled()
      error('DirectSound not found')
    endif
  endif
endif

coreaudio = not_found
if not get_option('coreaudio').auto() or (host_os == 'darwin' and have_system)
  coreaudio = dependency('appleframeworks', modules: 'CoreAudio',
                         required: get_option('coreaudio'))
endif

opengl = not_found
if not get_option('opengl').auto() or have_system or have_vhost_user_gpu
  epoxy = dependency('epoxy', method: 'pkg-config',
                     required: get_option('opengl'))
  if cc.has_header('epoxy/egl.h', dependencies: epoxy)
    opengl = epoxy
  elif get_option('opengl').enabled()
    error('epoxy/egl.h not found')
  endif
endif
gbm = not_found
if (have_system or have_tools) and (virgl.found() or opengl.found())
  gbm = dependency('gbm', method: 'pkg-config', required: false)
endif
have_vhost_user_gpu = have_vhost_user_gpu and virgl.found() and opengl.found() and gbm.found()

libcbor = not_found
if not get_option('libcbor').auto() or have_system
  libcbor = dependency('libcbor', version: '>=0.7.0',
                       required: get_option('libcbor'))
endif

gnutls = not_found
gnutls_crypto = not_found
if get_option('gnutls').enabled() or (get_option('gnutls').auto() and have_system)
  # For general TLS support our min gnutls matches
  # that implied by our platform support matrix
  #
  # For the crypto backends, we look for a newer
  # gnutls:
  #
  #   Version 3.6.8  is needed to get XTS
  #   Version 3.6.13 is needed to get PBKDF
  #   Version 3.6.14 is needed to get HW accelerated XTS
  #
  # If a new enough gnutls isn't available, we can
  # still use a different crypto backend to satisfy
  # the platform support requirements
  gnutls_crypto = dependency('gnutls', version: '>=3.6.14',
                             method: 'pkg-config',
                             required: false)
  if gnutls_crypto.found()
    gnutls = gnutls_crypto
  else
    # Our min version if all we need is TLS
    gnutls = dependency('gnutls', version: '>=3.5.18',
                        method: 'pkg-config',
                        required: get_option('gnutls'))
  endif
endif

# We prefer use of gnutls for crypto, unless the options
# explicitly asked for nettle or gcrypt.
#
# If gnutls isn't available for crypto, then we'll prefer
# gcrypt over nettle for performance reasons.
gcrypt = not_found
nettle = not_found
hogweed = not_found
crypto_sm4 = not_found
xts = 'none'

if get_option('nettle').enabled() and get_option('gcrypt').enabled()
  error('Only one of gcrypt & nettle can be enabled')
endif

# Explicit nettle/gcrypt request, so ignore gnutls for crypto
if get_option('nettle').enabled() or get_option('gcrypt').enabled()
  gnutls_crypto = not_found
endif

if not gnutls_crypto.found()
  if (not get_option('gcrypt').auto() or have_system) and not get_option('nettle').enabled()
    gcrypt = dependency('libgcrypt', version: '>=1.8',
                        required: get_option('gcrypt'))
    # Debian has removed -lgpg-error from libgcrypt-config
    # as it "spreads unnecessary dependencies" which in
    # turn breaks static builds...
    if gcrypt.found() and get_option('prefer_static')
      gcrypt = declare_dependency(dependencies:
        [gcrypt,
         cc.find_library('gpg-error', required: true)],
        version: gcrypt.version())
    endif
    crypto_sm4 = gcrypt
    # SM4 ALG is available in libgcrypt >= 1.9
    if gcrypt.found() and not cc.links('''
      #include <gcrypt.h>
      int main(void) {
        gcry_cipher_hd_t handler;
        gcry_cipher_open(&handler, GCRY_CIPHER_SM4, GCRY_CIPHER_MODE_ECB, 0);
        return 0;
      }''', dependencies: gcrypt)
      crypto_sm4 = not_found
    endif
  endif
  if (not get_option('nettle').auto() or have_system) and not gcrypt.found()
    nettle = dependency('nettle', version: '>=3.4',
                        method: 'pkg-config',
                        required: get_option('nettle'))
    if nettle.found() and not cc.has_header('nettle/xts.h', dependencies: nettle)
      xts = 'private'
    endif
    crypto_sm4 = nettle
    # SM4 ALG is available in nettle >= 3.9
    if nettle.found() and not cc.links('''
      #include <nettle/sm4.h>
      int main(void) {
        struct sm4_ctx ctx;
        unsigned char key[16] = {0};
        sm4_set_encrypt_key(&ctx, key);
        return 0;
      }''', dependencies: nettle)
      crypto_sm4 = not_found
    endif
  endif
endif
|
|
|
|
|
|
capstone = not_found
if not get_option('capstone').auto() or have_system or have_user
  capstone = dependency('capstone', version: '>=3.0.5',
                        method: 'pkg-config',
                        required: get_option('capstone'))

  # Some versions of capstone have a broken pkg-config file
  # that reports a wrong -I path, causing the #include to
  # fail later. If the system has such a broken version,
  # do not use it.
  if capstone.found() and not cc.compiles('#include <capstone.h>',
                                          dependencies: [capstone])
    capstone = not_found
    if get_option('capstone').enabled()
      error('capstone requested, but it does not appear to work')
    endif
  endif
endif

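# Illustrative check (not run by the build): a broken capstone package can be
# spotted manually with `pkg-config --cflags capstone`; the reported include
# path should actually contain capstone.h.
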
gmp = dependency('gmp', required: false, method: 'pkg-config')
if nettle.found() and gmp.found()
  hogweed = dependency('hogweed', version: '>=3.4',
                       method: 'pkg-config',
                       required: get_option('nettle'))
endif

gtk = not_found
gtkx11 = not_found
vte = not_found
have_gtk_clipboard = get_option('gtk_clipboard').enabled()

if get_option('gtk') \
             .disable_auto_if(not have_system) \
             .require(pixman.found(),
                      error_message: 'cannot enable GTK if pixman is not available') \
             .allowed()
  gtk = dependency('gtk+-3.0', version: '>=3.22.0',
                   method: 'pkg-config',
                   required: get_option('gtk'))
  if gtk.found()
    gtkx11 = dependency('gtk+-x11-3.0', version: '>=3.22.0',
                        method: 'pkg-config',
                        required: false)
    gtk = declare_dependency(dependencies: [gtk, gtkx11],
                             version: gtk.version())

    if not get_option('vte').auto() or have_system
      vte = dependency('vte-2.91',
                       method: 'pkg-config',
                       required: get_option('vte'))
    endif
  elif have_gtk_clipboard
    error('GTK clipboard requested, but GTK not found')
  endif
endif

x11 = not_found
if gtkx11.found()
  x11 = dependency('x11', method: 'pkg-config', required: gtkx11.found())
endif
png = not_found
if get_option('png').allowed() and have_system
  png = dependency('libpng', version: '>=1.6.34', required: get_option('png'),
                   method: 'pkg-config')
endif
vnc = not_found
jpeg = not_found
sasl = not_found
if get_option('vnc') \
  .disable_auto_if(not have_system) \
  .require(pixman.found(),
           error_message: 'cannot enable VNC if pixman is not available') \
  .allowed()
  vnc = declare_dependency() # dummy dependency
  jpeg = dependency('libjpeg', required: get_option('vnc_jpeg'),
                    method: 'pkg-config')
  sasl = cc.find_library('sasl2', has_headers: ['sasl/sasl.h'],
                         required: get_option('vnc_sasl'))
  if sasl.found()
    sasl = declare_dependency(dependencies: sasl,
                              compile_args: '-DSTRUCT_IOVEC_DEFINED')
  endif
endif

pam = not_found
if not get_option('auth_pam').auto() or have_system
  pam = cc.find_library('pam', has_headers: ['security/pam_appl.h'],
                        required: get_option('auth_pam'))
endif
if pam.found() and not cc.links('''
   #include <stddef.h>
   #include <security/pam_appl.h>
   int main(void) {
     const char *service_name = "qemu";
     const char *user = "frank";
     const struct pam_conv pam_conv = { 0 };
     pam_handle_t *pamh = NULL;
     pam_start(service_name, user, &pam_conv, &pamh);
     return 0;
   }''', dependencies: pam)
  pam = not_found
  if get_option('auth_pam').enabled()
    error('could not link libpam')
  else
    warning('could not link libpam, disabling')
  endif
endif

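# Note added for clarity: the service and user names in the libpam probe above
# are arbitrary placeholders; the test program is only compiled and linked,
# never executed.
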
snappy = not_found
if not get_option('snappy').auto() or have_system
  snappy = cc.find_library('snappy', has_headers: ['snappy-c.h'],
                           required: get_option('snappy'))
endif
if snappy.found() and not cc.links('''
   #include <snappy-c.h>
   int main(void) { snappy_max_compressed_length(4096); return 0; }''', dependencies: snappy)
  snappy = not_found
  if get_option('snappy').enabled()
    error('could not link libsnappy')
  else
    warning('could not link libsnappy, disabling')
  endif
endif

lzo = not_found
if not get_option('lzo').auto() or have_system
  lzo = cc.find_library('lzo2', has_headers: ['lzo/lzo1x.h'],
                        required: get_option('lzo'))
endif
if lzo.found() and not cc.links('''
   #include <lzo/lzo1x.h>
   int main(void) { lzo_version(); return 0; }''', dependencies: lzo)
  lzo = not_found
  if get_option('lzo').enabled()
    error('could not link liblzo2')
  else
    warning('could not link liblzo2, disabling')
  endif
endif

numa = not_found
if not get_option('numa').auto() or have_system or have_tools
  numa = cc.find_library('numa', has_headers: ['numa.h'],
                         required: get_option('numa'))
endif
if numa.found() and not cc.links('''
   #include <numa.h>
   int main(void) { return numa_available(); }
   ''', dependencies: numa)
  numa = not_found
  if get_option('numa').enabled()
    error('could not link numa')
  else
    warning('could not link numa, disabling')
  endif
endif

fdt = not_found
fdt_opt = get_option('fdt')
if fdt_opt == 'enabled' and get_option('wrap_mode') == 'nodownload'
  fdt_opt = 'system'
endif
if fdt_opt in ['enabled', 'system'] or (fdt_opt == 'auto' and have_system)
  fdt = cc.find_library('fdt', required: fdt_opt == 'system')
  if fdt.found() and cc.links('''
     #include <libfdt.h>
     #include <libfdt_env.h>
     int main(void) { fdt_find_max_phandle(NULL, NULL); return 0; }''',
       dependencies: fdt)
    fdt_opt = 'system'
  elif fdt_opt != 'system'
    fdt_opt = get_option('wrap_mode') == 'nodownload' ? 'disabled' : 'internal'
    fdt = not_found
  else
    error('system libfdt is too old (1.5.1 or newer required)')
  endif
endif
if fdt_opt == 'internal'
  assert(not fdt.found())
  libfdt_proj = subproject('dtc', required: true,
                           default_options: ['tools=false', 'yaml=disabled',
                                             'python=disabled', 'default_library=static'])
  fdt = libfdt_proj.get_variable('libfdt_dep')
endif

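# Summary of the fallback above (comment only): a system libfdt is used when
# it provides fdt_find_max_phandle (libfdt >= 1.5.1); otherwise the bundled
# 'dtc' subproject is built, unless wrap_mode=nodownload forbids it. One path
# can be forced with something like `-Dfdt=system` or `-Dfdt=internal`
# (option values taken from the choices handled above).
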
rdma = not_found
if not get_option('rdma').auto() or have_system
  rdma_libs = [cc.find_library('rdmacm', has_headers: ['rdma/rdma_cma.h'],
                               required: get_option('rdma')),
               cc.find_library('ibverbs', required: get_option('rdma'))]
  rdma = declare_dependency(dependencies: rdma_libs)
  foreach lib: rdma_libs
    if not lib.found()
      rdma = not_found
    endif
  endforeach
endif

cacard = not_found
if not get_option('smartcard').auto() or have_system
  cacard = dependency('libcacard', required: get_option('smartcard'),
                      version: '>=2.5.1', method: 'pkg-config')
endif
u2f = not_found
if not get_option('u2f').auto() or have_system
  u2f = dependency('u2f-emu', required: get_option('u2f'),
                   method: 'pkg-config')
endif
canokey = not_found
if not get_option('canokey').auto() or have_system
  canokey = dependency('canokey-qemu', required: get_option('canokey'),
                       method: 'pkg-config')
endif
usbredir = not_found
if not get_option('usb_redir').auto() or have_system
  usbredir = dependency('libusbredirparser-0.5', required: get_option('usb_redir'),
                        version: '>=0.6', method: 'pkg-config')
endif
libusb = not_found
if not get_option('libusb').auto() or have_system
  libusb = dependency('libusb-1.0', required: get_option('libusb'),
                      version: '>=1.0.13', method: 'pkg-config')
endif

libpmem = not_found
if not get_option('libpmem').auto() or have_system
  libpmem = dependency('libpmem', required: get_option('libpmem'),
                       method: 'pkg-config')
endif
libdaxctl = not_found
if not get_option('libdaxctl').auto() or have_system
  libdaxctl = dependency('libdaxctl', required: get_option('libdaxctl'),
                         version: '>=57', method: 'pkg-config')
endif
tasn1 = not_found
if gnutls.found()
  tasn1 = dependency('libtasn1',
                     required: false,
                     method: 'pkg-config')
endif
keyutils = not_found
if not get_option('libkeyutils').auto() or have_block
  keyutils = dependency('libkeyutils', required: get_option('libkeyutils'),
                        method: 'pkg-config')
endif

has_gettid = cc.has_function('gettid')

# libselinux
selinux = dependency('libselinux',
                     required: get_option('selinux'),
                     method: 'pkg-config')

# Malloc tests

malloc = []
if get_option('malloc') == 'system'
  has_malloc_trim = \
    get_option('malloc_trim').allowed() and \
    cc.has_function('malloc_trim', prefix: '#include <malloc.h>')
else
  has_malloc_trim = false
  malloc = cc.find_library(get_option('malloc'), required: true)
endif
if not has_malloc_trim and get_option('malloc_trim').enabled()
  if get_option('malloc') == 'system'
    error('malloc_trim not available on this platform.')
  else
    error('malloc_trim not available with non-libc memory allocator')
  endif
endif

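# Hedged usage note: a non-libc allocator is selected by naming it in the
# 'malloc' option so that cc.find_library() above can pick it up, e.g.
# something like `-Dmalloc=jemalloc` (value assumed from the option's
# configured choices); malloc_trim() is then unavailable, which is what the
# error paths above enforce.
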
gnu_source_prefix = '''
  #ifndef _GNU_SOURCE
  #define _GNU_SOURCE
  #endif
'''

# Check whether the glibc provides STATX_BASIC_STATS
has_statx = cc.has_header_symbol('sys/stat.h', 'STATX_BASIC_STATS', prefix: gnu_source_prefix)

# Check whether statx() provides mount ID information
has_statx_mnt_id = cc.has_header_symbol('sys/stat.h', 'STATX_MNT_ID', prefix: gnu_source_prefix)

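# Note added for clarity: the _GNU_SOURCE prefix above is needed because glibc
# only exposes the STATX_* constants when GNU extensions are enabled, so the
# header-symbol checks would otherwise fail even on capable systems.
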
have_vhost_user_blk_server = get_option('vhost_user_blk_server') \
  .require(host_os == 'linux',
           error_message: 'vhost_user_blk_server requires linux') \
  .require(have_vhost_user,
           error_message: 'vhost_user_blk_server requires vhost-user support') \
  .disable_auto_if(not have_tools and not have_system) \
  .allowed()

if get_option('fuse').disabled() and get_option('fuse_lseek').enabled()
  error('Cannot enable fuse-lseek while fuse is disabled')
endif

fuse = dependency('fuse3', required: get_option('fuse'),
                  version: '>=3.1', method: 'pkg-config')

fuse_lseek = not_found
if get_option('fuse_lseek').allowed()
  if fuse.version().version_compare('>=3.8')
    # Dummy dependency
    fuse_lseek = declare_dependency()
  elif get_option('fuse_lseek').enabled()
    if fuse.found()
      error('fuse-lseek requires libfuse >=3.8, found ' + fuse.version())
    else
      error('fuse-lseek requires libfuse, which was not found')
    endif
  endif
endif

have_libvduse = (host_os == 'linux')
if get_option('libvduse').enabled()
  if host_os != 'linux'
    error('libvduse requires linux')
  endif
elif get_option('libvduse').disabled()
  have_libvduse = false
endif

have_vduse_blk_export = (have_libvduse and host_os == 'linux')
if get_option('vduse_blk_export').enabled()
  if host_os != 'linux'
    error('vduse_blk_export requires linux')
  elif not have_libvduse
    error('vduse_blk_export requires libvduse support')
  endif
elif get_option('vduse_blk_export').disabled()
  have_vduse_blk_export = false
endif

# libbpf
bpf_version = '1.1.0'
libbpf = dependency('libbpf', version: '>=' + bpf_version, required: get_option('bpf'), method: 'pkg-config')
if libbpf.found() and not cc.links('''
    #include <bpf/libbpf.h>
    #include <linux/bpf.h>
    int main(void)
    {
      // check flag availability
      int flag = BPF_F_MMAPABLE;
      bpf_object__destroy_skeleton(NULL);
      return 0;
    }''', dependencies: libbpf)
  libbpf = not_found
  if get_option('bpf').enabled()
    error('libbpf skeleton/mmapping test failed')
  else
    warning('libbpf skeleton/mmapping test failed, disabling')
  endif
endif

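# Note added for clarity: the probe above intentionally exercises both the
# skeleton API (bpf_object__destroy_skeleton) and the BPF_F_MMAPABLE flag,
# so libbpf is only accepted when it is new enough for both features.
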
# libxdp
libxdp = not_found
if not get_option('af_xdp').auto() or have_system
  if libbpf.found()
    libxdp = dependency('libxdp', required: get_option('af_xdp'),
                        version: '>=1.4.0', method: 'pkg-config')
  else
    if get_option('af_xdp').enabled()
      error('libxdp requested, but libbpf is not available')
    endif
  endif
endif

# libdw
libdw = not_found
if not get_option('libdw').auto() or \
        (not get_option('prefer_static') and (have_system or have_user))
  libdw = dependency('libdw',
                     method: 'pkg-config',
                     required: get_option('libdw'))
endif

#################
# config-host.h #
#################

config_host_data = configuration_data()

config_host_data.set('CONFIG_HAVE_RUST', have_rust)

audio_drivers_selected = []
if have_system
  audio_drivers_available = {
    'alsa': alsa.found(),
    'coreaudio': coreaudio.found(),
    'dsound': dsound.found(),
    'jack': jack.found(),
    'oss': oss.found(),
    'pa': pulse.found(),
    'pipewire': pipewire.found(),
    'sdl': sdl.found(),
    'sndio': sndio.found(),
  }
  foreach k, v: audio_drivers_available
    config_host_data.set('CONFIG_AUDIO_' + k.to_upper(), v)
  endforeach

  # Default to native drivers first, OSS second, SDL third
  audio_drivers_priority = \
    [ 'pa', 'coreaudio', 'dsound', 'sndio', 'oss' ] + \
    (host_os == 'linux' ? [] : [ 'sdl' ])
  audio_drivers_default = []
  foreach k: audio_drivers_priority
    if audio_drivers_available[k]
      audio_drivers_default += k
    endif
  endforeach

  foreach k: get_option('audio_drv_list')
    if k == 'default'
      audio_drivers_selected += audio_drivers_default
    elif not audio_drivers_available[k]
      error('Audio driver "@0@" not available.'.format(k))
    else
      audio_drivers_selected += k
    endif
  endforeach
endif
config_host_data.set('CONFIG_AUDIO_DRIVERS',
                     '"' + '", "'.join(audio_drivers_selected) + '", ')

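# Hedged usage note: an explicit driver list overrides the priority order
# above, e.g. something like `-Daudio_drv_list=pa,sdl`; any driver named there
# that did not probe successfully is rejected by the error() branch.
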
have_host_block_device = (host_os != 'darwin' or
    cc.has_header('IOKit/storage/IOMedia.h'))

dbus_display = get_option('dbus_display') \
  .require(gio.version().version_compare('>=2.64'),
           error_message: '-display dbus requires glib>=2.64') \
  .require(gdbus_codegen.found(),
           error_message: gdbus_codegen_error.format('-display dbus')) \
  .allowed()

have_virtfs = get_option('virtfs') \
    .require(host_os == 'linux' or host_os == 'darwin',
             error_message: 'virtio-9p (virtfs) requires Linux or macOS') \
    .require(host_os == 'linux' or cc.has_function('pthread_fchdir_np'),
             error_message: 'virtio-9p (virtfs) on macOS requires the presence of pthread_fchdir_np') \
    .require(host_os == 'darwin' or libattr.found(),
             error_message: 'virtio-9p (virtfs) on Linux requires libattr-devel') \
    .disable_auto_if(not have_tools and not have_system) \
    .allowed()

qga_fsfreeze = false
qga_fstrim = false
if host_os == 'linux'
  if cc.has_header_symbol('linux/fs.h', 'FIFREEZE')
    qga_fsfreeze = true
  endif
  if cc.has_header_symbol('linux/fs.h', 'FITRIM')
    qga_fstrim = true
  endif
elif host_os == 'freebsd' and cc.has_header_symbol('ufs/ffs/fs.h', 'UFSSUSPEND')
  qga_fsfreeze = true
endif

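# Note added for clarity: these two flags record whether the host headers
# provide the ioctls needed by the guest agent's filesystem freeze and trim
# commands (FIFREEZE/FITRIM on Linux, UFSSUSPEND for freeze on FreeBSD).
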
if get_option('block_drv_ro_whitelist') == ''
  config_host_data.set('CONFIG_BDRV_RO_WHITELIST', '')
else
  config_host_data.set('CONFIG_BDRV_RO_WHITELIST',
        '"' + get_option('block_drv_ro_whitelist').replace(',', '", "') + '", ')
endif
if get_option('block_drv_rw_whitelist') == ''
  config_host_data.set('CONFIG_BDRV_RW_WHITELIST', '')
else
  config_host_data.set('CONFIG_BDRV_RW_WHITELIST',
        '"' + get_option('block_drv_rw_whitelist').replace(',', '", "') + '", ')
endif

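# Hedged example: the whitelists are comma-separated driver names, so a value
# such as `-Dblock_drv_rw_whitelist=qcow2,raw,file` (drivers chosen purely for
# illustration) expands into the quoted, comma-separated C initializer above.
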
foreach k : get_option('trace_backends')
  config_host_data.set('CONFIG_TRACE_' + k.to_upper(), true)
endforeach
config_host_data.set_quoted('CONFIG_TRACE_FILE', get_option('trace_file'))
config_host_data.set_quoted('CONFIG_TLS_PRIORITY', get_option('tls_priority'))
if iasl.found()
  config_host_data.set_quoted('CONFIG_IASL', iasl.full_path())
endif
config_host_data.set_quoted('CONFIG_BINDIR', get_option('prefix') / get_option('bindir'))
config_host_data.set_quoted('CONFIG_PREFIX', get_option('prefix'))
config_host_data.set_quoted('CONFIG_QEMU_CONFDIR', get_option('prefix') / qemu_confdir)
config_host_data.set_quoted('CONFIG_QEMU_DATADIR', get_option('prefix') / qemu_datadir)
config_host_data.set_quoted('CONFIG_QEMU_DESKTOPDIR', get_option('prefix') / qemu_desktopdir)

qemu_firmwarepath = ''
foreach k : get_option('qemu_firmwarepath')
  qemu_firmwarepath += '"' + get_option('prefix') / k + '", '
endforeach
config_host_data.set('CONFIG_QEMU_FIRMWAREPATH', qemu_firmwarepath)

config_host_data.set_quoted('CONFIG_QEMU_HELPERDIR', get_option('prefix') / get_option('libexecdir'))
config_host_data.set_quoted('CONFIG_QEMU_ICONDIR', get_option('prefix') / qemu_icondir)
config_host_data.set_quoted('CONFIG_QEMU_LOCALEDIR', get_option('prefix') / get_option('localedir'))
config_host_data.set_quoted('CONFIG_QEMU_LOCALSTATEDIR', get_option('prefix') / get_option('localstatedir'))
config_host_data.set_quoted('CONFIG_QEMU_MODDIR', get_option('prefix') / qemu_moddir)
config_host_data.set_quoted('CONFIG_SYSCONFDIR', get_option('prefix') / get_option('sysconfdir'))

if enable_modules
  config_host_data.set('CONFIG_STAMP', run_command(
      meson.current_source_dir() / 'scripts/qemu-stamp.py',
      meson.project_version(), get_option('pkgversion'), '--',
      meson.current_source_dir() / 'configure',
      capture: true, check: true).stdout().strip())
endif

have_slirp_smbd = get_option('slirp_smbd') \
  .require(host_os != 'windows', error_message: 'Host smbd not supported on this platform.') \
  .allowed()
if have_slirp_smbd
  smbd_path = get_option('smbd')
  if smbd_path == ''
    smbd_path = (host_os == 'sunos' ? '/usr/sfw/sbin/smbd' : '/usr/sbin/smbd')
  endif
  config_host_data.set_quoted('CONFIG_SMBD_COMMAND', smbd_path)
endif

config_host_data.set('HOST_' + host_arch.to_upper(), 1)

kvm_targets_c = '""'
if get_option('kvm').allowed() and host_os == 'linux'
  kvm_targets_c = '"' + '" ,"'.join(kvm_targets) + '"'
endif
config_host_data.set('CONFIG_KVM_TARGETS', kvm_targets_c)

if get_option('module_upgrades') and not enable_modules
  error('Cannot enable module-upgrades as modules are not enabled')
endif
config_host_data.set('CONFIG_MODULE_UPGRADES', get_option('module_upgrades'))

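# Hedged usage note: the smbd fallback above only applies when the 'smbd'
# option is left empty; a non-default daemon can be pointed at explicitly,
# e.g. something like `-Dsmbd=/usr/local/sbin/smbd` (path is illustrative).
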
config_host_data.set('CONFIG_ATTR', libattr.found())
config_host_data.set('CONFIG_BDRV_WHITELIST_TOOLS', get_option('block_drv_whitelist_in_tools'))
config_host_data.set('CONFIG_BRLAPI', brlapi.found())
config_host_data.set('CONFIG_BSD', host_os in bsd_oses)
config_host_data.set('CONFIG_FREEBSD', host_os == 'freebsd')
config_host_data.set('CONFIG_CAPSTONE', capstone.found())
config_host_data.set('CONFIG_COCOA', cocoa.found())
config_host_data.set('CONFIG_DARWIN', host_os == 'darwin')
config_host_data.set('CONFIG_FDT', fdt.found())
config_host_data.set('CONFIG_FUZZ', get_option('fuzzing'))
config_host_data.set('CONFIG_GCOV', get_option('b_coverage'))
config_host_data.set('CONFIG_LIBUDEV', libudev.found())
config_host_data.set('CONFIG_LINUX', host_os == 'linux')
config_host_data.set('CONFIG_POSIX', host_os != 'windows')
config_host_data.set('CONFIG_WIN32', host_os == 'windows')
config_host_data.set('CONFIG_LZO', lzo.found())
config_host_data.set('CONFIG_MPATH', mpathpersist.found())
config_host_data.set('CONFIG_BLKIO', blkio.found())
if blkio.found()
  config_host_data.set('CONFIG_BLKIO_VHOST_VDPA_FD',
                       blkio.version().version_compare('>=1.3.0'))
  config_host_data.set('CONFIG_BLKIO_WRITE_ZEROS_FUA',
                       blkio.version().version_compare('>=1.4.0'))
endif
config_host_data.set('CONFIG_CURL', curl.found())
config_host_data.set('CONFIG_CURSES', curses.found())
config_host_data.set('CONFIG_GBM', gbm.found())
config_host_data.set('CONFIG_GIO', gio.found())
config_host_data.set('CONFIG_GLUSTERFS', glusterfs.found())
if glusterfs.found()
  config_host_data.set('CONFIG_GLUSTERFS_XLATOR_OPT', glusterfs.version().version_compare('>=4'))
  config_host_data.set('CONFIG_GLUSTERFS_DISCARD', glusterfs.version().version_compare('>=5'))
  config_host_data.set('CONFIG_GLUSTERFS_FALLOCATE', glusterfs.version().version_compare('>=6'))
  config_host_data.set('CONFIG_GLUSTERFS_ZEROFILL', glusterfs.version().version_compare('>=6'))
  config_host_data.set('CONFIG_GLUSTERFS_FTRUNCATE_HAS_STAT', glusterfs_ftruncate_has_stat)
  config_host_data.set('CONFIG_GLUSTERFS_IOCB_HAS_STAT', glusterfs_iocb_has_stat)
endif
config_host_data.set('CONFIG_GTK', gtk.found())
config_host_data.set('CONFIG_VTE', vte.found())
config_host_data.set('CONFIG_GTK_CLIPBOARD', have_gtk_clipboard)
config_host_data.set('CONFIG_HEXAGON_IDEF_PARSER', get_option('hexagon_idef_parser'))
config_host_data.set('CONFIG_LIBATTR', have_old_libattr)
config_host_data.set('CONFIG_LIBCAP_NG', libcap_ng.found())
config_host_data.set('CONFIG_EBPF', libbpf.found())
net: add initial support for AF_XDP network backend
AF_XDP is a network socket family that allows communication directly
with the network device driver in the kernel, bypassing most or all
of the kernel networking stack. In the essence, the technology is
pretty similar to netmap. But, unlike netmap, AF_XDP is Linux-native
and works with any network interfaces without driver modifications.
Unlike vhost-based backends (kernel, user, vdpa), AF_XDP doesn't
require access to character devices or unix sockets. Only access to
the network interface itself is necessary.
This patch implements a network backend that communicates with the
kernel by creating an AF_XDP socket. A chunk of userspace memory
is shared between QEMU and the host kernel. 4 ring buffers (Tx, Rx,
Fill and Completion) are placed in that memory along with a pool of
memory buffers for the packet data. Data transmission is done by
allocating one of the buffers, copying packet data into it and
placing the pointer into Tx ring. After transmission, device will
return the buffer via Completion ring. On Rx, device will take
a buffer form a pre-populated Fill ring, write the packet data into
it and place the buffer into Rx ring.
AF_XDP network backend takes on the communication with the host
kernel and the network interface and forwards packets to/from the
peer device in QEMU.
Usage example:
-device virtio-net-pci,netdev=guest1,mac=00:16:35:AF:AA:5C
-netdev af-xdp,ifname=ens6f1np1,id=guest1,mode=native,queues=1
XDP program bridges the socket with a network interface. It can be
attached to the interface in 2 different modes:
1. skb - this mode should work for any interface and doesn't require
driver support. With a caveat of lower performance.
2. native - this does require support from the driver and allows to
bypass skb allocation in the kernel and potentially use
zero-copy while getting packets in/out userspace.
By default, QEMU will try to use native mode and fall back to skb.
Mode can be forced via 'mode' option. To force 'copy' even in native
mode, use 'force-copy=on' option. This might be useful if there is
some issue with the driver.
Option 'queues=N' allows to specify how many device queues should
be open. Note that all the queues that are not open are still
functional and can receive traffic, but it will not be delivered to
QEMU. So, the number of device queues should generally match the
QEMU configuration, unless the device is shared with something
else and the traffic re-direction to appropriate queues is correctly
configured on a device level (e.g. with ethtool -N).
'start-queue=M' option can be used to specify from which queue id
QEMU should start configuring 'N' queues. It might also be necessary
to use this option with certain NICs, e.g. MLX5 NICs. See the docs
for examples.
In the general case QEMU will need CAP_NET_ADMIN and CAP_SYS_ADMIN
or CAP_BPF capabilities in order to load the default XSK/XDP programs onto
the network interface and configure BPF maps. It is possible, however,
to run with no capabilities. For that to work, an external process
with enough capabilities will need to pre-load the default XSK program,
create AF_XDP sockets and pass their file descriptors to the QEMU process
on startup via the 'sock-fds' option. The network backend will need to be
configured with 'inhibit=on' to avoid loading the program.
QEMU will need 32 MB of locked memory (RLIMIT_MEMLOCK) per queue
or CAP_IPC_LOCK.
There are a few performance challenges with the current network backends.
First is that they do not support IO threads. This means that data
path is handled by the main thread in QEMU and may slow down other
work or may be slowed down by some other work. This also means that
taking advantage of multi-queue is generally not possible today.
Another thing is that the data path goes through the device emulation
code, which is not really optimized for performance. The fastest
"frontend" device is virtio-net. But it's not optimized for heavy
traffic either, because it expects such use-cases to be handled via
some implementation of vhost (user, kernel, vdpa). In practice, we
have virtio notifications and rcu lock/unlock on a per-packet basis
and not very efficient accesses to the guest memory. Communication
channels between backend and frontend devices do not allow passing
more than one packet at a time as well.
Some of these challenges can be avoided in the future by adding better
batching into device emulation or by implementing vhost-af-xdp variant.
There are also a few kernel limitations. AF_XDP sockets do not
support any kind of checksum or segmentation offloading. Buffers
are limited to a page size (4K), i.e. MTU is limited. Multi-buffer
support implementation for AF_XDP is in progress, but not ready yet.
Also, transmission in all non-zero-copy modes is synchronous, i.e.
done in a syscall. That doesn't allow high packet rates on virtual
interfaces.
However, keeping in mind all of these challenges, the current implementation
of the AF_XDP backend shows decent performance while running on top
of a physical NIC with zero-copy support.
Test setup:
2 VMs running on 2 physical hosts connected via ConnectX6-Dx card.
Network backend is configured to open the NIC directly in native mode.
The driver supports zero-copy. NIC is configured to use 1 queue.
Inside a VM - iperf3 for basic TCP performance testing and dpdk-testpmd
for PPS testing.
iperf3 result:
TCP stream : 19.1 Gbps
dpdk-testpmd (single queue, single CPU core, 64 B packets) results:
Tx only : 3.4 Mpps
Rx only : 2.0 Mpps
L2 FWD Loopback : 1.5 Mpps
In skb mode the same setup shows much lower performance, similar to
the setup where the pair of physical NICs is replaced with a veth pair:
iperf3 result:
TCP stream : 9 Gbps
dpdk-testpmd (single queue, single CPU core, 64 B packets) results:
Tx only : 1.2 Mpps
Rx only : 1.0 Mpps
L2 FWD Loopback : 0.7 Mpps
Results in skb mode or over the veth are close to results of a tap
backend with vhost=on and disabled segmentation offloading bridged
with a NIC.
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> (docker/lcitool)
Signed-off-by: Jason Wang <jasowang@redhat.com>
2023-09-13 21:34:37 +03:00
|
|
|
|
config_host_data.set('CONFIG_AF_XDP', libxdp.found())
|
meson: fix missing preprocessor symbols
While most libraries do not need a CONFIG_* symbol because the
"when:" clauses are enough, some do. Add them back or stop
using them if possible.
In the case of libpmem, the statement to add the CONFIG_* symbol
was still in configure, but could not be triggered because it
checked for "no" instead of "disabled" (and it would be wrong anyway
since the test for the library has not been done yet).
Reported-by: Li Zhijian <lizhijian@cn.fujitsu.com>
Fixes: 587d59d6cc ("configure, meson: convert virgl detection to meson", 2021-07-06)
Fixes: 83ef16821a ("configure, meson: convert libdaxctl detection to meson", 2021-07-06)
Fixes: e36e8c70f6 ("configure, meson: convert libpmem detection to meson", 2021-07-06)
Fixes: 53c22b68e3 ("configure, meson: convert liburing detection to meson", 2021-07-06)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2021-07-08 14:50:06 +03:00
|
|
|
|
config_host_data.set('CONFIG_LIBDAXCTL', libdaxctl.found())
|
2020-11-17 15:11:25 +03:00
|
|
|
|
config_host_data.set('CONFIG_LIBISCSI', libiscsi.found())
|
2020-11-17 15:11:25 +03:00
|
|
|
|
config_host_data.set('CONFIG_LIBNFS', libnfs.found())
|
2021-12-09 17:48:01 +03:00
|
|
|
|
config_host_data.set('CONFIG_LIBSSH', libssh.found())
|
2021-10-07 16:08:20 +03:00
|
|
|
|
config_host_data.set('CONFIG_LINUX_AIO', libaio.found())
|
meson: fix missing preprocessor symbols
While most libraries do not need a CONFIG_* symbol because the
"when:" clauses are enough, some do. Add them back or stop
using them if possible.
In the case of libpmem, the statement to add the CONFIG_* symbol
was still in configure, but could not be triggered because it
checked for "no" instead of "disabled" (and it would be wrong anyway
since the test for the library has not been done yet).
Reported-by: Li Zhijian <lizhijian@cn.fujitsu.com>
Fixes: 587d59d6cc ("configure, meson: convert virgl detection to meson", 2021-07-06)
Fixes: 83ef16821a ("configure, meson: convert libdaxctl detection to meson", 2021-07-06)
Fixes: e36e8c70f6 ("configure, meson: convert libpmem detection to meson", 2021-07-06)
Fixes: 53c22b68e3 ("configure, meson: convert liburing detection to meson", 2021-07-06)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2021-07-08 14:50:06 +03:00
|
|
|
|
config_host_data.set('CONFIG_LINUX_IO_URING', linux_io_uring.found())
|
|
|
|
|
config_host_data.set('CONFIG_LIBPMEM', libpmem.found())
|
2022-10-20 15:53:10 +03:00
|
|
|
|
config_host_data.set('CONFIG_MODULES', enable_modules)
|
2021-12-21 14:38:27 +03:00
|
|
|
|
config_host_data.set('CONFIG_NUMA', numa.found())
|
hostmem: Honor multiple preferred nodes if possible
If a memory-backend is configured with mode
HOST_MEM_POLICY_PREFERRED then
host_memory_backend_memory_complete() calls mbind() as:
mbind(..., MPOL_PREFERRED, nodemask, ...);
Here, 'nodemask' is a bitmap of host NUMA nodes and corresponds
to the .host-nodes attribute. Therefore, there can be multiple
nodes specified. However, the documentation to MPOL_PREFERRED
says:
MPOL_PREFERRED
This mode sets the preferred node for allocation. ...
If nodemask specifies more than one node ID, the first node
in the mask will be selected as the preferred node.
Therefore, only the first node is honored and the rest are
silently ignored. Well, with recent changes to the kernel and
numactl we can do better.
The Linux kernel added in v5.15 via commit cfcaa66f8032
("mm/hugetlb: add support for mempolicy MPOL_PREFERRED_MANY")
support for MPOL_PREFERRED_MANY, which accepts multiple preferred
NUMA nodes instead.
Then, numa_has_preferred_many() API was introduced to numactl
(v2.0.15~26) allowing applications to query kernel support.
Wiring this all together, we can pass MPOL_PREFERRED_MANY to the
mbind() call instead and stop silently ignoring the additional nodes.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Message-Id: <a0b4adce1af5bd2344c2218eb4a04b3ff7bcfdb4.1671097918.git.mprivozn@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
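As a rough illustration of the pattern this commit describes (a sketch only, not QEMU's actual hostmem code), the C program below prefers a set of host NUMA nodes for an anonymous mapping, using MPOL_PREFERRED_MANY when both the headers and libnuma advertise it. It assumes libnuma is installed (build with "cc sketch.c -lnuma"), that numaif.h is new enough to define MPOL_PREFERRED_MANY, and that numactl >= 2.0.15 provides numa_has_preferred_many(), which is exactly what the HAVE_NUMA_HAS_PREFERRED_MANY probe just below detects.
/* Sketch: prefer several host NUMA nodes for an anonymous mapping,
 * falling back to single-node MPOL_PREFERRED on older kernels/libnuma. */
#include <numa.h>
#include <numaif.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 2 * 1024 * 1024;
    unsigned long nodemask = (1UL << 0) | (1UL << 1);  /* host nodes 0 and 1 */
    int mode = MPOL_PREFERRED;                         /* only the first node is honored */
    void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (buf == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
#ifdef MPOL_PREFERRED_MANY
    if (numa_available() >= 0 && numa_has_preferred_many() > 0) {
        mode = MPOL_PREFERRED_MANY;                    /* all listed nodes are honored */
    }
#endif
    if (mbind(buf, len, mode, &nodemask, sizeof(nodemask) * 8, 0) != 0) {
        perror("mbind");
        return 1;
    }
    printf("applied memory policy mode %d\n", mode);
    return 0;
}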
2022-12-15 12:55:03 +03:00
|
|
|
|
if numa.found()
|
|
|
|
|
config_host_data.set('HAVE_NUMA_HAS_PREFERRED_MANY',
|
|
|
|
|
cc.has_function('numa_has_preferred_many',
|
|
|
|
|
dependencies: numa))
|
|
|
|
|
endif
|
2022-04-20 18:33:40 +03:00
|
|
|
|
config_host_data.set('CONFIG_OPENGL', opengl.found())
|
2023-08-30 13:20:53 +03:00
|
|
|
|
config_host_data.set('CONFIG_PLUGIN', get_option('plugins'))
|
2020-11-17 15:11:25 +03:00
|
|
|
|
config_host_data.set('CONFIG_RBD', rbd.found())
|
2022-04-20 18:33:41 +03:00
|
|
|
|
config_host_data.set('CONFIG_RDMA', rdma.found())
|
meson, cutils: allow non-relocatable installs
Say QEMU is configured with bindir = "/usr/bin" and a firmware path
that starts with "/usr/share/qemu". Ever since QEMU 5.2, QEMU's
install has been relocatable: if you move qemu-system-x86_64 from
/usr/bin to /home/username/bin, it will start looking for firmware in
/home/username/share/qemu. Previously, you would get a non-relocatable
install where the moved QEMU will keep looking for firmware in
/usr/share/qemu.
Windows almost always wants relocatable installs, and in fact that
is why QEMU 5.2 introduced relocatability in the first place.
However, newfangled distribution mechanisms such as AppImage
(https://docs.appimage.org/reference/best-practices.html), and
possibly NixOS, also dislike using at runtime the absolute paths
that were established at build time.
On POSIX systems you almost never care; if you do, your use case
dictates which one is desirable, so there's no single answer.
Obviously relocatability works fine most of the time, because not many
people have complained about QEMU's switch to relocatable install,
and that's why until now there was no way to disable relocatability.
But a non-relocatable, non-modular binary can help if you want to do
experiments with old firmware and new QEMU or vice versa (because you
can just upgrade/downgrade the firmware package, and use rpm2cpio or
similar to extract the QEMU binaries outside /usr), so allow both.
This patch allows one to build a non-relocatable install using a new
option to configure. Why? Because it's not too hard, and because
it helps the user double check the relocatability of their install.
Note that the same code that handles relocation also lets you run QEMU
from the build tree and pick e.g. firmware files from the source tree
transparently. Therefore that part remains active with this patch,
even if you configure with --disable-relocatable.
Suggested-by: Michael Tokarev <mjt@tls.msk.ru>
Reviewed-by: Emmanouil Pitsidianakis <manos.pitsidianakis@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2023-10-05 15:19:34 +03:00
|
|
|
|
config_host_data.set('CONFIG_RELOCATABLE', get_option('relocatable'))
|
2022-10-12 12:59:51 +03:00
|
|
|
|
config_host_data.set('CONFIG_SAFESTACK', get_option('safe_stack'))
|
2020-02-06 16:17:15 +03:00
|
|
|
|
config_host_data.set('CONFIG_SDL', sdl.found())
|
|
|
|
|
config_host_data.set('CONFIG_SDL_IMAGE', sdl_image.found())
|
2020-11-17 16:22:24 +03:00
|
|
|
|
config_host_data.set('CONFIG_SECCOMP', seccomp.found())
|
2022-10-26 10:30:24 +03:00
|
|
|
|
if seccomp.found()
|
|
|
|
|
config_host_data.set('CONFIG_SECCOMP_SYSRAWRC', seccomp_has_sysrawrc)
|
|
|
|
|
endif
|
2023-08-30 12:38:25 +03:00
|
|
|
|
config_host_data.set('CONFIG_PIXMAN', pixman.found())
|
2023-09-08 13:09:22 +03:00
|
|
|
|
config_host_data.set('CONFIG_SLIRP', slirp.found())
|
2020-11-17 15:32:34 +03:00
|
|
|
|
config_host_data.set('CONFIG_SNAPPY', snappy.found())
|
2023-11-03 11:17:48 +03:00
|
|
|
|
config_host_data.set('CONFIG_SOLARIS', host_os == 'sunos')
|
2022-02-15 13:37:00 +03:00
|
|
|
|
if get_option('tcg').allowed()
|
|
|
|
|
config_host_data.set('CONFIG_TCG', 1)
|
|
|
|
|
config_host_data.set('CONFIG_TCG_INTERPRETER', tcg_arch == 'tci')
|
|
|
|
|
endif
|
2021-12-21 14:38:27 +03:00
|
|
|
|
config_host_data.set('CONFIG_TPM', have_tpm)
|
2023-01-09 17:31:51 +03:00
|
|
|
|
config_host_data.set('CONFIG_TSAN', get_option('tsan'))
|
2021-06-03 12:15:26 +03:00
|
|
|
|
config_host_data.set('CONFIG_USB_LIBUSB', libusb.found())
|
2021-10-07 16:08:21 +03:00
|
|
|
|
config_host_data.set('CONFIG_VDE', vde.found())
|
2023-10-04 04:45:32 +03:00
|
|
|
|
config_host_data.set('CONFIG_VHOST', have_vhost)
|
2022-04-20 18:34:07 +03:00
|
|
|
|
config_host_data.set('CONFIG_VHOST_NET', have_vhost_net)
|
|
|
|
|
config_host_data.set('CONFIG_VHOST_NET_USER', have_vhost_net_user)
|
|
|
|
|
config_host_data.set('CONFIG_VHOST_NET_VDPA', have_vhost_net_vdpa)
|
|
|
|
|
config_host_data.set('CONFIG_VHOST_KERNEL', have_vhost_kernel)
|
|
|
|
|
config_host_data.set('CONFIG_VHOST_USER', have_vhost_user)
|
|
|
|
|
config_host_data.set('CONFIG_VHOST_CRYPTO', have_vhost_user_crypto)
|
|
|
|
|
config_host_data.set('CONFIG_VHOST_VDPA', have_vhost_vdpa)
|
2022-03-17 20:28:33 +03:00
|
|
|
|
config_host_data.set('CONFIG_VMNET', vmnet.found())
|
2020-11-10 20:11:19 +03:00
|
|
|
|
config_host_data.set('CONFIG_VHOST_USER_BLK_SERVER', have_vhost_user_blk_server)
|
2022-05-23 11:46:09 +03:00
|
|
|
|
config_host_data.set('CONFIG_VDUSE_BLK_EXPORT', have_vduse_blk_export)
|
2022-04-08 10:13:34 +03:00
|
|
|
|
config_host_data.set('CONFIG_PNG', png.found())
|
2020-02-06 17:48:52 +03:00
|
|
|
|
config_host_data.set('CONFIG_VNC', vnc.found())
|
|
|
|
|
config_host_data.set('CONFIG_VNC_JPEG', jpeg.found())
|
|
|
|
|
config_host_data.set('CONFIG_VNC_SASL', sasl.found())
|
2023-09-08 13:10:08 +03:00
|
|
|
|
if virgl.found()
|
2024-10-25 00:03:04 +03:00
|
|
|
|
config_host_data.set('VIRGL_VERSION_MAJOR', virgl.version().split('.')[0])
|
2023-09-08 13:10:08 +03:00
|
|
|
|
endif
|
2020-11-17 16:46:21 +03:00
|
|
|
|
config_host_data.set('CONFIG_VIRTFS', have_virtfs)
|
meson: fix missing preprocessor symbols
While most libraries do not need a CONFIG_* symbol because the
"when:" clauses are enough, some do. Add them back or stop
using them if possible.
In the case of libpmem, the statement to add the CONFIG_* symbol
was still in configure, but could not be triggered because it
checked for "no" instead of "disabled" (and it would be wrong anyway
since the test for the library has not been done yet).
Reported-by: Li Zhijian <lizhijian@cn.fujitsu.com>
Fixes: 587d59d6cc ("configure, meson: convert virgl detection to meson", 2021-07-06)
Fixes: 83ef16821a ("configure, meson: convert libdaxctl detection to meson", 2021-07-06)
Fixes: e36e8c70f6 ("configure, meson: convert libpmem detection to meson", 2021-07-06)
Fixes: 53c22b68e3 ("configure, meson: convert liburing detection to meson", 2021-07-06)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2021-07-08 14:50:06 +03:00
|
|
|
|
config_host_data.set('CONFIG_VTE', vte.found())
|
2020-08-24 18:24:29 +03:00
|
|
|
|
config_host_data.set('CONFIG_XKBCOMMON', xkbcommon.found())
|
2020-08-28 14:07:25 +03:00
|
|
|
|
config_host_data.set('CONFIG_KEYUTILS', keyutils.found())
|
2020-08-28 14:07:33 +03:00
|
|
|
|
config_host_data.set('CONFIG_GETTID', has_gettid)
|
2021-06-03 12:15:26 +03:00
|
|
|
|
config_host_data.set('CONFIG_GNUTLS', gnutls.found())
|
2021-06-30 19:20:02 +03:00
|
|
|
|
config_host_data.set('CONFIG_GNUTLS_CRYPTO', gnutls_crypto.found())
|
2022-04-26 19:00:43 +03:00
|
|
|
|
config_host_data.set('CONFIG_TASN1', tasn1.found())
|
2021-06-03 12:15:26 +03:00
|
|
|
|
config_host_data.set('CONFIG_GCRYPT', gcrypt.found())
|
|
|
|
|
config_host_data.set('CONFIG_NETTLE', nettle.found())
|
2023-12-07 18:47:35 +03:00
|
|
|
|
config_host_data.set('CONFIG_CRYPTO_SM4', crypto_sm4.found())
|
2022-05-25 12:01:14 +03:00
|
|
|
|
config_host_data.set('CONFIG_HOGWEED', hogweed.found())
|
2021-06-03 12:15:26 +03:00
|
|
|
|
config_host_data.set('CONFIG_QEMU_PRIVATE_XTS', xts == 'private')
|
2020-09-01 18:15:30 +03:00
|
|
|
|
config_host_data.set('CONFIG_MALLOC_TRIM', has_malloc_trim)
|
2020-11-02 19:18:55 +03:00
|
|
|
|
config_host_data.set('CONFIG_STATX', has_statx)
|
2022-02-23 12:23:40 +03:00
|
|
|
|
config_host_data.set('CONFIG_STATX_MNT_ID', has_statx_mnt_id)
|
2020-11-17 15:37:39 +03:00
|
|
|
|
config_host_data.set('CONFIG_ZSTD', zstd.found())
|
2024-06-10 13:21:06 +03:00
|
|
|
|
config_host_data.set('CONFIG_QPL', qpl.found())
|
2024-06-07 16:53:05 +03:00
|
|
|
|
config_host_data.set('CONFIG_UADK', uadk.found())
|
2024-08-31 02:27:19 +03:00
|
|
|
|
config_host_data.set('CONFIG_QATZIP', qatzip.found())
|
2020-10-27 22:05:41 +03:00
|
|
|
|
config_host_data.set('CONFIG_FUSE', fuse.found())
|
2020-10-27 22:05:46 +03:00
|
|
|
|
config_host_data.set('CONFIG_FUSE_LSEEK', fuse_lseek.found())
|
2021-10-07 16:08:23 +03:00
|
|
|
|
config_host_data.set('CONFIG_SPICE_PROTOCOL', spice_protocol.found())
|
2021-10-06 13:18:09 +03:00
|
|
|
|
if spice_protocol.found()
|
|
|
|
|
config_host_data.set('CONFIG_SPICE_PROTOCOL_MAJOR', spice_protocol.version().split('.')[0])
|
|
|
|
|
config_host_data.set('CONFIG_SPICE_PROTOCOL_MINOR', spice_protocol.version().split('.')[1])
|
|
|
|
|
config_host_data.set('CONFIG_SPICE_PROTOCOL_MICRO', spice_protocol.version().split('.')[2])
|
|
|
|
|
endif
|
2021-10-07 16:08:23 +03:00
|
|
|
|
config_host_data.set('CONFIG_SPICE', spice.found())
|
2021-01-07 15:54:22 +03:00
|
|
|
|
config_host_data.set('CONFIG_X11', x11.found())
|
2021-07-15 10:53:53 +03:00
|
|
|
|
config_host_data.set('CONFIG_DBUS_DISPLAY', dbus_display)
|
2020-12-05 02:06:14 +03:00
|
|
|
|
config_host_data.set('CONFIG_CFI', get_option('cfi'))
|
2021-11-15 23:29:43 +03:00
|
|
|
|
config_host_data.set('CONFIG_SELINUX', selinux.found())
|
2022-04-20 18:33:47 +03:00
|
|
|
|
config_host_data.set('CONFIG_XEN_BACKEND', xen.found())
|
2023-01-12 18:20:12 +03:00
|
|
|
|
config_host_data.set('CONFIG_LIBDW', libdw.found())
|
2022-04-20 18:33:47 +03:00
|
|
|
|
if xen.found()
|
|
|
|
|
# protect from xen.version() having less than three components
|
|
|
|
|
xen_version = xen.version().split('.') + ['0', '0']
|
|
|
|
|
xen_ctrl_version = xen_version[0] + \
|
|
|
|
|
('0' + xen_version[1]).substring(-2) + \
|
|
|
|
|
('0' + xen_version[2]).substring(-2)
|
|
|
|
|
config_host_data.set('CONFIG_XEN_CTRL_INTERFACE_VERSION', xen_ctrl_version)
|
|
|
|
|
endif
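For example, Xen 4.17.2 yields xen_version = ['4', '17', '2', '0', '0'] and xen_ctrl_version = '4' + '17' + '02' = '41702', i.e. numerically major * 10000 + minor * 100 + micro, so version gates become plain integer comparisons. A hypothetical C fragment (the 41100 threshold below is only an illustrative value, not taken from QEMU):
/* Hypothetical sketch of how the packed version number can be used. */
#include <stdio.h>

#ifndef CONFIG_XEN_CTRL_INTERFACE_VERSION
#define CONFIG_XEN_CTRL_INTERFACE_VERSION 41702  /* stand-in for the generated value */
#endif

int main(void)
{
#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41100
    printf("built against Xen 4.11 or newer interfaces\n");
#else
    printf("built against pre-4.11 Xen interfaces\n");
#endif
    return 0;
}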
|
2020-08-04 19:14:26 +03:00
|
|
|
|
config_host_data.set('QEMU_VERSION', '"@0@"'.format(meson.project_version()))
|
|
|
|
|
config_host_data.set('QEMU_VERSION_MAJOR', meson.project_version().split('.')[0])
|
|
|
|
|
config_host_data.set('QEMU_VERSION_MINOR', meson.project_version().split('.')[1])
|
|
|
|
|
config_host_data.set('QEMU_VERSION_MICRO', meson.project_version().split('.')[2])
|
|
|
|
|
|
2021-10-07 16:08:15 +03:00
|
|
|
|
config_host_data.set_quoted('CONFIG_HOST_DSOSUF', host_dsosuf)
|
2021-06-03 12:56:11 +03:00
|
|
|
|
config_host_data.set('HAVE_HOST_BLOCK_DEVICE', have_host_block_device)
|
|
|
|
|
|
2021-10-13 12:52:03 +03:00
|
|
|
|
have_coroutine_pool = get_option('coroutine_pool')
|
|
|
|
|
if get_option('debug_stack_usage') and have_coroutine_pool
|
|
|
|
|
message('Disabling coroutine pool to measure stack usage')
|
|
|
|
|
have_coroutine_pool = false
|
|
|
|
|
endif
|
2023-10-05 15:31:27 +03:00
|
|
|
|
config_host_data.set('CONFIG_COROUTINE_POOL', have_coroutine_pool)
|
2023-05-01 20:34:43 +03:00
|
|
|
|
config_host_data.set('CONFIG_DEBUG_GRAPH_LOCK', get_option('debug_graph_lock'))
|
2021-10-13 12:46:09 +03:00
|
|
|
|
config_host_data.set('CONFIG_DEBUG_MUTEX', get_option('debug_mutex'))
|
2021-10-13 12:52:03 +03:00
|
|
|
|
config_host_data.set('CONFIG_DEBUG_STACK_USAGE', get_option('debug_stack_usage'))
|
2023-08-28 12:48:30 +03:00
|
|
|
|
config_host_data.set('CONFIG_DEBUG_TCG', get_option('debug_tcg'))
|
2024-03-12 03:23:30 +03:00
|
|
|
|
config_host_data.set('CONFIG_DEBUG_REMAP', get_option('debug_remap'))
|
2021-10-13 12:46:09 +03:00
|
|
|
|
config_host_data.set('CONFIG_QOM_CAST_DEBUG', get_option('qom_cast_debug'))
|
2022-09-02 19:51:25 +03:00
|
|
|
|
config_host_data.set('CONFIG_REPLICATION', get_option('replication').allowed())
|
2024-07-12 16:24:44 +03:00
|
|
|
|
config_host_data.set('CONFIG_FSFREEZE', qga_fsfreeze)
|
|
|
|
|
config_host_data.set('CONFIG_FSTRIM', qga_fstrim)
|
2021-10-13 12:43:54 +03:00
|
|
|
|
|
2021-06-03 12:56:11 +03:00
|
|
|
|
# has_header
|
2021-06-03 13:10:05 +03:00
|
|
|
|
config_host_data.set('CONFIG_EPOLL', cc.has_header('sys/epoll.h'))
|
2021-06-03 13:02:00 +03:00
|
|
|
|
config_host_data.set('CONFIG_LINUX_MAGIC_H', cc.has_header('linux/magic.h'))
|
|
|
|
|
config_host_data.set('CONFIG_VALGRIND_H', cc.has_header('valgrind/valgrind.h'))
|
2020-11-18 20:10:52 +03:00
|
|
|
|
config_host_data.set('HAVE_BTRFS_H', cc.has_header('linux/btrfs.h'))
|
2020-11-18 20:10:49 +03:00
|
|
|
|
config_host_data.set('HAVE_DRM_H', cc.has_header('libdrm/drm.h'))
|
2024-10-01 18:14:54 +03:00
|
|
|
|
config_host_data.set('HAVE_OPENAT2_H', cc.has_header('linux/openat2.h'))
|
2020-11-18 20:10:48 +03:00
|
|
|
|
config_host_data.set('HAVE_PTY_H', cc.has_header('pty.h'))
|
2021-06-03 12:56:11 +03:00
|
|
|
|
config_host_data.set('HAVE_SYS_DISK_H', cc.has_header('sys/disk.h'))
|
2020-11-14 13:10:11 +03:00
|
|
|
|
config_host_data.set('HAVE_SYS_IOCCOM_H', cc.has_header('sys/ioccom.h'))
|
2020-11-18 20:10:51 +03:00
|
|
|
|
config_host_data.set('HAVE_SYS_KCOV_H', cc.has_header('sys/kcov.h'))
|
2023-11-03 11:17:48 +03:00
|
|
|
|
if host_os == 'windows'
|
2022-08-02 10:51:58 +03:00
|
|
|
|
config_host_data.set('HAVE_AFUNIX_H', cc.has_header('afunix.h'))
|
|
|
|
|
endif
|
2020-11-14 13:10:11 +03:00
|
|
|
|
|
2021-06-03 12:56:11 +03:00
|
|
|
|
# has_function
|
os-posix: asynchronous teardown for shutdown on Linux
This patch adds support for asynchronously tearing down a VM on Linux.
When qemu terminates, either naturally or because of a fatal signal,
the VM is torn down. If the VM is huge, it can take a considerable
amount of time for it to be cleaned up. In case of a protected VM, it
might take even longer than a non-protected VM (this is the case on
s390x, for example).
Some users might want to shut down a VM and restart it immediately,
without having to wait. This is especially true if management
infrastructure like libvirt is used.
This patch implements a simple trick on Linux to allow qemu to return
immediately, with the teardown of the VM being performed
asynchronously.
If the new commandline option -async-teardown is used, a new process is
spawned from qemu at startup, using the clone syscall, in such a way that
it will share its address space with qemu. The new process will have the
name "cleanup/<QEMU_PID>". It will wait until qemu terminates
completely, and then it will exit itself.
This allows qemu to terminate quickly, without having to wait for the
whole address space to be torn down. The cleanup process will exit
after qemu, so it will be the last user of the address space, and
therefore it will take care of the actual teardown. The cleanup
process will share the same cgroups as qemu, so both memory usage and
cpu time will be accounted properly.
If possible, close_range will be used in the cleanup process to close
all open file descriptors. If it is not available or if it fails, /proc
will be used to determine which file descriptors to close.
If the cleanup process is forcefully killed with SIGKILL before the
main qemu process has terminated completely, the mechanism is defeated
and the teardown will not be asynchronous.
This feature can already be used with libvirt by adding the following
to the XML domain definition to pass the parameter to qemu directly:
<commandline xmlns="http://libvirt.org/schemas/domain/qemu/1.0">
<arg value='-async-teardown'/>
</commandline>
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Reviewed-by: Murilo Opsfelder Araujo <muriloo@linux.ibm.com>
Tested-by: Murilo Opsfelder Araujo <muriloo@linux.ibm.com>
Message-Id: <20220812133453.82671-1-imbrenda@linux.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
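A minimal sketch of the close-all-descriptors pattern this commit describes (a generic illustration under stated assumptions, not QEMU's async-teardown code): it assumes Linux with /proc mounted, and it only calls close_range() when CONFIG_CLOSE_RANGE, the symbol defined by the probe just below, is set at compile time.
/* Sketch: close every fd >= lowfd, preferring close_range() (Linux >= 5.9,
 * glibc >= 2.34) and falling back to walking /proc/self/fd otherwise. */
#define _GNU_SOURCE
#include <dirent.h>
#include <stdlib.h>
#include <unistd.h>

static void close_all_fds(int lowfd)
{
#ifdef CONFIG_CLOSE_RANGE
    if (close_range(lowfd, ~0U, 0) == 0) {
        return;
    }
#endif
    DIR *dir = opendir("/proc/self/fd");
    if (!dir) {
        return;                      /* best effort only in this sketch */
    }
    struct dirent *de;
    while ((de = readdir(dir)) != NULL) {
        int fd = atoi(de->d_name);   /* "." and ".." become 0 and are skipped */
        if (fd >= lowfd && fd != dirfd(dir)) {
            close(fd);
        }
    }
    closedir(dir);
}

int main(void)
{
    close_all_fds(3);                /* keep stdin, stdout and stderr */
    return 0;
}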
2022-08-12 16:34:53 +03:00
|
|
|
|
config_host_data.set('CONFIG_CLOSE_RANGE', cc.has_function('close_range'))
|
2021-06-03 14:04:47 +03:00
|
|
|
|
config_host_data.set('CONFIG_ACCEPT4', cc.has_function('accept4'))
|
2021-06-03 13:10:05 +03:00
|
|
|
|
config_host_data.set('CONFIG_CLOCK_ADJTIME', cc.has_function('clock_adjtime'))
|
|
|
|
|
config_host_data.set('CONFIG_DUP3', cc.has_function('dup3'))
|
|
|
|
|
config_host_data.set('CONFIG_FALLOCATE', cc.has_function('fallocate'))
|
|
|
|
|
config_host_data.set('CONFIG_POSIX_FALLOCATE', cc.has_function('posix_fallocate'))
|
2023-06-21 01:47:31 +03:00
|
|
|
|
config_host_data.set('CONFIG_GETCPU', cc.has_function('getcpu', prefix: gnu_source_prefix))
|
|
|
|
|
config_host_data.set('CONFIG_SCHED_GETCPU', cc.has_function('sched_getcpu', prefix: '#include <sched.h>'))
|
2022-02-26 21:07:19 +03:00
|
|
|
|
# Note that we need to specify prefix: here to avoid incorrectly
|
|
|
|
|
# thinking that Windows has posix_memalign()
|
|
|
|
|
config_host_data.set('CONFIG_POSIX_MEMALIGN', cc.has_function('posix_memalign', prefix: '#include <stdlib.h>'))
|
2022-02-26 21:07:20 +03:00
|
|
|
|
config_host_data.set('CONFIG_ALIGNED_MALLOC', cc.has_function('_aligned_malloc'))
|
2022-02-26 21:07:21 +03:00
|
|
|
|
config_host_data.set('CONFIG_VALLOC', cc.has_function('valloc'))
|
|
|
|
|
config_host_data.set('CONFIG_MEMALIGN', cc.has_function('memalign'))
|
2021-06-03 13:10:05 +03:00
|
|
|
|
config_host_data.set('CONFIG_PPOLL', cc.has_function('ppoll'))
|
2021-01-26 18:58:46 +03:00
|
|
|
|
config_host_data.set('CONFIG_PREADV', cc.has_function('preadv', prefix: '#include <sys/uio.h>'))
|
2022-02-28 01:35:20 +03:00
|
|
|
|
config_host_data.set('CONFIG_PTHREAD_FCHDIR_NP', cc.has_function('pthread_fchdir_np'))
|
2021-06-03 13:10:05 +03:00
|
|
|
|
config_host_data.set('CONFIG_SENDFILE', cc.has_function('sendfile'))
|
|
|
|
|
config_host_data.set('CONFIG_SETNS', cc.has_function('setns') and cc.has_function('unshare'))
|
|
|
|
|
config_host_data.set('CONFIG_SYNCFS', cc.has_function('syncfs'))
|
|
|
|
|
config_host_data.set('CONFIG_SYNC_FILE_RANGE', cc.has_function('sync_file_range'))
|
|
|
|
|
config_host_data.set('CONFIG_TIMERFD', cc.has_function('timerfd_create'))
|
2021-06-03 13:02:00 +03:00
|
|
|
|
config_host_data.set('HAVE_COPY_FILE_RANGE', cc.has_function('copy_file_range'))
|
2022-04-26 22:55:22 +03:00
|
|
|
|
config_host_data.set('HAVE_GETIFADDRS', cc.has_function('getifaddrs'))
|
2022-10-12 12:31:32 +03:00
|
|
|
|
config_host_data.set('HAVE_GLIB_WITH_SLICE_ALLOCATOR', glib_has_gslice)
|
2024-10-03 16:28:48 +03:00
|
|
|
|
config_host_data.set('HAVE_GLIB_WITH_ALIGNED_ALLOC', glib_has_aligned_alloc)
|
2021-06-03 13:10:05 +03:00
|
|
|
|
config_host_data.set('HAVE_OPENPTY', cc.has_function('openpty', dependencies: util))
|
2021-06-03 13:14:48 +03:00
|
|
|
|
config_host_data.set('HAVE_STRCHRNUL', cc.has_function('strchrnul'))
|
2021-06-03 12:56:11 +03:00
|
|
|
|
config_host_data.set('HAVE_SYSTEM_FUNCTION', cc.has_function('system', prefix: '#include <stdlib.h>'))
|
2022-05-17 10:10:12 +03:00
|
|
|
|
if rbd.found()
|
|
|
|
|
config_host_data.set('HAVE_RBD_NAMESPACE_EXISTS',
|
|
|
|
|
cc.has_function('rbd_namespace_exists',
|
|
|
|
|
dependencies: rbd,
|
|
|
|
|
prefix: '#include <rbd/librbd.h>'))
|
|
|
|
|
endif
|
2021-09-10 10:02:55 +03:00
|
|
|
|
if rdma.found()
|
|
|
|
|
config_host_data.set('HAVE_IBV_ADVISE_MR',
|
|
|
|
|
cc.has_function('ibv_advise_mr',
|
2022-04-20 18:33:41 +03:00
|
|
|
|
dependencies: rdma,
|
2021-09-10 10:02:55 +03:00
|
|
|
|
prefix: '#include <infiniband/verbs.h>'))
|
|
|
|
|
endif
|
2021-01-26 18:58:46 +03:00
|
|
|
|
|
2023-01-09 17:31:51 +03:00
|
|
|
|
have_asan_fiber = false
|
2024-08-13 12:52:15 +03:00
|
|
|
|
if get_option('asan') and \
|
2023-01-09 17:31:51 +03:00
|
|
|
|
not cc.has_function('__sanitizer_start_switch_fiber',
|
|
|
|
|
args: '-fsanitize=address',
|
|
|
|
|
prefix: '#include <sanitizer/asan_interface.h>')
|
|
|
|
|
warning('Missing ASAN due to missing fiber annotation interface')
|
|
|
|
|
warning('Without code annotation, the report may be inferior.')
|
|
|
|
|
else
|
|
|
|
|
have_asan_fiber = true
|
|
|
|
|
endif
|
|
|
|
|
config_host_data.set('CONFIG_ASAN_IFACE_FIBER', have_asan_fiber)
|
|
|
|
|
|
2024-02-06 03:22:03 +03:00
|
|
|
|
have_inotify_init = cc.has_header_symbol('sys/inotify.h', 'inotify_init')
|
|
|
|
|
have_inotify_init1 = cc.has_header_symbol('sys/inotify.h', 'inotify_init1')
|
|
|
|
|
inotify = not_found
|
|
|
|
|
if (have_inotify_init or have_inotify_init1) and host_os == 'freebsd'
|
|
|
|
|
# libinotify-kqueue
|
|
|
|
|
inotify = cc.find_library('inotify')
|
|
|
|
|
if have_inotify_init
|
|
|
|
|
have_inotify_init = inotify.found()
|
|
|
|
|
endif
|
|
|
|
|
if have_inotify_init1
|
|
|
|
|
have_inotify_init1 = inotify.found()
|
|
|
|
|
endif
|
|
|
|
|
endif
|
|
|
|
|
config_host_data.set('CONFIG_INOTIFY', have_inotify_init)
|
|
|
|
|
config_host_data.set('CONFIG_INOTIFY1', have_inotify_init1)
|
|
|
|
|
|
2021-06-03 13:10:05 +03:00
|
|
|
|
# has_header_symbol
|
block/block-backend: add block layer APIs resembling Linux ZonedBlockDevice ioctls
Add zoned device option to host_device BlockDriver. It will be presented only
for zoned host block devices. By adding zone management operations to the
host_block_device BlockDriver, users can use the new block layer APIs
including Report Zone and four zone management operations
(open, close, finish, reset, reset_all).
Qemu-io uses the new APIs to perform zoned storage commands of the device:
zone_report(zrp), zone_open(zo), zone_close(zc), zone_reset(zrs),
zone_finish(zf).
For example, to test zone_report, use the following command:
$ ./build/qemu-io --image-opts -n driver=host_device, filename=/dev/nullb0
-c "zrp offset nr_zones"
Signed-off-by: Sam Li <faithilikerun@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Dmitry Fomichev <dmitry.fomichev@wdc.com>
Acked-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20230508045533.175575-4-faithilikerun@gmail.com
Message-id: 20230324090605.28361-4-faithilikerun@gmail.com
[Adjust commit message prefix as suggested by Philippe Mathieu-Daudé
<philmd@linaro.org> and remove spurious ret = -errno in
raw_co_zone_mgmt().
--Stefan]
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
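As a hedged companion to the CONFIG_BLKZONED probe below, here is a standalone sketch (not QEMU code) that reads the first few zone descriptors of a zoned block device through the BLKREPORTZONE ioctl from <linux/blkzoned.h>, the same header the probe checks; the device path is whatever zoned device is available, e.g. the /dev/nullb0 used in the commit message.
/* Sketch: print a few zone descriptors of a zoned block device.
 * Usage: ./zrp /dev/nullb0 */
#include <fcntl.h>
#include <linux/blkzoned.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    unsigned int nr = 4;             /* ask for the first four zones */
    struct blk_zone_report *rep;
    int fd;

    if (argc != 2) {
        fprintf(stderr, "usage: %s <zoned block device>\n", argv[0]);
        return 1;
    }
    fd = open(argv[1], O_RDONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    rep = calloc(1, sizeof(*rep) + nr * sizeof(struct blk_zone));
    if (!rep) {
        close(fd);
        return 1;
    }
    rep->sector = 0;                 /* start reporting from the first zone */
    rep->nr_zones = nr;
    if (ioctl(fd, BLKREPORTZONE, rep) < 0) {
        perror("BLKREPORTZONE");
    } else {
        for (unsigned int i = 0; i < rep->nr_zones; i++) {
            printf("zone %u: start %llu, len %llu, wp %llu\n", i,
                   (unsigned long long)rep->zones[i].start,
                   (unsigned long long)rep->zones[i].len,
                   (unsigned long long)rep->zones[i].wp);
        }
    }
    free(rep);
    close(fd);
    return 0;
}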
2023-05-08 07:55:28 +03:00
|
|
|
|
config_host_data.set('CONFIG_BLKZONED',
|
|
|
|
|
cc.has_header_symbol('linux/blkzoned.h', 'BLKOPENZONE'))
|
2021-06-03 13:10:05 +03:00
|
|
|
|
config_host_data.set('CONFIG_EPOLL_CREATE1',
|
|
|
|
|
cc.has_header_symbol('sys/epoll.h', 'epoll_create1'))
|
|
|
|
|
config_host_data.set('CONFIG_FALLOCATE_PUNCH_HOLE',
|
|
|
|
|
cc.has_header_symbol('linux/falloc.h', 'FALLOC_FL_PUNCH_HOLE') and
|
|
|
|
|
cc.has_header_symbol('linux/falloc.h', 'FALLOC_FL_KEEP_SIZE'))
|
|
|
|
|
config_host_data.set('CONFIG_FALLOCATE_ZERO_RANGE',
|
|
|
|
|
cc.has_header_symbol('linux/falloc.h', 'FALLOC_FL_ZERO_RANGE'))
|
|
|
|
|
config_host_data.set('CONFIG_FIEMAP',
|
|
|
|
|
cc.has_header('linux/fiemap.h') and
|
|
|
|
|
cc.has_header_symbol('linux/fs.h', 'FS_IOC_FIEMAP'))
|
2021-06-03 13:02:00 +03:00
|
|
|
|
config_host_data.set('CONFIG_GETRANDOM',
|
|
|
|
|
cc.has_function('getrandom') and
|
|
|
|
|
cc.has_header_symbol('sys/random.h', 'GRND_NONBLOCK'))
|
2021-06-03 13:10:05 +03:00
|
|
|
|
config_host_data.set('CONFIG_PRCTL_PR_SET_TIMERSLACK',
|
|
|
|
|
cc.has_header_symbol('sys/prctl.h', 'PR_SET_TIMERSLACK'))
|
2021-06-03 13:02:00 +03:00
|
|
|
|
config_host_data.set('CONFIG_RTNETLINK',
|
|
|
|
|
cc.has_header_symbol('linux/rtnetlink.h', 'IFLA_PROTO_DOWN'))
|
|
|
|
|
config_host_data.set('CONFIG_SYSMACROS',
|
|
|
|
|
cc.has_header_symbol('sys/sysmacros.h', 'makedev'))
|
2021-06-03 13:02:00 +03:00
|
|
|
|
config_host_data.set('HAVE_OPTRESET',
|
|
|
|
|
cc.has_header_symbol('getopt.h', 'optreset'))
|
2021-09-07 15:19:13 +03:00
|
|
|
|
config_host_data.set('HAVE_IPPROTO_MPTCP',
|
|
|
|
|
cc.has_header_symbol('netinet/in.h', 'IPPROTO_MPTCP'))
|
2021-06-03 13:10:05 +03:00
|
|
|
|
|
|
|
|
|
# has_member
|
|
|
|
|
config_host_data.set('HAVE_SIGEV_NOTIFY_THREAD_ID',
|
|
|
|
|
cc.has_member('struct sigevent', 'sigev_notify_thread_id',
|
|
|
|
|
prefix: '#include <signal.h>'))
|
2021-06-03 13:14:48 +03:00
|
|
|
|
config_host_data.set('HAVE_STRUCT_STAT_ST_ATIM',
|
|
|
|
|
cc.has_member('struct stat', 'st_atim',
|
|
|
|
|
prefix: '#include <sys/stat.h>'))
|
block/block-backend: add block layer APIs resembling Linux ZonedBlockDevice ioctls
Add zoned device option to host_device BlockDriver. It will be presented only
for zoned host block devices. By adding zone management operations to the
host_block_device BlockDriver, users can use the new block layer APIs
including Report Zone and four zone management operations
(open, close, finish, reset, reset_all).
Qemu-io uses the new APIs to perform zoned storage commands of the device:
zone_report(zrp), zone_open(zo), zone_close(zc), zone_reset(zrs),
zone_finish(zf).
For example, to test zone_report, use the following command:
$ ./build/qemu-io --image-opts -n driver=host_device, filename=/dev/nullb0
-c "zrp offset nr_zones"
Signed-off-by: Sam Li <faithilikerun@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Dmitry Fomichev <dmitry.fomichev@wdc.com>
Acked-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20230508045533.175575-4-faithilikerun@gmail.com
Message-id: 20230324090605.28361-4-faithilikerun@gmail.com
[Adjust commit message prefix as suggested by Philippe Mathieu-Daudé
<philmd@linaro.org> and remove spurious ret = -errno in
raw_co_zone_mgmt().
--Stefan]
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2023-05-08 07:55:28 +03:00
|
|
|
|
config_host_data.set('HAVE_BLK_ZONE_REP_CAPACITY',
|
|
|
|
|
cc.has_member('struct blk_zone', 'capacity',
|
|
|
|
|
prefix: '#include <linux/blkzoned.h>'))
|
2021-06-03 13:10:05 +03:00
|
|
|
|
|
2021-11-16 10:28:29 +03:00
|
|
|
|
# has_type
|
|
|
|
|
config_host_data.set('CONFIG_IOVEC',
|
|
|
|
|
cc.has_type('struct iovec',
|
|
|
|
|
prefix: '#include <sys/uio.h>'))
|
|
|
|
|
config_host_data.set('HAVE_UTMPX',
|
|
|
|
|
cc.has_type('struct utmpx',
|
|
|
|
|
prefix: '#include <utmpx.h>'))
|
|
|
|
|
|
2021-07-07 17:35:26 +03:00
|
|
|
|
config_host_data.set('CONFIG_EVENTFD', cc.links('''
|
2021-06-03 13:02:00 +03:00
|
|
|
|
#include <sys/eventfd.h>
|
|
|
|
|
int main(void) { return eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); }'''))
|
2021-07-07 17:35:26 +03:00
|
|
|
|
config_host_data.set('CONFIG_FDATASYNC', cc.links(gnu_source_prefix + '''
|
2021-06-03 13:02:00 +03:00
|
|
|
|
#include <unistd.h>
|
|
|
|
|
int main(void) {
|
|
|
|
|
#if defined(_POSIX_SYNCHRONIZED_IO) && _POSIX_SYNCHRONIZED_IO > 0
|
|
|
|
|
return fdatasync(0);
|
|
|
|
|
#else
|
|
|
|
|
#error Not supported
|
|
|
|
|
#endif
|
|
|
|
|
}'''))
|
2022-03-16 06:52:25 +03:00
|
|
|
|
|
|
|
|
|
has_madvise = cc.links(gnu_source_prefix + '''
|
2021-06-03 13:02:00 +03:00
|
|
|
|
#include <sys/types.h>
|
|
|
|
|
#include <sys/mman.h>
|
|
|
|
|
#include <stddef.h>
|
2022-03-16 06:52:25 +03:00
|
|
|
|
int main(void) { return madvise(NULL, 0, MADV_DONTNEED); }''')
|
|
|
|
|
missing_madvise_proto = false
|
|
|
|
|
if has_madvise
|
|
|
|
|
# Some platforms (illumos and Solaris before Solaris 11) provide madvise()
|
|
|
|
|
# but forget to prototype it. In this case, has_madvise will be true (the
|
|
|
|
|
# test program links despite a compile warning). To detect the
|
|
|
|
|
# missing-prototype case, we try again with a definitely-bogus prototype.
|
|
|
|
|
# This will only compile if the system headers don't provide the prototype;
|
|
|
|
|
# otherwise the conflicting prototypes will cause a compiler error.
|
|
|
|
|
missing_madvise_proto = cc.links(gnu_source_prefix + '''
|
|
|
|
|
#include <sys/types.h>
|
|
|
|
|
#include <sys/mman.h>
|
|
|
|
|
#include <stddef.h>
|
|
|
|
|
extern int madvise(int);
|
|
|
|
|
int main(void) { return madvise(0); }''')
|
|
|
|
|
endif
|
|
|
|
|
config_host_data.set('CONFIG_MADVISE', has_madvise)
|
|
|
|
|
config_host_data.set('HAVE_MADVISE_WITHOUT_PROTOTYPE', missing_madvise_proto)
|
|
|
|
|
|
2021-07-07 17:35:26 +03:00
|
|
|
|
config_host_data.set('CONFIG_MEMFD', cc.links(gnu_source_prefix + '''
|
2021-06-03 13:02:00 +03:00
|
|
|
|
#include <sys/mman.h>
|
|
|
|
|
int main(void) { return memfd_create("foo", MFD_ALLOW_SEALING); }'''))
|
2021-07-07 17:35:26 +03:00
|
|
|
|
config_host_data.set('CONFIG_OPEN_BY_HANDLE', cc.links(gnu_source_prefix + '''
|
2021-06-03 13:02:00 +03:00
|
|
|
|
#include <fcntl.h>
|
|
|
|
|
#if !defined(AT_EMPTY_PATH)
|
|
|
|
|
# error missing definition
|
|
|
|
|
#else
|
|
|
|
|
int main(void) { struct file_handle fh; return open_by_handle_at(0, &fh, 0); }
|
|
|
|
|
#endif'''))
|
2024-06-05 13:44:54 +03:00
|
|
|
|
|
|
|
|
|
# On Darwin posix_madvise() has the same return semantics as plain madvise(),
|
|
|
|
|
# i.e. errno is set and -1 is returned. That's not really how POSIX defines the
|
|
|
|
|
# function. On the flip side, it has madvise() which is preferred anyways.
|
|
|
|
|
if host_os != 'darwin'
|
|
|
|
|
config_host_data.set('CONFIG_POSIX_MADVISE', cc.links(gnu_source_prefix + '''
|
|
|
|
|
#include <sys/mman.h>
|
|
|
|
|
#include <stddef.h>
|
|
|
|
|
int main(void) { return posix_madvise(NULL, 0, POSIX_MADV_DONTNEED); }'''))
|
|
|
|
|
endif
|
2021-10-07 16:08:19 +03:00
|
|
|
|
|
2021-11-16 10:28:29 +03:00
|
|
|
|
config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_W_TID', cc.links(gnu_source_prefix + '''
|
2021-10-07 16:08:19 +03:00
|
|
|
|
#include <pthread.h>
|
|
|
|
|
|
|
|
|
|
static void *f(void *p) { return NULL; }
|
|
|
|
|
int main(void)
|
|
|
|
|
{
|
|
|
|
|
pthread_t thread;
|
|
|
|
|
pthread_create(&thread, 0, f, 0);
|
|
|
|
|
pthread_setname_np(thread, "QEMU");
|
|
|
|
|
return 0;
|
|
|
|
|
}''', dependencies: threads))
|
2021-11-16 10:28:29 +03:00
|
|
|
|
config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_WO_TID', cc.links(gnu_source_prefix + '''
|
2021-10-07 16:08:19 +03:00
|
|
|
|
#include <pthread.h>
|
|
|
|
|
|
|
|
|
|
static void *f(void *p) { pthread_setname_np("QEMU"); return NULL; }
|
|
|
|
|
int main(void)
|
|
|
|
|
{
|
|
|
|
|
pthread_t thread;
|
|
|
|
|
pthread_create(&thread, 0, f, 0);
|
|
|
|
|
return 0;
|
|
|
|
|
}''', dependencies: threads))
|
2022-12-18 11:22:04 +03:00
|
|
|
|
config_host_data.set('CONFIG_PTHREAD_SET_NAME_NP', cc.links(gnu_source_prefix + '''
|
|
|
|
|
#include <pthread.h>
|
|
|
|
|
#include <pthread_np.h>
|
|
|
|
|
|
|
|
|
|
static void *f(void *p) { return NULL; }
|
|
|
|
|
int main(void)
|
|
|
|
|
{
|
|
|
|
|
pthread_t thread;
|
|
|
|
|
pthread_create(&thread, 0, f, 0);
|
|
|
|
|
pthread_set_name_np(thread, "QEMU");
|
|
|
|
|
return 0;
|
|
|
|
|
}''', dependencies: threads))
|
2022-02-22 12:05:05 +03:00
|
|
|
|
config_host_data.set('CONFIG_PTHREAD_CONDATTR_SETCLOCK', cc.links(gnu_source_prefix + '''
|
|
|
|
|
#include <pthread.h>
|
|
|
|
|
#include <time.h>
|
|
|
|
|
|
|
|
|
|
int main(void)
|
|
|
|
|
{
|
|
|
|
|
pthread_condattr_t attr;
|
|
|
|
|
pthread_condattr_init(&attr);
|
|
|
|
|
pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
|
|
|
|
|
return 0;
|
|
|
|
|
}''', dependencies: threads))
|
2022-10-14 16:47:15 +03:00
|
|
|
|
config_host_data.set('CONFIG_PTHREAD_AFFINITY_NP', cc.links(gnu_source_prefix + '''
|
|
|
|
|
#include <pthread.h>
|
2021-10-07 16:08:19 +03:00
|
|
|
|
|
2022-10-14 16:47:15 +03:00
|
|
|
|
static void *f(void *p) { return NULL; }
|
|
|
|
|
int main(void)
|
|
|
|
|
{
|
|
|
|
|
int setsize = CPU_ALLOC_SIZE(64);
|
|
|
|
|
pthread_t thread;
|
|
|
|
|
cpu_set_t *cpuset;
|
|
|
|
|
pthread_create(&thread, 0, f, 0);
|
|
|
|
|
cpuset = CPU_ALLOC(64);
|
|
|
|
|
CPU_ZERO_S(setsize, cpuset);
|
|
|
|
|
pthread_setaffinity_np(thread, setsize, cpuset);
|
|
|
|
|
pthread_getaffinity_np(thread, setsize, cpuset);
|
|
|
|
|
CPU_FREE(cpuset);
|
|
|
|
|
return 0;
|
|
|
|
|
}''', dependencies: threads))
|
2021-07-07 17:35:26 +03:00
|
|
|
|
config_host_data.set('CONFIG_SIGNALFD', cc.links(gnu_source_prefix + '''
|
2021-09-05 04:16:22 +03:00
|
|
|
|
#include <sys/signalfd.h>
|
|
|
|
|
#include <stddef.h>
|
|
|
|
|
int main(void) { return signalfd(-1, NULL, SFD_CLOEXEC); }'''))
|
2021-07-07 17:35:26 +03:00
|
|
|
|
config_host_data.set('CONFIG_SPLICE', cc.links(gnu_source_prefix + '''
|
2021-06-03 14:04:47 +03:00
|
|
|
|
#include <unistd.h>
|
|
|
|
|
#include <fcntl.h>
|
|
|
|
|
#include <limits.h>
|
|
|
|
|
|
|
|
|
|
int main(void)
|
|
|
|
|
{
|
|
|
|
|
int len, fd = 0;
|
|
|
|
|
len = tee(STDIN_FILENO, STDOUT_FILENO, INT_MAX, SPLICE_F_NONBLOCK);
|
|
|
|
|
splice(STDIN_FILENO, NULL, fd, NULL, len, SPLICE_F_MOVE);
|
|
|
|
|
return 0;
|
|
|
|
|
}'''))
|
2021-06-03 13:02:00 +03:00
|
|
|
|
|
2021-10-07 16:08:18 +03:00
|
|
|
|
config_host_data.set('HAVE_MLOCKALL', cc.links(gnu_source_prefix + '''
|
|
|
|
|
#include <sys/mman.h>
|
2022-11-03 20:19:18 +03:00
|
|
|
|
int main(void) {
|
2021-10-07 16:08:18 +03:00
|
|
|
|
return mlockall(MCL_FUTURE);
|
|
|
|
|
}'''))
|
|
|
|
|
|
2021-10-28 21:59:08 +03:00
|
|
|
|
have_l2tpv3 = false
|
2021-12-18 18:39:43 +03:00
|
|
|
|
if get_option('l2tpv3').allowed() and have_system
|
2021-11-16 10:28:29 +03:00
|
|
|
|
have_l2tpv3 = cc.has_type('struct mmsghdr',
|
|
|
|
|
prefix: gnu_source_prefix + '''
|
|
|
|
|
#include <sys/socket.h>
|
|
|
|
|
#include <linux/ip.h>''')
|
2021-10-28 21:59:08 +03:00
|
|
|
|
endif
|
|
|
|
|
config_host_data.set('CONFIG_L2TPV3', have_l2tpv3)
|
|
|
|
|
|
2021-10-07 16:08:22 +03:00
|
|
|
|
have_netmap = false
|
2021-12-18 18:39:43 +03:00
|
|
|
|
if get_option('netmap').allowed() and have_system
|
2021-10-07 16:08:22 +03:00
|
|
|
|
have_netmap = cc.compiles('''
|
|
|
|
|
#include <inttypes.h>
|
|
|
|
|
#include <net/if.h>
|
|
|
|
|
#include <net/netmap.h>
|
|
|
|
|
#include <net/netmap_user.h>
|
|
|
|
|
#if (NETMAP_API < 11) || (NETMAP_API > 15)
|
|
|
|
|
#error
|
|
|
|
|
#endif
|
|
|
|
|
int main(void) { return 0; }''')
|
|
|
|
|
if not have_netmap and get_option('netmap').enabled()
|
|
|
|
|
error('Netmap headers not available')
|
|
|
|
|
endif
|
|
|
|
|
endif
|
|
|
|
|
config_host_data.set('CONFIG_NETMAP', have_netmap)
|
|
|
|
|
|
2021-10-07 16:08:18 +03:00
|
|
|
|
# Work around a system header bug with some kernel/XFS header
|
|
|
|
|
# versions where they both try to define 'struct fsxattr':
|
|
|
|
|
# xfs headers will not try to redefine structs from linux headers
|
|
|
|
|
# if this macro is set.
|
|
|
|
|
config_host_data.set('HAVE_FSXATTR', cc.links('''
|
2021-11-16 10:28:29 +03:00
|
|
|
|
#include <linux/fs.h>
|
2021-10-07 16:08:18 +03:00
|
|
|
|
struct fsxattr foo;
|
|
|
|
|
int main(void) {
|
|
|
|
|
return 0;
|
|
|
|
|
}'''))
|
|
|
|
|
|
2021-06-03 12:57:04 +03:00
|
|
|
|
# Some versions of Mac OS X incorrectly define SIZE_MAX
|
|
|
|
|
config_host_data.set('HAVE_BROKEN_SIZE_MAX', not cc.compiles('''
|
|
|
|
|
#include <stdint.h>
|
|
|
|
|
#include <stdio.h>
|
2022-11-03 20:19:18 +03:00
|
|
|
|
int main(void) {
|
2021-06-03 12:57:04 +03:00
|
|
|
|
return printf("%zu", SIZE_MAX);
|
|
|
|
|
}''', args: ['-Werror']))
|
|
|
|
|
|
2022-11-05 14:34:58 +03:00
|
|
|
|
# See if 64-bit atomic operations are supported.
|
|
|
|
|
# Note that without __atomic builtins, we can only
|
|
|
|
|
# assume atomic loads/stores max at pointer size.
|
|
|
|
|
config_host_data.set('CONFIG_ATOMIC64', cc.links('''
|
2021-10-07 16:08:25 +03:00
|
|
|
|
#include <stdint.h>
|
|
|
|
|
int main(void)
|
|
|
|
|
{
|
2022-11-05 14:34:58 +03:00
|
|
|
|
uint64_t x = 0, y = 0;
|
2021-10-07 16:08:25 +03:00
|
|
|
|
y = __atomic_load_n(&x, __ATOMIC_RELAXED);
|
|
|
|
|
__atomic_store_n(&x, y, __ATOMIC_RELAXED);
|
|
|
|
|
__atomic_compare_exchange_n(&x, &y, x, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
|
|
|
|
|
__atomic_exchange_n(&x, y, __ATOMIC_RELAXED);
|
|
|
|
|
__atomic_fetch_add(&x, y, __ATOMIC_RELAXED);
|
|
|
|
|
return 0;
|
2024-10-06 10:44:00 +03:00
|
|
|
|
}''', args: qemu_isa_flags))
|
2021-10-07 16:08:25 +03:00
|
|
|
|
|
2023-05-24 18:14:41 +03:00
|
|
|
|
has_int128_type = cc.compiles('''
|
|
|
|
|
__int128_t a;
|
|
|
|
|
__uint128_t b;
|
|
|
|
|
int main(void) { b = a; }''')
|
|
|
|
|
config_host_data.set('CONFIG_INT128_TYPE', has_int128_type)
|
|
|
|
|
|
|
|
|
|
has_int128 = has_int128_type and cc.links('''
|
2022-02-28 14:49:19 +03:00
|
|
|
|
__int128_t a;
|
|
|
|
|
__uint128_t b;
|
|
|
|
|
int main (void) {
|
|
|
|
|
a = a + b;
|
|
|
|
|
b = a * b;
|
|
|
|
|
a = a * a;
|
|
|
|
|
return 0;
|
|
|
|
|
}''')
|
|
|
|
|
config_host_data.set('CONFIG_INT128', has_int128)
|
|
|
|
|
|
2023-05-24 18:14:41 +03:00
|
|
|
|
if has_int128_type
|
2022-02-28 15:03:09 +03:00
|
|
|
|
# "do we have 128-bit atomics which are handled inline and specifically not
|
|
|
|
|
# via libatomic". The reason we can't use libatomic is documented in the
|
|
|
|
|
# comment starting "GCC is a house divided" in include/qemu/atomic128.h.
|
2022-11-05 14:34:58 +03:00
|
|
|
|
# We only care about these operations on 16-byte aligned pointers, so
|
|
|
|
|
# force 16-byte alignment of the pointer, which may be greater than
|
|
|
|
|
# __alignof(unsigned __int128) for the host.
|
|
|
|
|
atomic_test_128 = '''
|
|
|
|
|
int main(int ac, char **av) {
|
2023-05-24 18:14:41 +03:00
|
|
|
|
__uint128_t *p = __builtin_assume_aligned(av[ac - 1], 16);
|
2022-11-05 14:34:58 +03:00
|
|
|
|
p[1] = __atomic_load_n(&p[0], __ATOMIC_RELAXED);
|
|
|
|
|
__atomic_store_n(&p[2], p[3], __ATOMIC_RELAXED);
|
|
|
|
|
__atomic_compare_exchange_n(&p[4], &p[5], p[6], 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
|
|
|
|
|
return 0;
|
|
|
|
|
}'''
|
2024-10-06 10:44:00 +03:00
|
|
|
|
has_atomic128 = cc.links(atomic_test_128, args: qemu_isa_flags)
|
2022-02-28 14:49:19 +03:00
|
|
|
|
|
|
|
|
|
config_host_data.set('CONFIG_ATOMIC128', has_atomic128)
|
|
|
|
|
|
|
|
|
|
if not has_atomic128
|
2022-11-05 14:34:58 +03:00
|
|
|
|
# Even with __builtin_assume_aligned, the above test may have failed
|
|
|
|
|
# without optimization enabled. Try again with optimizations locally
|
|
|
|
|
# enabled for the function. See
|
|
|
|
|
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
|
2024-10-06 10:44:00 +03:00
|
|
|
|
has_atomic128_opt = cc.links('__attribute__((optimize("O1")))' + atomic_test_128,
|
|
|
|
|
args: qemu_isa_flags)
|
2022-11-05 14:34:58 +03:00
|
|
|
|
config_host_data.set('CONFIG_ATOMIC128_OPT', has_atomic128_opt)
|
|
|
|
|
|
|
|
|
|
if not has_atomic128_opt
|
|
|
|
|
config_host_data.set('CONFIG_CMPXCHG128', cc.links('''
|
|
|
|
|
int main(void)
|
|
|
|
|
{
|
2023-05-24 18:14:41 +03:00
|
|
|
|
__uint128_t x = 0, y = 0;
|
2022-11-05 14:34:58 +03:00
|
|
|
|
__sync_val_compare_and_swap_16(&x, y, x);
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
2024-10-06 10:44:00 +03:00
|
|
|
|
''', args: qemu_isa_flags))
|
2022-11-05 14:34:58 +03:00
|
|
|
|
endif
|
2022-02-28 14:49:19 +03:00
|
|
|
|
endif
|
|
|
|
|
endif
|
2021-10-07 16:08:25 +03:00
|
|
|
|
|
|
|
|
|
config_host_data.set('CONFIG_GETAUXVAL', cc.links(gnu_source_prefix + '''
|
|
|
|
|
#include <sys/auxv.h>
|
|
|
|
|
int main(void) {
|
|
|
|
|
return getauxval(AT_HWCAP) == 0;
|
|
|
|
|
}'''))
|
|
|
|
|
|
2024-07-28 06:58:55 +03:00
|
|
|
|
config_host_data.set('CONFIG_ELF_AUX_INFO', cc.links(gnu_source_prefix + '''
|
|
|
|
|
#include <sys/auxv.h>
|
|
|
|
|
int main(void) {
|
|
|
|
|
unsigned long hwcap = 0;
|
|
|
|
|
elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap));
|
|
|
|
|
return hwcap;
|
|
|
|
|
}'''))
|
|
|
|
|
|
2022-04-20 18:33:43 +03:00
|
|
|
|
config_host_data.set('CONFIG_USBFS', have_linux_user and cc.compiles('''
|
|
|
|
|
#include <linux/usbdevice_fs.h>
|
|
|
|
|
|
|
|
|
|
#ifndef USBDEVFS_GET_CAPABILITIES
|
|
|
|
|
#error "USBDEVFS_GET_CAPABILITIES undefined"
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
#ifndef USBDEVFS_DISCONNECT_CLAIM
|
|
|
|
|
#error "USBDEVFS_DISCONNECT_CLAIM undefined"
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
int main(void) { return 0; }'''))
|
|
|
|
|
|
2022-04-20 18:33:42 +03:00
|
|
|
|
have_keyring = get_option('keyring') \
|
2023-11-03 11:17:48 +03:00
|
|
|
|
.require(host_os == 'linux', error_message: 'keyring is only available on Linux') \
|
2022-04-20 18:33:42 +03:00
|
|
|
|
.require(cc.compiles('''
|
|
|
|
|
#include <errno.h>
|
|
|
|
|
#include <asm/unistd.h>
|
|
|
|
|
#include <linux/keyctl.h>
|
|
|
|
|
#include <sys/syscall.h>
|
|
|
|
|
#include <unistd.h>
|
|
|
|
|
int main(void) {
|
|
|
|
|
return syscall(__NR_keyctl, KEYCTL_READ, 0, NULL, NULL, 0);
|
|
|
|
|
}'''), error_message: 'keyctl syscall not available on this system').allowed()
|
|
|
|
|
config_host_data.set('CONFIG_SECRET_KEYRING', have_keyring)
|
|
|
|
|
|
2021-11-08 15:38:58 +03:00
|
|
|
|
have_cpuid_h = cc.links('''
|
|
|
|
|
#include <cpuid.h>
|
|
|
|
|
int main(void) {
|
|
|
|
|
unsigned a, b, c, d;
|
|
|
|
|
unsigned max = __get_cpuid_max(0, 0);
|
|
|
|
|
|
|
|
|
|
if (max >= 1) {
|
|
|
|
|
__cpuid(1, a, b, c, d);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (max >= 7) {
|
|
|
|
|
__cpuid_count(7, 0, a, b, c, d);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}''')
|
|
|
|
|
config_host_data.set('CONFIG_CPUID_H', have_cpuid_h)
|
|
|
|
|
|
2024-06-27 20:36:43 +03:00
|
|
|
|
# Don't bother to advertise asm/hwprobe.h for old versions that do
|
|
|
|
|
# not contain RISCV_HWPROBE_EXT_ZBA.
|
|
|
|
|
config_host_data.set('CONFIG_ASM_HWPROBE_H',
|
|
|
|
|
cc.has_header_symbol('asm/hwprobe.h',
|
|
|
|
|
'RISCV_HWPROBE_EXT_ZBA'))
|
|
|
|
|
|
2021-11-08 15:38:58 +03:00
|
|
|
|
config_host_data.set('CONFIG_AVX2_OPT', get_option('avx2') \
|
|
|
|
|
.require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX2') \
|
|
|
|
|
.require(cc.links('''
|
|
|
|
|
#include <cpuid.h>
|
|
|
|
|
#include <immintrin.h>
|
2022-12-04 04:31:12 +03:00
|
|
|
|
static int __attribute__((target("avx2"))) bar(void *a) {
|
2021-11-08 15:38:58 +03:00
|
|
|
|
__m256i x = *(__m256i *)a;
|
|
|
|
|
return _mm256_testz_si256(x, x);
|
|
|
|
|
}
|
2022-11-03 20:19:18 +03:00
|
|
|
|
int main(int argc, char *argv[]) { return bar(argv[argc - 1]); }
|
2021-11-08 15:38:58 +03:00
|
|
|
|
'''), error_message: 'AVX2 not available').allowed())
|
|
|
|
|
|
2022-11-16 18:29:22 +03:00
|
|
|
|
config_host_data.set('CONFIG_AVX512BW_OPT', get_option('avx512bw') \
|
|
|
|
|
.require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX512BW') \
|
|
|
|
|
.require(cc.links('''
|
|
|
|
|
#include <cpuid.h>
|
|
|
|
|
#include <immintrin.h>
|
2023-05-02 00:05:55 +03:00
|
|
|
|
static int __attribute__((target("avx512bw"))) bar(void *a) {
|
2022-11-16 18:29:22 +03:00
|
|
|
|
__m512i *x = a;
|
|
|
|
|
__m512i res= _mm512_abs_epi8(*x);
|
|
|
|
|
return res[1];
|
|
|
|
|
}
|
|
|
|
|
int main(int argc, char *argv[]) { return bar(argv[0]); }
|
|
|
|
|
'''), error_message: 'AVX512BW not available').allowed())
|
|
|
|
|
|
2023-06-02 10:43:40 +03:00
|
|
|
|
# For both AArch64 and AArch32, detect if builtins are available.
|
|
|
|
|
config_host_data.set('CONFIG_ARM_AES_BUILTIN', cc.compiles('''
|
|
|
|
|
#include <arm_neon.h>
|
|
|
|
|
#ifndef __ARM_FEATURE_AES
|
|
|
|
|
__attribute__((target("+crypto")))
|
|
|
|
|
#endif
|
|
|
|
|
void foo(uint8x16_t *p) { *p = vaesmcq_u8(*p); }
|
|
|
|
|
'''))
|
|
|
|
|
|
2021-11-08 15:52:11 +03:00
|
|
|
|
if get_option('membarrier').disabled()
|
|
|
|
|
have_membarrier = false
|
2023-11-03 11:17:48 +03:00
|
|
|
|
elif host_os == 'windows'
|
2021-11-08 15:52:11 +03:00
|
|
|
|
have_membarrier = true
|
2023-11-03 11:17:48 +03:00
|
|
|
|
elif host_os == 'linux'
|
2021-11-08 15:52:11 +03:00
|
|
|
|
have_membarrier = cc.compiles('''
|
|
|
|
|
#include <linux/membarrier.h>
|
|
|
|
|
#include <sys/syscall.h>
|
|
|
|
|
#include <unistd.h>
|
|
|
|
|
#include <stdlib.h>
|
|
|
|
|
int main(void) {
|
|
|
|
|
syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);
|
|
|
|
|
syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0);
|
|
|
|
|
exit(0);
|
|
|
|
|
}''')
|
|
|
|
|
endif
|
|
|
|
|
config_host_data.set('CONFIG_MEMBARRIER', get_option('membarrier') \
|
|
|
|
|
.require(have_membarrier, error_message: 'membarrier system call not available') \
|
|
|
|
|
.allowed())
|
|
|
|
|
|
2021-11-08 16:02:42 +03:00
|
|
|
|
have_afalg = get_option('crypto_afalg') \
|
|
|
|
|
.require(cc.compiles(gnu_source_prefix + '''
|
|
|
|
|
#include <errno.h>
|
|
|
|
|
#include <sys/types.h>
|
|
|
|
|
#include <sys/socket.h>
|
|
|
|
|
#include <linux/if_alg.h>
|
|
|
|
|
int main(void) {
|
|
|
|
|
int sock;
|
|
|
|
|
sock = socket(AF_ALG, SOCK_SEQPACKET, 0);
|
|
|
|
|
return sock;
|
|
|
|
|
}
|
|
|
|
|
'''), error_message: 'AF_ALG requested but could not be detected').allowed()
|
|
|
|
|
config_host_data.set('CONFIG_AF_ALG', have_afalg)
|
|
|
|
|
|
2022-04-01 14:50:05 +03:00
|
|
|
|
config_host_data.set('CONFIG_AF_VSOCK', cc.has_header_symbol(
|
|
|
|
|
'linux/vm_sockets.h', 'AF_VSOCK',
|
|
|
|
|
prefix: '#include <sys/socket.h>',
|
|
|
|
|
))
|
2021-10-07 16:08:25 +03:00
|
|
|
|
|
2022-02-01 15:53:43 +03:00
|
|
|
|
have_vss = false
|
2022-02-22 22:40:02 +03:00
|
|
|
|
have_vss_sdk = false # old xp/2003 SDK
|
2023-11-03 11:17:48 +03:00
|
|
|
|
if host_os == 'windows' and 'cpp' in all_languages
|
2022-02-01 15:53:43 +03:00
|
|
|
|
have_vss = cxx.compiles('''
|
|
|
|
|
#define __MIDL_user_allocate_free_DEFINED__
|
2022-02-22 22:40:01 +03:00
|
|
|
|
#include <vss.h>
|
2022-02-01 15:53:43 +03:00
|
|
|
|
int main(void) { return VSS_CTX_BACKUP; }''')
|
2022-02-22 22:40:02 +03:00
|
|
|
|
have_vss_sdk = cxx.has_header('vscoordint.h')
|
2022-02-01 15:53:43 +03:00
|
|
|
|
endif
|
2022-02-22 22:40:02 +03:00
|
|
|
|
config_host_data.set('HAVE_VSS_SDK', have_vss_sdk)
|
2022-02-01 15:53:43 +03:00
|
|
|
|
|
2022-04-17 21:30:06 +03:00
|
|
|
|
# Older versions of MinGW do not import _lock_file and _unlock_file properly.
|
|
|
|
|
# This was fixed for v6.0.0 with commit b48e3ac8969d.
|
2023-11-03 11:17:48 +03:00
|
|
|
|
if host_os == 'windows'
|
2022-04-17 21:30:06 +03:00
|
|
|
|
config_host_data.set('HAVE__LOCK_FILE', cc.links('''
|
|
|
|
|
#include <stdio.h>
|
|
|
|
|
int main(void) {
|
|
|
|
|
_lock_file(NULL);
|
|
|
|
|
_unlock_file(NULL);
|
|
|
|
|
return 0;
|
|
|
|
|
}''', name: '_lock_file and _unlock_file'))
|
|
|
|
|
endif
|
|
|
|
|
|
2023-11-03 11:17:48 +03:00
|
|
|
|
if host_os == 'windows'
|
2023-02-21 18:30:04 +03:00
|
|
|
|
mingw_has_setjmp_longjmp = cc.links('''
|
|
|
|
|
#include <setjmp.h>
|
|
|
|
|
int main(void) {
|
|
|
|
|
/*
|
|
|
|
|
* These functions are not available in setjmp header, but may be
|
|
|
|
|
* available at link time, from libmingwex.a.
|
|
|
|
|
*/
|
|
|
|
|
extern int __mingw_setjmp(jmp_buf);
|
|
|
|
|
extern void __attribute__((noreturn)) __mingw_longjmp(jmp_buf, int);
|
|
|
|
|
jmp_buf env;
|
|
|
|
|
__mingw_setjmp(env);
|
|
|
|
|
__mingw_longjmp(env, 0);
|
|
|
|
|
}
|
|
|
|
|
''', name: 'mingw setjmp and longjmp')
|
|
|
|
|
|
|
|
|
|
if cpu == 'aarch64' and not mingw_has_setjmp_longjmp
|
|
|
|
|
error('mingw must provide setjmp/longjmp for windows-arm64')
|
|
|
|
|
endif
|
|
|
|
|
endif
|
|
|
|
|
|
2020-10-07 18:01:51 +03:00
|
|
|
|
########################
|
|
|
|
|
# Target configuration #
|
|
|
|
|
########################
|
|
|
|
|
|
2020-02-03 13:42:03 +03:00
|
|
|
|
minikconf = find_program('scripts/minikconf.py')
|
2023-08-30 12:39:45 +03:00
|
|
|
|
|
2023-09-29 12:40:03 +03:00
|
|
|
|
config_all_accel = {}
|
2020-09-01 12:32:23 +03:00
|
|
|
|
config_all_devices = {}
|
2020-02-03 13:42:03 +03:00
|
|
|
|
config_devices_mak_list = []
|
|
|
|
|
config_devices_h = {}
|
2020-08-04 19:14:26 +03:00
|
|
|
|
config_target_h = {}
|
2020-02-03 13:42:03 +03:00
|
|
|
|
config_target_mak = {}
|
2020-09-01 13:04:28 +03:00
|
|
|
|
|
|
|
|
|
disassemblers = {
|
|
|
|
|
'alpha' : ['CONFIG_ALPHA_DIS'],
|
|
|
|
|
'avr' : ['CONFIG_AVR_DIS'],
|
2021-02-08 08:46:24 +03:00
|
|
|
|
'hexagon' : ['CONFIG_HEXAGON_DIS'],
|
2020-09-01 13:04:28 +03:00
|
|
|
|
'hppa' : ['CONFIG_HPPA_DIS'],
|
|
|
|
|
'i386' : ['CONFIG_I386_DIS'],
|
|
|
|
|
'x86_64' : ['CONFIG_I386_DIS'],
|
|
|
|
|
'm68k' : ['CONFIG_M68K_DIS'],
|
|
|
|
|
'microblaze' : ['CONFIG_MICROBLAZE_DIS'],
|
|
|
|
|
'mips' : ['CONFIG_MIPS_DIS'],
|
|
|
|
|
'or1k' : ['CONFIG_OPENRISC_DIS'],
|
|
|
|
|
'ppc' : ['CONFIG_PPC_DIS'],
|
|
|
|
|
'riscv' : ['CONFIG_RISCV_DIS'],
|
|
|
|
|
'rx' : ['CONFIG_RX_DIS'],
|
|
|
|
|
's390' : ['CONFIG_S390_DIS'],
|
|
|
|
|
'sh4' : ['CONFIG_SH4_DIS'],
|
|
|
|
|
'sparc' : ['CONFIG_SPARC_DIS'],
|
|
|
|
|
'xtensa' : ['CONFIG_XTENSA_DIS'],
|
2022-06-06 15:43:06 +03:00
|
|
|
|
'loongarch' : ['CONFIG_LOONGARCH_DIS'],
|
2020-09-01 13:04:28 +03:00
|
|
|
|
}

have_ivshmem = config_host_data.get('CONFIG_EVENTFD')

host_kconfig = \
  (get_option('fuzzing') ? ['CONFIG_FUZZ=y'] : []) + \
  (have_tpm ? ['CONFIG_TPM=y'] : []) + \
  (pixman.found() ? ['CONFIG_PIXMAN=y'] : []) + \
  (spice.found() ? ['CONFIG_SPICE=y'] : []) + \
  (have_ivshmem ? ['CONFIG_IVSHMEM=y'] : []) + \
  (opengl.found() ? ['CONFIG_OPENGL=y'] : []) + \
  (libcbor.found() ? ['CONFIG_LIBCBOR=y'] : []) + \
  (gnutls.found() ? ['CONFIG_GNUTLS=y'] : []) + \
  (x11.found() ? ['CONFIG_X11=y'] : []) + \
  (fdt.found() ? ['CONFIG_FDT=y'] : []) + \
  (have_vhost_user ? ['CONFIG_VHOST_USER=y'] : []) + \
  (have_vhost_vdpa ? ['CONFIG_VHOST_VDPA=y'] : []) + \
  (have_vhost_kernel ? ['CONFIG_VHOST_KERNEL=y'] : []) + \
  (have_virtfs ? ['CONFIG_VIRTFS=y'] : []) + \
  (host_os == 'linux' ? ['CONFIG_LINUX=y'] : []) + \
  (multiprocess_allowed ? ['CONFIG_MULTIPROCESS_ALLOWED=y'] : []) + \
  (vfio_user_server_allowed ? ['CONFIG_VFIO_USER_SERVER_ALLOWED=y'] : []) + \
  (hv_balloon ? ['CONFIG_HV_BALLOON_POSSIBLE=y'] : []) + \
  (have_rust ? ['CONFIG_HAVE_RUST=y'] : [])
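
# host_kconfig seeds minikconf with the host-side features that Kconfig
# expressions may depend on; it is passed, together with the per-target
# symbols, on every minikconf invocation below.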

ignored = [ 'TARGET_XML_FILES', 'TARGET_ABI_DIR', 'TARGET_ARCH' ]

default_targets = 'CONFIG_DEFAULT_TARGETS' in config_host
actual_target_dirs = []
fdt_required = []
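
# One pass over every requested target: check that it can be built on this
# host, select its accelerators, run minikconf for system emulators and
# generate the per-target config-target.h / config-devices.h headers.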
foreach target : target_dirs
  config_target = { 'TARGET_NAME': target.split('-')[0] }
  if target.endswith('linux-user')
    if host_os != 'linux'
      if default_targets
        continue
      endif
      error('Target @0@ is only available on a Linux host'.format(target))
    endif
    config_target += { 'CONFIG_LINUX_USER': 'y' }
  elif target.endswith('bsd-user')
    if host_os not in bsd_oses
      if default_targets
        continue
      endif
      error('Target @0@ is only available on a BSD host'.format(target))
    endif
    config_target += { 'CONFIG_BSD_USER': 'y' }
  elif target.endswith('softmmu')
    config_target += { 'CONFIG_SYSTEM_ONLY': 'y' }
    config_target += { 'CONFIG_SOFTMMU': 'y' }
  endif
  if target.endswith('-user')
    config_target += {
      'CONFIG_USER_ONLY': 'y',
      'CONFIG_QEMU_INTERP_PREFIX':
        get_option('interp_prefix').replace('%M', config_target['TARGET_NAME']),
      'CONFIG_QEMU_RTSIG_MAP': get_option('rtsig_map'),
    }
  endif

  target_kconfig = []
  foreach sym: accelerators
    if sym == 'CONFIG_TCG' or target in accelerator_targets.get(sym, [])
      config_target += { sym: 'y' }
      config_all_accel += { sym: 'y' }
      if target in modular_tcg
        config_target += { 'CONFIG_TCG_MODULAR': 'y' }
      else
        config_target += { 'CONFIG_TCG_BUILTIN': 'y' }
      endif
      target_kconfig += [ sym + '=y' ]
    endif
  endforeach
  if target_kconfig.length() == 0
    if default_targets
      continue
    endif
    error('No accelerator available for target @0@'.format(target))
  endif

  config_target += keyval.load('configs/targets' / target + '.mak')
  config_target += { 'TARGET_' + config_target['TARGET_ARCH'].to_upper(): 'y' }

  if 'TARGET_NEED_FDT' in config_target and not fdt.found()
    if default_targets
      warning('Disabling ' + target + ' due to missing libfdt')
    else
      fdt_required += target
    endif
    continue
  endif

  actual_target_dirs += target

  # Add default keys
  if 'TARGET_BASE_ARCH' not in config_target
    config_target += {'TARGET_BASE_ARCH': config_target['TARGET_ARCH']}
  endif
  if 'TARGET_ABI_DIR' not in config_target
    config_target += {'TARGET_ABI_DIR': config_target['TARGET_ARCH']}
  endif
  if 'TARGET_BIG_ENDIAN' not in config_target
    config_target += {'TARGET_BIG_ENDIAN': 'n'}
  endif

  foreach k, v: disassemblers
    if host_arch.startswith(k) or config_target['TARGET_BASE_ARCH'].startswith(k)
      foreach sym: v
        config_target += { sym: 'y' }
      endforeach
    endif
  endforeach

  config_target_data = configuration_data()
  foreach k, v: config_target
    if not k.startswith('TARGET_') and not k.startswith('CONFIG_')
      # do nothing
    elif ignored.contains(k)
      # do nothing
    elif k == 'TARGET_BASE_ARCH'
      # Note that TARGET_BASE_ARCH ends up in config-target.h but it is
      # not used to select files from sourcesets.
      config_target_data.set('TARGET_' + v.to_upper(), 1)
    elif k == 'TARGET_NAME' or k == 'CONFIG_QEMU_INTERP_PREFIX'
      config_target_data.set_quoted(k, v)
    elif v == 'y'
      config_target_data.set(k, 1)
    elif v == 'n'
      config_target_data.set(k, 0)
    else
      config_target_data.set(k, v)
    endif
  endforeach
  config_target_data.set('QEMU_ARCH',
                         'QEMU_ARCH_' + config_target['TARGET_BASE_ARCH'].to_upper())
  config_target_h += {target: configure_file(output: target + '-config-target.h',
                                             configuration: config_target_data)}

  if target.endswith('-softmmu')
    target_kconfig += 'CONFIG_' + config_target['TARGET_ARCH'].to_upper() + '=y'
    target_kconfig += 'CONFIG_TARGET_BIG_ENDIAN=' + config_target['TARGET_BIG_ENDIAN']

    config_input = meson.get_external_property(target, 'default')
    config_devices_mak = target + '-config-devices.mak'
    config_devices_mak = configure_file(
      input: ['configs/devices' / target / config_input + '.mak', 'Kconfig'],
      output: config_devices_mak,
      depfile: config_devices_mak + '.d',
      capture: true,
      command: [minikconf,
                get_option('default_devices') ? '--defconfig' : '--allnoconfig',
                config_devices_mak, '@DEPFILE@', '@INPUT@',
                host_kconfig, target_kconfig])

    config_devices_data = configuration_data()
    config_devices = keyval.load(config_devices_mak)
    foreach k, v: config_devices
      config_devices_data.set(k, 1)
    endforeach
    config_devices_mak_list += config_devices_mak
    config_devices_h += {target: configure_file(output: target + '-config-devices.h',
                                                configuration: config_devices_data)}
    config_target += config_devices
    config_all_devices += config_devices
  endif
  config_target_mak += {target: config_target}
endforeach
target_dirs = actual_target_dirs

target_configs_h = []
foreach target: target_dirs
  target_configs_h += config_target_h[target]
  target_configs_h += config_devices_h.get(target, [])
endforeach
genh += custom_target('config-poison.h',
                      input: [target_configs_h],
                      output: 'config-poison.h',
                      capture: true,
                      command: [find_program('scripts/make-config-poison.sh'),
                                target_configs_h])
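
# config-poison.h "#pragma GCC poison"s the target-specific macros found in
# the per-target headers, so target-independent code cannot accidentally rely
# on a single target's configuration.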

if fdt_required.length() > 0
  error('fdt disabled but required by targets ' + ', '.join(fdt_required))
endif

###############
# Subprojects #
###############
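
# These helper libraries are pulled in as Meson subprojects (wrap files under
# subprojects/) and are built only when the corresponding feature is enabled.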
libvfio_user_dep = not_found
if have_system and vfio_user_server_allowed
  libvfio_user_proj = subproject('libvfio-user', required: true)
  libvfio_user_dep = libvfio_user_proj.get_variable('libvfio_user_dep')
endif

vhost_user = not_found
if host_os == 'linux' and have_vhost_user
  libvhost_user = subproject('libvhost-user')
  vhost_user = libvhost_user.get_variable('vhost_user_dep')
endif

libvduse = not_found
if have_libvduse
  libvduse_proj = subproject('libvduse')
  libvduse = libvduse_proj.get_variable('libvduse_dep')
endif

#####################
# Generated sources #
#####################

genh += configure_file(output: 'config-host.h', configuration: config_host_data)

hxtool = find_program('scripts/hxtool')
shaderinclude = find_program('scripts/shaderinclude.py')
qapi_gen = find_program('scripts/qapi-gen.py')
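# Regenerate QAPI output whenever any part of the generator itself changes.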
qapi_gen_depends = [ meson.current_source_dir() / 'scripts/qapi/__init__.py',
                     meson.current_source_dir() / 'scripts/qapi/commands.py',
                     meson.current_source_dir() / 'scripts/qapi/common.py',
                     meson.current_source_dir() / 'scripts/qapi/error.py',
                     meson.current_source_dir() / 'scripts/qapi/events.py',
                     meson.current_source_dir() / 'scripts/qapi/expr.py',
                     meson.current_source_dir() / 'scripts/qapi/gen.py',
                     meson.current_source_dir() / 'scripts/qapi/introspect.py',
                     meson.current_source_dir() / 'scripts/qapi/main.py',
                     meson.current_source_dir() / 'scripts/qapi/parser.py',
                     meson.current_source_dir() / 'scripts/qapi/schema.py',
                     meson.current_source_dir() / 'scripts/qapi/source.py',
                     meson.current_source_dir() / 'scripts/qapi/types.py',
                     meson.current_source_dir() / 'scripts/qapi/visit.py',
                     meson.current_source_dir() / 'scripts/qapi-gen.py'
]
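
# tracetool generates the trace headers and sources for the configured
# backends; tracetool_depends forces regeneration when the tool itself
# changes.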
tracetool = [
  python, files('scripts/tracetool.py'),
  '--backend=' + ','.join(get_option('trace_backends'))
]
tracetool_depends = files(
  'scripts/tracetool/backend/log.py',
  'scripts/tracetool/backend/__init__.py',
  'scripts/tracetool/backend/dtrace.py',
  'scripts/tracetool/backend/ftrace.py',
  'scripts/tracetool/backend/simple.py',
  'scripts/tracetool/backend/syslog.py',
  'scripts/tracetool/backend/ust.py',
  'scripts/tracetool/format/ust_events_c.py',
  'scripts/tracetool/format/ust_events_h.py',
  'scripts/tracetool/format/__init__.py',
  'scripts/tracetool/format/d.py',
  'scripts/tracetool/format/simpletrace_stap.py',
  'scripts/tracetool/format/c.py',
  'scripts/tracetool/format/h.py',
  'scripts/tracetool/format/log_stap.py',
  'scripts/tracetool/format/stap.py',
  'scripts/tracetool/__init__.py',
)

qemu_version_cmd = [find_program('scripts/qemu-version.sh'),
                    meson.current_source_dir(),
                    get_option('pkgversion'), meson.project_version()]
qemu_version = custom_target('qemu-version.h',
                             output: 'qemu-version.h',
                             command: qemu_version_cmd,
                             capture: true,
                             build_by_default: true,
                             build_always_stale: true)
genh += qemu_version
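
# .hx files mix documentation and definitions; "hxtool -h" extracts the C
# definitions into the generated headers listed here.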
hxdep = []
hx_headers = [
  ['qemu-options.hx', 'qemu-options.def'],
  ['qemu-img-cmds.hx', 'qemu-img-cmds.h'],
]
if have_system
  hx_headers += [
    ['hmp-commands.hx', 'hmp-commands.h'],
    ['hmp-commands-info.hx', 'hmp-commands-info.h'],
  ]
endif
foreach d : hx_headers
  hxdep += custom_target(d[1],
                         input: files(d[0]),
                         output: d[1],
                         capture: true,
                         command: [hxtool, '-h', '@INPUT0@'])
endforeach
genh += hxdep

###############
# Trace files #
###############

# TODO: add each directory to the subdirs from its own meson.build, once
# we have those
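# Every directory listed here must provide a trace-events file, which
# trace/meson.build turns into the per-directory trace headers.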
trace_events_subdirs = [
  'crypto',
  'qapi',
  'qom',
  'monitor',
  'util',
  'gdbstub',
]
if have_linux_user
  trace_events_subdirs += [ 'linux-user' ]
endif
if have_bsd_user
  trace_events_subdirs += [ 'bsd-user' ]
endif
if have_block
  trace_events_subdirs += [
    'authz',
    'block',
    'chardev',
    'io',
    'nbd',
    'scsi',
  ]
endif
if have_system
  trace_events_subdirs += [
    'accel/kvm',
    'audio',
    'backends',
    'backends/tpm',
    'ebpf',
    'hw/9pfs',
    'hw/acpi',
    'hw/adc',
    'hw/alpha',
    'hw/arm',
    'hw/audio',
    'hw/block',
    'hw/char',
    'hw/display',
    'hw/dma',
    'hw/fsi',
    'hw/hyperv',
    'hw/i2c',
    'hw/i386',
    'hw/i386/xen',
    'hw/i386/kvm',
    'hw/ide',
    'hw/input',
    'hw/intc',
    'hw/isa',
    'hw/mem',
    'hw/mips',
    'hw/misc',
    'hw/misc/macio',
    'hw/net',
    'hw/net/can',
    'hw/nubus',
    'hw/nvme',
    'hw/nvram',
    'hw/pci',
    'hw/pci-host',
    'hw/ppc',
    'hw/rtc',
    'hw/riscv',
    'hw/s390x',
    'hw/scsi',
    'hw/sd',
    'hw/sensor',
    'hw/sh4',
    'hw/sparc',
    'hw/sparc64',
    'hw/ssi',
    'hw/timer',
    'hw/tpm',
    'hw/ufs',
    'hw/usb',
    'hw/vfio',
    'hw/virtio',
    'hw/watchdog',
    'hw/xen',
    'hw/gpio',
    'migration',
    'net',
    'system',
    'ui',
    'hw/remote',
  ]
endif
if have_system or have_user
  trace_events_subdirs += [
    'accel/tcg',
    'hw/core',
    'target/arm',
    'target/arm/hvf',
    'target/hppa',
    'target/i386',
    'target/i386/kvm',
    'target/loongarch',
    'target/mips/tcg',
    'target/ppc',
    'target/riscv',
    'target/s390x',
    'target/s390x/kvm',
    'target/sparc',
  ]
endif

###################
# Collect sources #
###################
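
# A source set groups source files together with the dependencies and
# when-clauses that guard them; each set is resolved later with .apply()
# against the configuration of the build that consumes it.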

authz_ss = ss.source_set()
blockdev_ss = ss.source_set()
block_ss = ss.source_set()
chardev_ss = ss.source_set()
common_ss = ss.source_set()
crypto_ss = ss.source_set()
hwcore_ss = ss.source_set()
io_ss = ss.source_set()
qmp_ss = ss.source_set()
qom_ss = ss.source_set()
system_ss = ss.source_set()
specific_fuzz_ss = ss.source_set()
specific_ss = ss.source_set()
rust_devices_ss = ss.source_set()
stub_ss = ss.source_set()
trace_ss = ss.source_set()
user_ss = ss.source_set()
util_ss = ss.source_set()

# accel modules
qtest_module_ss = ss.source_set()
tcg_module_ss = ss.source_set()

modules = {}
target_modules = {}
hw_arch = {}
target_arch = {}
target_system_arch = {}
target_user_arch = {}

# NOTE: the trace/ subdirectory needs the qapi_trace_events variable
# that is filled in by qapi/.
subdir('qapi')
subdir('qobject')
subdir('stubs')
subdir('trace')
subdir('util')
subdir('qom')
subdir('authz')
subdir('crypto')
subdir('ui')
subdir('gdbstub')
if have_system
  subdir('hw')
else
  subdir('hw/core')
endif

if enable_modules
  libmodulecommon = static_library('module-common', files('module-common.c') + genh, pic: true, c_args: '-DBUILD_DSO')
  modulecommon = declare_dependency(objects: libmodulecommon.extract_all_objects(recursive: false), compile_args: '-DBUILD_DSO')
endif

qom_ss = qom_ss.apply({})
libqom = static_library('qom', qom_ss.sources() + genh,
                        dependencies: [qom_ss.dependencies()],
                        build_by_default: false)
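
# Rather than link_whole, the objects of each internal static library are
# extracted and handed to declare_dependency() along with its dependencies:
# objects get deduplicated on the final link line while the dependencies
# still propagate to users.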
qom = declare_dependency(objects: libqom.extract_all_objects(recursive: false),
                         dependencies: qom_ss.dependencies())

event_loop_base = files('event-loop-base.c')
event_loop_base = static_library('event-loop-base',
                                 sources: event_loop_base + genh,
                                 build_by_default: false)
event_loop_base = declare_dependency(objects: event_loop_base.extract_all_objects(recursive: false),
                                     dependencies: [qom])

stub_ss = stub_ss.apply({})

util_ss.add_all(trace_ss)
util_ss = util_ss.apply({})
libqemuutil = static_library('qemuutil',
                             build_by_default: false,
                             sources: util_ss.sources() + stub_ss.sources() + genh,
                             dependencies: [util_ss.dependencies(), libm, threads, glib, socket, malloc])
qemuutil = declare_dependency(link_with: libqemuutil,
                              sources: genh + version_res,
                              dependencies: [event_loop_base])
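
# decodetree turns the declarative *.decode instruction patterns under
# target/ into generated decoder functions that the TCG frontends include.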
if have_system or have_user
  decodetree = generator(find_program('scripts/decodetree.py'),
                         output: 'decode-@BASENAME@.c.inc',
                         arguments: ['@INPUT@', '@EXTRA_ARGS@', '-o', '@OUTPUT@'])
  subdir('libdecnumber')
  subdir('target')
endif

subdir('audio')
subdir('io')
subdir('chardev')
subdir('fsdev')
subdir('dump')

if have_block
  block_ss.add(files(
    'block.c',
    'blockjob.c',
    'job.c',
    'qemu-io-cmds.c',
  ))
  if config_host_data.get('CONFIG_REPLICATION')
    block_ss.add(files('replication.c'))
  endif

  subdir('nbd')
  subdir('scsi')
  subdir('block')
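
  # blockdev_ss holds the monitor-level block layer (blockdev commands, NBD
  # export, block jobs over QMP); it is linked into the system emulators and
  # qemu-storage-daemon, while block_ss is also shared with the block tools.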
  blockdev_ss.add(files(
    'blockdev.c',
    'blockdev-nbd.c',
    'iothread.c',
    'job-qmp.c',
  ))

  # os-posix.c contains POSIX-specific functions used by qemu-storage-daemon,
  # os-win32.c does not
  if host_os == 'windows'
    system_ss.add(files('os-win32.c'))
  else
    blockdev_ss.add(files('os-posix.c'))
  endif
endif

common_ss.add(files('cpu-common.c'))
specific_ss.add(files('cpu-target.c'))

subdir('system')

# Work around a gcc bug/misfeature wherein constant propagation looks
# through an alias:
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99696
# to guess that a const variable is always zero. Without lto, this is
# impossible, as the alias is restricted to page-vary-common.c. Indeed,
# without lto, not even the alias is required -- we simply use different
# declarations in different compilation units.
pagevary = files('page-vary-common.c')
if get_option('b_lto')
  pagevary_flags = ['-fno-lto']
  if get_option('cfi')
    pagevary_flags += '-fno-sanitize=cfi-icall'
  endif
  pagevary = static_library('page-vary-common', sources: pagevary + genh,
                            c_args: pagevary_flags)
  pagevary = declare_dependency(link_with: pagevary)
endif
common_ss.add(pagevary)
specific_ss.add(files('page-target.c', 'page-vary-target.c'))

subdir('backends')
subdir('disas')
subdir('migration')
subdir('monitor')
subdir('net')
subdir('replay')
subdir('semihosting')
subdir('stats')
subdir('tcg')
subdir('fpu')
subdir('accel')
subdir('plugins')
subdir('ebpf')

if 'CONFIG_TCG' in config_all_accel
  subdir('contrib/plugins')
endif

common_user_inc = []

subdir('common-user')
subdir('bsd-user')
subdir('linux-user')

# needed for fuzzing binaries
subdir('tests/qtest/libqos')
subdir('tests/qtest/fuzz')

# accel modules
tcg_real_module_ss = ss.source_set()
tcg_real_module_ss.add_all(when: 'CONFIG_TCG_MODULAR', if_true: tcg_module_ss)
specific_ss.add_all(when: 'CONFIG_TCG_BUILTIN', if_true: tcg_module_ss)
target_modules += { 'accel' : { 'qtest': qtest_module_ss,
                                'tcg': tcg_real_module_ss }}

##############################################
# Internal static_libraries and dependencies #
##############################################

modinfo_collect = find_program('scripts/modinfo-collect.py')
modinfo_generate = find_program('scripts/modinfo-generate.py')
modinfo_files = []
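
# The *.modinfo fragments collected from each module below are merged by
# modinfo-generate.py into a per-target modinfo-<target>.c, so the emulator
# knows which module provides which objects and devices.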

block_mods = []
system_mods = []
emulator_modules = []
foreach d, list : modules
  if not (d == 'block' ? have_block : have_system)
    continue
  endif

  foreach m, module_ss : list
    if enable_modules
      module_ss.add(modulecommon)
      module_ss = module_ss.apply(config_all_devices, strict: false)
      sl = static_library(d + '-' + m, [genh, module_ss.sources()],
                          dependencies: module_ss.dependencies(), pic: true)
      if d == 'block'
        block_mods += sl
      else
        system_mods += sl
      endif
      emulator_modules += shared_module(sl.name(),
                                        name_prefix: '',
                                        objects: sl.extract_all_objects(recursive: false),
                                        dependencies: module_ss.dependencies(),
                                        install: true,
                                        install_dir: qemu_moddir)
      if module_ss.sources() != []
        # FIXME: Should use sl.extract_all_objects(recursive: true) as
        # input. Sources can be used multiple times but objects are
        # unique when it comes to lookup in compile_commands.json.
        # Depends on a meson version with
        # https://github.com/mesonbuild/meson/pull/8900
        modinfo_files += custom_target(d + '-' + m + '.modinfo',
                                       output: d + '-' + m + '.modinfo',
                                       input: module_ss.sources() + genh,
                                       capture: true,
                                       command: [modinfo_collect, module_ss.sources()])
      endif
    else
      if d == 'block'
        block_ss.add_all(module_ss)
      else
        system_ss.add_all(module_ss)
      endif
    endif
  endforeach
endforeach

foreach d, list : target_modules
  foreach m, module_ss : list
    if enable_modules
      module_ss.add(modulecommon)
      foreach target : target_dirs
        if target.endswith('-softmmu')
          config_target = config_target_mak[target]
          target_inc = [include_directories('target' / config_target['TARGET_BASE_ARCH'])]
          c_args = ['-DCOMPILING_PER_TARGET',
                    '-DCONFIG_TARGET="@0@-config-target.h"'.format(target),
                    '-DCONFIG_DEVICES="@0@-config-devices.h"'.format(target)]
          target_module_ss = module_ss.apply(config_target, strict: false)
          if target_module_ss.sources() != []
            module_name = d + '-' + m + '-' + config_target['TARGET_NAME']
            sl = static_library(module_name,
                                [genh, target_module_ss.sources()],
                                dependencies: target_module_ss.dependencies(),
                                include_directories: target_inc,
                                c_args: c_args,
                                pic: true)
            system_mods += sl
            emulator_modules += shared_module(sl.name(),
                                              name_prefix: '',
                                              objects: sl.extract_all_objects(recursive: false),
                                              dependencies: target_module_ss.dependencies(),
                                              install: true,
                                              install_dir: qemu_moddir)
            # FIXME: Should use sl.extract_all_objects(recursive: true) too.
            modinfo_files += custom_target(module_name + '.modinfo',
                                           output: module_name + '.modinfo',
                                           input: target_module_ss.sources() + genh,
                                           capture: true,
                                           command: [modinfo_collect, '--target', target, target_module_ss.sources()])
          endif
        endif
      endforeach
    else
      specific_ss.add_all(module_ss)
    endif
  endforeach
endforeach

if enable_modules
  foreach target : target_dirs
    if target.endswith('-softmmu')
      config_target = config_target_mak[target]
      config_devices_mak = target + '-config-devices.mak'
      modinfo_src = custom_target('modinfo-' + target + '.c',
                                  output: 'modinfo-' + target + '.c',
                                  input: modinfo_files,
                                  command: [modinfo_generate, '--devices', config_devices_mak, '@INPUT@'],
                                  capture: true)

      modinfo_lib = static_library('modinfo-' + target + '.c', modinfo_src)
      modinfo_dep = declare_dependency(link_with: modinfo_lib)

      arch = config_target['TARGET_NAME'] == 'sparc64' ? 'sparc64' : config_target['TARGET_BASE_ARCH']
      hw_arch[arch].add(modinfo_dep)
    endif
  endforeach

  if emulator_modules.length() > 0
    alias_target('modules', emulator_modules)
  endif
endif

nm = find_program('nm')
undefsym = find_program('scripts/undefsym.py')
block_syms = custom_target('block.syms', output: 'block.syms',
                           input: [libqemuutil, block_mods],
                           capture: true,
                           command: [undefsym, nm, '@INPUT@'])
qemu_syms = custom_target('qemu.syms', output: 'qemu.syms',
                          input: [libqemuutil, system_mods],
                          capture: true,
                          command: [undefsym, nm, '@INPUT@'])
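
# block.syms / qemu.syms list the symbols that the loadable modules resolve
# from the main binaries; they are passed to the linker so those symbols stay
# exported even when nothing in the executable itself references them.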

authz_ss = authz_ss.apply({})
libauthz = static_library('authz', authz_ss.sources() + genh,
                          dependencies: [authz_ss.dependencies()],
                          build_by_default: false)
authz = declare_dependency(objects: libauthz.extract_all_objects(recursive: false),
                           dependencies: [authz_ss.dependencies(), qom])

crypto_ss = crypto_ss.apply({})
libcrypto = static_library('crypto', crypto_ss.sources() + genh,
                           dependencies: [crypto_ss.dependencies()],
                           build_by_default: false)
crypto = declare_dependency(objects: libcrypto.extract_all_objects(recursive: false),
                            dependencies: [crypto_ss.dependencies(), authz, qom])

io_ss = io_ss.apply({})
libio = static_library('io', io_ss.sources() + genh,
                       dependencies: [io_ss.dependencies()],
                       link_with: libqemuutil,
                       build_by_default: false)
|
|
|
|
io = declare_dependency(objects: libio.extract_all_objects(recursive: false),
|
|
|
|
|
dependencies: [io_ss.dependencies(), crypto, qom])
|
2020-10-06 15:55:59 +03:00
|
|
|
|
|
2020-10-06 15:55:58 +03:00
|
|
|
|
libmigration = static_library('migration', sources: migration_files + genh,
|
|
|
|
|
build_by_default: false)
|
2024-05-24 11:00:22 +03:00
|
|
|
|
migration = declare_dependency(objects: libmigration.extract_all_objects(recursive: false),
|
2024-05-24 19:16:08 +03:00
|
|
|
|
dependencies: [qom, io])
|
2023-06-13 16:33:47 +03:00
|
|
|
|
system_ss.add(migration)
|
2020-10-06 15:55:58 +03:00
|
|
|
|
|
2023-08-30 12:52:43 +03:00
|
|
|
|
block_ss = block_ss.apply({})
|
2019-08-29 21:34:43 +03:00
|
|
|
|
libblock = static_library('block', block_ss.sources() + genh,
|
|
|
|
|
dependencies: block_ss.dependencies(),
|
|
|
|
|
build_by_default: false)
|
|
|
|
|
|
2024-05-24 11:00:22 +03:00
|
|
|
|
block = declare_dependency(objects: libblock.extract_all_objects(recursive: false),
|
|
|
|
|
dependencies: [block_ss.dependencies(), crypto, io])
|
2019-08-29 21:34:43 +03:00
|
|
|
|
|
2023-08-30 12:52:43 +03:00
|
|
|
|
blockdev_ss = blockdev_ss.apply({})
|
2020-09-29 15:55:14 +03:00
|
|
|
|
libblockdev = static_library('blockdev', blockdev_ss.sources() + genh,
|
|
|
|
|
dependencies: blockdev_ss.dependencies(),
|
|
|
|
|
build_by_default: false)
|
|
|
|
|
|
2024-05-24 11:00:22 +03:00
|
|
|
|
blockdev = declare_dependency(objects: libblockdev.extract_all_objects(recursive: false),
|
|
|
|
|
dependencies: [blockdev_ss.dependencies(), block, event_loop_base])
|
2020-09-29 15:55:14 +03:00
|
|
|
|
|
2023-08-30 12:52:43 +03:00
|
|
|
|
qmp_ss = qmp_ss.apply({})
|
2020-08-04 22:14:26 +03:00
|
|
|
|
libqmp = static_library('qmp', qmp_ss.sources() + genh,
|
|
|
|
|
dependencies: qmp_ss.dependencies(),
|
|
|
|
|
build_by_default: false)
|
|
|
|
|
|
2024-05-24 11:00:22 +03:00
|
|
|
|
qmp = declare_dependency(objects: libqmp.extract_all_objects(recursive: false),
|
|
|
|
|
dependencies: qmp_ss.dependencies())
|
2020-08-04 22:14:26 +03:00
|
|
|
|
|
2020-10-06 15:55:57 +03:00
|
|
|
|
libchardev = static_library('chardev', chardev_ss.sources() + genh,
|
2022-03-23 18:57:12 +03:00
|
|
|
|
dependencies: chardev_ss.dependencies(),
|
2020-10-06 15:55:57 +03:00
|
|
|
|
build_by_default: false)
|
|
|
|
|
|
2024-05-24 11:00:22 +03:00
|
|
|
|
chardev = declare_dependency(objects: libchardev.extract_all_objects(recursive: false),
|
|
|
|
|
dependencies: chardev_ss.dependencies())
|
2020-10-06 15:55:57 +03:00
|
|
|
|
|
2023-08-30 12:52:43 +03:00
|
|
|
|
hwcore_ss = hwcore_ss.apply({})
|
2021-10-28 17:34:19 +03:00
|
|
|
|
libhwcore = static_library('hwcore', sources: hwcore_ss.sources() + genh,
|
2020-10-06 15:55:56 +03:00
|
|
|
|
build_by_default: false)
|
2024-05-24 11:00:22 +03:00
|
|
|
|
hwcore = declare_dependency(objects: libhwcore.extract_all_objects(recursive: false))
|
2020-10-06 15:55:56 +03:00
|
|
|
|
common_ss.add(hwcore)
|
|
|
|
|
|
2020-10-06 15:55:54 +03:00
|
|
|
|
###########
|
|
|
|
|
# Targets #
|
|
|
|
|
###########
|
|
|
|
|
|
2023-06-13 16:33:47 +03:00
|
|
|
|
system_ss.add(authz, blockdev, chardev, crypto, io, qmp)
|
2020-08-03 18:04:25 +03:00
|
|
|
|
common_ss.add(qom, qemuutil)
|
|
|
|
|
|
2023-06-13 16:33:47 +03:00
|
|
|
|
common_ss.add_all(when: 'CONFIG_SYSTEM_ONLY', if_true: [system_ss])
|
2020-02-03 13:42:03 +03:00
|
|
|
|
common_ss.add_all(when: 'CONFIG_USER_ONLY', if_true: user_ss)
|
|
|
|
|
|
2023-08-31 12:18:24 +03:00
|
|
|
|
# Note that this library is never used directly (only through extract_objects)
|
|
|
|
|
# and is not built by default; therefore, source files not used by the build
|
|
|
|
|
# configuration will be in build.ninja, but are never built by default.
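# A sketch of how this library is consumed further below: each per-target build
# extracts only the objects for the sources selected by its configuration, e.g.
#   objects = common_all.extract_objects(target_common.sources())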
|
2020-02-03 13:42:03 +03:00
|
|
|
|
common_all = static_library('common',
|
|
|
|
|
build_by_default: false,
|
2023-08-31 12:18:24 +03:00
|
|
|
|
sources: common_ss.all_sources() + genh,
|
2021-12-21 18:09:54 +03:00
|
|
|
|
include_directories: common_user_inc,
|
2021-04-29 05:43:07 +03:00
|
|
|
|
implicit_include_directories: false,
|
2024-05-24 11:56:55 +03:00
|
|
|
|
dependencies: common_ss.all_dependencies())
|
2020-02-03 13:42:03 +03:00
|
|
|
|
|
2024-10-03 16:28:46 +03:00
|
|
|
|
if have_rust and have_system
|
|
|
|
|
rustc_args = run_command(
|
|
|
|
|
find_program('scripts/rust/rustc_args.py'),
|
|
|
|
|
'--config-headers', meson.project_build_root() / 'config-host.h',
|
|
|
|
|
capture : true,
|
|
|
|
|
check: true).stdout().strip().split()
|
|
|
|
|
rustc_args += ['-D', 'unsafe_op_in_unsafe_fn']
|
|
|
|
|
bindgen_args = [
|
|
|
|
|
'--disable-header-comment',
|
|
|
|
|
'--raw-line', '// @generated',
|
|
|
|
|
'--ctypes-prefix', 'core::ffi',
|
|
|
|
|
'--formatter', 'rustfmt',
|
|
|
|
|
'--generate-block',
|
|
|
|
|
'--generate-cstr',
|
|
|
|
|
'--impl-debug',
|
|
|
|
|
'--merge-extern-blocks',
|
|
|
|
|
'--no-doc-comments',
|
|
|
|
|
'--use-core',
|
|
|
|
|
'--with-derive-default',
|
|
|
|
|
'--no-size_t-is-usize',
|
|
|
|
|
'--no-layout-tests',
|
|
|
|
|
'--no-prepend-enum-name',
|
|
|
|
|
'--allowlist-file', meson.project_source_root() + '/include/.*',
|
|
|
|
|
'--allowlist-file', meson.project_source_root() + '/.*',
|
|
|
|
|
'--allowlist-file', meson.project_build_root() + '/.*'
|
|
|
|
|
]
|
|
|
|
|
c_enums = [
|
|
|
|
|
'DeviceCategory',
|
|
|
|
|
'GpioPolarity',
|
|
|
|
|
'MachineInitPhase',
|
|
|
|
|
'MemoryDeviceInfoKind',
|
|
|
|
|
'MigrationPolicy',
|
|
|
|
|
'MigrationPriority',
|
|
|
|
|
'QEMUChrEvent',
|
|
|
|
|
'QEMUClockType',
|
|
|
|
|
'device_endian',
|
|
|
|
|
'module_init_type',
|
|
|
|
|
]
|
|
|
|
|
foreach enum : c_enums
|
|
|
|
|
bindgen_args += ['--rustified-enum', enum]
|
|
|
|
|
endforeach
|
|
|
|
|
c_bitfields = [
|
|
|
|
|
'ClockEvent',
|
|
|
|
|
'VMStateFlags',
|
|
|
|
|
]
|
|
|
|
|
foreach enum : c_bitfields
|
|
|
|
|
bindgen_args += ['--bitfield-enum', enum]
|
|
|
|
|
endforeach
|
|
|
|
|
|
|
|
|
|
# TODO: Remove this comment when the clang/libclang mismatch issue is solved.
|
|
|
|
|
#
|
|
|
|
|
# Rust bindings generation with `bindgen` might fail in some cases where the
|
|
|
|
|
# detected `libclang` does not match the expected `clang` version/target. In
|
|
|
|
|
# this case you must pass the path to `clang` and `libclang` to your build
|
|
|
|
|
# command invocation using the environment variables CLANG_PATH and
|
|
|
|
|
# LIBCLANG_PATH
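# As a purely illustrative example (the clang/LLVM paths depend on the
# distribution's layout), such an invocation could look like:
#   CLANG_PATH=/usr/bin/clang-18 LIBCLANG_PATH=/usr/lib/llvm-18/lib ninja -C build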
|
|
|
|
|
bindings_rs = import('rust').bindgen(
|
|
|
|
|
input: 'rust/wrapper.h',
|
|
|
|
|
dependencies: common_ss.all_dependencies(),
|
|
|
|
|
output: 'bindings.rs',
|
|
|
|
|
include_directories: include_directories('.', 'include'),
|
|
|
|
|
bindgen_version: ['>=0.69.4'],
|
|
|
|
|
args: bindgen_args,
|
|
|
|
|
)
|
|
|
|
|
subdir('rust')
|
|
|
|
|
endif
|
|
|
|
|
|
|
|
|
|
|
2023-10-09 19:40:51 +03:00
|
|
|
|
feature_to_c = find_program('scripts/feature_to_c.py')
|
2019-08-18 18:51:17 +03:00
|
|
|
|
|
2023-11-03 11:17:48 +03:00
|
|
|
|
if host_os == 'darwin'
|
2022-02-15 19:25:03 +03:00
|
|
|
|
entitlement = find_program('scripts/entitlement.sh')
|
|
|
|
|
endif
|
|
|
|
|
|
2024-01-08 20:13:56 +03:00
|
|
|
|
traceable = []
|
2020-09-16 12:00:53 +03:00
|
|
|
|
emulators = {}
|
2020-02-03 13:42:03 +03:00
|
|
|
|
foreach target : target_dirs
|
|
|
|
|
config_target = config_target_mak[target]
|
|
|
|
|
target_name = config_target['TARGET_NAME']
|
2021-11-08 17:44:39 +03:00
|
|
|
|
target_base_arch = config_target['TARGET_BASE_ARCH']
|
2020-08-04 19:14:26 +03:00
|
|
|
|
arch_srcs = [config_target_h[target]]
|
2020-08-03 18:04:25 +03:00
|
|
|
|
arch_deps = []
|
2023-06-13 17:29:11 +03:00
|
|
|
|
c_args = ['-DCOMPILING_PER_TARGET',
|
2020-08-03 18:04:25 +03:00
|
|
|
|
'-DCONFIG_TARGET="@0@-config-target.h"'.format(target),
|
|
|
|
|
'-DCONFIG_DEVICES="@0@-config-devices.h"'.format(target)]
|
2020-09-21 11:49:50 +03:00
|
|
|
|
link_args = emulator_link_args
|
2020-02-03 13:42:03 +03:00
|
|
|
|
|
|
|
|
|
target_inc = [include_directories('target' / config_target['TARGET_BASE_ARCH'])]
|
2023-11-03 11:17:48 +03:00
|
|
|
|
if host_os == 'linux'
|
2020-02-03 13:42:03 +03:00
|
|
|
|
target_inc += include_directories('linux-headers', is_system: true)
|
|
|
|
|
endif
|
|
|
|
|
if target.endswith('-softmmu')
|
|
|
|
|
target_type='system'
|
2023-10-04 12:06:27 +03:00
|
|
|
|
t = target_system_arch[target_base_arch].apply(config_target, strict: false)
|
2020-08-07 13:10:23 +03:00
|
|
|
|
arch_srcs += t.sources()
|
2020-08-03 18:04:25 +03:00
|
|
|
|
arch_deps += t.dependencies()
|
2020-08-07 13:10:23 +03:00
|
|
|
|
|
2021-11-08 17:44:39 +03:00
|
|
|
|
hw_dir = target_name == 'sparc64' ? 'sparc64' : target_base_arch
|
2023-11-21 15:52:48 +03:00
|
|
|
|
if hw_arch.has_key(hw_dir)
|
|
|
|
|
hw = hw_arch[hw_dir].apply(config_target, strict: false)
|
|
|
|
|
arch_srcs += hw.sources()
|
|
|
|
|
arch_deps += hw.dependencies()
|
|
|
|
|
endif
|
2019-08-17 12:55:58 +03:00
|
|
|
|
|
2020-02-03 13:42:03 +03:00
|
|
|
|
arch_srcs += config_devices_h[target]
|
2020-08-03 18:04:25 +03:00
|
|
|
|
link_args += ['@block.syms', '@qemu.syms']
|
2020-02-03 13:42:03 +03:00
|
|
|
|
else
|
2019-08-18 15:13:08 +03:00
|
|
|
|
abi = config_target['TARGET_ABI_DIR']
|
2020-02-03 13:42:03 +03:00
|
|
|
|
target_type='user'
|
2021-12-21 18:23:55 +03:00
|
|
|
|
target_inc += common_user_inc
|
2021-11-08 17:44:39 +03:00
|
|
|
|
if target_base_arch in target_user_arch
|
|
|
|
|
t = target_user_arch[target_base_arch].apply(config_target, strict: false)
|
2021-04-13 12:27:09 +03:00
|
|
|
|
arch_srcs += t.sources()
|
|
|
|
|
arch_deps += t.dependencies()
|
|
|
|
|
endif
|
2020-02-03 13:42:03 +03:00
|
|
|
|
if 'CONFIG_LINUX_USER' in config_target
|
|
|
|
|
base_dir = 'linux-user'
|
2021-08-04 02:17:17 +03:00
|
|
|
|
endif
|
|
|
|
|
if 'CONFIG_BSD_USER' in config_target
|
2020-02-03 13:42:03 +03:00
|
|
|
|
base_dir = 'bsd-user'
|
2023-11-03 11:17:48 +03:00
|
|
|
|
target_inc += include_directories('bsd-user/' / host_os)
|
2022-01-09 03:27:34 +03:00
|
|
|
|
target_inc += include_directories('bsd-user/host/' / host_arch)
|
2021-08-04 02:17:17 +03:00
|
|
|
|
dir = base_dir / abi
|
2021-11-05 01:34:48 +03:00
|
|
|
|
arch_srcs += files(dir / 'signal.c', dir / 'target_arch_cpu.c')
|
2020-02-03 13:42:03 +03:00
|
|
|
|
endif
|
|
|
|
|
target_inc += include_directories(
|
|
|
|
|
base_dir,
|
2019-08-18 15:13:08 +03:00
|
|
|
|
base_dir / abi,
|
2020-02-03 13:42:03 +03:00
|
|
|
|
)
|
2019-08-18 15:13:08 +03:00
|
|
|
|
if 'CONFIG_LINUX_USER' in config_target
|
|
|
|
|
dir = base_dir / abi
|
|
|
|
|
arch_srcs += files(dir / 'signal.c', dir / 'cpu_loop.c')
|
|
|
|
|
if config_target.has_key('TARGET_SYSTBL_ABI')
|
|
|
|
|
arch_srcs += \
|
|
|
|
|
syscall_nr_generators[abi].process(base_dir / abi / config_target['TARGET_SYSTBL'],
|
|
|
|
|
extra_args : config_target['TARGET_SYSTBL_ABI'])
|
|
|
|
|
endif
|
|
|
|
|
endif
|
2020-02-03 13:42:03 +03:00
|
|
|
|
endif
|
|
|
|
|
|
2019-08-18 18:51:17 +03:00
|
|
|
|
if 'TARGET_XML_FILES' in config_target
|
|
|
|
|
gdbstub_xml = custom_target(target + '-gdbstub-xml.c',
|
|
|
|
|
output: target + '-gdbstub-xml.c',
|
|
|
|
|
input: files(config_target['TARGET_XML_FILES'].split()),
|
|
|
|
|
command: [feature_to_c, '@INPUT@'],
|
|
|
|
|
capture: true)
|
|
|
|
|
arch_srcs += gdbstub_xml
|
|
|
|
|
endif
|
|
|
|
|
|
2021-11-08 17:44:39 +03:00
|
|
|
|
t = target_arch[target_base_arch].apply(config_target, strict: false)
|
2020-08-07 13:10:23 +03:00
|
|
|
|
arch_srcs += t.sources()
|
2020-08-03 18:04:25 +03:00
|
|
|
|
arch_deps += t.dependencies()
|
2020-08-07 13:10:23 +03:00
|
|
|
|
|
2020-02-03 13:42:03 +03:00
|
|
|
|
target_common = common_ss.apply(config_target, strict: false)
|
|
|
|
|
objects = common_all.extract_objects(target_common.sources())
|
2024-05-07 13:22:31 +03:00
|
|
|
|
arch_deps += target_common.dependencies()
|
2020-02-03 13:42:03 +03:00
|
|
|
|
|
|
|
|
|
target_specific = specific_ss.apply(config_target, strict: false)
|
|
|
|
|
arch_srcs += target_specific.sources()
|
2020-08-03 18:04:25 +03:00
|
|
|
|
arch_deps += target_specific.dependencies()
|
2020-02-03 13:42:03 +03:00
|
|
|
|
|
2024-10-10 17:11:28 +03:00
|
|
|
|
if have_rust and have_system
|
|
|
|
|
target_rust = rust_devices_ss.apply(config_target, strict: false)
|
|
|
|
|
crates = []
|
|
|
|
|
foreach dep : target_rust.dependencies()
|
|
|
|
|
crates += dep.get_variable('crate')
|
|
|
|
|
endforeach
|
|
|
|
|
if crates.length() > 0
|
|
|
|
|
rlib_rs = custom_target('rust_' + target.underscorify() + '.rs',
|
|
|
|
|
output: 'rust_' + target.underscorify() + '.rs',
|
|
|
|
|
command: [find_program('scripts/rust/rust_root_crate.sh')] + crates,
|
|
|
|
|
capture: true,
|
|
|
|
|
build_by_default: true,
|
|
|
|
|
build_always_stale: true)
|
|
|
|
|
rlib = static_library('rust_' + target.underscorify(),
|
|
|
|
|
rlib_rs,
|
|
|
|
|
dependencies: target_rust.dependencies(),
|
|
|
|
|
override_options: ['rust_std=2021', 'build.rust_std=2021'],
|
|
|
|
|
rust_args: rustc_args,
|
|
|
|
|
rust_abi: 'c')
|
|
|
|
|
arch_deps += declare_dependency(link_whole: [rlib])
|
|
|
|
|
endif
|
|
|
|
|
endif
|
|
|
|
|
|
2024-05-07 13:22:31 +03:00
|
|
|
|
# allow using headers from the dependencies but do not include the sources,
|
|
|
|
|
# because this emulator only needs those in "objects". For external
|
|
|
|
|
# dependencies, the full dependency is included below in the executable.
|
|
|
|
|
lib_deps = []
|
|
|
|
|
foreach dep : arch_deps
|
|
|
|
|
lib_deps += dep.partial_dependency(compile_args: true, includes: true)
|
|
|
|
|
endforeach
|
|
|
|
|
|
2020-08-03 18:04:25 +03:00
|
|
|
|
lib = static_library('qemu-' + target,
|
2020-08-04 19:14:26 +03:00
|
|
|
|
sources: arch_srcs + genh,
|
2024-05-07 13:22:31 +03:00
|
|
|
|
dependencies: lib_deps,
|
2020-02-03 13:42:03 +03:00
|
|
|
|
objects: objects,
|
|
|
|
|
include_directories: target_inc,
|
2020-08-03 18:04:25 +03:00
|
|
|
|
c_args: c_args,
|
2024-05-24 11:56:55 +03:00
|
|
|
|
build_by_default: false)
|
2020-08-03 18:04:25 +03:00
|
|
|
|
|
|
|
|
|
if target.endswith('-softmmu')
|
|
|
|
|
execs = [{
|
|
|
|
|
'name': 'qemu-system-' + target_name,
|
2021-02-09 16:59:26 +03:00
|
|
|
|
'win_subsystem': 'console',
|
2023-10-04 12:06:28 +03:00
|
|
|
|
'sources': files('system/main.c'),
|
2020-08-03 18:04:25 +03:00
|
|
|
|
'dependencies': []
|
|
|
|
|
}]
|
2023-11-03 11:17:48 +03:00
|
|
|
|
if host_os == 'windows' and (sdl.found() or gtk.found())
|
2020-08-03 18:04:25 +03:00
|
|
|
|
execs += [{
|
|
|
|
|
'name': 'qemu-system-' + target_name + 'w',
|
2021-02-09 16:59:26 +03:00
|
|
|
|
'win_subsystem': 'windows',
|
2023-10-04 12:06:28 +03:00
|
|
|
|
'sources': files('system/main.c'),
|
2020-08-03 18:04:25 +03:00
|
|
|
|
'dependencies': []
|
|
|
|
|
}]
|
|
|
|
|
endif
|
2021-10-07 16:08:12 +03:00
|
|
|
|
if get_option('fuzzing')
|
2020-08-03 18:04:25 +03:00
|
|
|
|
specific_fuzz = specific_fuzz_ss.apply(config_target, strict: false)
|
|
|
|
|
execs += [{
|
|
|
|
|
'name': 'qemu-fuzz-' + target_name,
|
2021-02-09 16:59:26 +03:00
|
|
|
|
'win_subsystem': 'console',
|
2020-08-03 18:04:25 +03:00
|
|
|
|
'sources': specific_fuzz.sources(),
|
|
|
|
|
'dependencies': specific_fuzz.dependencies(),
|
|
|
|
|
}]
|
|
|
|
|
endif
|
|
|
|
|
else
|
|
|
|
|
execs = [{
|
|
|
|
|
'name': 'qemu-' + target_name,
|
2021-02-09 16:59:26 +03:00
|
|
|
|
'win_subsystem': 'console',
|
2020-08-03 18:04:25 +03:00
|
|
|
|
'sources': [],
|
|
|
|
|
'dependencies': []
|
|
|
|
|
}]
|
|
|
|
|
endif
|
|
|
|
|
foreach exe: execs
|
2021-01-21 01:44:34 +03:00
|
|
|
|
exe_name = exe['name']
|
2023-11-03 11:17:48 +03:00
|
|
|
|
if host_os == 'darwin'
|
2021-01-21 01:44:34 +03:00
|
|
|
|
exe_name += '-unsigned'
|
|
|
|
|
endif
|
|
|
|
|
|
|
|
|
|
emulator = executable(exe_name, exe['sources'],
|
2021-02-25 03:06:14 +03:00
|
|
|
|
install: true,
|
2020-08-03 18:04:25 +03:00
|
|
|
|
c_args: c_args,
|
2024-05-07 13:22:31 +03:00
|
|
|
|
dependencies: arch_deps + exe['dependencies'],
|
2020-08-03 18:04:25 +03:00
|
|
|
|
objects: lib.extract_all_objects(recursive: true),
|
2024-03-20 13:28:28 +03:00
|
|
|
|
link_depends: [block_syms, qemu_syms],
|
2020-08-03 18:04:25 +03:00
|
|
|
|
link_args: link_args,
|
2021-02-09 16:59:26 +03:00
|
|
|
|
win_subsystem: exe['win_subsystem'])
|
2021-01-21 01:44:34 +03:00
|
|
|
|
|
2023-11-03 11:17:48 +03:00
|
|
|
|
if host_os == 'darwin'
|
2021-07-09 04:25:33 +03:00
|
|
|
|
icon = 'pc-bios/qemu.rsrc'
|
|
|
|
|
build_input = [emulator, files(icon)]
|
|
|
|
|
install_input = [
|
|
|
|
|
get_option('bindir') / exe_name,
|
|
|
|
|
meson.current_source_dir() / icon
|
|
|
|
|
]
|
|
|
|
|
if 'CONFIG_HVF' in config_target
|
|
|
|
|
entitlements = 'accel/hvf/entitlements.plist'
|
|
|
|
|
build_input += files(entitlements)
|
|
|
|
|
install_input += meson.current_source_dir() / entitlements
|
|
|
|
|
endif
|
|
|
|
|
|
2021-01-21 01:44:34 +03:00
|
|
|
|
emulators += {exe['name'] : custom_target(exe['name'],
|
2021-07-09 04:25:33 +03:00
|
|
|
|
input: build_input,
|
2021-01-21 01:44:34 +03:00
|
|
|
|
output: exe['name'],
|
2022-01-22 03:20:52 +03:00
|
|
|
|
command: [entitlement, '@OUTPUT@', '@INPUT@'])
|
2021-01-21 01:44:34 +03:00
|
|
|
|
}
|
2021-02-25 03:06:14 +03:00
|
|
|
|
|
2022-01-22 03:20:52 +03:00
|
|
|
|
meson.add_install_script(entitlement, '--install',
|
2021-02-25 03:06:14 +03:00
|
|
|
|
get_option('bindir') / exe['name'],
|
2021-07-09 04:25:33 +03:00
|
|
|
|
install_input)
|
2021-01-21 01:44:34 +03:00
|
|
|
|
else
|
|
|
|
|
emulators += {exe['name']: emulator}
|
|
|
|
|
endif
|
2019-08-20 11:29:52 +03:00
|
|
|
|
|
2024-01-08 20:13:56 +03:00
|
|
|
|
traceable += [{
|
|
|
|
|
'exe': exe['name'],
|
|
|
|
|
'probe-prefix': 'qemu.' + target_type + '.' + target_name,
|
|
|
|
|
}]
|
|
|
|
|
|
2020-08-03 18:04:25 +03:00
|
|
|
|
endforeach
|
2020-02-03 13:42:03 +03:00
|
|
|
|
endforeach
|
|
|
|
|
|
2020-02-05 11:44:24 +03:00
|
|
|
|
# Other build targets
|
2019-07-16 20:54:15 +03:00
|
|
|
|
|
2023-08-30 13:20:53 +03:00
|
|
|
|
if get_option('plugins')
|
2020-01-24 15:08:01 +03:00
|
|
|
|
install_headers('include/qemu/qemu-plugin.h')
|
2023-11-03 11:17:48 +03:00
|
|
|
|
if host_os == 'windows'
|
2023-11-06 21:51:06 +03:00
|
|
|
|
# On Windows, we want to deliver the qemu_plugin_api.lib file in the QEMU installer,
|
|
|
|
|
# so that plugin authors can compile against it.
|
|
|
|
|
install_data(win32_qemu_plugin_api_lib, install_dir: 'lib')
|
|
|
|
|
endif
|
2020-01-24 15:08:01 +03:00
|
|
|
|
endif
|
|
|
|
|
|
2021-10-15 17:47:43 +03:00
|
|
|
|
subdir('qga')
|
2019-07-18 14:19:02 +03:00
|
|
|
|
|
2020-08-24 18:24:30 +03:00
|
|
|
|
# Don't build qemu-keymap if xkbcommon is not explicitly enabled
|
|
|
|
|
# when we don't build tools or system
|
2020-08-24 18:24:29 +03:00
|
|
|
|
if xkbcommon.found()
|
2019-09-19 19:24:43 +03:00
|
|
|
|
# used for the update-keymaps target, so include rules even if !have_tools
|
|
|
|
|
qemu_keymap = executable('qemu-keymap', files('qemu-keymap.c', 'ui/input-keymap.c') + genh,
|
|
|
|
|
dependencies: [qemuutil, xkbcommon], install: have_tools)
|
|
|
|
|
endif
|
|
|
|
|
|
2020-02-05 11:44:24 +03:00
|
|
|
|
if have_tools
|
2019-07-16 20:37:25 +03:00
|
|
|
|
qemu_img = executable('qemu-img', [files('qemu-img.c'), hxdep],
|
2024-05-24 14:17:08 +03:00
|
|
|
|
link_args: '@block.syms', link_depends: block_syms,
|
2019-07-16 20:37:25 +03:00
|
|
|
|
dependencies: [authz, block, crypto, io, qom, qemuutil], install: true)
|
|
|
|
|
qemu_io = executable('qemu-io', files('qemu-io.c'),
|
2024-05-24 14:17:08 +03:00
|
|
|
|
link_args: '@block.syms', link_depends: block_syms,
|
2019-07-16 20:37:25 +03:00
|
|
|
|
dependencies: [block, qemuutil], install: true)
|
2020-08-25 13:38:50 +03:00
|
|
|
|
qemu_nbd = executable('qemu-nbd', files('qemu-nbd.c'),
|
2024-05-24 14:17:08 +03:00
|
|
|
|
link_args: '@block.syms', link_depends: block_syms,
|
2024-05-24 11:00:23 +03:00
|
|
|
|
dependencies: [blockdev, qemuutil, selinux],
|
2021-11-15 23:29:43 +03:00
|
|
|
|
install: true)
|
2019-07-16 20:37:25 +03:00
|
|
|
|
|
2020-08-04 21:18:36 +03:00
|
|
|
|
subdir('storage-daemon')
|
2024-01-08 20:13:56 +03:00
|
|
|
|
|
|
|
|
|
foreach exe: [ 'qemu-img', 'qemu-io', 'qemu-nbd', 'qemu-storage-daemon']
|
|
|
|
|
traceable += [{
|
|
|
|
|
'exe': exe,
|
|
|
|
|
'probe-prefix': 'qemu.' + exe.substring(5).replace('-', '_')
|
|
|
|
|
}]
|
|
|
|
|
endforeach
|
|
|
|
|
|
2019-07-12 22:47:06 +03:00
|
|
|
|
subdir('contrib/elf2dmp')
|
2019-06-10 13:27:52 +03:00
|
|
|
|
|
2019-07-15 13:50:58 +03:00
|
|
|
|
executable('qemu-edid', files('qemu-edid.c', 'hw/display/edid-generate.c'),
|
|
|
|
|
dependencies: qemuutil,
|
|
|
|
|
install: true)
|
|
|
|
|
|
2022-04-20 18:34:05 +03:00
|
|
|
|
if have_vhost_user
|
2019-06-10 13:18:02 +03:00
|
|
|
|
subdir('contrib/vhost-user-blk')
|
2020-08-26 09:22:58 +03:00
|
|
|
|
subdir('contrib/vhost-user-gpu')
|
2019-07-12 21:11:20 +03:00
|
|
|
|
subdir('contrib/vhost-user-input')
|
2019-06-10 13:21:14 +03:00
|
|
|
|
subdir('contrib/vhost-user-scsi')
|
2020-02-05 11:44:24 +03:00
|
|
|
|
endif
|
2019-07-15 13:39:25 +03:00
|
|
|
|
|
2023-11-03 11:17:48 +03:00
|
|
|
|
if host_os == 'linux'
|
2019-07-15 13:39:25 +03:00
|
|
|
|
executable('qemu-bridge-helper', files('qemu-bridge-helper.c'),
|
|
|
|
|
dependencies: [qemuutil, libcap_ng],
|
|
|
|
|
install: true,
|
|
|
|
|
install_dir: get_option('libexecdir'))
|
2019-07-16 20:54:15 +03:00
|
|
|
|
|
|
|
|
|
executable('qemu-pr-helper', files('scsi/qemu-pr-helper.c', 'scsi/utils.c'),
|
|
|
|
|
dependencies: [authz, crypto, io, qom, qemuutil,
|
2020-09-16 19:07:29 +03:00
|
|
|
|
libcap_ng, mpathpersist],
|
2019-07-16 20:54:15 +03:00
|
|
|
|
install: true)
|
2024-05-22 18:34:51 +03:00
|
|
|
|
|
|
|
|
|
if cpu in ['x86', 'x86_64']
|
|
|
|
|
executable('qemu-vmsr-helper', files('tools/i386/qemu-vmsr-helper.c'),
|
|
|
|
|
dependencies: [authz, crypto, io, qom, qemuutil,
|
|
|
|
|
libcap_ng, mpathpersist],
|
|
|
|
|
install: true)
|
|
|
|
|
endif
|
2019-07-15 13:39:25 +03:00
|
|
|
|
endif
|
|
|
|
|
|
2021-06-03 13:50:17 +03:00
|
|
|
|
if have_ivshmem
|
2019-07-12 22:16:54 +03:00
|
|
|
|
subdir('contrib/ivshmem-client')
|
|
|
|
|
subdir('contrib/ivshmem-server')
|
|
|
|
|
endif
|
2020-02-05 11:44:24 +03:00
|
|
|
|
endif
|
|
|
|
|
|
2024-01-08 20:13:56 +03:00
|
|
|
|
if stap.found()
|
|
|
|
|
foreach t: traceable
|
|
|
|
|
foreach stp: [
|
|
|
|
|
{'ext': '.stp-build', 'fmt': 'stap', 'bin': meson.current_build_dir() / t['exe'], 'install': false},
|
|
|
|
|
{'ext': '.stp', 'fmt': 'stap', 'bin': get_option('prefix') / get_option('bindir') / t['exe'], 'install': true},
|
|
|
|
|
{'ext': '-simpletrace.stp', 'fmt': 'simpletrace-stap', 'bin': '', 'install': true},
|
|
|
|
|
{'ext': '-log.stp', 'fmt': 'log-stap', 'bin': '', 'install': true},
|
|
|
|
|
]
|
|
|
|
|
cmd = [
|
|
|
|
|
tracetool, '--group=all', '--format=' + stp['fmt'],
|
|
|
|
|
'--binary=' + stp['bin'],
|
|
|
|
|
'--probe-prefix=' + t['probe-prefix'],
|
|
|
|
|
'@INPUT@', '@OUTPUT@'
|
|
|
|
|
]
|
|
|
|
|
|
|
|
|
|
custom_target(t['exe'] + stp['ext'],
|
|
|
|
|
input: trace_events_all,
|
|
|
|
|
output: t['exe'] + stp['ext'],
|
|
|
|
|
install: stp['install'],
|
|
|
|
|
install_dir: get_option('datadir') / 'systemtap/tapset',
|
|
|
|
|
command: cmd,
|
|
|
|
|
depend_files: tracetool_depends)
|
|
|
|
|
endforeach
|
|
|
|
|
endforeach
|
|
|
|
|
endif
|
|
|
|
|
|
2020-08-26 16:06:18 +03:00
|
|
|
|
subdir('scripts')
|
2020-02-05 11:45:39 +03:00
|
|
|
|
subdir('tools')
|
2019-07-15 20:22:31 +03:00
|
|
|
|
subdir('pc-bios')
|
2020-08-05 16:49:10 +03:00
|
|
|
|
subdir('docs')
|
2020-10-16 01:06:25 +03:00
|
|
|
|
subdir('tests')
|
2021-01-07 16:02:29 +03:00
|
|
|
|
if gtk.found()
|
2019-09-19 20:02:09 +03:00
|
|
|
|
subdir('po')
|
|
|
|
|
endif
|
2020-02-05 11:45:39 +03:00
|
|
|
|
|
2020-08-26 14:04:19 +03:00
|
|
|
|
if host_os == 'windows'
|
|
|
|
|
nsis_cmd = [
|
|
|
|
|
find_program('scripts/nsis.py'),
|
|
|
|
|
'@OUTPUT@',
|
|
|
|
|
get_option('prefix'),
|
|
|
|
|
meson.current_source_dir(),
|
2022-10-12 12:31:32 +03:00
|
|
|
|
glib_pc.get_variable('bindir'),
|
2020-11-25 22:18:33 +03:00
|
|
|
|
host_machine.cpu(),
|
2020-08-26 14:04:19 +03:00
|
|
|
|
'--',
|
|
|
|
|
'-DDISPLAYVERSION=' + meson.project_version(),
|
|
|
|
|
]
|
|
|
|
|
if build_docs
|
|
|
|
|
nsis_cmd += '-DCONFIG_DOCUMENTATION=y'
|
|
|
|
|
endif
|
2021-01-07 16:02:29 +03:00
|
|
|
|
if gtk.found()
|
2020-08-26 14:04:19 +03:00
|
|
|
|
nsis_cmd += '-DCONFIG_GTK=y'
|
|
|
|
|
endif
|
|
|
|
|
|
|
|
|
|
nsis = custom_target('nsis',
|
|
|
|
|
output: 'qemu-setup-' + meson.project_version() + '.exe',
|
|
|
|
|
input: files('qemu.nsi'),
|
|
|
|
|
build_always_stale: true,
|
|
|
|
|
command: nsis_cmd + ['@INPUT@'])
|
|
|
|
|
alias_target('installer', nsis)
|
|
|
|
|
endif
|
|
|
|
|
|
2020-10-07 18:01:51 +03:00
|
|
|
|
#########################
|
|
|
|
|
# Configuration summary #
|
|
|
|
|
#########################
|
|
|
|
|
|
2023-05-18 17:11:29 +03:00
|
|
|
|
# Build environment
|
2020-02-03 15:28:38 +03:00
|
|
|
|
summary_info = {}
|
2023-05-18 17:11:29 +03:00
|
|
|
|
summary_info += {'Build directory': meson.current_build_dir()}
|
|
|
|
|
summary_info += {'Source path': meson.current_source_dir()}
|
|
|
|
|
summary_info += {'Download dependencies': get_option('wrap_mode') != 'nodownload'}
|
|
|
|
|
summary(summary_info, bool_yn: true, section: 'Build environment')
|
|
|
|
|
|
|
|
|
|
# Directories
|
2020-10-16 10:19:14 +03:00
|
|
|
|
summary_info += {'Install prefix': get_option('prefix')}
|
|
|
|
|
summary_info += {'BIOS directory': qemu_datadir}
|
2023-11-03 11:17:48 +03:00
|
|
|
|
pathsep = host_os == 'windows' ? ';' : ':'
|
2022-06-24 18:40:42 +03:00
|
|
|
|
summary_info += {'firmware path': pathsep.join(get_option('qemu_firmwarepath'))}
|
2022-04-20 18:33:56 +03:00
|
|
|
|
summary_info += {'binary directory': get_option('prefix') / get_option('bindir')}
|
|
|
|
|
summary_info += {'library directory': get_option('prefix') / get_option('libdir')}
|
2020-10-16 10:19:14 +03:00
|
|
|
|
summary_info += {'module directory': qemu_moddir}
|
2022-04-20 18:33:56 +03:00
|
|
|
|
summary_info += {'libexec directory': get_option('prefix') / get_option('libexecdir')}
|
|
|
|
|
summary_info += {'include directory': get_option('prefix') / get_option('includedir')}
|
|
|
|
|
summary_info += {'config directory': get_option('prefix') / get_option('sysconfdir')}
|
2023-11-03 11:17:48 +03:00
|
|
|
|
if host_os != 'windows'
|
2022-04-20 18:33:56 +03:00
|
|
|
|
summary_info += {'local state directory': get_option('prefix') / get_option('localstatedir')}
|
|
|
|
|
summary_info += {'Manual directory': get_option('prefix') / get_option('mandir')}
|
2020-02-03 15:28:38 +03:00
|
|
|
|
else
|
|
|
|
|
summary_info += {'local state directory': 'queried at runtime'}
|
|
|
|
|
endif
|
2022-04-20 18:33:56 +03:00
|
|
|
|
summary_info += {'Doc directory': get_option('prefix') / get_option('docdir')}
|
2021-01-21 12:56:09 +03:00
|
|
|
|
summary(summary_info, bool_yn: true, section: 'Directories')
|
|
|
|
|
|
2021-01-21 12:56:10 +03:00
|
|
|
|
# Host binaries
|
|
|
|
|
summary_info = {}
|
|
|
|
|
summary_info += {'python': '@0@ (version: @1@)'.format(python.full_path(), python.language_version())}
|
2021-06-03 12:24:56 +03:00
|
|
|
|
summary_info += {'sphinx-build': sphinx_build}
|
2023-09-28 13:00:48 +03:00
|
|
|
|
|
|
|
|
|
# FIXME: the [binaries] section of machine files, which can be probed
|
|
|
|
|
# with find_program(), would be great for passing gdb and genisoimage
|
|
|
|
|
# paths from configure to Meson. However, there seems to be no way to
|
|
|
|
|
# hide a program (for example if gdb is too old).
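# For illustration, a machine file could provide (hypothetical path):
#   [binaries]
#   gdb = '/usr/bin/gdb'
# which find_program('gdb', required: false) would then pick up.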
|
2023-09-28 11:44:56 +03:00
|
|
|
|
if config_host.has_key('GDB')
|
|
|
|
|
summary_info += {'gdb': config_host['GDB']}
|
2021-01-21 12:56:10 +03:00
|
|
|
|
endif
|
2022-04-20 18:33:49 +03:00
|
|
|
|
summary_info += {'iasl': iasl}
|
2021-01-21 12:56:10 +03:00
|
|
|
|
summary_info += {'genisoimage': config_host['GENISOIMAGE']}
|
2023-11-03 11:17:48 +03:00
|
|
|
|
if host_os == 'windows' and have_ga
|
2021-06-03 12:24:56 +03:00
|
|
|
|
summary_info += {'wixl': wixl}
|
2021-01-21 12:56:10 +03:00
|
|
|
|
endif
|
2022-04-08 19:20:47 +03:00
|
|
|
|
if slirp.found() and have_system
|
2021-10-13 14:43:36 +03:00
|
|
|
|
summary_info += {'smbd': have_slirp_smbd ? smbd_path : false}
|
2021-01-21 12:56:10 +03:00
|
|
|
|
endif
|
|
|
|
|
summary(summary_info, bool_yn: true, section: 'Host binaries')
|
|
|
|
|
|
2021-01-21 12:56:11 +03:00
|
|
|
|
# Configurable features
|
|
|
|
|
summary_info = {}
|
|
|
|
|
summary_info += {'Documentation': build_docs}
|
2021-01-21 12:56:13 +03:00
|
|
|
|
summary_info += {'system-mode emulation': have_system}
|
|
|
|
|
summary_info += {'user-mode emulation': have_user}
|
2021-01-21 12:56:14 +03:00
|
|
|
|
summary_info += {'block layer': have_block}
|
2021-01-21 12:56:11 +03:00
|
|
|
|
summary_info += {'Install blobs': get_option('install_blobs')}
|
2022-10-20 15:53:10 +03:00
|
|
|
|
summary_info += {'module support': enable_modules}
|
|
|
|
|
if enable_modules
|
2022-04-20 18:33:46 +03:00
|
|
|
|
summary_info += {'alternative module path': get_option('module_upgrades')}
|
2021-01-21 12:56:11 +03:00
|
|
|
|
endif
|
2021-10-07 16:08:12 +03:00
|
|
|
|
summary_info += {'fuzzing support': get_option('fuzzing')}
|
2021-01-21 12:56:11 +03:00
|
|
|
|
if have_system
|
2021-10-07 16:06:09 +03:00
|
|
|
|
summary_info += {'Audio drivers': ' '.join(audio_drivers_selected)}
|
2021-01-21 12:56:11 +03:00
|
|
|
|
endif
|
2021-10-07 16:08:14 +03:00
|
|
|
|
summary_info += {'Trace backends': ','.join(get_option('trace_backends'))}
|
|
|
|
|
if 'simple' in get_option('trace_backends')
|
|
|
|
|
summary_info += {'Trace output file': get_option('trace_file') + '-<pid>'}
|
2021-01-21 12:56:11 +03:00
|
|
|
|
endif
|
2021-07-15 10:53:53 +03:00
|
|
|
|
summary_info += {'D-Bus display': dbus_display}
|
2021-10-13 12:46:09 +03:00
|
|
|
|
summary_info += {'QOM debugging': get_option('qom_cast_debug')}
|
meson, cutils: allow non-relocatable installs
Say QEMU is configured with bindir = "/usr/bin" and a firmware path
that starts with "/usr/share/qemu". Ever since QEMU 5.2, QEMU's
install has been relocatable: if you move qemu-system-x86_64 from
/usr/bin to /home/username/bin, it will start looking for firmware in
/home/username/share/qemu. Previously, you would get a non-relocatable
install where the moved QEMU will keep looking for firmware in
/usr/share/qemu.
Windows almost always wants relocatable installs, and in fact that
is why QEMU 5.2 introduced relocatability in the first place.
However, newfangled distribution mechanisms such as AppImage
(https://docs.appimage.org/reference/best-practices.html), and
possibly NixOS, also dislike using at runtime the absolute paths
that were established at build time.
On POSIX systems you almost never care; if you do, your use case
dictates which one is desirable, so there's no single answer.
Obviously relocatability works fine most of the time, because not many
people have complained about QEMU's switch to relocatable install,
and that's why until now there was no way to disable relocatability.
But a non-relocatable, non-modular binary can help if you want to do
experiments with old firmware and new QEMU or vice versa (because you
can just upgrade/downgrade the firmware package, and use rpm2cpio or
similar to extract the QEMU binaries outside /usr), so allow both.
This patch allows one to build a non-relocatable install using a new
option to configure. Why? Because it's not too hard, and because
it helps the user double check the relocatability of their install.
Note that the same code that handles relocation also lets you run QEMU
from the build tree and pick e.g. firmware files from the source tree
transparently. Therefore that part remains active with this patch,
even if you configure with --disable-relocatable.
Suggested-by: Michael Tokarev <mjt@tls.msk.ru>
Reviewed-by: Emmanouil Pitsidianakis <manos.pitsidianakis@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2023-10-05 15:19:34 +03:00
|
|
|
|
summary_info += {'Relocatable install': get_option('relocatable')}
|
2022-04-20 18:34:05 +03:00
|
|
|
|
summary_info += {'vhost-kernel support': have_vhost_kernel}
|
|
|
|
|
summary_info += {'vhost-net support': have_vhost_net}
|
|
|
|
|
summary_info += {'vhost-user support': have_vhost_user}
|
|
|
|
|
summary_info += {'vhost-user-crypto support': have_vhost_user_crypto}
|
2021-01-21 12:56:11 +03:00
|
|
|
|
summary_info += {'vhost-user-blk server support': have_vhost_user_blk_server}
|
2022-04-20 18:34:05 +03:00
|
|
|
|
summary_info += {'vhost-vdpa support': have_vhost_vdpa}
|
2021-10-15 17:47:43 +03:00
|
|
|
|
summary_info += {'build guest agent': have_ga}
|
2021-01-21 12:56:11 +03:00
|
|
|
|
summary(summary_info, bool_yn: true, section: 'Configurable features')
|
|
|
|
|
|
2021-01-21 12:56:12 +03:00
|
|
|
|
# Compilation information
|
2021-01-21 12:56:09 +03:00
|
|
|
|
summary_info = {}
|
2021-01-21 12:56:12 +03:00
|
|
|
|
summary_info += {'host CPU': cpu}
|
|
|
|
|
summary_info += {'host endianness': build_machine.endian()}
|
2021-05-27 19:03:15 +03:00
|
|
|
|
summary_info += {'C compiler': ' '.join(meson.get_compiler('c').cmd_array())}
|
|
|
|
|
summary_info += {'Host C compiler': ' '.join(meson.get_compiler('c', native: true).cmd_array())}
|
2023-07-06 09:47:36 +03:00
|
|
|
|
if 'cpp' in all_languages
|
2021-05-27 19:03:15 +03:00
|
|
|
|
summary_info += {'C++ compiler': ' '.join(meson.get_compiler('cpp').cmd_array())}
|
2020-02-03 15:28:38 +03:00
|
|
|
|
else
|
|
|
|
|
summary_info += {'C++ compiler': false}
|
|
|
|
|
endif
|
2023-10-09 09:51:29 +03:00
|
|
|
|
if 'objc' in all_languages
|
2021-05-27 19:03:15 +03:00
|
|
|
|
summary_info += {'Objective-C compiler': ' '.join(meson.get_compiler('objc').cmd_array())}
|
2023-10-09 09:51:29 +03:00
|
|
|
|
else
|
|
|
|
|
summary_info += {'Objective-C compiler': false}
|
2020-02-03 15:28:38 +03:00
|
|
|
|
endif
|
2024-10-03 16:28:44 +03:00
|
|
|
|
summary_info += {'Rust support': have_rust}
|
|
|
|
|
if have_rust
|
2024-10-03 16:28:45 +03:00
|
|
|
|
summary_info += {'rustc version': rustc.version()}
|
|
|
|
|
summary_info += {'rustc': ' '.join(rustc.cmd_array())}
|
|
|
|
|
summary_info += {'Rust target': config_host['RUST_TARGET_TRIPLE']}
|
2024-10-03 16:28:44 +03:00
|
|
|
|
endif
|
2022-11-02 15:07:23 +03:00
|
|
|
|
option_cflags = (get_option('debug') ? ['-g'] : [])
|
|
|
|
|
if get_option('optimization') != 'plain'
|
|
|
|
|
option_cflags += ['-O' + get_option('optimization')]
|
|
|
|
|
endif
|
|
|
|
|
summary_info += {'CFLAGS': ' '.join(get_option('c_args') + option_cflags)}
|
2023-07-06 09:47:36 +03:00
|
|
|
|
if 'cpp' in all_languages
|
2022-11-02 15:07:23 +03:00
|
|
|
|
summary_info += {'CXXFLAGS': ' '.join(get_option('cpp_args') + option_cflags)}
|
2020-09-23 12:26:17 +03:00
|
|
|
|
endif
|
2023-10-09 09:51:29 +03:00
|
|
|
|
if 'objc' in all_languages
|
2022-11-02 15:07:23 +03:00
|
|
|
|
summary_info += {'OBJCFLAGS': ' '.join(get_option('objc_args') + option_cflags)}
|
2022-01-09 00:38:55 +03:00
|
|
|
|
endif
|
2023-07-06 09:47:36 +03:00
|
|
|
|
link_args = get_option('c_link_args')
|
2020-09-23 12:26:17 +03:00
|
|
|
|
if link_args.length() > 0
|
|
|
|
|
summary_info += {'LDFLAGS': ' '.join(link_args)}
|
|
|
|
|
endif
|
2022-10-12 18:13:23 +03:00
|
|
|
|
summary_info += {'QEMU_CFLAGS': ' '.join(qemu_common_flags + qemu_cflags)}
|
2022-10-12 15:15:06 +03:00
|
|
|
|
if 'cpp' in all_languages
|
2022-10-12 18:13:23 +03:00
|
|
|
|
summary_info += {'QEMU_CXXFLAGS': ' '.join(qemu_common_flags + qemu_cxxflags)}
|
2022-10-12 15:15:06 +03:00
|
|
|
|
endif
|
|
|
|
|
if 'objc' in all_languages
|
2022-12-22 11:28:56 +03:00
|
|
|
|
summary_info += {'QEMU_OBJCFLAGS': ' '.join(qemu_common_flags)}
|
2022-10-12 15:15:06 +03:00
|
|
|
|
endif
|
2022-04-20 18:33:34 +03:00
|
|
|
|
summary_info += {'QEMU_LDFLAGS': ' '.join(qemu_ldflags)}
|
2020-12-05 02:06:11 +03:00
|
|
|
|
summary_info += {'link-time optimization (LTO)': get_option('b_lto')}
|
2021-01-21 12:56:12 +03:00
|
|
|
|
summary_info += {'PIE': get_option('b_pie')}
|
2023-06-07 11:00:21 +03:00
|
|
|
|
summary_info += {'static build': get_option('prefer_static')}
|
2021-01-21 12:56:12 +03:00
|
|
|
|
summary_info += {'malloc trim support': has_malloc_trim}
|
2021-11-08 15:52:11 +03:00
|
|
|
|
summary_info += {'membarrier': have_membarrier}
|
2023-05-01 20:34:43 +03:00
|
|
|
|
summary_info += {'debug graph lock': get_option('debug_graph_lock')}
|
2021-10-13 12:52:03 +03:00
|
|
|
|
summary_info += {'debug stack usage': get_option('debug_stack_usage')}
|
2021-10-13 12:46:09 +03:00
|
|
|
|
summary_info += {'mutex debugging': get_option('debug_mutex')}
|
2021-01-21 12:56:12 +03:00
|
|
|
|
summary_info += {'memory allocator': get_option('malloc')}
|
2021-11-08 15:38:58 +03:00
|
|
|
|
summary_info += {'avx2 optimization': config_host_data.get('CONFIG_AVX2_OPT')}
|
2022-11-16 18:29:22 +03:00
|
|
|
|
summary_info += {'avx512bw optimization': config_host_data.get('CONFIG_AVX512BW_OPT')}
|
2021-01-21 12:56:12 +03:00
|
|
|
|
summary_info += {'gcov': get_option('b_coverage')}
|
2023-01-09 17:31:51 +03:00
|
|
|
|
summary_info += {'thread sanitizer': get_option('tsan')}
|
2021-01-21 12:56:12 +03:00
|
|
|
|
summary_info += {'CFI support': get_option('cfi')}
|
|
|
|
|
if get_option('cfi')
|
|
|
|
|
summary_info += {'CFI debug support': get_option('cfi_debug')}
|
|
|
|
|
endif
|
|
|
|
|
summary_info += {'strip binaries': get_option('strip')}
|
2021-06-03 12:24:56 +03:00
|
|
|
|
summary_info += {'sparse': sparse}
|
2023-11-03 11:17:48 +03:00
|
|
|
|
summary_info += {'mingw32 support': host_os == 'windows'}
|
2022-06-06 12:48:47 +03:00
|
|
|
|
summary(summary_info, bool_yn: true, section: 'Compilation')
|
2021-02-22 13:14:50 +03:00
|
|
|
|
|
|
|
|
|
# snarf the cross-compilation information for tests
|
2022-06-06 12:48:47 +03:00
|
|
|
|
summary_info = {}
|
|
|
|
|
have_cross = false
|
2021-02-22 13:14:50 +03:00
|
|
|
|
foreach target: target_dirs
|
2022-09-29 14:42:07 +03:00
|
|
|
|
tcg_mak = meson.current_build_dir() / 'tests/tcg' / target / 'config-target.mak'
|
2021-02-22 13:14:50 +03:00
|
|
|
|
if fs.exists(tcg_mak)
|
|
|
|
|
config_cross_tcg = keyval.load(tcg_mak)
|
2022-05-27 18:35:34 +03:00
|
|
|
|
if 'CC' in config_cross_tcg
|
2022-06-06 12:48:47 +03:00
|
|
|
|
summary_info += {config_cross_tcg['TARGET_NAME']: config_cross_tcg['CC']}
|
|
|
|
|
have_cross = true
|
2021-02-22 13:14:50 +03:00
|
|
|
|
endif
|
2022-06-06 12:48:47 +03:00
|
|
|
|
endif
|
2021-02-22 13:14:50 +03:00
|
|
|
|
endforeach
|
2022-06-06 12:48:47 +03:00
|
|
|
|
if have_cross
|
|
|
|
|
summary(summary_info, bool_yn: true, section: 'Cross compilers')
|
|
|
|
|
endif
|
2021-01-21 12:56:12 +03:00
|
|
|
|
|
2021-01-21 12:56:13 +03:00
|
|
|
|
# Targets and accelerators
|
2021-01-21 12:56:12 +03:00
|
|
|
|
summary_info = {}
|
2021-01-21 12:56:13 +03:00
|
|
|
|
if have_system
|
2023-09-29 12:40:03 +03:00
|
|
|
|
summary_info += {'KVM support': config_all_accel.has_key('CONFIG_KVM')}
|
|
|
|
|
summary_info += {'HVF support': config_all_accel.has_key('CONFIG_HVF')}
|
|
|
|
|
summary_info += {'WHPX support': config_all_accel.has_key('CONFIG_WHPX')}
|
|
|
|
|
summary_info += {'NVMM support': config_all_accel.has_key('CONFIG_NVMM')}
|
2022-04-20 18:33:47 +03:00
|
|
|
|
summary_info += {'Xen support': xen.found()}
|
|
|
|
|
if xen.found()
|
|
|
|
|
summary_info += {'xen ctrl version': xen.version()}
|
2021-01-21 12:56:13 +03:00
|
|
|
|
endif
|
2023-08-31 12:18:24 +03:00
|
|
|
|
summary_info += {'Xen emulation': config_all_devices.has_key('CONFIG_XEN_EMU')}
|
2021-01-21 12:56:13 +03:00
|
|
|
|
endif
|
2023-09-29 12:40:03 +03:00
|
|
|
|
summary_info += {'TCG support': config_all_accel.has_key('CONFIG_TCG')}
|
|
|
|
|
if config_all_accel.has_key('CONFIG_TCG')
|
2021-01-25 17:45:29 +03:00
|
|
|
|
if get_option('tcg_interpreter')
|
2021-11-06 14:14:57 +03:00
|
|
|
|
summary_info += {'TCG backend': 'TCI (TCG with bytecode interpreter, slow)'}
|
2021-01-25 17:45:29 +03:00
|
|
|
|
else
|
|
|
|
|
summary_info += {'TCG backend': 'native (@0@)'.format(cpu)}
|
|
|
|
|
endif
|
2023-08-30 13:20:53 +03:00
|
|
|
|
summary_info += {'TCG plugins': get_option('plugins')}
|
2023-08-28 12:48:30 +03:00
|
|
|
|
summary_info += {'TCG debug enabled': get_option('debug_tcg')}
|
2024-03-12 03:23:30 +03:00
|
|
|
|
if have_linux_user or have_bsd_user
|
|
|
|
|
summary_info += {'syscall buffer debugging support': get_option('debug_remap')}
|
|
|
|
|
endif
|
2021-01-21 12:56:13 +03:00
|
|
|
|
endif
|
2021-01-21 12:56:12 +03:00
|
|
|
|
summary_info += {'target list': ' '.join(target_dirs)}
|
2021-01-21 12:56:13 +03:00
|
|
|
|
if have_system
|
|
|
|
|
summary_info += {'default devices': get_option('default_devices')}
|
2021-02-17 18:24:25 +03:00
|
|
|
|
summary_info += {'out of process emulation': multiprocess_allowed}
|
2022-06-13 23:26:24 +03:00
|
|
|
|
summary_info += {'vfio-user server': vfio_user_server_allowed}
|
2021-01-21 12:56:13 +03:00
|
|
|
|
endif
|
|
|
|
|
summary(summary_info, bool_yn: true, section: 'Targets and accelerators')
|
|
|
|
|
|
2021-01-21 12:56:14 +03:00
|
|
|
|
# Block layer
|
|
|
|
|
summary_info = {}
|
2022-10-12 14:19:35 +03:00
|
|
|
|
summary_info += {'coroutine backend': coroutine_backend}
|
2021-10-13 12:52:03 +03:00
|
|
|
|
summary_info += {'coroutine pool': have_coroutine_pool}
|
2021-01-21 12:56:14 +03:00
|
|
|
|
if have_block
|
2022-04-20 18:33:53 +03:00
|
|
|
|
summary_info += {'Block whitelist (rw)': get_option('block_drv_rw_whitelist')}
|
|
|
|
|
summary_info += {'Block whitelist (ro)': get_option('block_drv_ro_whitelist')}
|
2021-10-13 12:46:09 +03:00
|
|
|
|
summary_info += {'Use block whitelist in tools': get_option('block_drv_whitelist_in_tools')}
|
2023-05-11 17:12:34 +03:00
|
|
|
|
summary_info += {'VirtFS (9P) support': have_virtfs}
|
2021-10-13 12:43:54 +03:00
|
|
|
|
summary_info += {'replication support': config_host_data.get('CONFIG_REPLICATION')}
|
2021-10-13 12:42:25 +03:00
|
|
|
|
summary_info += {'bochs support': get_option('bochs').allowed()}
|
|
|
|
|
summary_info += {'cloop support': get_option('cloop').allowed()}
|
|
|
|
|
summary_info += {'dmg support': get_option('dmg').allowed()}
|
|
|
|
|
summary_info += {'qcow v1 support': get_option('qcow1').allowed()}
|
|
|
|
|
summary_info += {'vdi support': get_option('vdi').allowed()}
|
2023-04-21 12:27:58 +03:00
|
|
|
|
summary_info += {'vhdx support': get_option('vhdx').allowed()}
|
|
|
|
|
summary_info += {'vmdk support': get_option('vmdk').allowed()}
|
|
|
|
|
summary_info += {'vpc support': get_option('vpc').allowed()}
|
2021-10-13 12:42:25 +03:00
|
|
|
|
summary_info += {'vvfat support': get_option('vvfat').allowed()}
|
|
|
|
|
summary_info += {'qed support': get_option('qed').allowed()}
|
|
|
|
|
summary_info += {'parallels support': get_option('parallels').allowed()}
|
2021-06-03 12:24:56 +03:00
|
|
|
|
summary_info += {'FUSE exports': fuse}
|
2022-05-23 11:46:09 +03:00
|
|
|
|
summary_info += {'VDUSE block exports': have_vduse_blk_export}
|
2021-01-21 12:56:14 +03:00
|
|
|
|
endif
|
|
|
|
|
summary(summary_info, bool_yn: true, section: 'Block layer support')
|
|
|
|
|
|
2021-01-21 12:56:15 +03:00
|
|
|
|
# Crypto
|
2021-01-21 12:56:13 +03:00
|
|
|
|
summary_info = {}
|
2022-04-20 18:33:52 +03:00
|
|
|
|
summary_info += {'TLS priority': get_option('tls_priority')}
|
2021-06-03 12:24:56 +03:00
|
|
|
|
summary_info += {'GNUTLS support': gnutls}
|
|
|
|
|
if gnutls.found()
|
|
|
|
|
summary_info += {' GNUTLS crypto': gnutls_crypto.found()}
|
|
|
|
|
endif
|
|
|
|
|
summary_info += {'libgcrypt': gcrypt}
|
|
|
|
|
summary_info += {'nettle': nettle}
|
2021-06-03 12:15:26 +03:00
|
|
|
|
if nettle.found()
|
|
|
|
|
summary_info += {' XTS': xts != 'private'}
|
2020-02-03 15:28:38 +03:00
|
|
|
|
endif
|
2023-12-07 18:47:35 +03:00
|
|
|
|
summary_info += {'SM4 ALG support': crypto_sm4}
|
2021-11-08 16:02:42 +03:00
|
|
|
|
summary_info += {'AF_ALG support': have_afalg}
|
2021-10-13 12:46:09 +03:00
|
|
|
|
summary_info += {'rng-none': get_option('rng_none')}
|
2022-04-20 18:33:42 +03:00
|
|
|
|
summary_info += {'Linux keyring': have_keyring}
|
2023-08-24 12:42:08 +03:00
|
|
|
|
summary_info += {'Linux keyutils': keyutils}
|
2021-01-21 12:56:15 +03:00
|
|
|
|
summary(summary_info, bool_yn: true, section: 'Crypto')
|
|
|
|
|
|
2023-06-02 20:18:30 +03:00
|
|
|
|
# UI
summary_info = {}
if host_os == 'darwin'
  summary_info += {'Cocoa support': cocoa}
endif
summary_info += {'SDL support': sdl}
summary_info += {'SDL image support': sdl_image}
summary_info += {'GTK support': gtk}
summary_info += {'pixman': pixman}
summary_info += {'VTE support': vte}
summary_info += {'PNG support': png}
summary_info += {'VNC support': vnc}
if vnc.found()
  summary_info += {'VNC SASL support': sasl}
  summary_info += {'VNC JPEG support': jpeg}
endif
summary_info += {'spice protocol support': spice_protocol}
if spice_protocol.found()
  summary_info += {' spice server support': spice}
endif
summary_info += {'curses support': curses}
summary_info += {'brlapi support': brlapi}
summary(summary_info, bool_yn: true, section: 'User interface')

# Graphics backends
summary_info = {}
summary_info += {'VirGL support': virgl}
summary_info += {'Rutabaga support': rutabaga}
summary(summary_info, bool_yn: true, section: 'Graphics backends')

# Audio backends
summary_info = {}
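# Host-specific audio backends: OSS and sndio are reported on hosts other than
# Darwin, Haiku and Windows; CoreAudio only on Darwin; DirectSound only on Windows.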
if host_os not in ['darwin', 'haiku', 'windows']
  summary_info += {'OSS support': oss}
  summary_info += {'sndio support': sndio}
elif host_os == 'darwin'
  summary_info += {'CoreAudio support': coreaudio}
elif host_os == 'windows'
  summary_info += {'DirectSound support': dsound}
endif
if host_os == 'linux'
  summary_info += {'ALSA support': alsa}
  summary_info += {'PulseAudio support': pulse}
endif
summary_info += {'PipeWire support': pipewire}
summary_info += {'JACK support': jack}
summary(summary_info, bool_yn: true, section: 'Audio backends')

# Network backends
summary_info = {}
if host_os == 'darwin'
  summary_info += {'vmnet.framework support': vmnet}
endif
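# libxdp provides the Linux-only AF_XDP network backend, which exchanges
# packets with the NIC driver through an XDP socket instead of going through
# the kernel networking stack.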
summary_info += {'AF_XDP support': libxdp}
summary_info += {'slirp support': slirp}
summary_info += {'vde support': vde}
summary_info += {'netmap support': have_netmap}
summary_info += {'l2tpv3 support': have_l2tpv3}
summary(summary_info, bool_yn: true, section: 'Network backends')

# Libraries
summary_info = {}
summary_info += {'libtasn1': tasn1}
summary_info += {'PAM': pam}
summary_info += {'iconv support': iconv}
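# libblkio supplies the io_uring, nvme-io_uring, virtio-blk-vhost-user and
# virtio-blk-vhost-vdpa block drivers.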
summary_info += {'blkio support': blkio}
summary_info += {'curl support': curl}
summary_info += {'Multipath support': mpathpersist}
summary_info += {'Linux AIO support': libaio}
summary_info += {'Linux io_uring support': linux_io_uring}
summary_info += {'ATTR/XATTR support': libattr}
summary_info += {'RDMA support': rdma}
summary_info += {'fdt support': fdt_opt == 'internal' ? 'internal' : fdt}
summary_info += {'libcap-ng support': libcap_ng}
summary_info += {'bpf support': libbpf}
summary_info += {'rbd support': rbd}
summary_info += {'smartcard support': cacard}
summary_info += {'U2F support': u2f}
summary_info += {'libusb': libusb}
summary_info += {'usb net redir': usbredir}
summary_info += {'OpenGL support (epoxy)': opengl}
summary_info += {'GBM': gbm}
summary_info += {'libiscsi support': libiscsi}
summary_info += {'libnfs support': libnfs}
if host_os == 'windows'
  if have_ga
    summary_info += {'QGA VSS support': have_qga_vss}
  endif
endif
summary_info += {'seccomp support': seccomp}
summary_info += {'GlusterFS support': glusterfs}
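# hv-balloon implements the Hyper-V Dynamic Memory Protocol; it is only built
# when the GLib GTree operations it relies on are available.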
summary_info += {'hv-balloon support': hv_balloon}
summary_info += {'TPM support': have_tpm}
summary_info += {'libssh support': libssh}
summary_info += {'lzo support': lzo}
summary_info += {'snappy support': snappy}
summary_info += {'bzip2 support': libbzip2}
summary_info += {'lzfse support': liblzfse}
summary_info += {'zstd support': zstd}
summary_info += {'Query Processing Library support': qpl}
summary_info += {'UADK Library support': uadk}
summary_info += {'qatzip support': qatzip}
summary_info += {'NUMA host support': numa}
summary_info += {'capstone': capstone}
summary_info += {'libpmem support': libpmem}
summary_info += {'libdaxctl support': libdaxctl}
summary_info += {'libcbor support': libcbor}
summary_info += {'libudev': libudev}
# Dummy dependency, keep .found()
summary_info += {'FUSE lseek': fuse_lseek.found()}
summary_info += {'selinux': selinux}
summary_info += {'libdw': libdw}
if host_os == 'freebsd'
  summary_info += {'libinotify-kqueue': inotify}
endif
summary(summary_info, bool_yn: true, section: 'Dependencies')

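# Warn about host CPUs and operating systems that the QEMU project does not
# actively maintain; the build can still proceed, but support is best-effort.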
if host_arch == 'unknown'
  message()
  warning('UNSUPPORTED HOST CPU')
  message()
  message('Support for CPU host architecture ' + cpu + ' is not currently')
  message('maintained. The QEMU project does not guarantee that QEMU will')
  message('compile or work on this host CPU. You can help by volunteering')
  message('to maintain it and providing a build host for our continuous')
  message('integration setup.')
  if get_option('tcg').allowed() and target_dirs.length() > 0
    message()
    message('configure has succeeded and you can continue to build, but')
    message('QEMU will use a slow interpreter to emulate the target CPU.')
  endif
elif host_arch == 'mips'
  message()
  warning('DEPRECATED HOST CPU')
  message()
  message('Support for CPU host architecture ' + cpu + ' is going to be')
  message('dropped as soon as the QEMU project stops supporting Debian 12')
  message('("Bookworm"). Going forward, the QEMU project will not guarantee')
  message('that QEMU will compile or work on this host CPU.')
endif

if not supported_oses.contains(host_os)
  message()
  warning('UNSUPPORTED HOST OS')
  message()
  message('Support for host OS ' + host_os + ' is not currently maintained.')
  message('configure has succeeded and you can continue to build, but')
  message('the QEMU project does not guarantee that QEMU will compile or')
  message('work on this operating system. You can help by volunteering')
  message('to maintain it and providing a build host for our continuous')
  message('integration setup. This will ensure that future versions of QEMU')
  message('will keep working on ' + host_os + '.')
endif

if host_arch == 'unknown' or not supported_oses.contains(host_os)
  message()
  message('If you want to help supporting QEMU on this platform, please')
  message('contact the developers at qemu-devel@nongnu.org.')
endif
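
# Installs are relocatable by default: the installed binaries look up firmware
# and data files relative to their own location rather than through the
# absolute paths fixed at configure time. Configuring with --disable-relocatable
# keeps the build-time paths; the checks below warn when the requested
# relocation behaviour cannot be honoured.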
actually_reloc = get_option('relocatable')
# check if get_relocated_path() is actually able to relocate paths
if get_option('relocatable') and \
   not (get_option('prefix') / get_option('bindir')).startswith(get_option('prefix') / '')
  message()
  warning('bindir not included within prefix, the installation will not be relocatable.')
  actually_reloc = false
endif

if not actually_reloc and (host_os == 'windows' or get_option('relocatable'))
  if host_os == 'windows'
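    # Windows builds are normally unpacked at an arbitrary location by the
    # user, so they are expected to stay relocatable.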
    message()
    warning('Windows installs should usually be relocatable.')
  endif
  message()
  message('QEMU will have to be installed under ' + get_option('prefix') + '.')
  message('Use --disable-relocatable to remove this warning.')
endif