Testing and misc build updates:

- tests/vm support for aarch64 VMs
   - tests/tcg better cross-compiler detection
   - update docker tooling to support registries
   - update docker support for xtensa
   - gitlab build docker images and store in registry
   - gitlab use docker images for builds
   - a number of skipIf updates to support move
   - linux-user MAP_FIXED_NOREPLACE fix
   - qht-bench compiler tweaks
   - configure fix for secret keyring
   - tsan fiber annotation clean-up
   - doc updates for mttcg/icount/gdbstub
   - fix cirrus to use brew bash for iotests
   - revert virtio-gpu breakage
   - fix LC_ALL to avoid sorting changes in iotests
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCgAdFiEEZoWumedRZ7yvyN81+9DbCVqeKkQFAl8J0yoACgkQ+9DbCVqe
 KkSzTAf/Vn+9TU8Qt7nZvl7W4tz7Sy5K8EJGwj2RXx6CWWWLiFbsXurIM8Krw5Vc
 RmvUxwa359b+J0lQpfeNDHYm1nM8RZLFlkG0a5bl0I8sW0EcPjBRtwNaGKXh2p0u
 u2RS2QAi6A9AvYT4ZREYlBM+o9WzbxCEQm4s8fr6WEJCQfxBnb5/bGiEjWR64e8C
 j9Kvou+zAKfVizbQMtu+mwqjsoPtcS1b3vVcO7anhNuUsuaEKkS0dFWzWvw3lwJR
 STIYnb8Y/eJ1yKr0hPH2qtWv3n6yhlYvYmpUCH6AwshGMUoeFEzR2VoWS6yZPGG6
 na6XA3UW5R9AxIDfkCJ5ueeo8t9xMQ==
 =HRWa
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/stsquad/tags/pull-testing-and-misc-110720-2' into staging

Testing and misc build updates:

  - tests/vm support for aarch64 VMs
  - tests/tcg better cross-compiler detection
  - update docker tooling to support registries
  - update docker support for xtensa
  - gitlab build docker images and store in registry
  - gitlab use docker images for builds
  - a number of skipIf updates to support move
  - linux-user MAP_FIXED_NOREPLACE fix
  - qht-bench compiler tweaks
  - configure fix for secret keyring
  - tsan fiber annotation clean-up
  - doc updates for mttcg/icount/gdbstub
  - fix cirrus to use brew bash for iotests
  - revert virtio-gpu breakage
  - fix LC_ALL to avoid sorting changes in iotests

# gpg: Signature made Sat 11 Jul 2020 15:56:42 BST
# gpg:                using RSA key 6685AE99E75167BCAFC8DF35FBD0DB095A9E2A44
# gpg: Good signature from "Alex Bennée (Master Work Key) <alex.bennee@linaro.org>" [full]
# Primary key fingerprint: 6685 AE99 E751 67BC AFC8  DF35 FBD0 DB09 5A9E 2A44

* remotes/stsquad/tags/pull-testing-and-misc-110720-2: (50 commits)
  iotests: Set LC_ALL=C for sort
  Revert "vga: build virtio-gpu as module"
  tests: fix "make check-qtest" for modular builds
  .cirrus.yml: add bash to the brew packages
  tests/docker: update toolchain set in debian-xtensa-cross
  tests/docker: fall back more gracefully when pull fails
  docs: Add to gdbstub documentation the PhyMemMode
  docs/devel: add some notes on tcg-icount for developers
  docs/devel: convert and update MTTCG design document
  tests/qht-bench: Adjust threshold computation
  tests/qht-bench: Adjust testing rate by -1
  travis.yml: Test also the other targets on s390x
  shippable: pull images from registry instead of building
  testing: add check-build target
  containers.yml: build with docker.py tooling
  gitlab: limit re-builds of the containers
  tests: improve performance of device-introspect-test
  gitlab: add avocado asset caching
  gitlab: enable check-tcg for linux-user tests
  linux-user/elfload: use MAP_FIXED_NOREPLACE in pgb_reserved_va
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Peter Maydell 2020-07-12 15:32:05 +01:00
commit 9f526fce49
76 changed files with 2027 additions and 441 deletions

View File

@ -20,7 +20,7 @@ macos_task:
osx_instance:
image: mojave-base
install_script:
- brew install pkg-config python gnu-sed glib pixman make sdl2
- brew install pkg-config python gnu-sed glib pixman make sdl2 bash
script:
- mkdir build
- cd build
@ -33,7 +33,7 @@ macos_xcode_task:
# this is an alias for the latest Xcode
image: mojave-xcode
install_script:
- brew install pkg-config gnu-sed glib pixman make sdl2
- brew install pkg-config gnu-sed glib pixman make sdl2 bash
script:
- mkdir build
- cd build

1
.gitignore vendored
View File

@ -93,6 +93,7 @@
*.tp
*.vr
*.d
!/.gitlab-ci.d
!/scripts/qemu-guest-agent/fsfreeze-hook.d
*.o
.sdk

263
.gitlab-ci.d/containers.yml Normal file
View File

@ -0,0 +1,263 @@
.container_job_template: &container_job_definition
image: docker:stable
stage: containers
services:
- docker:dind
before_script:
- export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest"
- export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/$NAME:latest"
- apk add python3
- docker info
- docker login registry.gitlab.com -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
script:
- echo "TAG:$TAG"
- echo "COMMON_TAG:$COMMON_TAG"
- docker pull "$TAG" || docker pull "$COMMON_TAG" || true
- ./tests/docker/docker.py --engine docker build
-t "qemu/$NAME" -f "tests/docker/dockerfiles/$NAME.docker"
-r $CI_REGISTRY_IMAGE
- docker tag "qemu/$NAME" "$TAG"
- docker push "$TAG"
after_script:
- docker logout
rules:
- changes:
- .gitlab-ci.d/containers.yml
- tests/docker/*
- if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
- if: '$CI_COMMIT_REF_NAME == "testing/next"'
amd64-centos7-container:
<<: *container_job_definition
variables:
NAME: centos7
amd64-centos8-container:
<<: *container_job_definition
variables:
NAME: centos8
amd64-debian10-container:
<<: *container_job_definition
variables:
NAME: debian10
amd64-debian11-container:
<<: *container_job_definition
variables:
NAME: debian11
amd64-debian9-container:
<<: *container_job_definition
variables:
NAME: debian9
amd64-debian9-mxe-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian9-container']
variables:
NAME: debian9-mxe
alpha-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-alpha-cross
amd64-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-amd64-cross
amd64-debian-user-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-all-test-cross
amd64-debian-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-amd64
arm64-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-arm64-cross
arm64-test-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian11-container']
variables:
NAME: debian-arm64-test-cross
armel-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-armel-cross
armhf-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-armhf-cross
hppa-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-hppa-cross
m68k-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-m68k-cross
mips64-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-mips64-cross
mips64el-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-mips64el-cross
mips-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-mips-cross
mipsel-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-mipsel-cross
powerpc-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-powerpc-cross
ppc64-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-ppc64-cross
ppc64el-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-ppc64el-cross
riscv64-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-riscv64-cross
s390x-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-s390x-cross
sh4-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-sh4-cross
sparc64-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian10-container']
variables:
NAME: debian-sparc64-cross
tricore-debian-cross-container:
<<: *container_job_definition
stage: containers-layer2
needs: ['amd64-debian9-container']
variables:
NAME: debian-tricore-cross
win32-debian-cross-container:
<<: *container_job_definition
stage: containers-layer3
needs: ['amd64-debian9-mxe-container']
variables:
NAME: debian-win32-cross
win64-debian-cross-container:
<<: *container_job_definition
stage: containers-layer3
needs: ['amd64-debian9-mxe-container']
variables:
NAME: debian-win64-cross
xtensa-debian-cross-container:
<<: *container_job_definition
variables:
NAME: debian-xtensa-cross
cris-fedora-cross-container:
<<: *container_job_definition
variables:
NAME: fedora-cris-cross
amd64-fedora-container:
<<: *container_job_definition
variables:
NAME: fedora
i386-fedora-cross-container:
<<: *container_job_definition
variables:
NAME: fedora-i386-cross
amd64-ubuntu1804-container:
<<: *container_job_definition
variables:
NAME: ubuntu1804
amd64-ubuntu2004-container:
<<: *container_job_definition
variables:
NAME: ubuntu2004
amd64-ubuntu-container:
<<: *container_job_definition
variables:
NAME: ubuntu

View File

@ -1,8 +1,8 @@
docker-edk2:
stage: build
stage: containers
rules: # Only run this job when the Dockerfile is modified
- changes:
- .gitlab-ci-edk2.yml
- .gitlab-ci.d/edk2.yml
- .gitlab-ci.d/edk2/Dockerfile
when: always
image: docker:19.03.1
@ -24,6 +24,7 @@ docker-edk2:
- docker push $IMAGE_TAG
build-edk2:
stage: build
rules: # Only run this job when ...
- changes: # ... roms/edk2/ is modified (submodule updated)
- roms/edk2/*

View File

@ -1,8 +1,8 @@
docker-opensbi:
stage: build
stage: containers
rules: # Only run this job when the Dockerfile is modified
- changes:
- .gitlab-ci-opensbi.yml
- .gitlab-ci.d/opensbi.yml
- .gitlab-ci.d/opensbi/Dockerfile
when: always
image: docker:19.03.1
@ -24,6 +24,7 @@ docker-opensbi:
- docker push $IMAGE_TAG
build-opensbi:
stage: build
rules: # Only run this job when ...
- changes: # ... roms/opensbi/ is modified (submodule updated)
- roms/opensbi/*

View File

@ -1,74 +1,141 @@
# Currently we have two build stages after our containers are built:
# - build (for traditional build and test or first stage build)
# - test (for test stages, using build artefacts from a build stage)
stages:
- containers
- containers-layer2
- containers-layer3
- build
- test
# We assume GitLab has its own caching set up for RPM/APT repositories so we
# just take care of avocado assets here.
cache:
paths:
- $HOME/avocado/data/cache
include:
- local: '/.gitlab-ci.d/edk2.yml'
- local: '/.gitlab-ci.d/opensbi.yml'
- local: '/.gitlab-ci.d/containers.yml'
.update_apt_template: &before_script_apt
.native_build_job_template: &native_build_job_definition
stage: build
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
before_script:
- apt-get update -qq
- apt-get install -y -qq git gcc libglib2.0-dev libpixman-1-dev make
genisoimage
- JOBS=$(expr $(nproc) + 1)
.update_dnf_template: &before_script_dnf
before_script:
- dnf update -y
- dnf install -y bzip2 diffutils gcc git genisoimage findutils glib2-devel
make python3 perl-podlators perl-Test-Harness pixman-devel zlib-devel
- JOBS=$(expr $(nproc) + 1)
build-system1:
image: ubuntu:19.10
<<: *before_script_apt
script:
- apt-get install -y -qq libgtk-3-dev libvte-dev nettle-dev libcacard-dev
libusb-dev libvde-dev libspice-protocol-dev libgl1-mesa-dev libvdeplug-dev
- mkdir build
- cd build
- ../configure --enable-werror --target-list="aarch64-softmmu alpha-softmmu
cris-softmmu hppa-softmmu lm32-softmmu moxie-softmmu microblazeel-softmmu
mips64el-softmmu m68k-softmmu ppc-softmmu riscv64-softmmu sparc-softmmu"
- if test -n "$TARGETS";
then
../configure --enable-werror $CONFIGURE_ARGS --target-list="$TARGETS" ;
else
../configure --enable-werror $CONFIGURE_ARGS ;
fi
- make -j"$JOBS"
- make -j"$JOBS" check
- if test -n "$MAKE_CHECK_ARGS";
then
make -j"$JOBS" $MAKE_CHECK_ARGS ;
fi
build-system2:
image: fedora:latest
<<: *before_script_dnf
.native_test_job_template: &native_test_job_definition
stage: test
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
script:
- yum install -y SDL2-devel libgcrypt-devel brlapi-devel libaio-devel
libfdt-devel lzo-devel librdmacm-devel libibverbs-devel libibumad-devel
libzstd-devel
- mkdir build
- cd build
- ../configure --enable-werror --target-list="tricore-softmmu unicore32-softmmu
microblaze-softmmu mips-softmmu riscv32-softmmu s390x-softmmu sh4-softmmu
sparc64-softmmu x86_64-softmmu xtensa-softmmu nios2-softmmu or1k-softmmu"
- make -j"$JOBS"
- make -j"$JOBS" check
- find . -type f -exec touch {} +
- make $MAKE_CHECK_ARGS
.post_acceptance_template: &post_acceptance
after_script:
- cd build
- python3 -c 'import json; r = json.load(open("tests/results/latest/results.json")); [print(t["logfile"]) for t in r["tests"] if t["status"] not in ("PASS", "SKIP")]' | xargs cat
- du -chs $HOME/avocado/data/cache
build-system-ubuntu-main:
<<: *native_build_job_definition
variables:
IMAGE: ubuntu2004
TARGETS: aarch64-softmmu alpha-softmmu cris-softmmu hppa-softmmu lm32-softmmu
moxie-softmmu microblazeel-softmmu mips64el-softmmu m68k-softmmu ppc-softmmu
riscv64-softmmu sparc-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
paths:
- build
check-system-ubuntu-main:
<<: *native_test_job_definition
needs:
- job: build-system-ubuntu-main
artifacts: true
variables:
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check
acceptance-system-ubuntu-main:
<<: *native_test_job_definition
needs:
- job: build-system-ubuntu-main
artifacts: true
variables:
IMAGE: ubuntu2004
MAKE_CHECK_ARGS: check-acceptance
<<: *post_acceptance
build-system-fedora-alt:
<<: *native_build_job_definition
variables:
IMAGE: fedora
TARGETS: tricore-softmmu unicore32-softmmu microblaze-softmmu mips-softmmu
riscv32-softmmu s390x-softmmu sh4-softmmu sparc64-softmmu x86_64-softmmu
xtensa-softmmu nios2-softmmu or1k-softmmu
MAKE_CHECK_ARGS: check-build
artifacts:
paths:
- build
check-system-fedora-alt:
<<: *native_test_job_definition
needs:
- job: build-system-fedora-alt
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check
acceptance-system-fedora-alt:
<<: *native_test_job_definition
needs:
- job: build-system-fedora-alt
artifacts: true
variables:
IMAGE: fedora
MAKE_CHECK_ARGS: check-acceptance
<<: *post_acceptance
build-disabled:
image: fedora:latest
<<: *before_script_dnf
script:
- mkdir build
- cd build
- ../configure --enable-werror --disable-rdma --disable-slirp --disable-curl
<<: *native_build_job_definition
variables:
IMAGE: fedora
CONFIGURE_ARGS: --disable-rdma --disable-slirp --disable-curl
--disable-capstone --disable-live-block-migration --disable-glusterfs
--disable-replication --disable-coroutine-pool --disable-smartcard
--disable-guest-agent --disable-curses --disable-libxml2 --disable-tpm
--disable-qom-cast-debug --disable-spice --disable-vhost-vsock
--disable-vhost-net --disable-vhost-crypto --disable-vhost-user
--target-list="i386-softmmu ppc64-softmmu mips64-softmmu i386-linux-user"
- make -j"$JOBS"
- make -j"$JOBS" check-qtest SPEED=slow
TARGETS: i386-softmmu ppc64-softmmu mips64-softmmu i386-linux-user
MAKE_CHECK_ARGS: check-qtest SPEED=slow
build-tcg-disabled:
image: centos:8
<<: *before_script_dnf
<<: *native_build_job_definition
variables:
IMAGE: centos8
script:
- dnf install -y clang gtk3-devel libusbx-devel libgcrypt-devel
- mkdir build
- cd build
- ../configure --cc=clang --enable-werror --disable-tcg --audio-drv-list=""
- ../configure --disable-tcg --audio-drv-list=""
- make -j"$JOBS"
- make check-unit
- make check-qapi-schema
@ -82,32 +149,25 @@ build-tcg-disabled:
260 261 262 263 264 270 272 273 277 279
build-user:
<<: *before_script_apt
script:
- mkdir build
- cd build
- ../configure --enable-werror --disable-system --disable-guest-agent
--disable-capstone --disable-slirp --disable-fdt
- make -j"$JOBS"
- make run-tcg-tests-i386-linux-user run-tcg-tests-x86_64-linux-user
<<: *native_build_job_definition
variables:
IMAGE: debian-all-test-cross
CONFIGURE_ARGS: --disable-tools --disable-system
MAKE_CHECK_ARGS: check-tcg
build-clang:
image: fedora:latest
<<: *before_script_dnf
script:
- yum install -y clang SDL2-devel libattr-devel libcap-ng-devel xfsprogs-devel
libiscsi-devel libnfs-devel libseccomp-devel gnutls-devel librbd-devel
- mkdir build
- cd build
- ../configure --cc=clang --cxx=clang++ --enable-werror
--target-list="alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu
ppc-softmmu s390x-softmmu x86_64-softmmu arm-linux-user"
- make -j"$JOBS"
- make -j"$JOBS" check
<<: *native_build_job_definition
variables:
IMAGE: fedora
CONFIGURE_ARGS: --cc=clang --cxx=clang++
TARGETS: alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu
ppc-softmmu s390x-softmmu x86_64-softmmu arm-linux-user
MAKE_CHECK_ARGS: check
build-tci:
image: centos:8
<<: *before_script_dnf
<<: *native_build_job_definition
variables:
IMAGE: fedora
script:
- TARGETS="aarch64 alpha arm hppa m68k microblaze moxie ppc64 s390x x86_64"
- mkdir build
@ -123,5 +183,4 @@ build-tci:
./tests/qtest/cdrom-test || exit 1 ;
done
- QTEST_QEMU_BINARY="x86_64-softmmu/qemu-system-x86_64" ./tests/qtest/pxe-test
- QTEST_QEMU_BINARY="s390x-softmmu/qemu-system-s390x"
./tests/qtest/pxe-test -m slow
- QTEST_QEMU_BINARY="s390x-softmmu/qemu-system-s390x" ./tests/qtest/pxe-test -m slow

View File

@ -26,12 +26,10 @@ env:
- IMAGE=debian-ppc64el-cross
TARGET_LIST=ppc64-softmmu,ppc64-linux-user,ppc64abi32-linux-user
build:
pre_ci:
- make docker-image-${IMAGE} V=1
pre_ci_boot:
image_name: qemu
image_tag: ${IMAGE}
pull: false
image_name: registry.gitlab.com/qemu-project/qemu/${IMAGE}
image_tag: latest
pull: true
options: "-e HOME=/root"
ci:
- unset CC

View File

@ -289,29 +289,6 @@ jobs:
python: 3.6
# Acceptance (Functional) tests
- name: "GCC check-acceptance"
dist: bionic
env:
- CONFIG="--enable-tools --target-list=aarch64-softmmu,alpha-softmmu,arm-softmmu,m68k-softmmu,microblaze-softmmu,mips-softmmu,mips64el-softmmu,nios2-softmmu,or1k-softmmu,ppc-softmmu,ppc64-softmmu,s390x-softmmu,sh4-softmmu,sparc-softmmu,x86_64-softmmu,xtensa-softmmu"
- TEST_CMD="make check-acceptance"
- CACHE_NAME="${TRAVIS_BRANCH}-linux-gcc-acceptance"
after_script:
- python3 -c 'import json; r = json.load(open("tests/results/latest/results.json")); [print(t["logfile"]) for t in r["tests"] if t["status"] not in ("PASS", "SKIP")]' | xargs cat
- du -chs $HOME/avocado/data/cache
addons:
apt:
packages:
- python3-pil
- python3-pip
- python3-numpy
- python3-opencv
- python3-venv
- rpm2cpio
- tesseract-ocr
- tesseract-ocr-eng
# Using newer GCC with sanitizers
- name: "GCC9 with sanitizers (softmmu)"
addons:
@ -505,6 +482,45 @@ jobs:
$(exit $BUILD_RC);
fi
- name: "[s390x] GCC (other-softmmu)"
arch: s390x
dist: bionic
addons:
apt_packages:
- libaio-dev
- libattr1-dev
- libcap-ng-dev
- libgnutls28-dev
- libiscsi-dev
- liblttng-ust-dev
- liblzo2-dev
- libncurses-dev
- libnfs-dev
- libnss3-dev
- libpixman-1-dev
- libsdl2-dev
- libsdl2-image-dev
- libseccomp-dev
- libsnappy-dev
- libzstd-dev
- nettle-dev
- xfslibs-dev
# Tests dependencies
- genisoimage
env:
- CONFIG="--disable-containers --audio-drv-list=sdl --disable-user
--target-list-exclude=${MAIN_SOFTMMU_TARGETS}"
- name: "[s390x] GCC (user)"
arch: s390x
dist: bionic
addons:
apt_packages:
- libgcrypt20-dev
- libgnutls28-dev
env:
- CONFIG="--disable-containers --disable-system"
- name: "[s390x] Clang (disable-tcg)"
arch: s390x
dist: bionic

31
configure vendored
View File

@ -418,6 +418,7 @@ prefix="/usr/local"
mandir="\${prefix}/share/man"
datadir="\${prefix}/share"
firmwarepath="\${prefix}/share/qemu-firmware"
efi_aarch64=""
qemu_docdir="\${prefix}/share/doc/qemu"
bindir="\${prefix}/bin"
libdir="\${prefix}/lib"
@ -960,6 +961,13 @@ do
fi
done
# Check for existence of python3 yaml, needed to
# import yaml config files into vm-build.
python_yaml="no"
if $(python3 -c "import yaml" 2> /dev/null); then
python_yaml="yes"
fi
: ${smbd=${SMBD-/usr/sbin/smbd}}
# Default objcc to clang if available, otherwise use CC
@ -1102,6 +1110,8 @@ for opt do
;;
--firmwarepath=*) firmwarepath="$optarg"
;;
--efi-aarch64=*) efi_aarch64="$optarg"
;;
--host=*|--build=*|\
--disable-dependency-tracking|\
--sbindir=*|--sharedstatedir=*|\
@ -1784,6 +1794,7 @@ Advanced options (experts only):
--sysconfdir=PATH install config in PATH$confsuffix
--localstatedir=PATH install local state in PATH (set at runtime on win32)
--firmwarepath=PATH search PATH for firmware files
--efi-aarch64=PATH PATH of efi file to use for aarch64 VMs.
--with-confsuffix=SUFFIX suffix for QEMU data inside datadir/libdir/sysconfdir [$confsuffix]
--with-pkgversion=VERS use specified string as sub-version of the package
--enable-debug enable common debug build options
@ -3620,6 +3631,20 @@ EOF
fi
fi
############################################
# efi-aarch64 probe
# Check for efi files needed by aarch64 VMs.
# By default we will use the efi included with QEMU.
# Allow user to override the path for efi also.
if ! test -f "$efi_aarch64"; then
if test -f $source_path/pc-bios/edk2-aarch64-code.fd.bz2; then
# valid after build
efi_aarch64=$PWD/pc-bios/edk2-aarch64-code.fd
else
efi_aarch64=""
fi
fi
##########################################
# libcap-ng library probe
if test "$cap_ng" != "no" ; then
@ -6486,7 +6511,7 @@ EOF
fi
if test "$secret_keyring" != "no"
then
if test "$have_keyring" == "yes"
if test "$have_keyring" = "yes"
then
secret_keyring=yes
else
@ -6868,6 +6893,8 @@ if test "$docs" != "no"; then
echo "sphinx-build $sphinx_build"
fi
echo "genisoimage $genisoimage"
echo "efi_aarch64 $efi_aarch64"
echo "python_yaml $python_yaml"
echo "slirp support $slirp $(echo_version $slirp $slirp_version)"
if test "$slirp" != "no" ; then
echo "smbd $smbd"
@ -7966,6 +7993,8 @@ echo "PYTHON=$python" >> $config_host_mak
echo "SPHINX_BUILD=$sphinx_build" >> $config_host_mak
echo "SPHINX_WERROR=$sphinx_werror" >> $config_host_mak
echo "GENISOIMAGE=$genisoimage" >> $config_host_mak
echo "EFI_AARCH64=$efi_aarch64" >> $config_host_mak
echo "PYTHON_YAML=$python_yaml" >> $config_host_mak
echo "CC=$cc" >> $config_host_mak
if $iasl -h > /dev/null 2>&1; then
echo "IASL=$iasl" >> $config_host_mak

View File

@ -23,6 +23,8 @@ Contents:
decodetree
secure-coding-practices
tcg
tcg-icount
multi-thread-tcg
tcg-plugins
bitops
reset

View File

@ -1,15 +1,17 @@
Copyright (c) 2015-2016 Linaro Ltd.
..
Copyright (c) 2015-2020 Linaro Ltd.
This work is licensed under the terms of the GNU GPL, version 2 or
later. See the COPYING file in the top-level directory.
This work is licensed under the terms of the GNU GPL, version 2 or
later. See the COPYING file in the top-level directory.
Introduction
============
This document outlines the design for multi-threaded TCG system-mode
emulation. The current user-mode emulation mirrors the thread
structure of the translated executable. Some of the work will be
applicable to both system and linux-user emulation.
This document outlines the design for multi-threaded TCG (a.k.a. MTTCG)
system-mode emulation. user-mode emulation has always mirrored the
thread structure of the translated executable although some of the
changes done for MTTCG system emulation have improved the stability of
linux-user emulation.
The original system-mode TCG implementation was single threaded and
dealt with multiple CPUs with simple round-robin scheduling. This
@ -21,9 +23,18 @@ vCPU Scheduling
===============
We introduce a new running mode where each vCPU will run on its own
user-space thread. This will be enabled by default for all FE/BE
combinations that have had the required work done to support this
safely.
user-space thread. This is enabled by default for all FE/BE
combinations where the host memory model is able to accommodate the
guest (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO is zero) and the
guest has had the required work done to support this safely
(TARGET_SUPPORTS_MTTCG).
System emulation will fall back to the original round robin approach
if:
* forced by --accel tcg,thread=single
* enabling --icount mode
* 64 bit guests on 32 bit hosts (TCG_OVERSIZED_GUEST)
In the general case of running translated code there should be no
inter-vCPU dependencies and all vCPUs should be able to run at full
@ -61,7 +72,9 @@ have their block-to-block jumps patched.
Global TCG State
----------------
### User-mode emulation
User-mode emulation
~~~~~~~~~~~~~~~~~~~
We need to protect the entire code generation cycle including any post
generation patching of the translated code. This also implies a shared
translation buffer which contains code running on all cores. Any
@ -78,9 +91,11 @@ patching.
Code generation is serialised with mmap_lock().
### !User-mode emulation
!User-mode emulation
~~~~~~~~~~~~~~~~~~~~
Each vCPU has its own TCG context and associated TCG region, thereby
requiring no locking.
requiring no locking during translation.
Translation Blocks
------------------
@ -92,6 +107,7 @@ including:
- debugging operations (breakpoint insertion/removal)
- some CPU helper functions
- linux-user spawning its first thread
This is done with the async_safe_run_on_cpu() mechanism to ensure all
vCPUs are quiescent when changes are being made to shared global
@ -250,8 +266,10 @@ to enforce a particular ordering of memory operations from the point
of view of external observers (e.g. another processor core). They can
apply to any memory operations as well as just loads or stores.
The Linux kernel has an excellent write-up on the various forms of
memory barrier and the guarantees they can provide [1].
The Linux kernel has an excellent `write-up
<https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/Documentation/memory-barriers.txt>`
on the various forms of memory barrier and the guarantees they can
provide.
Barriers are often wrapped around synchronisation primitives to
provide explicit memory ordering semantics. However they can be used
@ -352,7 +370,3 @@ an exclusive lock which ensures all emulation is serialised.
While the atomic helpers look good enough for now there may be a need
to look at solutions that can more closely model the guest
architecture's semantics.
==========
[1] https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/Documentation/memory-barriers.txt

97
docs/devel/tcg-icount.rst Normal file
View File

@ -0,0 +1,97 @@
..
Copyright (c) 2020, Linaro Limited
Written by Alex Bennée
========================
TCG Instruction Counting
========================
TCG has long supported a feature known as icount which allows for
instruction counting during execution. This should not be confused
with cycle accurate emulation - QEMU does not attempt to emulate how
long an instruction would take on real hardware. That is a job for
other more detailed (and slower) tools that simulate the rest of a
micro-architecture.
This feature is only available for system emulation and is
incompatible with multi-threaded TCG. It can be used to better align
execution time with wall-clock time so a "slow" device doesn't run too
fast on modern hardware. It can also provide for a degree of
deterministic execution and is an essential part of the record/replay
support in QEMU.
Core Concepts
=============
At its heart icount is simply a count of executed instructions which
is stored in the TimersState of QEMU's timer sub-system. The number of
executed instructions can then be used to calculate QEMU_CLOCK_VIRTUAL
which represents the amount of elapsed time in the system since
execution started. Depending on the icount mode this may either be a
fixed number of ns per instruction or adjusted as execution continues
to keep wall clock time and virtual time in sync.
To be able to calculate the number of executed instructions the
translator starts by allocating a budget of instructions to be
executed. The budget of instructions is limited by how long it will be
until the next timer will expire. We store this budget as part of a
vCPU icount_decr field which is shared with the machinery for handling
cpu_exit(). The whole field is checked at the start of every
translated block and will cause a return to the outer loop to deal
with whatever caused the exit.
In the case of icount, before the flag is checked we subtract the
number of instructions the translation block would execute. If this
would cause the instruction budget to go negative we exit the main
loop and regenerate a new translation block with exactly the right
number of instructions to take the budget to 0 meaning whatever timer
was due to expire will expire exactly when we exit the main run loop.
Dealing with MMIO
-----------------
While we can adjust the instruction budget for known events like timer
expiry we cannot do the same for MMIO. Every load/store we execute
might potentially trigger an I/O event, at which point we will need an
up to date and accurate reading of the icount number.
To deal with this case, when an I/O access is made we:
- restore un-executed instructions to the icount budget
- re-compile a single [1]_ instruction block for the current PC
- exit the cpu loop and execute the re-compiled block
The new block is created with the CF_LAST_IO compile flag which
ensures the final instruction translation starts with a call to
gen_io_start() so we don't enter a perpetual loop constantly
recompiling a single instruction block. For translators using the
common translator_loop this is done automatically.
.. [1] sometimes two instructions if dealing with delay slots
Other I/O operations
--------------------
MMIO isn't the only type of operation for which we might need a
correct and accurate clock. IO port instructions and accesses to
system registers are the common examples here. These instructions have
to be handled by the individual translators which have the knowledge
of which operations are I/O operations.
When the translator is handling an instruction of this kind:
* it must call gen_io_start() if icount is enabled, at some
point before the generation of the code which actually does
the I/O, using a code fragment similar to:
.. code:: c
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
* it must end the TB immediately after this instruction
Note that some older front-ends call a "gen_io_end()" function:
this is obsolete and should not be used.

View File

@ -87,3 +87,23 @@ three commands you can query and set the single step behavior:
(gdb) maintenance packet Qqemu.sstep=0x5
sending: "qemu.sstep=0x5"
received: "OK"
Another feature that QEMU gdbstub provides is to toggle the memory GDB
works with. By default GDB will show the current process memory respecting
the virtual address translation.
If you want to examine/change the physical memory you can set the gdbstub
to work with the physical memory rather than with the virtual one.
The memory mode can be checked by sending the following command:
``maintenance packet qqemu.PhyMemMode``
This will return either 0 or 1; 1 indicates you are currently in the
physical memory mode.
``maintenance packet Qqemu.PhyMemMode:1``
This will change the memory mode to physical memory.
``maintenance packet Qqemu.PhyMemMode:0``
This will change it back to normal memory mode.

View File

@ -49,19 +49,16 @@ common-obj-m += qxl.mo
qxl.mo-objs = qxl.o qxl-logger.o qxl-render.o
endif
ifeq ($(CONFIG_VIRTIO_GPU),y)
common-obj-m += virtio-gpu.mo
virtio-gpu-obj-$(CONFIG_VIRTIO_GPU) += virtio-gpu-base.o virtio-gpu.o virtio-gpu-3d.o
virtio-gpu-obj-$(CONFIG_VHOST_USER_GPU) += vhost-user-gpu.o
virtio-gpu-obj-$(call land,$(CONFIG_VIRTIO_GPU),$(CONFIG_VIRTIO_PCI)) += virtio-gpu-pci.o
virtio-gpu-obj-$(call land,$(CONFIG_VHOST_USER_GPU),$(CONFIG_VIRTIO_PCI)) += vhost-user-gpu-pci.o
virtio-gpu-obj-$(CONFIG_VIRTIO_VGA) += virtio-vga.o
virtio-gpu-obj-$(CONFIG_VHOST_USER_VGA) += vhost-user-vga.o
virtio-gpu.mo-objs := $(virtio-gpu-obj-y)
virtio-gpu.mo-cflags := $(VIRGL_CFLAGS)
virtio-gpu.mo-libs := $(VIRGL_LIBS)
endif
common-obj-$(CONFIG_VIRTIO_GPU) += virtio-gpu-base.o virtio-gpu.o virtio-gpu-3d.o
common-obj-$(CONFIG_VHOST_USER_GPU) += vhost-user-gpu.o
common-obj-$(call land,$(CONFIG_VIRTIO_GPU),$(CONFIG_VIRTIO_PCI)) += virtio-gpu-pci.o
common-obj-$(call land,$(CONFIG_VHOST_USER_GPU),$(CONFIG_VIRTIO_PCI)) += vhost-user-gpu-pci.o
common-obj-$(CONFIG_VIRTIO_VGA) += virtio-vga.o
common-obj-$(CONFIG_VHOST_USER_VGA) += vhost-user-vga.o
virtio-gpu.o-cflags := $(VIRGL_CFLAGS)
virtio-gpu.o-libs += $(VIRGL_LIBS)
virtio-gpu-3d.o-cflags := $(VIRGL_CFLAGS)
virtio-gpu-3d.o-libs += $(VIRGL_LIBS)
common-obj-$(CONFIG_DPCD) += dpcd.o
common-obj-$(CONFIG_XLNX_ZYNQMP_ARM) += xlnx_dp.o

View File

@ -2294,7 +2294,7 @@ static void pgb_dynamic(const char *image_name, long align)
static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
abi_ulong guest_hiaddr, long align)
{
const int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
void *addr, *test;
if (guest_hiaddr > reserved_va) {
@ -2307,15 +2307,19 @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
/* Widen the "image" to the entire reserved address space. */
pgb_static(image_name, 0, reserved_va, align);
#ifdef MAP_FIXED_NOREPLACE
flags |= MAP_FIXED_NOREPLACE;
#endif
/* Reserve the memory on the host. */
assert(guest_base != 0);
test = g2h(0);
addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
if (addr == MAP_FAILED) {
error_report("Unable to reserve 0x%lx bytes of virtual address "
"space for use as guest address space (check your "
"space (%s) for use as guest address space (check your "
"virtual memory ulimit setting or reserve less "
"using -R option)", reserved_va);
"using -R option)", reserved_va, strerror(errno));
exit(EXIT_FAILURE);
}
assert(addr == test);

View File

@ -0,0 +1,110 @@
#!/usr/bin/env python3
#
# This python module implements a ConsoleSocket object which is
# designed always drain the socket itself, and place
# the bytes into a in memory buffer for later processing.
#
# Optionally a file path can be passed in and we will also
# dump the characters to this file for debug.
#
# Copyright 2020 Linaro
#
# Authors:
# Robert Foley <robert.foley@linaro.org>
#
# This code is licensed under the GPL version 2 or later. See
# the COPYING file in the top-level directory.
#
import asyncore
import socket
import threading
import io
import os
import sys
from collections import deque
import time
import traceback
class ConsoleSocket(asyncore.dispatcher):
    """
    Drain a console socket on a background thread into an in-memory
    buffer, optionally mirroring every received character to a log file.

    Exposes recv()/settimeout()/set_blocking()/close() so it can be used
    as a drop-in replacement for a plain socket by console readers.
    """

    def __init__(self, address, file=None):
        """
        Connect to a UNIX-domain console socket and start draining it.

        @param address: path of the UNIX socket to connect to
        @param file: (optional) path of a log file; every character
                     received is also written (and flushed) there
        """
        # Default timeout (seconds) used by recv(); adjustable via
        # settimeout().
        self._recv_timeout_sec = 300
        # FIFO of single characters filled by handle_read() on the
        # asyncore thread and consumed by recv() on the caller's thread.
        # deque append/popleft are thread-safe for this usage.
        self._buffer = deque()
        self._asyncore_thread = None
        self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self._sock.connect(address)
        self._logfile = None
        if file:
            self._logfile = open(file, "w")
        asyncore.dispatcher.__init__(self, sock=self._sock)
        self._open = True
        self._thread_start()

    def _thread_start(self):
        """Kick off a thread to wait on the asyncore.loop"""
        if self._asyncore_thread is not None:
            return
        # Daemon thread so a forgotten close() cannot hang interpreter
        # exit; the 1s timeout lets the loop notice channel closure.
        self._asyncore_thread = threading.Thread(target=asyncore.loop,
                                                 kwargs={'timeout':1})
        self._asyncore_thread.daemon = True
        self._asyncore_thread.start()

    def handle_close(self):
        """redirect close to base class"""
        # Call the base class close, but not self.close() since
        # handle_close() occurs in the context of the thread which
        # self.close() attempts to join.
        asyncore.dispatcher.close(self)

    def close(self):
        """Close the base object and wait for the thread to terminate"""
        # _open guards against double-close; the join() must happen on
        # the caller's thread, never on the asyncore thread itself
        # (see handle_close()).
        if self._open:
            self._open = False
            asyncore.dispatcher.close(self)
            if self._asyncore_thread is not None:
                thread, self._asyncore_thread = self._asyncore_thread, None
                thread.join()
            if self._logfile:
                self._logfile.close()
                self._logfile = None

    def handle_read(self):
        """process arriving characters into in memory _buffer"""
        # Runs on the asyncore thread; reads one byte at a time.
        try:
            data = asyncore.dispatcher.recv(self, 1)
            # latin1 is needed since there are some chars
            # we are receiving that cannot be encoded to utf-8
            # such as 0xe2, 0x80, 0xA6.
            string = data.decode("latin1")
        except:
            # NOTE(review): bare except also swallows SystemExit /
            # KeyboardInterrupt on this thread — confirm intentional.
            print("Exception seen.")
            traceback.print_exc()
            return
        if self._logfile:
            self._logfile.write("{}".format(string))
            self._logfile.flush()
        for c in string:
            self._buffer.extend(c)

    def recv(self, n=1, sleep_delay_s=0.1):
        """Return chars from in memory buffer"""
        # Polls the buffer until n chars are available; raises
        # socket.timeout after _recv_timeout_sec seconds.
        start_time = time.time()
        while len(self._buffer) < n:
            time.sleep(sleep_delay_s)
            elapsed_sec = time.time() - start_time
            if elapsed_sec > self._recv_timeout_sec:
                raise socket.timeout
        chars = ''.join([self._buffer.popleft() for i in range(n)])
        # We choose to use latin1 to remain consistent with
        # handle_read() and give back the same data as the user would
        # receive if they were reading directly from the
        # socket w/o our intervention.
        return chars.encode("latin1")

    def set_blocking(self):
        """Maintain compatibility with socket API"""
        # No-op: reads are always served from the internal buffer.
        pass

    def settimeout(self, seconds):
        """Set current timeout on recv"""
        self._recv_timeout_sec = seconds

View File

@ -26,6 +26,7 @@ import socket
import tempfile
from typing import Optional, Type
from types import TracebackType
from qemu.console_socket import ConsoleSocket
from . import qmp
@ -75,7 +76,8 @@ class QEMUMachine:
def __init__(self, binary, args=None, wrapper=None, name=None,
test_dir="/var/tmp", monitor_address=None,
socket_scm_helper=None, sock_dir=None):
socket_scm_helper=None, sock_dir=None,
drain_console=False, console_log=None):
'''
Initialize a QEMUMachine
@ -86,6 +88,9 @@ class QEMUMachine:
@param test_dir: where to create socket and log file
@param monitor_address: address for QMP monitor
@param socket_scm_helper: helper program, required for send_fd_scm()
@param sock_dir: where to create socket (overrides test_dir for sock)
@param console_log: (optional) path to console log file
@param drain_console: (optional) True to drain console socket to buffer
@note: Qemu process is not started until launch() is used.
'''
if args is None:
@ -122,6 +127,12 @@ class QEMUMachine:
self._console_address = None
self._console_socket = None
self._remove_files = []
self._console_log_path = console_log
if self._console_log_path:
# In order to log the console, buffering needs to be enabled.
self._drain_console = True
else:
self._drain_console = drain_console
def __enter__(self):
return self
@ -580,6 +591,10 @@ class QEMUMachine:
Returns a socket connected to the console
"""
if self._console_socket is None:
if self._drain_console:
self._console_socket = ConsoleSocket(self._console_address,
file=self._console_log_path)
else:
self._console_socket = socket.socket(socket.AF_UNIX,
socket.SOCK_STREAM)
self._console_socket.connect(self._console_address)

View File

@ -22,6 +22,8 @@ endif
@echo " $(MAKE) check-venv Creates a Python venv for tests"
@echo " $(MAKE) check-clean Clean the tests and related data"
@echo
@echo "The following are useful for CI builds"
@echo " $(MAKE) check-build Build most test binaris"
@echo " $(MAKE) get-vm-images Downloads all images used by acceptance tests, according to configured targets (~350 MB each, 1.5 GB max)"
@echo
@echo
@ -649,6 +651,10 @@ $(patsubst %, check-qtest-%, $(QTEST_TARGETS)): check-qtest-%: %-softmmu/all $(c
QTEST_QEMU_BINARY=$*-softmmu/qemu-system-$* \
QTEST_QEMU_IMG=qemu-img$(EXESUF))
build-qtest: $(patsubst %, %-softmmu/all, $(QTEST_TARGETS)) $(check-qtest-y)
build-unit: $(check-unit-y)
check-unit: $(check-unit-y)
$(call do_test_human, $^)
@ -680,7 +686,6 @@ check-report.tap: $(patsubst %,check-report-qtest-%.tap, $(QTEST_TARGETS)) check
FP_TEST_BIN=$(BUILD_DIR)/tests/fp/fp-test
# the build dir is created by configure
.PHONY: $(FP_TEST_BIN)
$(FP_TEST_BIN): config-host.h $(test-util-obj-y)
$(call quiet-command, \
$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" $(notdir $@), \
@ -814,9 +819,10 @@ check-softfloat-ops: $(SF_MATH_RULES)
.PHONY: check-softfloat
ifeq ($(CONFIG_TCG),y)
check-softfloat: check-softfloat-conv check-softfloat-compare check-softfloat-ops
build-softfloat: $(FP_TEST_BIN)
check-softfloat: build-softfloat check-softfloat-conv check-softfloat-compare check-softfloat-ops
else
check-softfloat:
build-softfloat check-softfloat:
$(call quiet-command, /bin/true, "FLOAT TEST", \
"SKIPPED for non-TCG builds")
endif
@ -944,7 +950,7 @@ check-acceptance: check-venv $(TESTS_RESULTS_DIR) get-vm-images
--show=$(AVOCADO_SHOW) run --job-results-dir=$(TESTS_RESULTS_DIR) \
--filter-by-tags-include-empty --filter-by-tags-include-empty-key \
$(AVOCADO_TAGS) \
--failfast=on tests/acceptance, \
$(if $(GITLAB_CI),,--failfast=on) tests/acceptance, \
"AVOCADO", "tests/acceptance")
# Consolidated targets
@ -955,7 +961,8 @@ check-qtest: $(patsubst %,check-qtest-%, $(QTEST_TARGETS))
ifeq ($(CONFIG_TOOLS),y)
check-block: $(patsubst %,check-%, $(check-block-y))
endif
check: check-block check-qapi-schema check-unit check-softfloat check-qtest check-decodetree
check-build: build-unit build-softfloat build-qtest
check-clean:
rm -rf $(check-unit-y) tests/*.o tests/*/*.o $(QEMU_IOTESTS_HELPERS-y)
rm -rf $(sort $(foreach target,$(SYSEMU_TARGET_LIST), $(check-qtest-$(target)-y:%=tests/qtest/%$(EXESUF))) $(check-qtest-generic-y:%=tests/qtest/%$(EXESUF)))
@ -963,6 +970,8 @@ check-clean:
rm -f tests/qtest/dbus-vmstate1-gen-timestamp
rm -rf $(TESTS_VENV_DIR) $(TESTS_RESULTS_DIR)
check: check-block check-qapi-schema check-unit check-softfloat check-qtest check-decodetree
clean: check-clean
# Build the help program automatically

View File

@ -20,6 +20,7 @@ from avocado.utils import network
from avocado.utils import vmimage
from avocado.utils import datadrainer
from avocado.utils.path import find_command
from avocado import skipIf
ACCEL_NOT_AVAILABLE_FMT = "%s accelerator does not seem to be available"
KVM_NOT_AVAILABLE = ACCEL_NOT_AVAILABLE_FMT % "KVM"
@ -220,6 +221,7 @@ class BootLinuxS390X(BootLinux):
chksum = '4caaab5a434fd4d1079149a072fdc7891e354f834d355069ca982fdcaf5a122d'
@skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
def test_s390_ccw_virtio_tcg(self):
"""
:avocado: tags=machine:s390-ccw-virtio

View File

@ -8,10 +8,12 @@
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.
import os
import logging
import tempfile
from avocado_qemu import Test
from avocado import skipIf
class LinuxInitrd(Test):
@ -51,6 +53,7 @@ class LinuxInitrd(Test):
max_size + 1)
self.assertRegex(self.vm.get_log(), expected_msg)
@skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
def test_with_2gib_file_should_work_with_linux_v4_16(self):
"""
QEMU has supported up to 4 GiB initrd for recent kernel

View File

@ -15,6 +15,7 @@ from avocado import skipUnless
from avocado_qemu import Test
from avocado_qemu import wait_for_console_pattern
from avocado.utils import archive
from avocado import skipIf
NUMPY_AVAILABLE = True
@ -99,6 +100,7 @@ class MaltaMachineFramebuffer(Test):
"""
self.do_test_i6400_framebuffer_logo(1)
@skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
def test_mips_malta_i6400_framebuffer_logo_7cores(self):
"""
:avocado: tags=arch:mips64el
@ -108,6 +110,7 @@ class MaltaMachineFramebuffer(Test):
"""
self.do_test_i6400_framebuffer_logo(7)
@skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
def test_mips_malta_i6400_framebuffer_logo_8cores(self):
"""
:avocado: tags=arch:mips64el

View File

@ -50,7 +50,7 @@ class RxGdbSimMachine(Test):
:avocado: tags=machine:gdbsim-r5f562n7
:avocado: tags=endian:little
"""
dtb_url = ('https://acc.dl.osdn.jp/users/23/23887/rx-qemu.dtb')
dtb_url = ('https://acc.dl.osdn.jp/users/23/23887/rx-virt.dtb')
dtb_hash = '7b4e4e2c71905da44e86ce47adee2210b026ac18'
dtb_path = self.fetch_asset(dtb_url, asset_hash=dtb_hash)
kernel_url = ('http://acc.dl.osdn.jp/users/23/23845/zImage')

View File

@ -73,7 +73,7 @@ class ReplayKernel(LinuxKernelTest):
logger = logging.getLogger('replay')
logger.info('replay overhead {:.2%}'.format(t2 / t1 - 1))
@skipIf(os.getenv('CONTINUOUS_INTEGRATION'), 'Running on Travis-CI')
@skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
def test_x86_64_pc(self):
"""
:avocado: tags=arch:x86_64

View File

@ -13,6 +13,7 @@ DOCKER_IMAGES := $(sort $(notdir $(basename $(wildcard $(DOCKER_FILES_DIR)/*.doc
DOCKER_TARGETS := $(patsubst %,docker-image-%,$(DOCKER_IMAGES))
# Use a global constant ccache directory to speed up repetitive builds
DOCKER_CCACHE_DIR := $$HOME/.cache/qemu-docker-ccache
DOCKER_REGISTRY := $(if $(REGISTRY),$(REGISTRY),registry.gitlab.com/qemu-project/qemu)
DOCKER_TESTS := $(notdir $(shell \
find $(SRC_PATH)/tests/docker/ -name 'test-*' -type f))
@ -50,13 +51,15 @@ docker-image: ${DOCKER_TARGETS}
ifdef SKIP_DOCKER_BUILD
docker-image-%: $(DOCKER_FILES_DIR)/%.docker
$(call quiet-command, \
$(DOCKER_SCRIPT) check --quiet qemu:$* $<, \
$(DOCKER_SCRIPT) check --quiet qemu/$* $<, \
"CHECK", "$*")
else
docker-image-%: $(DOCKER_FILES_DIR)/%.docker
$(call quiet-command,\
$(DOCKER_SCRIPT) build -t qemu:$* -f $< \
$(if $V,,--quiet) $(if $(NOCACHE),--no-cache) \
$(DOCKER_SCRIPT) build -t qemu/$* -f $< \
$(if $V,,--quiet) \
$(if $(NOCACHE),--no-cache, \
$(if $(DOCKER_REGISTRY),--registry $(DOCKER_REGISTRY))) \
$(if $(NOUSER),,--add-current-user) \
$(if $(EXTRA_FILES),--extra-files $(EXTRA_FILES))\
$(if $(EXECUTABLE),--include-executable=$(EXECUTABLE)),\
@ -75,14 +78,14 @@ docker-binfmt-image-debian-%: $(DOCKER_FILES_DIR)/debian-bootstrap.docker
DEB_ARCH=$(DEB_ARCH) \
DEB_TYPE=$(DEB_TYPE) \
$(if $(DEB_URL),DEB_URL=$(DEB_URL),) \
$(DOCKER_SCRIPT) build qemu:debian-$* $< \
$(DOCKER_SCRIPT) build qemu/debian-$* $< \
$(if $V,,--quiet) $(if $(NOCACHE),--no-cache) \
$(if $(NOUSER),,--add-current-user) \
$(if $(EXTRA_FILES),--extra-files $(EXTRA_FILES)) \
$(if $(EXECUTABLE),--include-executable=$(EXECUTABLE)), \
"BUILD","binfmt debian-$* (debootstrapped)"), \
$(call quiet-command, \
$(DOCKER_SCRIPT) check --quiet qemu:debian-$* $< || \
$(DOCKER_SCRIPT) check --quiet qemu/debian-$* $< || \
{ echo "You will need to build $(EXECUTABLE)"; exit 1;},\
"CHECK", "debian-$* exists"))
@ -131,6 +134,7 @@ docker-image-travis: NOUSER=1
# Specialist build images, sometimes very limited tools
docker-image-debian-tricore-cross: docker-image-debian9
docker-image-debian-all-test-cross: docker-image-debian10
docker-image-debian-arm64-test-cross: docker-image-debian11
# These images may be good enough for building tests but not for test builds
@ -213,6 +217,7 @@ endif
@echo ' Include extra files in image.'
@echo ' ENGINE=auto/docker/podman'
@echo ' Specify which container engine to run.'
@echo ' REGISTRY=url Cache builds from registry (default:$(DOCKER_REGISTRY))'
# This rule if for directly running against an arbitrary docker target.
# It is called by the expanded docker targets (e.g. make
@ -258,7 +263,7 @@ docker-run: docker-qemu-src
docker-run-%: CMD = $(shell echo '$@' | sed -e 's/docker-run-\([^@]*\)@\(.*\)/\1/')
docker-run-%: IMAGE = $(shell echo '$@' | sed -e 's/docker-run-\([^@]*\)@\(.*\)/\2/')
docker-run-%:
@$(MAKE) docker-run TEST=$(CMD) IMAGE=qemu:$(IMAGE)
@$(MAKE) docker-run TEST=$(CMD) IMAGE=qemu/$(IMAGE)
docker-clean:
$(call quiet-command, $(DOCKER_SCRIPT) clean)

View File

@ -47,7 +47,7 @@ build_qemu()
check_qemu()
{
# default to make check unless the caller specifies
if test -z "$@"; then
if [ $# = 0 ]; then
INVOCATION="check"
else
INVOCATION="$@"

View File

@ -204,7 +204,7 @@ def _dockerfile_preprocess(df):
for l in df.splitlines():
if len(l.strip()) == 0 or l.startswith("#"):
continue
from_pref = "FROM qemu:"
from_pref = "FROM qemu/"
if l.startswith(from_pref):
# TODO: Alternatively we could replace this line with "FROM $ID"
# where $ID is the image's hex id obtained with
@ -221,6 +221,13 @@ class Docker(object):
""" Running Docker commands """
def __init__(self):
self._command = _guess_engine_command()
if "docker" in self._command and "TRAVIS" not in os.environ:
os.environ["DOCKER_BUILDKIT"] = "1"
self._buildkit = True
else:
self._buildkit = False
self._instance = None
atexit.register(self._kill_instances)
signal.signal(signal.SIGTERM, self._kill_instances)
@ -289,10 +296,25 @@ class Docker(object):
return labels.get("com.qemu.dockerfile-checksum", "")
def build_image(self, tag, docker_dir, dockerfile,
quiet=True, user=False, argv=None, extra_files_cksum=[]):
quiet=True, user=False, argv=None, registry=None,
extra_files_cksum=[]):
if argv is None:
argv = []
# pre-calculate the docker checksum before any
# substitutions we make for caching
checksum = _text_checksum(_dockerfile_preprocess(dockerfile))
if registry is not None:
# see if we can fetch a cache copy, may fail...
pull_args = ["pull", "%s/%s" % (registry, tag)]
if self._do(pull_args, quiet=quiet) == 0:
dockerfile = dockerfile.replace("FROM qemu/",
"FROM %s/qemu/" %
(registry))
else:
registry = None
tmp_df = tempfile.NamedTemporaryFile(mode="w+t",
encoding='utf-8',
dir=docker_dir, suffix=".docker")
@ -306,15 +328,23 @@ class Docker(object):
(uname, uid, uname))
tmp_df.write("\n")
tmp_df.write("LABEL com.qemu.dockerfile-checksum=%s" %
_text_checksum(_dockerfile_preprocess(dockerfile)))
tmp_df.write("LABEL com.qemu.dockerfile-checksum=%s" % (checksum))
for f, c in extra_files_cksum:
tmp_df.write("LABEL com.qemu.%s-checksum=%s" % (f, c))
tmp_df.flush()
self._do_check(["build", "-t", tag, "-f", tmp_df.name] + argv +
[docker_dir],
build_args = ["build", "-t", tag, "-f", tmp_df.name]
if self._buildkit:
build_args += ["--build-arg", "BUILDKIT_INLINE_CACHE=1"]
if registry is not None:
cache = "%s/%s" % (registry, tag)
build_args += ["--cache-from", cache]
build_args += argv
build_args += [docker_dir]
self._do_check(build_args,
quiet=quiet)
def update_image(self, tag, tarball, quiet=True):
@ -403,6 +433,8 @@ class BuildCommand(SubCommand):
parser.add_argument("--add-current-user", "-u", dest="user",
action="store_true",
help="Add the current user to image's passwd")
parser.add_argument("--registry", "-r",
help="cache from docker registry")
parser.add_argument("-t", dest="tag",
help="Image Tag")
parser.add_argument("-f", dest="dockerfile",
@ -458,7 +490,8 @@ class BuildCommand(SubCommand):
for k, v in os.environ.items()
if k.lower() in FILTERED_ENV_NAMES]
dkr.build_image(tag, docker_dir, dockerfile,
quiet=args.quiet, user=args.user, argv=argv,
quiet=args.quiet, user=args.user,
argv=argv, registry=args.registry,
extra_files_cksum=cksum)
rmtree(docker_dir)

View File

@ -0,0 +1,53 @@
#
# Docker all cross-compiler target (tests only)
#
# While the normal cross builds take care to setup proper multiarch
# build environments which can cross build QEMU this just installs the
# basic compilers for as many targets as possible. We shall use this
# to build and run linux-user tests on GitLab
#
FROM qemu/debian10

# What we need to build QEMU itself
RUN apt update && \
    DEBIAN_FRONTEND=noninteractive eatmydata \
    apt build-dep -yy qemu

# Add the foreign architecture we want and install dependencies
# (one bare cross-gcc plus matching libc headers per target; no full
# multiarch environment is set up)
RUN DEBIAN_FRONTEND=noninteractive eatmydata \
    apt install -y --no-install-recommends \
        gcc-aarch64-linux-gnu \
        libc6-dev-arm64-cross \
        gcc-alpha-linux-gnu \
        libc6.1-dev-alpha-cross \
        gcc-arm-linux-gnueabihf \
        libc6-dev-armhf-cross \
        gcc-hppa-linux-gnu \
        libc6-dev-hppa-cross \
        gcc-m68k-linux-gnu \
        libc6-dev-m68k-cross \
        gcc-mips-linux-gnu \
        libc6-dev-mips-cross \
        gcc-mips64-linux-gnuabi64 \
        libc6-dev-mips64-cross \
        gcc-mips64el-linux-gnuabi64 \
        libc6-dev-mips64el-cross \
        gcc-mipsel-linux-gnu \
        libc6-dev-mipsel-cross \
        gcc-powerpc-linux-gnu \
        libc6-dev-powerpc-cross \
        gcc-powerpc64-linux-gnu \
        libc6-dev-ppc64-cross \
        gcc-powerpc64le-linux-gnu \
        libc6-dev-ppc64el-cross \
        gcc-riscv64-linux-gnu \
        libc6-dev-riscv64-cross \
        gcc-s390x-linux-gnu \
        libc6-dev-s390x-cross \
        gcc-sh4-linux-gnu \
        libc6-dev-sh4-cross \
        gcc-sparc64-linux-gnu \
        libc6-dev-sparc64-cross

# Defaults consumed by the CI build scripts: linux-user only build,
# covering every target for which a cross compiler was installed above.
ENV QEMU_CONFIGURE_OPTS --disable-system --disable-docs --disable-tools
ENV DEF_TARGET_LIST aarch64-linux-user,alpha-linux-user,arm-linux-user,hppa-linux-user,i386-linux-user,m68k-linux-user,mips-linux-user,mips64-linux-user,mips64el-linux-user,mipsel-linux-user,ppc-linux-user,ppc64-linux-user,ppc64le-linux-user,riscv64-linux-user,s390x-linux-user,sh4-linux-user,sparc64-linux-user

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Buster base image.
#
FROM qemu:debian10
FROM qemu/debian10
RUN apt update && \
DEBIAN_FRONTEND=noninteractive eatmydata \

View File

@ -4,7 +4,7 @@
# This docker target is used on non-x86_64 machines which need the
# x86_64 cross compilers installed.
#
FROM qemu:debian10
FROM qemu/debian10
MAINTAINER Alex Bennée <alex.bennee@linaro.org>
# Add the foreign architecture we want and install dependencies

View File

@ -4,7 +4,7 @@
# This docker target builds on the debian Stretch base image. Further
# libraries which are not widely available are installed by hand.
#
FROM qemu:debian10
FROM qemu/debian10
MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
RUN apt update && \

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Buster base image.
#
FROM qemu:debian10
FROM qemu/debian10
# Add the foreign architecture we want and install dependencies
RUN dpkg --add-architecture arm64

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Bullseye base image.
#
FROM qemu:debian11
FROM qemu/debian11
# Add the foreign architecture we want and install dependencies
RUN dpkg --add-architecture arm64

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Stretch base image.
#
FROM qemu:debian10
FROM qemu/debian10
MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>
# Add the foreign architecture we want and install dependencies

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Stretch base image.
#
FROM qemu:debian10
FROM qemu/debian10
# Add the foreign architecture we want and install dependencies
RUN dpkg --add-architecture armhf

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Buster base image.
#
FROM qemu:debian10
FROM qemu/debian10
RUN apt update && \
DEBIAN_FRONTEND=noninteractive eatmydata \

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Buster base image.
#
FROM qemu:debian10
FROM qemu/debian10
RUN apt update && \
DEBIAN_FRONTEND=noninteractive eatmydata \

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Buster base image.
#
FROM qemu:debian10
FROM qemu/debian10
MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Buster base image.
#
FROM qemu:debian10
FROM qemu/debian10
RUN apt update && \
DEBIAN_FRONTEND=noninteractive eatmydata \

View File

@ -4,7 +4,7 @@
# This docker target builds on the debian Stretch base image.
#
FROM qemu:debian10
FROM qemu/debian10
MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Stretch base image.
#
FROM qemu:debian10
FROM qemu/debian10
MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Buster base image.
#
FROM qemu:debian10
FROM qemu/debian10
RUN apt update && \
DEBIAN_FRONTEND=noninteractive eatmydata \

View File

@ -2,7 +2,7 @@
# Docker ppc64 cross-compiler target
#
# This docker target builds on the debian Buster base image.
FROM qemu:debian10
FROM qemu/debian10
RUN apt update && \
DEBIAN_FRONTEND=noninteractive eatmydata \

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Stretch base image.
#
FROM qemu:debian10
FROM qemu/debian10
# Add the foreign architecture we want and install dependencies
RUN dpkg --add-architecture ppc64el && \

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Buster base image.
#
FROM qemu:debian10
FROM qemu/debian10
RUN apt update && \
DEBIAN_FRONTEND=noninteractive eatmydata \

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Stretch base image.
#
FROM qemu:debian10
FROM qemu/debian10
# Add the s390x architecture
RUN dpkg --add-architecture s390x

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Buster base image.
#
FROM qemu:debian10
FROM qemu/debian10
RUN apt update && \
DEBIAN_FRONTEND=noninteractive eatmydata \

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Buster base image.
#
FROM qemu:debian10
FROM qemu/debian10
RUN apt update && \
DEBIAN_FRONTEND=noninteractive eatmydata \

View File

@ -7,7 +7,7 @@
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
FROM qemu:debian9
FROM qemu/debian9
MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Stretch MXE base image.
#
FROM qemu:debian9-mxe
FROM qemu/debian9-mxe
MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Stretch MXE base image.
#
FROM qemu:debian9-mxe
FROM qemu/debian9-mxe
MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>

View File

@ -18,12 +18,12 @@ RUN apt-get update && \
git \
python3-minimal
ENV CPU_LIST csp dc232b dc233c
ENV TOOLCHAIN_RELEASE 2018.02
ENV CPU_LIST dc232b dc233c de233_fpu dsp3400
ENV TOOLCHAIN_RELEASE 2020.07
RUN for cpu in $CPU_LIST; do \
curl -#SL http://github.com/foss-xtensa/toolchain/releases/download/$TOOLCHAIN_RELEASE/x86_64-$TOOLCHAIN_RELEASE-xtensa-$cpu-elf.tar.gz \
| tar -xzC /opt; \
done
ENV PATH $PATH:/opt/$TOOLCHAIN_RELEASE/xtensa-dc232b-elf/bin:/opt/$TOOLCHAIN_RELEASE/xtensa-dc233c-elf/bin:/opt/$TOOLCHAIN_RELEASE/xtensa-csp-elf/bin
ENV PATH $PATH:/opt/$TOOLCHAIN_RELEASE/xtensa-dc232b-elf/bin:/opt/$TOOLCHAIN_RELEASE/xtensa-dc233c-elf/bin:/opt/$TOOLCHAIN_RELEASE/xtensa-de233_fpu-elf/bin:/opt/$TOOLCHAIN_RELEASE/xtensa-dsp3400-elf/bin

View File

@ -3,7 +3,7 @@
#
# This docker target builds on the debian Stretch base image.
#
FROM qemu:debian9
FROM qemu/debian9
MAINTAINER Philippe Mathieu-Daudé <f4bug@amsat.org>

View File

@ -80,7 +80,12 @@ ENV PACKAGES \
pixman-devel \
python3 \
python3-PyYAML \
python3-numpy \
python3-opencv \
python3-pillow \
python3-pip \
python3-sphinx \
python3-virtualenv \
rdma-core-devel \
SDL2-devel \
snappy-devel \
@ -89,6 +94,8 @@ ENV PACKAGES \
systemd-devel \
systemtap-sdt-devel \
tar \
tesseract \
tesseract-langpack-eng \
texinfo \
usbredir-devel \
virglrenderer-devel \

View File

@ -46,9 +46,17 @@ ENV PACKAGES flex bison \
libxen-dev \
libzstd-dev \
make \
python3-yaml \
python3-numpy \
python3-opencv \
python3-pil \
python3-pip \
python3-sphinx \
python3-venv \
python3-yaml \
rpm2cpio \
sparse \
tesseract-ocr \
tesseract-ocr-eng \
texinfo \
xfslibs-dev\
vim

View File

@ -186,7 +186,7 @@ _filter_img_create()
-e 's/^\(data_file\)/3-\1/' \
-e 's/^\(encryption\)/4-\1/' \
-e 's/^\(preallocation\)/8-\1/' \
| sort \
| LC_ALL=C sort \
| $SED -e 's/^[0-9]-//' \
| tr '\n\0' ' \n' \
| $SED -e 's/^ *$//' -e 's/ *$//'

View File

@ -25,7 +25,13 @@ struct thread_stats {
struct thread_info {
void (*func)(struct thread_info *);
struct thread_stats stats;
uint64_t r;
/*
* Seed is in the range [1..UINT64_MAX], because the RNG requires
* a non-zero seed. To use, subtract 1 and compare against the
* threshold with </>=. This lets threshold = 0 never match (0% hit),
* and threshold = UINT64_MAX always match (100% hit).
*/
uint64_t seed;
bool write_op; /* writes alternate between insertions and removals */
bool resize_down;
} QEMU_ALIGNED(64); /* avoid false sharing among threads */
@ -131,8 +137,9 @@ static uint64_t xorshift64star(uint64_t x)
static void do_rz(struct thread_info *info)
{
struct thread_stats *stats = &info->stats;
uint64_t r = info->seed - 1;
if (info->r < resize_threshold) {
if (r < resize_threshold) {
size_t size = info->resize_down ? resize_min : resize_max;
bool resized;
@ -151,13 +158,14 @@ static void do_rz(struct thread_info *info)
static void do_rw(struct thread_info *info)
{
struct thread_stats *stats = &info->stats;
uint64_t r = info->seed - 1;
uint32_t hash;
long *p;
if (info->r >= update_threshold) {
if (r >= update_threshold) {
bool read;
p = &keys[info->r & (lookup_range - 1)];
p = &keys[r & (lookup_range - 1)];
hash = hfunc(*p);
read = qht_lookup(&ht, p, hash);
if (read) {
@ -166,7 +174,7 @@ static void do_rw(struct thread_info *info)
stats->not_rd++;
}
} else {
p = &keys[info->r & (update_range - 1)];
p = &keys[r & (update_range - 1)];
hash = hfunc(*p);
if (info->write_op) {
bool written = false;
@ -208,7 +216,7 @@ static void *thread_func(void *p)
rcu_read_lock();
while (!atomic_read(&test_stop)) {
info->r = xorshift64star(info->r);
info->seed = xorshift64star(info->seed);
info->func(info);
}
rcu_read_unlock();
@ -221,7 +229,7 @@ static void *thread_func(void *p)
static void prepare_thread_info(struct thread_info *info, int i)
{
/* seed for the RNG; each thread should have a different one */
info->r = (i + 1) ^ time(NULL);
info->seed = (i + 1) ^ time(NULL);
/* the first update will be a write */
info->write_op = true;
/* the first resize will be down */
@ -281,11 +289,25 @@ static void pr_params(void)
static void do_threshold(double rate, uint64_t *threshold)
{
/*
* For 0 <= rate <= 1, scale to fit in a uint64_t.
*
* Scale by 2**64, with a special case for 1.0.
* The remainder of the possible values are scattered between 0
* and 0xfffffffffffff800 (nextafter(0x1p64, 0)).
*
* Note that we cannot simply scale by UINT64_MAX, because that
* value is not representable as an IEEE double value.
*
* If we scale by the next largest value, nextafter(0x1p64, 0),
* then the remainder of the possible values are scattered between
* 0 and 0xfffffffffffff000. Which leaves us with a gap between
* the final two inputs that is twice as large as any other.
*/
if (rate == 1.0) {
*threshold = UINT64_MAX;
} else {
*threshold = (rate * 0xffff000000000000ull)
+ (rate * 0x0000ffffffffffffull);
*threshold = rate * 0x1p64;
}
}

View File

@ -279,6 +279,7 @@ tests/qtest/tco-test$(EXESUF): tests/qtest/tco-test.o $(libqos-pc-obj-y)
tests/qtest/virtio-ccw-test$(EXESUF): tests/qtest/virtio-ccw-test.o
tests/qtest/display-vga-test$(EXESUF): tests/qtest/display-vga-test.o
tests/qtest/qom-test$(EXESUF): tests/qtest/qom-test.o
tests/qtest/modules-test$(EXESUF): tests/qtest/modules-test.o
tests/qtest/test-hmp$(EXESUF): tests/qtest/test-hmp.o
tests/qtest/machine-none-test$(EXESUF): tests/qtest/machine-none-test.o
tests/qtest/device-plug-test$(EXESUF): tests/qtest/device-plug-test.o

View File

@ -105,14 +105,9 @@ static void test_one_device(QTestState *qts, const char *type)
{
QDict *resp;
char *help;
char *qom_tree_start, *qom_tree_end;
char *qtree_start, *qtree_end;
g_test_message("Testing device '%s'", type);
qom_tree_start = qtest_hmp(qts, "info qom-tree");
qtree_start = qtest_hmp(qts, "info qtree");
resp = qtest_qmp(qts, "{'execute': 'device-list-properties',"
" 'arguments': {'typename': %s}}",
type);
@ -120,21 +115,6 @@ static void test_one_device(QTestState *qts, const char *type)
help = qtest_hmp(qts, "device_add \"%s,help\"", type);
g_free(help);
/*
* Some devices leave dangling pointers in QOM behind.
* "info qom-tree" or "info qtree" have a good chance at crashing then.
* Also make sure that the tree did not change.
*/
qom_tree_end = qtest_hmp(qts, "info qom-tree");
g_assert_cmpstr(qom_tree_start, ==, qom_tree_end);
g_free(qom_tree_start);
g_free(qom_tree_end);
qtree_end = qtest_hmp(qts, "info qtree");
g_assert_cmpstr(qtree_start, ==, qtree_end);
g_free(qtree_start);
g_free(qtree_end);
}
static void test_device_intro_list(void)
@ -213,16 +193,38 @@ static void test_qom_list_fields(void)
static void test_device_intro_none(void)
{
QTestState *qts = qtest_init(common_args);
g_autofree char *qom_tree_start = qtest_hmp(qts, "info qom-tree");
g_autofree char *qom_tree_end = NULL;
g_autofree char *qtree_start = qtest_hmp(qts, "info qtree");
g_autofree char *qtree_end = NULL;
test_one_device(qts, "nonexistent");
/* Make sure that really nothing changed in the trees */
qom_tree_end = qtest_hmp(qts, "info qom-tree");
g_assert_cmpstr(qom_tree_start, ==, qom_tree_end);
qtree_end = qtest_hmp(qts, "info qtree");
g_assert_cmpstr(qtree_start, ==, qtree_end);
qtest_quit(qts);
}
static void test_device_intro_abstract(void)
{
QTestState *qts = qtest_init(common_args);
g_autofree char *qom_tree_start = qtest_hmp(qts, "info qom-tree");
g_autofree char *qom_tree_end = NULL;
g_autofree char *qtree_start = qtest_hmp(qts, "info qtree");
g_autofree char *qtree_end = NULL;
test_one_device(qts, "device");
/* Make sure that really nothing changed in the trees */
qom_tree_end = qtest_hmp(qts, "info qom-tree");
g_assert_cmpstr(qom_tree_start, ==, qom_tree_end);
qtree_end = qtest_hmp(qts, "info qtree");
g_assert_cmpstr(qtree_start, ==, qtree_end);
qtest_quit(qts);
}
@ -231,9 +233,12 @@ static void test_device_intro_concrete(const void *args)
QList *types;
QListEntry *entry;
const char *type;
QTestState *qts;
QTestState *qts = qtest_init(args);
g_autofree char *qom_tree_start = qtest_hmp(qts, "info qom-tree");
g_autofree char *qom_tree_end = NULL;
g_autofree char *qtree_start = qtest_hmp(qts, "info qtree");
g_autofree char *qtree_end = NULL;
qts = qtest_init(args);
types = device_type_list(qts, false);
QLIST_FOREACH_ENTRY(types, entry) {
@ -243,6 +248,17 @@ static void test_device_intro_concrete(const void *args)
test_one_device(qts, type);
}
/*
* Some devices leave dangling pointers in QOM behind.
* "info qom-tree" or "info qtree" have a good chance at crashing then.
* Also make sure that the tree did not change.
*/
qom_tree_end = qtest_hmp(qts, "info qom-tree");
g_assert_cmpstr(qom_tree_start, ==, qom_tree_end);
qtree_end = qtest_hmp(qts, "info qtree");
g_assert_cmpstr(qtree_start, ==, qtree_end);
qobject_unref(types);
qtest_quit(qts);
g_free((void *)args);

View File

@ -47,7 +47,7 @@ ifneq ($(DOCKER_IMAGE),)
DOCKER_COMPILE_CMD="$(DOCKER_SCRIPT) cc \
--cc $(DOCKER_CROSS_CC_GUEST) \
-i qemu:$(DOCKER_IMAGE) \
-i qemu/$(DOCKER_IMAGE) \
-s $(SRC_PATH) -- "
.PHONY: docker-build-guest-tests
@ -57,7 +57,7 @@ docker-build-guest-tests: docker-image-$(DOCKER_IMAGE)
$(MAKE) -f $(TCG_MAKE) TARGET="$(TARGET)" CC=$(DOCKER_COMPILE_CMD) \
SRC_PATH="$(SRC_PATH)" BUILD_STATIC=y \
EXTRA_CFLAGS="$(CROSS_CC_GUEST_CFLAGS)"), \
"BUILD","$(TARGET) guest-tests with docker qemu:$(DOCKER_IMAGE)")
"BUILD","$(TARGET) guest-tests with docker qemu/$(DOCKER_IMAGE)")
GUEST_BUILD=docker-build-guest-tests

View File

@ -46,20 +46,29 @@ fi
: ${cross_cc_aarch64="aarch64-linux-gnu-gcc"}
: ${cross_cc_aarch64_be="$cross_cc_aarch64"}
: ${cross_cc_cflags_aarch64_be="-mbig-endian"}
: $(cross_cc_alpha="alpha-linux-gnu-gcc")
: ${cross_cc_arm="arm-linux-gnueabihf-gcc"}
: ${cross_cc_cflags_armeb="-mbig-endian"}
: ${cross_cc_hppa="hppa-linux-gnu-gcc"}
: ${cross_cc_i386="i386-pc-linux-gnu-gcc"}
: ${cross_cc_cflags_i386="-m32"}
: ${cross_cc_x86_64="x86_64-pc-linux-gnu-gcc"}
: ${cross_cc_cflags_x86_64="-m64"}
: ${cross_cc_m68k="m68k-linux-gnu-gcc"}
: $(cross_cc_mips64el="mips64el-linux-gnuabi64-gcc")
: $(cross_cc_mips64="mips64-linux-gnuabi64-gcc")
: $(cross_cc_mipsel="mipsel-linux-gnu-gcc")
: $(cross_cc_mips="mips-linux-gnu-gcc")
: ${cross_cc_ppc="powerpc-linux-gnu-gcc"}
: ${cross_cc_cflags_ppc="-m32"}
: ${cross_cc_ppc64="powerpc-linux-gnu-gcc"}
: ${cross_cc_cflags_ppc64="-m64"}
: ${cross_cc_ppc64="powerpc64-linux-gnu-gcc"}
: ${cross_cc_ppc64le="powerpc64le-linux-gnu-gcc"}
: ${cross_cc_cflags_s390x="-m64"}
: $(cross_cc_riscv64="riscv64-linux-gnu-gcc")
: ${cross_cc_s390x="s390x-linux-gnu-gcc"}
: $(cross_cc_sh4="sh4-linux-gnu-gcc")
: ${cross_cc_cflags_sparc="-m32 -mv8plus -mcpu=ultrasparc"}
: ${cross_cc_sparc64="sparc64-linux-gnu-gcc"}
: ${cross_cc_cflags_sparc64="-m64 -mcpu=ultrasparc"}
: ${cross_cc_x86_64="x86_64-pc-linux-gnu-gcc"}
: ${cross_cc_cflags_x86_64="-m64"}
for target in $target_list; do
arch=${target%%-*}
@ -173,7 +182,7 @@ for target in $target_list; do
container_image=debian-xtensa-cross
# default to the dc232b cpu
container_cross_cc=/opt/2018.02/xtensa-dc232b-elf/bin/xtensa-dc232b-elf-gcc
container_cross_cc=/opt/2020.07/xtensa-dc232b-elf/bin/xtensa-dc232b-elf-gcc
;;
esac

View File

@ -5,6 +5,9 @@
IMAGES := freebsd netbsd openbsd centos fedora
ifneq ($(GENISOIMAGE),)
IMAGES += ubuntu.i386 centos
ifneq ($(EFI_AARCH64),)
IMAGES += ubuntu.aarch64 centos.aarch64
endif
endif
IMAGES_DIR := $(HOME)/.cache/qemu-vm/images
@ -23,6 +26,12 @@ vm-help vm-test:
ifneq ($(GENISOIMAGE),)
@echo " vm-build-centos - Build QEMU in CentOS VM, with Docker"
@echo " vm-build-ubuntu.i386 - Build QEMU in ubuntu i386 VM"
ifneq ($(EFI_AARCH64),)
@echo " vm-build-ubuntu.aarch64 - Build QEMU in ubuntu aarch64 VM"
@echo " vm-build-centos.aarch64 - Build QEMU in CentOS aarch64 VM"
else
@echo " (to build centos/ubuntu aarch64 images use configure --efi-aarch64)"
endif
else
@echo " (install genisoimage to build centos/ubuntu images)"
endif
@ -40,10 +49,17 @@ endif
@echo ' EXTRA_CONFIGURE_OPTS="..."'
@echo " J=[0..9]* - Override the -jN parameter for make commands"
@echo " DEBUG=1 - Enable verbose output on host and interactive debugging"
@echo " LOG_CONSOLE=1 - Log console to file in: ~/.cache/qemu-vm "
@echo " V=1 - Enable verbose ouput on host and guest commands"
@echo " QEMU_LOCAL=1 - Use QEMU binary local to this build."
@echo " QEMU=/path/to/qemu - Change path to QEMU binary"
@echo " QEMU_IMG=/path/to/qemu-img - Change path to qemu-img tool"
ifeq ($(PYTHON_YAML),yes)
@echo " QEMU_CONFIG=/path/conf.yml - Change path to VM configuration .yml file."
else
@echo " (install python3-yaml to enable support for yaml file to configure a VM.)"
endif
@echo " See conf_example_*.yml for file format details."
vm-build-all: $(addprefix vm-build-, $(IMAGES))
@ -59,6 +75,8 @@ $(IMAGES_DIR)/%.img: $(SRC_PATH)/tests/vm/% \
$(if $(V)$(DEBUG), --debug) \
$(if $(GENISOIMAGE),--genisoimage $(GENISOIMAGE)) \
$(if $(QEMU_LOCAL),--build-path $(BUILD_DIR)) \
$(if $(EFI_AARCH64),--efi-aarch64 $(EFI_AARCH64)) \
$(if $(LOG_CONSOLE),--log-console) \
--image "$@" \
--force \
--build-image $@, \
@ -74,6 +92,8 @@ vm-build-%: $(IMAGES_DIR)/%.img
$(if $(J),--jobs $(J)) \
$(if $(V),--verbose) \
$(if $(QEMU_LOCAL),--build-path $(BUILD_DIR)) \
$(if $(EFI_AARCH64),--efi-aarch64 $(EFI_AARCH64)) \
$(if $(LOG_CONSOLE),--log-console) \
--image "$<" \
$(if $(BUILD_TARGET),--build-target $(BUILD_TARGET)) \
--snapshot \
@ -96,6 +116,8 @@ vm-boot-ssh-%: $(IMAGES_DIR)/%.img
$(if $(J),--jobs $(J)) \
$(if $(V)$(DEBUG), --debug) \
$(if $(QEMU_LOCAL),--build-path $(BUILD_DIR)) \
$(if $(EFI_AARCH64),--efi-aarch64 $(EFI_AARCH64)) \
$(if $(LOG_CONSOLE),--log-console) \
--image "$<" \
--interactive \
false, \

106
tests/vm/aarch64vm.py Normal file
View File

@ -0,0 +1,106 @@
#!/usr/bin/env python3
#
# VM testing aarch64 library
#
# Copyright 2020 Linaro
#
# Authors:
# Robert Foley <robert.foley@linaro.org>
#
# This code is licensed under the GPL version 2 or later. See
# the COPYING file in the top-level directory.
#
import os
import sys
import subprocess
import basevm
from qemu.accel import kvm_available
# This is the config needed for current version of QEMU.
# This works for both kvm and tcg.
CURRENT_CONFIG = {
    'cpu'     : "max",
    'machine' : "virt,gic-version=max",
}

# The minimum major version of QEMU we will support with aarch64 VMs is 3.
# (basevm.get_qemu_version() returns the major number.)
# QEMU versions less than 3 have various issues running these VMs.
QEMU_AARCH64_MIN_VERSION = 3

# The DEFAULT_CONFIG will default to a version of
# parameters that works for backwards compatibility
# (pre-"max" cpu/gic support).
DEFAULT_CONFIG = {'kvm' : {'cpu'     : "host",
                           'machine' : "virt,gic-version=host"},
                  'tcg' : {'cpu'     : "cortex-a57",
                           'machine' : "virt"},
                 }
def get_config_defaults(vmcls, default_config):
    """Fetch the configuration defaults for this VM.

    Takes into consideration the defaults for aarch64 first,
    followed by the defaults for this VM (default_config).

    Returns a new dict; the caller's default_config is not modified.
    (Previously the input dict was updated in place, which silently
    mutated the caller's module-level DEFAULT_CONFIG.)
    """
    config = dict(default_config)
    config.update(aarch_get_config_defaults(vmcls))
    return config
def aarch_get_config_defaults(vmcls):
    """Set the defaults for current version of QEMU.

    Queries the QEMU binary version (via basevm) and returns a config
    dict suitable for it.  Exits with code 1 when the binary is older
    than QEMU_AARCH64_MIN_VERSION.
    """
    # Copy: updating CURRENT_CONFIG in place would leak the
    # backwards-compatibility values into every later call.
    config = CURRENT_CONFIG.copy()
    args = basevm.parse_args(vmcls)
    qemu_path = basevm.get_qemu_path(vmcls.arch, args.build_path)
    qemu_version = basevm.get_qemu_version(qemu_path)
    if qemu_version < QEMU_AARCH64_MIN_VERSION:
        error = "\nThis major version of QEMU {} is too old for aarch64 VMs.\n"\
                "The major version must be at least {}.\n"\
                "To continue with the current build of QEMU, "\
                "please restart with QEMU_LOCAL=1 .\n"
        print(error.format(qemu_version, QEMU_AARCH64_MIN_VERSION))
        exit(1)
    if qemu_version == QEMU_AARCH64_MIN_VERSION:
        # We have an older version of QEMU,
        # set the config values for backwards compatibility.
        if kvm_available('aarch64'):
            config.update(DEFAULT_CONFIG['kvm'])
        else:
            config.update(DEFAULT_CONFIG['tcg'])
    return config
def create_flash_images(flash_dir="./", efi_img=""):
    """Creates the appropriate pflash files for an aarch64 VM.

    flash0.img receives the EFI firmware (zero-padded to 64M),
    flash1.img is a zero-filled 64M file.
    Exits with code 3 when efi_img does not exist.
    """
    flash0_path = get_flash_path(flash_dir, "flash0")
    flash1_path = get_flash_path(flash_dir, "flash1")
    # Validate the firmware image up front, before any dd work,
    # so we do not leave a half-created flash0 behind on error.
    # A reliable way to get the QEMU EFI image is via an installed package or
    # via the bios included with qemu.
    if not os.path.exists(efi_img):
        sys.stderr.write("*** efi argument is invalid ({})\n".format(efi_img))
        sys.stderr.write("*** please check --efi-aarch64 argument or "\
                         "install qemu-efi-aarch64 package\n")
        exit(3)
    # Context manager: the previous code leaked the devnull fd
    # if any of the dd calls raised.
    with open(os.devnull, 'w') as fd_null:
        subprocess.check_call(["dd", "if=/dev/zero",
                               "of={}".format(flash0_path),
                               "bs=1M", "count=64"],
                              stdout=fd_null, stderr=subprocess.STDOUT)
        subprocess.check_call(["dd", "if={}".format(efi_img),
                               "of={}".format(flash0_path),
                               "conv=notrunc"],
                              stdout=fd_null, stderr=subprocess.STDOUT)
        subprocess.check_call(["dd", "if=/dev/zero",
                               "of={}".format(flash1_path),
                               "bs=1M", "count=64"],
                              stdout=fd_null, stderr=subprocess.STDOUT)
def get_pflash_args(flash_dir="./"):
    """Return the list of QEMU argv tokens that attach the two
    aarch64 pflash image files (flash0.img and flash1.img)."""
    paths = [get_flash_path(flash_dir, name)
             for name in ("flash0", "flash1")]
    pflash_args_str = "-drive file={},format=raw,if=pflash "\
                      "-drive file={},format=raw,if=pflash"
    return pflash_args_str.format(*paths).split(" ")
def get_flash_path(flash_dir, name):
    """Return the path of the pflash image file called
    <name>.img inside flash_dir."""
    file_name = "{}.img".format(name)
    return os.path.join(flash_dir, file_name)

View File

@ -23,22 +23,47 @@ from qemu.accel import kvm_available
from qemu.machine import QEMUMachine
import subprocess
import hashlib
import optparse
import argparse
import atexit
import tempfile
import shutil
import multiprocessing
import traceback
import shlex
SSH_KEY = open(os.path.join(os.path.dirname(__file__),
"..", "keys", "id_rsa")).read()
SSH_PUB_KEY = open(os.path.join(os.path.dirname(__file__),
"..", "keys", "id_rsa.pub")).read()
SSH_KEY_FILE = os.path.join(os.path.dirname(__file__),
"..", "keys", "id_rsa")
SSH_PUB_KEY_FILE = os.path.join(os.path.dirname(__file__),
"..", "keys", "id_rsa.pub")
# This is the standard configuration.
# Any or all of these can be overridden by
# passing in a config argument to the VM constructor.
DEFAULT_CONFIG = {
'cpu' : "max",
'machine' : 'pc',
'guest_user' : "qemu",
'guest_pass' : "qemupass",
'root_pass' : "qemupass",
'ssh_key_file' : SSH_KEY_FILE,
'ssh_pub_key_file': SSH_PUB_KEY_FILE,
'memory' : "4G",
'extra_args' : [],
'qemu_args' : "",
'dns' : "",
'ssh_port' : 0,
'install_cmds' : "",
'boot_dev_type' : "block",
'ssh_timeout' : 1,
}
BOOT_DEVICE = {
'block' : "-drive file={},if=none,id=drive0,cache=writeback "\
"-device virtio-blk,drive=drive0,bootindex=0",
'scsi' : "-device virtio-scsi-device,id=scsi "\
"-drive file={},format=raw,if=none,id=hd0 "\
"-device scsi-hd,drive=hd0,bootindex=0",
}
class BaseVM(object):
GUEST_USER = "qemu"
GUEST_PASS = "qemupass"
ROOT_PASS = "qemupass"
envvars = [
"https_proxy",
@ -57,49 +82,112 @@ class BaseVM(object):
poweroff = "poweroff"
# enable IPv6 networking
ipv6 = True
# This is the timeout on the wait for console bytes.
socket_timeout = 120
# Scale up some timeouts under TCG.
# 4 is arbitrary, but greater than 2,
# since we found we need to wait more than twice as long.
tcg_ssh_timeout_multiplier = 4
def __init__(self, debug=False, vcpus=None, genisoimage=None,
build_path=None):
def __init__(self, args, config=None):
self._guest = None
self._genisoimage = genisoimage
self._build_path = build_path
self._genisoimage = args.genisoimage
self._build_path = args.build_path
self._efi_aarch64 = args.efi_aarch64
# Allow input config to override defaults.
self._config = DEFAULT_CONFIG.copy()
if config != None:
self._config.update(config)
self.validate_ssh_keys()
self._tmpdir = os.path.realpath(tempfile.mkdtemp(prefix="vm-test-",
suffix=".tmp",
dir="."))
atexit.register(shutil.rmtree, self._tmpdir)
# Copy the key files to a temporary directory.
# Also chmod the key file to agree with ssh requirements.
self._config['ssh_key'] = \
open(self._config['ssh_key_file']).read().rstrip()
self._config['ssh_pub_key'] = \
open(self._config['ssh_pub_key_file']).read().rstrip()
self._ssh_tmp_key_file = os.path.join(self._tmpdir, "id_rsa")
open(self._ssh_tmp_key_file, "w").write(self._config['ssh_key'])
subprocess.check_call(["chmod", "600", self._ssh_tmp_key_file])
self._ssh_key_file = os.path.join(self._tmpdir, "id_rsa")
open(self._ssh_key_file, "w").write(SSH_KEY)
subprocess.check_call(["chmod", "600", self._ssh_key_file])
self._ssh_tmp_pub_key_file = os.path.join(self._tmpdir, "id_rsa.pub")
open(self._ssh_tmp_pub_key_file,
"w").write(self._config['ssh_pub_key'])
self._ssh_pub_key_file = os.path.join(self._tmpdir, "id_rsa.pub")
open(self._ssh_pub_key_file, "w").write(SSH_PUB_KEY)
self.debug = debug
self.debug = args.debug
self._console_log_path = None
if args.log_console:
self._console_log_path = \
os.path.join(os.path.expanduser("~/.cache/qemu-vm"),
"{}.install.log".format(self.name))
self._stderr = sys.stderr
self._devnull = open(os.devnull, "w")
if self.debug:
self._stdout = sys.stdout
else:
self._stdout = self._devnull
netdev = "user,id=vnet,hostfwd=:127.0.0.1:{}-:22"
self._args = [ \
"-nodefaults", "-m", "4G",
"-cpu", "max",
"-netdev", "user,id=vnet,hostfwd=:127.0.0.1:0-:22" +
(",ipv6=no" if not self.ipv6 else ""),
"-nodefaults", "-m", self._config['memory'],
"-cpu", self._config['cpu'],
"-netdev",
netdev.format(self._config['ssh_port']) +
(",ipv6=no" if not self.ipv6 else "") +
(",dns=" + self._config['dns'] if self._config['dns'] else ""),
"-device", "virtio-net-pci,netdev=vnet",
"-vnc", "127.0.0.1:0,to=20"]
if vcpus and vcpus > 1:
self._args += ["-smp", "%d" % vcpus]
if args.jobs and args.jobs > 1:
self._args += ["-smp", "%d" % args.jobs]
if kvm_available(self.arch):
self._args += ["-enable-kvm"]
else:
logging.info("KVM not available, not using -enable-kvm")
self._data_args = []
if self._config['qemu_args'] != None:
qemu_args = self._config['qemu_args']
qemu_args = qemu_args.replace('\n',' ').replace('\r','')
# shlex groups quoted arguments together
# we need this to keep the quoted args together for when
# the QEMU command is issued later.
args = shlex.split(qemu_args)
self._config['extra_args'] = []
for arg in args:
if arg:
# Preserve quotes around arguments.
# shlex above takes them out, so add them in.
if " " in arg:
arg = '"{}"'.format(arg)
self._config['extra_args'].append(arg)
def validate_ssh_keys(self):
"""Check to see if the ssh key files exist."""
if 'ssh_key_file' not in self._config or\
not os.path.exists(self._config['ssh_key_file']):
raise Exception("ssh key file not found.")
if 'ssh_pub_key_file' not in self._config or\
not os.path.exists(self._config['ssh_pub_key_file']):
raise Exception("ssh pub key file not found.")
def wait_boot(self, wait_string=None):
"""Wait for the standard string we expect
on completion of a normal boot.
The user can also choose to override with an
alternate string to wait for."""
if wait_string is None:
if self.login_prompt is None:
raise Exception("self.login_prompt not defined")
wait_string = self.login_prompt
# Intentionally bump up the default timeout under TCG,
# since the console wait below takes longer.
timeout = self.socket_timeout
if not kvm_available(self.arch):
timeout *= 8
self.console_init(timeout=timeout)
self.console_wait(wait_string)
def _download_with_cache(self, url, sha256sum=None, sha512sum=None):
def check_sha256sum(fname):
if not sha256sum:
@ -131,8 +219,9 @@ class BaseVM(object):
"-t",
"-o", "StrictHostKeyChecking=no",
"-o", "UserKnownHostsFile=" + os.devnull,
"-o", "ConnectTimeout=1",
"-p", self.ssh_port, "-i", self._ssh_key_file]
"-o",
"ConnectTimeout={}".format(self._config["ssh_timeout"]),
"-p", self.ssh_port, "-i", self._ssh_tmp_key_file]
# If not in debug mode, set ssh to quiet mode to
# avoid printing the results of commands.
if not self.debug:
@ -148,13 +237,13 @@ class BaseVM(object):
return r
def ssh(self, *cmd):
return self._ssh_do(self.GUEST_USER, cmd, False)
return self._ssh_do(self._config["guest_user"], cmd, False)
def ssh_root(self, *cmd):
return self._ssh_do("root", cmd, False)
def ssh_check(self, *cmd):
self._ssh_do(self.GUEST_USER, cmd, True)
self._ssh_do(self._config["guest_user"], cmd, True)
def ssh_root_check(self, *cmd):
self._ssh_do("root", cmd, True)
@ -181,14 +270,20 @@ class BaseVM(object):
"virtio-blk,drive=%s,serial=%s,bootindex=1" % (name, name)]
def boot(self, img, extra_args=[]):
args = self._args + [
"-drive", "file=%s,if=none,id=drive0,cache=writeback" % img,
"-device", "virtio-blk,drive=drive0,bootindex=0"]
args += self._data_args + extra_args
boot_dev = BOOT_DEVICE[self._config['boot_dev_type']]
boot_params = boot_dev.format(img)
args = self._args + boot_params.split(' ')
args += self._data_args + extra_args + self._config['extra_args']
logging.debug("QEMU args: %s", " ".join(args))
qemu_path = get_qemu_path(self.arch, self._build_path)
guest = QEMUMachine(binary=qemu_path, args=args)
guest.set_machine('pc')
# Since console_log_path is only set when the user provides the
# log_console option, we will set drain_console=True so the
# console is always drained.
guest = QEMUMachine(binary=qemu_path, args=args,
console_log=self._console_log_path,
drain_console=True)
guest.set_machine(self._config['machine'])
guest.set_console()
try:
guest.launch()
@ -201,6 +296,8 @@ class BaseVM(object):
raise
atexit.register(self.shutdown)
self._guest = guest
# Init console so we can start consuming the chars.
self.console_init()
usernet_info = guest.qmp("human-monitor-command",
command_line="info usernet")
self.ssh_port = None
@ -212,7 +309,9 @@ class BaseVM(object):
raise Exception("Cannot find ssh port from 'info usernet':\n%s" % \
usernet_info)
def console_init(self, timeout = 120):
def console_init(self, timeout = None):
if timeout == None:
timeout = self.socket_timeout
vm = self._guest
vm.console_socket.settimeout(timeout)
self.console_raw_path = os.path.join(vm._temp_dir,
@ -302,7 +401,8 @@ class BaseVM(object):
self.console_send(command)
def console_ssh_init(self, prompt, user, pw):
sshkey_cmd = "echo '%s' > .ssh/authorized_keys\n" % SSH_PUB_KEY.rstrip()
sshkey_cmd = "echo '%s' > .ssh/authorized_keys\n" \
% self._config['ssh_pub_key'].rstrip()
self.console_wait_send("login:", "%s\n" % user)
self.console_wait_send("Password:", "%s\n" % pw)
self.console_wait_send(prompt, "mkdir .ssh\n")
@ -361,23 +461,23 @@ class BaseVM(object):
"local-hostname: {}-guest\n".format(name)])
mdata.close()
udata = open(os.path.join(cidir, "user-data"), "w")
print("guest user:pw {}:{}".format(self.GUEST_USER,
self.GUEST_PASS))
print("guest user:pw {}:{}".format(self._config['guest_user'],
self._config['guest_pass']))
udata.writelines(["#cloud-config\n",
"chpasswd:\n",
" list: |\n",
" root:%s\n" % self.ROOT_PASS,
" %s:%s\n" % (self.GUEST_USER,
self.GUEST_PASS),
" root:%s\n" % self._config['root_pass'],
" %s:%s\n" % (self._config['guest_user'],
self._config['guest_pass']),
" expire: False\n",
"users:\n",
" - name: %s\n" % self.GUEST_USER,
" - name: %s\n" % self._config['guest_user'],
" sudo: ALL=(ALL) NOPASSWD:ALL\n",
" ssh-authorized-keys:\n",
" - %s\n" % SSH_PUB_KEY,
" - %s\n" % self._config['ssh_pub_key'],
" - name: root\n",
" ssh-authorized-keys:\n",
" - %s\n" % SSH_PUB_KEY,
" - %s\n" % self._config['ssh_pub_key'],
"locale: en_US.UTF-8\n"])
proxy = os.environ.get("http_proxy")
if not proxy is None:
@ -390,7 +490,6 @@ class BaseVM(object):
cwd=cidir,
stdin=self._devnull, stdout=self._stdout,
stderr=self._stdout)
return os.path.join(cidir, "cloud-init.iso")
def get_qemu_path(arch, build_path=None):
@ -406,58 +505,121 @@ def get_qemu_path(arch, build_path=None):
qemu_path = "qemu-system-" + arch
return qemu_path
def get_qemu_version(qemu_path):
"""Get the version number from the current QEMU,
and return the major number."""
output = subprocess.check_output([qemu_path, '--version'])
version_line = output.decode("utf-8")
version_num = re.split(' |\(', version_line)[3].split('.')[0]
return int(version_num)
def parse_config(config, args):
""" Parse yaml config and populate our config structure.
The yaml config allows the user to override the
defaults for VM parameters. In many cases these
defaults can be overridden without rebuilding the VM."""
if args.config:
config_file = args.config
elif 'QEMU_CONFIG' in os.environ:
config_file = os.environ['QEMU_CONFIG']
else:
return config
if not os.path.exists(config_file):
raise Exception("config file {} does not exist".format(config_file))
# We gracefully handle importing the yaml module
# since it might not be installed.
# If we are here it means the user supplied a .yml file,
# so if the yaml module is not installed we will exit with error.
try:
import yaml
except ImportError:
print("The python3-yaml package is needed "\
"to support config.yaml files")
# Instead of raising an exception we exit to avoid
# a raft of messy (expected) errors to stdout.
exit(1)
with open(config_file) as f:
yaml_dict = yaml.safe_load(f)
if 'qemu-conf' in yaml_dict:
config.update(yaml_dict['qemu-conf'])
else:
raise Exception("config file {} is not valid"\
" missing qemu-conf".format(config_file))
return config
def parse_args(vmcls):
def get_default_jobs():
if multiprocessing.cpu_count() > 1:
if kvm_available(vmcls.arch):
return multiprocessing.cpu_count() // 2
elif os.uname().machine == "x86_64" and \
vmcls.arch in ["aarch64", "x86_64", "i386"]:
# MTTCG is available on these arches and we can allow
# more cores. but only up to a reasonable limit. User
# can always override these limits with --jobs.
return min(multiprocessing.cpu_count() // 2, 8)
else:
return 1
parser = optparse.OptionParser(
description="VM test utility. Exit codes: "
"0 = success, "
"1 = command line error, "
"2 = environment initialization failed, "
"3 = test command failed")
parser.add_option("--debug", "-D", action="store_true",
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Utility for provisioning VMs and running builds",
epilog="""Remaining arguments are passed to the command.
Exit codes: 0 = success, 1 = command line error,
2 = environment initialization failed,
3 = test command failed""")
parser.add_argument("--debug", "-D", action="store_true",
help="enable debug output")
parser.add_option("--image", "-i", default="%s.img" % vmcls.name,
parser.add_argument("--image", "-i", default="%s.img" % vmcls.name,
help="image file name")
parser.add_option("--force", "-f", action="store_true",
parser.add_argument("--force", "-f", action="store_true",
help="force build image even if image exists")
parser.add_option("--jobs", type=int, default=get_default_jobs(),
parser.add_argument("--jobs", type=int, default=get_default_jobs(),
help="number of virtual CPUs")
parser.add_option("--verbose", "-V", action="store_true",
parser.add_argument("--verbose", "-V", action="store_true",
help="Pass V=1 to builds within the guest")
parser.add_option("--build-image", "-b", action="store_true",
parser.add_argument("--build-image", "-b", action="store_true",
help="build image")
parser.add_option("--build-qemu",
parser.add_argument("--build-qemu",
help="build QEMU from source in guest")
parser.add_option("--build-target",
parser.add_argument("--build-target",
help="QEMU build target", default="check")
parser.add_option("--build-path", default=None,
parser.add_argument("--build-path", default=None,
help="Path of build directory, "\
"for using build tree QEMU binary. ")
parser.add_option("--interactive", "-I", action="store_true",
parser.add_argument("--interactive", "-I", action="store_true",
help="Interactively run command")
parser.add_option("--snapshot", "-s", action="store_true",
parser.add_argument("--snapshot", "-s", action="store_true",
help="run tests with a snapshot")
parser.add_option("--genisoimage", default="genisoimage",
parser.add_argument("--genisoimage", default="genisoimage",
help="iso imaging tool")
parser.disable_interspersed_args()
parser.add_argument("--config", "-c", default=None,
help="Provide config yaml for configuration. "\
"See config_example.yaml for example.")
parser.add_argument("--efi-aarch64",
default="/usr/share/qemu-efi-aarch64/QEMU_EFI.fd",
help="Path to efi image for aarch64 VMs.")
parser.add_argument("--log-console", action="store_true",
help="Log console to file.")
parser.add_argument("commands", nargs="*", help="""Remaining
commands after -- are passed to command inside the VM""")
return parser.parse_args()
def main(vmcls):
def main(vmcls, config=None):
try:
args, argv = parse_args(vmcls)
if not argv and not args.build_qemu and not args.build_image:
if config == None:
config = DEFAULT_CONFIG
args = parse_args(vmcls)
if not args.commands and not args.build_qemu and not args.build_image:
print("Nothing to do?")
return 1
config = parse_config(config, args)
logging.basicConfig(level=(logging.DEBUG if args.debug
else logging.WARN))
vm = vmcls(debug=args.debug, vcpus=args.jobs,
genisoimage=args.genisoimage, build_path=args.build_path)
vm = vmcls(args, config=config)
if args.build_image:
if os.path.exists(args.image) and not args.force:
sys.stderr.writelines(["Image file exists: %s\n" % args.image,
@ -467,12 +629,12 @@ def main(vmcls):
if args.build_qemu:
vm.add_source_dir(args.build_qemu)
cmd = [vm.BUILD_SCRIPT.format(
configure_opts = " ".join(argv),
configure_opts = " ".join(args.commands),
jobs=int(args.jobs),
target=args.build_target,
verbose = "V=1" if args.verbose else "")]
else:
cmd = argv
cmd = args.commands
img = args.image
if args.snapshot:
img += ",snapshot=on"

View File

@ -0,0 +1,51 @@
# CentOS aarch64 image kickstart file.
# This file is used by the CentOS installer to
# script the generation of the image.
#
# Copyright 2020 Linaro
#
ignoredisk --only-use=vda
# System bootloader configuration
bootloader --append=" crashkernel=auto" --location=mbr --boot-drive=vda
autopart --type=plain
# Partition clearing information
clearpart --linux --initlabel --drives=vda
# Use text mode install
text
repo --name="AppStream" --baseurl=file:///run/install/repo/AppStream
# Use CDROM installation media
cdrom
# Keyboard layouts
keyboard --vckeymap=us --xlayouts=''
# System language
lang en_US.UTF-8
# Network information
network --bootproto=dhcp --device=enp0s1 --onboot=off --ipv6=auto --no-activate
network --hostname=localhost.localdomain
# Run the Setup Agent on first boot
firstboot --enable
# Do not configure the X Window System
skipx
# System services
services --enabled="chronyd"
# System timezone
timezone America/New_York --isUtc
# Shutdown after installation is complete.
shutdown
%packages
@^server-product-environment
kexec-tools
%end
%addon com_redhat_kdump --enable --reserve-mb='auto'
%end
%anaconda
pwpolicy root --minlen=6 --minquality=1 --notstrict --nochanges --notempty
pwpolicy user --minlen=6 --minquality=1 --notstrict --nochanges --emptyok
pwpolicy luks --minlen=6 --minquality=1 --notstrict --nochanges --notempty
%end

227
tests/vm/centos.aarch64 Executable file
View File

@ -0,0 +1,227 @@
#!/usr/bin/env python3
#
# Centos aarch64 image
#
# Copyright 2020 Linaro
#
# Authors:
# Robert Foley <robert.foley@linaro.org>
# Originally based on ubuntu.aarch64
#
# This code is licensed under the GPL version 2 or later. See
# the COPYING file in the top-level directory.
#
import os
import sys
import subprocess
import basevm
import time
import traceback
import aarch64vm
# CentOS-specific overrides merged on top of the shared aarch64/basevm
# defaults (see aarch64vm.get_config_defaults / basevm.DEFAULT_CONFIG).
DEFAULT_CONFIG = {
    'cpu'          : "max",
    'machine'      : "virt,gic-version=max",
    # One-off provisioning commands (comma separated): build
    # prerequisites, the python3 alternative, and docker-ce.
    'install_cmds' : "yum install -y make git python3 gcc gcc-c++ flex bison, "\
                     "yum install -y glib2-devel pixman-devel zlib-devel, "\
                     "yum install -y perl-Test-Harness, "\
                     "alternatives --set python /usr/bin/python3, "\
                     "sudo dnf config-manager "\
                     "--add-repo=https://download.docker.com/linux/centos/docker-ce.repo,"\
                     "sudo dnf install --nobest -y docker-ce.aarch64,"\
                     "systemctl enable docker",
    # We increase beyond the default time since during boot
    # it can take some time (many seconds) to log into the VM.
    'ssh_timeout'  : 60,
}
class CentosAarch64VM(basevm.BaseVM):
    # Identity used by the basevm machinery (image file name, qemu arch).
    name = "centos.aarch64"
    arch = "aarch64"
    # Serial-console strings we synchronize on.
    login_prompt = "localhost login:"
    prompt = '[root@localhost ~]#'
    # CentOS 8 install DVD downloaded and booted to build the base image.
    image_name = "CentOS-8-aarch64-1905-dvd1.iso"
    image_link = "http://mirrors.usc.edu/pub/linux/distributions/centos/8.0.1905/isos/aarch64/"
    image_link += image_name
    # Shell script run inside the guest to build QEMU.
    # NOTE(review): assumes the source tarball is attached as /dev/vdb
    # — confirm against how basevm adds the source drive.
    BUILD_SCRIPT = """
        set -e;
        cd $(mktemp -d);
        sudo chmod a+r /dev/vdb;
        tar --checkpoint=.10 -xf /dev/vdb;
        ./configure {configure_opts};
        make --output-sync {target} -j{jobs} {verbose};
    """
def set_key_perm(self):
    """Set permissions properly on certain files to allow
    ssh access.

    restorecon restores the SELinux security context of the
    authorized_keys files written during install; without this
    sshd may refuse key-based logins on CentOS."""
    self.console_wait_send(self.prompt,
                           "/usr/sbin/restorecon -R -v /root/.ssh\n")
    self.console_wait_send(self.prompt,
                           "/usr/sbin/restorecon -R -v "\
                           "/home/{}/.ssh\n".format(self._config["guest_user"]))
def create_kickstart(self):
    """Generate the kickstart file used to generate the centos image.

    Copies the template kickstart out of the source tree, appends the
    root/guest credentials and the ssh public keys as a %post section,
    then packs the result into ks.iso with volume id "OEMDRV" so the
    installer runs unattended."""
    # Start with the template for the kickstart.
    # NOTE(review): relative path assumes the cwd is the build
    # directory next to the source tree — confirm against the caller.
    ks_file = "../tests/vm/centos-8-aarch64.ks"
    subprocess.check_call("cp {} ./ks.cfg".format(ks_file), shell=True)
    # Append the ssh keys to the kickstart file
    # as the post processing phase of installation.
    with open("ks.cfg", "a") as f:
        # Add in the root pw and guest user.
        rootpw = "rootpw --plaintext {}\n"
        f.write(rootpw.format(self._config["root_pass"]))
        add_user = "user --groups=wheel --name={} "\
                   "--password={} --plaintext\n"
        f.write(add_user.format(self._config["guest_user"],
                                self._config["guest_pass"]))
        # Add the ssh keys.
        f.write("%post --log=/root/ks-post.log\n")
        f.write("mkdir -p /root/.ssh\n")
        addkey = 'echo "{}" >> /root/.ssh/authorized_keys\n'
        addkey_cmd = addkey.format(self._config["ssh_pub_key"])
        f.write(addkey_cmd)
        f.write('mkdir -p /home/{}/.ssh\n'.format(self._config["guest_user"]))
        addkey = 'echo "{}" >> /home/{}/.ssh/authorized_keys\n'
        addkey_cmd = addkey.format(self._config["ssh_pub_key"],
                                   self._config["guest_user"])
        f.write(addkey_cmd)
        f.write("%end\n")
    # Take our kickstart file and create an .iso from it.
    # The .iso will be provided to qemu as we boot
    # from the install dvd.
    # Anaconda will recognize the label "OEMDRV" and will
    # start the automated installation.
    gen_iso_img = 'genisoimage -output ks.iso -volid "OEMDRV" ks.cfg'
    subprocess.check_call(gen_iso_img, shell=True)
def wait_for_shutdown(self):
    """We wait for qemu to shutdown the VM and exit.
    While this happens we display the console view
    for easier debugging."""
    # The image creation is essentially done,
    # so whether or not the wait is successful we want to
    # wait for qemu to exit (the self.wait()) before we return.
    try:
        self.console_wait("reboot: Power down")
    except Exception as e:
        sys.stderr.write("Exception hit\n")
        # NOTE(review): SystemExit(0) is treated as a clean shutdown —
        # presumably raised somewhere inside console_wait(); confirm
        # against basevm before changing this handling.
        if isinstance(e, SystemExit) and e.code == 0:
            return 0
        traceback.print_exc()
    finally:
        self.wait()
def build_base_image(self, dest_img):
    """Run the centos installer to produce a base image named dest_img."""
    # Install into a temporary file and only rename to the real
    # destination once the installer has finished.
    tmp_img = dest_img + ".tmp"
    # Empty image that the installer will use as its target disk.
    subprocess.check_call("qemu-img create {} 50G".format(tmp_img),
                          shell=True)
    # Kickstart file that will be fed to the installer via ks.iso.
    self.create_kickstart()
    # Boot the install dvd with ks.iso attached as the second cdrom.
    installer_img = self._download_with_cache(self.image_link)
    dvd_iso = "centos-8-dvd.iso"
    subprocess.check_call(["cp", "-f", installer_img, dvd_iso])
    boot_args = ("-cdrom ks.iso"
                 " -drive file={},if=none,id=drive1,cache=writeback"
                 " -device virtio-blk,drive=drive1,bootindex=1")
    self.boot(tmp_img, extra_args=boot_args.format(dvd_iso).split(" "))
    self.console_wait_send("change the selection", "\n")
    # The media check takes a long time; hitting esc (chr(27)) twice
    # aborts it, and pausing a bit before each esc is more reliable.
    self.console_wait("Checking")
    time.sleep(5)
    self.console_wait_send("Checking", chr(27))
    time.sleep(5)
    self.console_wait_send("Checking", chr(27))
    print("Found Checking")
    # Give sufficient time for the installer to create the image.
    self.console_init(timeout=7200)
    self.wait_for_shutdown()
    os.rename(tmp_img, dest_img)
    print("Done with base image build: {}".format(dest_img))
def check_create_base_img(self, img_base, img_dest):
    """Ensure a base image exists, then copy it to img_dest.

    The base image is built with the installer only when img_base is
    missing; reusing an existing base image cuts down on install time
    when image creation has to be restarted, since base image creation
    can take a very long time.
    """
    if not os.path.exists(img_base):
        print("Generate new base image: {}".format(img_base))
        # (dropped the stray trailing semicolon from the original)
        self.build_base_image(img_base)
    else:
        print("Use existing base image: {}".format(img_base))
    # Keep the base image pristine; work proceeds on the copy.
    subprocess.check_call(["cp", img_base, img_dest])
def boot(self, img, extra_args=None):
    """Boot the image, adding the aarch64 pflash and -smp defaults."""
    aarch64vm.create_flash_images(self._tmpdir, self._efi_aarch64)
    pflash_args = aarch64vm.get_pflash_args(self._tmpdir)
    if not extra_args:
        extra_args = pflash_args
    else:
        extra_args.extend(pflash_args)
    # We always add these performance tweaks
    # because without them, we boot so slowly that we
    # can time out finding the boot efi device.
    smp_already_set = ('-smp' in extra_args or
                       '-smp' in self._config['extra_args'] or
                       '-smp' in self._args)
    if not smp_already_set:
        # Only add if not already there to give caller option to change it.
        extra_args.extend(["-smp", "8"])
    # boot() is overridden because aarch64 needs additional parameters;
    # delegate the rest to the base class.
    super(CentosAarch64VM, self).boot(img, extra_args=extra_args)
def build_image(self, img):
    """Create the final image from (a copy of) the base image.

    Boots the freshly installed system once to finish configuration:
    logs in as root on the console, enables and brings up the network
    adapter, grants passwordless sudo, then issues the configured
    install_cmds over ssh before powering off.  Returns 0 on success.
    """
    img_tmp = img + ".tmp"
    self.check_create_base_img(img + ".base", img_tmp)
    # Boot the new image for the first time to finish installation.
    self.boot(img_tmp)
    self.console_init()
    self.console_wait_send(self.login_prompt, "root\n")
    self.console_wait_send("Password:",
                           "{}\n".format(self._config["root_pass"]))
    self.set_key_perm()
    self.console_wait_send(self.prompt, "rpm -q centos-release\n")
    # The installed adapter defaults to ONBOOT=no; flip it so the
    # network comes up on every subsequent boot.
    enable_adapter = "sed -i 's/ONBOOT=no/ONBOOT=yes/g'" \
                     " /etc/sysconfig/network-scripts/ifcfg-enp0s1\n"
    self.console_wait_send(self.prompt, enable_adapter)
    self.console_wait_send(self.prompt, "ifup enp0s1\n")
    # NOTE(review): the sudoers entry hard-codes user "qemu"; it may
    # not match a custom guest_user from the config — confirm.
    self.console_wait_send(self.prompt,
                           'echo "qemu ALL=(ALL) NOPASSWD:ALL" | '\
                           'sudo tee /etc/sudoers.d/qemu\n')
    self.console_wait(self.prompt)
    # Rest of the commands we issue through ssh.
    self.wait_ssh(wait_root=True)
    # If the user chooses *not* to do the second phase,
    # then we will jump right to the graceful shutdown
    if self._config['install_cmds'] != "":
        install_cmds = self._config['install_cmds'].split(',')
        for cmd in install_cmds:
            self.ssh_root(cmd)
    self.ssh_root("poweroff")
    self.wait_for_shutdown()
    os.rename(img_tmp, img)
    print("image creation complete: {}".format(img))
    return 0
if __name__ == "__main__":
    # Merge the aarch64-specific defaults into this image's config and
    # hand control to the shared basevm command-line entry point.
    defaults = aarch64vm.get_config_defaults(CentosAarch64VM, DEFAULT_CONFIG)
    sys.exit(basevm.main(CentosAarch64VM, defaults))

View File

@ -0,0 +1,51 @@
#
# Example yaml for use by any of the scripts in tests/vm.
# Can be provided as an environment variable QEMU_CONFIG
#
qemu-conf:
# If any of the below are not provided, we will just use the qemu defaults.
# Login username and password (the user has to be sudo-enabled)
guest_user: qemu
guest_pass: "qemupass"
# Password for root user can be different from guest.
root_pass: "qemupass"
# If one key is provided, both must be provided.
#ssh_key: /complete/path/of/your/keyfile/id_rsa
#ssh_pub_key: /complete/path/of/your/keyfile/id_rsa.pub
cpu: max
machine: virt,gic-version=max
memory: 16G
# The below is an example of how to configure NUMA topology with
# 4 NUMA nodes and 2 different NUMA distances.
qemu_args: "-smp cpus=16,sockets=2,cores=8
-numa node,cpus=0-3,nodeid=0 -numa node,cpus=4-7,nodeid=1
-numa node,cpus=8-11,nodeid=2 -numa node,cpus=12-15,nodeid=3
-numa dist,src=0,dst=1,val=15 -numa dist,src=2,dst=3,val=15
-numa dist,src=0,dst=2,val=20 -numa dist,src=0,dst=3,val=20
-numa dist,src=1,dst=2,val=20 -numa dist,src=1,dst=3,val=20"
# By default we do not set the DNS.
# You can override the default by setting the value below.
#dns: 1.234.567.89
# By default we will use a "block" device, but
# you can also boot from a "scsi" device.
# Just keep in mind your scripts might need to change
# As you will have /dev/sda instead of /dev/vda (for block device)
boot_dev_type: "block"
# By default the ssh port is not fixed.
# A fixed ssh port makes it easier for automated tests.
#ssh_port: 5555
# To install a different set of packages, provide a command to issue
#install_cmds: "apt-get update ; apt-get build-dep -y qemu"
# Or to skip the install entirely, just provide ""
#install_cmds: ""

View File

@ -0,0 +1,50 @@
#
# Example yaml for use by any of the x86 based scripts in tests/vm.
# Can be provided as an environment variable QEMU_CONFIG
#
qemu-conf:
# If any of the below are not provided, we will just use the qemu defaults.
# Login username and password (the user has to be sudo-enabled)
guest_user: "qemu"
guest_pass: "qemupass"
# Password for root user can be different from guest.
root_pass: "qemupass"
# Provide default ssh keys of current user.
# You need to edit the below for your user.
#ssh_key_file: /home/<user>/.ssh/id_rsa
#ssh_pub_key_file: /home/<user>/.ssh/id_rsa.pub
cpu: max
machine: pc
memory: 8G
# The below is an example of how to configure NUMA topology with
# 4 NUMA nodes and 2 different NUMA distances.
qemu_args: "-smp cpus=8,sockets=2,cores=4
-object memory-backend-ram,size=4G,policy=bind,host-nodes=0,id=ram-node0
-object memory-backend-ram,size=4G,policy=bind,host-nodes=0,id=ram-node1
-object memory-backend-ram,size=4G,policy=bind,host-nodes=1,id=ram-node2
-object memory-backend-ram,size=4G,policy=bind,host-nodes=1,id=ram-node3
-numa node,cpus=0-1,nodeid=0 -numa node,cpus=2-3,nodeid=1
-numa node,cpus=4-5,nodeid=2 -numa node,cpus=6-7,nodeid=3
-numa dist,src=0,dst=1,val=15 -numa dist,src=2,dst=3,val=15
-numa dist,src=0,dst=2,val=20 -numa dist,src=0,dst=3,val=20
-numa dist,src=1,dst=2,val=20 -numa dist,src=1,dst=3,val=20"
# By default we do not set the DNS.
# You can override the default by setting the value below.
#dns: "1.234.567.89"
# By default we will use a "block" device, but
# you can also boot from a "scsi" device.
# Just keep in mind your scripts might need to change
# As you will have /dev/sda instead of /dev/vda (for block device)
boot_dev_type: "block"
# By default the ssh port is not fixed.
# A fixed ssh port makes it easier for automated tests.
ssh_port: 5555

View File

@ -108,20 +108,20 @@ class FedoraVM(basevm.BaseVM):
self.console_wait_send("7) [!] Root password", "7\n")
self.console_wait("Password:")
self.console_send("%s\n" % self.ROOT_PASS)
self.console_send("%s\n" % self._config["root_pass"])
self.console_wait("Password (confirm):")
self.console_send("%s\n" % self.ROOT_PASS)
self.console_send("%s\n" % self._config["root_pass"])
self.console_wait_send("8) [ ] User creation", "8\n")
self.console_wait_send("1) [ ] Create user", "1\n")
self.console_wait_send("3) User name", "3\n")
self.console_wait_send("ENTER:", "%s\n" % self.GUEST_USER)
self.console_wait_send("ENTER:", "%s\n" % self._config["guest_user"])
self.console_wait_send("4) [ ] Use password", "4\n")
self.console_wait_send("5) Password", "5\n")
self.console_wait("Password:")
self.console_send("%s\n" % self.GUEST_PASS)
self.console_send("%s\n" % self._config["guest_pass"])
self.console_wait("Password (confirm):")
self.console_send("%s\n" % self.GUEST_PASS)
self.console_send("%s\n" % self._config["guest_pass"])
self.console_wait_send("7) Groups", "c\n")
while True:
@ -139,7 +139,7 @@ class FedoraVM(basevm.BaseVM):
if good:
break
time.sleep(10)
self.console_send("r\n" % self.GUEST_PASS)
self.console_send("r\n" % self._config["guest_pass"])
self.console_wait_send("'b' to begin install", "b\n")
@ -150,12 +150,13 @@ class FedoraVM(basevm.BaseVM):
# setup qemu user
prompt = " ~]$"
self.console_ssh_init(prompt, self.GUEST_USER, self.GUEST_PASS)
self.console_ssh_init(prompt, self._config["guest_user"],
self._config["guest_pass"])
self.console_wait_send(prompt, "exit\n")
# setup root user
prompt = " ~]#"
self.console_ssh_init(prompt, "root", self.ROOT_PASS)
self.console_ssh_init(prompt, "root", self._config["root_pass"])
self.console_sshd_config(prompt)
# setup virtio-blk #1 (tarfile)

View File

@ -113,9 +113,9 @@ class FreeBSDVM(basevm.BaseVM):
# post-install configuration
self.console_wait("New Password:")
self.console_send("%s\n" % self.ROOT_PASS)
self.console_send("%s\n" % self._config["root_pass"])
self.console_wait("Retype New Password:")
self.console_send("%s\n" % self.ROOT_PASS)
self.console_send("%s\n" % self._config["root_pass"])
self.console_wait_send("Network Configuration", "\n")
self.console_wait_send("IPv4", "y")
@ -134,9 +134,9 @@ class FreeBSDVM(basevm.BaseVM):
# qemu user
self.console_wait_send("Add User Accounts", "y")
self.console_wait("Username")
self.console_send("%s\n" % self.GUEST_USER)
self.console_send("%s\n" % self._config["guest_user"])
self.console_wait("Full name")
self.console_send("%s\n" % self.GUEST_USER)
self.console_send("%s\n" % self._config["guest_user"])
self.console_wait_send("Uid", "\n")
self.console_wait_send("Login group", "\n")
self.console_wait_send("Login group", "\n")
@ -148,9 +148,9 @@ class FreeBSDVM(basevm.BaseVM):
self.console_wait_send("Use an empty password", "\n")
self.console_wait_send("Use a random password", "\n")
self.console_wait("Enter password:")
self.console_send("%s\n" % self.GUEST_PASS)
self.console_send("%s\n" % self._config["guest_pass"])
self.console_wait("Enter password again:")
self.console_send("%s\n" % self.GUEST_PASS)
self.console_send("%s\n" % self._config["guest_pass"])
self.console_wait_send("Lock out", "\n")
self.console_wait_send("OK", "yes\n")
self.console_wait_send("Add another user", "no\n")
@ -164,12 +164,12 @@ class FreeBSDVM(basevm.BaseVM):
# setup qemu user
prompt = "$"
self.console_ssh_init(prompt, self.GUEST_USER, self.GUEST_PASS)
self.console_ssh_init(prompt, self._config["guest_user"], self._config["guest_pass"])
self.console_wait_send(prompt, "exit\n")
# setup root user
prompt = "root@freebsd:~ #"
self.console_ssh_init(prompt, "root", self.ROOT_PASS)
self.console_ssh_init(prompt, "root", self._config["root_pass"])
self.console_sshd_config(prompt)
# setup serial console

View File

@ -120,24 +120,24 @@ class NetBSDVM(basevm.BaseVM):
self.console_wait_send("d: Change root password", "d\n")
self.console_wait_send("a: Yes", "a\n")
self.console_wait("New password:")
self.console_send("%s\n" % self.ROOT_PASS)
self.console_send("%s\n" % self._config["root_pass"])
self.console_wait("New password:")
self.console_send("%s\n" % self.ROOT_PASS)
self.console_send("%s\n" % self._config["root_pass"])
self.console_wait("Retype new password:")
self.console_send("%s\n" % self.ROOT_PASS)
self.console_send("%s\n" % self._config["root_pass"])
self.console_wait_send("o: Add a user", "o\n")
self.console_wait("username")
self.console_send("%s\n" % self.GUEST_USER)
self.console_send("%s\n" % self._config["guest_user"])
self.console_wait("to group wheel")
self.console_wait_send("a: Yes", "a\n")
self.console_wait_send("a: /bin/sh", "a\n")
self.console_wait("New password:")
self.console_send("%s\n" % self.GUEST_PASS)
self.console_send("%s\n" % self._config["guest_pass"])
self.console_wait("New password:")
self.console_send("%s\n" % self.GUEST_PASS)
self.console_send("%s\n" % self._config["guest_pass"])
self.console_wait("Retype new password:")
self.console_send("%s\n" % self.GUEST_PASS)
self.console_send("%s\n" % self._config["guest_pass"])
self.console_wait_send("a: Configure network", "a\n")
self.console_wait_send("a: vioif0", "a\n")
@ -170,12 +170,13 @@ class NetBSDVM(basevm.BaseVM):
# setup qemu user
prompt = "localhost$"
self.console_ssh_init(prompt, self.GUEST_USER, self.GUEST_PASS)
self.console_ssh_init(prompt, self._config["guest_user"],
self._config["guest_pass"])
self.console_wait_send(prompt, "exit\n")
# setup root user
prompt = "localhost#"
self.console_ssh_init(prompt, "root", self.ROOT_PASS)
self.console_ssh_init(prompt, "root", self._config["root_pass"])
self.console_sshd_config(prompt)
# setup virtio-blk #1 (tarfile)

View File

@ -98,9 +98,9 @@ class OpenBSDVM(basevm.BaseVM):
self.console_wait_send("Which network interface", "done\n")
self.console_wait_send("DNS domain name", "localnet\n")
self.console_wait("Password for root account")
self.console_send("%s\n" % self.ROOT_PASS)
self.console_send("%s\n" % self._config["root_pass"])
self.console_wait("Password for root account")
self.console_send("%s\n" % self.ROOT_PASS)
self.console_send("%s\n" % self._config["root_pass"])
self.console_wait_send("Start sshd(8)", "yes\n")
self.console_wait_send("X Window System", "\n")
self.console_wait_send("xenodm", "\n")
@ -108,13 +108,13 @@ class OpenBSDVM(basevm.BaseVM):
self.console_wait_send("Which speed", "\n")
self.console_wait("Setup a user")
self.console_send("%s\n" % self.GUEST_USER)
self.console_send("%s\n" % self._config["guest_user"])
self.console_wait("Full name")
self.console_send("%s\n" % self.GUEST_USER)
self.console_send("%s\n" % self._config["guest_user"])
self.console_wait("Password")
self.console_send("%s\n" % self.GUEST_PASS)
self.console_send("%s\n" % self._config["guest_pass"])
self.console_wait("Password")
self.console_send("%s\n" % self.GUEST_PASS)
self.console_send("%s\n" % self._config["guest_pass"])
self.console_wait_send("Allow root ssh login", "yes\n")
self.console_wait_send("timezone", "UTC\n")
@ -135,12 +135,13 @@ class OpenBSDVM(basevm.BaseVM):
# setup qemu user
prompt = "$"
self.console_ssh_init(prompt, self.GUEST_USER, self.GUEST_PASS)
self.console_ssh_init(prompt, self._config["guest_user"],
self._config["guest_pass"])
self.console_wait_send(prompt, "exit\n")
# setup root user
prompt = "openbsd#"
self.console_ssh_init(prompt, "root", self.ROOT_PASS)
self.console_ssh_init(prompt, "root", self._config["root_pass"])
self.console_sshd_config(prompt)
# setup virtio-blk #1 (tarfile)

68
tests/vm/ubuntu.aarch64 Executable file
View File

@ -0,0 +1,68 @@
#!/usr/bin/env python3
#
# Ubuntu aarch64 image
#
# Copyright 2020 Linaro
#
# Authors:
# Robert Foley <robert.foley@linaro.org>
# Originally based on ubuntu.i386 Fam Zheng <famz@redhat.com>
#
# This code is licensed under the GPL version 2 or later. See
# the COPYING file in the top-level directory.
#
import sys
import basevm
import aarch64vm
import ubuntuvm
# Per-image defaults merged with the user's QEMU_CONFIG by basevm.main().
DEFAULT_CONFIG = {
    'cpu' : "cortex-a57",
    'machine' : "virt,gic-version=3",
    # Commands run inside the guest to prepare it for building QEMU.
    'install_cmds' : "apt-get update,"\
                     "apt-get build-dep -y --arch-only qemu,"\
                     "apt-get install -y libfdt-dev pkg-config language-pack-en",
    # We increase beyond the default time since during boot
    # it can take some time (many seconds) to log into the VM
    # especially using softmmu.
    'ssh_timeout' : 60,
}
class UbuntuAarch64VM(ubuntuvm.UbuntuVM):
    """Ubuntu 18.04 aarch64 cloud-image VM for QEMU build testing."""
    name = "ubuntu.aarch64"
    arch = "aarch64"
    image_name = "ubuntu-18.04-server-cloudimg-arm64.img"
    image_link = "https://cloud-images.ubuntu.com/releases/18.04/release/" + image_name
    image_sha256 = "0fdcba761965735a8a903d8b88df8e47f156f48715c00508e4315c506d7d3cb1"
    BUILD_SCRIPT = """
        set -e;
        cd $(mktemp -d);
        sudo chmod a+r /dev/vdb;
        tar --checkpoint=.10 -xf /dev/vdb;
        ./configure {configure_opts};
        make --output-sync {target} -j{jobs} {verbose};
    """

    def boot(self, img, extra_args=None):
        """Boot the image, adding the aarch64 pflash and -smp defaults."""
        aarch64vm.create_flash_images(self._tmpdir, self._efi_aarch64)
        pflash_args = aarch64vm.get_pflash_args(self._tmpdir)
        if not extra_args:
            extra_args = pflash_args
        else:
            extra_args.extend(pflash_args)
        # We always add these performance tweaks
        # because without them, we boot so slowly that we
        # can time out finding the boot efi device.
        smp_already_set = ('-smp' in extra_args or
                           '-smp' in self._config['extra_args'] or
                           '-smp' in self._args)
        if not smp_already_set:
            # Only add if not already there to give caller option to change it.
            extra_args.extend(["-smp", "8"])
        # boot() is overridden because aarch64 needs additional
        # parameters; delegate the rest to the base class.
        super(UbuntuAarch64VM, self).boot(img, extra_args=extra_args)
if __name__ == "__main__":
    # Merge the aarch64-specific defaults into this image's config and
    # hand control to the shared basevm command-line entry point.
    defaults = aarch64vm.get_config_defaults(UbuntuAarch64VM, DEFAULT_CONFIG)
    sys.exit(basevm.main(UbuntuAarch64VM, defaults))

View File

@ -11,15 +11,22 @@
# the COPYING file in the top-level directory.
#
import os
import sys
import subprocess
import basevm
import time
import ubuntuvm
class UbuntuX86VM(basevm.BaseVM):
DEFAULT_CONFIG = {
'install_cmds' : "apt-get update,"\
"apt-get build-dep -y qemu,"\
"apt-get install -y libfdt-dev language-pack-en",
}
class UbuntuX86VM(ubuntuvm.UbuntuVM):
name = "ubuntu.i386"
arch = "i386"
image_link="https://cloud-images.ubuntu.com/releases/bionic/"\
"release-20191114/ubuntu-18.04-server-cloudimg-i386.img"
image_sha256="28969840626d1ea80bb249c08eef1a4533e8904aa51a327b40f37ac4b4ff04ef"
BUILD_SCRIPT = """
set -e;
cd $(mktemp -d);
@ -29,34 +36,5 @@ class UbuntuX86VM(basevm.BaseVM):
make --output-sync {target} -j{jobs} {verbose};
"""
def build_image(self, img):
cimg = self._download_with_cache(
"https://cloud-images.ubuntu.com/releases/bionic/release-20191114/ubuntu-18.04-server-cloudimg-i386.img",
sha256sum="28969840626d1ea80bb249c08eef1a4533e8904aa51a327b40f37ac4b4ff04ef")
img_tmp = img + ".tmp"
subprocess.check_call(["cp", "-f", cimg, img_tmp])
self.exec_qemu_img("resize", img_tmp, "50G")
self.boot(img_tmp, extra_args = [
"-device", "VGA",
"-cdrom", self.gen_cloud_init_iso()
])
self.wait_ssh()
self.ssh_root_check("touch /etc/cloud/cloud-init.disabled")
self.ssh_root_check("apt-get update")
self.ssh_root_check("apt-get install -y cloud-initramfs-growroot")
# Don't check the status in case the guest hang up too quickly
self.ssh_root("sync && reboot")
time.sleep(5)
self.wait_ssh()
# The previous update sometimes doesn't survive a reboot, so do it again
self.ssh_root_check("sed -ie s/^#\ deb-src/deb-src/g /etc/apt/sources.list")
self.ssh_root_check("apt-get update")
self.ssh_root_check("apt-get build-dep -y qemu")
self.ssh_root_check("apt-get install -y libfdt-dev language-pack-en")
self.ssh_root("poweroff")
self.wait()
os.rename(img_tmp, img)
return 0
if __name__ == "__main__":
sys.exit(basevm.main(UbuntuX86VM))
sys.exit(basevm.main(UbuntuX86VM, DEFAULT_CONFIG))

60
tests/vm/ubuntuvm.py Normal file
View File

@ -0,0 +1,60 @@
#!/usr/bin/env python3
#
# Ubuntu VM testing library
#
# Copyright 2017 Red Hat Inc.
# Copyright 2020 Linaro
#
# Authors:
# Robert Foley <robert.foley@linaro.org>
# Originally based on ubuntu.i386 Fam Zheng <famz@redhat.com>
#
# This code is licensed under the GPL version 2 or later. See
# the COPYING file in the top-level directory.
import os
import subprocess
import basevm
class UbuntuVM(basevm.BaseVM):
    """Shared logic for the Ubuntu cloud-image based test VMs."""

    def __init__(self, args, config=None):
        # The console login prompt embeds the guest architecture.
        self.login_prompt = "ubuntu-{}-guest login:".format(self.arch)
        basevm.BaseVM.__init__(self, args, config)

    def build_image(self, img):
        """Build an Ubuntu VM image. The child class will
        define the install_cmds to init the VM.

        Downloads the cloud image, grows it, boots it with a
        cloud-init seed iso, waits for cloud-init to finish, disables
        auto-upgrades, runs install_cmds over ssh, then shuts down and
        renames the temp image into place.  Returns 0 on success.
        """
        os_img = self._download_with_cache(self.image_link,
                                           sha256sum=self.image_sha256)
        img_tmp = img + ".tmp"
        subprocess.check_call(["cp", "-f", os_img, img_tmp])
        self.exec_qemu_img("resize", img_tmp, "+50G")
        ci_img = self.gen_cloud_init_iso()
        self.boot(img_tmp, extra_args = [ "-device", "VGA", "-cdrom", ci_img, ])
        # First command we issue is fix for slow ssh login.
        self.wait_ssh(wait_root=True,
                      cmd="chmod -x /etc/update-motd.d/*")
        # Wait for cloud init to finish
        self.wait_ssh(wait_root=True,
                      cmd="ls /var/lib/cloud/instance/boot-finished")
        self.ssh_root("touch /etc/cloud/cloud-init.disabled")
        # Disable auto upgrades.
        # We want to keep the VM system state stable.
        self.ssh_root('sed -ie \'s/"1"/"0"/g\' '\
                      '/etc/apt/apt.conf.d/20auto-upgrades')
        # Raw string: "\ " is an invalid escape sequence in a normal
        # string literal (SyntaxWarning on newer Pythons); the shell
        # command sent to the guest is byte-identical either way.
        self.ssh_root(r"sed -ie s/^#\ deb-src/deb-src/g /etc/apt/sources.list")
        # If the user chooses not to do the install phase,
        # then we will jump right to the graceful shutdown
        if self._config['install_cmds'] != "":
            # Issue the install commands.
            # This can be overriden by the user in the config .yml.
            install_cmds = self._config['install_cmds'].split(',')
            for cmd in install_cmds:
                self.ssh_root(cmd)
        self.graceful_shutdown()
        os.rename(img_tmp, img)
        return 0

View File

@ -52,8 +52,10 @@ typedef struct {
#endif
sigjmp_buf env;
#ifdef CONFIG_TSAN
void *tsan_co_fiber;
void *tsan_caller_fiber;
#endif
#ifdef CONFIG_VALGRIND_H
unsigned int valgrind_stack_id;
@ -77,7 +79,10 @@ union cc_arg {
int i[2];
};
/* QEMU_ALWAYS_INLINE only does so if __OPTIMIZE__, so we cannot use it. */
/*
* QEMU_ALWAYS_INLINE only does so if __OPTIMIZE__, so we cannot use it.
* always_inline is required to avoid TSan runtime fatal errors.
*/
static inline __attribute__((always_inline))
void on_new_fiber(CoroutineUContext *co)
{
@ -87,6 +92,7 @@ void on_new_fiber(CoroutineUContext *co)
#endif
}
/* always_inline is required to avoid TSan runtime fatal errors. */
static inline __attribute__((always_inline))
void finish_switch_fiber(void *fake_stack_save)
{
@ -109,18 +115,29 @@ void finish_switch_fiber(void *fake_stack_save)
#endif
}
static inline __attribute__((always_inline)) void start_switch_fiber(
CoroutineAction action, void **fake_stack_save,
const void *bottom, size_t size, void *new_fiber)
/* always_inline is required to avoid TSan runtime fatal errors. */
static inline __attribute__((always_inline))
void start_switch_fiber_asan(CoroutineAction action, void **fake_stack_save,
const void *bottom, size_t size)
{
#ifdef CONFIG_ASAN
__sanitizer_start_switch_fiber(
action == COROUTINE_TERMINATE ? NULL : fake_stack_save,
bottom, size);
#endif
}
/* always_inline is required to avoid TSan runtime fatal errors. */
static inline __attribute__((always_inline))
void start_switch_fiber_tsan(void **fake_stack_save,
CoroutineUContext *co,
bool caller)
{
#ifdef CONFIG_TSAN
void *curr_fiber =
__tsan_get_current_fiber();
void *new_fiber = caller ?
co->tsan_caller_fiber :
co->tsan_co_fiber;
void *curr_fiber = __tsan_get_current_fiber();
__tsan_acquire(curr_fiber);
*fake_stack_save = curr_fiber;
@ -144,12 +161,9 @@ static void coroutine_trampoline(int i0, int i1)
/* Initialize longjmp environment and switch back the caller */
if (!sigsetjmp(self->env, 0)) {
start_switch_fiber(
COROUTINE_YIELD,
&fake_stack_save,
leader.stack,
leader.stack_size,
self->tsan_caller_fiber);
start_switch_fiber_asan(COROUTINE_YIELD, &fake_stack_save, leader.stack,
leader.stack_size);
start_switch_fiber_tsan(&fake_stack_save, self, true); /* true=caller */
siglongjmp(*(sigjmp_buf *)co->entry_arg, 1);
}
@ -208,10 +222,10 @@ Coroutine *qemu_coroutine_new(void)
/* swapcontext() in, siglongjmp() back out */
if (!sigsetjmp(old_env, 0)) {
start_switch_fiber(
COROUTINE_YIELD,
&fake_stack_save,
co->stack, co->stack_size, co->tsan_co_fiber);
start_switch_fiber_asan(COROUTINE_YIELD, &fake_stack_save, co->stack,
co->stack_size);
start_switch_fiber_tsan(&fake_stack_save,
co, false); /* false=not caller */
#ifdef CONFIG_SAFESTACK
/*
@ -287,8 +301,10 @@ qemu_coroutine_switch(Coroutine *from_, Coroutine *to_,
ret = sigsetjmp(from->env, 0);
if (ret == 0) {
start_switch_fiber(action, &fake_stack_save,
to->stack, to->stack_size, to->tsan_co_fiber);
start_switch_fiber_asan(action, &fake_stack_save, to->stack,
to->stack_size);
start_switch_fiber_tsan(&fake_stack_save,
to, false); /* false=not caller */
siglongjmp(to->env, action);
}

View File

@ -266,12 +266,6 @@ static struct {
{ "usb-redir", "hw-", "usb-redirect" },
{ "qxl-vga", "hw-", "display-qxl" },
{ "qxl", "hw-", "display-qxl" },
{ "virtio-gpu-device", "hw-", "display-virtio-gpu" },
{ "virtio-gpu-pci", "hw-", "display-virtio-gpu" },
{ "virtio-vga", "hw-", "display-virtio-gpu" },
{ "vhost-user-gpu-device", "hw-", "display-virtio-gpu" },
{ "vhost-user-gpu-pci", "hw-", "display-virtio-gpu" },
{ "vhost-user-vga", "hw-", "display-virtio-gpu" },
{ "chardev-braille", "chardev-", "baum" },
};