accel/tcg/cputlb: avoid recursive BQL (fixes #1706296)

The mmio path (see exec.c:prepare_mmio_access) already protects itself
against recursive locking, and it makes sense to do the same for
io_readx/writex. Otherwise any helper already running with the BQL held
will trip the assertion when it attempts to write to device memory, as
in the bug report.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Cc: Richard Jones <rjones@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: qemu-stable@nongnu.org
Message-Id: <20170921110625.9500-1-alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 8b81253332 (parent 460b6c8e58)
Author: Alex Bennée <alex.bennee@linaro.org>
Date: 2017-09-21 12:06:25 +01:00
Committer: Richard Henderson <richard.henderson@linaro.org>

--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -765,7 +765,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     cpu->mem_io_vaddr = addr;
-    if (mr->global_locking) {
+    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
         qemu_mutex_lock_iothread();
         locked = true;
     }
@@ -800,7 +800,7 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     cpu->mem_io_vaddr = addr;
     cpu->mem_io_pc = retaddr;
-    if (mr->global_locking) {
+    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
         qemu_mutex_lock_iothread();
         locked = true;
     }