i8257: rewrite DMA_schedule to avoid hooking into the CPU loop

The i8257 DMA controller uses an idle bottom half, which by default
does not cause the main loop to exit.  Therefore, the DMA_schedule
function is there to ensure that the CPU relinquishes the iothread
mutex to the iothread.

However, this is not enough since the iothread will call
aio_compute_timeout() and go to sleep again.  In the iothread
world, forcing execution of the idle bottom half is much simpler,
and only requires a call to qemu_notify_event().  Do it, removing
the need for the "cpu_request_exit" pseudo-irq.  The next patch
will remove it.

Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo Bonzini 2015-02-16 14:08:22 +01:00
parent 5f5b5942d5
commit 19d2b5e6ff
5 changed files with 16 additions and 10 deletions
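
For readers unfamiliar with the bottom-half machinery, here is a minimal, self-contained sketch of the pattern the patch relies on. It is not part of the patch: the helper names (dma_rearm, dma_kick, dma_setup) are made up for illustration, while qemu_bh_new(), qemu_bh_schedule_idle() and qemu_notify_event() are the real main-loop calls. An idle bottom half does not by itself force the main loop out of its poll, so a device that wants it serviced right away has to wake the loop explicitly, which is exactly what the new DMA_schedule() does in the hunks below.

/* Illustrative sketch only -- not the i8257 code itself. */
#include "qemu-common.h"      /* bool, basic types */
#include "qemu/main-loop.h"   /* qemu_bh_new(), qemu_notify_event(); pulls in the BH API */

static QEMUBH *dma_bh;          /* idle bottom half that performs the transfers */
static bool dma_bh_scheduled;   /* set while the idle BH is pending */

static void dma_run_bh(void *opaque)
{
    dma_bh_scheduled = false;
    /* ... run the pending DMA channels here ... */
}

/* Re-arm the transfer: an idle BH is cheap, but scheduling it does not
 * make the main loop exit its poll immediately. */
static void dma_rearm(void)
{
    qemu_bh_schedule_idle(dma_bh);
    dma_bh_scheduled = true;
}

/* Ask for the transfer to happen ASAP: instead of pulsing a
 * cpu_request_exit pseudo-irq at the CPU, just wake the main loop so
 * that its poll returns and pending bottom halves, idle ones included,
 * get a chance to run. */
static void dma_kick(void)
{
    if (dma_bh_scheduled) {
        qemu_notify_event();
    }
}

static void dma_setup(void)
{
    dma_bh = qemu_bh_new(dma_run_bh, NULL);
}

In the patch, DMA_run() plays the re-arm role and the reworked DMA_schedule() is the kick.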


@@ -1417,7 +1417,7 @@ static void fdctrl_start_transfer(FDCtrl *fdctrl, int direction)
          * recall us...
          */
         DMA_hold_DREQ(fdctrl->dma_chann);
-        DMA_schedule(fdctrl->dma_chann);
+        DMA_schedule();
     } else {
         /* Start transfer */
         fdctrl_transfer_handler(fdctrl, fdctrl->dma_chann, 0,


@@ -358,6 +358,7 @@ static void channel_run (int ncont, int ichan)
 }
 
 static QEMUBH *dma_bh;
+static bool dma_bh_scheduled;
 
 static void DMA_run (void)
 {
@@ -390,12 +391,15 @@ static void DMA_run (void)
     running = 0;
 out:
-    if (rearm)
+    if (rearm) {
         qemu_bh_schedule_idle(dma_bh);
+        dma_bh_scheduled = true;
+    }
 }
 
 static void DMA_run_bh(void *unused)
 {
+    dma_bh_scheduled = false;
     DMA_run();
 }
@@ -458,12 +462,14 @@ int DMA_write_memory (int nchan, void *buf, int pos, int len)
     return len;
 }
 
-/* request the emulator to transfer a new DMA memory block ASAP */
-void DMA_schedule(int nchan)
+/* request the emulator to transfer a new DMA memory block ASAP (even
+ * if the idle bottom half would not have exited the iothread yet).
+ */
+void DMA_schedule(void)
 {
-    struct dma_cont *d = &dma_controllers[nchan > 3];
-
-    qemu_irq_pulse(*d->cpu_request_exit);
+    if (dma_bh_scheduled) {
+        qemu_notify_event();
+    }
 }
 
 static void dma_reset(void *opaque)


@@ -109,7 +109,7 @@ int DMA_write_memory (int nchan, void *buf, int pos, int size)
 }
 void DMA_hold_DREQ (int nchan) {}
 void DMA_release_DREQ (int nchan) {}
-void DMA_schedule(int nchan) {}
+void DMA_schedule(void) {}
 
 void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit)
 {


@@ -112,7 +112,7 @@ int DMA_write_memory (int nchan, void *buf, int pos, int size)
 }
 void DMA_hold_DREQ (int nchan) {}
 void DMA_release_DREQ (int nchan) {}
-void DMA_schedule(int nchan) {}
+void DMA_schedule(void) {}
 
 void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit)
 {


@@ -112,7 +112,7 @@ int DMA_read_memory (int nchan, void *buf, int pos, int size);
 int DMA_write_memory (int nchan, void *buf, int pos, int size);
 void DMA_hold_DREQ (int nchan);
 void DMA_release_DREQ (int nchan);
-void DMA_schedule(int nchan);
+void DMA_schedule(void);
 void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit);
 void DMA_register_channel (int nchan,
                            DMA_transfer_handler transfer_handler,