Fix VMware VGA depth computation
VMware VGA requires that the depth presented to the guest be the same as that of the DisplaySurface it renders to, because it performs a simple memcpy() to blit from one surface to the other. We currently hardcode a 24-bit depth, but the surface allocator for SDL may — and usually will — allocate a surface with a different depth, causing screen corruption. This change allocates the DisplaySurface before initializing the device, so the depth of the DisplaySurface can be used instead of a hardcoded value. Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Commit: a6109ff1b5 (parent: ee3e41a9a0)
@@ -915,8 +915,8 @@ static void vmsvga_reset(struct vmsvga_state_s *s)
     s->width = -1;
     s->height = -1;
     s->svgaid = SVGA_ID;
-    s->depth = 24;
-    s->bypp = (s->depth + 7) >> 3;
+    s->depth = ds_get_bits_per_pixel(s->vga.ds);
+    s->bypp = ds_get_bytes_per_pixel(s->vga.ds);
     s->cursor.on = 0;
     s->redraw_fifo_first = 0;
     s->redraw_fifo_last = 0;
@@ -1114,6 +1114,11 @@ static void vmsvga_init(struct vmsvga_state_s *s, int vga_ram_size)
     s->scratch_size = SVGA_SCRATCH_SIZE;
     s->scratch = qemu_malloc(s->scratch_size * 4);
 
+    s->vga.ds = graphic_console_init(vmsvga_update_display,
+                                     vmsvga_invalidate_display,
+                                     vmsvga_screen_dump,
+                                     vmsvga_text_update, s);
+
     vmsvga_reset(s);
 
     s->fifo_size = SVGA_FIFO_SIZE;
@@ -1124,11 +1129,6 @@ static void vmsvga_init(struct vmsvga_state_s *s, int vga_ram_size)
     vga_init(&s->vga);
     vmstate_register(0, &vmstate_vga_common, &s->vga);
 
-    s->vga.ds = graphic_console_init(vmsvga_update_display,
-                                     vmsvga_invalidate_display,
-                                     vmsvga_screen_dump,
-                                     vmsvga_text_update, s);
-
     vga_init_vbe(&s->vga);
     rom_add_vga(VGABIOS_FILENAME);
 }
|
Loading…
Reference in New Issue
Block a user