- implemented save state function for the disk image modes 'vmware3', 'vmware4' and 'vpc'
- prepared hdimage restore support
- vmware4: code cleanup
Volker Ruppert 2012-09-19 21:05:18 +00:00
parent 5cbf0894ac
commit 50482a9f2b
8 changed files with 248 additions and 215 deletions
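
Note on the save/restore plumbing introduced here: hdimage_restore_handler() rebuilds the backup file path from the parameter name (stripping the leading "bochs." prefix and prepending the restore directory) and calls the image object's restore_state(); hdimage_save_handler() likewise ends by calling save_state() on the computed backup path. In this commit save_state() is implemented for 'vmware3', 'vmware4' and 'vpc' by delegating to hdimage_backup_file() (vmware3 backs up each chain file to "<backup_fname><index>"), while restore is only prepared: the virtual restore_state() default in device_image_t is still a no-op. As a rough illustration of where that stub points, below is a minimal, self-contained sketch of the "copy the backup back over the image" step a flat-image restore could perform. It is not part of the commit; the function name and error handling are assumptions for illustration only, not Bochs API.

// Standalone sketch, not part of the commit: copy a saved backup file back
// over the disk image in fixed-size chunks. A real restore_state() would
// additionally reopen the image and revalidate its header and geometry.
#include <cstdio>
#include <cstddef>

static bool copy_backup_over_image(const char *backup_fname, const char *image_fname)
{
  FILE *src = std::fopen(backup_fname, "rb");
  if (src == NULL) return false;
  FILE *dst = std::fopen(image_fname, "wb");
  if (dst == NULL) { std::fclose(src); return false; }

  char buf[0x10000];                 // 64 KiB copy buffer
  bool ok = true;
  std::size_t n;
  while ((n = std::fread(buf, 1, sizeof(buf), src)) > 0) {
    if (std::fwrite(buf, 1, n, dst) != n) { ok = false; break; }  // short write
  }
  if (std::ferror(src)) ok = false;  // read error

  std::fclose(src);
  if (std::fclose(dst) != 0) ok = false;
  return ok;
}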

View File

@ -2,7 +2,7 @@
// $Id$
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2002-2011 The Bochs Project
// Copyright (C) 2002-2012 The Bochs Project
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@ -168,6 +168,21 @@ Bit64s hdimage_save_handler(void *class_ptr, bx_param_c *param)
return ((device_image_t*)class_ptr)->save_state(path);
}
void hdimage_restore_handler(void *class_ptr, bx_param_c *param, Bit64s value)
{
char imgname[BX_PATHNAME_LEN];
char path[BX_PATHNAME_LEN];
if (value != 0) {
param->get_param_path(imgname, BX_PATHNAME_LEN);
if (!strncmp(imgname, "bochs.", 6)) {
strcpy(imgname, imgname+6);
}
sprintf(path, "%s/%s", SIM->get_param_string(BXPN_RESTORE_PATH)->getptr(), imgname);
((device_image_t*)class_ptr)->restore_state(path);
}
}
bx_bool hdimage_backup_file(int fd, const char *backup_fname)
{
char *buf;
@ -224,7 +239,7 @@ void device_image_t::register_state(bx_list_c *parent)
{
bx_param_bool_c *image = new bx_param_bool_c(parent, "image", NULL, NULL, 0);
// TODO: restore image
image->set_sr_handlers(this, hdimage_save_handler, (param_restore_handler)NULL);
image->set_sr_handlers(this, hdimage_save_handler, hdimage_restore_handler);
}
/*** default_image_t function definitions ***/

View File

@ -2,7 +2,7 @@
// $Id$
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2005-2011 The Bochs Project
// Copyright (C) 2005-2012 The Bochs Project
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@ -125,6 +125,7 @@
int bx_read_image(int fd, Bit64s offset, void *buf, int count);
int bx_write_image(int fd, Bit64s offset, void *buf, int count);
bx_bool hdimage_backup_file(int fd, const char *backup_fname);
class device_image_t
{
@ -157,6 +158,7 @@ class device_image_t
// Save/restore support
virtual void register_state(bx_list_c *parent);
virtual bx_bool save_state(const char *backup_fname) {return 0;}
virtual void restore_state(const char *backup_fname) {}
unsigned cylinders;
unsigned heads;

View File

@ -520,3 +520,18 @@ Bit32u vmware3_image_t::get_capabilities(void)
{
return HDIMAGE_HAS_GEOMETRY;
}
bx_bool vmware3_image_t::save_state(const char *backup_fname)
{
bx_bool ret = 1;
char tempfn[BX_PATHNAME_LEN];
unsigned count = current->header.number_of_chains;
if (count < 1) count = 1;
for (unsigned i = 0; i < count; ++i) {
sprintf(tempfn, "%s%d", backup_fname, i);
ret &= hdimage_backup_file(images[i].fd, tempfn);
if (ret == 0) break;
}
return ret;
}

View File

@ -40,6 +40,7 @@ class vmware3_image_t : public device_image_t
ssize_t read(void* buf, size_t count);
ssize_t write(const void* buf, size_t count);
Bit32u get_capabilities();
bx_bool save_state(const char *backup_fname);
private:
static const off_t INVALID_OFFSET;

View File

@ -41,192 +41,185 @@ const off_t vmware4_image_t::INVALID_OFFSET = (off_t)-1;
const int vmware4_image_t::SECTOR_SIZE = 512;
vmware4_image_t::vmware4_image_t()
: file_descriptor(-1),
tlb(0),
tlb_offset(INVALID_OFFSET),
current_offset(INVALID_OFFSET),
is_dirty(false)
: file_descriptor(-1),
tlb(0),
tlb_offset(INVALID_OFFSET),
current_offset(INVALID_OFFSET),
is_dirty(0)
{
}
vmware4_image_t::~vmware4_image_t()
{
close();
close();
}
int vmware4_image_t::open(const char * pathname)
{
close();
close();
int flags = O_RDWR;
int flags = O_RDWR;
#ifdef O_BINARY
flags |= O_BINARY;
flags |= O_BINARY;
#endif
file_descriptor = ::open(pathname, flags);
file_descriptor = ::open(pathname, flags);
if(!is_open())
return -1;
if (!is_open())
return -1;
if(!read_header())
BX_PANIC(("unable to read vmware4 virtual disk header from file '%s'", pathname));
if (!read_header())
BX_PANIC(("unable to read vmware4 virtual disk header from file '%s'", pathname));
tlb = new Bit8u[(unsigned)header.tlb_size_sectors * SECTOR_SIZE];
if(tlb == 0)
BX_PANIC(("unable to allocate " FMT_LL "d bytes for vmware4 image's tlb", header.tlb_size_sectors * SECTOR_SIZE));
tlb = new Bit8u[(unsigned)header.tlb_size_sectors * SECTOR_SIZE];
if (tlb == 0)
BX_PANIC(("unable to allocate " FMT_LL "d bytes for vmware4 image's tlb", header.tlb_size_sectors * SECTOR_SIZE));
tlb_offset = INVALID_OFFSET;
current_offset = 0;
is_dirty = false;
tlb_offset = INVALID_OFFSET;
current_offset = 0;
is_dirty = 0;
hd_size = header.total_sectors * SECTOR_SIZE;
cylinders = (unsigned)hd_size / (16 * 63);
heads = 16;
spt = 63;
hd_size = header.total_sectors * SECTOR_SIZE;
cylinders = (unsigned)hd_size / (16 * 63);
heads = 16;
spt = 63;
BX_DEBUG(("VMware 4 disk geometry:"));
BX_DEBUG((" .size = " FMT_LL "d", hd_size));
BX_DEBUG((" .cylinders = %d", cylinders));
BX_DEBUG((" .heads = %d", heads));
BX_DEBUG((" .sectors = %d", spt));
BX_DEBUG(("VMware 4 disk geometry:"));
BX_DEBUG((" .size = " FMT_LL "d", hd_size));
BX_DEBUG((" .cylinders = %d", cylinders));
BX_DEBUG((" .heads = %d", heads));
BX_DEBUG((" .sectors = %d", spt));
return 1;
return 1;
}
void vmware4_image_t::close()
{
if(file_descriptor == -1)
return;
if (file_descriptor == -1)
return;
flush();
delete [] tlb; tlb = 0;
flush();
delete [] tlb; tlb = 0;
::close(file_descriptor);
file_descriptor = -1;
::close(file_descriptor);
file_descriptor = -1;
}
Bit64s vmware4_image_t::lseek(Bit64s offset, int whence)
{
switch(whence)
{
case SEEK_SET:
current_offset = (off_t)offset;
return current_offset;
case SEEK_CUR:
current_offset += (off_t)offset;
return current_offset;
case SEEK_END:
current_offset = header.total_sectors * SECTOR_SIZE + (off_t)offset;
return current_offset;
default:
BX_DEBUG(("unknown 'whence' value (%d) when trying to seek vmware4 image", whence));
return INVALID_OFFSET;
}
switch (whence) {
case SEEK_SET:
current_offset = (off_t)offset;
return current_offset;
case SEEK_CUR:
current_offset += (off_t)offset;
return current_offset;
case SEEK_END:
current_offset = header.total_sectors * SECTOR_SIZE + (off_t)offset;
return current_offset;
default:
BX_DEBUG(("unknown 'whence' value (%d) when trying to seek vmware4 image", whence));
return INVALID_OFFSET;
}
}
ssize_t vmware4_image_t::read(void * buf, size_t count)
{
ssize_t total = 0;
while(count > 0)
{
off_t readable = perform_seek();
if(readable == INVALID_OFFSET)
{
BX_DEBUG(("vmware4 disk image read failed on %u bytes at " FMT_LL "d", (unsigned)count, current_offset));
return -1;
}
off_t copysize = ((off_t)count > readable) ? readable : count;
memcpy(buf, tlb + current_offset - tlb_offset, (size_t)copysize);
current_offset += copysize;
total += (long)copysize;
count -= (size_t)copysize;
ssize_t total = 0;
while (count > 0) {
off_t readable = perform_seek();
if (readable == INVALID_OFFSET) {
BX_DEBUG(("vmware4 disk image read failed on %u bytes at " FMT_LL "d", (unsigned)count, current_offset));
return -1;
}
return total;
off_t copysize = ((off_t)count > readable) ? readable : count;
memcpy(buf, tlb + current_offset - tlb_offset, (size_t)copysize);
current_offset += copysize;
total += (long)copysize;
count -= (size_t)copysize;
}
return total;
}
ssize_t vmware4_image_t::write(const void * buf, size_t count)
{
ssize_t total = 0;
while(count > 0)
{
off_t writable = perform_seek();
if(writable == INVALID_OFFSET)
{
BX_DEBUG(("vmware4 disk image write failed on %u bytes at " FMT_LL "d", (unsigned)count, current_offset));
return -1;
}
off_t writesize = ((off_t)count > writable) ? writable : count;
memcpy(tlb + current_offset - tlb_offset, buf, (size_t)writesize);
current_offset += writesize;
total += (long)writesize;
count -= (size_t)writesize;
is_dirty = true;
}
return total;
}
bool vmware4_image_t::is_open() const
{
return (file_descriptor != -1);
}
bool vmware4_image_t::is_valid_header() const
{
if(header.id[0] != 'K' || header.id[1] != 'D' || header.id[2] != 'M' ||
header.id[3] != 'V')
{
BX_DEBUG(("not a vmware4 image"));
return false;
ssize_t total = 0;
while (count > 0) {
off_t writable = perform_seek();
if (writable == INVALID_OFFSET) {
BX_DEBUG(("vmware4 disk image write failed on %u bytes at " FMT_LL "d", (unsigned)count, current_offset));
return -1;
}
if(header.version != 1)
{
BX_DEBUG(("unsupported vmware4 image version"));
return false;
}
off_t writesize = ((off_t)count > writable) ? writable : count;
memcpy(tlb + current_offset - tlb_offset, buf, (size_t)writesize);
return true;
current_offset += writesize;
total += (long)writesize;
count -= (size_t)writesize;
is_dirty = 1;
}
return total;
}
bool vmware4_image_t::read_header()
bx_bool vmware4_image_t::is_open() const
{
if(!is_open())
BX_PANIC(("attempt to read vmware4 header from a closed file"));
return (file_descriptor != -1);
}
if(::read(file_descriptor, &header, sizeof(VM4_Header)) != sizeof(VM4_Header))
return false;
bx_bool vmware4_image_t::is_valid_header() const
{
if (header.id[0] != 'K' || header.id[1] != 'D' || header.id[2] != 'M' ||
header.id[3] != 'V') {
BX_DEBUG(("not a vmware4 image"));
return 0;
}
header.version = dtoh32(header.version);
header.flags = dtoh32(header.flags);
header.total_sectors = dtoh64(header.total_sectors);
header.tlb_size_sectors = dtoh64(header.tlb_size_sectors);
header.description_offset_sectors = dtoh64(header.description_offset_sectors);
header.description_size_sectors = dtoh64(header.description_size_sectors);
header.slb_count = dtoh32(header.slb_count);
header.flb_offset_sectors = dtoh64(header.flb_offset_sectors);
header.flb_copy_offset_sectors = dtoh64(header.flb_copy_offset_sectors);
header.tlb_offset_sectors = dtoh64(header.tlb_offset_sectors);
if (header.version != 1) {
BX_DEBUG(("unsupported vmware4 image version"));
return 0;
}
if(!is_valid_header())
BX_PANIC(("invalid vmware4 virtual disk image"));
return 1;
}
BX_DEBUG(("VM4_Header (size=%u)", (unsigned)sizeof(VM4_Header)));
BX_DEBUG((" .version = %d", header.version));
BX_DEBUG((" .flags = %d", header.flags));
BX_DEBUG((" .total_sectors = " FMT_LL "d", header.total_sectors));
BX_DEBUG((" .tlb_size_sectors = " FMT_LL "d", header.tlb_size_sectors));
BX_DEBUG((" .description_offset_sectors = " FMT_LL "d", header.description_offset_sectors));
BX_DEBUG((" .description_size_sectors = " FMT_LL "d", header.description_size_sectors));
BX_DEBUG((" .slb_count = %d", header.slb_count));
BX_DEBUG((" .flb_offset_sectors = " FMT_LL "d", header.flb_offset_sectors));
BX_DEBUG((" .flb_copy_offset_sectors = " FMT_LL "d", header.flb_copy_offset_sectors));
BX_DEBUG((" .tlb_offset_sectors = " FMT_LL "d", header.tlb_offset_sectors));
bx_bool vmware4_image_t::read_header()
{
if (!is_open())
BX_PANIC(("attempt to read vmware4 header from a closed file"));
return true;
if (::read(file_descriptor, &header, sizeof(VM4_Header)) != sizeof(VM4_Header))
return 0;
header.version = dtoh32(header.version);
header.flags = dtoh32(header.flags);
header.total_sectors = dtoh64(header.total_sectors);
header.tlb_size_sectors = dtoh64(header.tlb_size_sectors);
header.description_offset_sectors = dtoh64(header.description_offset_sectors);
header.description_size_sectors = dtoh64(header.description_size_sectors);
header.slb_count = dtoh32(header.slb_count);
header.flb_offset_sectors = dtoh64(header.flb_offset_sectors);
header.flb_copy_offset_sectors = dtoh64(header.flb_copy_offset_sectors);
header.tlb_offset_sectors = dtoh64(header.tlb_offset_sectors);
if(!is_valid_header())
BX_PANIC(("invalid vmware4 virtual disk image"));
BX_DEBUG(("VM4_Header (size=%u)", (unsigned)sizeof(VM4_Header)));
BX_DEBUG((" .version = %d", header.version));
BX_DEBUG((" .flags = %d", header.flags));
BX_DEBUG((" .total_sectors = " FMT_LL "d", header.total_sectors));
BX_DEBUG((" .tlb_size_sectors = " FMT_LL "d", header.tlb_size_sectors));
BX_DEBUG((" .description_offset_sectors = " FMT_LL "d", header.description_offset_sectors));
BX_DEBUG((" .description_size_sectors = " FMT_LL "d", header.description_size_sectors));
BX_DEBUG((" .slb_count = %d", header.slb_count));
BX_DEBUG((" .flb_offset_sectors = " FMT_LL "d", header.flb_offset_sectors));
BX_DEBUG((" .flb_copy_offset_sectors = " FMT_LL "d", header.flb_copy_offset_sectors));
BX_DEBUG((" .tlb_offset_sectors = " FMT_LL "d", header.tlb_offset_sectors));
return 1;
}
//
@ -235,99 +228,99 @@ bool vmware4_image_t::read_header()
//
off_t vmware4_image_t::perform_seek()
{
if(current_offset == INVALID_OFFSET)
{
BX_DEBUG(("invalid offset specified in vmware4 seek"));
return INVALID_OFFSET;
}
//
// The currently loaded tlb can service the request.
//
if(tlb_offset / (header.tlb_size_sectors * SECTOR_SIZE) == current_offset / (header.tlb_size_sectors * SECTOR_SIZE))
return (header.tlb_size_sectors * SECTOR_SIZE) - (current_offset - tlb_offset);
flush();
Bit64u index = current_offset / (header.tlb_size_sectors * SECTOR_SIZE);
Bit32u slb_index = (Bit32u)(index % header.slb_count);
Bit32u flb_index = (Bit32u)(index / header.slb_count);
Bit32u slb_sector = read_block_index(header.flb_offset_sectors, flb_index);
Bit32u slb_copy_sector = read_block_index(header.flb_copy_offset_sectors, flb_index);
if(slb_sector == 0 && slb_copy_sector == 0)
{
BX_DEBUG(("loaded vmware4 disk image requires un-implemented feature"));
return INVALID_OFFSET;
}
if(slb_sector == 0)
slb_sector = slb_copy_sector;
Bit32u tlb_sector = read_block_index(slb_sector, slb_index);
tlb_offset = index * header.tlb_size_sectors * SECTOR_SIZE;
if(tlb_sector == 0)
{
//
// Allocate a new tlb
//
memset(tlb, 0, (size_t)header.tlb_size_sectors * SECTOR_SIZE);
//
// Instead of doing a write to increase the file size, we could use
// ftruncate but it is not portable.
//
off_t eof = ((::lseek(file_descriptor, 0, SEEK_END) + SECTOR_SIZE - 1) / SECTOR_SIZE) * SECTOR_SIZE;
::write(file_descriptor, tlb, (unsigned)header.tlb_size_sectors * SECTOR_SIZE);
tlb_sector = (Bit32u)eof / SECTOR_SIZE;
write_block_index(slb_sector, slb_index, tlb_sector);
write_block_index(slb_copy_sector, slb_index, tlb_sector);
::lseek(file_descriptor, eof, SEEK_SET);
}
else
{
::lseek(file_descriptor, tlb_sector * SECTOR_SIZE, SEEK_SET);
::read(file_descriptor, tlb, (unsigned)header.tlb_size_sectors * SECTOR_SIZE);
::lseek(file_descriptor, tlb_sector * SECTOR_SIZE, SEEK_SET);
}
if (current_offset == INVALID_OFFSET) {
BX_DEBUG(("invalid offset specified in vmware4 seek"));
return INVALID_OFFSET;
}
//
// The currently loaded tlb can service the request.
//
if (tlb_offset / (header.tlb_size_sectors * SECTOR_SIZE) == current_offset / (header.tlb_size_sectors * SECTOR_SIZE))
return (header.tlb_size_sectors * SECTOR_SIZE) - (current_offset - tlb_offset);
flush();
Bit64u index = current_offset / (header.tlb_size_sectors * SECTOR_SIZE);
Bit32u slb_index = (Bit32u)(index % header.slb_count);
Bit32u flb_index = (Bit32u)(index / header.slb_count);
Bit32u slb_sector = read_block_index(header.flb_offset_sectors, flb_index);
Bit32u slb_copy_sector = read_block_index(header.flb_copy_offset_sectors, flb_index);
if (slb_sector == 0 && slb_copy_sector == 0) {
BX_DEBUG(("loaded vmware4 disk image requires un-implemented feature"));
return INVALID_OFFSET;
}
if (slb_sector == 0)
slb_sector = slb_copy_sector;
Bit32u tlb_sector = read_block_index(slb_sector, slb_index);
tlb_offset = index * header.tlb_size_sectors * SECTOR_SIZE;
if (tlb_sector == 0) {
//
// Allocate a new tlb
//
memset(tlb, 0, (size_t)header.tlb_size_sectors * SECTOR_SIZE);
//
// Instead of doing a write to increase the file size, we could use
// ftruncate but it is not portable.
//
off_t eof = ((::lseek(file_descriptor, 0, SEEK_END) + SECTOR_SIZE - 1) / SECTOR_SIZE) * SECTOR_SIZE;
::write(file_descriptor, tlb, (unsigned)header.tlb_size_sectors * SECTOR_SIZE);
tlb_sector = (Bit32u)eof / SECTOR_SIZE;
write_block_index(slb_sector, slb_index, tlb_sector);
write_block_index(slb_copy_sector, slb_index, tlb_sector);
::lseek(file_descriptor, eof, SEEK_SET);
} else {
::lseek(file_descriptor, tlb_sector * SECTOR_SIZE, SEEK_SET);
::read(file_descriptor, tlb, (unsigned)header.tlb_size_sectors * SECTOR_SIZE);
::lseek(file_descriptor, tlb_sector * SECTOR_SIZE, SEEK_SET);
}
return (header.tlb_size_sectors * SECTOR_SIZE) - (current_offset - tlb_offset);
}
void vmware4_image_t::flush()
{
if(!is_dirty)
return;
if (!is_dirty)
return;
//
// Write dirty sectors to disk first. Assume that the file is already at the
// position for the current tlb.
//
::write(file_descriptor, tlb, (unsigned)header.tlb_size_sectors * SECTOR_SIZE);
is_dirty = false;
//
// Write dirty sectors to disk first. Assume that the file is already at the
// position for the current tlb.
//
::write(file_descriptor, tlb, (unsigned)header.tlb_size_sectors * SECTOR_SIZE);
is_dirty = 0;
}
Bit32u vmware4_image_t::read_block_index(Bit64u sector, Bit32u index)
{
Bit32u ret;
Bit32u ret;
bx_read_image(file_descriptor, sector * SECTOR_SIZE + index * sizeof(Bit32u),
&ret, sizeof(Bit32u));
bx_read_image(file_descriptor, sector * SECTOR_SIZE + index * sizeof(Bit32u),
&ret, sizeof(Bit32u));
return dtoh32(ret);
return dtoh32(ret);
}
void vmware4_image_t::write_block_index(Bit64u sector, Bit32u index, Bit32u block_sector)
{
block_sector = htod32(block_sector);
block_sector = htod32(block_sector);
bx_write_image(file_descriptor, sector * SECTOR_SIZE + index * sizeof(Bit32u),
&block_sector, sizeof(Bit32u));
bx_write_image(file_descriptor, sector * SECTOR_SIZE + index * sizeof(Bit32u),
&block_sector, sizeof(Bit32u));
}
Bit32u vmware4_image_t::get_capabilities(void)
{
return HDIMAGE_HAS_GEOMETRY;
}
bx_bool vmware4_image_t::save_state(const char *backup_fname)
{
return hdimage_backup_file(file_descriptor, backup_fname);
}

View File

@ -41,6 +41,7 @@ class vmware4_image_t : public device_image_t
ssize_t read(void* buf, size_t count);
ssize_t write(const void* buf, size_t count);
Bit32u get_capabilities();
bx_bool save_state(const char *backup_fname);
private:
static const off_t INVALID_OFFSET;
@ -76,10 +77,10 @@ class vmware4_image_t : public device_image_t
#pragma options align=reset
#endif
bool is_open() const;
bool is_valid_header() const;
bx_bool is_open() const;
bx_bool is_valid_header() const;
bool read_header();
bx_bool read_header();
off_t perform_seek();
void flush();
Bit32u read_block_index(Bit64u sector, Bit32u index);
@ -90,7 +91,7 @@ class vmware4_image_t : public device_image_t
Bit8u* tlb;
off_t tlb_offset;
off_t current_offset;
bool is_dirty;
bx_bool is_dirty;
};
#endif

View File

@ -291,6 +291,11 @@ Bit32u vpc_image_t::get_capabilities(void)
return HDIMAGE_HAS_GEOMETRY;
}
bx_bool vpc_image_t::save_state(const char *backup_fname)
{
return hdimage_backup_file(fd, backup_fname);
}
Bit32u vpc_image_t::vpc_checksum(Bit8u *buf, size_t size)
{
Bit32u res = 0;

View File

@ -144,6 +144,7 @@ class vpc_image_t : public device_image_t
ssize_t read(void* buf, size_t count);
ssize_t write(const void* buf, size_t count);
Bit32u get_capabilities();
bx_bool save_state(const char *backup_fname);
private:
Bit32u vpc_checksum(Bit8u *buf, size_t size);