Pull request
Let's get the image fuzzer Python 3 changes merged in QEMU 4.2.

-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEhpWov9P5fNqsNXdanKSrs4Grc8gFAl3BmJQACgkQnKSrs4Gr
c8i3JwgAkTUM4b9O1irKMTHaeO3kmKmU4g920fRTXXjJXhA6i7IYDz6iYEQoNJoR
RsyaFRKfBqdmzRmk5qmehBax1XIcI4IZVWeiyk2SqjuM3T8d/VOln/iHLyzIz48j
zLP2AoEPiiWu+DODqz1SqOwYaUdw7bdEky/Nh0WbCRZdhS31m8D/svCIxtzoLhXk
OcWcTdeqXMG+EFSYvk3LFHJajbTOV4SMK6p+jXmdwVmV3rT7sIB4cS3s8oXC3zpM
bM3/Br/mRLJjnbF2FgQLCWwdV8NpvX7jk1ryoiySi5itPQil0PurAHwauPksUveo
Kf3LlcuaSRwKT8eG5745hyC2qBzEAg==
=7PkU
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

Pull request

Let's get the image fuzzer Python 3 changes merged in QEMU 4.2.

# gpg: Signature made Tue 05 Nov 2019 15:43:16 GMT
# gpg: using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg: aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  image-fuzzer: Use OSerror.strerror instead of tuple subscript
  image-fuzzer: Use errors parameter of subprocess.Popen()
  image-fuzzer: Run using python3
  image-fuzzer: Encode file name and file format to bytes
  image-fuzzer: Use bytes constant for field values
  image-fuzzer: Return bytes objects on string fuzzing functions
  image-fuzzer: Use %r for all fiels at Field.__repr__()
  image-fuzzer: Use io.StringIO
  image-fuzzer: Explicitly use integer division operator
  image-fuzzer: Write bytes instead of string to image file
  image-fuzzer: Open image files in binary mode

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 2dd8fd6e96
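The series below is mechanical, but two Python 3 behaviour changes explain most of it: true division now returns a float, and struct's 's' format only packs bytes objects. A minimal standalone sketch of both pitfalls (example values only, not part of the patch):

import struct

cluster_size = 1 << 16      # arbitrary example value
UINT64_S = 8

print(cluster_size / UINT64_S)    # 8192.0 -- true division yields a float
print(cluster_size // UINT64_S)   # 8192   -- floor division keeps an int
# range(), file offsets and struct fields all need ints, hence the / -> // changes.

print(struct.pack('>4s', b"QFI\xfb"))   # b'QFI\xfb' -- a bytes value packs fine
try:
    struct.pack('>4s', "QFI\xfb")       # a str now raises struct.error
except struct.error as exc:
    print("str rejected:", exc)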
@@ -1,2 +1 @@
-from __future__ import absolute_import
 from .layout import create_image
@@ -27,20 +27,20 @@ UINT64 = 0xffffffffffffffff
 UINT32_M = 31
 UINT64_M = 63
 # Fuzz vectors
-UINT8_V = [0, 0x10, UINT8/4, UINT8/2 - 1, UINT8/2, UINT8/2 + 1, UINT8 - 1,
+UINT8_V = [0, 0x10, UINT8//4, UINT8//2 - 1, UINT8//2, UINT8//2 + 1, UINT8 - 1,
            UINT8]
-UINT16_V = [0, 0x100, 0x1000, UINT16/4, UINT16/2 - 1, UINT16/2, UINT16/2 + 1,
+UINT16_V = [0, 0x100, 0x1000, UINT16//4, UINT16//2 - 1, UINT16//2, UINT16//2 + 1,
             UINT16 - 1, UINT16]
-UINT32_V = [0, 0x100, 0x1000, 0x10000, 0x100000, UINT32/4, UINT32/2 - 1,
-            UINT32/2, UINT32/2 + 1, UINT32 - 1, UINT32]
-UINT64_V = UINT32_V + [0x1000000, 0x10000000, 0x100000000, UINT64/4,
-                       UINT64/2 - 1, UINT64/2, UINT64/2 + 1, UINT64 - 1,
+UINT32_V = [0, 0x100, 0x1000, 0x10000, 0x100000, UINT32//4, UINT32//2 - 1,
+            UINT32//2, UINT32//2 + 1, UINT32 - 1, UINT32]
+UINT64_V = UINT32_V + [0x1000000, 0x10000000, 0x100000000, UINT64//4,
+                       UINT64//2 - 1, UINT64//2, UINT64//2 + 1, UINT64 - 1,
                        UINT64]
-STRING_V = ['%s%p%x%d', '.1024d', '%.2049d', '%p%p%p%p', '%x%x%x%x',
-            '%d%d%d%d', '%s%s%s%s', '%99999999999s', '%08x', '%%20d', '%%20n',
-            '%%20x', '%%20s', '%s%s%s%s%s%s%s%s%s%s', '%p%p%p%p%p%p%p%p%p%p',
-            '%#0123456x%08x%x%s%p%d%n%o%u%c%h%l%q%j%z%Z%t%i%e%g%f%a%C%S%08x%%',
-            '%s x 129', '%x x 257']
+BYTES_V = [b'%s%p%x%d', b'.1024d', b'%.2049d', b'%p%p%p%p', b'%x%x%x%x',
+           b'%d%d%d%d', b'%s%s%s%s', b'%99999999999s', b'%08x', b'%%20d', b'%%20n',
+           b'%%20x', b'%%20s', b'%s%s%s%s%s%s%s%s%s%s', b'%p%p%p%p%p%p%p%p%p%p',
+           b'%#0123456x%08x%x%s%p%d%n%o%u%c%h%l%q%j%z%Z%t%i%e%g%f%a%C%S%08x%%',
+           b'%s x 129', b'%x x 257']


 def random_from_intervals(intervals):
@@ -76,12 +76,12 @@ def random_bits(bit_ranges):
     return val


-def truncate_string(strings, length):
-    """Return strings truncated to specified length."""
-    if type(strings) == list:
-        return [s[:length] for s in strings]
+def truncate_bytes(sequences, length):
+    """Return sequences truncated to specified length."""
+    if type(sequences) == list:
+        return [s[:length] for s in sequences]
     else:
-        return strings[:length]
+        return sequences[:length]


 def validator(current, pick, choices):
@@ -110,12 +110,12 @@ def bit_validator(current, bit_ranges):
     return validator(current, random_bits, bit_ranges)


-def string_validator(current, strings):
-    """Return a random string value from the list not equal to the current.
+def bytes_validator(current, sequences):
+    """Return a random bytes value from the list not equal to the current.

     This function is useful for selection from valid values except current one.
     """
-    return validator(current, random.choice, strings)
+    return validator(current, random.choice, sequences)


 def selector(current, constraints, validate=int_validator):
@@ -283,9 +283,9 @@ def header_length(current):
 def bf_name(current):
     """Fuzz the backing file name."""
     constraints = [
-        truncate_string(STRING_V, len(current))
+        truncate_bytes(BYTES_V, len(current))
     ]
-    return selector(current, constraints, string_validator)
+    return selector(current, constraints, bytes_validator)


 def ext_magic(current):
@@ -303,10 +303,10 @@ def ext_length(current):
 def bf_format(current):
     """Fuzz backing file format in the corresponding header extension."""
     constraints = [
-        truncate_string(STRING_V, len(current)),
-        truncate_string(STRING_V, (len(current) + 7) & ~7) # Fuzz padding
+        truncate_bytes(BYTES_V, len(current)),
+        truncate_bytes(BYTES_V, (len(current) + 7) & ~7) # Fuzz padding
     ]
-    return selector(current, constraints, string_validator)
+    return selector(current, constraints, bytes_validator)


 def feature_type(current):
@@ -324,10 +324,10 @@ def feature_bit_number(current):
 def feature_name(current):
     """Fuzz feature name field of a feature name table header extension."""
     constraints = [
-        truncate_string(STRING_V, len(current)),
-        truncate_string(STRING_V, 46) # Fuzz padding (field length = 46)
+        truncate_bytes(BYTES_V, len(current)),
+        truncate_bytes(BYTES_V, 46) # Fuzz padding (field length = 46)
     ]
-    return selector(current, constraints, string_validator)
+    return selector(current, constraints, bytes_validator)


 def l1_entry(current):
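With STRING_V replaced by the bytes table BYTES_V and the string helpers renamed, slicing in truncate_bytes keeps producing bytes, which is what the qcow2 field packers expect on Python 3. A small usage sketch based on the definitions in the hunks above (the sample list is a subset chosen for illustration, not part of the patch):

BYTES_V_SAMPLE = [b'%s%p%x%d', b'%99999999999s', b'%x x 257']

def truncate_bytes(sequences, length):
    """Return sequences truncated to specified length."""
    if type(sequences) == list:
        return [s[:length] for s in sequences]
    else:
        return sequences[:length]

print(truncate_bytes(BYTES_V_SAMPLE, 4))   # [b'%s%p', b'%999', b'%x x'] -- still bytes
print(truncate_bytes(b'%s%s%s%s', 2))      # b'%s'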
@@ -16,7 +16,6 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #

-from __future__ import absolute_import
 import random
 import struct
 from . import fuzz
@@ -53,8 +52,8 @@ class Field(object):
         return iter([self.fmt, self.offset, self.value, self.name])

     def __repr__(self):
-        return "Field(fmt='%s', offset=%d, value=%s, name=%s)" % \
-            (self.fmt, self.offset, str(self.value), self.name)
+        return "Field(fmt=%r, offset=%r, value=%r, name=%r)" % \
+            (self.fmt, self.offset, self.value, self.name)


 class FieldsList(object):
@@ -122,7 +121,7 @@ class Image(object):
     def create_header(self, cluster_bits, backing_file_name=None):
         """Generate a random valid header."""
         meta_header = [
-            ['>4s', 0, "QFI\xfb", 'magic'],
+            ['>4s', 0, b"QFI\xfb", 'magic'],
             ['>I', 4, random.randint(2, 3), 'version'],
             ['>Q', 8, 0, 'backing_file_offset'],
             ['>I', 16, 0, 'backing_file_size'],
@@ -231,7 +230,7 @@ class Image(object):
         feature_tables = []
         feature_ids = []
         inner_offset = self.ext_offset + ext_header_len
-        feat_name = 'some cool feature'
+        feat_name = b'some cool feature'
         while len(feature_tables) < num_fnt_entries * 3:
             feat_type, feat_bit = gen_feat_ids()
             # Remove duplicates
@@ -253,7 +252,7 @@ class Image(object):
             ['>I', self.ext_offset, 0x6803f857, 'ext_magic'],
             # One feature table contains 3 fields and takes 48 bytes
             ['>I', self.ext_offset + UINT32_S,
-             len(feature_tables) / 3 * 48, 'ext_length']
+             len(feature_tables) // 3 * 48, 'ext_length']
         ] + feature_tables)
         self.ext_offset = inner_offset

@@ -271,7 +270,7 @@ class Image(object):
         def create_l2_entry(host, guest, l2_cluster):
             """Generate one L2 entry."""
             offset = l2_cluster * self.cluster_size
-            l2_size = self.cluster_size / UINT64_S
+            l2_size = self.cluster_size // UINT64_S
             entry_offset = offset + UINT64_S * (guest % l2_size)
             cluster_descriptor = host * self.cluster_size
             if not self.header['version'][0].value == 2:
@@ -283,8 +282,8 @@ class Image(object):

         def create_l1_entry(l2_cluster, l1_offset, guest):
             """Generate one L1 entry."""
-            l2_size = self.cluster_size / UINT64_S
-            entry_offset = l1_offset + UINT64_S * (guest / l2_size)
+            l2_size = self.cluster_size // UINT64_S
+            entry_offset = l1_offset + UINT64_S * (guest // l2_size)
             # While snapshots are not supported bit #63 = 1
             entry_val = (1 << 63) + l2_cluster * self.cluster_size
             return ['>Q', entry_offset, entry_val, 'l1_entry']
@@ -298,11 +297,11 @@ class Image(object):
             l2 = []
         else:
             meta_data = self._get_metadata()
-            guest_clusters = random.sample(range(self.image_size /
+            guest_clusters = random.sample(range(self.image_size //
                                                  self.cluster_size),
                                            len(self.data_clusters))
             # Number of entries in a L1/L2 table
-            l_size = self.cluster_size / UINT64_S
+            l_size = self.cluster_size // UINT64_S
             # Number of clusters necessary for L1 table
             l1_size = int(ceil((max(guest_clusters) + 1) / float(l_size**2)))
             l1_start = self._get_adjacent_clusters(self.data_clusters |
@@ -318,7 +317,7 @@ class Image(object):
             # L2 entries
             l2 = []
             for host, guest in zip(self.data_clusters, guest_clusters):
-                l2_id = guest / l_size
+                l2_id = guest // l_size
                 if l2_id not in l2_ids:
                     l2_ids.append(l2_id)
                     l2_clusters.append(self._get_adjacent_clusters(
@@ -339,14 +338,14 @@ class Image(object):
         def allocate_rfc_blocks(data, size):
             """Return indices of clusters allocated for refcount blocks."""
             cluster_ids = set()
-            diff = block_ids = set([x / size for x in data])
+            diff = block_ids = set([x // size for x in data])
             while len(diff) != 0:
                 # Allocate all yet not allocated clusters
                 new = self._get_available_clusters(data | cluster_ids,
                                                    len(diff))
                 # Indices of new refcount blocks necessary to cover clusters
                 # in 'new'
-                diff = set([x / size for x in new]) - block_ids
+                diff = set([x // size for x in new]) - block_ids
                 cluster_ids |= new
                 block_ids |= diff
             return cluster_ids, block_ids
@@ -359,7 +358,7 @@ class Image(object):
             blocks = set(init_blocks)
             clusters = set()
             # Number of entries in one cluster of the refcount table
-            size = self.cluster_size / UINT64_S
+            size = self.cluster_size // UINT64_S
             # Number of clusters necessary for the refcount table based on
             # the current number of refcount blocks
             table_size = int(ceil((max(blocks) + 1) / float(size)))
@@ -373,7 +372,7 @@ class Image(object):
                                                table_size + 1))
             # New refcount blocks necessary for clusters occupied by the
             # refcount table
-            diff = set([c / block_size for c in table_clusters]) - blocks
+            diff = set([c // block_size for c in table_clusters]) - blocks
             blocks |= diff
             while len(diff) != 0:
                 # Allocate clusters for new refcount blocks
@@ -382,12 +381,12 @@ class Image(object):
                                                    len(diff))
                 # Indices of new refcount blocks necessary to cover
                 # clusters in 'new'
-                diff = set([x / block_size for x in new]) - blocks
+                diff = set([x // block_size for x in new]) - blocks
                 clusters |= new
                 blocks |= diff
                 # Check if the refcount table needs one more cluster
                 if int(ceil((max(blocks) + 1) / float(size))) > table_size:
-                    new_block_id = (table_start + table_size) / block_size
+                    new_block_id = (table_start + table_size) // block_size
                     # Check if the additional table cluster needs
                     # one more refcount block
                     if new_block_id not in blocks:
@@ -399,13 +398,13 @@ class Image(object):
         def create_table_entry(table_offset, block_cluster, block_size,
                                cluster):
             """Generate a refcount table entry."""
-            offset = table_offset + UINT64_S * (cluster / block_size)
+            offset = table_offset + UINT64_S * (cluster // block_size)
             return ['>Q', offset, block_cluster * self.cluster_size,
                     'refcount_table_entry']

         def create_block_entry(block_cluster, block_size, cluster):
             """Generate a list of entries for the current block."""
-            entry_size = self.cluster_size / block_size
+            entry_size = self.cluster_size // block_size
             offset = block_cluster * self.cluster_size
             entry_offset = offset + entry_size * (cluster % block_size)
             # While snapshots are not supported all refcounts are set to 1
@@ -415,7 +414,7 @@ class Image(object):
         # Number of refcount entries per refcount block
         # Convert self.cluster_size from bytes to bits to have the same
         # base for the numerator and denominator
-        block_size = self.cluster_size * 8 / refcount_bits
+        block_size = self.cluster_size * 8 // refcount_bits
        meta_data = self._get_metadata()
        if len(self.data_clusters) == 0:
            # All metadata for an empty guest image needs 4 clusters:
@@ -452,8 +451,8 @@ class Image(object):
             rfc_blocks = []

         for cluster in sorted(self.data_clusters | meta_data):
-            if cluster / block_size != block_id:
-                block_id = cluster / block_size
+            if cluster // block_size != block_id:
+                block_id = cluster // block_size
                 block_cluster = block_clusters[block_ids.index(block_id)]
                 rfc_table.append(create_table_entry(table_offset,
                                                     block_cluster,
@@ -503,7 +502,7 @@ class Image(object):

     def write(self, filename):
         """Write an entire image to the file."""
-        image_file = open(filename, 'w')
+        image_file = open(filename, 'wb')
         for field in self:
             image_file.seek(field.offset)
             image_file.write(struct.pack(field.fmt, field.value))
@@ -518,7 +517,7 @@ class Image(object):
         rounded = (size + self.cluster_size - 1) & ~(self.cluster_size - 1)
         if rounded > size:
             image_file.seek(rounded - 1)
-            image_file.write("\0")
+            image_file.write(b'\x00')
         image_file.close()

     @staticmethod
@@ -587,7 +586,7 @@ class Image(object):
     def _alloc_data(img_size, cluster_size):
         """Return a set of random indices of clusters allocated for guest data.
         """
-        num_of_cls = img_size/cluster_size
+        num_of_cls = img_size // cluster_size
         return set(random.sample(range(1, num_of_cls + 1),
                                  random.randint(0, num_of_cls)))

@@ -595,15 +594,15 @@ class Image(object):
         """Return indices of clusters allocated for image metadata."""
         ids = set()
         for x in self:
-            ids.add(x.offset/self.cluster_size)
+            ids.add(x.offset // self.cluster_size)
         return ids


 def create_image(test_img_path, backing_file_name=None, backing_file_fmt=None,
                  fields_to_fuzz=None):
     """Create a fuzzed image and write it to the specified file."""
-    image = Image(backing_file_name)
-    image.set_backing_file_format(backing_file_fmt)
+    image = Image(backing_file_name.encode())
+    image.set_backing_file_format(backing_file_fmt.encode())
     image.create_feature_name_table()
     image.set_end_of_extension_area()
     image.create_l_structures()
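The hunks above all enforce one rule: whatever reaches the image file is bytes, computed with integer offsets. A condensed sketch of the resulting write path (the file name, field values and sizes here are illustrative only, not taken from the patch):

import struct

fields = [('>4s', 0, b"QFI\xfb"),        # (fmt, offset, value), like Field above
          ('>I', 4, 3)]
cluster_size = 1 << 16
size = 5                                 # pretend total size of the written fields

with open('/tmp/fuzzed.img', 'wb') as image_file:           # binary mode, not 'w'
    for fmt, offset, value in fields:
        image_file.seek(offset)
        image_file.write(struct.pack(fmt, value))            # bytes in, bytes out
    rounded = (size + cluster_size - 1) & ~(cluster_size - 1)  # round up to a cluster
    image_file.seek(rounded - 1)
    image_file.write(b'\x00')                                 # pad with a bytes literal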
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3

 # Tool for running fuzz tests
 #
@@ -18,7 +18,6 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #

-from __future__ import print_function
 import sys
 import os
 import signal
@@ -28,7 +27,7 @@ import shutil
 from itertools import count
 import time
 import getopt
-import StringIO
+import io
 import resource

 try:
@@ -80,7 +79,8 @@ def run_app(fd, q_args):
     devnull = open('/dev/null', 'r+')
     process = subprocess.Popen(q_args, stdin=devnull,
                                stdout=subprocess.PIPE,
-                               stderr=subprocess.PIPE)
+                               stderr=subprocess.PIPE,
+                               errors='replace')
     try:
         out, err = process.communicate()
         signal.alarm(0)
@@ -159,7 +159,7 @@ class TestEnv(object):
             os.makedirs(self.current_dir)
         except OSError as e:
             print("Error: The working directory '%s' cannot be used. Reason: %s"\
-                  % (self.work_dir, e[1]), file=sys.stderr)
+                  % (self.work_dir, e.strerror), file=sys.stderr)
             raise TestException
         self.log = open(os.path.join(self.current_dir, "test.log"), "w")
         self.parent_log = open(run_log, "a")
@@ -183,7 +183,7 @@ class TestEnv(object):
                                           MAX_BACKING_FILE_SIZE) * (1 << 20)
         cmd = self.qemu_img + ['create', '-f', backing_file_fmt,
                                backing_file_name, str(backing_file_size)]
-        temp_log = StringIO.StringIO()
+        temp_log = io.StringIO()
         retcode = run_app(temp_log, cmd)
         if retcode == 0:
             temp_log.close()
@@ -240,13 +240,13 @@ class TestEnv(object):
                        "Backing file: %s\n" \
                        % (self.seed, " ".join(current_cmd),
                           self.current_dir, backing_file_name)
-        temp_log = StringIO.StringIO()
+        temp_log = io.StringIO()
         try:
             retcode = run_app(temp_log, current_cmd)
         except OSError as e:
             multilog("%sError: Start of '%s' failed. Reason: %s\n\n"
                      % (test_summary, os.path.basename(current_cmd[0]),
-                        e[1]),
+                        e.strerror),
                      sys.stderr, self.log, self.parent_log)
             raise TestException

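The runner hunks tie the text-mode pieces together: Popen() with errors='replace' makes communicate() return str instead of bytes, the scratch log becomes io.StringIO, and OSError details are read from e.strerror rather than the old tuple subscript. A rough, self-contained sketch of those three idioms ('true' and '/nonexistent' are placeholder commands, not the real qemu-img invocations):

import io
import subprocess

process = subprocess.Popen(['true'], stdin=subprocess.DEVNULL,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           errors='replace')    # text mode: undecodable bytes become U+FFFD
out, err = process.communicate()                # out and err are str, not bytes

temp_log = io.StringIO()                        # Python 3 replacement for StringIO.StringIO
temp_log.write(out)

try:
    subprocess.Popen(['/nonexistent'])
except OSError as e:
    print(e.strerror)                           # e[1] no longer works on Python 3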