fuzz: Add DMA support to the generic-fuzzer

When a virtual device tries to access a buffer in guest memory over DMA, we
add callbacks into the fuzzer (next commit). The fuzzer verifies that the DMA
request maps to a physical RAM address and fills the memory with
fuzzer-provided data. The patterns used to fill this memory are specified
using the add_dma_pattern and clear_dma_patterns operations.

Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Message-Id: <20201023150746.107063-5-alxndr@bu.edu>
[thuth: Reformatted one comment according to the QEMU coding style]
Signed-off-by: Thomas Huth <thuth@redhat.com>
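To illustrate how one of these patterns expands into the data used for a DMA
fill, here is a minimal standalone sketch that mirrors the pattern_alloc()
logic added below. The pattern_expand()/main() driver and the use of plain
malloc() are illustrative assumptions, not part of the patch; the example
values come from the comment in the patch:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Mirrors the pattern struct added by this patch. */
    typedef struct {
        uint8_t index;        /* index of the byte to increment by stride */
        uint8_t stride;       /* increment applied on each repetition */
        size_t len;
        const uint8_t *data;
    } pattern;

    /* Same expansion as pattern_alloc() in generic_fuzz.c, with g_malloc()
     * swapped for malloc(). */
    static uint8_t *pattern_expand(pattern p, size_t len)
    {
        uint8_t *buf = malloc(len);
        uint8_t sum = 0;

        for (size_t i = 0; i < len; ++i) {
            buf[i] = p.data[i % p.len];
            if ((i % p.len) == p.index) {
                buf[i] += sum;
                sum += p.stride;
            }
        }
        return buf;
    }

    int main(void)
    {
        /* The example from the patch comment:
         * renders as 00 01 02 00 03 02 00 05 02 00 07 02 */
        const uint8_t data[] = { 0x00, 0x01, 0x02 };
        pattern p = { .index = 1, .stride = 2, .len = 3, .data = data };
        uint8_t *buf = pattern_expand(p, 12);

        for (int i = 0; i < 12; ++i) {
            printf("%02x ", buf[i]);
        }
        printf("\n");
        free(buf);
        return 0;
    }

Incrementing one byte of each repetition is what makes a single short pattern
useful for populating, e.g., tables of distinct guest addresses.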
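For reference, op_add_dma_pattern() (added below) interprets its input as two
header bytes, index and stride, followed by the raw pattern data. A hedged
sketch of decoding such a payload, using only the layout visible in the patch
(the payload bytes here are made up, and the standalone main() is not part of
the fuzzer):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Hypothetical fuzzer-supplied payload: [index][stride][pattern bytes...] */
        const uint8_t payload[] = { 1, 2, 0x00, 0x01, 0x02 };
        size_t len = sizeof(payload);

        struct {
            uint8_t index;
            uint8_t stride;
        } a;

        /* op_add_dma_pattern() ignores payloads too short to carry pattern data */
        if (len < sizeof(a) + 1) {
            return 1;
        }
        memcpy(&a, payload, sizeof(a));

        size_t pattern_len = len - sizeof(a);
        const uint8_t *pattern_data = payload + sizeof(a);
        uint8_t index = a.index % pattern_len; /* keep the index inside the pattern */

        printf("index=%u stride=%u len=%zu data[0]=%02x\n",
               (unsigned)index, (unsigned)a.stride, pattern_len,
               (unsigned)pattern_data[0]);
        return 0;
    }

clear_dma_patterns simply drops all registered patterns and resets the rotation
index; the fuzzer also does this once at the start of each input (see
op_clear_dma_patterns() and its call in generic_fuzz() below).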
@@ -42,6 +42,13 @@ typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
 DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
                      IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)
 
+#ifdef CONFIG_FUZZ
+void fuzz_dma_read_cb(size_t addr,
+                      size_t len,
+                      MemoryRegion *mr,
+                      bool is_write);
+#endif
+
 extern bool global_dirty_log;
 
 typedef struct MemoryRegionOps MemoryRegionOps;
@@ -25,6 +25,7 @@
 #include "exec/address-spaces.h"
 #include "hw/qdev-core.h"
 #include "hw/pci/pci.h"
+#include "hw/boards.h"
 
 /*
  * SEPARATOR is used to separate "operations" in the fuzz input
@@ -38,12 +39,16 @@ enum cmds {
     OP_WRITE,
     OP_PCI_READ,
     OP_PCI_WRITE,
+    OP_ADD_DMA_PATTERN,
+    OP_CLEAR_DMA_PATTERNS,
     OP_CLOCK_STEP,
 };
 
 #define DEFAULT_TIMEOUT_US 100000
 #define USEC_IN_SEC 1000000000
 
+#define MAX_DMA_FILL_SIZE 0x10000
+
 #define PCI_HOST_BRIDGE_CFG 0xcf8
 #define PCI_HOST_BRIDGE_DATA 0xcfc
 
@@ -56,6 +61,24 @@ static useconds_t timeout = DEFAULT_TIMEOUT_US;
 
 static bool qtest_log_enabled;
 
+/*
+ * A pattern used to populate a DMA region or perform a memwrite. This is
+ * useful for e.g. populating tables of unique addresses.
+ * Example {.index = 1; .stride = 2; .len = 3; .data = "\x00\x01\x02"}
+ * Renders as: 00 01 02 00 03 02 00 05 02 00 07 02 ...
+ */
+typedef struct {
+    uint8_t index;      /* Index of a byte to increment by stride */
+    uint8_t stride;     /* Increment each index'th byte by this amount */
+    size_t len;
+    const uint8_t *data;
+} pattern;
+
+/* Avoid filling the same DMA region between MMIO/PIO commands ? */
+static bool avoid_double_fetches;
+
+static QTestState *qts_global; /* Need a global for the DMA callback */
+
 /*
  * List of memory regions that are children of QOM objects specified by the
  * user for fuzzing.
@@ -84,6 +107,169 @@ static int get_io_address_cb(Int128 start, Int128 size,
     return 0;
 }
 
+/*
+ * List of dma regions populated since the last fuzzing command. Used to ensure
+ * that we only write to each DMA address once, to avoid race conditions when
+ * building reproducers.
+ */
+static GArray *dma_regions;
+
+static GArray *dma_patterns;
+static int dma_pattern_index;
+
+/*
+ * Allocate a block of memory and populate it with a pattern.
+ */
+static void *pattern_alloc(pattern p, size_t len)
+{
+    int i;
+    uint8_t *buf = g_malloc(len);
+    uint8_t sum = 0;
+
+    for (i = 0; i < len; ++i) {
+        buf[i] = p.data[i % p.len];
+        if ((i % p.len) == p.index) {
+            buf[i] += sum;
+            sum += p.stride;
+        }
+    }
+    return buf;
+}
+
+static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
+{
+    unsigned access_size_max = mr->ops->valid.max_access_size;
+
+    /*
+     * Regions are assumed to support 1-4 byte accesses unless
+     * otherwise specified.
+     */
+    if (access_size_max == 0) {
+        access_size_max = 4;
+    }
+
+    /* Bound the maximum access by the alignment of the address. */
+    if (!mr->ops->impl.unaligned) {
+        unsigned align_size_max = addr & -addr;
+        if (align_size_max != 0 && align_size_max < access_size_max) {
+            access_size_max = align_size_max;
+        }
+    }
+
+    /* Don't attempt accesses larger than the maximum. */
+    if (l > access_size_max) {
+        l = access_size_max;
+    }
+    l = pow2floor(l);
+
+    return l;
+}
+
+/*
+ * Call-back for functions that perform DMA reads from guest memory. Confirm
+ * that the region has not already been populated since the last loop in
+ * generic_fuzz(), avoiding potential race-conditions, which we don't have
+ * a good way for reproducing right now.
+ */
+void fuzz_dma_read_cb(size_t addr, size_t len, MemoryRegion *mr, bool is_write)
+{
+    /* Are we in the generic-fuzzer or are we using another fuzz-target? */
+    if (!qts_global) {
+        return;
+    }
+
+    /*
+     * Return immediately if:
+     * - We have no DMA patterns defined
+     * - The length of the DMA read request is zero
+     * - The DMA read is hitting an MR other than the machine's main RAM
+     * - The DMA request is not a read (what happens for a address_space_map
+     *   with is_write=True? Can the device use the same pointer to do reads?)
+     * - The DMA request hits past the bounds of our RAM
+     */
+    if (dma_patterns->len == 0
+        || len == 0
+        /* || mr != MACHINE(qdev_get_machine())->ram */
+        || is_write
+        || addr > current_machine->ram_size) {
+        return;
+    }
+
+    /*
+     * If we overlap with any existing dma_regions, split the range and only
+     * populate the non-overlapping parts.
+     */
+    address_range region;
+    bool double_fetch = false;
+    for (int i = 0;
+         i < dma_regions->len && (avoid_double_fetches || qtest_log_enabled);
+         ++i) {
+        region = g_array_index(dma_regions, address_range, i);
+        if (addr < region.addr + region.size && addr + len > region.addr) {
+            double_fetch = true;
+            if (addr < region.addr
+                && avoid_double_fetches) {
+                fuzz_dma_read_cb(addr, region.addr - addr, mr, is_write);
+            }
+            if (addr + len > region.addr + region.size
+                && avoid_double_fetches) {
+                fuzz_dma_read_cb(region.addr + region.size,
+                        addr + len - (region.addr + region.size), mr, is_write);
+            }
+            return;
+        }
+    }
+
+    /* Cap the length of the DMA access to something reasonable */
+    len = MIN(len, MAX_DMA_FILL_SIZE);
+
+    address_range ar = {addr, len};
+    g_array_append_val(dma_regions, ar);
+    pattern p = g_array_index(dma_patterns, pattern, dma_pattern_index);
+    void *buf = pattern_alloc(p, ar.size);
+    hwaddr l, addr1;
+    MemoryRegion *mr1;
+    uint8_t *ram_ptr;
+    while (len > 0) {
+        l = len;
+        mr1 = address_space_translate(first_cpu->as,
+                                      addr, &addr1, &l, true,
+                                      MEMTXATTRS_UNSPECIFIED);
+
+        if (!(memory_region_is_ram(mr1) ||
+              memory_region_is_romd(mr1))) {
+            l = memory_access_size(mr1, l, addr1);
+        } else {
+            /* ROM/RAM case */
+            ram_ptr = qemu_map_ram_ptr(mr1->ram_block, addr1);
+            memcpy(ram_ptr, buf, l);
+            break;
+        }
+        len -= l;
+        buf += l;
+        addr += l;
+
+    }
+    if (qtest_log_enabled) {
+        /*
+         * With QTEST_LOG, use a normal, slow QTest memwrite. Prefix the log
+         * that will be written by qtest.c with a DMA tag, so we can reorder
+         * the resulting QTest trace so the DMA fills precede the last PIO/MMIO
+         * command.
+         */
+        fprintf(stderr, "[DMA] ");
+        if (double_fetch) {
+            fprintf(stderr, "[DOUBLE-FETCH] ");
+        }
+        fflush(stderr);
+    }
+    qtest_memwrite(qts_global, ar.addr, buf, ar.size);
+    g_free(buf);
+
+    /* Increment the index of the pattern for the next DMA access */
+    dma_pattern_index = (dma_pattern_index + 1) % dma_patterns->len;
+}
+
 /*
  * Here we want to convert a fuzzer-provided [io-region-index, offset] to
  * a physical address. To do this, we iterate over all of the matched
@@ -349,6 +535,35 @@ static void op_pci_write(QTestState *s, const unsigned char * data, size_t len)
     }
 }
 
+static void op_add_dma_pattern(QTestState *s,
+                               const unsigned char *data, size_t len)
+{
+    struct {
+        /*
+         * index and stride can be used to increment the index-th byte of the
+         * pattern by the value stride, for each loop of the pattern.
+         */
+        uint8_t index;
+        uint8_t stride;
+    } a;
+
+    if (len < sizeof(a) + 1) {
+        return;
+    }
+    memcpy(&a, data, sizeof(a));
+    pattern p = {a.index, a.stride, len - sizeof(a), data + sizeof(a)};
+    p.index = a.index % p.len;
+    g_array_append_val(dma_patterns, p);
+    return;
+}
+
+static void op_clear_dma_patterns(QTestState *s,
+                                  const unsigned char *data, size_t len)
+{
+    g_array_set_size(dma_patterns, 0);
+    dma_pattern_index = 0;
+}
+
 static void op_clock_step(QTestState *s, const unsigned char *data, size_t len)
 {
     qtest_clock_step_next(s);
@@ -409,6 +624,8 @@ static void generic_fuzz(QTestState *s, const unsigned char *Data, size_t Size)
         [OP_WRITE] = op_write,
         [OP_PCI_READ] = op_pci_read,
         [OP_PCI_WRITE] = op_pci_write,
+        [OP_ADD_DMA_PATTERN] = op_add_dma_pattern,
+        [OP_CLEAR_DMA_PATTERNS] = op_clear_dma_patterns,
         [OP_CLOCK_STEP] = op_clock_step,
     };
     const unsigned char *cmd = Data;
@@ -438,6 +655,8 @@ static void generic_fuzz(QTestState *s, const unsigned char *Data, size_t Size)
             setitimer(ITIMER_VIRTUAL, &timer, NULL);
         }
 
+        op_clear_dma_patterns(s, NULL, 0);
+
         while (cmd && Size) {
             /* Get the length until the next command or end of input */
             nextcmd = memmem(cmd, Size, SEPARATOR, strlen(SEPARATOR));
@@ -454,6 +673,7 @@ static void generic_fuzz(QTestState *s, const unsigned char *Data, size_t Size)
             /* Advance to the next command */
             cmd = nextcmd ? nextcmd + sizeof(SEPARATOR) - 1 : nextcmd;
             Size = Size - (cmd_len + sizeof(SEPARATOR) - 1);
+            g_array_set_size(dma_regions, 0);
         }
         _Exit(0);
     } else {
@@ -468,6 +688,9 @@ static void usage(void)
     printf("QEMU_FUZZ_ARGS= the command line arguments passed to qemu\n");
     printf("QEMU_FUZZ_OBJECTS= "
            "a space separated list of QOM type names for objects to fuzz\n");
+    printf("Optionally: QEMU_AVOID_DOUBLE_FETCH= "
+           "Try to avoid racy DMA double fetch bugs? %d by default\n",
+           avoid_double_fetches);
     printf("Optionally: QEMU_FUZZ_TIMEOUT= Specify a custom timeout (us). "
            "0 to disable. %d by default\n", timeout);
     exit(0);
@@ -539,9 +762,16 @@ static void generic_pre_fuzz(QTestState *s)
     if (getenv("QTEST_LOG")) {
         qtest_log_enabled = 1;
     }
+    if (getenv("QEMU_AVOID_DOUBLE_FETCH")) {
+        avoid_double_fetches = 1;
+    }
     if (getenv("QEMU_FUZZ_TIMEOUT")) {
         timeout = g_ascii_strtoll(getenv("QEMU_FUZZ_TIMEOUT"), NULL, 0);
     }
+    qts_global = s;
+
+    dma_regions = g_array_new(false, false, sizeof(address_range));
+    dma_patterns = g_array_new(false, false, sizeof(pattern));
+
     fuzzable_memoryregions = g_hash_table_new(NULL, NULL);
     fuzzable_pci_devices = g_ptr_array_new();