/*
 * Simple trace backend
 *
 * Copyright IBM, Corp. 2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#ifndef _WIN32
#include <pthread.h>
#endif
#include "qemu/timer.h"
#include "trace/control.h"
#include "trace/simple.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"

/** Trace file header event ID, picked to avoid conflict with real event IDs */
#define HEADER_EVENT_ID (~(uint64_t)0)

/** Trace file magic number */
#define HEADER_MAGIC 0xf2b177cb0aa429b4ULL

/** Trace file version number, bump if format changes */
#define HEADER_VERSION 4

/** Records were dropped event ID */
#define DROPPED_EVENT_ID (~(uint64_t)0 - 1)

/** Trace record is valid */
#define TRACE_RECORD_VALID ((uint64_t)1 << 63)

/*
 * Trace records are written out by a dedicated thread. The thread waits for
 * records to become available, writes them out, and then waits again.
 */
static GMutex trace_lock;
static GCond trace_available_cond;
static GCond trace_empty_cond;

static bool trace_available;
static bool trace_writeout_enabled;

enum {
    TRACE_BUF_LEN = 4096 * 64,
    TRACE_BUF_FLUSH_THRESHOLD = TRACE_BUF_LEN / 4,
};

uint8_t trace_buf[TRACE_BUF_LEN];
static volatile gint trace_idx;
static unsigned int writeout_idx;
static volatile gint dropped_events;
static uint32_t trace_pid;
static FILE *trace_fp;
static char *trace_file_name;
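
/*
 * Coordination sketch (inferred from the code below, not part of the
 * original comments): writers reserve space with a compare-and-swap on
 * trace_idx, fill in their TraceRecord, and only then set
 * TRACE_RECORD_VALID on the event field.  The writeout thread consumes
 * valid records starting at writeout_idx and zeroes each consumed range.
 * dropped_events counts reservations that failed because the ring buffer
 * was full.
 */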

#define TRACE_RECORD_TYPE_MAPPING 0
#define TRACE_RECORD_TYPE_EVENT   1

/* Trace buffer entry */
typedef struct {
    uint64_t event; /* event ID value */
    uint64_t timestamp_ns;
    uint32_t length; /* in bytes */
    uint32_t pid;
    uint64_t arguments[];
} TraceRecord;

typedef struct {
    uint64_t header_event_id; /* HEADER_EVENT_ID */
    uint64_t header_magic;    /* HEADER_MAGIC    */
    uint64_t header_version;  /* HEADER_VERSION  */
} TraceLogHeader;
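
/*
 * On-disk layout, as produced by the code below: a TraceLogHeader, then a
 * stream of records, each prefixed by a uint64_t type tag.
 * TRACE_RECORD_TYPE_MAPPING records carry (event ID, name length, name
 * bytes) and map IDs to event names; TRACE_RECORD_TYPE_EVENT records carry
 * a TraceRecord.  Dropped events appear as an event record with ID
 * DROPPED_EVENT_ID whose single argument is the drop count.
 */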

static void read_from_buffer(unsigned int idx, void *dataptr, size_t size);
static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size);

static void clear_buffer_range(unsigned int idx, size_t len)
{
    uint32_t num = 0;
    while (num < len) {
        if (idx >= TRACE_BUF_LEN) {
            idx = idx % TRACE_BUF_LEN;
        }
        trace_buf[idx++] = 0;
        num++;
    }
}

/**
 * Read a trace record from the trace buffer
 *
 * @idx         Trace buffer index
 * @recordptr   Trace record to fill
 *
 * Returns false if the record is not valid.
 */
static bool get_trace_record(unsigned int idx, TraceRecord **recordptr)
{
    uint64_t event_flag = 0;
    TraceRecord record;
    /* read the event flag to see if it's a valid record */
    read_from_buffer(idx, &record, sizeof(event_flag));

    if (!(record.event & TRACE_RECORD_VALID)) {
        return false;
    }

    smp_rmb(); /* read memory barrier before accessing record */
    /* read the record header to know record length */
    read_from_buffer(idx, &record, sizeof(TraceRecord));
    *recordptr = malloc(record.length); /* don't use g_malloc, can deadlock when traced */
    /* make a copy of record to avoid being overwritten */
    read_from_buffer(idx, *recordptr, record.length);
    smp_rmb(); /* memory barrier before clearing valid flag */
    (*recordptr)->event &= ~TRACE_RECORD_VALID;
    /* clear the trace buffer range for the consumed record, otherwise any
     * byte with its MSB set may be considered a valid event ID when the
     * writer thread crosses this range of the buffer again.
     */
    clear_buffer_range(idx, record.length);
    return true;
}

/**
 * Kick writeout thread
 *
 * @wait        Whether to wait for writeout thread to complete
 */
static void flush_trace_file(bool wait)
{
    g_mutex_lock(&trace_lock);
    trace_available = true;
    g_cond_signal(&trace_available_cond);

    if (wait) {
        g_cond_wait(&trace_empty_cond, &trace_lock);
    }

    g_mutex_unlock(&trace_lock);
}

static void wait_for_trace_records_available(void)
{
    g_mutex_lock(&trace_lock);
    while (!(trace_available && trace_writeout_enabled)) {
        g_cond_signal(&trace_empty_cond);
        g_cond_wait(&trace_available_cond, &trace_lock);
    }
    trace_available = false;
    g_mutex_unlock(&trace_lock);
}

static gpointer writeout_thread(gpointer opaque)
{
    TraceRecord *recordptr;
    union {
        TraceRecord rec;
        uint8_t bytes[sizeof(TraceRecord) + sizeof(uint64_t)];
    } dropped;
    unsigned int idx = 0;
    int dropped_count;
    size_t unused __attribute__ ((unused));
    uint64_t type = TRACE_RECORD_TYPE_EVENT;

    for (;;) {
        wait_for_trace_records_available();

        if (g_atomic_int_get(&dropped_events)) {
            dropped.rec.event = DROPPED_EVENT_ID;
            dropped.rec.timestamp_ns = get_clock();
            dropped.rec.length = sizeof(TraceRecord) + sizeof(uint64_t);
            dropped.rec.pid = trace_pid;
            do {
                dropped_count = g_atomic_int_get(&dropped_events);
            } while (!g_atomic_int_compare_and_exchange(&dropped_events,
                                                        dropped_count, 0));
            dropped.rec.arguments[0] = dropped_count;
            unused = fwrite(&type, sizeof(type), 1, trace_fp);
            unused = fwrite(&dropped.rec, dropped.rec.length, 1, trace_fp);
        }

        while (get_trace_record(idx, &recordptr)) {
            unused = fwrite(&type, sizeof(type), 1, trace_fp);
            unused = fwrite(recordptr, recordptr->length, 1, trace_fp);
            writeout_idx += recordptr->length;
            free(recordptr); /* don't use g_free, can deadlock when traced */
            idx = writeout_idx % TRACE_BUF_LEN;
        }

        fflush(trace_fp);
    }
    return NULL;
}

void trace_record_write_u64(TraceBufferRecord *rec, uint64_t val)
{
    rec->rec_off = write_to_buffer(rec->rec_off, &val, sizeof(uint64_t));
}

void trace_record_write_str(TraceBufferRecord *rec, const char *s, uint32_t slen)
{
    /* Write string length first */
    rec->rec_off = write_to_buffer(rec->rec_off, &slen, sizeof(slen));
    /* Write actual string now */
    rec->rec_off = write_to_buffer(rec->rec_off, (void*)s, slen);
}

int trace_record_start(TraceBufferRecord *rec, uint32_t event, size_t datasize)
{
    unsigned int idx, rec_off, old_idx, new_idx;
    uint32_t rec_len = sizeof(TraceRecord) + datasize;
    uint64_t event_u64 = event;
    uint64_t timestamp_ns = get_clock();

    do {
        old_idx = g_atomic_int_get(&trace_idx);
        smp_rmb();
        new_idx = old_idx + rec_len;

        if (new_idx - writeout_idx > TRACE_BUF_LEN) {
            /* Trace buffer full, event dropped! */
            g_atomic_int_inc(&dropped_events);
            return -ENOSPC;
        }
    } while (!g_atomic_int_compare_and_exchange(&trace_idx, old_idx, new_idx));

    idx = old_idx % TRACE_BUF_LEN;

    rec_off = idx;
    rec_off = write_to_buffer(rec_off, &event_u64, sizeof(event_u64));
    rec_off = write_to_buffer(rec_off, &timestamp_ns, sizeof(timestamp_ns));
    rec_off = write_to_buffer(rec_off, &rec_len, sizeof(rec_len));
    rec_off = write_to_buffer(rec_off, &trace_pid, sizeof(trace_pid));

    rec->tbuf_idx = idx;
    rec->rec_off = (idx + sizeof(TraceRecord)) % TRACE_BUF_LEN;
    return 0;
}

static void read_from_buffer(unsigned int idx, void *dataptr, size_t size)
{
    uint8_t *data_ptr = dataptr;
    uint32_t x = 0;
    while (x < size) {
        if (idx >= TRACE_BUF_LEN) {
            idx = idx % TRACE_BUF_LEN;
        }
        data_ptr[x++] = trace_buf[idx++];
    }
}

static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size)
{
    uint8_t *data_ptr = dataptr;
    uint32_t x = 0;
    while (x < size) {
        if (idx >= TRACE_BUF_LEN) {
            idx = idx % TRACE_BUF_LEN;
        }
        trace_buf[idx++] = data_ptr[x++];
    }
    return idx; /* most callers want to know where to write next */
}

void trace_record_finish(TraceBufferRecord *rec)
{
    TraceRecord record;
    read_from_buffer(rec->tbuf_idx, &record, sizeof(TraceRecord));
    smp_wmb(); /* write barrier before marking as valid */
    record.event |= TRACE_RECORD_VALID;
    write_to_buffer(rec->tbuf_idx, &record, sizeof(TraceRecord));

    if (((unsigned int)g_atomic_int_get(&trace_idx) - writeout_idx)
        > TRACE_BUF_FLUSH_THRESHOLD) {
        flush_trace_file(false);
    }
}
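
/*
 * A minimal sketch of the writer-side call sequence for a one-argument
 * event (the event ID 42 and value are illustrative only; in QEMU this API
 * is typically driven by generated tracepoint code):
 *
 *     TraceBufferRecord rec;
 *     if (trace_record_start(&rec, 42, sizeof(uint64_t)) == 0) {
 *         trace_record_write_u64(&rec, value);
 *         trace_record_finish(&rec);
 *     }
 *
 * On -ENOSPC the event is dropped and counted in dropped_events.
 */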

static int st_write_event_mapping(TraceEventIter *iter)
{
    uint64_t type = TRACE_RECORD_TYPE_MAPPING;
    TraceEvent *ev;

    while ((ev = trace_event_iter_next(iter)) != NULL) {
        uint64_t id = trace_event_get_id(ev);
        const char *name = trace_event_get_name(ev);
        uint32_t len = strlen(name);
        if (fwrite(&type, sizeof(type), 1, trace_fp) != 1 ||
            fwrite(&id, sizeof(id), 1, trace_fp) != 1 ||
            fwrite(&len, sizeof(len), 1, trace_fp) != 1 ||
            fwrite(name, len, 1, trace_fp) != 1) {
            return -1;
        }
    }

    return 0;
}

/**
 * Enable / disable tracing, return whether it was enabled.
 *
 * @enable: enable if %true, else disable.
 */
bool st_set_trace_file_enabled(bool enable)
{
    TraceEventIter iter;
    bool was_enabled = trace_fp;

    if (enable == !!trace_fp) {
        return was_enabled; /* no change */
    }

    /* Halt trace writeout */
    flush_trace_file(true);
    trace_writeout_enabled = false;
    flush_trace_file(true);

    if (enable) {
        static const TraceLogHeader header = {
            .header_event_id = HEADER_EVENT_ID,
            .header_magic = HEADER_MAGIC,
            /* Older log readers will check for version at next location */
            .header_version = HEADER_VERSION,
        };

        trace_fp = fopen(trace_file_name, "wb");
        if (!trace_fp) {
            return was_enabled;
        }

        trace_event_iter_init_all(&iter);
        if (fwrite(&header, sizeof header, 1, trace_fp) != 1 ||
            st_write_event_mapping(&iter) < 0) {
            fclose(trace_fp);
            trace_fp = NULL;
            return was_enabled;
        }

        /* Resume trace writeout */
        trace_writeout_enabled = true;
        flush_trace_file(false);
    } else {
        fclose(trace_fp);
        trace_fp = NULL;
    }
    return was_enabled;
}

/**
 * Set the name of a trace file
 *
 * @file        The trace file name or NULL for the default name-<pid> set at
 *              config time
 */
void st_set_trace_file(const char *file)
{
    bool saved_enable = st_set_trace_file_enabled(false);

    g_free(trace_file_name);

    if (!file) {
        /* Type cast needed for Windows where getpid() returns an int. */
        trace_file_name = g_strdup_printf(CONFIG_TRACE_FILE, (pid_t)getpid());
    } else {
        trace_file_name = g_strdup_printf("%s", file);
    }

    st_set_trace_file_enabled(saved_enable);
}

void st_print_trace_file_status(void)
{
    qemu_printf("Trace file \"%s\" %s.\n",
                trace_file_name, trace_fp ? "on" : "off");
}

void st_flush_trace_buffer(void)
{
    flush_trace_file(true);
}

/* Helper function to create a thread with signals blocked. Use glib's
 * portable threads since QEMU abstractions cannot be used due to reentrancy in
 * the tracer. Also note the signal masking on POSIX hosts so that the thread
 * does not steal signals when the rest of the program wants them blocked.
 */
static GThread *trace_thread_create(GThreadFunc fn)
{
    GThread *thread;
#ifndef _WIN32
    sigset_t set, oldset;

    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
#endif

    thread = g_thread_new("trace-thread", fn, NULL);

#ifndef _WIN32
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);
#endif

    return thread;
}

bool st_init(void)
{
    GThread *thread;

    trace_pid = getpid();

    thread = trace_thread_create(writeout_thread);
    if (!thread) {
        warn_report("unable to initialize simple trace backend");
        return false;
    }

    atexit(st_flush_trace_buffer);
    return true;
}

void st_init_group(size_t group)
{
    TraceEventIter iter;

    if (!trace_writeout_enabled) {
        return;
    }

    trace_event_iter_init_group(&iter, group);
    st_write_event_mapping(&iter);
}