util/oslib-posix: Introduce and use MemsetContext for touch_all_pages()

Let's minimize the number of global variables to prepare for
os_mem_prealloc() getting called concurrently, and make the code a bit
easier to read. The only consumer that really needs a global variable
is the SIGBUS handler, which will require protection via a mutex in the
future either way, as we cannot concurrently mess with the SIGBUS
handler.

Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20211217134611.31172-4-david@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent: a384bfa32e
commit: dba506788b
@@ -73,21 +73,30 @@
 
 #define MAX_MEM_PREALLOC_THREAD_COUNT 16
 
+struct MemsetThread;
+
+typedef struct MemsetContext {
+    bool all_threads_created;
+    bool any_thread_failed;
+    struct MemsetThread *threads;
+    int num_threads;
+} MemsetContext;
+
 struct MemsetThread {
     char *addr;
     size_t numpages;
     size_t hpagesize;
     QemuThread pgthread;
     sigjmp_buf env;
+    MemsetContext *context;
 };
 typedef struct MemsetThread MemsetThread;
 
-static MemsetThread *memset_thread;
-static int memset_num_threads;
+/* used by sigbus_handler() */
+static MemsetContext *sigbus_memset_context;
 
 static QemuMutex page_mutex;
 static QemuCond page_cond;
-static bool threads_created_flag;
 
 int qemu_get_thread_id(void)
 {
@@ -438,10 +447,13 @@ const char *qemu_get_exec_dir(void)
 static void sigbus_handler(int signal)
 {
     int i;
 
-    if (memset_thread) {
-        for (i = 0; i < memset_num_threads; i++) {
-            if (qemu_thread_is_self(&memset_thread[i].pgthread)) {
-                siglongjmp(memset_thread[i].env, 1);
+    if (sigbus_memset_context) {
+        for (i = 0; i < sigbus_memset_context->num_threads; i++) {
+            MemsetThread *thread = &sigbus_memset_context->threads[i];
+
+            if (qemu_thread_is_self(&thread->pgthread)) {
+                siglongjmp(thread->env, 1);
             }
         }
     }
@@ -459,7 +471,7 @@ static void *do_touch_pages(void *arg)
      * clearing until all threads have been created.
      */
     qemu_mutex_lock(&page_mutex);
-    while(!threads_created_flag){
+    while (!memset_args->context->all_threads_created) {
         qemu_cond_wait(&page_cond, &page_mutex);
     }
     qemu_mutex_unlock(&page_mutex);
@@ -502,7 +514,7 @@ static void *do_madv_populate_write_pages(void *arg)
 
     /* See do_touch_pages(). */
     qemu_mutex_lock(&page_mutex);
-    while (!threads_created_flag) {
+    while (!memset_args->context->all_threads_created) {
         qemu_cond_wait(&page_cond, &page_mutex);
     }
     qemu_mutex_unlock(&page_mutex);
@@ -529,6 +541,9 @@ static int touch_all_pages(char *area, size_t hpagesize, size_t numpages,
                            int smp_cpus, bool use_madv_populate_write)
 {
     static gsize initialized = 0;
+    MemsetContext context = {
+        .num_threads = get_memset_num_threads(smp_cpus),
+    };
     size_t numpages_per_thread, leftover;
     void *(*touch_fn)(void *);
     int ret = 0, i = 0;
@@ -546,35 +561,41 @@ static int touch_all_pages(char *area, size_t hpagesize, size_t numpages,
         touch_fn = do_touch_pages;
     }
 
-    threads_created_flag = false;
-    memset_num_threads = get_memset_num_threads(smp_cpus);
-    memset_thread = g_new0(MemsetThread, memset_num_threads);
-    numpages_per_thread = numpages / memset_num_threads;
-    leftover = numpages % memset_num_threads;
-    for (i = 0; i < memset_num_threads; i++) {
-        memset_thread[i].addr = addr;
-        memset_thread[i].numpages = numpages_per_thread + (i < leftover);
-        memset_thread[i].hpagesize = hpagesize;
-        qemu_thread_create(&memset_thread[i].pgthread, "touch_pages",
-                           touch_fn, &memset_thread[i],
+    context.threads = g_new0(MemsetThread, context.num_threads);
+    numpages_per_thread = numpages / context.num_threads;
+    leftover = numpages % context.num_threads;
+    for (i = 0; i < context.num_threads; i++) {
+        context.threads[i].addr = addr;
+        context.threads[i].numpages = numpages_per_thread + (i < leftover);
+        context.threads[i].hpagesize = hpagesize;
+        context.threads[i].context = &context;
+        qemu_thread_create(&context.threads[i].pgthread, "touch_pages",
+                           touch_fn, &context.threads[i],
                            QEMU_THREAD_JOINABLE);
-        addr += memset_thread[i].numpages * hpagesize;
+        addr += context.threads[i].numpages * hpagesize;
     }
 
+    if (!use_madv_populate_write) {
+        sigbus_memset_context = &context;
+    }
+
     qemu_mutex_lock(&page_mutex);
-    threads_created_flag = true;
+    context.all_threads_created = true;
     qemu_cond_broadcast(&page_cond);
     qemu_mutex_unlock(&page_mutex);
 
-    for (i = 0; i < memset_num_threads; i++) {
-        int tmp = (uintptr_t)qemu_thread_join(&memset_thread[i].pgthread);
+    for (i = 0; i < context.num_threads; i++) {
+        int tmp = (uintptr_t)qemu_thread_join(&context.threads[i].pgthread);
+
         if (tmp) {
             ret = tmp;
         }
     }
-    g_free(memset_thread);
-    memset_thread = NULL;
+
+    if (!use_madv_populate_write) {
+        sigbus_memset_context = NULL;
+    }
+    g_free(context.threads);
 
     return ret;
 }
Loading…
Reference in New Issue
Block a user