/* ----------------------------------------------------------------------------
Copyright (c) 2018-2020 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license.
-----------------------------------------------------------------------------*/

/* This is a stress test for the allocator, using multiple threads and
   transferring objects between threads. It tries to reflect real-world workloads:
   - allocation size is distributed linearly in powers of two
   - with some fraction extra large (and some extra extra large)
   - the allocations are initialized and read again at free
   - pointers transfer between threads
   - threads are terminated and recreated with some objects surviving in between
   - uses deterministic "randomness", but execution can still depend on
     (random) thread scheduling. Do not use this test as a benchmark!
*/

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>

// > mimalloc-test-stress [THREADS] [SCALE] [ITER]
//
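// for example, `mimalloc-test-stress 16 25 10` would run 16 threads at scale 25
// for 10 iterations (an illustrative invocation; the defaults are given below)
//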
// argument defaults
static int THREADS = 32;      // more repeatable if THREADS <= #processors
static int SCALE   = 25;      // scaling factor
static int ITER    = 50;      // N full iterations destructing and re-creating all threads

// static int THREADS = 8;    // more repeatable if THREADS <= #processors
// static int SCALE   = 100;  // scaling factor

#define STRESS                // undefine for leak test

static bool   allow_large_objects = true;   // allow very large objects?
static size_t use_one_size = 0;             // use single object size of `N * sizeof(uintptr_t)`?


// #define USE_STD_MALLOC
#ifdef USE_STD_MALLOC
#define custom_calloc(n,s)    calloc(n,s)
#define custom_realloc(p,s)   realloc(p,s)
#define custom_free(p)        free(p)
#else
#include <mimalloc.h>
#define custom_calloc(n,s)    mi_calloc(n,s)
#define custom_realloc(p,s)   mi_realloc(p,s)
#define custom_free(p)        mi_free(p)
#endif

// transfer pointer between threads
#define TRANSFERS (1000)
static volatile void* transfer[TRANSFERS];
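// Each slot may hold a block allocated by any thread; exchanging pointers
// through this buffer makes threads free blocks they did not allocate.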

#if (UINTPTR_MAX != UINT32_MAX)
const uintptr_t cookie = 0xbf58476d1ce4e5b9UL;
#else
const uintptr_t cookie = 0x1ce4e5b9UL;
#endif
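// The cookie is XOR-ed into every word of an allocated block: `alloc_items`
// stores a decreasing counter XOR cookie, so `free_items` can recover the
// block length from the first word and detect corruption in the rest.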

static void* atomic_exchange_ptr(volatile void** p, void* newval);

typedef uintptr_t* random_t;
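
// Deterministic pseudo-random numbers: advance the state with an invertible
// bit-mixing hash (splitmix64-style on 64-bit, Chris Wellons' hash on 32-bit).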
static uintptr_t pick(random_t r) {
  uintptr_t x = *r;
#if (UINTPTR_MAX > UINT32_MAX)
  // by Sebastiano Vigna, see: <http://xoshiro.di.unimi.it/splitmix64.c>
  x ^= x >> 30;
  x *= 0xbf58476d1ce4e5b9UL;
  x ^= x >> 27;
  x *= 0x94d049bb133111ebUL;
  x ^= x >> 31;
#else
  // by Chris Wellons, see: <https://nullprogram.com/blog/2018/07/31/>
  x ^= x >> 16;
  x *= 0x7feb352dUL;
  x ^= x >> 15;
  x *= 0x846ca68bUL;
  x ^= x >> 16;
#endif
  *r = x;
  return x;
}

static bool chance(size_t perc, random_t r) {
  return (pick(r) % 100 <= perc);
}

static void* alloc_items(size_t items, random_t r) {
  if (chance(1, r)) {
    if (chance(1, r) && allow_large_objects) items *= 10000;       // 0.01% giant
    else if (chance(10, r) && allow_large_objects) items *= 1000;  // 0.1% huge
    else items *= 100;                                             // 1% large objects;
  }
  if (items == 40) items++;  // pthreads uses that size for stack increases
  if (use_one_size > 0) items = (use_one_size / sizeof(uintptr_t));
  if (items==0) items = 1;
  uintptr_t* p = (uintptr_t*)custom_calloc(items,sizeof(uintptr_t));
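  // encode a decreasing counter XOR-ed with the cookie; the first word then
  // decodes to the item count when the block is checked and freed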
  if (p != NULL) {
    for (uintptr_t i = 0; i < items; i++) {
      p[i] = (items - i) ^ cookie;
    }
  }
  return p;
}
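
// Read every word back and verify the cookie-encoded countdown written by
// `alloc_items`; abort on any mismatch (memory corruption).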
static void free_items(void* p) {
  if (p != NULL) {
    uintptr_t* q = (uintptr_t*)p;
    uintptr_t items = (q[0] ^ cookie);
    for (uintptr_t i = 0; i < items; i++) {
      if ((q[i] ^ cookie) != items - i) {
        fprintf(stderr, "memory corruption at block %p at %zu\n", p, (size_t)i);
        abort();
      }
    }
  }
  custom_free(p);
}
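
// Per-thread stress loop: alternate between short-lived allocations and
// retained ones, free about 66% of the previous allocations as we go, and
// exchange about 25% of them with the shared transfer buffer; everything
// still held locally is freed before the thread exits.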
static void stress(intptr_t tid) {
  //bench_start_thread();
  uintptr_t r = ((tid + 1) * 43); // rand();
  const size_t max_item_shift = 5; // 128
  const size_t max_item_retained_shift = max_item_shift + 2;
  size_t allocs = 100 * ((size_t)SCALE) * (tid % 8 + 1); // some threads do more
  size_t retain = allocs / 2;
  void** data = NULL;
  size_t data_size = 0;
  size_t data_top = 0;
  void** retained = (void**)custom_calloc(retain,sizeof(void*));
  size_t retain_top = 0;

  while (allocs > 0 || retain > 0) {
    if (retain == 0 || (chance(50, &r) && allocs > 0)) {
      // 50%+ alloc
      allocs--;
      if (data_top >= data_size) {
        data_size += 100000;
        data = (void**)custom_realloc(data, data_size * sizeof(void*));
      }
      data[data_top++] = alloc_items(1ULL << (pick(&r) % max_item_shift), &r);
    }
    else {
      // 25% retain
      retained[retain_top++] = alloc_items(1ULL << (pick(&r) % max_item_retained_shift), &r);
      retain--;
    }
    if (chance(66, &r) && data_top > 0) {
      // 66% free previous alloc
      size_t idx = pick(&r) % data_top;
      free_items(data[idx]);
      data[idx] = NULL;
    }
    if (chance(25, &r) && data_top > 0) {
      // 25% exchange a local pointer with the (shared) transfer buffer.
      size_t data_idx = pick(&r) % data_top;
      size_t transfer_idx = pick(&r) % TRANSFERS;
      void* p = data[data_idx];
      void* q = atomic_exchange_ptr(&transfer[transfer_idx], p);
      data[data_idx] = q;
    }
  }
  // free everything that is left
  for (size_t i = 0; i < retain_top; i++) {
    free_items(retained[i]);
  }
  for (size_t i = 0; i < data_top; i++) {
    free_items(data[i]);
  }
  custom_free(retained);
  custom_free(data);
  //bench_end_thread();
}

static void run_os_threads(size_t nthreads, void (*entry)(intptr_t tid));
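
// Stress driver: ITER iterations, each running THREADS threads to completion
// and then releasing about half of the transfer buffer (all of it on the
// final iteration).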
static void test_stress(void) {
  uintptr_t r = rand();
  for (int n = 0; n < ITER; n++) {
    run_os_threads(THREADS, &stress);
    for (int i = 0; i < TRANSFERS; i++) {
      if (chance(50, &r) || n + 1 == ITER) { // free all on last run, otherwise free half of the transfers
        void* p = atomic_exchange_ptr(&transfer[i], NULL);
        free_items(p);
      }
    }
#ifndef NDEBUG
    //mi_collect(false);
    //mi_debug_show_arenas();
#endif
#if !defined(NDEBUG) || defined(MI_TSAN)
    if ((n + 1) % 10 == 0) { printf("- iterations left: %3d\n", ITER - (n + 1)); }
#endif
  }
}


#ifndef STRESS
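// Leak test: each thread allocates a single small block; about half of them
// are published into the transfer buffer (freeing whatever was there before)
// and the rest are intentionally never freed by the allocating thread.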
static void leak(intptr_t tid) {
  uintptr_t r = rand();
  void* p = alloc_items(1 /*pick(&r)%128*/, &r);
  if (chance(50, &r)) {
    intptr_t i = (pick(&r) % TRANSFERS);
    void* q = atomic_exchange_ptr(&transfer[i], p);
    free_items(q);
  }
}

static void test_leak(void) {
  for (int n = 0; n < ITER; n++) {
    run_os_threads(THREADS, &leak);
#ifndef USE_STD_MALLOC
    mi_collect(false);
#endif
#ifndef NDEBUG
    if ((n + 1) % 10 == 0) { printf("- iterations left: %3d\n", ITER - (n + 1)); }
#endif
  }
}
#endif

int main(int argc, char** argv) {
  // > mimalloc-test-stress [THREADS] [SCALE] [ITER]
  if (argc >= 2) {
    char* end;
    long n = strtol(argv[1], &end, 10);
    if (n > 0) THREADS = (int)n;
  }
  if (argc >= 3) {
    char* end;
    long n = (strtol(argv[2], &end, 10));
    if (n > 0) SCALE = (int)n;
  }
  if (argc >= 4) {
    char* end;
    long n = (strtol(argv[3], &end, 10));
    if (n > 0) ITER = (int)n;
  }
  printf("Using %d threads with a %d%% load-per-thread and %d iterations\n", THREADS, SCALE, ITER);
  //mi_reserve_os_memory(1024*1024*1024ULL, false, true);
  //int res = mi_reserve_huge_os_pages(4,1);
  //printf("(reserve huge: %i\n)", res);

  //bench_start_program();

  // Run ITER full iterations where half the objects in the transfer buffer survive to the next round.
  srand(0x7feb352d);

  //mi_reserve_os_memory(512ULL << 20, true, true);

#if !defined(NDEBUG) && !defined(USE_STD_MALLOC)
  mi_stats_reset();
#endif

#ifdef STRESS
  test_stress();
#else
  test_leak();
#endif

#ifndef USE_STD_MALLOC
  #ifndef NDEBUG
  mi_collect(true);
  //mi_debug_show_arenas();
  #endif
  mi_stats_print(NULL);
#endif
  //bench_end_program();
  return 0;
}
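

// Platform-specific thread creation/join and an atomic pointer exchange for
// the transfer buffer. The entry function is passed through a global because
// the per-thread start routines only forward a single opaque argument.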
static void (*thread_entry_fun)(intptr_t) = &stress;

#ifdef _WIN32

#include <Windows.h>

static DWORD WINAPI thread_entry(LPVOID param) {
  thread_entry_fun((intptr_t)param);
  return 0;
}

static void run_os_threads(size_t nthreads, void (*fun)(intptr_t)) {
  thread_entry_fun = fun;
  DWORD* tids = (DWORD*)custom_calloc(nthreads,sizeof(DWORD));
  HANDLE* thandles = (HANDLE*)custom_calloc(nthreads,sizeof(HANDLE));
  for (uintptr_t i = 0; i < nthreads; i++) {
    thandles[i] = CreateThread(0, 8*1024, &thread_entry, (void*)(i), 0, &tids[i]);
  }
  for (size_t i = 0; i < nthreads; i++) {
    WaitForSingleObject(thandles[i], INFINITE);
  }
  for (size_t i = 0; i < nthreads; i++) {
    CloseHandle(thandles[i]);
  }
  custom_free(tids);
  custom_free(thandles);
}

static void* atomic_exchange_ptr(volatile void** p, void* newval) {
#if (INTPTR_MAX == INT32_MAX)
  return (void*)InterlockedExchange((volatile LONG*)p, (LONG)newval);
#else
  return (void*)InterlockedExchange64((volatile LONG64*)p, (LONG64)newval);
#endif
}

#else

#include <pthread.h>

static void* thread_entry(void* param) {
  thread_entry_fun((uintptr_t)param);
  return NULL;
}

static void run_os_threads(size_t nthreads, void (*fun)(intptr_t)) {
  thread_entry_fun = fun;
  pthread_t* threads = (pthread_t*)custom_calloc(nthreads,sizeof(pthread_t));
  memset(threads, 0, sizeof(pthread_t) * nthreads);
  //pthread_setconcurrency(nthreads);
  for (size_t i = 0; i < nthreads; i++) {
    pthread_create(&threads[i], NULL, &thread_entry, (void*)i);
  }
  for (size_t i = 0; i < nthreads; i++) {
    pthread_join(threads[i], NULL);
  }
  custom_free(threads);
}

#ifdef __cplusplus
#include <atomic>
static void* atomic_exchange_ptr(volatile void** p, void* newval) {
  return std::atomic_exchange((volatile std::atomic<void*>*)p, newval);
}
#else
#include <stdatomic.h>
static void* atomic_exchange_ptr(volatile void** p, void* newval) {
  return atomic_exchange((volatile _Atomic(void*)*)p, newval);
}
#endif

#endif