Remove --disable-spinlocks.

A later change will require atomic support, so it wouldn't make sense
for a hypothetical new system not to be able to implement spinlocks.

Reviewed-by: Heikki Linnakangas <hlinnaka@iki.fi>
Reviewed-by: Tom Lane <tgl@sss.pgh.pa.us> (concept, not the patch)
Reviewed-by: Andres Freund <andres@anarazel.de> (concept, not the patch)
Discussion: https://postgr.es/m/3351991.1697728588%40sss.pgh.pa.us
Thomas Munro 2024-07-30 21:45:01 +12:00
parent 1330843bb7
commit e25626677f
20 changed files with 13 additions and 486 deletions
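
The rationale above can be made concrete: any platform with atomic operations can build a spinlock out of an atomic test-and-set. A minimal sketch, assuming GCC's __atomic builtins; the my_* names are illustrative, not PostgreSQL API:

#include <stdbool.h>

typedef volatile bool my_slock_t;

static inline void
my_spin_lock(my_slock_t *lock)
{
    /* atomically set the flag; a true return means another holder beat us */
    while (__atomic_test_and_set(lock, __ATOMIC_ACQUIRE))
        ;                       /* spin */
}

static inline void
my_spin_unlock(my_slock_t *lock)
{
    __atomic_clear(lock, __ATOMIC_RELEASE);
}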

configure

@@ -836,7 +836,6 @@ enable_integer_datetimes
enable_nls
with_pgport
enable_rpath
enable_spinlocks
enable_atomics
enable_debug
enable_profiling
@@ -1529,7 +1528,6 @@ Optional Features:
enable Native Language Support
--disable-rpath do not embed shared library search path in
executables
--disable-spinlocks do not use spinlocks
--disable-atomics do not use atomic operations
--enable-debug build with debugging symbols (-g)
--enable-profiling build with profiling enabled
@@ -3266,33 +3264,6 @@ fi
#
# Spinlocks
#
# Check whether --enable-spinlocks was given.
if test "${enable_spinlocks+set}" = set; then :
enableval=$enable_spinlocks;
case $enableval in
yes)
:
;;
no)
:
;;
*)
as_fn_error $? "no argument expected for --enable-spinlocks option" "$LINENO" 5
;;
esac
else
enable_spinlocks=yes
fi
#
# Atomic operations
#
@@ -12185,17 +12156,6 @@ fi
fi
if test "$enable_spinlocks" = yes; then
$as_echo "#define HAVE_SPINLOCKS 1" >>confdefs.h
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING:
*** Not using spinlocks will cause poor performance." >&5
$as_echo "$as_me: WARNING:
*** Not using spinlocks will cause poor performance." >&2;}
fi
if test "$enable_atomics" = yes; then
$as_echo "#define HAVE_ATOMICS 1" >>confdefs.h

configure.ac

@@ -186,12 +186,6 @@ PGAC_ARG_BOOL(enable, rpath, yes,
[do not embed shared library search path in executables])
AC_SUBST(enable_rpath)
#
# Spinlocks
#
PGAC_ARG_BOOL(enable, spinlocks, yes,
[do not use spinlocks])
#
# Atomic operations
#
@@ -1296,13 +1290,6 @@ failure. It is possible the compiler isn't looking in the proper directory.
Use --without-zlib to disable zlib support.])])
fi
if test "$enable_spinlocks" = yes; then
AC_DEFINE(HAVE_SPINLOCKS, 1, [Define to 1 if you have spinlocks.])
else
AC_MSG_WARN([
*** Not using spinlocks will cause poor performance.])
fi
if test "$enable_atomics" = yes; then
AC_DEFINE(HAVE_ATOMICS, 1, [Define to 1 if you want to use atomics if available.])
else

doc/src/sgml/installation.sgml

@@ -1258,22 +1258,6 @@ build-postgresql:
</listitem>
</varlistentry>
<varlistentry id="configure-option-disable-spinlocks">
<term><option>--disable-spinlocks</option></term>
<listitem>
<para>
Allow the build to succeed even if <productname>PostgreSQL</productname>
has no CPU spinlock support for the platform. The lack of
spinlock support will result in very poor performance; therefore,
this option should only be used if the build aborts and
informs you that the platform lacks spinlock support. If this
option is required to build <productname>PostgreSQL</productname> on
your platform, please report the problem to the
<productname>PostgreSQL</productname> developers.
</para>
</listitem>
</varlistentry>
<varlistentry id="configure-option-disable-atomics">
<term><option>--disable-atomics</option></term>
<listitem>
@@ -2690,23 +2674,6 @@ ninja install
</listitem>
</varlistentry>
<varlistentry id="configure-spinlocks-meson">
<term><option>-Dspinlocks={ true | false }</option></term>
<listitem>
<para>
This option is set to true by default; setting it to false will
allow the build to succeed even if <productname>PostgreSQL</productname>
has no CPU spinlock support for the platform. The lack of
spinlock support will result in very poor performance; therefore,
this option should only be changed if the build aborts and
informs you that the platform lacks spinlock support. If setting this
option to false is required to build <productname>PostgreSQL</productname> on
your platform, please report the problem to the
<productname>PostgreSQL</productname> developers.
</para>
</listitem>
</varlistentry>
<varlistentry id="configure-atomics-meson">
<term><option>-Datomics={ true | false }</option></term>
<listitem>
@@ -2719,6 +2686,7 @@ ninja install
</para>
</listitem>
</varlistentry>
</variablelist>
</sect3>
@@ -3393,9 +3361,6 @@ export MANPATH
these CPU architectures: x86, PowerPC, S/390, SPARC, ARM, MIPS,
and RISC-V, including
big-endian, little-endian, 32-bit, and 64-bit variants where applicable.
It is often
possible to build on an unsupported CPU type by configuring with
<option>--disable-spinlocks</option>, but performance will be poor.
</para>
<para>

meson.build

@@ -2089,12 +2089,6 @@ endif
# Atomics
###############################################################
if not get_option('spinlocks')
warning('Not using spinlocks will cause poor performance')
else
cdata.set('HAVE_SPINLOCKS', 1)
endif
if not get_option('atomics')
warning('Not using atomics will cause poor performance')
else

src/backend/port/atomics.c

@@ -57,17 +57,7 @@ pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t),
"size mismatch of atomic_flag vs slock_t");
#ifndef HAVE_SPINLOCKS
/*
* NB: If we're using semaphore based TAS emulation, be careful to use a
* separate set of semaphores. Otherwise we'd get in trouble if an atomic
* var would be manipulated while spinlock is held.
*/
s_init_lock_sema((slock_t *) &ptr->sema, true);
#else
SpinLockInit((slock_t *) &ptr->sema);
#endif
ptr->value = false;
}
@@ -108,15 +98,7 @@ pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t),
"size mismatch of atomic_uint32 vs slock_t");
/*
* If we're using semaphore based atomic flags, be careful about nested
* usage of atomics while a spinlock is held.
*/
#ifndef HAVE_SPINLOCKS
s_init_lock_sema((slock_t *) &ptr->sema, true);
#else
SpinLockInit((slock_t *) &ptr->sema);
#endif
ptr->value = val_;
}
@@ -184,15 +166,7 @@ pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t),
"size mismatch of atomic_uint64 vs slock_t");
/*
* If we're using semaphore based atomic flags, be careful about nested
* usage of atomics while a spinlock is held.
*/
#ifndef HAVE_SPINLOCKS
s_init_lock_sema((slock_t *) &ptr->sema, true);
#else
SpinLockInit((slock_t *) &ptr->sema);
#endif
ptr->value = val_;
}

src/backend/port/posix_sema.c

@@ -217,8 +217,7 @@ PGReserveSemaphores(int maxSemas)
/*
* We must use ShmemAllocUnlocked(), since the spinlock protecting
* ShmemAlloc() won't be ready yet. (This ordering is necessary when we
* are emulating spinlocks with semaphores.)
* ShmemAlloc() won't be ready yet.
*/
sharedSemas = (PGSemaphore)
ShmemAllocUnlocked(PGSemaphoreShmemSize(maxSemas));

src/backend/port/sysv_sema.c

@@ -325,8 +325,7 @@ PGReserveSemaphores(int maxSemas)
/*
* We must use ShmemAllocUnlocked(), since the spinlock protecting
* ShmemAlloc() won't be ready yet. (This ordering is necessary when we
* are emulating spinlocks with semaphores.)
* ShmemAlloc() won't be ready yet.
*/
sharedSemas = (PGSemaphore)
ShmemAllocUnlocked(PGSemaphoreShmemSize(maxSemas));

src/backend/postmaster/launch_backend.c

@@ -108,9 +108,7 @@ typedef struct
#ifdef USE_INJECTION_POINTS
struct InjectionPointsCtl *ActiveInjectionPoints;
#endif
#ifndef HAVE_SPINLOCKS
PGSemaphore *SpinlockSemaArray;
#endif
int NamedLWLockTrancheRequests;
NamedLWLockTranche *NamedLWLockTrancheArray;
LWLockPadded *MainLWLockArray;
@@ -724,9 +722,6 @@ save_backend_variables(BackendParameters *param, ClientSocket *client_sock,
param->ActiveInjectionPoints = ActiveInjectionPoints;
#endif
#ifndef HAVE_SPINLOCKS
param->SpinlockSemaArray = SpinlockSemaArray;
#endif
param->NamedLWLockTrancheRequests = NamedLWLockTrancheRequests;
param->NamedLWLockTrancheArray = NamedLWLockTrancheArray;
param->MainLWLockArray = MainLWLockArray;
@@ -986,9 +981,6 @@ restore_backend_variables(BackendParameters *param)
ActiveInjectionPoints = param->ActiveInjectionPoints;
#endif
#ifndef HAVE_SPINLOCKS
SpinlockSemaArray = param->SpinlockSemaArray;
#endif
NamedLWLockTrancheRequests = param->NamedLWLockTrancheRequests;
NamedLWLockTrancheArray = param->NamedLWLockTrancheArray;
MainLWLockArray = param->MainLWLockArray;

src/backend/storage/ipc/ipci.c

@@ -94,7 +94,6 @@ CalculateShmemSize(int *num_semaphores)
/* Compute number of semaphores we'll need */
numSemas = ProcGlobalSemas();
numSemas += SpinlockSemas();
/* Return the number of semaphores if requested by the caller */
if (num_semaphores)
@@ -111,7 +110,6 @@ CalculateShmemSize(int *num_semaphores)
*/
size = 100000;
size = add_size(size, PGSemaphoreShmemSize(numSemas));
size = add_size(size, SpinlockSemaSize());
size = add_size(size, hash_estimate_size(SHMEM_INDEX_SIZE,
sizeof(ShmemIndexEnt)));
size = add_size(size, dsm_estimate_size());
@@ -225,14 +223,6 @@ CreateSharedMemoryAndSemaphores(void)
*/
PGReserveSemaphores(numSemas);
/*
* If spinlocks are disabled, initialize emulation layer (which depends on
* semaphores, so the order is important here).
*/
#ifndef HAVE_SPINLOCKS
SpinlockSemaInit();
#endif
/*
* Set up shared memory allocation mechanism
*/

src/backend/storage/lmgr/Makefile

@@ -21,7 +21,6 @@ OBJS = \
predicate.o \
proc.o \
s_lock.o \
spin.o
include $(top_srcdir)/src/backend/common.mk

src/backend/storage/lmgr/meson.build

@@ -9,5 +9,4 @@ backend_sources += files(
'predicate.c',
'proc.c',
's_lock.c',
'spin.c',
)

src/backend/storage/lmgr/s_lock.c

@@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* s_lock.c
* Hardware-dependent implementation of spinlocks.
* Implementation of spinlocks.
*
* When waiting for a contended spinlock we loop tightly for awhile, then
* delay using pg_usleep() and try again. Preferably, "awhile" should be a

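The strategy that comment describes (spin tightly for a while, then delay and retry) looks roughly like the sketch below. Names and the constant are hypothetical; the real s_lock.c adapts its spin count and sleep length rather than using fixed values:

#include <stdbool.h>
#include <unistd.h>

#define MY_SPINS_PER_DELAY 100      /* "awhile": tight spins before sleeping */

static void
my_spin_until_acquired(volatile bool *lock)
{
    int     spins = 0;

    while (__atomic_test_and_set(lock, __ATOMIC_ACQUIRE))
    {
        if (++spins >= MY_SPINS_PER_DELAY)
        {
            usleep(1000);           /* stand-in for pg_usleep(); back off 1ms */
            spins = 0;
        }
    }
}
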
src/backend/storage/lmgr/spin.c

@@ -1,180 +0,0 @@
/*-------------------------------------------------------------------------
*
* spin.c
* Hardware-independent implementation of spinlocks.
*
*
* For machines that have test-and-set (TAS) instructions, s_lock.h/.c
* define the spinlock implementation. This file contains only a stub
* implementation for spinlocks using PGSemaphores. Unless semaphores
* are implemented in a way that doesn't involve a kernel call, this
* is too slow to be very useful :-(
*
*
* Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/storage/lmgr/spin.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "storage/pg_sema.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#ifndef HAVE_SPINLOCKS
/*
* No TAS, so spinlocks are implemented as PGSemaphores.
*/
#ifndef HAVE_ATOMICS
#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES)
#else
#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES)
#endif /* HAVE_ATOMICS */
PGSemaphore *SpinlockSemaArray;
#else /* !HAVE_SPINLOCKS */
#define NUM_EMULATION_SEMAPHORES 0
#endif /* HAVE_SPINLOCKS */
/*
* Report the amount of shared memory needed to store semaphores for spinlock
* support.
*/
Size
SpinlockSemaSize(void)
{
return NUM_EMULATION_SEMAPHORES * sizeof(PGSemaphore);
}
/*
* Report number of semaphores needed to support spinlocks.
*/
int
SpinlockSemas(void)
{
return NUM_EMULATION_SEMAPHORES;
}
#ifndef HAVE_SPINLOCKS
/*
* Initialize spinlock emulation.
*
* This must be called after PGReserveSemaphores().
*/
void
SpinlockSemaInit(void)
{
PGSemaphore *spinsemas;
int nsemas = SpinlockSemas();
int i;
/*
* We must use ShmemAllocUnlocked(), since the spinlock protecting
* ShmemAlloc() obviously can't be ready yet.
*/
spinsemas = (PGSemaphore *) ShmemAllocUnlocked(SpinlockSemaSize());
for (i = 0; i < nsemas; ++i)
spinsemas[i] = PGSemaphoreCreate();
SpinlockSemaArray = spinsemas;
}
/*
* s_lock.h hardware-spinlock emulation using semaphores
*
* We map all spinlocks onto NUM_EMULATION_SEMAPHORES semaphores. It's okay to
* map multiple spinlocks onto one semaphore because no process should ever
* hold more than one at a time. We just need enough semaphores so that we
* aren't adding too much extra contention from that.
*
* There is one exception to the restriction of only holding one spinlock at a
* time, which is that it's ok if emulated atomic operations are nested inside
* spinlocks. To avoid the danger of spinlocks and atomic using the same sema,
* we make sure "normal" spinlocks and atomics backed by spinlocks use
* distinct semaphores (see the nested argument to s_init_lock_sema).
*
* slock_t is just an int for this implementation; it holds the spinlock
* number from 1..NUM_EMULATION_SEMAPHORES. We intentionally ensure that 0
* is not a valid value, so that testing with this code can help find
* failures to initialize spinlocks.
*/
static inline void
s_check_valid(int lockndx)
{
if (unlikely(lockndx <= 0 || lockndx > NUM_EMULATION_SEMAPHORES))
elog(ERROR, "invalid spinlock number: %d", lockndx);
}
void
s_init_lock_sema(volatile slock_t *lock, bool nested)
{
static uint32 counter = 0;
uint32 offset;
uint32 sema_total;
uint32 idx;
if (nested)
{
/*
* To allow nesting atomics inside spinlocked sections, use a
* different spinlock. See comment above.
*/
offset = 1 + NUM_SPINLOCK_SEMAPHORES;
sema_total = NUM_ATOMICS_SEMAPHORES;
}
else
{
offset = 1;
sema_total = NUM_SPINLOCK_SEMAPHORES;
}
idx = (counter++ % sema_total) + offset;
/* double check we did things correctly */
s_check_valid(idx);
*lock = idx;
}
void
s_unlock_sema(volatile slock_t *lock)
{
int lockndx = *lock;
s_check_valid(lockndx);
PGSemaphoreUnlock(SpinlockSemaArray[lockndx - 1]);
}
bool
s_lock_free_sema(volatile slock_t *lock)
{
/* We don't currently use S_LOCK_FREE anyway */
elog(ERROR, "spin.c does not support S_LOCK_FREE()");
return false;
}
int
tas_sema(volatile slock_t *lock)
{
int lockndx = *lock;
s_check_valid(lockndx);
/* Note that TAS macros return 0 if *success* */
return !PGSemaphoreTryLock(SpinlockSemaArray[lockndx - 1]);
}
#endif /* !HAVE_SPINLOCKS */

src/include/pg_config.h.in

@@ -382,9 +382,6 @@
/* Define to 1 if the system has the type `socklen_t'. */
#undef HAVE_SOCKLEN_T
/* Define to 1 if you have spinlocks. */
#undef HAVE_SPINLOCKS
/* Define to 1 if you have the `SSL_CTX_set_cert_cb' function. */
#undef HAVE_SSL_CTX_SET_CERT_CB

src/include/pg_config_manual.h

@@ -86,21 +86,6 @@
#define USE_FLOAT8_BYVAL 1
#endif
/*
* When we don't have native spinlocks, we use semaphores to simulate them.
* Decreasing this value reduces consumption of OS resources; increasing it
* may improve performance, but supplying a real spinlock implementation is
* probably far better.
*/
#define NUM_SPINLOCK_SEMAPHORES 128
/*
* When we have neither spinlocks nor atomic operations support we're
* implementing atomic operations on top of spinlock on top of semaphores. To
* be safe against atomic operations while holding a spinlock separate
* semaphores have to be used.
*/
#define NUM_ATOMICS_SEMAPHORES 64
/*
* MAXPGPATH: standard size of a pathname buffer in PostgreSQL (hence,

src/include/port/atomics.h

@@ -16,8 +16,8 @@
*
* There exist generic, hardware independent, implementations for several
* compilers which might be sufficient, although possibly not optimal, for a
* new platform. If no such generic implementation is available spinlocks (or
* even OS provided semaphores) will be used to implement the API.
* new platform. If no such generic implementation is available spinlocks will
* be used to implement the API.
*
* Implement _u64 atomics if and only if your platform can use them
* efficiently (and obviously correctly).

src/include/port/atomics/fallback.h

@@ -20,9 +20,7 @@
#ifndef pg_memory_barrier_impl
/*
* If we have no memory barrier implementation for this architecture, we
* fall back to acquiring and releasing a spinlock. This might, in turn,
* fall back to the semaphore-based spinlock implementation, which will be
* amazingly slow.
* fall back to acquiring and releasing a spinlock.
*
* It's not self-evident that every possible legal implementation of a
* spinlock acquire-and-release would be equivalent to a full memory barrier.

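The fallback that comment describes amounts to acquiring and immediately releasing a spinlock shared by all processes. A hypothetical sketch (in the real code the lock lives in shared memory, not a static variable):

#include <stdbool.h>

static volatile bool my_barrier_lock;   /* stand-in for a shared-memory lock */

void
my_memory_barrier(void)
{
    while (__atomic_test_and_set(&my_barrier_lock, __ATOMIC_ACQUIRE))
        ;                               /* spin */
    __atomic_clear(&my_barrier_lock, __ATOMIC_RELEASE);
}
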
src/include/storage/s_lock.h

@@ -1,10 +1,10 @@
/*-------------------------------------------------------------------------
*
* s_lock.h
* Hardware-dependent implementation of spinlocks.
* Implementation of spinlocks.
*
* NOTE: none of the macros in this file are intended to be called directly.
* Call them through the hardware-independent macros in spin.h.
* Call them through the macros in spin.h.
*
* The following hardware-dependent macros must be provided for each
* supported platform:
@@ -78,13 +78,6 @@
* in assembly language to execute a hardware atomic-test-and-set
* instruction. Equivalent OS-supplied mutex routines could be used too.
*
* If no system-specific TAS() is available (ie, HAVE_SPINLOCKS is not
* defined), then we fall back on an emulation that uses SysV semaphores
* (see spin.c). This emulation will be MUCH MUCH slower than a proper TAS()
* implementation, because of the cost of a kernel call per lock or unlock.
* An old report is that Postgres spends around 40% of its time in semop(2)
* when using the SysV semaphore code.
*
*
* Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
@@ -100,8 +93,6 @@
#error "s_lock.h may not be included from frontend code"
#endif
#ifdef HAVE_SPINLOCKS /* skip spinlocks if requested */
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
/*************************************************************************
* All the gcc inlines
@@ -655,34 +646,10 @@ spin_delay(void)
/* Blow up if we didn't have any way to do spinlocks */
#ifndef HAS_TEST_AND_SET
#error PostgreSQL does not have native spinlock support on this platform. To continue the compilation, rerun configure using --disable-spinlocks. However, performance will be poor. Please report this to pgsql-bugs@lists.postgresql.org.
#error PostgreSQL does not have spinlock support on this platform. Please report this to pgsql-bugs@lists.postgresql.org.
#endif
#else /* !HAVE_SPINLOCKS */
/*
* Fake spinlock implementation using semaphores --- slow and prone
* to fall foul of kernel limits on number of semaphores, so don't use this
* unless you must! The subroutines appear in spin.c.
*/
typedef int slock_t;
extern bool s_lock_free_sema(volatile slock_t *lock);
extern void s_unlock_sema(volatile slock_t *lock);
extern void s_init_lock_sema(volatile slock_t *lock, bool nested);
extern int tas_sema(volatile slock_t *lock);
#define S_LOCK_FREE(lock) s_lock_free_sema(lock)
#define S_UNLOCK(lock) s_unlock_sema(lock)
#define S_INIT_LOCK(lock) s_init_lock_sema(lock, false)
#define TAS(lock) tas_sema(lock)
#endif /* HAVE_SPINLOCKS */
/*
* Default Definitions - override these above as needed.
*/

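A minimal sketch of the port-specific section a new platform would supply per that contract. HAS_TEST_AND_SET, slock_t, and TAS() are the real interface named above; my_tas and the use of a GCC builtin are illustrative:

#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) my_tas(lock)

static inline int
my_tas(volatile slock_t *lock)
{
    /* TAS() returns 0 on success, nonzero if the lock was already held */
    return __atomic_test_and_set(lock, __ATOMIC_ACQUIRE) ? 1 : 0;
}
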
src/include/storage/spin.h

@@ -1,11 +1,11 @@
/*-------------------------------------------------------------------------
*
* spin.h
* Hardware-independent implementation of spinlocks.
* API for spinlocks.
*
*
* The hardware-independent interface to spinlocks is defined by the
* typedef "slock_t" and these macros:
* The interface to spinlocks is defined by the typedef "slock_t" and
* these macros:
*
* void SpinLockInit(volatile slock_t *lock)
* Initialize a spinlock (to the unlocked state).
@@ -52,9 +52,6 @@
#define SPIN_H
#include "storage/s_lock.h"
#ifndef HAVE_SPINLOCKS
#include "storage/pg_sema.h"
#endif
#define SpinLockInit(lock) S_INIT_LOCK(lock)
@@ -65,13 +62,4 @@
#define SpinLockFree(lock) S_LOCK_FREE(lock)
extern int SpinlockSemas(void);
extern Size SpinlockSemaSize(void);
#ifndef HAVE_SPINLOCKS
extern void SpinlockSemaInit(void);
extern PGDLLIMPORT PGSemaphore *SpinlockSemaArray;
#endif
#endif /* SPIN_H */
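
For context, a sketch of typical backend usage of the macros this header declares. SharedCounter is a made-up example; SpinLockInit, SpinLockAcquire, and SpinLockRelease are the real API:

#include "postgres.h"
#include "storage/spin.h"

typedef struct SharedCounter
{
    slock_t     mutex;          /* protects value */
    int         value;
} SharedCounter;

void
shared_counter_init(SharedCounter *c)
{
    SpinLockInit(&c->mutex);
    c->value = 0;
}

int
shared_counter_add(SharedCounter *c, int delta)
{
    int         newval;

    /* hold the lock only across a few instructions; never elog() inside */
    SpinLockAcquire(&c->mutex);
    newval = c->value + delta;
    c->value = newval;
    SpinLockRelease(&c->mutex);
    return newval;
}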

src/test/regress/regress.c

@@ -887,92 +887,8 @@ test_spinlock(void)
if (memcmp(struct_w_lock.data_after, "ef12", 4) != 0)
elog(ERROR, "padding after spinlock modified");
}
/*
* Ensure that allocating more than INT32_MAX emulated spinlocks works.
* That's interesting because the spinlock emulation uses a 32bit integer
* to map spinlocks onto semaphores. There've been bugs...
*/
#ifndef HAVE_SPINLOCKS
{
/*
* Initialize enough spinlocks to advance counter close to wraparound.
* It's too expensive to perform acquire/release for each, as those
* may be syscalls when the spinlock emulation is used (and even just
* atomic TAS would be expensive).
*/
for (uint32 i = 0; i < INT32_MAX - 100000; i++)
{
slock_t lock;
SpinLockInit(&lock);
}
for (uint32 i = 0; i < 200000; i++)
{
slock_t lock;
SpinLockInit(&lock);
SpinLockAcquire(&lock);
SpinLockRelease(&lock);
SpinLockAcquire(&lock);
SpinLockRelease(&lock);
}
}
#endif
}
/*
* Verify that performing atomic ops inside a spinlock isn't a
* problem. Realistically that's only going to be a problem when both
* --disable-spinlocks and --disable-atomics are used, but it's cheap enough
* to just always test.
*
* The test works by initializing enough atomics that we'd conflict if there
* were an overlap between a spinlock and an atomic by holding a spinlock
* while manipulating more than NUM_SPINLOCK_SEMAPHORES atomics.
*
* NUM_TEST_ATOMICS doesn't really need to be more than
* NUM_SPINLOCK_SEMAPHORES, but it seems better to test a bit more
* extensively.
*/
static void
test_atomic_spin_nest(void)
{
slock_t lock;
#define NUM_TEST_ATOMICS (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES + 27)
pg_atomic_uint32 atomics32[NUM_TEST_ATOMICS];
pg_atomic_uint64 atomics64[NUM_TEST_ATOMICS];
SpinLockInit(&lock);
for (int i = 0; i < NUM_TEST_ATOMICS; i++)
{
pg_atomic_init_u32(&atomics32[i], 0);
pg_atomic_init_u64(&atomics64[i], 0);
}
/* just so it's not all zeroes */
for (int i = 0; i < NUM_TEST_ATOMICS; i++)
{
EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&atomics32[i], i), 0);
EXPECT_EQ_U64(pg_atomic_fetch_add_u64(&atomics64[i], i), 0);
}
/* test whether we can do atomic op with lock held */
SpinLockAcquire(&lock);
for (int i = 0; i < NUM_TEST_ATOMICS; i++)
{
EXPECT_EQ_U32(pg_atomic_fetch_sub_u32(&atomics32[i], i), i);
EXPECT_EQ_U32(pg_atomic_read_u32(&atomics32[i]), 0);
EXPECT_EQ_U64(pg_atomic_fetch_sub_u64(&atomics64[i], i), i);
EXPECT_EQ_U64(pg_atomic_read_u64(&atomics64[i]), 0);
}
SpinLockRelease(&lock);
}
#undef NUM_TEST_ATOMICS
PG_FUNCTION_INFO_V1(test_atomic_ops);
Datum
test_atomic_ops(PG_FUNCTION_ARGS)
@@ -989,8 +905,6 @@ test_atomic_ops(PG_FUNCTION_ARGS)
*/
test_spinlock();
test_atomic_spin_nest();
PG_RETURN_BOOL(true);
}