dnl Process this file with autoconf to produce a configure script.
dnl configure.in
dnl
dnl Developers, please strive to achieve this order:
dnl
dnl 0. Initialization and options processing
dnl 1. Programs
dnl 2. Libraries
dnl 3. Header files
dnl 4. Types
dnl 5. Structures
dnl 6. Compiler characteristics
dnl 7. Functions, global variables
dnl 8. System services
dnl
dnl Read the Autoconf manual for details.
dnl
m4_pattern_forbid(^PGAC_)dnl to catch undefined macros
AC_INIT([PostgreSQL], [13beta1], [pgsql-bugs@lists.postgresql.org], [], [https://www.postgresql.org/])
m4_if(m4_defn([m4_PACKAGE_VERSION]), [2.69], [], [m4_fatal([Autoconf version 2.69 is required.
Untested combinations of 'autoconf' and PostgreSQL versions are not
recommended. You can remove the check from 'configure.in' but it is then
your responsibility whether the result works or not.])])
AC_COPYRIGHT([Copyright (c) 1996-2020, PostgreSQL Global Development Group])
AC_CONFIG_SRCDIR([src/backend/access/common/heaptuple.c])
AC_CONFIG_AUX_DIR(config)
AC_PREFIX_DEFAULT(/usr/local/pgsql)
AC_DEFINE_UNQUOTED(CONFIGURE_ARGS, ["$ac_configure_args"], [Saved arguments from configure])
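# The outer [] below are m4 quotes protecting expr's bracket expressions:
# PG_MAJORVERSION is the leading digit string of PACKAGE_VERSION, and
# PG_MINORVERSION is the digits after the first dot (empty, hence defaulted
# to 0 below, for versions like "13beta1").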
[PG_MAJORVERSION=`expr "$PACKAGE_VERSION" : '\([0-9][0-9]*\)'`]
[PG_MINORVERSION=`expr "$PACKAGE_VERSION" : '.*\.\([0-9][0-9]*\)'`]
test -n "$PG_MINORVERSION" || PG_MINORVERSION=0
AC_SUBST(PG_MAJORVERSION)
AC_DEFINE_UNQUOTED(PG_MAJORVERSION, "$PG_MAJORVERSION", [PostgreSQL major version as a string])
AC_DEFINE_UNQUOTED(PG_MAJORVERSION_NUM, $PG_MAJORVERSION, [PostgreSQL major version number])
AC_DEFINE_UNQUOTED(PG_MINORVERSION_NUM, $PG_MINORVERSION, [PostgreSQL minor version number])
PGAC_ARG_REQ(with, extra-version, [STRING], [append STRING to version],
[PG_VERSION="$PACKAGE_VERSION$withval"],
[PG_VERSION="$PACKAGE_VERSION"])
AC_DEFINE_UNQUOTED(PG_VERSION, "$PG_VERSION", [PostgreSQL version as a string])
AC_CANONICAL_HOST
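# AC_CANONICAL_HOST sets $host, $host_cpu, $host_vendor, and $host_os;
# $host_os drives the template selection below.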
template=
AC_MSG_CHECKING([which template to use])
PGAC_ARG_REQ(with, template, [NAME], [override operating system template],
[
case $withval in
list) echo; ls "$srcdir/src/template"; exit;;
*) if test -f "$srcdir/src/template/$with_template" ; then
template=$withval
else
AC_MSG_ERROR(['$withval' is not a valid template name. Use 'list' for a list.])
fi;;
esac
],
[
# --with-template not given
case $host_os in
aix*) template=aix ;;
cygwin*|msys*) template=cygwin ;;
darwin*) template=darwin ;;
dragonfly*) template=netbsd ;;
freebsd*) template=freebsd ;;
hpux*) template=hpux ;;
linux*|gnu*|k*bsd*-gnu)
template=linux ;;
mingw*) template=win32 ;;
netbsd*) template=netbsd ;;
openbsd*) template=openbsd ;;
solaris*) template=solaris ;;
esac
if test x"$template" = x"" ; then
AC_MSG_ERROR([[
*******************************************************************
PostgreSQL has apparently not been ported to your platform yet.
To try a manual configuration, look into the src/template directory
for a similar platform and use the '--with-template=' option.
Please also contact <]AC_PACKAGE_BUGREPORT[> to see about
rectifying this. Include the above 'checking host system type...'
line.
*******************************************************************
]])
fi
])
AC_MSG_RESULT([$template])
PORTNAME=$template
AC_SUBST(PORTNAME)
# Initialize default assumption that we do not need separate assembly code
# for TAS (test-and-set). This can be overridden by the template file
# when it's executed.
need_tas=no
tas_file=dummy.s
##
## Command line options
##
#
# Add non-standard directories to the include path
#
PGAC_ARG_REQ(with, includes, [DIRS], [look for additional header files in DIRS])
#
# Add non-standard directories to the library search path
#
PGAC_ARG_REQ(with, libraries, [DIRS], [look for additional libraries in DIRS],
[LIBRARY_DIRS=$withval])
PGAC_ARG_REQ(with, libs, [DIRS], [alternative spelling of --with-libraries],
[LIBRARY_DIRS=$withval])
#
# 64-bit integer date/time storage is now the only option, but to avoid
# unnecessary breakage of build scripts, continue to accept an explicit
# "--enable-integer-datetimes" switch.
#
PGAC_ARG_BOOL(enable, integer-datetimes, yes, [obsolete option, no longer supported],
[],
[AC_MSG_ERROR([--disable-integer-datetimes is no longer supported])])
#
# NLS
#
AC_MSG_CHECKING([whether NLS is wanted])
PGAC_ARG_OPTARG(enable, nls,
[LANGUAGES], [enable Native Language Support],
[],
[WANTED_LANGUAGES=$enableval],
[AC_DEFINE(ENABLE_NLS, 1,
[Define to 1 if you want National Language Support. (--enable-nls)])])
AC_MSG_RESULT([$enable_nls])
AC_SUBST(enable_nls)
AC_SUBST(WANTED_LANGUAGES)
#
# Default port number (--with-pgport), default 5432
#
AC_MSG_CHECKING([for default port number])
PGAC_ARG_REQ(with, pgport, [PORTNUM], [set default port number [5432]],
[default_port=$withval],
[default_port=5432])
AC_MSG_RESULT([$default_port])
# Need both of these because some places want an integer and some a string
AC_DEFINE_UNQUOTED(DEF_PGPORT, ${default_port},
[Define to the default TCP port number on which the server listens and
to which clients will try to connect. This can be overridden at run-time,
but it's convenient if your clients have the right default compiled in.
(--with-pgport=PORTNUM)])
AC_DEFINE_UNQUOTED(DEF_PGPORT_STR, "${default_port}",
[Define to the default TCP port number as a string constant.])
AC_SUBST(default_port)
# It's worth validating port; you can get very confusing errors otherwise
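# (The sed checks work by deletion: stripping the digits must leave an empty
# string, and stripping a leading '0' must not change the value.)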
if test x"$default_port" = x""; then
AC_MSG_ERROR([invalid --with-pgport specification: empty string])
elif test ! x`echo "$default_port" | sed -e 's/[[0-9]]*//'` = x""; then
AC_MSG_ERROR([invalid --with-pgport specification: must be a number])
elif test ! x`echo "$default_port" | sed -e 's/^0.//'` = x"$default_port"; then
AC_MSG_ERROR([invalid --with-pgport specification: must not have leading 0])
elif test "$default_port" -lt "1" -o "$default_port" -gt "65535"; then
AC_MSG_ERROR([invalid --with-pgport specification: must be between 1 and 65535])
fi
#
# '-rpath'-like feature can be disabled
#
PGAC_ARG_BOOL(enable, rpath, yes,
[do not embed shared library search path in executables])
AC_SUBST(enable_rpath)
#
# Spinlocks
#
PGAC_ARG_BOOL(enable, spinlocks, yes,
[do not use spinlocks])
#
# Atomic operations
#
PGAC_ARG_BOOL(enable, atomics, yes,
[do not use atomic operations])
#
# --enable-debug adds -g to compiler flags
#
PGAC_ARG_BOOL(enable, debug, no,
[build with debugging symbols (-g)])
AC_SUBST(enable_debug)
#
# --enable-profiling enables gcc profiling
#
PGAC_ARG_BOOL(enable, profiling, no,
[build with profiling enabled])
#
# --enable-coverage enables generation of code coverage metrics with gcov
#
PGAC_ARG_BOOL(enable, coverage, no,
[build with coverage testing instrumentation],
[PGAC_PATH_PROGS(GCOV, gcov)
if test -z "$GCOV"; then
AC_MSG_ERROR([gcov not found])
fi
PGAC_PATH_PROGS(LCOV, lcov)
if test -z "$LCOV"; then
AC_MSG_ERROR([lcov not found])
fi
PGAC_PATH_PROGS(GENHTML, genhtml)
if test -z "$GENHTML"; then
AC_MSG_ERROR([genhtml not found])
fi])
AC_SUBST(enable_coverage)
#
# DTrace
#
PGAC_ARG_BOOL(enable, dtrace, no,
[build with DTrace support],
[PGAC_PATH_PROGS(DTRACE, dtrace)
if test -z "$DTRACE"; then
AC_MSG_ERROR([dtrace not found])
fi
AC_SUBST(DTRACEFLAGS)])
AC_SUBST(enable_dtrace)
#
# TAP tests
#
PGAC_ARG_BOOL(enable, tap-tests, no,
[enable TAP tests (requires Perl and IPC::Run)])
AC_SUBST(enable_tap_tests)
#
# Block size
#
AC_MSG_CHECKING([for block size])
PGAC_ARG_REQ(with, blocksize, [BLOCKSIZE], [set table block size in kB [8]],
[blocksize=$withval],
[blocksize=8])
case ${blocksize} in
1) BLCKSZ=1024;;
2) BLCKSZ=2048;;
4) BLCKSZ=4096;;
8) BLCKSZ=8192;;
16) BLCKSZ=16384;;
32) BLCKSZ=32768;;
*) AC_MSG_ERROR([Invalid block size. Allowed values are 1,2,4,8,16,32.])
esac
AC_MSG_RESULT([${blocksize}kB])
AC_DEFINE_UNQUOTED([BLCKSZ], ${BLCKSZ}, [
Size of a disk block --- this also limits the size of a tuple. You
can set it bigger if you need bigger tuples (although TOAST should
reduce the need to have large tuples, since fields can be spread
across multiple tuples).
BLCKSZ must be a power of 2. The maximum possible value of BLCKSZ
is currently 2^15 (32768). This is determined by the 15-bit widths
of the lp_off and lp_len fields in ItemIdData (see
include/storage/itemid.h).
Changing BLCKSZ requires an initdb.
])
#
# Relation segment size
#
AC_MSG_CHECKING([for segment size])
PGAC_ARG_REQ(with, segsize, [SEGSIZE], [set table segment size in GB [1]],
[segsize=$withval],
[segsize=1])
# this expression is set up to avoid unnecessary integer overflow
# blocksize is already guaranteed to be a factor of 1024
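# e.g. with the defaults (8kB blocks, 1GB segments): (1024/8) * 1 * 1024 = 131072 blocks per segment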
RELSEG_SIZE=`expr '(' 1024 / ${blocksize} ')' '*' ${segsize} '*' 1024`
test $? -eq 0 || exit 1
AC_MSG_RESULT([${segsize}GB])
AC_DEFINE_UNQUOTED([RELSEG_SIZE], ${RELSEG_SIZE}, [
RELSEG_SIZE is the maximum number of blocks allowed in one disk file.
Thus, the maximum size of a single file is RELSEG_SIZE * BLCKSZ;
relations bigger than that are divided into multiple files.
RELSEG_SIZE * BLCKSZ must be less than your OS' limit on file size.
This is often 2 GB or 4GB in a 32-bit operating system, unless you
have large file support enabled. By default, we make the limit 1 GB
to avoid any possible integer-overflow problems within the OS.
A limit smaller than necessary only means we divide a large
relation into more chunks than necessary, so it seems best to err
in the direction of a small limit.
A power-of-2 value is recommended to save a few cycles in md.c,
but is not absolutely required.
Changing RELSEG_SIZE requires an initdb.
])
#
# WAL block size
#
AC_MSG_CHECKING([for WAL block size])
PGAC_ARG_REQ(with, wal-blocksize, [BLOCKSIZE], [set WAL block size in kB [8]],
[wal_blocksize=$withval],
[wal_blocksize=8])
case ${wal_blocksize} in
1) XLOG_BLCKSZ=1024;;
2) XLOG_BLCKSZ=2048;;
4) XLOG_BLCKSZ=4096;;
8) XLOG_BLCKSZ=8192;;
16) XLOG_BLCKSZ=16384;;
32) XLOG_BLCKSZ=32768;;
64) XLOG_BLCKSZ=65536;;
*) AC_MSG_ERROR([Invalid WAL block size. Allowed values are 1,2,4,8,16,32,64.])
esac
AC_MSG_RESULT([${wal_blocksize}kB])
AC_DEFINE_UNQUOTED([XLOG_BLCKSZ], ${XLOG_BLCKSZ}, [
Size of a WAL file block. This need have no particular relation to BLCKSZ.
XLOG_BLCKSZ must be a power of 2, and if your system supports O_DIRECT I/O,
XLOG_BLCKSZ must be a multiple of the alignment requirement for direct-I/O
buffers, else direct I/O may fail.
Changing XLOG_BLCKSZ requires an initdb.
])
#
# C compiler
#
# For historical reasons you can also use --with-CC to specify the C compiler
# to use, although the standard way to do this is to set the CC environment
# variable.
PGAC_ARG_REQ(with, CC, [CMD], [set compiler (deprecated)], [CC=$with_CC])
case $template in
aix) pgac_cc_list="gcc xlc"; pgac_cxx_list="g++ xlC";;
*) pgac_cc_list="gcc cc"; pgac_cxx_list="g++ c++";;
esac
AC_PROG_CC([$pgac_cc_list])
AC_PROG_CC_C99()
# Error out if the compiler does not support C99, as the codebase
# relies on that.
if test "$ac_cv_prog_cc_c99" = no; then
AC_MSG_ERROR([C compiler "$CC" does not support C99])
fi
AC_PROG_CXX([$pgac_cxx_list])
# Check if it's Intel's compiler, which (usually) pretends to be gcc,
# but has idiosyncrasies of its own. We assume icc will define
# __INTEL_COMPILER regardless of CFLAGS.
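# (@%:@ is the Autoconf quadrigraph for a literal '#', keeping the
# preprocessor lines below intact through m4 expansion.)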
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [@%:@ifndef __INTEL_COMPILER
choke me
@%:@endif])], [ICC=yes], [ICC=no])
# Check if it's Sun Studio compiler. We assume that
# __SUNPRO_C will be defined for Sun Studio compilers
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [@%:@ifndef __SUNPRO_C
choke me
@%:@endif])], [SUN_STUDIO_CC=yes], [SUN_STUDIO_CC=no])
AC_SUBST(SUN_STUDIO_CC)
#
# LLVM
#
# Checked early because subsequent tests depend on it.
PGAC_ARG_BOOL(with, llvm, no, [build with LLVM based JIT support],
[AC_DEFINE([USE_LLVM], 1, [Define to 1 to build with LLVM based JIT support. (--with-llvm)])])
AC_SUBST(with_llvm)
dnl must use AS_IF here, else AC_REQUIRES inside PGAC_LLVM_SUPPORT malfunctions
AS_IF([test "$with_llvm" = yes], [
PGAC_LLVM_SUPPORT()
]) # fi
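# AC_PROG_CC/AC_PROG_CXX may have inserted default optimization flags
# (e.g. -g -O2 for gcc); clear them so the template file and the logic
# below fully control the flags.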
unset CFLAGS
unset CXXFLAGS
#
# Read the template
#
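# The template script may set compiler defaults and variables consumed
# below, such as CFLAGS, CFLAGS_SL, SRCH_INC, SRCH_LIB, need_tas, and
# tas_file.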
. "$srcdir/src/template/$template" || exit
# C[XX]FLAGS are selected so:
# If the user specifies something in the environment, that is used.
# else: If the template file set something, that is used.
# else: If coverage was enabled, don't set anything.
# else: If the compiler is GCC, then we use -O2.
# else: If the compiler is something else, then we use -O, unless debugging.
if test "$ac_env_CFLAGS_set" = set; then
CFLAGS=$ac_env_CFLAGS_value
elif test "${CFLAGS+set}" = set; then
: # (keep what template set)
elif test "$enable_coverage" = yes; then
: # no optimization by default
elif test "$GCC" = yes; then
CFLAGS="-O2"
else
# if the user selected debug mode, don't use -O
if test "$enable_debug" != yes; then
CFLAGS="-O"
fi
fi
if test "$ac_env_CXXFLAGS_set" = set; then
CXXFLAGS=$ac_env_CXXFLAGS_value
elif test "${CXXFLAGS+set}" = set; then
: # (keep what template set)
elif test "$enable_coverage" = yes; then
: # no optimization by default
elif test "$GCC" = yes; then
CXXFLAGS="-O2"
else
# if the user selected debug mode, don't use -O
if test "$enable_debug" != yes; then
CXXFLAGS="-O"
fi
fi
# When generating bitcode (for inlining) we always want to use -O2
# even when --enable-debug is specified. The bitcode is not going to
# be used for line-by-line debugging, and JIT inlining doesn't work
# without at least -O1 (otherwise clang will emit 'noinline'
# attributes everywhere), which is bad for testing. Still allow the
# environment to override if done explicitly.
if test "$ac_env_BITCODE_CFLAGS_set" = set; then
BITCODE_CFLAGS=$ac_env_BITCODE_CFLAGS_value
else
BITCODE_CFLAGS="-O2 $BITCODE_CFLAGS"
fi
if test "$ac_env_BITCODE_CXXFLAGS_set" = set; then
BITCODE_CXXFLAGS=$ac_env_BITCODE_CXXFLAGS_value
else
BITCODE_CXXFLAGS="-O2 $BITCODE_CXXFLAGS"
fi
# C[XX]FLAGS we determined above will be added back at the end
user_CFLAGS=$CFLAGS
CFLAGS=""
user_CXXFLAGS=$CXXFLAGS
CXXFLAGS=""
user_BITCODE_CFLAGS=$BITCODE_CFLAGS
BITCODE_CFLAGS=""
user_BITCODE_CXXFLAGS=$BITCODE_CXXFLAGS
BITCODE_CXXFLAGS=""
# set CFLAGS_VECTOR from the environment, if available
if test "$ac_env_CFLAGS_VECTOR_set" = set; then
CFLAGS_VECTOR=$ac_env_CFLAGS_VECTOR_value
fi
# Some versions of GCC support some additional useful warning flags.
# Check whether they are supported, and add them to CFLAGS if so.
# ICC pretends to be GCC but it's lying; it doesn't support these flags,
# but has its own. Also check other compiler-specific flags here.
if test "$GCC" = yes -a "$ICC" = no; then
CFLAGS="-Wall -Wmissing-prototypes -Wpointer-arith"
CXXFLAGS="-Wall -Wpointer-arith"
# These work in some but not all gcc versions
save_CFLAGS=$CFLAGS
PGAC_PROG_CC_CFLAGS_OPT([-Wdeclaration-after-statement])
# -Wdeclaration-after-statement isn't applicable for C++. Specific C files
# disable it, so AC_SUBST the negative form.
PERMIT_DECLARATION_AFTER_STATEMENT=
if test x"$save_CFLAGS" != x"$CFLAGS"; then
PERMIT_DECLARATION_AFTER_STATEMENT=-Wno-declaration-after-statement
fi
AC_SUBST(PERMIT_DECLARATION_AFTER_STATEMENT)
# Really don't want VLAs to be used in our dialect of C
PGAC_PROG_CC_CFLAGS_OPT([-Werror=vla])
# -Wvla is not applicable for C++
PGAC_PROG_CC_CFLAGS_OPT([-Wendif-labels])
PGAC_PROG_CXX_CFLAGS_OPT([-Wendif-labels])
PGAC_PROG_CC_CFLAGS_OPT([-Wmissing-format-attribute])
PGAC_PROG_CXX_CFLAGS_OPT([-Wmissing-format-attribute])
PGAC_PROG_CC_CFLAGS_OPT([-Wimplicit-fallthrough=3])
PGAC_PROG_CXX_CFLAGS_OPT([-Wimplicit-fallthrough=3])
# This was included in -Wall/-Wformat in older GCC versions
PGAC_PROG_CC_CFLAGS_OPT([-Wformat-security])
PGAC_PROG_CXX_CFLAGS_OPT([-Wformat-security])
# Disable strict-aliasing rules; needed for gcc 3.3+
PGAC_PROG_CC_CFLAGS_OPT([-fno-strict-aliasing])
PGAC_PROG_CXX_CFLAGS_OPT([-fno-strict-aliasing])
# Disable optimizations that assume no overflow; needed for gcc 4.3+
PGAC_PROG_CC_CFLAGS_OPT([-fwrapv])
PGAC_PROG_CXX_CFLAGS_OPT([-fwrapv])
# Disable FP optimizations that cause various errors on gcc 4.5+ or maybe 4.6+
PGAC_PROG_CC_CFLAGS_OPT([-fexcess-precision=standard])
PGAC_PROG_CXX_CFLAGS_OPT([-fexcess-precision=standard])
# Optimization flags for specific files that benefit from vectorization
PGAC_PROG_CC_VAR_OPT(CFLAGS_VECTOR, [-funroll-loops])
PGAC_PROG_CC_VAR_OPT(CFLAGS_VECTOR, [-ftree-vectorize])
# We want to suppress clang's unhelpful unused-command-line-argument warnings
# but gcc won't complain about unrecognized -Wno-foo switches, so we have to
# test for the positive form and if that works, add the negative form
NOT_THE_CFLAGS=""
PGAC_PROG_CC_VAR_OPT(NOT_THE_CFLAGS, [-Wunused-command-line-argument])
if test -n "$NOT_THE_CFLAGS"; then
CFLAGS="$CFLAGS -Wno-unused-command-line-argument"
fi
# Similarly disable useless truncation warnings from gcc 8+
NOT_THE_CFLAGS=""
PGAC_PROG_CC_VAR_OPT(NOT_THE_CFLAGS, [-Wformat-truncation])
if test -n "$NOT_THE_CFLAGS"; then
CFLAGS="$CFLAGS -Wno-format-truncation"
fi
NOT_THE_CFLAGS=""
PGAC_PROG_CC_VAR_OPT(NOT_THE_CFLAGS, [-Wstringop-truncation])
if test -n "$NOT_THE_CFLAGS"; then
CFLAGS="$CFLAGS -Wno-stringop-truncation"
fi
elif test "$ICC" = yes; then
# Intel's compiler has a bug/misoptimization in checking for
# division by NAN (NaN == 0), -mp1 fixes it, so add it to the CFLAGS.
PGAC_PROG_CC_CFLAGS_OPT([-mp1])
PGAC_PROG_CXX_CFLAGS_OPT([-mp1])
# Make sure strict aliasing is off (though this is said to be the default)
PGAC_PROG_CC_CFLAGS_OPT([-fno-strict-aliasing])
PGAC_PROG_CXX_CFLAGS_OPT([-fno-strict-aliasing])
elif test "$PORTNAME" = "aix"; then
# AIX's xlc has to have strict aliasing turned off too
PGAC_PROG_CC_CFLAGS_OPT([-qnoansialias])
PGAC_PROG_CXX_CFLAGS_OPT([-qnoansialias])
PGAC_PROG_CC_CFLAGS_OPT([-qlonglong])
PGAC_PROG_CXX_CFLAGS_OPT([-qlonglong])
elif test "$PORTNAME" = "hpux"; then
# On some versions of HP-UX, libm functions do not set errno by default.
# Fix that by using +Olibmerrno if the compiler recognizes it.
PGAC_PROG_CC_CFLAGS_OPT([+Olibmerrno])
PGAC_PROG_CXX_CFLAGS_OPT([+Olibmerrno])
fi
AC_SUBST(CFLAGS_VECTOR)
# Determine flags used to emit bitcode for JIT inlining. Need to test
# for behaviour changing compiler flags, to keep compatibility with
# compiler used for normal postgres code.
if test "$with_llvm" = yes ; then
CLANGXX="$CLANG -xc++"
PGAC_PROG_VARCC_VARFLAGS_OPT(CLANG, BITCODE_CFLAGS, [-fno-strict-aliasing])
PGAC_PROG_VARCXX_VARFLAGS_OPT(CLANGXX, BITCODE_CXXFLAGS, [-fno-strict-aliasing])
PGAC_PROG_VARCC_VARFLAGS_OPT(CLANG, BITCODE_CFLAGS, [-fwrapv])
PGAC_PROG_VARCXX_VARFLAGS_OPT(CLANGXX, BITCODE_CXXFLAGS, [-fwrapv])
PGAC_PROG_VARCC_VARFLAGS_OPT(CLANG, BITCODE_CFLAGS, [-fexcess-precision=standard])
PGAC_PROG_VARCXX_VARFLAGS_OPT(CLANGXX, BITCODE_CXXFLAGS, [-fexcess-precision=standard])
fi
# supply -g if --enable-debug
if test "$enable_debug" = yes && test "$ac_cv_prog_cc_g" = yes; then
CFLAGS="$CFLAGS -g"
fi
if test "$enable_debug" = yes && test "$ac_cv_prog_cxx_g" = yes; then
CXXFLAGS="$CXXFLAGS -g"
fi
# enable code coverage if --enable-coverage
if test "$enable_coverage" = yes; then
if test "$GCC" = yes; then
CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage"
CXXFLAGS="$CXXFLAGS -fprofile-arcs -ftest-coverage"
else
AC_MSG_ERROR([--enable-coverage is supported only when using GCC])
fi
fi
# enable profiling if --enable-profiling
if test "$enable_profiling" = yes && test "$ac_cv_prog_cc_g" = yes; then
if test "$GCC" = yes; then
AC_DEFINE([PROFILE_PID_DIR], 1,
[Define to 1 to allow profiling output to be saved separately for each process.])
CFLAGS="$CFLAGS -pg $PLATFORM_PROFILE_FLAGS"
CXXFLAGS="$CXXFLAGS -pg $PLATFORM_PROFILE_FLAGS"
else
AC_MSG_ERROR([--enable-profiling is supported only when using GCC])
fi
fi
# We already have this in Makefile.win32, but configure needs it too
if test "$PORTNAME" = "win32"; then
CPPFLAGS="$CPPFLAGS -I$srcdir/src/include/port/win32"
fi
# Now that we're done automatically adding stuff to C[XX]FLAGS, put back the
# user-specified flags (if any) at the end. This lets users override
# the automatic additions.
CFLAGS="$CFLAGS $user_CFLAGS"
CXXFLAGS="$CXXFLAGS $user_CXXFLAGS"
BITCODE_CFLAGS="$BITCODE_CFLAGS $user_BITCODE_CFLAGS"
BITCODE_CXXFLAGS="$BITCODE_CXXFLAGS $user_BITCODE_CXXFLAGS"
AC_SUBST(BITCODE_CFLAGS)
AC_SUBST(BITCODE_CXXFLAGS)
# The template file must set up CFLAGS_SL; we don't support user override
AC_SUBST(CFLAGS_SL)
# Check if the compiler still works with the final flag settings
# (note, we're not checking that for CXX, which is optional)
AC_MSG_CHECKING([whether the C compiler still works])
AC_LINK_IFELSE([AC_LANG_PROGRAM([], [return 0;])],
[AC_MSG_RESULT(yes)],
[AC_MSG_RESULT(no)
AC_MSG_ERROR([cannot proceed])])
# Defend against gcc -ffast-math
if test "$GCC" = yes; then
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [@%:@ifdef __FAST_MATH__
choke me
@%:@endif])], [], [AC_MSG_ERROR([do not put -ffast-math in CFLAGS])])
fi
# Defend against clang being used on x86-32 without SSE2 enabled. As current
# versions of clang do not understand -fexcess-precision=standard, the use of
# x87 floating point operations leads to problems like isinf possibly returning
# false for a value that is infinite when converted from the 80bit register to
# the 8byte memory representation.
#
# Only perform the test if the compiler doesn't understand
# -fexcess-precision=standard, that way a potentially fixed compiler will work
# automatically.
if test "$pgac_cv_prog_CC_cflags__fexcess_precision_standard" = no; then
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [
@%:@if defined(__clang__) && defined(__i386__) && !defined(__SSE2_MATH__)
choke me
@%:@endif
])], [],
[AC_MSG_ERROR([Compiling PostgreSQL with clang, on 32bit x86, requires SSE2 support. Use -msse2 or use gcc.])])
fi
AC_PROG_CPP
AC_SUBST(GCC)
#
# Set up TAS assembly code if needed; the template file has now had its
# chance to request this.
#
AC_CONFIG_LINKS([src/backend/port/tas.s:src/backend/port/tas/${tas_file}])
if test "$need_tas" = yes ; then
TAS=tas.o
else
TAS=""
fi
AC_SUBST(TAS)
#
# Set up pkg_config in case we need it below
#
PKG_PROG_PKG_CONFIG
#
# Automatic dependency tracking
#
PGAC_ARG_BOOL(enable, depend, no, [turn on automatic dependency tracking],
[autodepend=yes])
AC_SUBST(autodepend)
#
# Enable assert checks
#
PGAC_ARG_BOOL(enable, cassert, no, [enable assertion checks (for debugging)],
[AC_DEFINE([USE_ASSERT_CHECKING], 1,
[Define to 1 to build with assertion checks. (--enable-cassert)])])
#
# Include directories
#
ac_save_IFS=$IFS
IFS="${IFS}${PATH_SEPARATOR}"
# SRCH_INC comes from the template file
for dir in $with_includes $SRCH_INC; do
if test -d "$dir"; then
INCLUDES="$INCLUDES -I$dir"
else
AC_MSG_WARN([*** Include directory $dir does not exist.])
fi
done
IFS=$ac_save_IFS
AC_SUBST(INCLUDES)
#
# Library directories
#
ac_save_IFS=$IFS
IFS="${IFS}${PATH_SEPARATOR}"
# LIBRARY_DIRS comes from command line, SRCH_LIB from template file.
for dir in $LIBRARY_DIRS $SRCH_LIB; do
if test -d "$dir"; then
LIBDIRS="$LIBDIRS -L$dir"
else
AC_MSG_WARN([*** Library directory $dir does not exist.])
fi
done
IFS=$ac_save_IFS
#
# Enable thread-safe client libraries
#
AC_MSG_CHECKING([allow thread-safe client libraries])
PGAC_ARG_BOOL(enable, thread-safety, yes, [disable thread-safety in client libraries])
if test "$enable_thread_safety" = yes; then
AC_DEFINE([ENABLE_THREAD_SAFETY], 1,
[Define to 1 to build client libraries as thread-safe code. (--enable-thread-safety)])
fi
AC_MSG_RESULT([$enable_thread_safety])
AC_SUBST(enable_thread_safety)
#
# ICU
#
AC_MSG_CHECKING([whether to build with ICU support])
PGAC_ARG_BOOL(with, icu, no, [build with ICU support],
[AC_DEFINE([USE_ICU], 1, [Define to build with ICU support. (--with-icu)])])
AC_MSG_RESULT([$with_icu])
AC_SUBST(with_icu)
if test "$with_icu" = yes; then
PKG_CHECK_MODULES(ICU, icu-uc icu-i18n)
fi
#
# Optionally build Tcl modules (PL/Tcl)
#
AC_MSG_CHECKING([whether to build with Tcl])
PGAC_ARG_BOOL(with, tcl, no, [build Tcl modules (PL/Tcl)])
AC_MSG_RESULT([$with_tcl])
AC_SUBST([with_tcl])
# We see if the path to the Tcl/Tk configuration scripts is specified.
# This will override the use of tclsh to find the paths to search.
PGAC_ARG_REQ(with, tclconfig, [DIR], [tclConfig.sh is in DIR])
#
# Optionally build Perl modules (PL/Perl)
#
AC_MSG_CHECKING([whether to build Perl modules])
PGAC_ARG_BOOL(with, perl, no, [build Perl modules (PL/Perl)])
AC_MSG_RESULT([$with_perl])
AC_SUBST(with_perl)
#
# Optionally build Python modules (PL/Python)
#
AC_MSG_CHECKING([whether to build Python modules])
PGAC_ARG_BOOL(with, python, no, [build Python modules (PL/Python)])
AC_MSG_RESULT([$with_python])
AC_SUBST(with_python)
#
# GSSAPI
#
AC_MSG_CHECKING([whether to build with GSSAPI support])
PGAC_ARG_BOOL(with, gssapi, no, [build with GSSAPI support],
[
AC_DEFINE(ENABLE_GSS, 1, [Define to build with GSSAPI support. (--with-gssapi)])
krb_srvtab="FILE:\$(sysconfdir)/krb5.keytab"
])
AC_MSG_RESULT([$with_gssapi])
AC_SUBST(with_gssapi)
AC_SUBST(krb_srvtab)
#
# Kerberos configuration parameters
#
PGAC_ARG_REQ(with, krb-srvnam,
[NAME], [default service principal name in Kerberos (GSSAPI) [postgres]],
[],
[with_krb_srvnam="postgres"])
AC_SUBST(with_krb_srvnam)
AC_DEFINE_UNQUOTED([PG_KRB_SRVNAM], ["$with_krb_srvnam"],
[Define to the name of the default PostgreSQL service principal in Kerberos (GSSAPI). (--with-krb-srvnam=NAME)])
#
# PAM
#
AC_MSG_CHECKING([whether to build with PAM support])
PGAC_ARG_BOOL(with, pam, no,
[build with PAM support],
[AC_DEFINE([USE_PAM], 1, [Define to 1 to build with PAM support. (--with-pam)])])
AC_MSG_RESULT([$with_pam])
#
# BSD AUTH
#
AC_MSG_CHECKING([whether to build with BSD Authentication support])
PGAC_ARG_BOOL(with, bsd-auth, no,
[build with BSD Authentication support],
[AC_DEFINE([USE_BSD_AUTH], 1, [Define to 1 to build with BSD Authentication support. (--with-bsd-auth)])])
AC_MSG_RESULT([$with_bsd_auth])
#
# LDAP
#
AC_MSG_CHECKING([whether to build with LDAP support])
PGAC_ARG_BOOL(with, ldap, no,
[build with LDAP support],
[AC_DEFINE([USE_LDAP], 1, [Define to 1 to build with LDAP support. (--with-ldap)])])
AC_MSG_RESULT([$with_ldap])
AC_SUBST(with_ldap)
#
# Bonjour
#
AC_MSG_CHECKING([whether to build with Bonjour support])
PGAC_ARG_BOOL(with, bonjour, no,
[build with Bonjour support],
[AC_DEFINE([USE_BONJOUR], 1, [Define to 1 to build with Bonjour support. (--with-bonjour)])])
AC_MSG_RESULT([$with_bonjour])
#
# OpenSSL
#
AC_MSG_CHECKING([whether to build with OpenSSL support])
PGAC_ARG_BOOL(with, openssl, no, [build with OpenSSL support],
[AC_DEFINE([USE_OPENSSL], 1, [Define to build with OpenSSL support. (--with-openssl)])])
AC_MSG_RESULT([$with_openssl])
AC_SUBST(with_openssl)
#
# SELinux
#
AC_MSG_CHECKING([whether to build with SELinux support])
PGAC_ARG_BOOL(with, selinux, no, [build with SELinux support])
AC_SUBST(with_selinux)
AC_MSG_RESULT([$with_selinux])
#
# Systemd
#
AC_MSG_CHECKING([whether to build with systemd support])
PGAC_ARG_BOOL(with, systemd, no, [build with systemd support],
[AC_DEFINE([USE_SYSTEMD], 1, [Define to build with systemd support. (--with-systemd)])])
AC_SUBST(with_systemd)
AC_MSG_RESULT([$with_systemd])
#
# Readline
#
PGAC_ARG_BOOL(with, readline, yes,
[do not use GNU Readline nor BSD Libedit for editing])
# readline on MinGW has problems with backslashes in psql and other bugs.
# This is particularly a problem with non-US code pages.
# Therefore disable its use until we understand the cause. 2004-07-20
if test "$PORTNAME" = "win32"; then
if test "$with_readline" = yes; then
AC_MSG_WARN([*** Readline does not work on MinGW --- disabling])
with_readline=no
fi
fi
AC_SUBST(with_readline)
#
# Prefer libedit
#
PGAC_ARG_BOOL(with, libedit-preferred, no,
[prefer BSD Libedit over GNU Readline])
#
# UUID library
#
# There are at least three UUID libraries in common use: the FreeBSD/NetBSD
# library, the e2fsprogs libuuid (now part of util-linux-ng), and the OSSP
# UUID library. More than one of these might be present on a given platform,
# so we make the user say which one she wants.
#
PGAC_ARG_REQ(with, uuid, [LIB], [build contrib/uuid-ossp using LIB (bsd,e2fs,ossp)])
if test x"$with_uuid" = x"" ; then
with_uuid=no
fi
PGAC_ARG_BOOL(with, ossp-uuid, no, [obsolete spelling of --with-uuid=ossp])
if test "$with_ossp_uuid" = yes ; then
with_uuid=ossp
fi
if test "$with_uuid" = bsd ; then
AC_DEFINE([HAVE_UUID_BSD], 1, [Define to 1 if you have BSD UUID support.])
UUID_EXTRA_OBJS="md5.o sha1.o"
elif test "$with_uuid" = e2fs ; then
AC_DEFINE([HAVE_UUID_E2FS], 1, [Define to 1 if you have E2FS UUID support.])
UUID_EXTRA_OBJS="md5.o sha1.o"
elif test "$with_uuid" = ossp ; then
AC_DEFINE([HAVE_UUID_OSSP], 1, [Define to 1 if you have OSSP UUID support.])
UUID_EXTRA_OBJS=""
elif test "$with_uuid" = no ; then
UUID_EXTRA_OBJS=""
else
AC_MSG_ERROR([--with-uuid must specify one of bsd, e2fs, or ossp])
fi
AC_SUBST(with_uuid)
AC_SUBST(UUID_EXTRA_OBJS)
#
# XML
#
AC_MSG_CHECKING([whether to build with XML support])
PGAC_ARG_BOOL(with, libxml, no, [build with XML support],
[AC_DEFINE([USE_LIBXML], 1, [Define to 1 to build with XML support. (--with-libxml)])])
AC_MSG_RESULT([$with_libxml])
AC_SUBST(with_libxml)
if test "$with_libxml" = yes ; then
# Check pkg-config, then xml2-config. But for backwards compatibility,
# setting XML2_CONFIG overrides pkg-config.
AC_ARG_VAR(XML2_CONFIG, [path to xml2-config utility])dnl
have_libxml2_pkg_config=no
if test -z "$XML2_CONFIG" -a -n "$PKG_CONFIG"; then
PKG_CHECK_MODULES(XML2, [libxml-2.0 >= 2.6.23],
[have_libxml2_pkg_config=yes], [# do nothing])
fi
if test "$have_libxml2_pkg_config" = no ; then
PGAC_PATH_PROGS(XML2_CONFIG, xml2-config)
if test -n "$XML2_CONFIG"; then
XML2_CFLAGS=`$XML2_CONFIG --cflags`
XML2_LIBS=`$XML2_CONFIG --libs`
fi
fi
# Note the user could also set XML2_CFLAGS/XML2_LIBS directly
for pgac_option in $XML2_CFLAGS; do
case $pgac_option in
-I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
esac
done
for pgac_option in $XML2_LIBS; do
case $pgac_option in
-L*) LDFLAGS="$LDFLAGS $pgac_option";;
esac
done
fi
#
# XSLT
#
PGAC_ARG_BOOL(with, libxslt, no, [use XSLT support when building contrib/xml2],
[AC_DEFINE([USE_LIBXSLT], 1, [Define to 1 to use XSLT support when building contrib/xml2. (--with-libxslt)])])
AC_SUBST(with_libxslt)
#
# tzdata
#
PGAC_ARG_REQ(with, system-tzdata,
[DIR], [use system time zone data in DIR])
AC_SUBST(with_system_tzdata)
#
# Zlib
#
PGAC_ARG_BOOL(with, zlib, yes,
[do not use Zlib])
AC_SUBST(with_zlib)
#
# Assignments
#
CPPFLAGS="$CPPFLAGS $INCLUDES"
LDFLAGS="$LDFLAGS $LIBDIRS"
AC_ARG_VAR(LDFLAGS_EX, [extra linker flags for linking executables only])
AC_ARG_VAR(LDFLAGS_SL, [extra linker flags for linking shared libraries only])
PGAC_PROG_LD
AC_SUBST(LD)
AC_SUBST(with_gnu_ld)
AC_PROG_RANLIB
PGAC_CHECK_STRIP
AC_CHECK_TOOL(AR, ar, ar)
if test "$PORTNAME" = "win32"; then
AC_CHECK_TOOL(DLLTOOL, dlltool, dlltool)
AC_CHECK_TOOL(DLLWRAP, dllwrap, dllwrap)
AC_CHECK_TOOL(WINDRES, windres, windres)
fi
AC_PROG_INSTALL
# When Autoconf chooses install-sh as install program it tries to generate
# a relative path to it in each makefile where it substitutes it. This clashes
# with our Makefile.global concept. This workaround helps.
case $INSTALL in
*install-sh*) install_bin='';;
*) install_bin=$INSTALL;;
esac
AC_SUBST(install_bin)
PGAC_PATH_PROGS(TAR, tar)
AC_PROG_LN_S
AC_PROG_MKDIR_P
# When Autoconf chooses install-sh as mkdir -p program it tries to generate
# a relative path to it in each makefile where it substitutes it. This clashes
# with our Makefile.global concept. This workaround helps.
case $MKDIR_P in
*install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';;
esac
PGAC_PATH_BISON
PGAC_PATH_FLEX
PGAC_PATH_PERL
if test "$with_perl" = yes; then
if test -z "$PERL"; then
AC_MSG_ERROR([Perl not found])
fi
PGAC_CHECK_PERL_CONFIGS([archlibexp,privlibexp,useshrplib])
if test "$perl_useshrplib" != yes && test "$perl_useshrplib" != true; then
AC_MSG_ERROR([cannot build PL/Perl because libperl is not a shared library
You might have to rebuild your Perl installation. Refer to the
documentation for details. Use --without-perl to disable building
PL/Perl.])
fi
# On most platforms, archlibexp is also where the Perl include files live ...
perl_includespec="-I$perl_archlibexp/CORE"
# ... but on newer macOS versions, we must use -iwithsysroot to look
# under $PG_SYSROOT
if test \! -f "$perl_archlibexp/CORE/perl.h" ; then
if test -f "$PG_SYSROOT$perl_archlibexp/CORE/perl.h" ; then
perl_includespec="-iwithsysroot $perl_archlibexp/CORE"
fi
fi
AC_SUBST(perl_includespec)dnl
PGAC_CHECK_PERL_EMBED_CCFLAGS
PGAC_CHECK_PERL_EMBED_LDFLAGS
fi
if test "$with_python" = yes; then
PGAC_PATH_PYTHON
PGAC_CHECK_PYTHON_EMBED_SETUP
fi
if test "$cross_compiling" = yes && test -z "$with_system_tzdata"; then
PGAC_PATH_PROGS(ZIC, zic)
if test -z "$ZIC"; then
AC_MSG_ERROR([
When cross-compiling, either use the option --with-system-tzdata to use
existing time-zone data, or set the environment variable ZIC to a zic
program to use during the build.])
fi
fi
#
# Pthreads
#
# For each platform, we need to know about any special compile and link
# libraries, and whether the normal C function names are thread-safe.
# See the comment at the top of src/port/thread.c for more information.
# WIN32 doesn't need the pthread tests; it always uses threads
#
# These tests are run before the library-tests, because linking with the
# other libraries can pull in the pthread functions as a side-effect. We
# want to use the -pthread or similar flags directly, and not rely on
# the side-effects of linking with some other library.
dnl note: We have to use AS_IF here rather than plain if. The AC_CHECK_HEADER
dnl invocation below is the first one in the script, and autoconf generates
dnl additional code for that, which must not be inside the if-block. AS_IF
dnl knows how to do that.
AS_IF([test "$enable_thread_safety" = yes -a "$PORTNAME" != "win32"],
[ # then
AX_PTHREAD # set thread flags
# Some platforms use these, so just define them. They can't hurt if they
# are not supported. For example, on Solaris -D_POSIX_PTHREAD_SEMANTICS
# enables 5-arg getpwuid_r, among other things.
PTHREAD_CFLAGS="$PTHREAD_CFLAGS -D_REENTRANT -D_THREAD_SAFE -D_POSIX_PTHREAD_SEMANTICS"
# Check for *_r functions
_CFLAGS="$CFLAGS"
_LIBS="$LIBS"
CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
LIBS="$LIBS $PTHREAD_LIBS"
AC_CHECK_HEADER(pthread.h, [], [AC_MSG_ERROR([
pthread.h not found; use --disable-thread-safety to disable thread safety])])
AC_CHECK_FUNCS([strerror_r getpwuid_r gethostbyname_r])
# Do test here with the proper thread flags
PGAC_FUNC_STRERROR_R_INT
CFLAGS="$_CFLAGS"
LIBS="$_LIBS"
], [ # else
# do not use values from template file
PTHREAD_CFLAGS=
PTHREAD_LIBS=
]) # fi
AC_SUBST(PTHREAD_CFLAGS)
AC_SUBST(PTHREAD_LIBS)
##
## Libraries
##
## Most libraries are included only if they demonstrably provide a function
## we need, but libm is an exception: always include it, because there are
## too many compilers that play cute optimization games that will break
## probes for standard functions such as pow().
##
AC_CHECK_LIB(m, main)
AC_SEARCH_LIBS(setproctitle, util)
AC_SEARCH_LIBS(dlopen, dl)
AC_SEARCH_LIBS(socket, [socket ws2_32])
AC_SEARCH_LIBS(shl_load, dld)
AC_SEARCH_LIBS(getopt_long, [getopt gnugetopt])
AC_SEARCH_LIBS(shm_open, rt)
AC_SEARCH_LIBS(shm_unlink, rt)
AC_SEARCH_LIBS(clock_gettime, [rt posix4])
# Solaris:
AC_SEARCH_LIBS(fdatasync, [rt posix4])
# Required for thread_test.c on Solaris
AC_SEARCH_LIBS(sched_yield, rt)
# Required for thread_test.c on Solaris 2.5:
# Other ports use it too (HP-UX) so test unconditionally
AC_SEARCH_LIBS(gethostbyname_r, nsl)
# Cygwin:
AC_SEARCH_LIBS(shmget, cygipc)
# *BSD:
AC_SEARCH_LIBS(backtrace_symbols, execinfo)
if test "$with_readline" = yes; then
PGAC_CHECK_READLINE
if test x"$pgac_cv_check_readline" = x"no"; then
AC_MSG_ERROR([readline library not found
If you have readline already installed, see config.log for details on the
failure. It is possible the compiler isn't looking in the proper directory.
Use --without-readline to disable readline support.])
fi
fi
if test "$with_zlib" = yes; then
AC_CHECK_LIB(z, inflate, [],
[AC_MSG_ERROR([zlib library not found
If you have zlib already installed, see config.log for details on the
failure. It is possible the compiler isn't looking in the proper directory.
Use --without-zlib to disable zlib support.])])
fi
if test "$enable_spinlocks" = yes; then
AC_DEFINE(HAVE_SPINLOCKS, 1, [Define to 1 if you have spinlocks.])
else
AC_MSG_WARN([
*** Not using spinlocks will cause poor performance.])
fi
if test "$enable_atomics" = yes; then
AC_DEFINE(HAVE_ATOMICS, 1, [Define to 1 if you want to use atomics if available.])
else
AC_MSG_WARN([
*** Not using atomic operations will cause poor performance.])
fi
if test "$with_gssapi" = yes ; then
if test "$PORTNAME" != "win32"; then
AC_SEARCH_LIBS(gss_init_sec_context, [gssapi_krb5 gss 'gssapi -lkrb5 -lcrypto'], [],
[AC_MSG_ERROR([could not find function 'gss_init_sec_context' required for GSSAPI])])
else
LIBS="$LIBS -lgssapi32"
fi
fi
if test "$with_openssl" = yes ; then
dnl Order matters!
if test "$PORTNAME" != "win32"; then
AC_CHECK_LIB(crypto, CRYPTO_new_ex_data, [], [AC_MSG_ERROR([library 'crypto' is required for OpenSSL])])
AC_CHECK_LIB(ssl, SSL_new, [], [AC_MSG_ERROR([library 'ssl' is required for OpenSSL])])
else
AC_SEARCH_LIBS(CRYPTO_new_ex_data, [eay32 crypto], [], [AC_MSG_ERROR([library 'eay32' or 'crypto' is required for OpenSSL])])
AC_SEARCH_LIBS(SSL_new, [ssleay32 ssl], [], [AC_MSG_ERROR([library 'ssleay32' or 'ssl' is required for OpenSSL])])
fi
# Function introduced in OpenSSL 1.0.2.
AC_CHECK_FUNCS([X509_get_signature_nid])
# Functions introduced in OpenSSL 1.1.0. We used to check for
# OPENSSL_VERSION_NUMBER, but that didn't work with 1.1.0, because LibreSSL
# defines OPENSSL_VERSION_NUMBER to claim version 2.0.0, even though it
# doesn't have these OpenSSL 1.1.0 functions. So check for individual
# functions.
AC_CHECK_FUNCS([OPENSSL_init_ssl BIO_get_data BIO_meth_new ASN1_STRING_get0_data])
# OpenSSL versions before 1.1.0 required setting callback functions for
# thread-safety.  In 1.1.0 this is no longer required, and the CRYPTO_lock()
# function was removed.
AC_CHECK_FUNCS([CRYPTO_lock])
fi
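dnl The AC_CHECK_FUNCS probes above emit HAVE_* macros into the generated
dnl configuration header (pg_config.h).  As a hedged sketch of the consumer
dnl side (not code copied verbatim from this tree), an OpenSSL caller can
dnl then supply a pre-1.1.0 fallback along these lines:
dnl   #ifndef HAVE_BIO_GET_DATA
dnl   #define BIO_get_data(bio) ((bio)->ptr)
dnl   #endif
dnl which is why probing individual functions is more robust than trusting
dnl OPENSSL_VERSION_NUMBER.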
if test "$with_pam" = yes ; then
AC_CHECK_LIB(pam, pam_start, [], [AC_MSG_ERROR([library 'pam' is required for PAM])])
fi
if test "$with_libxml" = yes ; then
AC_CHECK_LIB(xml2, xmlSaveToBuffer, [], [AC_MSG_ERROR([library 'xml2' (version >= 2.6.23) is required for XML support])])
fi
if test "$with_libxslt" = yes ; then
AC_CHECK_LIB(xslt, xsltCleanupGlobals, [], [AC_MSG_ERROR([library 'xslt' is required for XSLT support])])
fi
# Note: We can test for libldap_r only after we know PTHREAD_LIBS
if test "$with_ldap" = yes ; then
_LIBS="$LIBS"
if test "$PORTNAME" != "win32"; then
AC_CHECK_LIB(ldap, ldap_bind, [],
[AC_MSG_ERROR([library 'ldap' is required for LDAP])],
[$EXTRA_LDAP_LIBS])
LDAP_LIBS_BE="-lldap $EXTRA_LDAP_LIBS"
if test "$enable_thread_safety" = yes; then
# on some platforms ldap_r fails to link without PTHREAD_LIBS
AC_CHECK_LIB(ldap_r, ldap_simple_bind, [],
[AC_MSG_ERROR([library 'ldap_r' is required for LDAP])],
[$PTHREAD_CFLAGS $PTHREAD_LIBS $EXTRA_LDAP_LIBS])
LDAP_LIBS_FE="-lldap_r $EXTRA_LDAP_LIBS"
else
LDAP_LIBS_FE="-lldap $EXTRA_LDAP_LIBS"
fi
AC_CHECK_FUNCS([ldap_initialize])
else
AC_CHECK_LIB(wldap32, ldap_bind, [], [AC_MSG_ERROR([library 'wldap32' is required for LDAP])])
LDAP_LIBS_FE="-lwldap32"
LDAP_LIBS_BE="-lwldap32"
fi
LIBS="$_LIBS"
fi
AC_SUBST(LDAP_LIBS_FE)
AC_SUBST(LDAP_LIBS_BE)
# for contrib/sepgsql
if test "$with_selinux" = yes; then
AC_CHECK_LIB(selinux, security_compute_create_name, [],
[AC_MSG_ERROR([library 'libselinux', version 2.1.10 or newer, is required for SELinux support])])
fi
# for contrib/uuid-ossp
if test "$with_uuid" = bsd ; then
# On BSD, the UUID functions are in libc
AC_CHECK_FUNC(uuid_to_string,
[UUID_LIBS=""],
[AC_MSG_ERROR([BSD UUID functions are not present])])
elif test "$with_uuid" = e2fs ; then
# On macOS, the UUID functions are in libc
AC_CHECK_FUNC(uuid_generate,
[UUID_LIBS=""],
[AC_CHECK_LIB(uuid, uuid_generate,
[UUID_LIBS="-luuid"],
[AC_MSG_ERROR([library 'uuid' is required for E2FS UUID])])])
elif test "$with_uuid" = ossp ; then
AC_CHECK_LIB(ossp-uuid, uuid_export,
[UUID_LIBS="-lossp-uuid"],
[AC_CHECK_LIB(uuid, uuid_export,
[UUID_LIBS="-luuid"],
[AC_MSG_ERROR([library 'ossp-uuid' or 'uuid' is required for OSSP UUID])])])
fi
AC_SUBST(UUID_LIBS)
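dnl A short usage sketch: which of the three branches above runs is chosen
dnl at configure time, e.g. for the e2fsprogs implementation (libuuid on
dnl most Linux systems):
dnl   ./configure --with-uuid=e2fs
dnl UUID_LIBS then carries whatever extra linker switch (possibly none, or
dnl -luuid) contrib/uuid-ossp has to be linked with.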
##
## Header files
##
AC_HEADER_STDBOOL
AC_CHECK_HEADERS(m4_normalize([
atomic.h
copyfile.h
execinfo.h
getopt.h
ifaddrs.h
langinfo.h
mbarrier.h
poll.h
sys/epoll.h
sys/event.h
sys/ipc.h
sys/prctl.h
sys/procctl.h
sys/pstat.h
sys/resource.h
sys/select.h
sys/sem.h
sys/shm.h
sys/sockio.h
sys/tas.h
sys/un.h
termios.h
ucred.h
wctype.h
]))
# On BSD, test for net/if.h will fail unless sys/socket.h
# is included first.
AC_CHECK_HEADERS(net/if.h, [], [],
[AC_INCLUDES_DEFAULT
#include <sys/socket.h>
])
# On OpenBSD, test for sys/ucred.h will fail unless sys/param.h
# is included first.
AC_CHECK_HEADERS(sys/ucred.h, [], [],
[AC_INCLUDES_DEFAULT
#include <sys/param.h>
])
# At least on IRIX, test for netinet/tcp.h will fail unless
# netinet/in.h is included first.
AC_CHECK_HEADERS(netinet/tcp.h, [], [],
[AC_INCLUDES_DEFAULT
#include <netinet/in.h>
])
if expr x"$pgac_cv_check_readline" : 'x-lreadline' >/dev/null ; then
AC_CHECK_HEADERS(readline/readline.h, [],
[AC_CHECK_HEADERS(readline.h, [],
[AC_MSG_ERROR([readline header not found
If you have readline already installed, see config.log for details on the
failure. It is possible the compiler isn't looking in the proper directory.
Use --without-readline to disable readline support.])])])
AC_CHECK_HEADERS(readline/history.h, [],
[AC_CHECK_HEADERS(history.h, [],
[AC_MSG_ERROR([history header not found
If you have readline already installed, see config.log for details on the
failure. It is possible the compiler isn't looking in the proper directory.
Use --without-readline to disable readline support.])])])
fi
if expr x"$pgac_cv_check_readline" : 'x-ledit' >/dev/null ; then
# Some installations of libedit usurp /usr/include/readline/, which seems
# bad practice, since in combined installations readline will have its headers
# there. We might have to resort to AC_EGREP checks to make sure we found
# the proper header...
AC_CHECK_HEADERS(editline/readline.h, [],
[AC_CHECK_HEADERS(readline.h, [],
[AC_CHECK_HEADERS(readline/readline.h, [],
[AC_MSG_ERROR([readline header not found
If you have libedit already installed, see config.log for details on the
failure. It is possible the compiler isn't looking in the proper directory.
Use --without-readline to disable libedit support.])])])])
# Note: in a libedit installation, history.h is sometimes a dummy, and may
# not be there at all. Hence, don't complain if not found. We must check
# though, since in yet other versions it is an independent header.
AC_CHECK_HEADERS(editline/history.h, [],
[AC_CHECK_HEADERS(history.h, [],
[AC_CHECK_HEADERS(readline/history.h)])])
fi
if test "$with_zlib" = yes; then
AC_CHECK_HEADER(zlib.h, [], [AC_MSG_ERROR([zlib header not found
If you have zlib already installed, see config.log for details on the
failure. It is possible the compiler isn't looking in the proper directory.
Use --without-zlib to disable zlib support.])])
fi
if test "$with_gssapi" = yes ; then
AC_CHECK_HEADERS(gssapi/gssapi.h, [],
[AC_CHECK_HEADERS(gssapi.h, [], [AC_MSG_ERROR([gssapi.h header file is required for GSSAPI])])])
fi
if test "$with_openssl" = yes ; then
AC_CHECK_HEADER(openssl/ssl.h, [], [AC_MSG_ERROR([header file <openssl/ssl.h> is required for OpenSSL])])
AC_CHECK_HEADER(openssl/err.h, [], [AC_MSG_ERROR([header file <openssl/err.h> is required for OpenSSL])])
fi
if test "$with_pam" = yes ; then
AC_CHECK_HEADERS(security/pam_appl.h, [],
[AC_CHECK_HEADERS(pam/pam_appl.h, [],
[AC_MSG_ERROR([header file <security/pam_appl.h> or <pam/pam_appl.h> is required for PAM.])])])
fi
if test "$with_bsd_auth" = yes ; then
AC_CHECK_HEADER(bsd_auth.h, [], [AC_MSG_ERROR([header file <bsd_auth.h> is required for BSD Authentication support])])
fi
if test "$with_systemd" = yes ; then
AC_CHECK_HEADER(systemd/sd-daemon.h, [], [AC_MSG_ERROR([header file <systemd/sd-daemon.h> is required for systemd support])])
fi
if test "$with_libxml" = yes ; then
AC_CHECK_HEADER(libxml/parser.h, [], [AC_MSG_ERROR([header file <libxml/parser.h> is required for XML support])])
fi
if test "$with_libxslt" = yes ; then
AC_CHECK_HEADER(libxslt/xslt.h, [], [AC_MSG_ERROR([header file <libxslt/xslt.h> is required for XSLT support])])
fi
if test "$with_ldap" = yes ; then
if test "$PORTNAME" != "win32"; then
AC_CHECK_HEADERS(ldap.h, [],
[AC_MSG_ERROR([header file <ldap.h> is required for LDAP])])
PGAC_LDAP_SAFE
else
AC_CHECK_HEADERS(winldap.h, [],
[AC_MSG_ERROR([header file <winldap.h> is required for LDAP])],
[AC_INCLUDES_DEFAULT
#include <windows.h>
])
fi
fi
if test "$with_bonjour" = yes ; then
AC_CHECK_HEADER(dns_sd.h, [], [AC_MSG_ERROR([header file <dns_sd.h> is required for Bonjour])])
dnl At some point we might add something like
dnl AC_SEARCH_LIBS(DNSServiceRegister, dns_sd)
dnl but right now, what that would mainly accomplish is to encourage
dnl people to try to use the avahi implementation, which does not work.
dnl If you want to use Apple's own Bonjour code on another platform,
dnl just add -ldns_sd to LIBS manually.
fi
# for contrib/uuid-ossp
if test "$with_uuid" = bsd ; then
AC_CHECK_HEADERS(uuid.h,
[AC_EGREP_HEADER([uuid_to_string], uuid.h, [],
[AC_MSG_ERROR([header file <uuid.h> does not match BSD UUID library])])],
[AC_MSG_ERROR([header file <uuid.h> is required for BSD UUID])])
elif test "$with_uuid" = e2fs ; then
AC_CHECK_HEADERS(uuid/uuid.h,
[AC_EGREP_HEADER([uuid_generate], uuid/uuid.h, [],
[AC_MSG_ERROR([header file <uuid/uuid.h> does not match E2FS UUID library])])],
[AC_CHECK_HEADERS(uuid.h,
[AC_EGREP_HEADER([uuid_generate], uuid.h, [],
[AC_MSG_ERROR([header file <uuid.h> does not match E2FS UUID library])])],
[AC_MSG_ERROR([header file <uuid/uuid.h> or <uuid.h> is required for E2FS UUID])])])
elif test "$with_uuid" = ossp ; then
AC_CHECK_HEADERS(ossp/uuid.h,
[AC_EGREP_HEADER([uuid_export], ossp/uuid.h, [],
[AC_MSG_ERROR([header file <ossp/uuid.h> does not match OSSP UUID library])])],
[AC_CHECK_HEADERS(uuid.h,
[AC_EGREP_HEADER([uuid_export], uuid.h, [],
[AC_MSG_ERROR([header file <uuid.h> does not match OSSP UUID library])])],
[AC_MSG_ERROR([header file <ossp/uuid.h> or <uuid.h> is required for OSSP UUID])])])
fi
if test "$PORTNAME" = "win32" ; then
AC_CHECK_HEADERS(crtdefs.h)
fi
##
## Types, structures, compiler characteristics
##
m4_defun([AC_PROG_CC_STDC], []) dnl We don't want that.
AC_C_BIGENDIAN
AC_C_INLINE
PGAC_PRINTF_ARCHETYPE
PGAC_C_FUNCNAME_SUPPORT
PGAC_C_STATIC_ASSERT
PGAC_C_TYPEOF
PGAC_C_TYPES_COMPATIBLE
PGAC_C_BUILTIN_CONSTANT_P
PGAC_C_BUILTIN_UNREACHABLE
PGAC_C_COMPUTED_GOTO
PGAC_STRUCT_TIMEZONE
PGAC_UNION_SEMUN
PGAC_STRUCT_SOCKADDR_UN
PGAC_STRUCT_SOCKADDR_STORAGE
PGAC_STRUCT_SOCKADDR_STORAGE_MEMBERS
PGAC_STRUCT_ADDRINFO
PGAC_TYPE_LOCALE_T
# MSVC doesn't cope well with defining restrict to __restrict, the
# spelling it understands, because it conflicts with
# __declspec(restrict). Therefore we define pg_restrict to the
# appropriate definition, which presumably won't conflict.
#
# Allow platforms with buggy compilers to force restrict to not be
# used by setting $FORCE_DISABLE_RESTRICT=yes in the relevant
# template.
AC_C_RESTRICT
if test "$ac_cv_c_restrict" = "no" -o "x$FORCE_DISABLE_RESTRICT" = "xyes"; then
pg_restrict=""
else
pg_restrict="$ac_cv_c_restrict"
fi
AC_DEFINE_UNQUOTED([pg_restrict], [$pg_restrict],
[Define to keyword to use for C99 restrict support, or to nothing if not
supported])
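dnl As a hedged illustration (the declaration below is an example, not taken
dnl from this tree), pg_restrict is then used wherever C99 "restrict" would be:
dnl   extern void copy_region(char *pg_restrict dst,
dnl                           const char *pg_restrict src, size_t len);
dnl On compilers without usable restrict support the macro expands to nothing
dnl and the declaration degrades gracefully.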
AC_CHECK_TYPES([struct cmsgcred], [], [],
[#include <sys/socket.h>
#include <sys/param.h>
#ifdef HAVE_SYS_UCRED_H
#include <sys/ucred.h>
#endif])
AC_CHECK_TYPES([struct option], [], [],
[#ifdef HAVE_GETOPT_H
#include <getopt.h>
#endif])
if test "$with_zlib" = yes; then
# Check that <zlib.h> defines z_streamp (versions before about 1.0.4
# did not). While we could work around the lack of z_streamp, it
# seems unwise to encourage people to use such old zlib versions...
AC_CHECK_TYPE(z_streamp, [], [AC_MSG_ERROR([zlib version is too old
Use --without-zlib to disable zlib support.])],
[#include <zlib.h>])
fi
case $host_cpu in
x86_64)
# On x86_64, check if we can compile a popcntq instruction
AC_CACHE_CHECK([whether assembler supports x86_64 popcntq],
[pgac_cv_have_x86_64_popcntq],
[AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
[long long x = 1; long long r;
__asm__ __volatile__ (" popcntq %1,%0\n" : "=q"(r) : "rm"(x));])],
[pgac_cv_have_x86_64_popcntq=yes],
[pgac_cv_have_x86_64_popcntq=no])])
if test x"$pgac_cv_have_x86_64_popcntq" = xyes ; then
AC_DEFINE(HAVE_X86_64_POPCNTQ, 1, [Define to 1 if the assembler supports X86_64's POPCNTQ instruction.])
fi
;;
ppc*|powerpc*)
# On PPC, check if assembler supports LWARX instruction's mutex hint bit
AC_CACHE_CHECK([whether assembler supports lwarx hint bit],
[pgac_cv_have_ppc_mutex_hint],
[AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
[int a = 0; int *p = &a; int r;
__asm__ __volatile__ (" lwarx %0,0,%1,1\n" : "=&r"(r) : "r"(p));])],
[pgac_cv_have_ppc_mutex_hint=yes],
[pgac_cv_have_ppc_mutex_hint=no])])
if test x"$pgac_cv_have_ppc_mutex_hint" = xyes ; then
AC_DEFINE(HAVE_PPC_LWARX_MUTEX_HINT, 1, [Define to 1 if the assembler supports PPC's LWARX mutex hint bit.])
fi
# Check if compiler accepts "i"(x) when __builtin_constant_p(x).
AC_CACHE_CHECK([whether __builtin_constant_p(x) implies "i"(x) acceptance],
[pgac_cv_have_i_constraint__builtin_constant_p],
[AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
[static inline int
addi(int ra, int si)
{
int res = 0;
if (__builtin_constant_p(si))
__asm__ __volatile__(
" addi %0,%1,%2\n" : "=r"(res) : "b"(ra), "i"(si));
return res;
}
int test_adds(int x) { return addi(3, x) + addi(x, 5); }], [])],
[pgac_cv_have_i_constraint__builtin_constant_p=yes],
[pgac_cv_have_i_constraint__builtin_constant_p=no])])
if test x"$pgac_cv_have_i_constraint__builtin_constant_p" = xyes ; then
AC_DEFINE(HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P, 1,
[Define to 1 if __builtin_constant_p(x) implies "i"(x) acceptance.])
fi
;;
esac
# Check largefile support. You might think this is a system service not a
# compiler characteristic, but you'd be wrong. We must check this before
# probing existence of related functions such as fseeko, since the largefile
# defines can affect what is generated for that.
if test "$PORTNAME" != "win32"; then
AC_SYS_LARGEFILE
dnl Autoconf 2.69's AC_SYS_LARGEFILE believes it's a good idea to #define
dnl _DARWIN_USE_64_BIT_INODE, but it isn't: on macOS 10.5 that activates a
dnl bug that causes readdir() to sometimes return EINVAL. On later macOS
dnl versions where the feature actually works, it's on by default anyway.
AH_VERBATIM([_DARWIN_USE_64_BIT_INODE],[])
fi
dnl Check for largefile support (must be after AC_SYS_LARGEFILE)
AC_CHECK_SIZEOF([off_t])
# If we don't have largefile support, can't handle segsize >= 2GB.
if test "$ac_cv_sizeof_off_t" -lt 8 -a "$segsize" != "1"; then
AC_MSG_ERROR([Large file support is not enabled. Segment size cannot be larger than 1GB.])
fi
AC_CHECK_SIZEOF([bool], [],
[#ifdef HAVE_STDBOOL_H
#include <stdbool.h>
#endif])
dnl We use <stdbool.h> if we have it and it declares type bool as having
dnl size 1. Otherwise, c.h will fall back to declaring bool as unsigned char.
if test "$ac_cv_header_stdbool_h" = yes -a "$ac_cv_sizeof_bool" = 1; then
AC_DEFINE([PG_USE_STDBOOL], 1,
[Define to 1 to use <stdbool.h> to define type bool.])
fi
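dnl A minimal sketch of the consumer side in c.h, paraphrased rather than
dnl quoted verbatim:
dnl   #ifdef PG_USE_STDBOOL
dnl   #include <stdbool.h>
dnl   #else
dnl   typedef unsigned char bool;
dnl   #define true  ((bool) 1)
dnl   #define false ((bool) 0)
dnl   #endif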
##
## Functions, global variables
##
PGAC_VAR_INT_TIMEZONE
AC_FUNC_ACCEPT_ARGTYPES
PGAC_FUNC_GETTIMEOFDAY_1ARG
PGAC_FUNC_WCSTOMBS_L
# Some versions of libedit contain strlcpy(), setproctitle(), and other
# symbols that that library has no business exposing to the world. Pending
# acquisition of a clue by those developers, ignore libedit (including its
# possible alias of libreadline) while checking for everything else.
LIBS_including_readline="$LIBS"
LIBS=`echo "$LIBS" | sed -e 's/-ledit//g' -e 's/-lreadline//g'`
AC_CHECK_FUNCS(m4_normalize([
backtrace_symbols
clock_gettime
copyfile
fdatasync
getifaddrs
getpeerucred
getrlimit
kqueue
mbstowcs_l
memset_s
poll
posix_fallocate
ppoll
pstat
pthread_is_threaded_np
readlink
setproctitle
setproctitle_fast
setsid
shm_open
strchrnul
strsignal
symlink
sync_file_range
uselocale
wcstombs_l
]))
# These typically are compiler builtins, for which AC_CHECK_FUNCS fails.
PGAC_CHECK_BUILTIN_FUNC([__builtin_bswap16], [int x])
PGAC_CHECK_BUILTIN_FUNC([__builtin_bswap32], [int x])
PGAC_CHECK_BUILTIN_FUNC([__builtin_bswap64], [long int x])
# We assume that we needn't test all widths of these explicitly:
PGAC_CHECK_BUILTIN_FUNC([__builtin_clz], [unsigned int x])
PGAC_CHECK_BUILTIN_FUNC([__builtin_ctz], [unsigned int x])
PGAC_CHECK_BUILTIN_FUNC([__builtin_popcount], [unsigned int x])
# We require 64-bit fseeko() to be available, but run this check anyway
# in case it finds that _LARGEFILE_SOURCE has to be #define'd for that.
AC_FUNC_FSEEKO
# posix_fadvise() is a no-op on Solaris, so don't incur function overhead
# by calling it, 2009-04-02
# http://src.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/lib/libc/port/gen/posix_fadvise.c
dnl must use AS_IF here, else AC_REQUIRES inside AC_CHECK_DECLS malfunctions
AS_IF([test "$PORTNAME" != "solaris"], [
AC_CHECK_FUNCS(posix_fadvise)
AC_CHECK_DECLS(posix_fadvise, [], [], [#include <fcntl.h>])
]) # fi
AC_CHECK_DECLS(fdatasync, [], [], [#include <unistd.h>])
AC_CHECK_DECLS([strlcat, strlcpy, strnlen])
# This is probably only present on macOS, but may as well check always
AC_CHECK_DECLS(F_FULLFSYNC, [], [], [#include <fcntl.h>])
AC_CHECK_DECLS([RTLD_GLOBAL, RTLD_NOW], [], [], [#include <dlfcn.h>])
AC_CHECK_TYPE([struct sockaddr_in6],
[AC_DEFINE(HAVE_IPV6, 1, [Define to 1 if you have support for IPv6.])],
[],
[$ac_includes_default
#include <netinet/in.h>])
AC_CACHE_CHECK([for PS_STRINGS], [pgac_cv_var_PS_STRINGS],
[AC_LINK_IFELSE([AC_LANG_PROGRAM(
[#include <machine/vmparam.h>
#include <sys/exec.h>
],
[PS_STRINGS->ps_nargvstr = 1;
PS_STRINGS->ps_argvstr = "foo";])],
[pgac_cv_var_PS_STRINGS=yes],
[pgac_cv_var_PS_STRINGS=no])])
if test "$pgac_cv_var_PS_STRINGS" = yes ; then
AC_DEFINE([HAVE_PS_STRINGS], 1, [Define to 1 if the PS_STRINGS thing exists.])
fi
AC_REPLACE_FUNCS(m4_normalize([
dlopen
explicit_bzero
fls
getopt
getpeereid
getrusage
inet_aton
link
mkdtemp
pread
pwrite
random
srandom
strlcat
strlcpy
strnlen
strtof
]))
if test "$PORTNAME" = "win32" -o "$PORTNAME" = "cygwin"; then
# Cygwin and (apparently, based on test results) Mingw both
# have a broken strtof(), so substitute the same replacement
# code we use with VS2013. That's not a perfect fix, since
# (unlike with VS2013) it doesn't avoid double-rounding, but
# we have no better options. To get that, though, we have to
# force the file to be compiled despite HAVE_STRTOF.
AC_LIBOBJ([strtof])
AC_MSG_NOTICE([On $host_os we will use our strtof wrapper.])
fi
case $host_os in
# Windows uses a specialised env handler
mingw*)
AC_DEFINE(HAVE_UNSETENV, 1, [Define to 1 because replacement version used.])
ac_cv_func_unsetenv=yes
;;
*)
AC_REPLACE_FUNCS([unsetenv])
;;
esac
# System's version of getaddrinfo(), if any, may be used only if we found
# a definition for struct addrinfo; see notes in src/include/getaddrinfo.h.
# We use only our own getaddrinfo.c on Windows, but it's time to revisit that.
if test x"$ac_cv_type_struct_addrinfo" = xyes && \
test "$PORTNAME" != "win32"; then
AC_REPLACE_FUNCS([getaddrinfo])
else
AC_LIBOBJ(getaddrinfo)
fi
# Similarly, use system's getopt_long() only if system provides struct option.
if test x"$ac_cv_type_struct_option" = xyes ; then
AC_REPLACE_FUNCS([getopt_long])
else
AC_LIBOBJ(getopt_long)
fi
# On OpenBSD and Solaris, getopt() doesn't do what we want for long options
# (i.e., allow '-' as a flag character), so use our version on those platforms.
if test "$PORTNAME" = "openbsd" -o "$PORTNAME" = "solaris"; then
AC_LIBOBJ(getopt)
fi
# mingw has adopted a GNU-centric interpretation of optind/optreset,
# so always use our version on Windows.
if test "$PORTNAME" = "win32"; then
AC_LIBOBJ(getopt)
AC_LIBOBJ(getopt_long)
fi
# Win32 (really MinGW) support
if test "$PORTNAME" = "win32"; then
AC_CHECK_FUNCS(_configthreadlocale)
AC_REPLACE_FUNCS(gettimeofday)
AC_LIBOBJ(dirmod)
AC_LIBOBJ(kill)
AC_LIBOBJ(open)
AC_LIBOBJ(system)
AC_LIBOBJ(win32env)
AC_LIBOBJ(win32error)
AC_LIBOBJ(win32security)
AC_LIBOBJ(win32setlocale)
AC_DEFINE([HAVE_SYMLINK], 1,
[Define to 1 if you have the `symlink' function.])
AC_CHECK_TYPES(MINIDUMP_TYPE, [pgac_minidump_type=yes], [pgac_minidump_type=no], [
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <string.h>
#include <dbghelp.h>])
fi
if test x"$pgac_minidump_type" = x"yes" ; then
AC_SUBST(have_win32_dbghelp,yes)
else
AC_SUBST(have_win32_dbghelp,no)
fi
# Cygwin needs only a bit of that
if test "$PORTNAME" = "cygwin"; then
AC_LIBOBJ(dirmod)
fi
AC_CHECK_FUNC(syslog,
[AC_CHECK_HEADER(syslog.h,
[AC_DEFINE(HAVE_SYSLOG, 1, [Define to 1 if you have the syslog interface.])])])
AC_CACHE_CHECK([for opterr], pgac_cv_var_int_opterr,
[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <unistd.h>],
[extern int opterr; opterr = 1;])],
[pgac_cv_var_int_opterr=yes],
[pgac_cv_var_int_opterr=no])])
if test x"$pgac_cv_var_int_opterr" = x"yes"; then
AC_DEFINE(HAVE_INT_OPTERR, 1, [Define to 1 if you have the global variable 'int opterr'.])
fi
AC_CACHE_CHECK([for optreset], pgac_cv_var_int_optreset,
[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <unistd.h>],
[extern int optreset; optreset = 1;])],
[pgac_cv_var_int_optreset=yes],
[pgac_cv_var_int_optreset=no])])
if test x"$pgac_cv_var_int_optreset" = x"yes"; then
AC_DEFINE(HAVE_INT_OPTRESET, 1, [Define to 1 if you have the global variable 'int optreset'.])
fi
AC_CHECK_FUNCS([strtoll __strtoll strtoq], [break])
AC_CHECK_FUNCS([strtoull __strtoull strtouq], [break])
# strto[u]ll may exist but not be declared
AC_CHECK_DECLS([strtoll, strtoull])
if test "$with_icu" = yes; then
ac_save_CPPFLAGS=$CPPFLAGS
CPPFLAGS="$ICU_CFLAGS $CPPFLAGS"
# Verify we have ICU's header files
AC_CHECK_HEADER(unicode/ucol.h, [],
[AC_MSG_ERROR([header file <unicode/ucol.h> is required for ICU])])
CPPFLAGS=$ac_save_CPPFLAGS
fi
if test "$with_llvm" = yes; then
PGAC_CHECK_LLVM_FUNCTIONS()
fi
# Lastly, restore full LIBS list and check for readline/libedit symbols
LIBS="$LIBS_including_readline"
if test "$with_readline" = yes; then
PGAC_READLINE_VARIABLES
AC_CHECK_FUNCS([rl_completion_matches rl_filename_completion_function rl_reset_screen_size])
AC_CHECK_FUNCS([append_history history_truncate_file])
fi
# This test makes sure that run tests work at all. Sometimes a shared
# library is found by the linker, but the runtime linker can't find it.
# This check should come after all modifications of compiler or linker
# variables, and before any other run tests.
AC_MSG_CHECKING([test program])
AC_RUN_IFELSE([AC_LANG_SOURCE([int main() { return 0; }])],
[AC_MSG_RESULT(ok)],
[AC_MSG_RESULT(failed)
AC_MSG_ERROR([[
Could not execute a simple test program. This may be a problem
related to locating shared libraries. Check the file 'config.log'
for the exact reason.]])],
[AC_MSG_RESULT([cross-compiling])])
# --------------------
# Run tests below here
# --------------------
dnl Check to see if we have a working 64-bit integer type.
dnl Since Postgres 8.4, we no longer support compilers without a working
dnl 64-bit type; but we have to determine whether that type is called
dnl "long int" or "long long int".
PGAC_TYPE_64BIT_INT([long int])
if test x"$HAVE_LONG_INT_64" = x"yes" ; then
pg_int64_type="long int"
else
PGAC_TYPE_64BIT_INT([long long int])
if test x"$HAVE_LONG_LONG_INT_64" = x"yes" ; then
pg_int64_type="long long int"
else
AC_MSG_ERROR([Cannot find a working 64-bit integer type.])
fi
fi
AC_DEFINE_UNQUOTED(PG_INT64_TYPE, $pg_int64_type,
[Define to the name of a signed 64-bit integer type.])
# Select the printf length modifier that goes with that, too.
if test x"$pg_int64_type" = x"long long int" ; then
INT64_MODIFIER='"ll"'
else
INT64_MODIFIER='"l"'
fi
AC_DEFINE_UNQUOTED(INT64_MODIFIER, $INT64_MODIFIER,
[Define to the appropriate printf length modifier for 64-bit ints.])
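dnl A hedged sketch of how the two symbols combine: c.h builds printf format
dnl strings from the modifier, roughly
dnl   #define INT64_FORMAT "%" INT64_MODIFIER "d"
dnl so that PG_INT64_TYPE values can be printed portably with
dnl   printf(INT64_FORMAT, some_int64_value);   /* variable name illustrative */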
# has to be down here, rather than with the other builtins, because
# the test uses PG_INT64_TYPE.
PGAC_C_BUILTIN_OP_OVERFLOW
# Check size of void *, size_t (enables tweaks for > 32bit address space)
AC_CHECK_SIZEOF([void *])
AC_CHECK_SIZEOF([size_t])
AC_CHECK_SIZEOF([long])
# Determine memory alignment requirements for the basic C data types.
AC_CHECK_ALIGNOF(short)
AC_CHECK_ALIGNOF(int)
AC_CHECK_ALIGNOF(long)
if test x"$HAVE_LONG_LONG_INT_64" = x"yes" ; then
AC_CHECK_ALIGNOF(long long int)
fi
AC_CHECK_ALIGNOF(double)
# Compute maximum alignment of any basic type.
# We assume long's alignment is at least as strong as char, short, or int;
# but we must check long long (if it is being used for int64) and double.
# Note that we intentionally do not consider any types wider than 64 bits,
# as allowing MAXIMUM_ALIGNOF to exceed 8 would be too much of a penalty
# for disk and memory space.
MAX_ALIGNOF=$ac_cv_alignof_long
if test $MAX_ALIGNOF -lt $ac_cv_alignof_double ; then
MAX_ALIGNOF=$ac_cv_alignof_double
fi
if test x"$HAVE_LONG_LONG_INT_64" = xyes && test $MAX_ALIGNOF -lt $ac_cv_alignof_long_long_int ; then
MAX_ALIGNOF="$ac_cv_alignof_long_long_int"
fi
AC_DEFINE_UNQUOTED(MAXIMUM_ALIGNOF, $MAX_ALIGNOF, [Define as the maximum alignment requirement of any C data type.])
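dnl A hedged arithmetic sketch: this value feeds the alignment-rounding
dnl macros (MAXALIGN() in c.h does essentially the following, modulo naming):
dnl   #define ALIGN_UP(len) \
dnl     (((uintptr_t) (len) + MAXIMUM_ALIGNOF - 1) & ~((uintptr_t) (MAXIMUM_ALIGNOF - 1)))
dnl so memory-chunk and on-disk tuple offsets get rounded up to a multiple
dnl of MAXIMUM_ALIGNOF.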
# Some platforms predefine the types int8, int16, etc. Only check
# a (hopefully) representative subset.
AC_CHECK_TYPES([int8, uint8, int64, uint64], [], [],
[#include <stdio.h>])
# Some compilers offer a 128-bit integer scalar type.
PGAC_TYPE_128BIT_INT
# Check for various atomic operations now that we have checked how to declare
# 64bit integers.
PGAC_HAVE_GCC__SYNC_CHAR_TAS
PGAC_HAVE_GCC__SYNC_INT32_TAS
PGAC_HAVE_GCC__SYNC_INT32_CAS
PGAC_HAVE_GCC__SYNC_INT64_CAS
PGAC_HAVE_GCC__ATOMIC_INT32_CAS
PGAC_HAVE_GCC__ATOMIC_INT64_CAS
# Check for x86 cpuid instruction
AC_CACHE_CHECK([for __get_cpuid], [pgac_cv__get_cpuid],
[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <cpuid.h>],
[[unsigned int exx[4] = {0, 0, 0, 0};
__get_cpuid(1, &exx[0], &exx[1], &exx[2], &exx[3]);
]])],
[pgac_cv__get_cpuid="yes"],
[pgac_cv__get_cpuid="no"])])
if test x"$pgac_cv__get_cpuid" = x"yes"; then
AC_DEFINE(HAVE__GET_CPUID, 1, [Define to 1 if you have __get_cpuid.])
fi
AC_CACHE_CHECK([for __cpuid], [pgac_cv__cpuid],
[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>],
[[unsigned int exx[4] = {0, 0, 0, 0};
__cpuid(exx, 1);
]])],
[pgac_cv__cpuid="yes"],
[pgac_cv__cpuid="no"])])
if test x"$pgac_cv__cpuid" = x"yes"; then
AC_DEFINE(HAVE__CPUID, 1, [Define to 1 if you have __cpuid.])
fi
# Check for Intel SSE 4.2 intrinsics to do CRC calculations.
#
# First check if the _mm_crc32_u8 and _mm_crc32_u64 intrinsics can be used
# with the default compiler flags. If not, check if adding the -msse4.2
# flag helps. CFLAGS_SSE42 is set to -msse4.2 if that's required.
PGAC_SSE42_CRC32_INTRINSICS([])
if test x"$pgac_sse42_crc32_intrinsics" != x"yes"; then
PGAC_SSE42_CRC32_INTRINSICS([-msse4.2])
fi
AC_SUBST(CFLAGS_SSE42)
# Are we targeting a processor that supports SSE 4.2? gcc, clang and icc all
# define __SSE4_2__ in that case.
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [
#ifndef __SSE4_2__
#error __SSE4_2__ not defined
#endif
])], [SSE4_2_TARGETED=1])
# Check for ARMv8 CRC Extension intrinsics to do CRC calculations.
#
# First check if __crc32c* intrinsics can be used with the default compiler
# flags. If not, check if adding -march=armv8-a+crc flag helps.
# CFLAGS_ARMV8_CRC32C is set if the extra flag is required.
PGAC_ARMV8_CRC32C_INTRINSICS([])
if test x"$pgac_armv8_crc32c_intrinsics" != x"yes"; then
PGAC_ARMV8_CRC32C_INTRINSICS([-march=armv8-a+crc])
fi
AC_SUBST(CFLAGS_ARMV8_CRC32C)
# Select CRC-32C implementation.
#
# If we are targeting a processor that has Intel SSE 4.2 instructions, we can
# use the special CRC instructions for calculating CRC-32C. If we're not
# targeting such a processor, but we can nevertheless produce code that uses
# the SSE intrinsics, perhaps with some extra CFLAGS, compile both
# implementations and select which one to use at runtime, depending on whether
# SSE 4.2 is supported by the processor we're running on.
#
# Similarly, if we are targeting an ARM processor that has the CRC
# instructions that are part of the ARMv8 CRC Extension, use them. And if
# we're not targeting such a processor, but can nevertheless produce code that
# uses the CRC instructions, compile both, and select at runtime.
#
# You can override this logic by setting the appropriate USE_*_CRC32C flag to 1
# in the template or configure command line.
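dnl For example (a usage sketch, not a recommendation), the portable
dnl slicing-by-8 fallback can be forced from the configure command line:
dnl   ./configure USE_SLICING_BY_8_CRC32C=1
dnl and a platform template can likewise preset one of the USE_* variables.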
if test x"$USE_SLICING_BY_8_CRC32C" = x"" && test x"$USE_SSE42_CRC32C" = x"" && test x"$USE_SSE42_CRC32C_WITH_RUNTIME_CHECK" = x"" && test x"$USE_ARMV8_CRC32C" = x"" && test x"$USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK" = x""; then
# Use Intel SSE 4.2 if available.
if test x"$pgac_sse42_crc32_intrinsics" = x"yes" && test x"$SSE4_2_TARGETED" = x"1" ; then
USE_SSE42_CRC32C=1
else
# Intel SSE 4.2, with runtime check? The CPUID instruction is needed for
# the runtime check.
if test x"$pgac_sse42_crc32_intrinsics" = x"yes" && (test x"$pgac_cv__get_cpuid" = x"yes" || test x"$pgac_cv__cpuid" = x"yes"); then
USE_SSE42_CRC32C_WITH_RUNTIME_CHECK=1
else
# Use ARM CRC Extension if available.
if test x"$pgac_armv8_crc32c_intrinsics" = x"yes" && test x"$CFLAGS_ARMV8_CRC32C" = x""; then
USE_ARMV8_CRC32C=1
else
# ARM CRC Extension, with runtime check?
if test x"$pgac_armv8_crc32c_intrinsics" = x"yes"; then
USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK=1
else
# fall back to slicing-by-8 algorithm, which doesn't require any
# special CPU support.
USE_SLICING_BY_8_CRC32C=1
fi
fi
fi
fi
fi
# Set PG_CRC32C_OBJS appropriately depending on the selected implementation.
AC_MSG_CHECKING([which CRC-32C implementation to use])
if test x"$USE_SSE42_CRC32C" = x"1"; then
AC_DEFINE(USE_SSE42_CRC32C, 1, [Define to 1 to use Intel SSE 4.2 CRC instructions.])
PG_CRC32C_OBJS="pg_crc32c_sse42.o"
AC_MSG_RESULT(SSE 4.2)
else
if test x"$USE_SSE42_CRC32C_WITH_RUNTIME_CHECK" = x"1"; then
AC_DEFINE(USE_SSE42_CRC32C_WITH_RUNTIME_CHECK, 1, [Define to 1 to use Intel SSE 4.2 CRC instructions with a runtime check.])
PG_CRC32C_OBJS="pg_crc32c_sse42.o pg_crc32c_sb8.o pg_crc32c_sse42_choose.o"
AC_MSG_RESULT(SSE 4.2 with runtime check)
else
if test x"$USE_ARMV8_CRC32C" = x"1"; then
AC_DEFINE(USE_ARMV8_CRC32C, 1, [Define to 1 to use ARMv8 CRC Extension.])
PG_CRC32C_OBJS="pg_crc32c_armv8.o"
AC_MSG_RESULT(ARMv8 CRC instructions)
else
if test x"$USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK" = x"1"; then
AC_DEFINE(USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK, 1, [Define to 1 to use ARMv8 CRC Extension with a runtime check.])
PG_CRC32C_OBJS="pg_crc32c_armv8.o pg_crc32c_sb8.o pg_crc32c_armv8_choose.o"
AC_MSG_RESULT(ARMv8 CRC instructions with runtime check)
else
AC_DEFINE(USE_SLICING_BY_8_CRC32C, 1, [Define to 1 to use software CRC-32C implementation (slicing-by-8).])
PG_CRC32C_OBJS="pg_crc32c_sb8.o"
AC_MSG_RESULT(slicing-by-8)
fi
fi
fi
fi
AC_SUBST(PG_CRC32C_OBJS)
# Select semaphore implementation type.
if test "$PORTNAME" != "win32"; then
if test x"$PREFERRED_SEMAPHORES" = x"NAMED_POSIX" ; then
# Need sem_open for this
AC_SEARCH_LIBS(sem_open, [rt pthread], [USE_NAMED_POSIX_SEMAPHORES=1])
fi
if test x"$PREFERRED_SEMAPHORES" = x"UNNAMED_POSIX" ; then
# Need sem_init for this
AC_SEARCH_LIBS(sem_init, [rt pthread], [USE_UNNAMED_POSIX_SEMAPHORES=1])
fi
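dnl A hedged note on how the preference gets here: a platform template may
dnl preset it, e.g.
dnl   PREFERRED_SEMAPHORES=UNNAMED_POSIX
dnl and the sem_open()/sem_init() probes above honor the preference only
dnl when the corresponding function actually links.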
AC_MSG_CHECKING([which semaphore API to use])
if test x"$USE_NAMED_POSIX_SEMAPHORES" = x"1" ; then
AC_DEFINE(USE_NAMED_POSIX_SEMAPHORES, 1, [Define to select named POSIX semaphores.])
SEMA_IMPLEMENTATION="src/backend/port/posix_sema.c"
sematype="named POSIX"
else
if test x"$USE_UNNAMED_POSIX_SEMAPHORES" = x"1" ; then
AC_DEFINE(USE_UNNAMED_POSIX_SEMAPHORES, 1, [Define to select unnamed POSIX semaphores.])
SEMA_IMPLEMENTATION="src/backend/port/posix_sema.c"
sematype="unnamed POSIX"
else
AC_DEFINE(USE_SYSV_SEMAPHORES, 1, [Define to select SysV-style semaphores.])
SEMA_IMPLEMENTATION="src/backend/port/sysv_sema.c"
sematype="System V"
fi
fi
AC_MSG_RESULT([$sematype])
else
AC_DEFINE(USE_WIN32_SEMAPHORES, 1, [Define to select Win32-style semaphores.])
SEMA_IMPLEMENTATION="src/backend/port/win32_sema.c"
fi
# Select shared-memory implementation type.
if test "$PORTNAME" != "win32"; then
AC_DEFINE(USE_SYSV_SHARED_MEMORY, 1, [Define to select SysV-style shared memory.])
SHMEM_IMPLEMENTATION="src/backend/port/sysv_shmem.c"
else
AC_DEFINE(USE_WIN32_SHARED_MEMORY, 1, [Define to select Win32-style shared memory.])
SHMEM_IMPLEMENTATION="src/backend/port/win32_shmem.c"
fi
# Select random number source
#
# You can override this logic by setting the appropriate USE_*RANDOM flag to 1
# in the template or configure command line.
# If not selected manually, try to select a source automatically.
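dnl E.g. (usage sketch) to force /dev/urandom regardless of the automatic
dnl selection below:
dnl   ./configure USE_DEV_URANDOM=1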
if test x"$USE_OPENSSL_RANDOM" = x"" && test x"$USE_WIN32_RANDOM" = x"" && test x"$USE_DEV_URANDOM" = x"" ; then
if test x"$with_openssl" = x"yes" ; then
USE_OPENSSL_RANDOM=1
elif test "$PORTNAME" = "win32" ; then
USE_WIN32_RANDOM=1
else
AC_CHECK_FILE([/dev/urandom], [], [])
if test x"$ac_cv_file__dev_urandom" = x"yes" ; then
USE_DEV_URANDOM=1
fi
fi
fi
AC_MSG_CHECKING([which random number source to use])
if test x"$USE_OPENSSL_RANDOM" = x"1" ; then
AC_DEFINE(USE_OPENSSL_RANDOM, 1, [Define to use OpenSSL for random number generation])
AC_MSG_RESULT([OpenSSL])
elif test x"$USE_WIN32_RANDOM" = x"1" ; then
AC_DEFINE(USE_WIN32_RANDOM, 1, [Define to use native Windows API for random number generation])
AC_MSG_RESULT([Windows native])
elif test x"$USE_DEV_URANDOM" = x"1" ; then
AC_DEFINE(USE_DEV_URANDOM, 1, [Define to use /dev/urandom for random number generation])
AC_MSG_RESULT([/dev/urandom])
else
AC_MSG_ERROR([
no source of strong random numbers was found
PostgreSQL can use OpenSSL or /dev/urandom as a source of random numbers.])
fi
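# (Illustrative note: the selected source simply becomes a pg_config.h
# define, e.g. "#define USE_OPENSSL_RANDOM 1", which the pg_strong_random()
# code is then compiled against.)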
# If not set in the template file, set the size limit (in bytes) up to which
# MemSet() uses an inline loop rather than calling libc memset()
if test x"$MEMSET_LOOP_LIMIT" = x"" ; then
MEMSET_LOOP_LIMIT=1024
fi
AC_DEFINE_UNQUOTED(MEMSET_LOOP_LIMIT, ${MEMSET_LOOP_LIMIT}, [Define as the maximum size (in bytes) for which MemSet() uses an inline loop instead of libc memset().])
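# (Illustrative only: a port template could instead pin its own cutoff, for
# example "MEMSET_LOOP_LIMIT=8192"; the value shown here is hypothetical.)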
if test "$enable_nls" = yes ; then
PGAC_CHECK_GETTEXT
fi
# Check for Tcl configuration script tclConfig.sh
if test "$with_tcl" = yes; then
PGAC_PATH_TCLCONFIGSH([$with_tclconfig])
PGAC_EVAL_TCLCONFIGSH([$TCL_CONFIG_SH],
[TCL_INCLUDE_SPEC,TCL_LIBS,TCL_LIB_SPEC,TCL_SHARED_BUILD])
AC_SUBST(TCL_SHLIB_LD_LIBS)dnl don't want to double-evaluate that one
if test "$TCL_SHARED_BUILD" != 1; then
AC_MSG_ERROR([cannot build PL/Tcl because Tcl is not a shared library
Use --without-tcl to disable building PL/Tcl.])
fi
# now that we have TCL_INCLUDE_SPEC, we can check for <tcl.h>
ac_save_CPPFLAGS=$CPPFLAGS
CPPFLAGS="$TCL_INCLUDE_SPEC $CPPFLAGS"
AC_CHECK_HEADER(tcl.h, [], [AC_MSG_ERROR([header file <tcl.h> is required for Tcl])])
CPPFLAGS=$ac_save_CPPFLAGS
fi
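# (Illustrative only: if tclConfig.sh is not found automatically, it can be
# pointed to explicitly, e.g.
#
#   ./configure --with-tcl --with-tclconfig=/usr/lib/tcl8.6
#
# where the directory shown is hypothetical and platform-dependent.)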
# check for <perl.h>
if test "$with_perl" = yes; then
ac_save_CPPFLAGS=$CPPFLAGS
CPPFLAGS="$CPPFLAGS $perl_includespec"
AC_CHECK_HEADER(perl.h, [], [AC_MSG_ERROR([header file <perl.h> is required for Perl])],
[#include <EXTERN.h>])
# While we're at it, check that we can link to libperl.
# On most platforms, if perl.h is there then libperl.so will be too, but at
# this writing Debian packages them separately. There is no known reason to
# waste cycles on separate probes for the Tcl or Python libraries, though.
# On some Red Hat platforms, the link attempt can fail if we don't use
# CFLAGS_SL while building the test program.
ac_save_CFLAGS=$CFLAGS
CFLAGS="$CFLAGS $CFLAGS_SL"
pgac_save_LIBS=$LIBS
LIBS="$perl_embed_ldflags"
AC_MSG_CHECKING([for libperl])
AC_LINK_IFELSE([AC_LANG_PROGRAM([
#include <EXTERN.h>
#include <perl.h>
], [perl_alloc();])],
[AC_MSG_RESULT(yes)],
[AC_MSG_RESULT(no)
AC_MSG_ERROR([libperl library is required for Perl])])
LIBS=$pgac_save_LIBS
CFLAGS=$ac_save_CFLAGS
CPPFLAGS=$ac_save_CPPFLAGS
fi
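# (Illustrative only: the perl_embed_ldflags used above is normally derived
# via ExtUtils::Embed, roughly what "perl -MExtUtils::Embed -e ldopts"
# prints; the exact flags vary by system.)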
# check for <Python.h>
if test "$with_python" = yes; then
ac_save_CPPFLAGS=$CPPFLAGS
CPPFLAGS="$python_includespec $CPPFLAGS"
AC_CHECK_HEADER(Python.h, [], [AC_MSG_ERROR([header file <Python.h> is required for Python])])
CPPFLAGS=$ac_save_CPPFLAGS
fi
#
# Check for DocBook and tools
#
PGAC_PATH_XMLLINT
PGAC_CHECK_DOCBOOK(4.5)
PGAC_PATH_PROGS(DBTOEPUB, dbtoepub)
PGAC_PATH_PROGS(XSLTPROC, xsltproc)
PGAC_PATH_PROGS(FOP, fop)
#
# Check for test tools
#
if test "$enable_tap_tests" = yes; then
# Check for necessary modules, unless the user has specified which "prove" to
# use; in that case it's their responsibility to have a working configuration.
# (prove might be part of a different Perl installation than perl, eg on
# MSys, so the result of AX_PROG_PERL_MODULES could be irrelevant anyway.)
if test -z "$PROVE"; then
# Test::More and Time::HiRes are supposed to be part of core Perl,
# but some distros omit them in a minimal installation.
AX_PROG_PERL_MODULES([IPC::Run Test::More=0.87 Time::HiRes], ,
[AC_MSG_ERROR([Additional Perl modules are required to run TAP tests])])
fi
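# (Illustrative only: if this check fails, the missing modules can usually
# be installed with the platform's package manager or with a command along
# the lines of "cpan IPC::Run".)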
# Now make sure we know where prove is
PGAC_PATH_PROGS(PROVE, prove)
if test -z "$PROVE"; then
AC_MSG_ERROR([prove not found])
fi
fi
# Thread testing
# We have to run the thread test near the end so we have all our symbols
# defined. Cross compiling throws a warning.
#
if test "$enable_thread_safety" = yes; then
if test "$PORTNAME" != "win32"
then
AC_MSG_CHECKING([thread safety of required library functions])
_CFLAGS="$CFLAGS"
_LIBS="$LIBS"
CFLAGS="$CFLAGS $PTHREAD_CFLAGS -DIN_CONFIGURE"
LIBS="$LIBS $PTHREAD_LIBS"
AC_RUN_IFELSE(
[AC_LANG_SOURCE([[#include "$srcdir/src/test/thread/thread_test.c"]])],
[AC_MSG_RESULT(yes)],
[AC_MSG_RESULT(no)
AC_MSG_ERROR([thread test program failed
This platform is not thread-safe. Check the file 'config.log' or compile
and run src/test/thread/thread_test for the exact reason.
Use --disable-thread-safety to disable thread safety.])],
[AC_MSG_RESULT(maybe)
AC_MSG_WARN([
*** Skipping thread test program because of cross-compile build.
*** Run the program in src/test/thread on the target machine.
])])
CFLAGS="$_CFLAGS"
LIBS="$_LIBS"
else
AC_MSG_WARN([*** skipping thread test on Win32])
fi
fi
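# (Illustrative only: when cross-compiling, the equivalent check can be run
# by hand on the target machine, roughly
#
#   cd src/test/thread && make && ./thread_test
#
# assuming the usual in-tree Makefile is available there.)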
# If compiler will take -Wl,--as-needed (or various platform-specific
# spellings thereof) then add that to LDFLAGS. This is much easier than
# trying to filter LIBS to the minimum for each executable.
# On (at least) some Red-Hat-derived systems, this switch breaks linking to
# libreadline; therefore we postpone testing it until we know what library
# dependencies readline has. The test code will try to link with $LIBS.
if test "$with_readline" = yes; then
link_test_func=readline
else
link_test_func=exit
fi
if test "$PORTNAME" = "darwin"; then
PGAC_PROG_CC_LDFLAGS_OPT([-Wl,-dead_strip_dylibs], $link_test_func)
elif test "$PORTNAME" = "openbsd"; then
PGAC_PROG_CC_LDFLAGS_OPT([-Wl,-Bdynamic], $link_test_func)
else
PGAC_PROG_CC_LDFLAGS_OPT([-Wl,--as-needed], $link_test_func)
fi
# Create compiler version string
if test x"$GCC" = x"yes" ; then
cc_string=`${CC} --version | sed q`
case $cc_string in [[A-Za-z]]*) ;; *) cc_string="GCC $cc_string";; esac
elif test x"$SUN_STUDIO_CC" = x"yes" ; then
cc_string=`${CC} -V 2>&1 | sed q`
else
cc_string=$CC
fi
AC_DEFINE_UNQUOTED(PG_VERSION_STR,
["PostgreSQL $PG_VERSION on $host, compiled by $cc_string, `expr $ac_cv_sizeof_void_p \* 8`-bit"],
[A string containing the version number, platform, and C compiler])
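# (Illustrative only: the resulting string looks something like
#   "PostgreSQL 13beta1 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 9.3.1, 64-bit"
# with the host triple and compiler version being whatever was detected.)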
# Supply a numeric version string for use by 3rd party add-ons
# The result is major version * 10000 + minor version, e.g. 13.2 -> 130002
[PG_VERSION_NUM="`echo $PG_MAJORVERSION $PG_MINORVERSION |
$AWK '{printf "%d%04d", $1, $2}'`"]
AC_DEFINE_UNQUOTED(PG_VERSION_NUM, $PG_VERSION_NUM, [PostgreSQL version as a number])
AC_SUBST(PG_VERSION_NUM)
# If we are inserting PG_SYSROOT into CPPFLAGS, do so symbolically not
# literally, so that it's possible to override it at build time using
# a command like "make ... PG_SYSROOT=path". This has to be done after
# we've finished all configure checks that depend on CPPFLAGS.
if test x"$PG_SYSROOT" != x; then
CPPFLAGS=`echo "$CPPFLAGS" | sed -e "s| $PG_SYSROOT | \\\$(PG_SYSROOT) |"`
fi
AC_SUBST(PG_SYSROOT)
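# (Illustrative only: on macOS this typically rewrites "-isysroot <sdk path>"
# in CPPFLAGS to "-isysroot $(PG_SYSROOT)", so a different SDK could later be
# selected with e.g. "make PG_SYSROOT=/path/to/other.sdk"; the path is
# hypothetical.)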
# Begin output steps
AC_MSG_NOTICE([using compiler=$cc_string])
AC_MSG_NOTICE([using CFLAGS=$CFLAGS])
AC_MSG_NOTICE([using CPPFLAGS=$CPPFLAGS])
AC_MSG_NOTICE([using LDFLAGS=$LDFLAGS])
# Currently only used when LLVM is used
if test "$with_llvm" = yes ; then
AC_MSG_NOTICE([using CXX=$CXX])
AC_MSG_NOTICE([using CXXFLAGS=$CXXFLAGS])
AC_MSG_NOTICE([using CLANG=$CLANG])
AC_MSG_NOTICE([using BITCODE_CFLAGS=$BITCODE_CFLAGS])
AC_MSG_NOTICE([using BITCODE_CXXFLAGS=$BITCODE_CXXFLAGS])
fi
# prepare build tree if outside source tree
# Note 1: test -ef might not exist, but it's more reliable than `pwd`.
# Note 2: /bin/pwd might be better than shell's built-in at getting
# a symlink-free name.
if ( test "$srcdir" -ef . ) >/dev/null 2>&1 || test "`cd $srcdir && /bin/pwd`" = "`/bin/pwd`"; then
vpath_build=no
else
vpath_build=yes
if test "$no_create" != yes; then
_AS_ECHO_N([preparing build tree... ])
pgac_abs_top_srcdir=`cd "$srcdir" && pwd`
$SHELL "$ac_aux_dir/prep_buildtree" "$pgac_abs_top_srcdir" "." \
|| AC_MSG_ERROR(failed)
AC_MSG_RESULT(done)
fi
fi
AC_SUBST(vpath_build)
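# (Illustrative only: a VPATH build is started from an empty directory
# outside the source tree, e.g.
#
#   mkdir ../build && cd ../build && ../postgres/configure
#
# where the directory layout shown is hypothetical.)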
AC_CONFIG_FILES([GNUmakefile src/Makefile.global])
AC_CONFIG_LINKS([
src/backend/port/pg_sema.c:${SEMA_IMPLEMENTATION}
src/backend/port/pg_shmem.c:${SHMEM_IMPLEMENTATION}
src/include/pg_config_os.h:src/include/port/${template}.h
src/Makefile.port:src/makefiles/Makefile.${template}
])
if test "$PORTNAME" = "win32"; then
AC_CONFIG_COMMANDS([check_win32_symlinks],[
# Symlink creation sometimes fails undetected on MinGW,
# so here we detect it and warn the user
for FILE in $CONFIG_LINKS
do
# test -e works for symlinks in the MinGW console
test -e `expr "$FILE" : '\([[^:]]*\)'` || AC_MSG_WARN([*** link for $FILE -- please fix by hand])
done
])
fi
AC_CONFIG_HEADERS([src/include/pg_config.h],
[
# Update timestamp for pg_config.h (see Makefile.global)
echo >src/include/stamp-h
])
AC_CONFIG_HEADERS([src/include/pg_config_ext.h],
[
# Update timestamp for pg_config_ext.h (see Makefile.global)
echo >src/include/stamp-ext-h
])
AC_CONFIG_HEADERS([src/interfaces/ecpg/include/ecpg_config.h],
[echo >src/interfaces/ecpg/include/stamp-h])
AC_OUTPUT