diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index 852cb30ae1..307ad88b50 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -3042,7 +3042,8 @@ SCRAM-SHA-256$<iteration count>:&l
Sets maximum number of concurrent connections that can be made
- to this database. -1 means no limit.
+        to this database. -1 means no limit; -2 indicates the database is
+        invalid.
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 09f1ab41ad..307729ab7e 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -719,7 +719,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
int encoding = -1;
bool dbistemplate = false;
bool dballowconnections = true;
- int dbconnlimit = -1;
+ int dbconnlimit = DATCONNLIMIT_UNLIMITED;
char *dbcollversion = NULL;
int notherbackends;
int npreparedxacts;
@@ -926,7 +926,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
if (dconnlimit && dconnlimit->arg)
{
dbconnlimit = defGetInt32(dconnlimit);
- if (dbconnlimit < -1)
+ if (dbconnlimit < DATCONNLIMIT_UNLIMITED)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid connection limit: %d", dbconnlimit)));
@@ -977,6 +977,16 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
errmsg("template database \"%s\" does not exist",
dbtemplate)));
+ /*
+ * If the source database was in the process of being dropped, we can't
+ * use it as a template.
+ */
+ if (database_is_invalid_oid(src_dboid))
+ ereport(ERROR,
+ errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot use invalid database \"%s\" as template", dbtemplate),
+ errhint("Use DROP DATABASE to drop invalid databases."));
+
/*
* Permission check: to copy a DB that's not marked datistemplate, you
* must be superuser or the owner thereof.
@@ -1576,6 +1586,7 @@ dropdb(const char *dbname, bool missing_ok, bool force)
bool db_istemplate;
Relation pgdbrel;
HeapTuple tup;
+ Form_pg_database datform;
int notherbackends;
int npreparedxacts;
int nslots,
@@ -1691,17 +1702,6 @@ dropdb(const char *dbname, bool missing_ok, bool force)
dbname),
errdetail_busy_db(notherbackends, npreparedxacts)));
- /*
- * Remove the database's tuple from pg_database.
- */
- tup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(db_id));
- if (!HeapTupleIsValid(tup))
- elog(ERROR, "cache lookup failed for database %u", db_id);
-
- CatalogTupleDelete(pgdbrel, &tup->t_self);
-
- ReleaseSysCache(tup);
-
/*
* Delete any comments or security labels associated with the database.
*/
@@ -1718,6 +1718,37 @@ dropdb(const char *dbname, bool missing_ok, bool force)
*/
dropDatabaseDependencies(db_id);
+ /*
+ * Tell the cumulative stats system to forget it immediately, too.
+ */
+ pgstat_drop_database(db_id);
+
+ tup = SearchSysCacheCopy1(DATABASEOID, ObjectIdGetDatum(db_id));
+ if (!HeapTupleIsValid(tup))
+ elog(ERROR, "cache lookup failed for database %u", db_id);
+ datform = (Form_pg_database) GETSTRUCT(tup);
+
+ /*
+ * Except for the deletion of the catalog row, subsequent actions are not
+ * transactional (consider DropDatabaseBuffers() discarding modified
+ * buffers). But we might crash or get interrupted below. To prevent
+ * accesses to a database with invalid contents, mark the database as
+ * invalid using an in-place update.
+ *
+ * We need to flush the WAL before continuing, to guarantee the
+ * modification is durable before performing irreversible filesystem
+ * operations.
+ */
+ datform->datconnlimit = DATCONNLIMIT_INVALID_DB;
+ heap_inplace_update(pgdbrel, tup);
+ XLogFlush(XactLastRecEnd);
+
+ /*
+ * Also delete the tuple - transactionally. If this transaction commits,
+ * the row will be gone, but if we fail, dropdb() can be invoked again.
+ */
+ CatalogTupleDelete(pgdbrel, &tup->t_self);
+
/*
* Drop db-specific replication slots.
*/
@@ -1730,11 +1761,6 @@ dropdb(const char *dbname, bool missing_ok, bool force)
*/
DropDatabaseBuffers(db_id);
- /*
- * Tell the cumulative stats system to forget it immediately, too.
- */
- pgstat_drop_database(db_id);
-
/*
* Tell checkpointer to forget any pending fsync and unlink requests for
* files in the database; else the fsyncs will fail at next checkpoint, or
@@ -2248,7 +2274,7 @@ AlterDatabase(ParseState *pstate, AlterDatabaseStmt *stmt, bool isTopLevel)
ListCell *option;
bool dbistemplate = false;
bool dballowconnections = true;
- int dbconnlimit = -1;
+ int dbconnlimit = DATCONNLIMIT_UNLIMITED;
DefElem *distemplate = NULL;
DefElem *dallowconnections = NULL;
DefElem *dconnlimit = NULL;
@@ -2319,7 +2345,7 @@ AlterDatabase(ParseState *pstate, AlterDatabaseStmt *stmt, bool isTopLevel)
if (dconnlimit && dconnlimit->arg)
{
dbconnlimit = defGetInt32(dconnlimit);
- if (dbconnlimit < -1)
+ if (dbconnlimit < DATCONNLIMIT_UNLIMITED)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid connection limit: %d", dbconnlimit)));
@@ -2346,6 +2372,14 @@ AlterDatabase(ParseState *pstate, AlterDatabaseStmt *stmt, bool isTopLevel)
datform = (Form_pg_database) GETSTRUCT(tuple);
dboid = datform->oid;
+ if (database_is_invalid_form(datform))
+ {
+ ereport(FATAL,
+ errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot alter invalid database \"%s\"", stmt->dbname),
+ errhint("Use DROP DATABASE to drop invalid databases."));
+ }
+
if (!object_ownercheck(DatabaseRelationId, dboid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE,
stmt->dbname);
@@ -3064,6 +3098,42 @@ get_database_name(Oid dbid)
return result;
}
+
+/*
+ * While dropping a database the pg_database row is marked invalid, but the
+ * catalog contents still exist. Connections to such a database are not
+ * allowed.
+ */
+bool
+database_is_invalid_form(Form_pg_database datform)
+{
+ return datform->datconnlimit == DATCONNLIMIT_INVALID_DB;
+}
+
+
+/*
+ * Convenience wrapper around database_is_invalid_form()
+ */
+bool
+database_is_invalid_oid(Oid dboid)
+{
+ HeapTuple dbtup;
+ Form_pg_database dbform;
+ bool invalid;
+
+ dbtup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(dboid));
+ if (!HeapTupleIsValid(dbtup))
+ elog(ERROR, "cache lookup failed for database %u", dboid);
+ dbform = (Form_pg_database) GETSTRUCT(dbtup);
+
+ invalid = database_is_invalid_form(dbform);
+
+ ReleaseSysCache(dbtup);
+
+ return invalid;
+}
+
+
/*
* recovery_create_dbdir()
*
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 841188f71c..69ac276687 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -1850,6 +1850,20 @@ vac_truncate_clog(TransactionId frozenXID,
Assert(TransactionIdIsNormal(datfrozenxid));
Assert(MultiXactIdIsValid(datminmxid));
+ /*
+ * If database is in the process of getting dropped, or has been
+ * interrupted while doing so, no connections to it are possible
+ * anymore. Therefore we don't need to take it into account here.
+ * Which is good, because it can't be processed by autovacuum either.
+ */
+ if (database_is_invalid_form((Form_pg_database) dbform))
+ {
+ elog(DEBUG2,
+ "skipping invalid database \"%s\" while computing relfrozenxid",
+ NameStr(dbform->datname));
+ continue;
+ }
+
/*
* If things are working properly, no database should have a
* datfrozenxid or datminmxid that is "in the future". However, such
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index f929b62e8a..ae9be9b911 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -1972,6 +1972,18 @@ get_database_list(void)
avw_dbase *avdb;
MemoryContext oldcxt;
+ /*
+ * If database has partially been dropped, we can't, nor need to,
+ * vacuum it.
+ */
+ if (database_is_invalid_form(pgdatabase))
+ {
+ elog(DEBUG2,
+ "autovacuum: skipping invalid database \"%s\"",
+ NameStr(pgdatabase->datname));
+ continue;
+ }
+
/*
* Allocate our results in the caller's context, not the
* transaction's. We do this inside the loop, and restore the original
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 0f9b92b32e..f31b85c013 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -1116,6 +1116,7 @@ InitPostgres(const char *in_dbname, Oid dboid,
if (!bootstrap)
{
HeapTuple tuple;
+ Form_pg_database datform;
tuple = GetDatabaseTuple(dbname);
if (!HeapTupleIsValid(tuple) ||
@@ -1125,6 +1126,15 @@ InitPostgres(const char *in_dbname, Oid dboid,
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist", dbname),
errdetail("It seems to have just been dropped or renamed.")));
+
+ datform = (Form_pg_database) GETSTRUCT(tuple);
+ if (database_is_invalid_form(datform))
+ {
+ ereport(FATAL,
+ errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot connect to invalid database \"%s\"", dbname),
+ errhint("Use DROP DATABASE to drop invalid databases."));
+ }
}
/*
diff --git a/src/bin/pg_amcheck/pg_amcheck.c b/src/bin/pg_amcheck/pg_amcheck.c
index 68f8180c19..57df14bc1e 100644
--- a/src/bin/pg_amcheck/pg_amcheck.c
+++ b/src/bin/pg_amcheck/pg_amcheck.c
@@ -1590,7 +1590,7 @@ compile_database_list(PGconn *conn, SimplePtrList *databases,
"FROM pg_catalog.pg_database d "
"LEFT OUTER JOIN exclude_raw e "
"ON d.datname ~ e.rgx "
- "\nWHERE d.datallowconn "
+ "\nWHERE d.datallowconn AND datconnlimit != -2 "
"AND e.pattern_id IS NULL"
"),"
diff --git a/src/bin/pg_amcheck/t/002_nonesuch.pl b/src/bin/pg_amcheck/t/002_nonesuch.pl
index cf2438717e..99b94dd022 100644
--- a/src/bin/pg_amcheck/t/002_nonesuch.pl
+++ b/src/bin/pg_amcheck/t/002_nonesuch.pl
@@ -291,6 +291,40 @@ $node->command_checks_all(
'many unmatched patterns and one matched pattern under --no-strict-names'
);
+
+#########################################
+# Test that an invalid / partially dropped database won't be targeted
+
+$node->safe_psql(
+ 'postgres', q(
+ CREATE DATABASE regression_invalid;
+ UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
+));
+
+$node->command_checks_all(
+ [
+ 'pg_amcheck', '-d', 'regression_invalid'
+ ],
+ 1,
+ [qr/^$/],
+ [
+ qr/pg_amcheck: error: no connectable databases to check matching "regression_invalid"/,
+ ],
+ 'checking handling of invalid database');
+
+$node->command_checks_all(
+ [
+ 'pg_amcheck', '-d', 'postgres',
+ '-t', 'regression_invalid.public.foo',
+ ],
+ 1,
+ [qr/^$/],
+ [
+ qr/pg_amcheck: error: no connectable databases to check matching "regression_invalid.public.foo"/,
+ ],
+ 'checking handling of object in invalid database');
+
+
#########################################
# Test checking otherwise existent objects but in databases where they do not exist
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 36f134137f..0ab52ca81d 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -1345,7 +1345,7 @@ dropDBs(PGconn *conn)
res = executeQuery(conn,
"SELECT datname "
"FROM pg_database d "
- "WHERE datallowconn "
+ "WHERE datallowconn AND datconnlimit != -2 "
"ORDER BY datname");
if (PQntuples(res) > 0)
@@ -1488,7 +1488,7 @@ dumpDatabases(PGconn *conn)
res = executeQuery(conn,
"SELECT datname "
"FROM pg_database d "
- "WHERE datallowconn "
+ "WHERE datallowconn AND datconnlimit != -2 "
"ORDER BY (datname <> 'template1'), datname");
if (PQntuples(res) > 0)
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index d42243bf71..0efeb3367d 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -1907,6 +1907,17 @@ my %tests = (
},
},
+ 'CREATE DATABASE regression_invalid...' => {
+ create_order => 1,
+ create_sql => q(
+ CREATE DATABASE regression_invalid;
+ UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid'),
+ regexp => qr/^CREATE DATABASE regression_invalid/m,
+ not_like => {
+ pg_dumpall_dbprivs => 1,
+ },
+ },
+
'CREATE ACCESS METHOD gist2' => {
create_order => 52,
create_sql =>
@@ -4690,6 +4701,14 @@ command_fails_like(
qr/pg_dump: error: connection to server .* failed: FATAL: database "qqq" does not exist/,
'connecting to a non-existent database');
+#########################################
+# Test connecting to an invalid database
+
+$node->command_fails_like(
+ [ 'pg_dump', '-d', 'regression_invalid' ],
+ qr/pg_dump: error: connection to server .* failed: FATAL: cannot connect to invalid database "regression_invalid"/,
+ 'connecting to an invalid database');
+
#########################################
# Test connecting with an unprivileged user
diff --git a/src/bin/pg_upgrade/t/002_pg_upgrade.pl b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
index 41fce089d6..a5688a1cf2 100644
--- a/src/bin/pg_upgrade/t/002_pg_upgrade.pl
+++ b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
@@ -317,6 +317,12 @@ if (defined($ENV{oldinstall}))
}
}
+# Create an invalid database, will be deleted below
+$oldnode->safe_psql('postgres', qq(
+ CREATE DATABASE regression_invalid;
+ UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
+));
+
# In a VPATH build, we'll be started in the source directory, but we want
# to run pg_upgrade in the build directory so that any files generated finish
# in it, like delete_old_cluster.{sh,bat}.
@@ -345,6 +351,26 @@ ok(-d $newnode->data_dir . "/pg_upgrade_output.d",
"pg_upgrade_output.d/ not removed after pg_upgrade failure");
rmtree($newnode->data_dir . "/pg_upgrade_output.d");
+# Check that pg_upgrade aborts when encountering an invalid database
+command_checks_all(
+ [
+ 'pg_upgrade', '--no-sync', '-d', $oldnode->data_dir,
+ '-D', $newnode->data_dir, '-b', $oldbindir,
+ '-B', $newbindir, '-s', $newnode->host,
+ '-p', $oldnode->port, '-P', $newnode->port,
+ $mode, '--check',
+ ],
+ 1,
+ [qr/invalid/], # pg_upgrade prints errors on stdout :(
+ [qr//],
+ 'invalid database causes failure');
+rmtree($newnode->data_dir . "/pg_upgrade_output.d");
+
+# And drop it, so we can continue
+$oldnode->start;
+$oldnode->safe_psql('postgres', 'DROP DATABASE regression_invalid');
+$oldnode->stop;
+
# --check command works here, cleans up pg_upgrade_output.d.
command_ok(
[
diff --git a/src/bin/scripts/clusterdb.c b/src/bin/scripts/clusterdb.c
index e3585b3272..58a774013b 100644
--- a/src/bin/scripts/clusterdb.c
+++ b/src/bin/scripts/clusterdb.c
@@ -234,7 +234,9 @@ cluster_all_databases(ConnParams *cparams, const char *progname,
int i;
conn = connectMaintenanceDatabase(cparams, progname, echo);
- result = executeQuery(conn, "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;", echo);
+ result = executeQuery(conn,
+ "SELECT datname FROM pg_database WHERE datallowconn AND datconnlimit <> -2 ORDER BY 1;",
+ echo);
PQfinish(conn);
for (i = 0; i < PQntuples(result); i++)
diff --git a/src/bin/scripts/reindexdb.c b/src/bin/scripts/reindexdb.c
index 3e8f6aca40..5b297d1dc1 100644
--- a/src/bin/scripts/reindexdb.c
+++ b/src/bin/scripts/reindexdb.c
@@ -718,7 +718,9 @@ reindex_all_databases(ConnParams *cparams,
int i;
conn = connectMaintenanceDatabase(cparams, progname, echo);
- result = executeQuery(conn, "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;", echo);
+ result = executeQuery(conn,
+ "SELECT datname FROM pg_database WHERE datallowconn AND datconnlimit <> -2 ORDER BY 1;",
+ echo);
PQfinish(conn);
for (i = 0; i < PQntuples(result); i++)
diff --git a/src/bin/scripts/t/011_clusterdb_all.pl b/src/bin/scripts/t/011_clusterdb_all.pl
index eb904c08c7..35f0b18f50 100644
--- a/src/bin/scripts/t/011_clusterdb_all.pl
+++ b/src/bin/scripts/t/011_clusterdb_all.pl
@@ -21,4 +21,18 @@ $node->issues_sql_like(
qr/statement: CLUSTER.*statement: CLUSTER/s,
'cluster all databases');
+$node->safe_psql(
+ 'postgres', q(
+ CREATE DATABASE regression_invalid;
+ UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
+));
+$node->command_ok([ 'clusterdb', '-a' ],
+ 'invalid database not targeted by clusterdb -a');
+
+# Doesn't quite belong here, but don't want to waste time by creating an
+# invalid database in 010_clusterdb.pl as well.
+$node->command_fails_like([ 'clusterdb', '-d', 'regression_invalid'],
+ qr/FATAL: cannot connect to invalid database "regression_invalid"/,
+ 'clusterdb cannot target invalid database');
+
done_testing();
diff --git a/src/bin/scripts/t/050_dropdb.pl b/src/bin/scripts/t/050_dropdb.pl
index 9f2b6463b8..3ed670bb0a 100644
--- a/src/bin/scripts/t/050_dropdb.pl
+++ b/src/bin/scripts/t/050_dropdb.pl
@@ -31,4 +31,13 @@ $node->issues_sql_like(
$node->command_fails([ 'dropdb', 'nonexistent' ],
'fails with nonexistent database');
+# check that invalid database can be dropped with dropdb
+$node->safe_psql(
+ 'postgres', q(
+ CREATE DATABASE regression_invalid;
+ UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
+));
+$node->command_ok([ 'dropdb', 'regression_invalid' ],
+ 'invalid database can be dropped');
+
done_testing();
diff --git a/src/bin/scripts/t/091_reindexdb_all.pl b/src/bin/scripts/t/091_reindexdb_all.pl
index ac62b9b558..7f3e081ceb 100644
--- a/src/bin/scripts/t/091_reindexdb_all.pl
+++ b/src/bin/scripts/t/091_reindexdb_all.pl
@@ -18,4 +18,18 @@ $node->issues_sql_like(
qr/statement: REINDEX.*statement: REINDEX/s,
'reindex all databases');
+$node->safe_psql(
+ 'postgres', q(
+ CREATE DATABASE regression_invalid;
+ UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
+));
+$node->command_ok([ 'reindexdb', '-a' ],
+ 'invalid database not targeted by reindexdb -a');
+
+# Doesn't quite belong here, but don't want to waste time by creating an
+# invalid database in 090_reindexdb.pl as well.
+$node->command_fails_like([ 'reindexdb', '-d', 'regression_invalid'],
+ qr/FATAL: cannot connect to invalid database "regression_invalid"/,
+ 'reindexdb cannot target invalid database');
+
done_testing();
diff --git a/src/bin/scripts/t/101_vacuumdb_all.pl b/src/bin/scripts/t/101_vacuumdb_all.pl
index 0f9d5adc48..8d7c3ab014 100644
--- a/src/bin/scripts/t/101_vacuumdb_all.pl
+++ b/src/bin/scripts/t/101_vacuumdb_all.pl
@@ -16,4 +16,18 @@ $node->issues_sql_like(
qr/statement: VACUUM.*statement: VACUUM/s,
'vacuum all databases');
+$node->safe_psql(
+ 'postgres', q(
+ CREATE DATABASE regression_invalid;
+ UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
+));
+$node->command_ok([ 'vacuumdb', '-a' ],
+ 'invalid database not targeted by vacuumdb -a');
+
+# Doesn't quite belong here, but don't want to waste time by creating an
+# invalid database in 010_vacuumdb.pl as well.
+$node->command_fails_like([ 'vacuumdb', '-d', 'regression_invalid'],
+ qr/FATAL: cannot connect to invalid database "regression_invalid"/,
+ 'vacuumdb cannot target invalid database');
+
done_testing();
diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c
index 4b17a07089..f03d5b1c6c 100644
--- a/src/bin/scripts/vacuumdb.c
+++ b/src/bin/scripts/vacuumdb.c
@@ -883,7 +883,7 @@ vacuum_all_databases(ConnParams *cparams,
conn = connectMaintenanceDatabase(cparams, progname, echo);
result = executeQuery(conn,
- "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;",
+ "SELECT datname FROM pg_database WHERE datallowconn AND datconnlimit <> -2 ORDER BY 1;",
echo);
PQfinish(conn);
diff --git a/src/include/catalog/pg_database.h b/src/include/catalog/pg_database.h
index d004f4dc8a..ff5baaf514 100644
--- a/src/include/catalog/pg_database.h
+++ b/src/include/catalog/pg_database.h
@@ -49,7 +49,10 @@ CATALOG(pg_database,1262,DatabaseRelationId) BKI_SHARED_RELATION BKI_ROWTYPE_OID
/* new connections allowed? */
bool datallowconn;
- /* max connections allowed (-1=no limit) */
+ /*
+ * Max connections allowed. Negative values have special meaning, see
+ * DATCONNLIMIT_* defines below.
+ */
int32 datconnlimit;
/* all Xids < this are frozen in this DB */
@@ -103,4 +106,19 @@ DECLARE_UNIQUE_INDEX_PKEY(pg_database_oid_index, 2672, DatabaseOidIndexId, on pg
DECLARE_OID_DEFINING_MACRO(Template0DbOid, 4);
DECLARE_OID_DEFINING_MACRO(PostgresDbOid, 5);
+/*
+ * Special values for pg_database.datconnlimit. Normal values are >= 0.
+ */
+#define DATCONNLIMIT_UNLIMITED -1 /* no limit */
+
+/*
+ * A database is set to invalid partway through being dropped. Using
+ * datconnlimit=-2 for this purpose isn't particularly clean, but is
+ * backpatchable.
+ */
+#define DATCONNLIMIT_INVALID_DB -2
+
+extern bool database_is_invalid_form(Form_pg_database datform);
+extern bool database_is_invalid_oid(Oid dboid);
+
#endif /* PG_DATABASE_H */
diff --git a/src/test/recovery/meson.build b/src/test/recovery/meson.build
index 2008958010..e7328e4894 100644
--- a/src/test/recovery/meson.build
+++ b/src/test/recovery/meson.build
@@ -42,6 +42,7 @@ tests += {
't/034_create_database.pl',
't/035_standby_logical_decoding.pl',
't/036_truncated_dropped.pl',
+ 't/037_invalid_database.pl',
],
},
}
diff --git a/src/test/recovery/t/037_invalid_database.pl b/src/test/recovery/t/037_invalid_database.pl
new file mode 100644
index 0000000000..29b9bb6977
--- /dev/null
+++ b/src/test/recovery/t/037_invalid_database.pl
@@ -0,0 +1,133 @@
+# Copyright (c) 2023, PostgreSQL Global Development Group
+#
+# Test we handle interrupted DROP DATABASE correctly.
+
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+my $node = PostgreSQL::Test::Cluster->new('node');
+$node->init;
+$node->append_conf(
+ "postgresql.conf", qq(
+autovacuum = off
+max_prepared_transactions=5
+log_min_duration_statement=0
+log_connections=on
+log_disconnections=on
+));
+
+$node->start;
+
+
+# First verify that we can't connect to or ALTER an invalid database. Just
+# mark the database as invalid ourselves, that's more reliable than hitting the
+# required race conditions (see testing further down)...
+
+$node->safe_psql(
+ "postgres", qq(
+CREATE DATABASE regression_invalid;
+UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
+));
+
+my $psql_stdout = '';
+my $psql_stderr = '';
+
+is($node->psql('regression_invalid', '', stderr => \$psql_stderr),
+ 2, "can't connect to invalid database - error code");
+like(
+ $psql_stderr,
+ qr/FATAL:\s+cannot connect to invalid database "regression_invalid"/,
+ "can't connect to invalid database - error message");
+
+is($node->psql('postgres', 'ALTER DATABASE regression_invalid CONNECTION LIMIT 10'),
+ 2, "can't ALTER invalid database");
+
+# check invalid database can't be used as a template
+is( $node->psql('postgres', 'CREATE DATABASE copy_invalid TEMPLATE regression_invalid'),
+ 3,
+ "can't use invalid database as template");
+
+
+# Verify that VACUUM ignores an invalid database when computing how much of
+# the clog is needed (vac_truncate_clog()). For that we modify the pg_database
+# row of the invalid database to have an outdated datfrozenxid.
+$psql_stderr = '';
+$node->psql(
+ 'postgres',
+ qq(
+UPDATE pg_database SET datfrozenxid = '123456' WHERE datname = 'regression_invalid';
+DROP TABLE IF EXISTS foo_tbl; CREATE TABLE foo_tbl();
+VACUUM FREEZE;),
+ stderr => \$psql_stderr);
+unlike(
+ $psql_stderr,
+ qr/some databases have not been vacuumed in over 2 billion transactions/,
+ "invalid databases are ignored by vac_truncate_clog");
+
+
+# But we need to be able to drop an invalid database.
+is( $node->psql(
+ 'postgres', 'DROP DATABASE regression_invalid',
+ stdout => \$psql_stdout,
+ stderr => \$psql_stderr),
+ 0,
+ "can DROP invalid database");
+
+# Ensure database is gone
+is($node->psql('postgres', 'DROP DATABASE regression_invalid'),
+ 3, "can't drop already dropped database");
+
+
+# Test that interruption of DROP DATABASE is handled properly. To ensure the
+# interruption happens at the appropriate moment, we lock pg_tablespace. DROP
+# DATABASE scans pg_tablespace once it has reached the "irreversible" part of
+# dropping the database, making it a suitable point to wait.
+my $bgpsql = $node->background_psql('postgres', on_error_stop => 0);
+my $pid = $bgpsql->query('SELECT pg_backend_pid()');
+
+# create the database, prevent drop database via lock held by a 2PC transaction
+ok( $bgpsql->query_safe(
+ qq(
+ CREATE DATABASE regression_invalid_interrupt;
+ BEGIN;
+ LOCK pg_tablespace;
+ PREPARE TRANSACTION 'lock_tblspc';)),
+ "blocked DROP DATABASE completion");
+
+# Try to drop. This will wait due to the still held lock.
+$bgpsql->query_until(qr//, "DROP DATABASE regression_invalid_interrupt;\n");
+
+# Ensure we're waiting for the lock
+$node->poll_query_until('postgres',
+ qq(SELECT EXISTS(SELECT * FROM pg_locks WHERE NOT granted AND relation = 'pg_tablespace'::regclass AND mode = 'AccessShareLock');)
+);
+
+# and finally interrupt the DROP DATABASE
+ok($node->safe_psql('postgres', "SELECT pg_cancel_backend($pid)"),
+ "canceling DROP DATABASE");
+
+# wait for cancellation to be processed
+ok( pump_until(
+ $bgpsql->{run}, $bgpsql->{timeout},
+ \$bgpsql->{stderr}, qr/canceling statement due to user request/),
+ "cancel processed");
+$bgpsql->{stderr} = '';
+
+# verify that connections to the database aren't allowed
+is($node->psql('regression_invalid_interrupt', ''),
+ 2, "can't connect to invalid_interrupt database");
+
+# To properly drop the database, we need to release the lock that was
+# preventing us from doing so.
+ok($bgpsql->query_safe(qq(ROLLBACK PREPARED 'lock_tblspc')),
+ "unblock DROP DATABASE");
+
+ok($bgpsql->query(qq(DROP DATABASE regression_invalid_interrupt)),
+ "DROP DATABASE invalid_interrupt");
+
+$bgpsql->quit();
+
+done_testing();