diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index 8af125276a..0f58cb102d 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -2978,7 +2978,8 @@ SCRAM-SHA-256$<iteration count>:&l
Sets maximum number of concurrent connections that can be made
- to this database. -1 means no limit.
+       to this database. -1 means no limit; -2 indicates the database is
+       invalid.
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index d4ab736c69..974c5777bc 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -140,7 +140,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
int encoding = -1;
bool dbistemplate = false;
bool dballowconnections = true;
- int dbconnlimit = -1;
+ int dbconnlimit = DATCONNLIMIT_UNLIMITED;
int notherbackends;
int npreparedxacts;
createdb_failure_params fparms;
@@ -309,7 +309,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
if (dconnlimit && dconnlimit->arg)
{
dbconnlimit = defGetInt32(dconnlimit);
- if (dbconnlimit < -1)
+ if (dbconnlimit < DATCONNLIMIT_UNLIMITED)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid connection limit: %d", dbconnlimit)));
@@ -357,6 +357,16 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
errmsg("template database \"%s\" does not exist",
dbtemplate)));
+ /*
+ * If the source database was in the process of being dropped, we can't
+ * use it as a template.
+ */
+ if (database_is_invalid_oid(src_dboid))
+ ereport(ERROR,
+ errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot use invalid database \"%s\" as template", dbtemplate),
+ errhint("Use DROP DATABASE to drop invalid databases."));
+
/*
* Permission check: to copy a DB that's not marked datistemplate, you
* must be superuser or the owner thereof.
@@ -817,6 +827,7 @@ dropdb(const char *dbname, bool missing_ok, bool force)
bool db_istemplate;
Relation pgdbrel;
HeapTuple tup;
+ Form_pg_database datform;
int notherbackends;
int npreparedxacts;
int nslots,
@@ -932,17 +943,6 @@ dropdb(const char *dbname, bool missing_ok, bool force)
dbname),
errdetail_busy_db(notherbackends, npreparedxacts)));
- /*
- * Remove the database's tuple from pg_database.
- */
- tup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(db_id));
- if (!HeapTupleIsValid(tup))
- elog(ERROR, "cache lookup failed for database %u", db_id);
-
- CatalogTupleDelete(pgdbrel, &tup->t_self);
-
- ReleaseSysCache(tup);
-
/*
* Delete any comments or security labels associated with the database.
*/
@@ -959,6 +959,32 @@ dropdb(const char *dbname, bool missing_ok, bool force)
*/
dropDatabaseDependencies(db_id);
+ tup = SearchSysCacheCopy1(DATABASEOID, ObjectIdGetDatum(db_id));
+ if (!HeapTupleIsValid(tup))
+ elog(ERROR, "cache lookup failed for database %u", db_id);
+ datform = (Form_pg_database) GETSTRUCT(tup);
+
+ /*
+ * Except for the deletion of the catalog row, subsequent actions are not
+ * transactional (consider DropDatabaseBuffers() discarding modified
+ * buffers). But we might crash or get interrupted below. To prevent
+ * accesses to a database with invalid contents, mark the database as
+ * invalid using an in-place update.
+ *
+ * We need to flush the WAL before continuing, to guarantee the
+ * modification is durable before performing irreversible filesystem
+ * operations.
+ */
+ datform->datconnlimit = DATCONNLIMIT_INVALID_DB;
+ heap_inplace_update(pgdbrel, tup);
+ XLogFlush(XactLastRecEnd);
+
+ /*
+ * Also delete the tuple - transactionally. If this transaction commits,
+ * the row will be gone, but if we fail, dropdb() can be invoked again.
+ */
+ CatalogTupleDelete(pgdbrel, &tup->t_self);
+
/*
* Drop db-specific replication slots.
*/
@@ -1481,7 +1507,7 @@ AlterDatabase(ParseState *pstate, AlterDatabaseStmt *stmt, bool isTopLevel)
ListCell *option;
bool dbistemplate = false;
bool dballowconnections = true;
- int dbconnlimit = -1;
+ int dbconnlimit = DATCONNLIMIT_UNLIMITED;
DefElem *distemplate = NULL;
DefElem *dallowconnections = NULL;
DefElem *dconnlimit = NULL;
@@ -1564,7 +1590,7 @@ AlterDatabase(ParseState *pstate, AlterDatabaseStmt *stmt, bool isTopLevel)
if (dconnlimit && dconnlimit->arg)
{
dbconnlimit = defGetInt32(dconnlimit);
- if (dbconnlimit < -1)
+ if (dbconnlimit < DATCONNLIMIT_UNLIMITED)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid connection limit: %d", dbconnlimit)));
@@ -1591,6 +1617,14 @@ AlterDatabase(ParseState *pstate, AlterDatabaseStmt *stmt, bool isTopLevel)
datform = (Form_pg_database) GETSTRUCT(tuple);
dboid = datform->oid;
+ if (database_is_invalid_form(datform))
+ {
+ ereport(FATAL,
+ errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot alter invalid database \"%s\"", stmt->dbname),
+ errhint("Use DROP DATABASE to drop invalid databases."));
+ }
+
if (!pg_database_ownercheck(dboid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE,
stmt->dbname);
@@ -2170,6 +2204,42 @@ get_database_name(Oid dbid)
return result;
}
+
+/*
+ * While a database is being dropped, its pg_database row is marked invalid,
+ * but the catalog contents still exist.  Connections to such a database are
+ * not allowed.
+ */
+bool
+database_is_invalid_form(Form_pg_database datform)
+{
+ return datform->datconnlimit == DATCONNLIMIT_INVALID_DB;
+}
+
+
+/*
+ * Convenience wrapper around database_is_invalid_form()
+ */
+bool
+database_is_invalid_oid(Oid dboid)
+{
+ HeapTuple dbtup;
+ Form_pg_database dbform;
+ bool invalid;
+
+ dbtup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(dboid));
+ if (!HeapTupleIsValid(dbtup))
+ elog(ERROR, "cache lookup failed for database %u", dboid);
+ dbform = (Form_pg_database) GETSTRUCT(dbtup);
+
+ invalid = database_is_invalid_form(dbform);
+
+ ReleaseSysCache(dbtup);
+
+ return invalid;
+}
+
+
/*
* recovery_create_dbdir()
*
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index bf2799a854..18018a4e8a 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -1697,6 +1697,20 @@ vac_truncate_clog(TransactionId frozenXID,
Assert(TransactionIdIsNormal(datfrozenxid));
Assert(MultiXactIdIsValid(datminmxid));
+		/*
+		 * If the database is in the process of being dropped, or the drop
+		 * was interrupted, no connections to it are possible anymore.
+		 * Therefore we don't need to take it into account here, which is
+		 * good, because it can't be processed by autovacuum either.
+		 */
+ if (database_is_invalid_form((Form_pg_database) dbform))
+ {
+ elog(DEBUG2,
+ "skipping invalid database \"%s\" while computing relfrozenxid",
+ NameStr(dbform->datname));
+ continue;
+ }
+
/*
* If things are working properly, no database should have a
* datfrozenxid or datminmxid that is "in the future". However, such
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 13fa79b06b..18cb8a8e8d 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -1918,6 +1918,18 @@ get_database_list(void)
avw_dbase *avdb;
MemoryContext oldcxt;
+		/*
+		 * If the database has been partially dropped, we can't vacuum it,
+		 * nor do we need to.
+		 */
+ if (database_is_invalid_form(pgdatabase))
+ {
+ elog(DEBUG2,
+ "autovacuum: skipping invalid database \"%s\"",
+ NameStr(pgdatabase->datname));
+ continue;
+ }
+
/*
* Allocate our results in the caller's context, not the
* transaction's. We do this inside the loop, and restore the original
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 51d1bbef30..0b5a3050f1 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -982,6 +982,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
if (!bootstrap)
{
HeapTuple tuple;
+ Form_pg_database datform;
tuple = GetDatabaseTuple(dbname);
if (!HeapTupleIsValid(tuple) ||
@@ -991,6 +992,15 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist", dbname),
errdetail("It seems to have just been dropped or renamed.")));
+
+ datform = (Form_pg_database) GETSTRUCT(tuple);
+ if (database_is_invalid_form(datform))
+ {
+ ereport(FATAL,
+ errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot connect to invalid database \"%s\"", dbname),
+ errhint("Use DROP DATABASE to drop invalid databases."));
+ }
}
/*
diff --git a/src/bin/pg_amcheck/pg_amcheck.c b/src/bin/pg_amcheck/pg_amcheck.c
index 2c86ddae21..8ae0d7c77c 100644
--- a/src/bin/pg_amcheck/pg_amcheck.c
+++ b/src/bin/pg_amcheck/pg_amcheck.c
@@ -1623,7 +1623,7 @@ compile_database_list(PGconn *conn, SimplePtrList *databases,
"FROM pg_catalog.pg_database d "
"LEFT OUTER JOIN exclude_raw e "
"ON d.datname ~ e.rgx "
- "\nWHERE d.datallowconn "
+ "\nWHERE d.datallowconn AND datconnlimit != -2 "
"AND e.pattern_id IS NULL"
"),"
diff --git a/src/bin/pg_amcheck/t/002_nonesuch.pl b/src/bin/pg_amcheck/t/002_nonesuch.pl
index df0cb036cd..beea3d34f7 100644
--- a/src/bin/pg_amcheck/t/002_nonesuch.pl
+++ b/src/bin/pg_amcheck/t/002_nonesuch.pl
@@ -6,7 +6,7 @@ use warnings;
use PostgresNode;
use TestLib;
-use Test::More tests => 100;
+use Test::More tests => 106;
# Test set-up
my ($node, $port);
@@ -288,6 +288,40 @@ $node->command_checks_all(
'many unmatched patterns and one matched pattern under --no-strict-names'
);
+
+#########################################
+# Test that an invalid / partially dropped database won't be targeted
+
+$node->safe_psql(
+ 'postgres', q(
+ CREATE DATABASE regression_invalid;
+ UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
+));
+
+$node->command_checks_all(
+ [
+ 'pg_amcheck', '-d', 'regression_invalid'
+ ],
+ 1,
+ [qr/^$/],
+ [
+ qr/pg_amcheck: error: no connectable databases to check matching "regression_invalid"/,
+ ],
+ 'checking handling of invalid database');
+
+$node->command_checks_all(
+ [
+ 'pg_amcheck', '-d', 'postgres',
+ '-t', 'regression_invalid.public.foo',
+ ],
+ 1,
+ [qr/^$/],
+ [
+ qr/pg_amcheck: error: no connectable databases to check matching "regression_invalid.public.foo"/,
+ ],
+ 'checking handling of object in invalid database');
+
+
#########################################
# Test checking otherwise existent objects but in databases where they do not exist
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index f8ea8236ff..434e6e8504 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -1326,7 +1326,7 @@ dropDBs(PGconn *conn)
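+	/* datconnlimit = -2 marks invalid (partially dropped) databases */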
res = executeQuery(conn,
"SELECT datname "
"FROM pg_database d "
- "WHERE datallowconn "
+ "WHERE datallowconn AND datconnlimit != -2 "
"ORDER BY datname");
if (PQntuples(res) > 0)
@@ -1490,7 +1490,7 @@ dumpDatabases(PGconn *conn)
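+	/* datconnlimit = -2 marks invalid (partially dropped) databases */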
res = executeQuery(conn,
"SELECT datname "
"FROM pg_database d "
- "WHERE datallowconn "
+ "WHERE datallowconn AND datconnlimit != -2 "
"ORDER BY (datname <> 'template1'), datname");
if (PQntuples(res) > 0)
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index 976b3a33d0..1d2e683fb4 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -1449,6 +1449,17 @@ my %tests = (
},
},
+ 'CREATE DATABASE regression_invalid...' => {
+ create_order => 1,
+ create_sql => q(
+ CREATE DATABASE regression_invalid;
+ UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid'),
+ regexp => qr/^CREATE DATABASE regression_invalid/m,
+ not_like => {
+ pg_dumpall_dbprivs => 1,
+ },
+ },
+
'CREATE ACCESS METHOD gist2' => {
create_order => 52,
create_sql =>
@@ -3642,7 +3653,7 @@ $node->psql('postgres', 'create database regress_pg_dump_test;');
# Start with number of command_fails_like()*2 tests below (each
# command_fails_like is actually 2 tests) + number of command_ok()*3
-my $num_tests = 33;
+my $num_tests = 35;
foreach my $run (sort keys %pgdump_runs)
{
@@ -3784,6 +3795,14 @@ command_fails_like(
qr/pg_dump: error: connection to server .* failed: FATAL: database "qqq" does not exist/,
'connecting to a non-existent database');
+#########################################
+# Test connecting to an invalid database
+
+command_fails_like(
+ [ 'pg_dump', '-p', "$port", '-d', 'regression_invalid' ],
+ qr/pg_dump: error: connection to server .* failed: FATAL: cannot connect to invalid database "regression_invalid"/,
+ 'connecting to an invalid database');
+
#########################################
# Test connecting with an unprivileged user
diff --git a/src/bin/scripts/clusterdb.c b/src/bin/scripts/clusterdb.c
index fc771eed77..af0a0a041f 100644
--- a/src/bin/scripts/clusterdb.c
+++ b/src/bin/scripts/clusterdb.c
@@ -239,7 +239,9 @@ cluster_all_databases(ConnParams *cparams, const char *progname,
int i;
conn = connectMaintenanceDatabase(cparams, progname, echo);
- result = executeQuery(conn, "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;", echo);
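+	/* datconnlimit = -2 marks invalid (partially dropped) databases */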
+ result = executeQuery(conn,
+ "SELECT datname FROM pg_database WHERE datallowconn AND datconnlimit <> -2 ORDER BY 1;",
+ echo);
PQfinish(conn);
for (i = 0; i < PQntuples(result); i++)
diff --git a/src/bin/scripts/reindexdb.c b/src/bin/scripts/reindexdb.c
index 7638b01fbd..947cfc4065 100644
--- a/src/bin/scripts/reindexdb.c
+++ b/src/bin/scripts/reindexdb.c
@@ -762,7 +762,9 @@ reindex_all_databases(ConnParams *cparams,
int i;
conn = connectMaintenanceDatabase(cparams, progname, echo);
- result = executeQuery(conn, "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;", echo);
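+	/* datconnlimit = -2 marks invalid (partially dropped) databases */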
+ result = executeQuery(conn,
+ "SELECT datname FROM pg_database WHERE datallowconn AND datconnlimit <> -2 ORDER BY 1;",
+ echo);
PQfinish(conn);
for (i = 0; i < PQntuples(result); i++)
diff --git a/src/bin/scripts/t/011_clusterdb_all.pl b/src/bin/scripts/t/011_clusterdb_all.pl
index c7e8514fb6..da10ccf18b 100644
--- a/src/bin/scripts/t/011_clusterdb_all.pl
+++ b/src/bin/scripts/t/011_clusterdb_all.pl
@@ -6,7 +6,7 @@ use warnings;
use PostgresNode;
use TestLib;
-use Test::More tests => 2;
+use Test::More tests => 4;
my $node = get_new_node('main');
$node->init;
@@ -20,3 +20,16 @@ $node->issues_sql_like(
[ 'clusterdb', '-a' ],
qr/statement: CLUSTER.*statement: CLUSTER/s,
'cluster all databases');
+
+$node->safe_psql(
+ 'postgres', q(
+ CREATE DATABASE regression_invalid;
+ UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
+));
+$node->command_ok([ 'clusterdb', '-a' ],
+ 'invalid database not targeted by clusterdb -a');
+
+# Doesn't quite belong here, but don't want to waste time by creating an
+# invalid database in 010_clusterdb.pl as well.
+$node->command_fails([ 'clusterdb', '-d', 'regression_invalid'],
+ 'clusterdb cannot target invalid database');
diff --git a/src/bin/scripts/t/050_dropdb.pl b/src/bin/scripts/t/050_dropdb.pl
index 646cb4e82f..8b49108284 100644
--- a/src/bin/scripts/t/050_dropdb.pl
+++ b/src/bin/scripts/t/050_dropdb.pl
@@ -6,7 +6,7 @@ use warnings;
use PostgresNode;
use TestLib;
-use Test::More tests => 13;
+use Test::More tests => 14;
program_help_ok('dropdb');
program_version_ok('dropdb');
@@ -30,3 +30,12 @@ $node->issues_sql_like(
$node->command_fails([ 'dropdb', 'nonexistent' ],
'fails with nonexistent database');
+
+# check that invalid database can be dropped with dropdb
+$node->safe_psql(
+ 'postgres', q(
+ CREATE DATABASE regression_invalid;
+ UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
+));
+$node->command_ok([ 'dropdb', 'regression_invalid' ],
+ 'invalid database can be dropped');
diff --git a/src/bin/scripts/t/091_reindexdb_all.pl b/src/bin/scripts/t/091_reindexdb_all.pl
index 299b198d15..ff3dfb6bc9 100644
--- a/src/bin/scripts/t/091_reindexdb_all.pl
+++ b/src/bin/scripts/t/091_reindexdb_all.pl
@@ -5,7 +5,7 @@ use strict;
use warnings;
use PostgresNode;
-use Test::More tests => 2;
+use Test::More tests => 4;
my $node = get_new_node('main');
$node->init;
@@ -17,3 +17,16 @@ $node->issues_sql_like(
[ 'reindexdb', '-a' ],
qr/statement: REINDEX.*statement: REINDEX/s,
'reindex all databases');
+
+$node->safe_psql(
+ 'postgres', q(
+ CREATE DATABASE regression_invalid;
+ UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
+));
+$node->command_ok([ 'reindexdb', '-a' ],
+ 'invalid database not targeted by reindexdb -a');
+
+# Doesn't quite belong here, but don't want to waste time by creating an
+# invalid database in 090_reindexdb.pl as well.
+$node->command_fails([ 'reindexdb', '-d', 'regression_invalid'],
+ 'reindexdb cannot target invalid database');
diff --git a/src/bin/scripts/t/101_vacuumdb_all.pl b/src/bin/scripts/t/101_vacuumdb_all.pl
index 504f252748..4ebf665bdf 100644
--- a/src/bin/scripts/t/101_vacuumdb_all.pl
+++ b/src/bin/scripts/t/101_vacuumdb_all.pl
@@ -5,7 +5,7 @@ use strict;
use warnings;
use PostgresNode;
-use Test::More tests => 2;
+use Test::More tests => 4;
my $node = get_new_node('main');
$node->init;
@@ -15,3 +15,16 @@ $node->issues_sql_like(
[ 'vacuumdb', '-a' ],
qr/statement: VACUUM.*statement: VACUUM/s,
'vacuum all databases');
+
+$node->safe_psql(
+ 'postgres', q(
+ CREATE DATABASE regression_invalid;
+ UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
+));
+$node->command_ok([ 'vacuumdb', '-a' ],
+ 'invalid database not targeted by vacuumdb -a');
+
+# Doesn't quite belong here, but don't want to waste time by creating an
+# invalid database in 010_vacuumdb.pl as well.
+$node->command_fails([ 'vacuumdb', '-d', 'regression_invalid'],
+ 'vacuumdb cannot target invalid database');
diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c
index a85919c5c1..c884ffc887 100644
--- a/src/bin/scripts/vacuumdb.c
+++ b/src/bin/scripts/vacuumdb.c
@@ -802,7 +802,7 @@ vacuum_all_databases(ConnParams *cparams,
conn = connectMaintenanceDatabase(cparams, progname, echo);
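+	/* datconnlimit = -2 marks invalid (partially dropped) databases */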
result = executeQuery(conn,
- "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;",
+ "SELECT datname FROM pg_database WHERE datallowconn AND datconnlimit <> -2 ORDER BY 1;",
echo);
PQfinish(conn);
diff --git a/src/include/catalog/pg_database.h b/src/include/catalog/pg_database.h
index d3de45821c..acba8a3943 100644
--- a/src/include/catalog/pg_database.h
+++ b/src/include/catalog/pg_database.h
@@ -52,7 +52,10 @@ CATALOG(pg_database,1262,DatabaseRelationId) BKI_SHARED_RELATION BKI_ROWTYPE_OID
/* new connections allowed? */
bool datallowconn;
- /* max connections allowed (-1=no limit) */
+ /*
+	 * Max connections allowed.  Negative values have special meaning; see
+	 * the DATCONNLIMIT_* defines below.
+ */
int32 datconnlimit;
/* highest OID to consider a system OID */
@@ -89,4 +92,19 @@ DECLARE_UNIQUE_INDEX(pg_database_datname_index, 2671, on pg_database using btree
DECLARE_UNIQUE_INDEX_PKEY(pg_database_oid_index, 2672, on pg_database using btree(oid oid_ops));
#define DatabaseOidIndexId 2672
+/*
+ * Special values for pg_database.datconnlimit. Normal values are >= 0.
+ */
+#define DATCONNLIMIT_UNLIMITED -1 /* no limit */
+
+/*
+ * A database is marked invalid partway through being dropped.  Using
+ * datconnlimit=-2 for this purpose isn't particularly clean, but is
+ * backpatchable.
+ */
+#define DATCONNLIMIT_INVALID_DB -2
+
+extern bool database_is_invalid_form(Form_pg_database datform);
+extern bool database_is_invalid_oid(Oid dboid);
+
#endif /* PG_DATABASE_H */
diff --git a/src/test/recovery/t/037_invalid_database.pl b/src/test/recovery/t/037_invalid_database.pl
new file mode 100644
index 0000000000..a061fab5fc
--- /dev/null
+++ b/src/test/recovery/t/037_invalid_database.pl
@@ -0,0 +1,157 @@
+# Copyright (c) 2023, PostgreSQL Global Development Group
+#
+# Test we handle interrupted DROP DATABASE correctly.
+
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+my $node = PostgreSQL::Test::Cluster->new('node');
+$node->init;
+$node->append_conf(
+ "postgresql.conf", qq(
+autovacuum = off
+max_prepared_transactions=5
+log_min_duration_statement=0
+log_connections=on
+log_disconnections=on
+));
+
+$node->start;
+
+
+# First verify that we can't connect to or ALTER an invalid database. Just
+# mark the database as invalid ourselves; that's more reliable than hitting
+# the race conditions required to produce one (see the tests further down).
+
+$node->safe_psql(
+ "postgres", qq(
+CREATE DATABASE regression_invalid;
+UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid';
+));
+
+my $psql_stdout = '';
+my $psql_stderr = '';
+
+is($node->psql('regression_invalid', '', stderr => \$psql_stderr),
+ 2, "can't connect to invalid database - error code");
+like(
+ $psql_stderr,
+ qr/FATAL:\s+cannot connect to invalid database "regression_invalid"/,
+ "can't connect to invalid database - error message");
+
+is($node->psql('postgres', 'ALTER DATABASE regression_invalid CONNECTION LIMIT 10'),
+ 2, "can't ALTER invalid database");
+
+# check invalid database can't be used as a template
+is( $node->psql('postgres', 'CREATE DATABASE copy_invalid TEMPLATE regression_invalid'),
+ 3,
+ "can't use invalid database as template");
+
+
+# Verify that VACUUM ignores an invalid database when computing how much of
+# the clog is needed (vac_truncate_clog()). For that we modify the pg_database
+# row of the invalid database to have an outdated datfrozenxid.
+$psql_stderr = '';
+$node->psql(
+ 'postgres',
+ qq(
+UPDATE pg_database SET datfrozenxid = '123456' WHERE datname = 'regression_invalid';
+DROP TABLE IF EXISTS foo_tbl; CREATE TABLE foo_tbl();
+VACUUM FREEZE;),
+ stderr => \$psql_stderr);
+unlike(
+ $psql_stderr,
+ qr/some databases have not been vacuumed in over 2 billion transactions/,
+ "invalid databases are ignored by vac_truncate_clog");
+
+
+# But we need to be able to drop an invalid database.
+is( $node->psql(
+ 'postgres', 'DROP DATABASE regression_invalid',
+ stdout => \$psql_stdout,
+ stderr => \$psql_stderr),
+ 0,
+ "can DROP invalid database");
+
+# Ensure database is gone
+is($node->psql('postgres', 'DROP DATABASE regression_invalid'),
+ 3, "can't drop already dropped database");
+
+
+# Test that interruption of DROP DATABASE is handled properly. To ensure the
+# interruption happens at the appropriate moment, we lock pg_tablespace. DROP
+# DATABASE scans pg_tablespace once it has reached the "irreversible" part of
+# dropping the database, making it a suitable point to wait.
+my $bgpsql_in = '';
+my $bgpsql_out = '';
+my $bgpsql_err = '';
+my $bgpsql_timer = IPC::Run::timer($PostgreSQL::Test::Utils::timeout_default);
+my $bgpsql = $node->background_psql('postgres', \$bgpsql_in, \$bgpsql_out,
+ $bgpsql_timer, on_error_stop => 0);
+$bgpsql_out = '';
+$bgpsql_in .= "SELECT pg_backend_pid();\n";
+
+pump_until($bgpsql, $bgpsql_timer, \$bgpsql_out, qr/\d/);
+
+my $pid = $bgpsql_out;
+$bgpsql_out = '';
+
+# Create the database; prevent DROP DATABASE via a lock held by a 2PC transaction
+$bgpsql_in .= qq(
+ CREATE DATABASE regression_invalid_interrupt;
+ BEGIN;
+ LOCK pg_tablespace;
+ PREPARE TRANSACTION 'lock_tblspc';
+ \\echo done
+);
+
+ok(pump_until($bgpsql, $bgpsql_timer, \$bgpsql_out, qr/done/),
+ "blocked DROP DATABASE completion");
+$bgpsql_out = '';
+
+# Try to drop. This will wait due to the still held lock.
+$bgpsql_in .= qq(
+ DROP DATABASE regression_invalid_interrupt;
+ \\echo DROP DATABASE completed
+);
+$bgpsql->pump_nb;
+
+# Ensure we're waiting for the lock
+$node->poll_query_until('postgres',
+ qq(SELECT EXISTS(SELECT * FROM pg_locks WHERE NOT granted AND relation = 'pg_tablespace'::regclass AND mode = 'AccessShareLock');)
+);
+
+# and finally interrupt the DROP DATABASE
+ok($node->safe_psql('postgres', "SELECT pg_cancel_backend($pid)"),
+ "canceling DROP DATABASE");
+
+# wait for cancellation to be processed
+ok( pump_until(
+ $bgpsql, $bgpsql_timer, \$bgpsql_out, qr/DROP DATABASE completed/),
+ "cancel processed");
+$bgpsql_out = '';
+
+# verify that connections to the database aren't allowed
+is($node->psql('regression_invalid_interrupt', ''),
+ 2, "can't connect to invalid_interrupt database");
+
+# To properly drop the database, we need to release the lock that was
+# previously preventing us from doing so.
+$bgpsql_in .= qq(
+ ROLLBACK PREPARED 'lock_tblspc';
+ \\echo ROLLBACK PREPARED
+);
+ok(pump_until($bgpsql, $bgpsql_timer, \$bgpsql_out, qr/ROLLBACK PREPARED/),
+ "unblock DROP DATABASE");
+$bgpsql_out = '';
+
+is($node->psql('postgres', "DROP DATABASE regression_invalid_interrupt"),
+ 0, "DROP DATABASE invalid_interrupt");
+
+$bgpsql_in .= "\\q\n";
+$bgpsql->finish();
+
+done_testing();