diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index 6480fb8e74..983a999fcd 100644
--- a/src/bin/pg_dump/pg_backup.h
+++ b/src/bin/pg_dump/pg_backup.h
@@ -120,6 +120,7 @@ typedef struct _restoreOptions
 	int			enable_row_security;
 	int			sequence_data;	/* dump sequence data even in schema-only mode */
 	int			include_subscriptions;
+	int			binary_upgrade;
 } RestoreOptions;
 
 typedef struct _dumpOptions
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 7e2bed38b3..b11d6cb0c4 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -2874,7 +2874,15 @@ _tocEntryRequired(TocEntry *te, teSection curSection, RestoreOptions *ropt)
 	/* Mask it if we only want schema */
 	if (ropt->schemaOnly)
 	{
-		if (!(ropt->sequence_data && strcmp(te->desc, "SEQUENCE SET") == 0))
+		/*
+		 * In binary-upgrade mode, even with schema-only set, we do not mask
+		 * out large objects.  Only large object definitions, comments and
+		 * other information should be generated in binary-upgrade mode (not
+		 * the actual data).
+		 */
+		if (!(ropt->sequence_data && strcmp(te->desc, "SEQUENCE SET") == 0) &&
+			!(ropt->binary_upgrade && strcmp(te->desc, "BLOB") == 0) &&
+			!(ropt->binary_upgrade && strncmp(te->tag, "LARGE OBJECT ", 13) == 0))
 			res = res & REQ_SCHEMA;
 	}
 
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 7273ec8fe2..cfa1831f87 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -772,7 +772,15 @@ main(int argc, char **argv)
 	if (dopt.schemaOnly && dopt.sequence_data)
 		getTableData(&dopt, tblinfo, numTables, dopt.oids, RELKIND_SEQUENCE);
 
-	if (dopt.outputBlobs)
+	/*
+	 * In binary-upgrade mode, we do not have to worry about the actual blob
+	 * data or the associated metadata that resides in the pg_largeobject and
+	 * pg_largeobject_metadata tables, respectively.
+	 *
+	 * However, we do need to collect blob information as there may be
+	 * comments or other information on blobs that we still need to dump out.
+	 */
+	if (dopt.outputBlobs || dopt.binary_upgrade)
 		getBlobs(fout);
 
 	/*
@@ -852,6 +860,7 @@ main(int argc, char **argv)
 	ropt->enable_row_security = dopt.enable_row_security;
 	ropt->sequence_data = dopt.sequence_data;
 	ropt->include_subscriptions = dopt.include_subscriptions;
+	ropt->binary_upgrade = dopt.binary_upgrade;
 
 	if (compressLevel == -1)
 		ropt->compression = 0;
@@ -2900,6 +2909,20 @@ getBlobs(Archive *fout)
 			PQgetisnull(res, i, i_initlomacl) &&
 			PQgetisnull(res, i, i_initrlomacl))
 			binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
+
+		/*
+		 * In binary-upgrade mode for blobs, we do *not* dump out the data or
+		 * the ACLs, should any exist.  The data and ACL (if any) will be
+		 * copied by pg_upgrade, which simply copies the pg_largeobject and
+		 * pg_largeobject_metadata tables.
+		 *
+		 * We *do* dump out the definition of the blob because we need that to
+		 * make the restoration of the comments, and anything else, work since
+		 * pg_upgrade copies the files behind pg_largeobject and
+		 * pg_largeobject_metadata after the dump is restored.
+		 */
+		if (dopt->binary_upgrade)
+			binfo[i].dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL);
 	}
 
 	/*
@@ -8828,7 +8851,8 @@ dumpComment(Archive *fout, const char *target,
 	}
 	else
 	{
-		if (dopt->schemaOnly)
+		/* We do dump blob comments in binary-upgrade mode */
+		if (dopt->schemaOnly && !dopt->binary_upgrade)
 			return;
 	}
 
@@ -14223,7 +14247,8 @@ dumpSecLabel(Archive *fout, const char *target,
 	}
 	else
 	{
-		if (dopt->schemaOnly)
+		/* We do dump blob security labels in binary-upgrade mode */
+		if (dopt->schemaOnly && !dopt->binary_upgrade)
 			return;
 	}
 
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index f73bf8974d..c51088a49c 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -39,11 +39,17 @@ my %pgdump_runs = (
 	binary_upgrade => {
 		dump_cmd => [
 			'pg_dump',
-			"--file=$tempdir/binary_upgrade.sql",
+			'--format=custom',
+			"--file=$tempdir/binary_upgrade.dump",
 			'--schema-only',
 			'--binary-upgrade',
 			'-d', 'postgres',    # alternative way to specify database
-		], },
+		],
+		restore_cmd => [
+			'pg_restore', '-Fc',
+			'--verbose',
+			"--file=$tempdir/binary_upgrade.sql",
+			"$tempdir/binary_upgrade.dump", ], },
 	clean => {
 		dump_cmd => [
 			'pg_dump',
@@ -334,6 +340,7 @@ my %tests = (
 		all_runs => 1,
 		regexp   => qr/^ALTER LARGE OBJECT \d+ OWNER TO .*;/m,
 		like     => {
+			binary_upgrade          => 1,
 			clean                   => 1,
 			clean_if_exists         => 1,
 			column_inserts          => 1,
@@ -348,7 +355,6 @@ my %tests = (
 			section_pre_data        => 1,
 			test_schema_plus_blobs  => 1, },
 		unlike => {
-			binary_upgrade          => 1,
 			no_blobs                => 1,
 			no_owner                => 1,
 			only_dump_test_schema   => 1,
@@ -666,6 +672,7 @@ my %tests = (
 			'SELECT pg_catalog.lo_from_bytea(0, \'\\x310a320a330a340a350a360a370a380a390a\');',
 		regexp => qr/^SELECT pg_catalog\.lo_create\('\d+'\);/m,
 		like   => {
+			binary_upgrade          => 1,
 			clean                   => 1,
 			clean_if_exists         => 1,
 			column_inserts          => 1,
@@ -681,7 +688,6 @@ my %tests = (
 			section_pre_data        => 1,
 			test_schema_plus_blobs  => 1, },
 		unlike => {
-			binary_upgrade          => 1,
 			no_blobs                => 1,
 			only_dump_test_schema   => 1,
 			only_dump_test_table    => 1,
diff --git a/src/test/regress/expected/large_object.out b/src/test/regress/expected/large_object.out
new file mode 100644
index 0000000000..b00d47cc75
--- /dev/null
+++ b/src/test/regress/expected/large_object.out
@@ -0,0 +1,15 @@
+-- This is more-or-less DROP IF EXISTS LARGE OBJECT 3001;
+WITH unlink AS (SELECT lo_unlink(loid) FROM pg_largeobject WHERE loid = 3001) SELECT 1;
+ ?column? 
+----------
+        1
+(1 row)
+
+-- Test creation of a large object and leave it for testing pg_upgrade
+SELECT lo_create(3001);
+ lo_create 
+-----------
+      3001
+(1 row)
+
+COMMENT ON LARGE OBJECT 3001 IS 'testing comments';
diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
index f66b4432a1..8ac46ecef2 100644
--- a/src/test/regress/expected/privileges.out
+++ b/src/test/regress/expected/privileges.out
@@ -12,7 +12,7 @@ DROP ROLE IF EXISTS regress_user3;
 DROP ROLE IF EXISTS regress_user4;
 DROP ROLE IF EXISTS regress_user5;
 DROP ROLE IF EXISTS regress_user6;
-SELECT lo_unlink(oid) FROM pg_largeobject_metadata;
+SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
  lo_unlink 
 -----------
 (0 rows)
@@ -1173,11 +1173,11 @@ SELECT lo_unlink(2002);
 
 \c -
 -- confirm ACL setting
-SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata;
+SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
  oid  |   ownername   |                                             lomacl                                              
 ------+---------------+------------------------------------------------------------------------------------------------
- 1002 | regress_user1 | 
  1001 | regress_user1 | {regress_user1=rw/regress_user1,=rw/regress_user1}
+ 1002 | regress_user1 | 
  1003 | regress_user1 | {regress_user1=rw/regress_user1,regress_user2=r/regress_user1}
  1004 | regress_user1 | {regress_user1=rw/regress_user1,regress_user2=rw/regress_user1}
  1005 | regress_user1 | {regress_user1=rw/regress_user1,regress_user2=r*w/regress_user1,regress_user3=r/regress_user2}
@@ -1546,7 +1546,7 @@ DROP TABLE atest6;
 DROP TABLE atestc;
 DROP TABLE atestp1;
 DROP TABLE atestp2;
-SELECT lo_unlink(oid) FROM pg_largeobject_metadata;
+SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
  lo_unlink 
 -----------
          1
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index edeb2d6bc7..1f2fb597c4 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -84,7 +84,7 @@ test: select_into select_distinct select_distinct_on select_implicit select_havi
 # ----------
 # Another group of parallel tests
 # ----------
-test: brin gin gist spgist privileges init_privs security_label collate matview lock replica_identity rowsecurity object_address tablesample groupingsets drop_operator
+test: brin gin gist spgist privileges init_privs security_label collate matview lock replica_identity rowsecurity object_address tablesample groupingsets drop_operator large_object
 
 # ----------
 # Another group of parallel tests
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index 27a46d76d5..9ffceff5e0 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -116,6 +116,7 @@ test: object_address
 test: tablesample
 test: groupingsets
 test: drop_operator
+test: large_object
 test: alter_generic
 test: alter_operator
 test: misc
diff --git a/src/test/regress/sql/large_object.sql b/src/test/regress/sql/large_object.sql
new file mode 100644
index 0000000000..a9e18b7c60
--- /dev/null
+++ b/src/test/regress/sql/large_object.sql
@@ -0,0 +1,8 @@
+
+-- This is more-or-less DROP IF EXISTS LARGE OBJECT 3001;
+WITH unlink AS (SELECT lo_unlink(loid) FROM pg_largeobject WHERE loid = 3001) SELECT 1;
+
+-- Test creation of a large object and leave it for testing pg_upgrade
+SELECT lo_create(3001);
+
+COMMENT ON LARGE OBJECT 3001 IS 'testing comments';
diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
index 00dc7bd4ab..3d74abf043 100644
--- a/src/test/regress/sql/privileges.sql
+++ b/src/test/regress/sql/privileges.sql
@@ -17,7 +17,7 @@ DROP ROLE IF EXISTS regress_user4;
 DROP ROLE IF EXISTS regress_user5;
 DROP ROLE IF EXISTS regress_user6;
 
-SELECT lo_unlink(oid) FROM pg_largeobject_metadata;
+SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
 
 RESET client_min_messages;
 
@@ -729,7 +729,7 @@ SELECT lo_unlink(2002);
 
 \c -
 -- confirm ACL setting
-SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata;
+SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
 
 SET SESSION AUTHORIZATION regress_user3;
 
@@ -960,7 +960,7 @@ DROP TABLE atestc;
 DROP TABLE atestp1;
 DROP TABLE atestp2;
 
-SELECT lo_unlink(oid) FROM pg_largeobject_metadata;
+SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
 
 DROP GROUP regress_group1;
 DROP GROUP regress_group2;
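
For reference, here is a sketch (not part of the patch) of the statements a schema-only --binary-upgrade dump is expected to emit for the large object set up by the new large_object regression test; the owner role name is illustrative. The definition, ownership and comment travel through the dump, while the data and any GRANTs are left for pg_upgrade, which copies the files behind pg_largeobject and pg_largeobject_metadata directly:

-- Illustrative excerpt of pg_dump --schema-only --binary-upgrade output
SELECT pg_catalog.lo_create('3001');
ALTER LARGE OBJECT 3001 OWNER TO postgres;  -- owner role name is illustrative
COMMENT ON LARGE OBJECT 3001 IS 'testing comments';
-- Note: no lo_from_bytea()/lowrite() calls and no GRANT statements appear
-- here; pg_upgrade carries the large object data and ACLs over itself.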