pg_dump can now dump large objects even in plain-text output mode, by
using the recently added lo_create() function. The restore logic in pg_restore is greatly simplified as well: because each large object is recreated with its original OID, there is no longer any need to adjust database references to match a new set of blob OIDs.
parent b49d871f6a
commit 7a28de2052
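The mechanism is visible in the diff below: in plain-text mode the archiver now writes the large-object calls directly into the SQL script instead of refusing to dump blobs. A minimal sketch of the script fragment emitted for one blob, reconstructed from the ahprintf() calls in this commit (the OID 12345 and the data literal are placeholder values; 131072 is the value of INV_WRITE from libpq/libpq-fs.h, and the script assumes the server-side lo_open descriptor is 0):

    BEGIN;

    -- lo_create(12345) recreates the blob with its original OID
    SELECT lo_open(lo_create(12345), 131072);
    -- the blob contents follow as one or more bytea-escaped chunks
    SELECT lowrite(0, '...');
    SELECT lo_close(0);

    COMMIT;

Since blobs are now dumped by default in every format (the -b switch is accepted but ignored, and blobs are omitted only for single-table or single-schema dumps), a round trip such as

    pg_dump mydb > db.sql
    psql -d newdb -f db.sql

restores large objects with their OIDs intact.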
doc/src/sgml/backup.sgml
@@ -1,5 +1,5 @@
 <!--
-$PostgreSQL: pgsql/doc/src/sgml/backup.sgml,v 2.67 2005/06/21 04:02:29 tgl Exp $
+$PostgreSQL: pgsql/doc/src/sgml/backup.sgml,v 2.68 2005/06/21 20:45:43 tgl Exp $
 -->
 <chapter id="backup">
 <title>Backup and Restore</title>
@@ -88,9 +88,7 @@ pg_dump <replaceable class="parameter">dbname</replaceable> > <replaceable cl
 When your database schema relies on OIDs (for instance as foreign
 keys) you must instruct <application>pg_dump</> to dump the OIDs
 as well. To do this, use the <option>-o</option> command line
-option. <quote>Large objects</> are not dumped by default,
-either. See <xref linkend="app-pgdump">'s reference page if you
-use large objects.
+option.
 </para>
 </important>

@@ -267,28 +265,6 @@ pg_dump -Fc <replaceable class="parameter">dbname</replaceable> > <replaceabl
 </formalpara>

 </sect2>
-
-<sect2 id="backup-dump-caveats">
-<title>Caveats</title>
-
-<para>
-For reasons of backward compatibility, <application>pg_dump</>
-does not dump large objects by default.<indexterm><primary>large
-object</primary><secondary>backup</secondary></indexterm> To dump
-large objects you must use either the custom or the tar output
-format, and use the <option>-b</> option in
-<application>pg_dump</>. See the <xref linkend="app-pgdump"> reference
-page for details. The
-directory <filename>contrib/pg_dumplo</> of the
-<productname>PostgreSQL</> source tree also contains a program
-that can dump large objects.
-</para>
-
-<para>
-Please familiarize yourself with the <xref linkend="app-pgdump">
-reference page.
-</para>
-</sect2>
 </sect1>

 <sect1 id="backup-file">
doc/src/sgml/installation.sgml
@@ -1,4 +1,4 @@
-<!-- $PostgreSQL: pgsql/doc/src/sgml/installation.sgml,v 1.236 2005/06/21 04:02:29 tgl Exp $ -->
+<!-- $PostgreSQL: pgsql/doc/src/sgml/installation.sgml,v 1.237 2005/06/21 20:45:43 tgl Exp $ -->

 <chapter id="installation">
 <title><![%standalone-include[<productname>PostgreSQL</>]]>
@@ -389,14 +389,6 @@ su - postgres
 <application>pg_dumpall</>.
 </para>

-<para>
-<application>pg_dumpall</application> does not
-save large objects. Check
-<![%standalone-include[the documentation]]>
-<![%standalone-ignore[<xref linkend="backup-dump-caveats">]]>
-if you need to do this.
-</para>
-
 <para>
 To make the backup, you can use the <application>pg_dumpall</application>
 command from the version you are currently running. For best
doc/src/sgml/ref/pg_dump.sgml
@@ -1,5 +1,5 @@
 <!--
-$PostgreSQL: pgsql/doc/src/sgml/ref/pg_dump.sgml,v 1.77 2005/05/29 03:32:18 momjian Exp $
+$PostgreSQL: pgsql/doc/src/sgml/ref/pg_dump.sgml,v 1.78 2005/06/21 20:45:43 tgl Exp $
 PostgreSQL documentation
 -->

@@ -60,9 +60,8 @@ PostgreSQL documentation
 <xref linkend="app-pgrestore"> to rebuild the database. They
 allow <application>pg_restore</application> to be selective about
 what is restored, or even to reorder the items prior to being
-restored. The archive formats also allow saving and restoring
-<quote>large objects</>, which is not possible in a script dump.
-The archive files are also designed to be portable across
+restored.
+The archive file formats are designed to be portable across
 architectures.
 </para>

@@ -127,17 +126,6 @@ PostgreSQL documentation
 </listitem>
 </varlistentry>

-<varlistentry>
-<term><option>-b</></term>
-<term><option>--blobs</></term>
-<listitem>
-<para>
-Include large objects in the dump. A non-text output format
-must be selected.
-</para>
-</listitem>
-</varlistentry>
-
 <varlistentry>
 <term><option>-c</option></term>
 <term><option>--clean</option></term>
@@ -600,14 +588,6 @@ CREATE DATABASE foo WITH TEMPLATE template0;
 <application>pg_dump</application> has a few limitations:

 <itemizedlist>
-<listitem>
-<para>
-When dumping a single table or as plain text, <application>pg_dump</application>
-does not handle large objects. Large objects must be dumped with the
-entire database using one of the non-text archive formats.
-</para>
-</listitem>
-
 <listitem>
 <para>
 When a data-only dump is chosen and the option
@@ -660,17 +640,16 @@ CREATE DATABASE foo WITH TEMPLATE template0;
 </para>

 <para>
-To dump a database called <literal>mydb</> that contains
-large objects to a <filename>tar</filename> file:
+To dump a database called <literal>mydb</> to a <filename>tar</filename>
+file:

 <screen>
-<prompt>$</prompt> <userinput>pg_dump -Ft -b mydb > db.tar</userinput>
+<prompt>$</prompt> <userinput>pg_dump -Ft mydb > db.tar</userinput>
 </screen>
 </para>

 <para>
-To reload this database (with large objects) to an
-existing database called <literal>newdb</>:
+To reload this dump into an existing database called <literal>newdb</>:

 <screen>
 <prompt>$</prompt> <userinput>pg_restore -d newdb db.tar</userinput>
doc/src/sgml/ref/pg_dumpall.sgml
@@ -1,5 +1,5 @@
 <!--
-$PostgreSQL: pgsql/doc/src/sgml/ref/pg_dumpall.sgml,v 1.50 2005/06/21 04:02:31 tgl Exp $
+$PostgreSQL: pgsql/doc/src/sgml/ref/pg_dumpall.sgml,v 1.51 2005/06/21 20:45:43 tgl Exp $
 PostgreSQL documentation
 -->

@@ -43,16 +43,6 @@ PostgreSQL documentation
 groups, and access permissions that apply to databases as a whole.
 </para>

-<para>
-Thus, <application>pg_dumpall</application> is an integrated
-solution for backing up your databases. But note a limitation:
-it cannot dump <quote>large objects</quote>, since
-<application>pg_dump</application> cannot dump such objects into
-text files. If you have databases containing large objects,
-they should be dumped using one of <application>pg_dump</application>'s
-non-text output modes.
-</para>
-
 <para>
 Since <application>pg_dumpall</application> reads tables from all
 databases you will most likely have to connect as a database
doc/src/sgml/ref/pg_restore.sgml
@@ -1,4 +1,4 @@
-<!-- $PostgreSQL: pgsql/doc/src/sgml/ref/pg_restore.sgml,v 1.52 2005/06/09 17:56:51 momjian Exp $ -->
+<!-- $PostgreSQL: pgsql/doc/src/sgml/ref/pg_restore.sgml,v 1.53 2005/06/21 20:45:43 tgl Exp $ -->

 <refentry id="APP-PGRESTORE">
 <refmeta>
@@ -44,14 +44,13 @@
 </para>

 <para>
-<application>pg_restore</application> can operate in two modes: If
-a database name is specified, the archive is restored directly into
-the database. (Large objects can only be restored by using such a direct
-database connection.) Otherwise, a script containing the SQL
-commands necessary to rebuild the database is created (and written
-to a file or standard output), similar to the ones created by the
-<application>pg_dump</application> plain text format. Some of the
-options controlling the script output are therefore analogous to
+<application>pg_restore</application> can operate in two modes.
+If a database name is specified, the archive is restored directly into
+the database. Otherwise, a script containing the SQL
+commands necessary to rebuild the database is created and written
+to a file or standard output. The script output is equivalent to
+the plain text output format of <application>pg_dump</application>.
+Some of the options controlling the output are therefore analogous to
 <application>pg_dump</application> options.
 </para>

@@ -541,16 +540,16 @@ CREATE DATABASE foo WITH TEMPLATE template0;
 <title>Examples</title>

 <para>
-To dump a database called <literal>mydb</> that contains
-large objects to a <filename>tar</filename> file:
+To dump a database called <literal>mydb</> to a <filename>tar</filename>
+file:

 <screen>
-<prompt>$</prompt> <userinput>pg_dump -Ft -b mydb > db.tar</userinput>
+<prompt>$</prompt> <userinput>pg_dump -Ft mydb > db.tar</userinput>
 </screen>
 </para>

 <para>
-To reload this database (with large objects) to an
+To reload this dump into an
 existing database called <literal>newdb</>:

 <screen>
src/bin/pg_dump/README
@@ -5,16 +5,14 @@ Notes on pg_dump

 2. pg_dumpall forces all pg_dump output to be text, since it also outputs text into the same output stream.

-3. The plain text output format can not be used as input into pg_restore.
-
-4. pg_dump now dumps the items in a modified OID order to try to improve relaibility of default restores.
+3. The plain text output format cannot be used as input into pg_restore.


-To dump a database into the next custom format, type:
+To dump a database into the new custom format, type:

 pg_dump <db-name> -Fc > <backup-file>

-or, in TAR format
+or, to dump in TAR format

 pg_dump <db-name> -Ft > <backup-file>

@@ -28,7 +26,7 @@ To restore, try

 pg_restore <backup-file> --table | less

-or to list in a differnet orderL
+or to list in a different order

 pg_restore <backup-file> -l --oid --rearrange | less

@@ -59,27 +57,12 @@ or, simply:

 pg_restore backup.bck --use=toc.lis | psql newdbname


-BLOBs
-=====
-
-To dump blobs you must use the custom archive format (-Fc) or TAR format (-Ft), and specify the
---blobs qualifier to the pg_dump command.
-
-To restore blobs you must use a direct database connection (--db=db-to-restore-to).
-
-eg.
-
-pg_dump --blob -Fc db-to-backup -f backup.bck
-
-pg_restore backup.bck --db=db-to-restore-into
-
-
 TAR
 ===

 The TAR archive that pg_dump creates currently has a blank username & group for the files,
 but should be otherwise valid. It also includes a 'restore.sql' script which is there for
-the benefit of humans. It is never used by pg_restore.
+the benefit of humans. The script is never used by pg_restore.

 Note: the TAR format archive can only be used as input into pg_restore if it is in TAR form.
 (ie. you should not extract the files then expect pg_restore to work).
@@ -91,6 +74,3 @@ the BLOB files at the end.

 Philip Warner, 16-Jul-2000
 pjw@rhyme.com.au
-
-
-
src/bin/pg_dump/pg_backup.h
@@ -15,7 +15,7 @@
 *
 *
 * IDENTIFICATION
-* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup.h,v 1.35 2005/06/09 17:56:51 momjian Exp $
+* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup.h,v 1.36 2005/06/21 20:45:44 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -152,10 +152,6 @@ extern void ArchiveEntry(Archive *AHX,
 /* Called to write *data* to the archive */
 extern size_t WriteData(Archive *AH, const void *data, size_t dLen);

-/*
-extern int StartBlobs(Archive* AH);
-extern int EndBlobs(Archive* AH);
-*/
 extern int StartBlob(Archive *AH, Oid oid);
 extern int EndBlob(Archive *AH, Oid oid);

src/bin/pg_dump/pg_backup_archiver.c
@@ -15,7 +15,7 @@
 *
 *
 * IDENTIFICATION
-* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.110 2005/06/09 17:56:51 momjian Exp $
+* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.111 2005/06/21 20:45:44 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -49,8 +49,6 @@ static void _getObjectDescription(PQExpBuffer buf, TocEntry *te,
 static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isData, bool acl_pass);


-static void fixPriorBlobRefs(ArchiveHandle *AH, TocEntry *blobte,
-RestoreOptions *ropt);
 static void _doSetFixedOutputState(ArchiveHandle *AH);
 static void _doSetSessionAuth(ArchiveHandle *AH, const char *user);
 static void _doSetWithOids(ArchiveHandle *AH, const bool withOids);
@@ -67,12 +65,10 @@ static TocEntry *getTocEntryByDumpId(ArchiveHandle *AH, DumpId id);
 static void _moveAfter(ArchiveHandle *AH, TocEntry *pos, TocEntry *te);
 static int _discoverArchiveFormat(ArchiveHandle *AH);

+static void dump_lo_buf(ArchiveHandle *AH);
 static void _write_msg(const char *modulename, const char *fmt, va_list ap);
 static void _die_horribly(ArchiveHandle *AH, const char *modulename, const char *fmt, va_list ap);

-static int _canRestoreBlobs(ArchiveHandle *AH);
-static int _restoringToDB(ArchiveHandle *AH);
-
 static void dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim);


@@ -306,22 +302,13 @@ RestoreArchive(Archive *AHX, RestoreOptions *ropt)

 _printTocEntry(AH, te, ropt, true, false);

-/*
-* Maybe we can't do BLOBS, so check if this node is
-* for BLOBS
-*/
-if ((strcmp(te->desc, "BLOBS") == 0) &&
-!_canRestoreBlobs(AH))
+if (strcmp(te->desc, "BLOBS") == 0)
 {
-ahprintf(AH, "--\n-- SKIPPED \n--\n\n");
+ahlog(AH, 1, "restoring blob data\n");

-/*
-* This is a bit nasty - we assume, for the
-* moment, that if a custom output is used, then
-* we don't want warnings.
-*/
-if (!AH->CustomOutPtr)
-write_msg(modulename, "WARNING: skipping large-object restoration\n");
+_selectOutputSchema(AH, "pg_catalog");
+
+(*AH->PrintTocDataPtr) (AH, te, ropt);
 }
 else
 {
@@ -331,7 +318,8 @@ RestoreArchive(Archive *AHX, RestoreOptions *ropt)
 _becomeOwner(AH, te);
 _selectOutputSchema(AH, te->namespace);

-ahlog(AH, 1, "restoring data for table \"%s\"\n", te->tag);
+ahlog(AH, 1, "restoring data for table \"%s\"\n",
+te->tag);

 /*
 * If we have a copy statement, use it. As of
@@ -349,24 +337,6 @@ RestoreArchive(Archive *AHX, RestoreOptions *ropt)

 (*AH->PrintTocDataPtr) (AH, te, ropt);

-/*
-* If we just restored blobs, fix references in
-* previously-loaded tables; otherwise, if we
-* previously restored blobs, fix references in
-* this table. Note that in standard cases the
-* BLOBS entry comes after all TABLE DATA entries,
-* but we should cope with other orders in case
-* the user demands reordering.
-*/
-if (strcmp(te->desc, "BLOBS") == 0)
-fixPriorBlobRefs(AH, te, ropt);
-else if (AH->createdBlobXref &&
-strcmp(te->desc, "TABLE DATA") == 0)
-{
-ahlog(AH, 1, "fixing up large-object cross-reference for \"%s\"\n", te->tag);
-FixupBlobRefs(AH, te);
-}
-
 _enableTriggersIfNecessary(AH, te, ropt);
 }
 }
@@ -415,47 +385,6 @@ RestoreArchive(Archive *AHX, RestoreOptions *ropt)
 {
 PQfinish(AH->connection);
 AH->connection = NULL;
-
-if (AH->blobConnection)
-{
-PQfinish(AH->blobConnection);
-AH->blobConnection = NULL;
-}
 }
 }

-/*
-* After restoring BLOBS, fix all blob references in previously-restored
-* tables. (Normally, the BLOBS entry should appear after all TABLE DATA
-* entries, so this will in fact handle all blob references.)
-*/
-static void
-fixPriorBlobRefs(ArchiveHandle *AH, TocEntry *blobte, RestoreOptions *ropt)
-{
-TocEntry *te;
-teReqs reqs;
-
-if (AH->createdBlobXref)
-{
-/* NULL parameter means disable ALL user triggers */
-_disableTriggersIfNecessary(AH, NULL, ropt);
-
-for (te = AH->toc->next; te != blobte; te = te->next)
-{
-if (strcmp(te->desc, "TABLE DATA") == 0)
-{
-reqs = _tocEntryRequired(te, ropt, false);
-
-if ((reqs & REQ_DATA) != 0) /* We loaded the data */
-{
-ahlog(AH, 1, "fixing up large-object cross-reference for \"%s\"\n", te->tag);
-FixupBlobRefs(AH, te);
-}
-}
-}
-
-/* NULL parameter means enable ALL user triggers */
-_enableTriggersIfNecessary(AH, NULL, ropt);
-}
-}
-
@@ -477,22 +406,6 @@ NewRestoreOptions(void)
 return opts;
 }

-/*
-* Returns true if we're restoring directly to the database (and
-* aren't just making a psql script that can do the restoration).
-*/
-static int
-_restoringToDB(ArchiveHandle *AH)
-{
-return (AH->ropt->useDB && AH->connection);
-}
-
-static int
-_canRestoreBlobs(ArchiveHandle *AH)
-{
-return _restoringToDB(AH);
-}
-
 static void
 _disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
 {
@@ -500,10 +413,6 @@ _disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *rop
 if (!ropt->dataOnly || !ropt->disable_triggers)
 return;

-/* Don't do it for the BLOBS TocEntry, either */
-if (te && strcmp(te->desc, "BLOBS") == 0)
-return;
-
 /*
 * Become superuser if possible, since they are the only ones who can
 * update pg_class. If -S was not given, assume the initial user
@@ -539,10 +448,6 @@ _enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt
 if (!ropt->dataOnly || !ropt->disable_triggers)
 return;

-/* Don't do it for the BLOBS TocEntry, either */
-if (te && strcmp(te->desc, "BLOBS") == 0)
-return;
-
 /*
 * Become superuser if possible, since they are the only ones who can
 * update pg_class. If -S was not given, assume the initial user
@@ -757,6 +662,11 @@ EndBlob(Archive *AHX, Oid oid)
 void
 StartRestoreBlobs(ArchiveHandle *AH)
 {
+if (AH->connection)
+StartTransaction(AH);
+else
+ahprintf(AH, "BEGIN;\n\n");
+
 AH->blobCount = 0;
 }

@@ -766,17 +676,10 @@ StartRestoreBlobs(ArchiveHandle *AH)
 void
 EndRestoreBlobs(ArchiveHandle *AH)
 {
-if (AH->txActive)
-{
-ahlog(AH, 2, "committing large-object transactions\n");
+if (AH->connection)
 CommitTransaction(AH);
-}
-
-if (AH->blobTxActive)
-CommitTransactionXref(AH);
-
-if (AH->createdBlobXref)
-CreateBlobXrefIndex(AH);
+else
+ahprintf(AH, "COMMIT;\n\n");

 ahlog(AH, 1, "restored %d large objects\n", AH->blobCount);
 }
@@ -792,40 +695,26 @@ StartRestoreBlob(ArchiveHandle *AH, Oid oid)

 AH->blobCount++;

-if (!AH->createdBlobXref)
-{
-if (!AH->connection)
-die_horribly(AH, modulename, "cannot restore large objects without a database connection\n");
-
-CreateBlobXrefTable(AH);
-AH->createdBlobXref = 1;
-}
-
 /* Initialize the LO Buffer */
 AH->lo_buf_used = 0;

-/*
-* Start long-running TXs if necessary
-*/
-if (!AH->txActive)
+ahlog(AH, 2, "restoring large object with OID %u\n", oid);
+
+if (AH->connection)
 {
-ahlog(AH, 2, "starting large-object transactions\n");
-StartTransaction(AH);
+loOid = lo_create(AH->connection, oid);
+if (loOid == 0 || loOid != oid)
+die_horribly(AH, modulename, "could not create large object %u\n",
+oid);
+
+AH->loFd = lo_open(AH->connection, oid, INV_WRITE);
+if (AH->loFd == -1)
+die_horribly(AH, modulename, "could not open large object\n");
 }
-if (!AH->blobTxActive)
-StartTransactionXref(AH);
-
-loOid = lo_creat(AH->connection, INV_READ | INV_WRITE);
-if (loOid == 0)
-die_horribly(AH, modulename, "could not create large object\n");
-
-ahlog(AH, 2, "restoring large object with OID %u as %u\n", oid, loOid);
-
-InsertBlobXref(AH, oid, loOid);
-
-AH->loFd = lo_open(AH->connection, loOid, INV_WRITE);
-if (AH->loFd == -1)
-die_horribly(AH, modulename, "could not open large object\n");
+else
+{
+ahprintf(AH, "SELECT lo_open(lo_create(%u), %d);\n", oid, INV_WRITE);
+}

 AH->writingBlob = 1;
 }
@@ -836,29 +725,19 @@ EndRestoreBlob(ArchiveHandle *AH, Oid oid)
 if (AH->lo_buf_used > 0)
 {
 /* Write remaining bytes from the LO buffer */
-size_t res;
-
-res = lo_write(AH->connection, AH->loFd, (void *) AH->lo_buf, AH->lo_buf_used);
-
-ahlog(AH, 5, "wrote remaining %lu bytes of large-object data (result = %lu)\n",
-(unsigned long) AH->lo_buf_used, (unsigned long) res);
-if (res != AH->lo_buf_used)
-die_horribly(AH, modulename, "could not write to large object (result: %lu, expected: %lu)\n",
-(unsigned long) res, (unsigned long) AH->lo_buf_used);
-AH->lo_buf_used = 0;
+dump_lo_buf(AH);
 }

-lo_close(AH->connection, AH->loFd);
 AH->writingBlob = 0;

-/*
-* Commit every BLOB_BATCH_SIZE blobs...
-*/
-if (((AH->blobCount / BLOB_BATCH_SIZE) * BLOB_BATCH_SIZE) == AH->blobCount)
+if (AH->connection)
 {
-ahlog(AH, 2, "committing large-object transactions\n");
-CommitTransaction(AH);
-CommitTransactionXref(AH);
+lo_close(AH->connection, AH->loFd);
+AH->loFd = -1;
+}
+else
+{
+ahprintf(AH, "SELECT lo_close(0);\n\n");
 }
 }

@@ -1107,6 +986,45 @@ RestoringToDB(ArchiveHandle *AH)
 return (AH->ropt && AH->ropt->useDB && AH->connection);
 }

+/*
+* Dump the current contents of the LO data buffer while writing a BLOB
+*/
+static void
+dump_lo_buf(ArchiveHandle *AH)
+{
+if (AH->connection)
+{
+size_t res;
+
+res = lo_write(AH->connection, AH->loFd, AH->lo_buf, AH->lo_buf_used);
+ahlog(AH, 5, "wrote %lu bytes of large object data (result = %lu)\n",
+(unsigned long) AH->lo_buf_used, (unsigned long) res);
+if (res != AH->lo_buf_used)
+die_horribly(AH, modulename,
+"could not write to large object (result: %lu, expected: %lu)\n",
+(unsigned long) res, (unsigned long) AH->lo_buf_used);
+}
+else
+{
+unsigned char *str;
+size_t len;
+
+str = PQescapeBytea((const unsigned char *) AH->lo_buf,
+AH->lo_buf_used, &len);
+if (!str)
+die_horribly(AH, modulename, "out of memory\n");
+
+/* Hack: turn off writingBlob so ahwrite doesn't recurse to here */
+AH->writingBlob = 0;
+ahprintf(AH, "SELECT lowrite(0, '%s');\n", str);
+AH->writingBlob = 1;
+
+free(str);
+}
+AH->lo_buf_used = 0;
+}
+

 /*
 * Write buffer to the output file (usually stdout). This is user for
 * outputting 'restore' scripts etc. It is even possible for an archive
@@ -1120,30 +1038,22 @@ ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)

 if (AH->writingBlob)
 {
-if (AH->lo_buf_used + size * nmemb > AH->lo_buf_size)
-{
-/* Split LO buffer */
-size_t remaining = AH->lo_buf_size - AH->lo_buf_used;
-size_t slack = nmemb * size - remaining;
+size_t remaining = size * nmemb;

-memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, remaining);
-res = lo_write(AH->connection, AH->loFd, AH->lo_buf, AH->lo_buf_size);
-ahlog(AH, 5, "wrote %lu bytes of large object data (result = %lu)\n",
-(unsigned long) AH->lo_buf_size, (unsigned long) res);
-if (res != AH->lo_buf_size)
-die_horribly(AH, modulename,
-"could not write to large object (result: %lu, expected: %lu)\n",
-(unsigned long) res, (unsigned long) AH->lo_buf_size);
-memcpy(AH->lo_buf, (char *) ptr + remaining, slack);
-AH->lo_buf_used = slack;
-}
-else
+while (AH->lo_buf_used + remaining > AH->lo_buf_size)
 {
-/* LO Buffer is still large enough, buffer it */
-memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, size * nmemb);
-AH->lo_buf_used += size * nmemb;
+size_t avail = AH->lo_buf_size - AH->lo_buf_used;
+
+memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, avail);
+ptr = (const void *) ((const char *) ptr + avail);
+remaining -= avail;
+AH->lo_buf_used += avail;
+dump_lo_buf(AH);
 }

+memcpy((char *) AH->lo_buf + AH->lo_buf_used, ptr, remaining);
+AH->lo_buf_used += remaining;
+
 return size * nmemb;
 }
 else if (AH->gzOut)
@@ -1213,8 +1123,6 @@ _die_horribly(ArchiveHandle *AH, const char *modulename, const char *fmt, va_lis
 write_msg(NULL, "*** aborted because of error\n");
 if (AH->connection)
 PQfinish(AH->connection);
-if (AH->blobConnection)
-PQfinish(AH->blobConnection);
 }

 exit(1);
src/bin/pg_dump/pg_backup_archiver.h
@@ -17,7 +17,7 @@
 *
 *
 * IDENTIFICATION
-* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.64 2005/05/25 21:40:41 momjian Exp $
+* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.65 2005/06/21 20:45:44 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -34,7 +34,7 @@
 #include "libpq-fe.h"
 #include "pqexpbuffer.h"

-#define LOBBUFSIZE 32768
+#define LOBBUFSIZE 16384

 /*
 * Note: zlib.h must be included *after* libpq-fe.h, because the latter may
@@ -88,8 +88,6 @@ typedef z_stream *z_streamp;

 #define K_VERS_MAX (( (1 * 256 + 10) * 256 + 255) * 256 + 0)

-/* No of BLOBs to restore in 1 TX */
-#define BLOB_BATCH_SIZE 100
-
 /* Flags to indicate disposition of offsets stored in files */
 #define K_OFFSET_POS_NOT_SET 1
@@ -239,9 +237,6 @@ typedef struct _archiveHandle
 char *archdbname; /* DB name *read* from archive */
 bool requirePassword;
 PGconn *connection;
-PGconn *blobConnection; /* Connection for BLOB xref */
-int txActive; /* Flag set if TX active on connection */
-int blobTxActive; /* Flag set if TX active on blobConnection */
 int connectToDB; /* Flag to indicate if direct DB
 * connection is required */
 int pgCopyIn; /* Currently in libpq 'COPY IN' mode. */
@@ -250,7 +245,6 @@ typedef struct _archiveHandle

 int loFd; /* BLOB fd */
 int writingBlob; /* Flag */
-int createdBlobXref; /* Flag */
 int blobCount; /* # of blobs restored */

 char *fSpec; /* Archive File Spec */
src/bin/pg_dump/pg_backup_custom.c
@@ -19,7 +19,7 @@
 *
 *
 * IDENTIFICATION
-* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.30 2005/01/25 22:44:31 tgl Exp $
+* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.31 2005/06/21 20:45:44 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -314,10 +314,9 @@ _StartData(ArchiveHandle *AH, TocEntry *te)
 * called for both BLOB and TABLE data; it is the responsibility of
 * the format to manage each kind of data using StartBlob/StartData.
 *
-* It should only be called from withing a DataDumper routine.
+* It should only be called from within a DataDumper routine.
 *
 * Mandatory.
-*
 */
 static size_t
 _WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
@@ -360,7 +359,6 @@ _EndData(ArchiveHandle *AH, TocEntry *te)
 * It is called just prior to the dumper's DataDumper routine.
 *
 * Optional, but strongly recommended.
-*
 */
 static void
 _StartBlobs(ArchiveHandle *AH, TocEntry *te)
@@ -396,7 +394,6 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
 * Called by the archiver when the dumper calls EndBlob.
 *
 * Optional.
-*
 */
 static void
 _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
@@ -408,7 +405,6 @@ _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
 * Called by the archiver when finishing saving all BLOB DATA.
 *
 * Optional.
-*
 */
 static void
 _EndBlobs(ArchiveHandle *AH, TocEntry *te)
@@ -487,9 +483,6 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
 break;

 case BLK_BLOBS:
-if (!AH->connection)
-die_horribly(AH, modulename, "large objects cannot be loaded without a database connection\n");
-
 _LoadBlobs(AH);
 break;

@@ -870,7 +863,6 @@ _readBlockHeader(ArchiveHandle *AH, int *type, int *id)
 /*
 * If zlib is available, then startit up. This is called from
 * StartData & StartBlob. The buffers are setup in the Init routine.
-*
 */
 static void
 _StartDataCompressor(ArchiveHandle *AH, TocEntry *te)
src/bin/pg_dump/pg_backup_db.c
@@ -5,7 +5,7 @@
 * Implements the basic DB functions used by the archiver.
 *
 * IDENTIFICATION
-* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.61 2004/11/06 19:36:01 tgl Exp $
+* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.62 2005/06/21 20:45:44 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -32,7 +32,6 @@ static const char *modulename = gettext_noop("archiver (db)");

 static void _check_database_version(ArchiveHandle *AH, bool ignoreVersion);
 static PGconn *_connectDB(ArchiveHandle *AH, const char *newdbname, const char *newUser);
-static int _executeSqlCommand(ArchiveHandle *AH, PGconn *conn, PQExpBuffer qry, char *desc);
 static void notice_processor(void *arg, const char *message);
 static char *_sendSQLLine(ArchiveHandle *AH, char *qry, char *eos);
 static char *_sendCopyLine(ArchiveHandle *AH, char *qry, char *eos);
@@ -288,22 +287,9 @@ notice_processor(void *arg, const char *message)
 /* Public interface */
 /* Convenience function to send a query. Monitors result to handle COPY statements */
 int
-ExecuteSqlCommand(ArchiveHandle *AH, PQExpBuffer qry, char *desc, bool use_blob)
-{
-if (use_blob)
-return _executeSqlCommand(AH, AH->blobConnection, qry, desc);
-else
-return _executeSqlCommand(AH, AH->connection, qry, desc);
-}
-
-/*
-* Handle command execution. This is used to execute a command on more than one connection,
-* but the 'pgCopyIn' setting assumes the COPY commands are ONLY executed on the primary
-* setting...an error will be raised otherwise.
-*/
-static int
-_executeSqlCommand(ArchiveHandle *AH, PGconn *conn, PQExpBuffer qry, char *desc)
+ExecuteSqlCommand(ArchiveHandle *AH, PQExpBuffer qry, char *desc)
 {
+PGconn *conn = AH->connection;
 PGresult *res;
 char errStmt[DB_MAX_ERR_STMT];

@@ -316,9 +302,6 @@ _executeSqlCommand(ArchiveHandle *AH, PGconn *conn, PQExpBuffer qry, char *desc)
 {
 if (PQresultStatus(res) == PGRES_COPY_IN)
 {
-if (conn != AH->connection)
-die_horribly(AH, modulename, "COPY command executed in non-primary connection\n");
-
 AH->pgCopyIn = 1;
 }
 else
@@ -478,7 +461,7 @@ _sendSQLLine(ArchiveHandle *AH, char *qry, char *eos)
 * fprintf(stderr, " sending: '%s'\n\n",
 * AH->sqlBuf->data);
 */
-ExecuteSqlCommand(AH, AH->sqlBuf, "could not execute query", false);
+ExecuteSqlCommand(AH, AH->sqlBuf, "could not execute query");
 resetPQExpBuffer(AH->sqlBuf);
 AH->sqlparse.lastChar = '\0';

@@ -667,164 +650,6 @@ ExecuteSqlCommandBuf(ArchiveHandle *AH, void *qryv, size_t bufLen)
 return 1;
 }

-void
-FixupBlobRefs(ArchiveHandle *AH, TocEntry *te)
-{
-PQExpBuffer tblName;
-PQExpBuffer tblQry;
-PGresult *res,
-*uRes;
-int i,
-n;
-
-if (strcmp(te->tag, BLOB_XREF_TABLE) == 0)
-return;
-
-tblName = createPQExpBuffer();
-tblQry = createPQExpBuffer();
-
-if (te->namespace && strlen(te->namespace) > 0)
-appendPQExpBuffer(tblName, "%s.",
-fmtId(te->namespace));
-appendPQExpBuffer(tblName, "%s",
-fmtId(te->tag));
-
-appendPQExpBuffer(tblQry,
-"SELECT a.attname, t.typname FROM "
-"pg_catalog.pg_attribute a, pg_catalog.pg_type t "
-"WHERE a.attnum > 0 AND a.attrelid = '%s'::pg_catalog.regclass "
-"AND a.atttypid = t.oid AND t.typname in ('oid', 'lo')",
-tblName->data);
-
-res = PQexec(AH->blobConnection, tblQry->data);
-if (!res)
-die_horribly(AH, modulename, "could not find OID columns of table \"%s\": %s",
-te->tag, PQerrorMessage(AH->connection));
-
-if ((n = PQntuples(res)) == 0)
-{
-/* nothing to do */
-ahlog(AH, 1, "no OID type columns in table %s\n", te->tag);
-}
-
-for (i = 0; i < n; i++)
-{
-char *attr;
-char *typname;
-bool typeisoid;
-
-attr = PQgetvalue(res, i, 0);
-typname = PQgetvalue(res, i, 1);
-
-typeisoid = (strcmp(typname, "oid") == 0);
-
-ahlog(AH, 1, "fixing large object cross-references for %s.%s\n",
-te->tag, attr);
-
-resetPQExpBuffer(tblQry);
-
-/*
-* Note: we use explicit typename() cast style here because if we
-* are dealing with a dump from a pre-7.3 database containing LO
-* columns, the dump probably will not have CREATE CAST commands
-* for lo<->oid conversions. What it will have is functions,
-* which we will invoke as functions.
-*/
-
-/* Can't use fmtId more than once per call... */
-appendPQExpBuffer(tblQry,
-"UPDATE %s SET %s = ",
-tblName->data, fmtId(attr));
-if (typeisoid)
-appendPQExpBuffer(tblQry,
-"%s.newOid",
-BLOB_XREF_TABLE);
-else
-appendPQExpBuffer(tblQry,
-"%s(%s.newOid)",
-fmtId(typname),
-BLOB_XREF_TABLE);
-appendPQExpBuffer(tblQry,
-" FROM %s WHERE %s.oldOid = ",
-BLOB_XREF_TABLE,
-BLOB_XREF_TABLE);
-if (typeisoid)
-appendPQExpBuffer(tblQry,
-"%s.%s",
-tblName->data, fmtId(attr));
-else
-appendPQExpBuffer(tblQry,
-"oid(%s.%s)",
-tblName->data, fmtId(attr));
-
-ahlog(AH, 10, "SQL: %s\n", tblQry->data);
-
-uRes = PQexec(AH->blobConnection, tblQry->data);
-if (!uRes)
-die_horribly(AH, modulename,
-"could not update column \"%s\" of table \"%s\": %s",
-attr, te->tag, PQerrorMessage(AH->blobConnection));
-
-if (PQresultStatus(uRes) != PGRES_COMMAND_OK)
-die_horribly(AH, modulename,
-"error while updating column \"%s\" of table \"%s\": %s",
-attr, te->tag, PQerrorMessage(AH->blobConnection));
-
-PQclear(uRes);
-}
-
-PQclear(res);
-destroyPQExpBuffer(tblName);
-destroyPQExpBuffer(tblQry);
-}
-
 /**********
 * Convenient SQL calls
 **********/
-void
-CreateBlobXrefTable(ArchiveHandle *AH)
-{
-PQExpBuffer qry = createPQExpBuffer();
-
-/* IF we don't have a BLOB connection, then create one */
-if (!AH->blobConnection)
-AH->blobConnection = _connectDB(AH, NULL, NULL);
-
-ahlog(AH, 1, "creating table for large object cross-references\n");
-
-appendPQExpBuffer(qry, "CREATE TEMPORARY TABLE %s(oldOid pg_catalog.oid, newOid pg_catalog.oid) WITHOUT OIDS", BLOB_XREF_TABLE);
-ExecuteSqlCommand(AH, qry, "could not create large object cross-reference table", true);
-
-destroyPQExpBuffer(qry);
-}
-
-void
-CreateBlobXrefIndex(ArchiveHandle *AH)
-{
-PQExpBuffer qry = createPQExpBuffer();
-
-ahlog(AH, 1, "creating index for large object cross-references\n");
-
-appendPQExpBuffer(qry, "CREATE UNIQUE INDEX %s_ix ON %s(oldOid)",
-BLOB_XREF_TABLE, BLOB_XREF_TABLE);
-ExecuteSqlCommand(AH, qry, "could not create index on large object cross-reference table", true);
-
-destroyPQExpBuffer(qry);
-}
-
-void
-InsertBlobXref(ArchiveHandle *AH, Oid old, Oid new)
-{
-PQExpBuffer qry = createPQExpBuffer();
-
-appendPQExpBuffer(qry,
-"INSERT INTO %s(oldOid, newOid) VALUES ('%u', '%u')",
-BLOB_XREF_TABLE, old, new);
-ExecuteSqlCommand(AH, qry, "could not create large object cross-reference entry", true);
-
-destroyPQExpBuffer(qry);
-}
-
 void
 StartTransaction(ArchiveHandle *AH)
 {
@@ -832,22 +657,7 @@ StartTransaction(ArchiveHandle *AH)

 appendPQExpBuffer(qry, "BEGIN");

-ExecuteSqlCommand(AH, qry, "could not start database transaction", false);
-AH->txActive = true;
-
-destroyPQExpBuffer(qry);
-}
-
-void
-StartTransactionXref(ArchiveHandle *AH)
-{
-PQExpBuffer qry = createPQExpBuffer();
-
-appendPQExpBuffer(qry, "BEGIN");
-
-ExecuteSqlCommand(AH, qry,
-"could not start transaction for large object cross-references", true);
-AH->blobTxActive = true;
+ExecuteSqlCommand(AH, qry, "could not start database transaction");

 destroyPQExpBuffer(qry);
 }
@@ -859,21 +669,7 @@ CommitTransaction(ArchiveHandle *AH)

 appendPQExpBuffer(qry, "COMMIT");

-ExecuteSqlCommand(AH, qry, "could not commit database transaction", false);
-AH->txActive = false;
-
-destroyPQExpBuffer(qry);
-}
-
-void
-CommitTransactionXref(ArchiveHandle *AH)
-{
-PQExpBuffer qry = createPQExpBuffer();
-
-appendPQExpBuffer(qry, "COMMIT");
-
-ExecuteSqlCommand(AH, qry, "could not commit transaction for large object cross-references", true);
-AH->blobTxActive = false;
+ExecuteSqlCommand(AH, qry, "could not commit database transaction");

 destroyPQExpBuffer(qry);
 }
src/bin/pg_dump/pg_backup_db.h
@@ -2,19 +2,11 @@
 * Definitions for pg_backup_db.c
 *
 * IDENTIFICATION
-* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.h,v 1.10 2004/03/03 21:28:54 tgl Exp $
+* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.h,v 1.11 2005/06/21 20:45:44 tgl Exp $
 */

-#define BLOB_XREF_TABLE "pg_dump_blob_xref" /* MUST be lower case */
-
-extern void FixupBlobRefs(ArchiveHandle *AH, TocEntry *te);
-extern int ExecuteSqlCommand(ArchiveHandle *AH, PQExpBuffer qry, char *desc, bool use_blob);
+extern int ExecuteSqlCommand(ArchiveHandle *AH, PQExpBuffer qry, char *desc);
 extern int ExecuteSqlCommandBuf(ArchiveHandle *AH, void *qry, size_t bufLen);

-extern void CreateBlobXrefTable(ArchiveHandle *AH);
-extern void CreateBlobXrefIndex(ArchiveHandle *AH);
-extern void InsertBlobXref(ArchiveHandle *AH, Oid old, Oid new);
 extern void StartTransaction(ArchiveHandle *AH);
-extern void StartTransactionXref(ArchiveHandle *AH);
 extern void CommitTransaction(ArchiveHandle *AH);
-extern void CommitTransactionXref(ArchiveHandle *AH);
src/bin/pg_dump/pg_backup_files.c
@@ -20,7 +20,7 @@
 *
 *
 * IDENTIFICATION
-* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_files.c,v 1.25 2004/03/03 21:28:54 tgl Exp $
+* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_files.c,v 1.26 2005/06/21 20:45:44 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -457,7 +457,6 @@ _CloseArchive(ArchiveHandle *AH)
 * It is called just prior to the dumper's DataDumper routine.
 *
 * Optional, but strongly recommended.
-*
 */
 static void
 _StartBlobs(ArchiveHandle *AH, TocEntry *te)
@@ -516,7 +515,6 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
 * Called by the archiver when the dumper calls EndBlob.
 *
 * Optional.
-*
 */
 static void
 _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
@@ -531,7 +529,6 @@ _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
 * Called by the archiver when finishing saving all BLOB DATA.
 *
 * Optional.
-*
 */
 static void
 _EndBlobs(ArchiveHandle *AH, TocEntry *te)
@@ -543,5 +540,4 @@ _EndBlobs(ArchiveHandle *AH, TocEntry *te)

 if (fclose(ctx->blobToc) != 0)
 die_horribly(AH, modulename, "could not close large object TOC file: %s\n", strerror(errno));
-
 }
src/bin/pg_dump/pg_backup_null.c
@@ -3,7 +3,7 @@
 * pg_backup_null.c
 *
 * Implementation of an archive that is never saved; it is used by
-* pg_dump to output a plain text SQL script instead of save
+* pg_dump to output a plain text SQL script instead of saving
 * a real archive.
 *
 * See the headers to pg_restore for more details.
@@ -17,7 +17,7 @@
 *
 *
 * IDENTIFICATION
-* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_null.c,v 1.14 2003/12/08 16:39:05 tgl Exp $
+* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_null.c,v 1.15 2005/06/21 20:45:44 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -27,12 +27,21 @@

 #include <unistd.h> /* for dup */

+#include "libpq/libpq-fs.h"
+
+
 static size_t _WriteData(ArchiveHandle *AH, const void *data, size_t dLen);
+static size_t _WriteBlobData(ArchiveHandle *AH, const void *data, size_t dLen);
 static void _EndData(ArchiveHandle *AH, TocEntry *te);
 static int _WriteByte(ArchiveHandle *AH, const int i);
 static size_t _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len);
 static void _CloseArchive(ArchiveHandle *AH);
 static void _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
+static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
+static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
+static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);


 /*
 * Initializer
@@ -48,6 +57,17 @@ InitArchiveFmt_Null(ArchiveHandle *AH)
 AH->ClosePtr = _CloseArchive;
 AH->PrintTocDataPtr = _PrintTocData;

+AH->StartBlobsPtr = _StartBlobs;
+AH->StartBlobPtr = _StartBlob;
+AH->EndBlobPtr = _EndBlob;
+AH->EndBlobsPtr = _EndBlobs;
+
+/* Initialize LO buffering */
+AH->lo_buf_size = LOBBUFSIZE;
+AH->lo_buf = (void *) malloc(LOBBUFSIZE);
+if (AH->lo_buf == NULL)
+die_horribly(AH, NULL, "out of memory\n");
+
 /*
 * Now prevent reading...
 */
@@ -59,10 +79,8 @@ InitArchiveFmt_Null(ArchiveHandle *AH)
 * - Start a new TOC entry
 */

-/*------
+/*
 * Called by dumper via archiver from within a data dump routine
-* As at V1.3, this is only called for COPY FROM dfata, and BLOB data
-*------
 */
 static size_t
 _WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
@@ -72,12 +90,91 @@ _WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
 return dLen;
 }

+/*
+* Called by dumper via archiver from within a data dump routine
+* We substitute this for _WriteData while emitting a BLOB
+*/
+static size_t
+_WriteBlobData(ArchiveHandle *AH, const void *data, size_t dLen)
+{
+if (dLen > 0)
+{
+unsigned char *str;
+size_t len;
+
+str = PQescapeBytea((const unsigned char *) data, dLen, &len);
+if (!str)
+die_horribly(AH, NULL, "out of memory\n");
+
+ahprintf(AH, "SELECT lowrite(0, '%s');\n", str);
+
+free(str);
+}
+return dLen;
+}
+
 static void
 _EndData(ArchiveHandle *AH, TocEntry *te)
 {
 ahprintf(AH, "\n\n");
 }

+/*
+* Called by the archiver when starting to save all BLOB DATA (not schema).
+* This routine should save whatever format-specific information is needed
+* to read the BLOBs back into memory.
+*
+* It is called just prior to the dumper's DataDumper routine.
+*
+* Optional, but strongly recommended.
+*/
+static void
+_StartBlobs(ArchiveHandle *AH, TocEntry *te)
+{
+ahprintf(AH, "BEGIN;\n\n");
+}
+
+/*
+* Called by the archiver when the dumper calls StartBlob.
+*
+* Mandatory.
+*
+* Must save the passed OID for retrieval at restore-time.
+*/
+static void
+_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+{
+if (oid == 0)
+die_horribly(AH, NULL, "invalid OID for large object\n");
+
+ahprintf(AH, "SELECT lo_open(lo_create(%u), %d);\n", oid, INV_WRITE);
+
+AH->WriteDataPtr = _WriteBlobData;
+}
+
+/*
+* Called by the archiver when the dumper calls EndBlob.
+*
+* Optional.
+*/
+static void
+_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
+{
+AH->WriteDataPtr = _WriteData;
+ahprintf(AH, "SELECT lo_close(0);\n\n");
+}
+
+/*
+* Called by the archiver when finishing saving all BLOB DATA.
+*
+* Optional.
+*/
+static void
+_EndBlobs(ArchiveHandle *AH, TocEntry *te)
+{
+ahprintf(AH, "COMMIT;\n\n");
+}
+
 /*------
 * Called as part of a RestoreArchive call; for the NULL archive, this
 * just sends the data for a given TOC entry to the output.
@@ -89,7 +186,15 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
 if (te->dataDumper)
 {
 AH->currToc = te;
+
+if (strcmp(te->desc, "BLOBS") == 0)
+_StartBlobs(AH, te);
+
 (*te->dataDumper) ((Archive *) AH, te->dataDumperArg);
+
+if (strcmp(te->desc, "BLOBS") == 0)
+_EndBlobs(AH, te);
+
 AH->currToc = NULL;
 }
 }
src/bin/pg_dump/pg_dump.c
@@ -12,7 +12,7 @@
 * by PostgreSQL
 *
 * IDENTIFICATION
-* $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.409 2005/06/07 14:04:48 tgl Exp $
+* $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.410 2005/06/21 20:45:44 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -195,7 +195,7 @@ main(int argc, char **argv)
 int plainText = 0;
 int outputClean = 0;
 int outputCreate = 0;
-int outputBlobs = 0;
+bool outputBlobs = true;
 int outputNoOwner = 0;
 static int use_setsessauth = 0;
 static int disable_triggers = 0;
@@ -258,10 +258,7 @@ main(int argc, char **argv)

 /* Set default options based on progname */
 if (strcmp(progname, "pg_backup") == 0)
-{
 format = "c";
-outputBlobs = true;
-}

 if (argc > 1)
 {
@@ -287,7 +284,7 @@ main(int argc, char **argv)
 break;

 case 'b': /* Dump blobs */
-outputBlobs = true;
+/* this is now default, so just ignore the switch */
 break;

 case 'c': /* clean (i.e., drop) schema prior to
@@ -442,19 +439,8 @@ main(int argc, char **argv)
 exit(1);
 }

-if (outputBlobs && selectTableName != NULL)
-{
-write_msg(NULL, "large-object output not supported for a single table\n");
-write_msg(NULL, "use a full dump instead\n");
-exit(1);
-}
-
-if (outputBlobs && selectSchemaName != NULL)
-{
-write_msg(NULL, "large-object output not supported for a single schema\n");
-write_msg(NULL, "use a full dump instead\n");
-exit(1);
-}
+if (selectTableName != NULL || selectSchemaName != NULL)
+outputBlobs = false;

 if (dumpInserts == true && oids == true)
 {
@@ -463,13 +449,6 @@ main(int argc, char **argv)
 exit(1);
 }

-if (outputBlobs == true && (format[0] == 'p' || format[0] == 'P'))
-{
-write_msg(NULL, "large-object output is not supported for plain-text dump files\n");
-write_msg(NULL, "(Use a different output format.)\n");
-exit(1);
-}
-
 /* open the output file */
 switch (format[0])
 {
@@ -670,7 +649,6 @@ help(const char *progname)

 printf(_("\nOptions controlling the output content:\n"));
 printf(_(" -a, --data-only dump only the data, not the schema\n"));
-printf(_(" -b, --blobs include large objects in dump\n"));
 printf(_(" -c, --clean clean (drop) schema prior to create\n"));
 printf(_(" -C, --create include commands to create database in dump\n"));
 printf(_(" -d, --inserts dump data as INSERT, rather than COPY, commands\n"));
@@ -1340,10 +1318,6 @@ dumpEncoding(Archive *AH)
 * dump all blobs
 *
 */
-
-#define loBufSize 16384
-#define loFetchSize 1000
-
 static int
 dumpBlobs(Archive *AH, void *arg)
 {
@@ -1352,7 +1326,7 @@ dumpBlobs(Archive *AH, void *arg)
 PGresult *res;
 int i;
 int loFd;
-char buf[loBufSize];
+char buf[LOBBUFSIZE];
 int cnt;
 Oid blobOid;

@@ -1372,13 +1346,13 @@ dumpBlobs(Archive *AH, void *arg)
 check_sql_result(res, g_conn, oidQry->data, PGRES_COMMAND_OK);

 /* Fetch for cursor */
-appendPQExpBuffer(oidFetchQry, "FETCH %d IN bloboid", loFetchSize);
+appendPQExpBuffer(oidFetchQry, "FETCH 1000 IN bloboid");

 do
 {
-/* Do a fetch */
 PQclear(res);

+/* Do a fetch */
 res = PQexec(g_conn, oidFetchQry->data);
 check_sql_result(res, g_conn, oidFetchQry->data, PGRES_TUPLES_OK);

@@ -1400,7 +1374,7 @@ dumpBlobs(Archive *AH, void *arg)
 /* Now read it in chunks, sending data to archive */
 do
 {
-cnt = lo_read(g_conn, loFd, buf, loBufSize);
+cnt = lo_read(g_conn, loFd, buf, LOBBUFSIZE);
 if (cnt < 0)
 {
 write_msg(NULL, "dumpBlobs(): error reading large object: %s",
@@ -1409,16 +1383,16 @@ dumpBlobs(Archive *AH, void *arg)
 }

 WriteData(AH, buf, cnt);
-
 } while (cnt > 0);

 lo_close(g_conn, loFd);

 EndBlob(AH, blobOid);
-
 }
 } while (PQntuples(res) > 0);

 PQclear(res);

 destroyPQExpBuffer(oidQry);
 destroyPQExpBuffer(oidFetchQry);