pg_upgrade: Move all the files generated internally to a subdirectory
Historically, the location of any files generated by pg_upgrade, such as the per-database logs and internal dumps, has been the current working directory, leaving all those files behind when using --retain or after a failure. Putting all this content in a dedicated subdirectory makes the whole process easier to debug, and simplifies the code in charge of cleaning up the logs. Another reason is that this facilitates the move of pg_upgrade to TAP tests, with a fixed location for all the logs to grab if the test fails repeatedly. Initially, we thought about being able to specify the output directory with a new option, but we have settled on using a subdirectory located at the root of the new cluster's data folder, "pg_upgrade_output.d", instead, as in the end the new data directory is the location of all the data generated by pg_upgrade. There is a caveat with group permissions here, though: if the new data folder has been initialized with this option, we need to create all the files and paths with the correct permissions, or a base backup taken after a pg_upgrade --retain would fail. This means that GetDataDirectoryCreatePerm() has to be called before creating the log paths, before a couple of sanity checks on the clusters, and before getting the socket directory for the cluster's host settings. The idea of the new location is based on a suggestion from Peter Eisentraut. Also thanks to Andrew Dunstan, Peter Eisentraut, Daniel Gustafsson, Tom Lane and Bruce Momjian for the discussion (in alphabetical order). Author: Justin Pryzby Discussion: https://postgr.es/m/20211212025017.GN17618@telsasoft.com
This commit is contained in:
parent
cbadfc1f8a
commit
38bfae3652
@ -768,8 +768,8 @@ psql --username=postgres --file=script.sql postgres
|
|||||||
|
|
||||||
<para>
|
<para>
|
||||||
<application>pg_upgrade</application> creates various working files, such
|
<application>pg_upgrade</application> creates various working files, such
|
||||||
as schema dumps, in the current working directory. For security, be sure
|
as schema dumps, stored within <literal>pg_upgrade_output.d</literal> in
|
||||||
that that directory is not readable or writable by any other users.
|
the directory of the new cluster.
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
|
2
src/bin/pg_upgrade/.gitignore
vendored
2
src/bin/pg_upgrade/.gitignore
vendored
@ -1,9 +1,7 @@
|
|||||||
/pg_upgrade
|
/pg_upgrade
|
||||||
# Generated by test suite
|
# Generated by test suite
|
||||||
/pg_upgrade_internal.log
|
|
||||||
/delete_old_cluster.sh
|
/delete_old_cluster.sh
|
||||||
/delete_old_cluster.bat
|
/delete_old_cluster.bat
|
||||||
/reindex_hash.sql
|
/reindex_hash.sql
|
||||||
/loadable_libraries.txt
|
|
||||||
/log/
|
/log/
|
||||||
/tmp_check/
|
/tmp_check/
|
||||||
|
@ -45,9 +45,7 @@ uninstall:
|
|||||||
clean distclean maintainer-clean:
|
clean distclean maintainer-clean:
|
||||||
rm -f pg_upgrade$(X) $(OBJS)
|
rm -f pg_upgrade$(X) $(OBJS)
|
||||||
rm -rf delete_old_cluster.sh log/ tmp_check/ \
|
rm -rf delete_old_cluster.sh log/ tmp_check/ \
|
||||||
loadable_libraries.txt reindex_hash.sql \
|
reindex_hash.sql
|
||||||
pg_upgrade_dump_globals.sql \
|
|
||||||
pg_upgrade_dump_*.custom pg_upgrade_*.log
|
|
||||||
|
|
||||||
# When $(MAKE) is present, make automatically infers that this is a
|
# When $(MAKE) is present, make automatically infers that this is a
|
||||||
# recursive make, which is not actually what we want here, as that
|
# recursive make, which is not actually what we want here, as that
|
||||||
|
@ -783,7 +783,8 @@ check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster)
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
snprintf(output_path, sizeof(output_path),
|
snprintf(output_path, sizeof(output_path), "%s/%s",
|
||||||
|
log_opts.basedir,
|
||||||
"contrib_isn_and_int8_pass_by_value.txt");
|
"contrib_isn_and_int8_pass_by_value.txt");
|
||||||
|
|
||||||
for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
|
for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
|
||||||
@ -860,7 +861,8 @@ check_for_user_defined_postfix_ops(ClusterInfo *cluster)
|
|||||||
|
|
||||||
prep_status("Checking for user-defined postfix operators");
|
prep_status("Checking for user-defined postfix operators");
|
||||||
|
|
||||||
snprintf(output_path, sizeof(output_path),
|
snprintf(output_path, sizeof(output_path), "%s/%s",
|
||||||
|
log_opts.basedir,
|
||||||
"postfix_ops.txt");
|
"postfix_ops.txt");
|
||||||
|
|
||||||
/* Find any user defined postfix operators */
|
/* Find any user defined postfix operators */
|
||||||
@ -959,7 +961,8 @@ check_for_tables_with_oids(ClusterInfo *cluster)
|
|||||||
|
|
||||||
prep_status("Checking for tables WITH OIDS");
|
prep_status("Checking for tables WITH OIDS");
|
||||||
|
|
||||||
snprintf(output_path, sizeof(output_path),
|
snprintf(output_path, sizeof(output_path), "%s/%s",
|
||||||
|
log_opts.basedir,
|
||||||
"tables_with_oids.txt");
|
"tables_with_oids.txt");
|
||||||
|
|
||||||
/* Find any tables declared WITH OIDS */
|
/* Find any tables declared WITH OIDS */
|
||||||
@ -1214,7 +1217,8 @@ check_for_user_defined_encoding_conversions(ClusterInfo *cluster)
|
|||||||
|
|
||||||
prep_status("Checking for user-defined encoding conversions");
|
prep_status("Checking for user-defined encoding conversions");
|
||||||
|
|
||||||
snprintf(output_path, sizeof(output_path),
|
snprintf(output_path, sizeof(output_path), "%s/%s",
|
||||||
|
log_opts.basedir,
|
||||||
"encoding_conversions.txt");
|
"encoding_conversions.txt");
|
||||||
|
|
||||||
/* Find any user defined encoding conversions */
|
/* Find any user defined encoding conversions */
|
||||||
|
@ -22,9 +22,10 @@ generate_old_dump(void)
|
|||||||
/* run new pg_dumpall binary for globals */
|
/* run new pg_dumpall binary for globals */
|
||||||
exec_prog(UTILITY_LOG_FILE, NULL, true, true,
|
exec_prog(UTILITY_LOG_FILE, NULL, true, true,
|
||||||
"\"%s/pg_dumpall\" %s --globals-only --quote-all-identifiers "
|
"\"%s/pg_dumpall\" %s --globals-only --quote-all-identifiers "
|
||||||
"--binary-upgrade %s -f %s",
|
"--binary-upgrade %s -f \"%s/%s\"",
|
||||||
new_cluster.bindir, cluster_conn_opts(&old_cluster),
|
new_cluster.bindir, cluster_conn_opts(&old_cluster),
|
||||||
log_opts.verbose ? "--verbose" : "",
|
log_opts.verbose ? "--verbose" : "",
|
||||||
|
log_opts.dumpdir,
|
||||||
GLOBALS_DUMP_FILE);
|
GLOBALS_DUMP_FILE);
|
||||||
check_ok();
|
check_ok();
|
||||||
|
|
||||||
@ -52,9 +53,10 @@ generate_old_dump(void)
|
|||||||
|
|
||||||
parallel_exec_prog(log_file_name, NULL,
|
parallel_exec_prog(log_file_name, NULL,
|
||||||
"\"%s/pg_dump\" %s --schema-only --quote-all-identifiers "
|
"\"%s/pg_dump\" %s --schema-only --quote-all-identifiers "
|
||||||
"--binary-upgrade --format=custom %s --file=\"%s\" %s",
|
"--binary-upgrade --format=custom %s --file=\"%s/%s\" %s",
|
||||||
new_cluster.bindir, cluster_conn_opts(&old_cluster),
|
new_cluster.bindir, cluster_conn_opts(&old_cluster),
|
||||||
log_opts.verbose ? "--verbose" : "",
|
log_opts.verbose ? "--verbose" : "",
|
||||||
|
log_opts.dumpdir,
|
||||||
sql_file_name, escaped_connstr.data);
|
sql_file_name, escaped_connstr.data);
|
||||||
|
|
||||||
termPQExpBuffer(&escaped_connstr);
|
termPQExpBuffer(&escaped_connstr);
|
||||||
|
@ -78,11 +78,12 @@ get_bin_version(ClusterInfo *cluster)
|
|||||||
* The code requires it be called first from the primary thread on Windows.
|
* The code requires it be called first from the primary thread on Windows.
|
||||||
*/
|
*/
|
||||||
bool
|
bool
|
||||||
exec_prog(const char *log_file, const char *opt_log_file,
|
exec_prog(const char *log_filename, const char *opt_log_file,
|
||||||
bool report_error, bool exit_on_error, const char *fmt,...)
|
bool report_error, bool exit_on_error, const char *fmt,...)
|
||||||
{
|
{
|
||||||
int result = 0;
|
int result = 0;
|
||||||
int written;
|
int written;
|
||||||
|
char log_file[MAXPGPATH];
|
||||||
|
|
||||||
#define MAXCMDLEN (2 * MAXPGPATH)
|
#define MAXCMDLEN (2 * MAXPGPATH)
|
||||||
char cmd[MAXCMDLEN];
|
char cmd[MAXCMDLEN];
|
||||||
@ -97,6 +98,8 @@ exec_prog(const char *log_file, const char *opt_log_file,
|
|||||||
mainThreadId = GetCurrentThreadId();
|
mainThreadId = GetCurrentThreadId();
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
snprintf(log_file, MAXPGPATH, "%s/%s", log_opts.logdir, log_filename);
|
||||||
|
|
||||||
written = 0;
|
written = 0;
|
||||||
va_start(ap, fmt);
|
va_start(ap, fmt);
|
||||||
written += vsnprintf(cmd + written, MAXCMDLEN - written, fmt, ap);
|
written += vsnprintf(cmd + written, MAXCMDLEN - written, fmt, ap);
|
||||||
|
@ -128,7 +128,8 @@ check_loadable_libraries(void)
|
|||||||
|
|
||||||
prep_status("Checking for presence of required libraries");
|
prep_status("Checking for presence of required libraries");
|
||||||
|
|
||||||
snprintf(output_path, sizeof(output_path), "loadable_libraries.txt");
|
snprintf(output_path, sizeof(output_path), "%s/%s",
|
||||||
|
log_opts.basedir, "loadable_libraries.txt");
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Now we want to sort the library names into order. This avoids multiple
|
* Now we want to sort the library names into order. This avoids multiple
|
||||||
|
@ -9,7 +9,6 @@
|
|||||||
|
|
||||||
#include "postgres_fe.h"
|
#include "postgres_fe.h"
|
||||||
|
|
||||||
#include <time.h>
|
|
||||||
#ifdef WIN32
|
#ifdef WIN32
|
||||||
#include <io.h>
|
#include <io.h>
|
||||||
#endif
|
#endif
|
||||||
@ -63,9 +62,6 @@ parseCommandLine(int argc, char *argv[])
|
|||||||
int option; /* Command line option */
|
int option; /* Command line option */
|
||||||
int optindex = 0; /* used by getopt_long */
|
int optindex = 0; /* used by getopt_long */
|
||||||
int os_user_effective_id;
|
int os_user_effective_id;
|
||||||
FILE *fp;
|
|
||||||
char **filename;
|
|
||||||
time_t run_time = time(NULL);
|
|
||||||
|
|
||||||
user_opts.do_sync = true;
|
user_opts.do_sync = true;
|
||||||
user_opts.transfer_mode = TRANSFER_MODE_COPY;
|
user_opts.transfer_mode = TRANSFER_MODE_COPY;
|
||||||
@ -208,27 +204,9 @@ parseCommandLine(int argc, char *argv[])
|
|||||||
if (optind < argc)
|
if (optind < argc)
|
||||||
pg_fatal("too many command-line arguments (first is \"%s\")\n", argv[optind]);
|
pg_fatal("too many command-line arguments (first is \"%s\")\n", argv[optind]);
|
||||||
|
|
||||||
if ((log_opts.internal = fopen_priv(INTERNAL_LOG_FILE, "a")) == NULL)
|
|
||||||
pg_fatal("could not open log file \"%s\": %m\n", INTERNAL_LOG_FILE);
|
|
||||||
|
|
||||||
if (log_opts.verbose)
|
if (log_opts.verbose)
|
||||||
pg_log(PG_REPORT, "Running in verbose mode\n");
|
pg_log(PG_REPORT, "Running in verbose mode\n");
|
||||||
|
|
||||||
/* label start of upgrade in logfiles */
|
|
||||||
for (filename = output_files; *filename != NULL; filename++)
|
|
||||||
{
|
|
||||||
if ((fp = fopen_priv(*filename, "a")) == NULL)
|
|
||||||
pg_fatal("could not write to log file \"%s\": %m\n", *filename);
|
|
||||||
|
|
||||||
/* Start with newline because we might be appending to a file. */
|
|
||||||
fprintf(fp, "\n"
|
|
||||||
"-----------------------------------------------------------------\n"
|
|
||||||
" pg_upgrade run on %s"
|
|
||||||
"-----------------------------------------------------------------\n\n",
|
|
||||||
ctime(&run_time));
|
|
||||||
fclose(fp);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Turn off read-only mode; add prefix to PGOPTIONS? */
|
/* Turn off read-only mode; add prefix to PGOPTIONS? */
|
||||||
if (getenv("PGOPTIONS"))
|
if (getenv("PGOPTIONS"))
|
||||||
{
|
{
|
||||||
|
@ -38,6 +38,8 @@
|
|||||||
|
|
||||||
#include "postgres_fe.h"
|
#include "postgres_fe.h"
|
||||||
|
|
||||||
|
#include <time.h>
|
||||||
|
|
||||||
#ifdef HAVE_LANGINFO_H
|
#ifdef HAVE_LANGINFO_H
|
||||||
#include <langinfo.h>
|
#include <langinfo.h>
|
||||||
#endif
|
#endif
|
||||||
@ -54,6 +56,7 @@ static void prepare_new_globals(void);
|
|||||||
static void create_new_objects(void);
|
static void create_new_objects(void);
|
||||||
static void copy_xact_xlog_xid(void);
|
static void copy_xact_xlog_xid(void);
|
||||||
static void set_frozenxids(bool minmxid_only);
|
static void set_frozenxids(bool minmxid_only);
|
||||||
|
static void make_outputdirs(char *pgdata);
|
||||||
static void setup(char *argv0, bool *live_check);
|
static void setup(char *argv0, bool *live_check);
|
||||||
static void cleanup(void);
|
static void cleanup(void);
|
||||||
|
|
||||||
@ -92,6 +95,22 @@ main(int argc, char **argv)
|
|||||||
adjust_data_dir(&old_cluster);
|
adjust_data_dir(&old_cluster);
|
||||||
adjust_data_dir(&new_cluster);
|
adjust_data_dir(&new_cluster);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Set mask based on PGDATA permissions, needed for the creation of the
|
||||||
|
* output directories with correct permissions.
|
||||||
|
*/
|
||||||
|
if (!GetDataDirectoryCreatePerm(new_cluster.pgdata))
|
||||||
|
pg_fatal("could not read permissions of directory \"%s\": %s\n",
|
||||||
|
new_cluster.pgdata, strerror(errno));
|
||||||
|
|
||||||
|
umask(pg_mode_mask);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This needs to happen after adjusting the data directory of the new
|
||||||
|
* cluster in adjust_data_dir().
|
||||||
|
*/
|
||||||
|
make_outputdirs(new_cluster.pgdata);
|
||||||
|
|
||||||
setup(argv[0], &live_check);
|
setup(argv[0], &live_check);
|
||||||
|
|
||||||
output_check_banner(live_check);
|
output_check_banner(live_check);
|
||||||
@ -103,13 +122,6 @@ main(int argc, char **argv)
|
|||||||
|
|
||||||
check_cluster_compatibility(live_check);
|
check_cluster_compatibility(live_check);
|
||||||
|
|
||||||
/* Set mask based on PGDATA permissions */
|
|
||||||
if (!GetDataDirectoryCreatePerm(new_cluster.pgdata))
|
|
||||||
pg_fatal("could not read permissions of directory \"%s\": %s\n",
|
|
||||||
new_cluster.pgdata, strerror(errno));
|
|
||||||
|
|
||||||
umask(pg_mode_mask);
|
|
||||||
|
|
||||||
check_and_dump_old_cluster(live_check);
|
check_and_dump_old_cluster(live_check);
|
||||||
|
|
||||||
|
|
||||||
@ -197,6 +209,56 @@ main(int argc, char **argv)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Create and assign proper permissions to the set of output directories
|
||||||
|
* used to store any data generated internally, filling in log_opts in
|
||||||
|
* the process.
|
||||||
|
*/
|
||||||
|
static void
|
||||||
|
make_outputdirs(char *pgdata)
|
||||||
|
{
|
||||||
|
FILE *fp;
|
||||||
|
char **filename;
|
||||||
|
time_t run_time = time(NULL);
|
||||||
|
char filename_path[MAXPGPATH];
|
||||||
|
|
||||||
|
log_opts.basedir = (char *) pg_malloc(MAXPGPATH);
|
||||||
|
snprintf(log_opts.basedir, MAXPGPATH, "%s/%s", pgdata, BASE_OUTPUTDIR);
|
||||||
|
log_opts.dumpdir = (char *) pg_malloc(MAXPGPATH);
|
||||||
|
snprintf(log_opts.dumpdir, MAXPGPATH, "%s/%s", pgdata, LOG_OUTPUTDIR);
|
||||||
|
log_opts.logdir = (char *) pg_malloc(MAXPGPATH);
|
||||||
|
snprintf(log_opts.logdir, MAXPGPATH, "%s/%s", pgdata, DUMP_OUTPUTDIR);
|
||||||
|
|
||||||
|
if (mkdir(log_opts.basedir, pg_dir_create_mode))
|
||||||
|
pg_fatal("could not create directory \"%s\": %m\n", log_opts.basedir);
|
||||||
|
if (mkdir(log_opts.dumpdir, pg_dir_create_mode))
|
||||||
|
pg_fatal("could not create directory \"%s\": %m\n", log_opts.dumpdir);
|
||||||
|
if (mkdir(log_opts.logdir, pg_dir_create_mode))
|
||||||
|
pg_fatal("could not create directory \"%s\": %m\n", log_opts.logdir);
|
||||||
|
|
||||||
|
snprintf(filename_path, sizeof(filename_path), "%s/%s", log_opts.logdir,
|
||||||
|
INTERNAL_LOG_FILE);
|
||||||
|
if ((log_opts.internal = fopen_priv(filename_path, "a")) == NULL)
|
||||||
|
pg_fatal("could not open log file \"%s\": %m\n", filename_path);
|
||||||
|
|
||||||
|
/* label start of upgrade in logfiles */
|
||||||
|
for (filename = output_files; *filename != NULL; filename++)
|
||||||
|
{
|
||||||
|
snprintf(filename_path, sizeof(filename_path), "%s/%s",
|
||||||
|
log_opts.logdir, *filename);
|
||||||
|
if ((fp = fopen_priv(filename_path, "a")) == NULL)
|
||||||
|
pg_fatal("could not write to log file \"%s\": %m\n", filename_path);
|
||||||
|
|
||||||
|
/* Start with newline because we might be appending to a file. */
|
||||||
|
fprintf(fp, "\n"
|
||||||
|
"-----------------------------------------------------------------\n"
|
||||||
|
" pg_upgrade run on %s"
|
||||||
|
"-----------------------------------------------------------------\n\n",
|
||||||
|
ctime(&run_time));
|
||||||
|
fclose(fp);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
static void
|
static void
|
||||||
setup(char *argv0, bool *live_check)
|
setup(char *argv0, bool *live_check)
|
||||||
@ -306,8 +368,9 @@ prepare_new_globals(void)
|
|||||||
prep_status("Restoring global objects in the new cluster");
|
prep_status("Restoring global objects in the new cluster");
|
||||||
|
|
||||||
exec_prog(UTILITY_LOG_FILE, NULL, true, true,
|
exec_prog(UTILITY_LOG_FILE, NULL, true, true,
|
||||||
"\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
|
"\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s/%s\"",
|
||||||
new_cluster.bindir, cluster_conn_opts(&new_cluster),
|
new_cluster.bindir, cluster_conn_opts(&new_cluster),
|
||||||
|
log_opts.dumpdir,
|
||||||
GLOBALS_DUMP_FILE);
|
GLOBALS_DUMP_FILE);
|
||||||
check_ok();
|
check_ok();
|
||||||
}
|
}
|
||||||
@ -352,10 +415,11 @@ create_new_objects(void)
|
|||||||
true,
|
true,
|
||||||
true,
|
true,
|
||||||
"\"%s/pg_restore\" %s %s --exit-on-error --verbose "
|
"\"%s/pg_restore\" %s %s --exit-on-error --verbose "
|
||||||
"--dbname postgres \"%s\"",
|
"--dbname postgres \"%s/%s\"",
|
||||||
new_cluster.bindir,
|
new_cluster.bindir,
|
||||||
cluster_conn_opts(&new_cluster),
|
cluster_conn_opts(&new_cluster),
|
||||||
create_opts,
|
create_opts,
|
||||||
|
log_opts.dumpdir,
|
||||||
sql_file_name);
|
sql_file_name);
|
||||||
|
|
||||||
break; /* done once we've processed template1 */
|
break; /* done once we've processed template1 */
|
||||||
@ -389,10 +453,11 @@ create_new_objects(void)
|
|||||||
parallel_exec_prog(log_file_name,
|
parallel_exec_prog(log_file_name,
|
||||||
NULL,
|
NULL,
|
||||||
"\"%s/pg_restore\" %s %s --exit-on-error --verbose "
|
"\"%s/pg_restore\" %s %s --exit-on-error --verbose "
|
||||||
"--dbname template1 \"%s\"",
|
"--dbname template1 \"%s/%s\"",
|
||||||
new_cluster.bindir,
|
new_cluster.bindir,
|
||||||
cluster_conn_opts(&new_cluster),
|
cluster_conn_opts(&new_cluster),
|
||||||
create_opts,
|
create_opts,
|
||||||
|
log_opts.dumpdir,
|
||||||
sql_file_name);
|
sql_file_name);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -689,28 +754,5 @@ cleanup(void)
|
|||||||
|
|
||||||
/* Remove dump and log files? */
|
/* Remove dump and log files? */
|
||||||
if (!log_opts.retain)
|
if (!log_opts.retain)
|
||||||
{
|
rmtree(log_opts.basedir, true);
|
||||||
int dbnum;
|
|
||||||
char **filename;
|
|
||||||
|
|
||||||
for (filename = output_files; *filename != NULL; filename++)
|
|
||||||
unlink(*filename);
|
|
||||||
|
|
||||||
/* remove dump files */
|
|
||||||
unlink(GLOBALS_DUMP_FILE);
|
|
||||||
|
|
||||||
if (old_cluster.dbarr.dbs)
|
|
||||||
for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
|
|
||||||
{
|
|
||||||
char sql_file_name[MAXPGPATH],
|
|
||||||
log_file_name[MAXPGPATH];
|
|
||||||
DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
|
|
||||||
|
|
||||||
snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
|
|
||||||
unlink(sql_file_name);
|
|
||||||
|
|
||||||
snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
|
|
||||||
unlink(log_file_name);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
@ -26,6 +26,14 @@
|
|||||||
#define GLOBALS_DUMP_FILE "pg_upgrade_dump_globals.sql"
|
#define GLOBALS_DUMP_FILE "pg_upgrade_dump_globals.sql"
|
||||||
#define DB_DUMP_FILE_MASK "pg_upgrade_dump_%u.custom"
|
#define DB_DUMP_FILE_MASK "pg_upgrade_dump_%u.custom"
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Base directories that include all the files generated internally,
|
||||||
|
* from the root path of the new cluster.
|
||||||
|
*/
|
||||||
|
#define BASE_OUTPUTDIR "pg_upgrade_output.d"
|
||||||
|
#define LOG_OUTPUTDIR BASE_OUTPUTDIR "/log"
|
||||||
|
#define DUMP_OUTPUTDIR BASE_OUTPUTDIR "/dump"
|
||||||
|
|
||||||
#define DB_DUMP_LOG_FILE_MASK "pg_upgrade_dump_%u.log"
|
#define DB_DUMP_LOG_FILE_MASK "pg_upgrade_dump_%u.log"
|
||||||
#define SERVER_LOG_FILE "pg_upgrade_server.log"
|
#define SERVER_LOG_FILE "pg_upgrade_server.log"
|
||||||
#define UTILITY_LOG_FILE "pg_upgrade_utility.log"
|
#define UTILITY_LOG_FILE "pg_upgrade_utility.log"
|
||||||
@ -262,6 +270,10 @@ typedef struct
|
|||||||
FILE *internal; /* internal log FILE */
|
FILE *internal; /* internal log FILE */
|
||||||
bool verbose; /* true -> be verbose in messages */
|
bool verbose; /* true -> be verbose in messages */
|
||||||
bool retain; /* retain log files on success */
|
bool retain; /* retain log files on success */
|
||||||
|
/* Set of internal directories for output files */
|
||||||
|
char *basedir; /* Base output directory */
|
||||||
|
char *dumpdir; /* Dumps */
|
||||||
|
char *logdir; /* Log files */
|
||||||
} LogOpts;
|
} LogOpts;
|
||||||
|
|
||||||
|
|
||||||
|
@ -238,8 +238,10 @@ start_postmaster(ClusterInfo *cluster, bool report_and_exit_on_error)
|
|||||||
* vacuumdb --freeze actually freezes the tuples.
|
* vacuumdb --freeze actually freezes the tuples.
|
||||||
*/
|
*/
|
||||||
snprintf(cmd, sizeof(cmd),
|
snprintf(cmd, sizeof(cmd),
|
||||||
"\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" -o \"-p %d -b%s %s%s\" start",
|
"\"%s/pg_ctl\" -w -l \"%s/%s\" -D \"%s\" -o \"-p %d -b%s %s%s\" start",
|
||||||
cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
|
cluster->bindir,
|
||||||
|
log_opts.logdir,
|
||||||
|
SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
|
||||||
(cluster == &new_cluster) ?
|
(cluster == &new_cluster) ?
|
||||||
" -c synchronous_commit=off -c fsync=off -c full_page_writes=off -c vacuum_defer_cleanup_age=0" : "",
|
" -c synchronous_commit=off -c fsync=off -c full_page_writes=off -c vacuum_defer_cleanup_age=0" : "",
|
||||||
cluster->pgopts ? cluster->pgopts : "", socket_string);
|
cluster->pgopts ? cluster->pgopts : "", socket_string);
|
||||||
|
Loading…
x
Reference in New Issue
Block a user