Commit to match discussed elog() changes.  Only update is that LOG is
now just below FATAL in server_min_messages.  Added more text to
highlight the ordering difference between it and client_min_messages.

---------------------------------------------------------------------------

REALLYFATAL => PANIC
STOP => PANIC
New INFO level that prints to the client by default
New LOG level that prints to the server log by default
Cause VACUUM information to print only to the client
NOTICE => INFO where purely informational messages are sent
DEBUG => LOG for purely server status messages
DEBUG removed, kept as backward compatible
DEBUG5, DEBUG4, DEBUG3, DEBUG2, DEBUG1 added
DebugLvl removed in favor of new DEBUG[1-5] symbols
New server_min_messages GUC parameter with values:
    DEBUG[5-1], INFO, NOTICE, ERROR, LOG, FATAL, PANIC
New client_min_messages GUC parameter with values:
    DEBUG[5-1], LOG, INFO, NOTICE, ERROR, FATAL, PANIC
Server startup now logged with LOG instead of DEBUG
Remove debug_level GUC parameter
elog() numbers now start at 10
Add test to print error message if older elog() values are passed to elog()
Bootstrap mode now has a -d that requires an argument, like postmaster
This commit is contained in:
parent 8d8aa931ef    commit a033daf566
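
For orientation, most of the diff below is a mechanical rename of elog() severity
levels (REALLYFATAL and STOP become PANIC; DEBUG-level server status output becomes
LOG; purely informational NOTICE uses become INFO).  The standalone C sketch below is
not part of the commit; the table and helper names are invented here purely to
summarize that mapping from the commit message above.

#include <stdio.h>
#include <string.h>

/*
 * Illustrative summary of the renames applied throughout this diff.
 * Not part of the commit; the table and helper are invented for this sketch.
 */
struct level_rename {
    const char *old_name;
    const char *new_name;
};

static const struct level_rename renames[] = {
    { "REALLYFATAL", "PANIC" },  /* unconditional-shutdown levels collapse into PANIC */
    { "STOP",        "PANIC" },
    { "DEBUG",       "LOG"   },  /* server status messages */
    { "NOTICE",      "INFO"  },  /* purely informational client messages */
};

static const char *new_level(const char *old_name)
{
    int i;

    for (i = 0; i < (int) (sizeof(renames) / sizeof(renames[0])); i++)
        if (strcmp(renames[i].old_name, old_name) == 0)
            return renames[i].new_name;
    return old_name;             /* other level names are unchanged */
}

int main(void)
{
    printf("elog(STOP, ...)  is now elog(%s, ...)\n", new_level("STOP"));
    printf("elog(DEBUG, ...) is now elog(%s, ...)\n", new_level("DEBUG"));
    return 0;
}
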
@@ -1,5 +1,5 @@
<!--
$Header: /cvsroot/pgsql/doc/src/sgml/ref/postgres-ref.sgml,v 1.23 2001/12/08 03:24:38 thomas Exp $
$Header: /cvsroot/pgsql/doc/src/sgml/ref/postgres-ref.sgml,v 1.24 2002/03/02 21:39:16 momjian Exp $
PostgreSQL documentation
-->

@@ -141,8 +141,10 @@ PostgreSQL documentation
<para>
The options <option>-A</option>, <option>-B</option>,
<option>-c</option>, <option>-d</option>, <option>-D</option>,
<option>-F</option>, and <option>--name</> have the same meanings as
for the <xref linkend="app-postmaster">.
<option>-F</option>, and <option>--name</> have the same meanings
as the <xref linkend="app-postmaster"> except that
<option>-d</option> <literal>0</> prevents the debugging level of
the postmaster from being propogated to the backend.
</para>

<variablelist>

@@ -1,5 +1,5 @@
<!--
$Header: /cvsroot/pgsql/doc/src/sgml/ref/postmaster.sgml,v 1.26 2001/12/08 03:24:38 thomas Exp $
$Header: /cvsroot/pgsql/doc/src/sgml/ref/postmaster.sgml,v 1.27 2002/03/02 21:39:16 momjian Exp $
PostgreSQL documentation
-->

@@ -127,9 +127,8 @@ PostgreSQL documentation
<listitem>
<para>
Sets the debug level. The higher this value is set, the more
debugging output is written to the server log. The default is
0, which means no debugging. Values up to 4 are useful; higher
numbers produce no additional output.
debugging output is written to the server log. Values are from
1 to 5.
</para>
</listitem>
</varlistentry>

@@ -1,5 +1,5 @@
<!--
$Header: /cvsroot/pgsql/doc/src/sgml/runtime.sgml,v 1.104 2002/03/01 22:45:05 petere Exp $
$Header: /cvsroot/pgsql/doc/src/sgml/runtime.sgml,v 1.105 2002/03/02 21:39:15 momjian Exp $
-->

<Chapter Id="runtime">

@@ -813,6 +813,38 @@ env PGOPTIONS='-c geqo=off' psql

<para>
<variablelist>
<varlistentry>
<term><varname>SERVER_MIN_MESSAGES</varname> (<type>string</type>)</term>
<listitem>
<para>
This controls how much detail is written to the server logs. The
default is <literal>NOTICE</>. Valid values are <literal>DEBUG5</>,
<literal>DEBUG4</>, <literal>DEBUG3</>, <literal>DEBUG2</>,
<literal>DEBUG1</>, <literal>INFO</>, <literal>NOTICE</>,
<literal>ERROR</>, <literal>LOG</>, <literal>FATAL</>,
<literal>PANIC</>. Later values send less detail to the logs.
<literal>LOG</> has a different precedence here than in
<literal>CLIENT_MIN_MESSAGES</>.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><varname>CLIENT_MIN_MESSAGES</varname> (<type>string</type>)</term>
<listitem>
<para>
This controls how much detail is written to the client. The
default is <literal>INFO</>. Valid values are
<literal>DEBUG5</>, <literal>DEBUG4</>, <literal>DEBUG3</>,
<literal>DEBUG2</>, <literal>DEBUG1</>, <literal>LOG</>,
<literal>INFO</>, <literal>NOTICE</>, <literal>ERROR</>,
<literal>FATAL</>, <literal>PANIC</>. Later values send less
information to the user. <literal>LOG</> has a different
precedence here than in <literal>SERVER_MIN_MESSAGES</>.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><varname>DEBUG_ASSERTIONS</varname> (<type>boolean</type>)</term>
<listitem>
@@ -829,19 +861,6 @@ env PGOPTIONS='-c geqo=off' psql
</listitem>
</varlistentry>

<varlistentry>
<term><varname>DEBUG_LEVEL</varname> (<type>integer</type>)</term>
<listitem>
<para>
The higher this value is set, the more
<quote>debugging</quote> output of various sorts is generated
in the server log during operation. This option is 0 by
default, which means no debugging output. Values up to about 4
currently make sense.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><varname>DEBUG_PRINT_QUERY</varname> (<type>boolean</type>)</term>
<term><varname>DEBUG_PRINT_PARSE</varname> (<type>boolean</type>)</term>

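The two variable descriptions above define thresholds over two differently ordered
lists of levels, which is the subtlety the commit message calls out.  As a rough
illustration only (a standalone sketch with invented names, not the backend's
implementation), the following program shows which destinations a LOG-level message
reaches under the documented defaults:

#include <stdio.h>
#include <string.h>

/*
 * Illustrative only: the orderings below mirror the documentation text,
 * but the array and function names are invented for this sketch.
 */
static const char *server_order[] = {
    "DEBUG5", "DEBUG4", "DEBUG3", "DEBUG2", "DEBUG1",
    "INFO", "NOTICE", "ERROR", "LOG", "FATAL", "PANIC"
};
static const char *client_order[] = {
    "DEBUG5", "DEBUG4", "DEBUG3", "DEBUG2", "DEBUG1",
    "LOG", "INFO", "NOTICE", "ERROR", "FATAL", "PANIC"
};
#define NLEVELS 11

/* Position of a level within a given ordering; higher means more severe. */
static int rank(const char **order, const char *level)
{
    int i;

    for (i = 0; i < NLEVELS; i++)
        if (strcmp(order[i], level) == 0)
            return i;
    return -1;
}

int main(void)
{
    const char *level = "LOG";
    const char *server_min = "NOTICE";  /* default server_min_messages */
    const char *client_min = "INFO";    /* default client_min_messages */

    printf("%s -> server log: %s\n", level,
           rank(server_order, level) >= rank(server_order, server_min) ? "yes" : "no");
    printf("%s -> client:     %s\n", level,
           rank(client_order, level) >= rank(client_order, client_min) ? "yes" : "no");
    return 0;
}
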
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.88 2002/02/11 22:41:59 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.89 2002/03/02 21:39:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1953,13 +1953,13 @@ gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff)
void
gist_redo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "gist_redo: unimplemented");
elog(PANIC, "gist_redo: unimplemented");
}

void
gist_undo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "gist_undo: unimplemented");
elog(PANIC, "gist_undo: unimplemented");
}

void

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.53 2001/10/25 05:49:20 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.54 2002/03/02 21:39:16 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@@ -468,13 +468,13 @@ hashbulkdelete(PG_FUNCTION_ARGS)
void
hash_redo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "hash_redo: unimplemented");
elog(PANIC, "hash_redo: unimplemented");
}

void
hash_undo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "hash_undo: unimplemented");
elog(PANIC, "hash_undo: unimplemented");
}

void

@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.129 2002/01/15 22:14:17 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.130 2002/03/02 21:39:17 momjian Exp $
|
||||
*
|
||||
*
|
||||
* INTERFACE ROUTINES
|
||||
@ -142,21 +142,21 @@ heapgettup(Relation relation,
|
||||
#ifdef HEAPDEBUGALL
|
||||
if (ItemPointerIsValid(tid))
|
||||
{
|
||||
elog(DEBUG, "heapgettup(%s, tid=0x%x[%d,%d], dir=%d, ...)",
|
||||
elog(LOG, "heapgettup(%s, tid=0x%x[%d,%d], dir=%d, ...)",
|
||||
RelationGetRelationName(relation), tid, tid->ip_blkid,
|
||||
tid->ip_posid, dir);
|
||||
}
|
||||
else
|
||||
{
|
||||
elog(DEBUG, "heapgettup(%s, tid=0x%x, dir=%d, ...)",
|
||||
elog(LOG, "heapgettup(%s, tid=0x%x, dir=%d, ...)",
|
||||
RelationGetRelationName(relation), tid, dir);
|
||||
}
|
||||
elog(DEBUG, "heapgettup(..., b=0x%x, nkeys=%d, key=0x%x", buffer, nkeys, key);
|
||||
elog(LOG, "heapgettup(..., b=0x%x, nkeys=%d, key=0x%x", buffer, nkeys, key);
|
||||
|
||||
elog(DEBUG, "heapgettup: relation(%c)=`%s', %p",
|
||||
elog(LOG, "heapgettup: relation(%c)=`%s', %p",
|
||||
relation->rd_rel->relkind, RelationGetRelationName(relation),
|
||||
snapshot);
|
||||
#endif /* !defined(HEAPDEBUGALL) */
|
||||
#endif /* !defined(HEAPLOGALL) */
|
||||
|
||||
if (!ItemPointerIsValid(tid))
|
||||
{
|
||||
@ -745,14 +745,14 @@ heap_endscan(HeapScanDesc scan)
|
||||
|
||||
#ifdef HEAPDEBUGALL
|
||||
#define HEAPDEBUG_1 \
|
||||
elog(DEBUG, "heap_getnext([%s,nkeys=%d],backw=%d) called", \
|
||||
elog(LOG, "heap_getnext([%s,nkeys=%d],backw=%d) called", \
|
||||
RelationGetRelationName(scan->rs_rd), scan->rs_nkeys, backw)
|
||||
|
||||
#define HEAPDEBUG_2 \
|
||||
elog(DEBUG, "heap_getnext returning EOS")
|
||||
elog(LOG, "heap_getnext returning EOS")
|
||||
|
||||
#define HEAPDEBUG_3 \
|
||||
elog(DEBUG, "heap_getnext returning tuple");
|
||||
elog(LOG, "heap_getnext returning tuple");
|
||||
#else
|
||||
#define HEAPDEBUG_1
|
||||
#define HEAPDEBUG_2
|
||||
@ -1958,11 +1958,11 @@ heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
|
||||
|
||||
buffer = XLogReadBuffer(false, reln, xlrec->block);
|
||||
if (!BufferIsValid(buffer))
|
||||
elog(STOP, "heap_clean_redo: no block");
|
||||
elog(PANIC, "heap_clean_redo: no block");
|
||||
|
||||
page = (Page) BufferGetPage(buffer);
|
||||
if (PageIsNew((PageHeader) page))
|
||||
elog(STOP, "heap_clean_redo: uninitialized page");
|
||||
elog(PANIC, "heap_clean_redo: uninitialized page");
|
||||
|
||||
if (XLByteLE(lsn, PageGetLSN(page)))
|
||||
{
|
||||
@ -2015,11 +2015,11 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
|
||||
buffer = XLogReadBuffer(false, reln,
|
||||
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
|
||||
if (!BufferIsValid(buffer))
|
||||
elog(STOP, "heap_delete_%sdo: no block", (redo) ? "re" : "un");
|
||||
elog(PANIC, "heap_delete_%sdo: no block", (redo) ? "re" : "un");
|
||||
|
||||
page = (Page) BufferGetPage(buffer);
|
||||
if (PageIsNew((PageHeader) page))
|
||||
elog(STOP, "heap_delete_%sdo: uninitialized page", (redo) ? "re" : "un");
|
||||
elog(PANIC, "heap_delete_%sdo: uninitialized page", (redo) ? "re" : "un");
|
||||
|
||||
if (redo)
|
||||
{
|
||||
@ -2031,14 +2031,14 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
|
||||
}
|
||||
else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
|
||||
* ?! */
|
||||
elog(STOP, "heap_delete_undo: bad page LSN");
|
||||
elog(PANIC, "heap_delete_undo: bad page LSN");
|
||||
|
||||
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
|
||||
if (PageGetMaxOffsetNumber(page) >= offnum)
|
||||
lp = PageGetItemId(page, offnum);
|
||||
|
||||
if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsUsed(lp))
|
||||
elog(STOP, "heap_delete_%sdo: invalid lp", (redo) ? "re" : "un");
|
||||
elog(PANIC, "heap_delete_%sdo: invalid lp", (redo) ? "re" : "un");
|
||||
|
||||
htup = (HeapTupleHeader) PageGetItem(page, lp);
|
||||
|
||||
@ -2054,7 +2054,7 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
|
||||
return;
|
||||
}
|
||||
|
||||
elog(STOP, "heap_delete_undo: unimplemented");
|
||||
elog(PANIC, "heap_delete_undo: unimplemented");
|
||||
}
|
||||
|
||||
static void
|
||||
@ -2080,7 +2080,7 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
|
||||
page = (Page) BufferGetPage(buffer);
|
||||
if (PageIsNew((PageHeader) page) &&
|
||||
(!redo || !(record->xl_info & XLOG_HEAP_INIT_PAGE)))
|
||||
elog(STOP, "heap_insert_%sdo: uninitialized page", (redo) ? "re" : "un");
|
||||
elog(PANIC, "heap_insert_%sdo: uninitialized page", (redo) ? "re" : "un");
|
||||
|
||||
if (redo)
|
||||
{
|
||||
@ -2104,7 +2104,7 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
|
||||
|
||||
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
|
||||
if (PageGetMaxOffsetNumber(page) + 1 < offnum)
|
||||
elog(STOP, "heap_insert_redo: invalid max offset number");
|
||||
elog(PANIC, "heap_insert_redo: invalid max offset number");
|
||||
|
||||
newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
|
||||
Assert(newlen <= MaxTupleSize);
|
||||
@ -2128,7 +2128,7 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
|
||||
offnum = PageAddItem(page, (Item) htup, newlen, offnum,
|
||||
LP_USED | OverwritePageMode);
|
||||
if (offnum == InvalidOffsetNumber)
|
||||
elog(STOP, "heap_insert_redo: failed to add tuple");
|
||||
elog(PANIC, "heap_insert_redo: failed to add tuple");
|
||||
PageSetLSN(page, lsn);
|
||||
PageSetSUI(page, ThisStartUpID); /* prev sui */
|
||||
UnlockAndWriteBuffer(buffer);
|
||||
@ -2138,9 +2138,9 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
|
||||
/* undo insert */
|
||||
if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
|
||||
* ?! */
|
||||
elog(STOP, "heap_insert_undo: bad page LSN");
|
||||
elog(PANIC, "heap_insert_undo: bad page LSN");
|
||||
|
||||
elog(STOP, "heap_insert_undo: unimplemented");
|
||||
elog(PANIC, "heap_insert_undo: unimplemented");
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2171,11 +2171,11 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
|
||||
buffer = XLogReadBuffer(false, reln,
|
||||
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
|
||||
if (!BufferIsValid(buffer))
|
||||
elog(STOP, "heap_update_%sdo: no block", (redo) ? "re" : "un");
|
||||
elog(PANIC, "heap_update_%sdo: no block", (redo) ? "re" : "un");
|
||||
|
||||
page = (Page) BufferGetPage(buffer);
|
||||
if (PageIsNew((PageHeader) page))
|
||||
elog(STOP, "heap_update_%sdo: uninitialized old page", (redo) ? "re" : "un");
|
||||
elog(PANIC, "heap_update_%sdo: uninitialized old page", (redo) ? "re" : "un");
|
||||
|
||||
if (redo)
|
||||
{
|
||||
@ -2189,14 +2189,14 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
|
||||
}
|
||||
else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
|
||||
* ?! */
|
||||
elog(STOP, "heap_update_undo: bad old tuple page LSN");
|
||||
elog(PANIC, "heap_update_undo: bad old tuple page LSN");
|
||||
|
||||
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
|
||||
if (PageGetMaxOffsetNumber(page) >= offnum)
|
||||
lp = PageGetItemId(page, offnum);
|
||||
|
||||
if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsUsed(lp))
|
||||
elog(STOP, "heap_update_%sdo: invalid lp", (redo) ? "re" : "un");
|
||||
elog(PANIC, "heap_update_%sdo: invalid lp", (redo) ? "re" : "un");
|
||||
|
||||
htup = (HeapTupleHeader) PageGetItem(page, lp);
|
||||
|
||||
@ -2224,7 +2224,7 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
|
||||
goto newt;
|
||||
}
|
||||
|
||||
elog(STOP, "heap_update_undo: unimplemented");
|
||||
elog(PANIC, "heap_update_undo: unimplemented");
|
||||
|
||||
/* Deal with new tuple */
|
||||
|
||||
@ -2245,7 +2245,7 @@ newt:;
|
||||
newsame:;
|
||||
if (PageIsNew((PageHeader) page) &&
|
||||
(!redo || !(record->xl_info & XLOG_HEAP_INIT_PAGE)))
|
||||
elog(STOP, "heap_update_%sdo: uninitialized page", (redo) ? "re" : "un");
|
||||
elog(PANIC, "heap_update_%sdo: uninitialized page", (redo) ? "re" : "un");
|
||||
|
||||
if (redo)
|
||||
{
|
||||
@ -2269,7 +2269,7 @@ newsame:;
|
||||
|
||||
offnum = ItemPointerGetOffsetNumber(&(xlrec->newtid));
|
||||
if (PageGetMaxOffsetNumber(page) + 1 < offnum)
|
||||
elog(STOP, "heap_update_redo: invalid max offset number");
|
||||
elog(PANIC, "heap_update_redo: invalid max offset number");
|
||||
|
||||
hsize = SizeOfHeapUpdate + SizeOfHeapHeader;
|
||||
if (move)
|
||||
@ -2315,7 +2315,7 @@ newsame:;
|
||||
offnum = PageAddItem(page, (Item) htup, newlen, offnum,
|
||||
LP_USED | OverwritePageMode);
|
||||
if (offnum == InvalidOffsetNumber)
|
||||
elog(STOP, "heap_update_redo: failed to add tuple");
|
||||
elog(PANIC, "heap_update_redo: failed to add tuple");
|
||||
PageSetLSN(page, lsn);
|
||||
PageSetSUI(page, ThisStartUpID); /* prev sui */
|
||||
UnlockAndWriteBuffer(buffer);
|
||||
@ -2324,9 +2324,9 @@ newsame:;
|
||||
|
||||
/* undo */
|
||||
if (XLByteLT(PageGetLSN(page), lsn)) /* changes not applied?! */
|
||||
elog(STOP, "heap_update_undo: bad new tuple page LSN");
|
||||
elog(PANIC, "heap_update_undo: bad new tuple page LSN");
|
||||
|
||||
elog(STOP, "heap_update_undo: unimplemented");
|
||||
elog(PANIC, "heap_update_undo: unimplemented");
|
||||
|
||||
}
|
||||
|
||||
@ -2342,30 +2342,30 @@ _heap_unlock_tuple(void *data)
|
||||
HeapTupleHeader htup;
|
||||
|
||||
if (!RelationIsValid(reln))
|
||||
elog(STOP, "_heap_unlock_tuple: can't open relation");
|
||||
elog(PANIC, "_heap_unlock_tuple: can't open relation");
|
||||
|
||||
buffer = XLogReadBuffer(false, reln,
|
||||
ItemPointerGetBlockNumber(&(xltid->tid)));
|
||||
if (!BufferIsValid(buffer))
|
||||
elog(STOP, "_heap_unlock_tuple: can't read buffer");
|
||||
elog(PANIC, "_heap_unlock_tuple: can't read buffer");
|
||||
|
||||
page = (Page) BufferGetPage(buffer);
|
||||
if (PageIsNew((PageHeader) page))
|
||||
elog(STOP, "_heap_unlock_tuple: uninitialized page");
|
||||
elog(PANIC, "_heap_unlock_tuple: uninitialized page");
|
||||
|
||||
offnum = ItemPointerGetOffsetNumber(&(xltid->tid));
|
||||
if (offnum > PageGetMaxOffsetNumber(page))
|
||||
elog(STOP, "_heap_unlock_tuple: invalid itemid");
|
||||
elog(PANIC, "_heap_unlock_tuple: invalid itemid");
|
||||
lp = PageGetItemId(page, offnum);
|
||||
|
||||
if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
|
||||
elog(STOP, "_heap_unlock_tuple: unused/deleted tuple in rollback");
|
||||
elog(PANIC, "_heap_unlock_tuple: unused/deleted tuple in rollback");
|
||||
|
||||
htup = (HeapTupleHeader) PageGetItem(page, lp);
|
||||
|
||||
if (!TransactionIdEquals(htup->t_xmax, GetCurrentTransactionId()) ||
|
||||
htup->t_cmax != GetCurrentCommandId())
|
||||
elog(STOP, "_heap_unlock_tuple: invalid xmax/cmax in rollback");
|
||||
elog(PANIC, "_heap_unlock_tuple: invalid xmax/cmax in rollback");
|
||||
htup->t_infomask &= ~HEAP_XMAX_UNLOGGED;
|
||||
htup->t_infomask |= HEAP_XMAX_INVALID;
|
||||
UnlockAndWriteBuffer(buffer);
|
||||
@ -2389,7 +2389,7 @@ heap_redo(XLogRecPtr lsn, XLogRecord *record)
|
||||
else if (info == XLOG_HEAP_CLEAN)
|
||||
heap_xlog_clean(true, lsn, record);
|
||||
else
|
||||
elog(STOP, "heap_redo: unknown op code %u", info);
|
||||
elog(PANIC, "heap_redo: unknown op code %u", info);
|
||||
}
|
||||
|
||||
void
|
||||
@ -2409,7 +2409,7 @@ heap_undo(XLogRecPtr lsn, XLogRecord *record)
|
||||
else if (info == XLOG_HEAP_CLEAN)
|
||||
heap_xlog_clean(false, lsn, record);
|
||||
else
|
||||
elog(STOP, "heap_undo: unknown op code %u", info);
|
||||
elog(PANIC, "heap_undo: unknown op code %u", info);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Id: hio.c,v 1.43 2001/10/25 05:49:21 momjian Exp $
|
||||
* $Id: hio.c,v 1.44 2002/03/02 21:39:18 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -50,7 +50,7 @@ RelationPutHeapTuple(Relation relation,
|
||||
tuple->t_len, InvalidOffsetNumber, LP_USED);
|
||||
|
||||
if (offnum == InvalidOffsetNumber)
|
||||
elog(STOP, "RelationPutHeapTuple: failed to add tuple");
|
||||
elog(PANIC, "RelationPutHeapTuple: failed to add tuple");
|
||||
|
||||
/* Update tuple->t_self to the actual position where it was stored */
|
||||
ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);
|
||||
@ -270,7 +270,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
|
||||
if (len > PageGetFreeSpace(pageHeader))
|
||||
{
|
||||
/* We should not get here given the test at the top */
|
||||
elog(STOP, "Tuple is too big: size %lu", (unsigned long) len);
|
||||
elog(PANIC, "Tuple is too big: size %lu", (unsigned long) len);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.88 2002/01/01 20:32:37 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.89 2002/03/02 21:39:18 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -507,7 +507,7 @@ _bt_insertonpg(Relation rel,
|
||||
/* If root page was splitted */
|
||||
if (stack == (BTStack) NULL)
|
||||
{
|
||||
elog(DEBUG, "btree: concurrent ROOT page split");
|
||||
elog(LOG, "btree: concurrent ROOT page split");
|
||||
|
||||
/*
|
||||
* If root page splitter failed to create new root page
|
||||
@ -735,7 +735,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
|
||||
item = (BTItem) PageGetItem(origpage, itemid);
|
||||
if (PageAddItem(rightpage, (Item) item, itemsz, rightoff,
|
||||
LP_USED) == InvalidOffsetNumber)
|
||||
elog(STOP, "btree: failed to add hikey to the right sibling");
|
||||
elog(PANIC, "btree: failed to add hikey to the right sibling");
|
||||
rightoff = OffsetNumberNext(rightoff);
|
||||
}
|
||||
|
||||
@ -761,7 +761,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
|
||||
lhikey = item;
|
||||
if (PageAddItem(leftpage, (Item) item, itemsz, leftoff,
|
||||
LP_USED) == InvalidOffsetNumber)
|
||||
elog(STOP, "btree: failed to add hikey to the left sibling");
|
||||
elog(PANIC, "btree: failed to add hikey to the left sibling");
|
||||
leftoff = OffsetNumberNext(leftoff);
|
||||
|
||||
/*
|
||||
@ -1316,7 +1316,7 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
|
||||
* the two items will go into positions P_HIKEY and P_FIRSTKEY.
|
||||
*/
|
||||
if (PageAddItem(rootpage, (Item) new_item, itemsz, P_HIKEY, LP_USED) == InvalidOffsetNumber)
|
||||
elog(STOP, "btree: failed to add leftkey to new root page");
|
||||
elog(PANIC, "btree: failed to add leftkey to new root page");
|
||||
pfree(new_item);
|
||||
|
||||
/*
|
||||
@ -1333,7 +1333,7 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
|
||||
* insert the right page pointer into the new root page.
|
||||
*/
|
||||
if (PageAddItem(rootpage, (Item) new_item, itemsz, P_FIRSTKEY, LP_USED) == InvalidOffsetNumber)
|
||||
elog(STOP, "btree: failed to add rightkey to new root page");
|
||||
elog(PANIC, "btree: failed to add rightkey to new root page");
|
||||
pfree(new_item);
|
||||
|
||||
metad->btm_root = rootblknum;
|
||||
@ -2034,7 +2034,7 @@ _bt_pgaddtup(Relation rel,
|
||||
|
||||
if (PageAddItem(page, (Item) btitem, itemsize, itup_off,
|
||||
LP_USED) == InvalidOffsetNumber)
|
||||
elog(STOP, "btree: failed to add item to the %s for %s",
|
||||
elog(PANIC, "btree: failed to add item to the %s for %s",
|
||||
where, RelationGetRelationName(rel));
|
||||
}
|
||||
|
||||
|
@ -12,7 +12,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.87 2002/01/06 00:37:43 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.88 2002/03/02 21:39:18 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -776,7 +776,7 @@ _bt_restore_page(Page page, char *from, int len)
|
||||
itemsz = MAXALIGN(itemsz);
|
||||
if (PageAddItem(page, (Item) from, itemsz,
|
||||
FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
|
||||
elog(STOP, "_bt_restore_page: can't add item to page");
|
||||
elog(PANIC, "_bt_restore_page: can't add item to page");
|
||||
from += itemsz;
|
||||
}
|
||||
}
|
||||
@ -799,10 +799,10 @@ btree_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
|
||||
buffer = XLogReadBuffer(false, reln,
|
||||
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
|
||||
if (!BufferIsValid(buffer))
|
||||
elog(STOP, "btree_delete_redo: block unfound");
|
||||
elog(PANIC, "btree_delete_redo: block unfound");
|
||||
page = (Page) BufferGetPage(buffer);
|
||||
if (PageIsNew((PageHeader) page))
|
||||
elog(STOP, "btree_delete_redo: uninitialized page");
|
||||
elog(PANIC, "btree_delete_redo: uninitialized page");
|
||||
|
||||
if (XLByteLE(lsn, PageGetLSN(page)))
|
||||
{
|
||||
@ -838,10 +838,10 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
|
||||
buffer = XLogReadBuffer(false, reln,
|
||||
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
|
||||
if (!BufferIsValid(buffer))
|
||||
elog(STOP, "btree_insert_%sdo: block unfound", (redo) ? "re" : "un");
|
||||
elog(PANIC, "btree_insert_%sdo: block unfound", (redo) ? "re" : "un");
|
||||
page = (Page) BufferGetPage(buffer);
|
||||
if (PageIsNew((PageHeader) page))
|
||||
elog(STOP, "btree_insert_%sdo: uninitialized page", (redo) ? "re" : "un");
|
||||
elog(PANIC, "btree_insert_%sdo: uninitialized page", (redo) ? "re" : "un");
|
||||
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
|
||||
|
||||
if (redo)
|
||||
@ -855,7 +855,7 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
|
||||
record->xl_len - SizeOfBtreeInsert,
|
||||
ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
|
||||
LP_USED) == InvalidOffsetNumber)
|
||||
elog(STOP, "btree_insert_redo: failed to add item");
|
||||
elog(PANIC, "btree_insert_redo: failed to add item");
|
||||
|
||||
PageSetLSN(page, lsn);
|
||||
PageSetSUI(page, ThisStartUpID);
|
||||
@ -864,7 +864,7 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
|
||||
else
|
||||
{
|
||||
if (XLByteLT(PageGetLSN(page), lsn))
|
||||
elog(STOP, "btree_insert_undo: bad page LSN");
|
||||
elog(PANIC, "btree_insert_undo: bad page LSN");
|
||||
|
||||
if (!P_ISLEAF(pageop))
|
||||
{
|
||||
@ -872,7 +872,7 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
|
||||
return;
|
||||
}
|
||||
|
||||
elog(STOP, "btree_insert_undo: unimplemented");
|
||||
elog(PANIC, "btree_insert_undo: unimplemented");
|
||||
}
|
||||
|
||||
return;
|
||||
@ -899,13 +899,13 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
|
||||
BlockIdGetBlockNumber(&(xlrec->otherblk));
|
||||
buffer = XLogReadBuffer(false, reln, blkno);
|
||||
if (!BufferIsValid(buffer))
|
||||
elog(STOP, "btree_split_%s: lost left sibling", op);
|
||||
elog(PANIC, "btree_split_%s: lost left sibling", op);
|
||||
|
||||
page = (Page) BufferGetPage(buffer);
|
||||
if (redo)
|
||||
_bt_pageinit(page, BufferGetPageSize(buffer));
|
||||
else if (PageIsNew((PageHeader) page))
|
||||
elog(STOP, "btree_split_undo: uninitialized left sibling");
|
||||
elog(PANIC, "btree_split_undo: uninitialized left sibling");
|
||||
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
|
||||
|
||||
if (redo)
|
||||
@ -928,8 +928,8 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
|
||||
/* undo */
|
||||
{
|
||||
if (XLByteLT(PageGetLSN(page), lsn))
|
||||
elog(STOP, "btree_split_undo: bad left sibling LSN");
|
||||
elog(STOP, "btree_split_undo: unimplemented");
|
||||
elog(PANIC, "btree_split_undo: bad left sibling LSN");
|
||||
elog(PANIC, "btree_split_undo: unimplemented");
|
||||
}
|
||||
|
||||
/* Right (new) sibling */
|
||||
@ -937,13 +937,13 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
|
||||
ItemPointerGetBlockNumber(&(xlrec->target.tid));
|
||||
buffer = XLogReadBuffer((redo) ? true : false, reln, blkno);
|
||||
if (!BufferIsValid(buffer))
|
||||
elog(STOP, "btree_split_%s: lost right sibling", op);
|
||||
elog(PANIC, "btree_split_%s: lost right sibling", op);
|
||||
|
||||
page = (Page) BufferGetPage(buffer);
|
||||
if (redo)
|
||||
_bt_pageinit(page, BufferGetPageSize(buffer));
|
||||
else if (PageIsNew((PageHeader) page))
|
||||
elog(STOP, "btree_split_undo: uninitialized right sibling");
|
||||
elog(PANIC, "btree_split_undo: uninitialized right sibling");
|
||||
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
|
||||
|
||||
if (redo)
|
||||
@ -967,8 +967,8 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
|
||||
/* undo */
|
||||
{
|
||||
if (XLByteLT(PageGetLSN(page), lsn))
|
||||
elog(STOP, "btree_split_undo: bad right sibling LSN");
|
||||
elog(STOP, "btree_split_undo: unimplemented");
|
||||
elog(PANIC, "btree_split_undo: bad right sibling LSN");
|
||||
elog(PANIC, "btree_split_undo: unimplemented");
|
||||
}
|
||||
|
||||
if (!redo || (record->xl_info & XLR_BKP_BLOCK_1))
|
||||
@ -981,11 +981,11 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
|
||||
|
||||
buffer = XLogReadBuffer(false, reln, blkno);
|
||||
if (!BufferIsValid(buffer))
|
||||
elog(STOP, "btree_split_redo: lost next right page");
|
||||
elog(PANIC, "btree_split_redo: lost next right page");
|
||||
|
||||
page = (Page) BufferGetPage(buffer);
|
||||
if (PageIsNew((PageHeader) page))
|
||||
elog(STOP, "btree_split_redo: uninitialized next right page");
|
||||
elog(PANIC, "btree_split_redo: uninitialized next right page");
|
||||
|
||||
if (XLByteLE(lsn, PageGetLSN(page)))
|
||||
{
|
||||
@ -1022,10 +1022,10 @@ btree_xlog_newroot(bool redo, XLogRecPtr lsn, XLogRecord *record)
|
||||
return;
|
||||
buffer = XLogReadBuffer(true, reln, BlockIdGetBlockNumber(&(xlrec->rootblk)));
|
||||
if (!BufferIsValid(buffer))
|
||||
elog(STOP, "btree_newroot_redo: no root page");
|
||||
elog(PANIC, "btree_newroot_redo: no root page");
|
||||
metabuf = XLogReadBuffer(false, reln, BTREE_METAPAGE);
|
||||
if (!BufferIsValid(buffer))
|
||||
elog(STOP, "btree_newroot_redo: no metapage");
|
||||
elog(PANIC, "btree_newroot_redo: no metapage");
|
||||
page = (Page) BufferGetPage(buffer);
|
||||
_bt_pageinit(page, BufferGetPageSize(buffer));
|
||||
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
|
||||
@ -1079,7 +1079,7 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
|
||||
else if (info == XLOG_BTREE_NEWROOT)
|
||||
btree_xlog_newroot(true, lsn, record);
|
||||
else
|
||||
elog(STOP, "btree_redo: unknown op code %u", info);
|
||||
elog(PANIC, "btree_redo: unknown op code %u", info);
|
||||
}
|
||||
|
||||
void
|
||||
@ -1099,7 +1099,7 @@ btree_undo(XLogRecPtr lsn, XLogRecord *record)
|
||||
else if (info == XLOG_BTREE_NEWROOT)
|
||||
btree_xlog_newroot(false, lsn, record);
|
||||
else
|
||||
elog(STOP, "btree_undo: unknown op code %u", info);
|
||||
elog(PANIC, "btree_undo: unknown op code %u", info);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.69 2002/01/15 22:14:17 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.70 2002/03/02 21:39:19 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -1345,13 +1345,13 @@ _rtdump(Relation r)
|
||||
void
|
||||
rtree_redo(XLogRecPtr lsn, XLogRecord *record)
|
||||
{
|
||||
elog(STOP, "rtree_redo: unimplemented");
|
||||
elog(PANIC, "rtree_redo: unimplemented");
|
||||
}
|
||||
|
||||
void
|
||||
rtree_undo(XLogRecPtr lsn, XLogRecord *record)
|
||||
{
|
||||
elog(STOP, "rtree_undo: unimplemented");
|
||||
elog(PANIC, "rtree_undo: unimplemented");
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -13,7 +13,7 @@
|
||||
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.7 2001/10/28 06:25:42 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.8 2002/03/02 21:39:19 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -544,19 +544,19 @@ CLOGPhysicalReadPage(int pageno, int slotno)
|
||||
if (fd < 0)
|
||||
{
|
||||
if (errno != ENOENT || !InRecovery)
|
||||
elog(STOP, "open of %s failed: %m", path);
|
||||
elog(DEBUG, "clog file %s doesn't exist, reading as zeroes", path);
|
||||
elog(PANIC, "open of %s failed: %m", path);
|
||||
elog(LOG, "clog file %s doesn't exist, reading as zeroes", path);
|
||||
MemSet(ClogCtl->page_buffer[slotno], 0, CLOG_BLCKSZ);
|
||||
return;
|
||||
}
|
||||
|
||||
if (lseek(fd, (off_t) offset, SEEK_SET) < 0)
|
||||
elog(STOP, "lseek of clog file %u, offset %u failed: %m",
|
||||
elog(PANIC, "lseek of clog file %u, offset %u failed: %m",
|
||||
segno, offset);
|
||||
|
||||
errno = 0;
|
||||
if (read(fd, ClogCtl->page_buffer[slotno], CLOG_BLCKSZ) != CLOG_BLCKSZ)
|
||||
elog(STOP, "read of clog file %u, offset %u failed: %m",
|
||||
elog(PANIC, "read of clog file %u, offset %u failed: %m",
|
||||
segno, offset);
|
||||
|
||||
close(fd);
|
||||
@ -596,15 +596,15 @@ CLOGPhysicalWritePage(int pageno, int slotno)
|
||||
if (fd < 0)
|
||||
{
|
||||
if (errno != ENOENT)
|
||||
elog(STOP, "open of %s failed: %m", path);
|
||||
elog(PANIC, "open of %s failed: %m", path);
|
||||
fd = BasicOpenFile(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
|
||||
S_IRUSR | S_IWUSR);
|
||||
if (fd < 0)
|
||||
elog(STOP, "creation of file %s failed: %m", path);
|
||||
elog(PANIC, "creation of file %s failed: %m", path);
|
||||
}
|
||||
|
||||
if (lseek(fd, (off_t) offset, SEEK_SET) < 0)
|
||||
elog(STOP, "lseek of clog file %u, offset %u failed: %m",
|
||||
elog(PANIC, "lseek of clog file %u, offset %u failed: %m",
|
||||
segno, offset);
|
||||
|
||||
errno = 0;
|
||||
@ -613,7 +613,7 @@ CLOGPhysicalWritePage(int pageno, int slotno)
|
||||
/* if write didn't set errno, assume problem is no disk space */
|
||||
if (errno == 0)
|
||||
errno = ENOSPC;
|
||||
elog(STOP, "write of clog file %u, offset %u failed: %m",
|
||||
elog(PANIC, "write of clog file %u, offset %u failed: %m",
|
||||
segno, offset);
|
||||
}
|
||||
|
||||
@ -887,7 +887,7 @@ ScanCLOGDirectory(int cutoffPage, bool doDeletions)
|
||||
|
||||
cldir = opendir(ClogDir);
|
||||
if (cldir == NULL)
|
||||
elog(STOP, "could not open transaction-commit log directory (%s): %m",
|
||||
elog(PANIC, "could not open transaction-commit log directory (%s): %m",
|
||||
ClogDir);
|
||||
|
||||
errno = 0;
|
||||
@ -912,7 +912,7 @@ ScanCLOGDirectory(int cutoffPage, bool doDeletions)
|
||||
errno = 0;
|
||||
}
|
||||
if (errno)
|
||||
elog(STOP, "could not read transaction-commit log directory (%s): %m",
|
||||
elog(PANIC, "could not read transaction-commit log directory (%s): %m",
|
||||
ClogDir);
|
||||
closedir(cldir);
|
||||
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.115 2001/11/01 06:17:01 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.116 2002/03/02 21:39:19 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* Transaction aborts can now occur two ways:
|
||||
@ -1606,7 +1606,7 @@ xact_redo(XLogRecPtr lsn, XLogRecord *record)
|
||||
/* SHOULD REMOVE FILES OF ALL FAILED-TO-BE-CREATED RELATIONS */
|
||||
}
|
||||
else
|
||||
elog(STOP, "xact_redo: unknown op code %u", info);
|
||||
elog(PANIC, "xact_redo: unknown op code %u", info);
|
||||
}
|
||||
|
||||
void
|
||||
@ -1615,9 +1615,9 @@ xact_undo(XLogRecPtr lsn, XLogRecord *record)
|
||||
uint8 info = record->xl_info & ~XLR_INFO_MASK;
|
||||
|
||||
if (info == XLOG_XACT_COMMIT) /* shouldn't be called by XLOG */
|
||||
elog(STOP, "xact_undo: can't undo committed xaction");
|
||||
elog(PANIC, "xact_undo: can't undo committed xaction");
|
||||
else if (info != XLOG_XACT_ABORT)
|
||||
elog(STOP, "xact_redo: unknown op code %u", info);
|
||||
elog(PANIC, "xact_redo: unknown op code %u", info);
|
||||
}
|
||||
|
||||
void
|
||||
@ -1652,7 +1652,7 @@ void
|
||||
{
|
||||
#ifdef XLOG_II
|
||||
if (_RollbackFunc != NULL)
|
||||
elog(STOP, "XactPushRollback: already installed");
|
||||
elog(PANIC, "XactPushRollback: already installed");
|
||||
#endif
|
||||
|
||||
_RollbackFunc = func;
|
||||
|
@ -7,7 +7,7 @@
|
||||
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.87 2002/02/18 05:44:45 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.88 2002/03/02 21:39:20 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -489,7 +489,7 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
|
||||
if (info & XLR_INFO_MASK)
|
||||
{
|
||||
if ((info & XLR_INFO_MASK) != XLOG_NO_TRAN)
|
||||
elog(STOP, "XLogInsert: invalid info mask %02X",
|
||||
elog(PANIC, "XLogInsert: invalid info mask %02X",
|
||||
(info & XLR_INFO_MASK));
|
||||
no_tran = true;
|
||||
info &= ~XLR_INFO_MASK;
|
||||
@ -594,7 +594,7 @@ begin:;
|
||||
}
|
||||
}
|
||||
if (i >= XLR_MAX_BKP_BLOCKS)
|
||||
elog(STOP, "XLogInsert: can backup %d blocks at most",
|
||||
elog(PANIC, "XLogInsert: can backup %d blocks at most",
|
||||
XLR_MAX_BKP_BLOCKS);
|
||||
}
|
||||
/* Break out of loop when rdt points to last list item */
|
||||
@ -612,7 +612,7 @@ begin:;
|
||||
* also remove the check for xl_len == 0 in ReadRecord, below.
|
||||
*/
|
||||
if (len == 0 || len > MAXLOGRECSZ)
|
||||
elog(STOP, "XLogInsert: invalid record length %u", len);
|
||||
elog(PANIC, "XLogInsert: invalid record length %u", len);
|
||||
|
||||
START_CRIT_SECTION();
|
||||
|
||||
@ -769,7 +769,7 @@ begin:;
|
||||
strcat(buf, " - ");
|
||||
RmgrTable[record->xl_rmid].rm_desc(buf, record->xl_info, rdata->data);
|
||||
}
|
||||
elog(DEBUG, "%s", buf);
|
||||
elog(LOG, "%s", buf);
|
||||
}
|
||||
|
||||
/* Record begin of record in appropriate places */
|
||||
@ -1010,7 +1010,7 @@ XLogWrite(XLogwrtRqst WriteRqst)
|
||||
* AdvanceXLInsertBuffer.
|
||||
*/
|
||||
if (!XLByteLT(LogwrtResult.Write, XLogCtl->xlblocks[Write->curridx]))
|
||||
elog(STOP, "XLogWrite: write request %X/%X is past end of log %X/%X",
|
||||
elog(PANIC, "XLogWrite: write request %X/%X is past end of log %X/%X",
|
||||
LogwrtResult.Write.xlogid, LogwrtResult.Write.xrecoff,
|
||||
XLogCtl->xlblocks[Write->curridx].xlogid,
|
||||
XLogCtl->xlblocks[Write->curridx].xrecoff);
|
||||
@ -1027,7 +1027,7 @@ XLogWrite(XLogwrtRqst WriteRqst)
|
||||
if (openLogFile >= 0)
|
||||
{
|
||||
if (close(openLogFile) != 0)
|
||||
elog(STOP, "close of log file %u, segment %u failed: %m",
|
||||
elog(PANIC, "close of log file %u, segment %u failed: %m",
|
||||
openLogId, openLogSeg);
|
||||
openLogFile = -1;
|
||||
}
|
||||
@ -1066,7 +1066,7 @@ XLogWrite(XLogwrtRqst WriteRqst)
|
||||
(uint32) CheckPointSegments))
|
||||
{
|
||||
if (XLOG_DEBUG)
|
||||
elog(DEBUG, "XLogWrite: time for a checkpoint, signaling postmaster");
|
||||
elog(LOG, "XLogWrite: time for a checkpoint, signaling postmaster");
|
||||
SendPostmasterSignal(PMSIGNAL_DO_CHECKPOINT);
|
||||
}
|
||||
}
|
||||
@ -1085,7 +1085,7 @@ XLogWrite(XLogwrtRqst WriteRqst)
|
||||
{
|
||||
openLogOff = (LogwrtResult.Write.xrecoff - BLCKSZ) % XLogSegSize;
|
||||
if (lseek(openLogFile, (off_t) openLogOff, SEEK_SET) < 0)
|
||||
elog(STOP, "lseek of log file %u, segment %u, offset %u failed: %m",
|
||||
elog(PANIC, "lseek of log file %u, segment %u, offset %u failed: %m",
|
||||
openLogId, openLogSeg, openLogOff);
|
||||
}
|
||||
|
||||
@ -1097,7 +1097,7 @@ XLogWrite(XLogwrtRqst WriteRqst)
|
||||
/* if write didn't set errno, assume problem is no disk space */
|
||||
if (errno == 0)
|
||||
errno = ENOSPC;
|
||||
elog(STOP, "write of log file %u, segment %u, offset %u failed: %m",
|
||||
elog(PANIC, "write of log file %u, segment %u, offset %u failed: %m",
|
||||
openLogId, openLogSeg, openLogOff);
|
||||
}
|
||||
openLogOff += BLCKSZ;
|
||||
@ -1141,7 +1141,7 @@ XLogWrite(XLogwrtRqst WriteRqst)
|
||||
!XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
|
||||
{
|
||||
if (close(openLogFile) != 0)
|
||||
elog(STOP, "close of log file %u, segment %u failed: %m",
|
||||
elog(PANIC, "close of log file %u, segment %u failed: %m",
|
||||
openLogId, openLogSeg);
|
||||
openLogFile = -1;
|
||||
}
|
||||
@ -1193,7 +1193,7 @@ XLogFlush(XLogRecPtr record)
|
||||
|
||||
if (XLOG_DEBUG)
|
||||
{
|
||||
elog(DEBUG, "XLogFlush%s%s: request %X/%X; write %X/%X; flush %X/%X\n",
|
||||
elog(LOG, "XLogFlush%s%s: request %X/%X; write %X/%X; flush %X/%X\n",
|
||||
(IsBootstrapProcessingMode()) ? "(bootstrap)" : "",
|
||||
(InRedo) ? "(redo)" : "",
|
||||
record.xlogid, record.xrecoff,
|
||||
@ -1273,7 +1273,7 @@ XLogFlush(XLogRecPtr record)
|
||||
* problem; most likely, the requested flush point is past end of XLOG.
|
||||
* This has been seen to occur when a disk page has a corrupted LSN.
|
||||
*
|
||||
* Formerly we treated this as a STOP condition, but that hurts the
|
||||
* Formerly we treated this as a PANIC condition, but that hurts the
|
||||
* system's robustness rather than helping it: we do not want to take
|
||||
* down the whole system due to corruption on one data page. In
|
||||
* particular, if the bad page is encountered again during recovery then
|
||||
@ -1286,7 +1286,7 @@ XLogFlush(XLogRecPtr record)
|
||||
* The current approach is to ERROR under normal conditions, but only
|
||||
* NOTICE during recovery, so that the system can be brought up even if
|
||||
* there's a corrupt LSN. Note that for calls from xact.c, the ERROR
|
||||
* will be promoted to STOP since xact.c calls this routine inside a
|
||||
* will be promoted to PANIC since xact.c calls this routine inside a
|
||||
* critical section. However, calls from bufmgr.c are not within
|
||||
* critical sections and so we will not force a restart for a bad LSN
|
||||
* on a data page.
|
||||
@ -1336,7 +1336,7 @@ XLogFileInit(uint32 log, uint32 seg,
|
||||
if (fd < 0)
|
||||
{
|
||||
if (errno != ENOENT)
|
||||
elog(STOP, "open of %s (log file %u, segment %u) failed: %m",
|
||||
elog(PANIC, "open of %s (log file %u, segment %u) failed: %m",
|
||||
path, log, seg);
|
||||
}
|
||||
else
|
||||
@ -1358,7 +1358,7 @@ XLogFileInit(uint32 log, uint32 seg,
|
||||
fd = BasicOpenFile(tmppath, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
|
||||
S_IRUSR | S_IWUSR);
|
||||
if (fd < 0)
|
||||
elog(STOP, "creation of file %s failed: %m", tmppath);
|
||||
elog(PANIC, "creation of file %s failed: %m", tmppath);
|
||||
|
||||
/*
|
||||
* Zero-fill the file. We have to do this the hard way to ensure that
|
||||
@ -1385,12 +1385,12 @@ XLogFileInit(uint32 log, uint32 seg,
|
||||
/* if write didn't set errno, assume problem is no disk space */
|
||||
errno = save_errno ? save_errno : ENOSPC;
|
||||
|
||||
elog(STOP, "ZeroFill failed to write %s: %m", tmppath);
|
||||
elog(PANIC, "ZeroFill failed to write %s: %m", tmppath);
|
||||
}
|
||||
}
|
||||
|
||||
if (pg_fsync(fd) != 0)
|
||||
elog(STOP, "fsync of file %s failed: %m", tmppath);
|
||||
elog(PANIC, "fsync of file %s failed: %m", tmppath);
|
||||
|
||||
close(fd);
|
||||
|
||||
@ -1417,7 +1417,7 @@ XLogFileInit(uint32 log, uint32 seg,
|
||||
fd = BasicOpenFile(path, O_RDWR | PG_BINARY | XLOG_SYNC_BIT,
|
||||
S_IRUSR | S_IWUSR);
|
||||
if (fd < 0)
|
||||
elog(STOP, "open of %s (log file %u, segment %u) failed: %m",
|
||||
elog(PANIC, "open of %s (log file %u, segment %u) failed: %m",
|
||||
path, log, seg);
|
||||
|
||||
return (fd);
|
||||
@ -1495,12 +1495,12 @@ InstallXLogFileSegment(uint32 log, uint32 seg, char *tmppath,
|
||||
*/
|
||||
#ifndef __BEOS__
|
||||
if (link(tmppath, path) < 0)
|
||||
elog(STOP, "link from %s to %s (initialization of log file %u, segment %u) failed: %m",
|
||||
elog(PANIC, "link from %s to %s (initialization of log file %u, segment %u) failed: %m",
|
||||
tmppath, path, log, seg);
|
||||
unlink(tmppath);
|
||||
#else
|
||||
if (rename(tmppath, path) < 0)
|
||||
elog(STOP, "rename from %s to %s (initialization of log file %u, segment %u) failed: %m",
|
||||
elog(PANIC, "rename from %s to %s (initialization of log file %u, segment %u) failed: %m",
|
||||
tmppath, path, log, seg);
|
||||
#endif
|
||||
|
||||
@ -1531,7 +1531,7 @@ XLogFileOpen(uint32 log, uint32 seg, bool econt)
|
||||
path, log, seg);
|
||||
return (fd);
|
||||
}
|
||||
elog(STOP, "open of %s (log file %u, segment %u) failed: %m",
|
||||
elog(PANIC, "open of %s (log file %u, segment %u) failed: %m",
|
||||
path, log, seg);
|
||||
}
|
||||
|
||||
@ -1592,7 +1592,7 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
|
||||
|
||||
xldir = opendir(XLogDir);
|
||||
if (xldir == NULL)
|
||||
elog(STOP, "could not open transaction log directory (%s): %m",
|
||||
elog(PANIC, "could not open transaction log directory (%s): %m",
|
||||
XLogDir);
|
||||
|
||||
sprintf(lastoff, "%08X%08X", log, seg);
|
||||
@ -1638,7 +1638,7 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
|
||||
errno = 0;
|
||||
}
|
||||
if (errno)
|
||||
elog(STOP, "could not read transaction log directory (%s): %m",
|
||||
elog(PANIC, "could not read transaction log directory (%s): %m",
|
||||
XLogDir);
|
||||
closedir(xldir);
|
||||
}
|
||||
@ -1749,8 +1749,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
|
||||
* If RecPtr is not NULL, try to read a record at that position. Otherwise
|
||||
* try to read a record just after the last one previously read.
|
||||
*
|
||||
* If no valid record is available, returns NULL, or fails if emode is STOP.
|
||||
* (emode must be either STOP or LOG.)
|
||||
* If no valid record is available, returns NULL, or fails if emode is PANIC.
|
||||
* (emode must be either PANIC or LOG.)
|
||||
*
|
||||
* buffer is a workspace at least _INTL_MAXLOGRECSZ bytes long. It is needed
|
||||
* to reassemble a record that crosses block boundaries. Note that on
|
||||
@ -1802,7 +1802,7 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
|
||||
tmpRecPtr.xrecoff += SizeOfXLogPHD;
|
||||
}
|
||||
else if (!XRecOffIsValid(RecPtr->xrecoff))
|
||||
elog(STOP, "ReadRecord: invalid record offset at %X/%X",
|
||||
elog(PANIC, "ReadRecord: invalid record offset at %X/%X",
|
||||
RecPtr->xlogid, RecPtr->xrecoff);
|
||||
|
||||
if (readFile >= 0 && !XLByteInSeg(*RecPtr, readId, readSeg))
|
||||
@ -2083,11 +2083,11 @@ WriteControlFile(void)
|
||||
#ifdef USE_LOCALE
|
||||
localeptr = setlocale(LC_COLLATE, NULL);
|
||||
if (!localeptr)
|
||||
elog(STOP, "invalid LC_COLLATE setting");
|
||||
elog(PANIC, "invalid LC_COLLATE setting");
|
||||
StrNCpy(ControlFile->lc_collate, localeptr, LOCALE_NAME_BUFLEN);
|
||||
localeptr = setlocale(LC_CTYPE, NULL);
|
||||
if (!localeptr)
|
||||
elog(STOP, "invalid LC_CTYPE setting");
|
||||
elog(PANIC, "invalid LC_CTYPE setting");
|
||||
StrNCpy(ControlFile->lc_ctype, localeptr, LOCALE_NAME_BUFLEN);
|
||||
|
||||
/*
|
||||
@ -2122,7 +2122,7 @@ WriteControlFile(void)
|
||||
* specific error than "couldn't read pg_control".
|
||||
*/
|
||||
if (sizeof(ControlFileData) > BLCKSZ)
|
||||
elog(STOP, "sizeof(ControlFileData) is larger than BLCKSZ; fix either one");
|
||||
elog(PANIC, "sizeof(ControlFileData) is larger than BLCKSZ; fix either one");
|
||||
|
||||
memset(buffer, 0, BLCKSZ);
|
||||
memcpy(buffer, ControlFile, sizeof(ControlFileData));
|
||||
@ -2130,7 +2130,7 @@ WriteControlFile(void)
|
||||
fd = BasicOpenFile(ControlFilePath, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
|
||||
S_IRUSR | S_IWUSR);
|
||||
if (fd < 0)
|
||||
elog(STOP, "WriteControlFile: could not create control file (%s): %m",
|
||||
elog(PANIC, "WriteControlFile: could not create control file (%s): %m",
|
||||
ControlFilePath);
|
||||
|
||||
errno = 0;
|
||||
@ -2139,11 +2139,11 @@ WriteControlFile(void)
|
||||
/* if write didn't set errno, assume problem is no disk space */
|
||||
if (errno == 0)
|
||||
errno = ENOSPC;
|
||||
elog(STOP, "WriteControlFile: write to control file failed: %m");
|
||||
elog(PANIC, "WriteControlFile: write to control file failed: %m");
|
||||
}
|
||||
|
||||
if (pg_fsync(fd) != 0)
|
||||
elog(STOP, "WriteControlFile: fsync of control file failed: %m");
|
||||
elog(PANIC, "WriteControlFile: fsync of control file failed: %m");
|
||||
|
||||
close(fd);
|
||||
}
|
||||
@ -2159,10 +2159,10 @@ ReadControlFile(void)
|
||||
*/
|
||||
fd = BasicOpenFile(ControlFilePath, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
|
||||
if (fd < 0)
|
||||
elog(STOP, "could not open control file (%s): %m", ControlFilePath);
|
||||
elog(PANIC, "could not open control file (%s): %m", ControlFilePath);
|
||||
|
||||
if (read(fd, ControlFile, sizeof(ControlFileData)) != sizeof(ControlFileData))
|
||||
elog(STOP, "read from control file failed: %m");
|
||||
elog(PANIC, "read from control file failed: %m");
|
||||
|
||||
close(fd);
|
||||
|
||||
@ -2173,7 +2173,7 @@ ReadControlFile(void)
|
||||
* more enlightening than complaining about wrong CRC.
|
||||
*/
|
||||
if (ControlFile->pg_control_version != PG_CONTROL_VERSION)
|
||||
elog(STOP,
|
||||
elog(PANIC,
|
||||
"The database cluster was initialized with PG_CONTROL_VERSION %d,\n"
|
||||
"\tbut the server was compiled with PG_CONTROL_VERSION %d.\n"
|
||||
"\tIt looks like you need to initdb.",
|
||||
@ -2187,7 +2187,7 @@ ReadControlFile(void)
|
||||
FIN_CRC64(crc);
|
||||
|
||||
if (!EQ_CRC64(crc, ControlFile->crc))
|
||||
elog(STOP, "invalid checksum in control file");
|
||||
elog(PANIC, "invalid checksum in control file");
|
||||
|
||||
/*
|
||||
* Do compatibility checking immediately. We do this here for 2
|
||||
@ -2202,32 +2202,32 @@ ReadControlFile(void)
|
||||
* compatibility items because they can affect sort order of indexes.)
|
||||
*/
|
||||
if (ControlFile->catalog_version_no != CATALOG_VERSION_NO)
|
||||
elog(STOP,
|
||||
elog(PANIC,
|
||||
"The database cluster was initialized with CATALOG_VERSION_NO %d,\n"
|
||||
"\tbut the backend was compiled with CATALOG_VERSION_NO %d.\n"
|
||||
"\tIt looks like you need to initdb.",
|
||||
ControlFile->catalog_version_no, CATALOG_VERSION_NO);
|
||||
if (ControlFile->blcksz != BLCKSZ)
|
||||
elog(STOP,
|
||||
elog(PANIC,
|
||||
"The database cluster was initialized with BLCKSZ %d,\n"
|
||||
"\tbut the backend was compiled with BLCKSZ %d.\n"
|
||||
"\tIt looks like you need to initdb.",
|
||||
ControlFile->blcksz, BLCKSZ);
|
||||
if (ControlFile->relseg_size != RELSEG_SIZE)
|
||||
elog(STOP,
|
||||
elog(PANIC,
|
||||
"The database cluster was initialized with RELSEG_SIZE %d,\n"
|
||||
"\tbut the backend was compiled with RELSEG_SIZE %d.\n"
|
||||
"\tIt looks like you need to initdb.",
|
||||
ControlFile->relseg_size, RELSEG_SIZE);
|
||||
#ifdef USE_LOCALE
|
||||
if (setlocale(LC_COLLATE, ControlFile->lc_collate) == NULL)
|
||||
elog(STOP,
|
||||
elog(PANIC,
|
||||
"The database cluster was initialized with LC_COLLATE '%s',\n"
|
||||
"\twhich is not recognized by setlocale().\n"
|
||||
"\tIt looks like you need to initdb.",
|
||||
ControlFile->lc_collate);
|
||||
if (setlocale(LC_CTYPE, ControlFile->lc_ctype) == NULL)
|
||||
elog(STOP,
|
||||
elog(PANIC,
|
||||
"The database cluster was initialized with LC_CTYPE '%s',\n"
|
||||
"\twhich is not recognized by setlocale().\n"
|
||||
"\tIt looks like you need to initdb.",
|
||||
@ -2235,7 +2235,7 @@ ReadControlFile(void)
|
||||
#else /* not USE_LOCALE */
|
||||
if (strcmp(ControlFile->lc_collate, "C") != 0 ||
|
||||
strcmp(ControlFile->lc_ctype, "C") != 0)
|
||||
elog(STOP,
|
||||
elog(PANIC,
|
||||
"The database cluster was initialized with LC_COLLATE '%s' and\n"
|
||||
"\tLC_CTYPE '%s', but the server was compiled without locale support.\n"
|
||||
"\tIt looks like you need to initdb or recompile.",
|
||||
@ -2256,7 +2256,7 @@ UpdateControlFile(void)
|
||||
|
||||
fd = BasicOpenFile(ControlFilePath, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
|
||||
if (fd < 0)
|
||||
elog(STOP, "could not open control file (%s): %m", ControlFilePath);
|
||||
elog(PANIC, "could not open control file (%s): %m", ControlFilePath);
|
||||
|
||||
errno = 0;
|
||||
if (write(fd, ControlFile, sizeof(ControlFileData)) != sizeof(ControlFileData))
|
||||
@ -2264,11 +2264,11 @@ UpdateControlFile(void)
|
||||
/* if write didn't set errno, assume problem is no disk space */
|
||||
if (errno == 0)
|
||||
errno = ENOSPC;
|
||||
elog(STOP, "write to control file failed: %m");
|
||||
elog(PANIC, "write to control file failed: %m");
|
||||
}
|
||||
|
||||
if (pg_fsync(fd) != 0)
|
||||
elog(STOP, "fsync of control file failed: %m");
|
||||
elog(PANIC, "fsync of control file failed: %m");
|
||||
|
||||
close(fd);
|
||||
}
|
||||
@ -2408,11 +2408,11 @@ BootStrapXLOG(void)
|
||||
/* if write didn't set errno, assume problem is no disk space */
|
||||
if (errno == 0)
|
||||
errno = ENOSPC;
|
||||
elog(STOP, "BootStrapXLOG failed to write log file: %m");
|
||||
elog(PANIC, "BootStrapXLOG failed to write log file: %m");
|
||||
}
|
||||
|
||||
if (pg_fsync(openLogFile) != 0)
|
||||
elog(STOP, "BootStrapXLOG failed to fsync log file: %m");
|
||||
elog(PANIC, "BootStrapXLOG failed to fsync log file: %m");
|
||||
|
||||
close(openLogFile);
|
||||
openLogFile = -1;
|
||||
@ -2478,7 +2478,7 @@ StartupXLOG(void)
|
||||
ControlFile->state < DB_SHUTDOWNED ||
|
||||
ControlFile->state > DB_IN_PRODUCTION ||
|
||||
!XRecOffIsValid(ControlFile->checkPoint.xrecoff))
|
||||
elog(STOP, "control file context is broken");
|
||||
elog(PANIC, "control file context is broken");
|
||||
|
||||
if (ControlFile->state == DB_SHUTDOWNED)
|
||||
elog(LOG, "database system was shut down at %s",
|
||||
@ -2517,7 +2517,7 @@ StartupXLOG(void)
|
||||
InRecovery = true; /* force recovery even if SHUTDOWNED */
|
||||
}
|
||||
else
|
||||
elog(STOP, "unable to locate a valid checkpoint record");
|
||||
elog(PANIC, "unable to locate a valid checkpoint record");
|
||||
}
|
||||
LastRec = RecPtr = checkPointLoc;
|
||||
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
|
||||
@ -2530,7 +2530,7 @@ StartupXLOG(void)
|
||||
elog(LOG, "next transaction id: %u; next oid: %u",
|
||||
checkPoint.nextXid, checkPoint.nextOid);
|
||||
if (!TransactionIdIsNormal(checkPoint.nextXid))
|
||||
elog(STOP, "invalid next transaction id");
|
||||
elog(PANIC, "invalid next transaction id");
|
||||
|
||||
ShmemVariableCache->nextXid = checkPoint.nextXid;
|
||||
ShmemVariableCache->nextOid = checkPoint.nextOid;
|
||||
@ -2541,7 +2541,7 @@ StartupXLOG(void)
|
||||
XLogCtl->RedoRecPtr = checkPoint.redo;
|
||||
|
||||
if (XLByteLT(RecPtr, checkPoint.redo))
|
||||
elog(STOP, "invalid redo in checkpoint record");
|
||||
elog(PANIC, "invalid redo in checkpoint record");
|
||||
if (checkPoint.undo.xrecoff == 0)
|
||||
checkPoint.undo = RecPtr;
|
||||
|
||||
@ -2549,7 +2549,7 @@ StartupXLOG(void)
|
||||
XLByteLT(checkPoint.redo, RecPtr))
|
||||
{
|
||||
if (wasShutdown)
|
||||
elog(STOP, "invalid redo/undo record in shutdown checkpoint");
|
||||
elog(PANIC, "invalid redo/undo record in shutdown checkpoint");
|
||||
InRecovery = true;
|
||||
}
|
||||
else if (ControlFile->state != DB_SHUTDOWNED)
|
||||
@ -2568,7 +2568,7 @@ StartupXLOG(void)
|
||||
|
||||
/* Is REDO required ? */
|
||||
if (XLByteLT(checkPoint.redo, RecPtr))
|
||||
record = ReadRecord(&(checkPoint.redo), STOP, buffer);
|
||||
record = ReadRecord(&(checkPoint.redo), PANIC, buffer);
|
||||
else
|
||||
{
|
||||
/* read past CheckPoint record */
|
||||
@ -2600,7 +2600,7 @@ StartupXLOG(void)
|
||||
strcat(buf, " - ");
|
||||
RmgrTable[record->xl_rmid].rm_desc(buf,
|
||||
record->xl_info, XLogRecGetData(record));
|
||||
elog(DEBUG, "%s", buf);
|
||||
elog(LOG, "%s", buf);
|
||||
}
|
||||
|
||||
if (record->xl_info & XLR_BKP_BLOCK_MASK)
|
||||
@ -2622,7 +2622,7 @@ StartupXLOG(void)
|
||||
* Init xlog buffer cache using the block containing the last valid
|
||||
* record from the previous incarnation.
|
||||
*/
|
||||
record = ReadRecord(&LastRec, STOP, buffer);
|
||||
record = ReadRecord(&LastRec, PANIC, buffer);
|
||||
EndOfLog = EndRecPtr;
|
||||
XLByteToPrevSeg(EndOfLog, openLogId, openLogSeg);
|
||||
openLogFile = XLogFileOpen(openLogId, openLogSeg, false);
|
||||
@ -2701,7 +2701,7 @@ StartupXLOG(void)
|
||||
RecPtr.xlogid, RecPtr.xrecoff);
|
||||
do
|
||||
{
|
||||
record = ReadRecord(&RecPtr, STOP, buffer);
|
||||
record = ReadRecord(&RecPtr, PANIC, buffer);
|
||||
if (TransactionIdIsValid(record->xl_xid) &&
|
||||
!TransactionIdDidCommit(record->xl_xid))
|
||||
RmgrTable[record->xl_rmid].rm_undo(EndRecPtr, record);
|
||||
@ -2995,7 +2995,7 @@ CreateCheckPoint(bool shutdown)
|
||||
checkPoint.undo = GetUndoRecPtr();
|
||||
|
||||
if (shutdown && checkPoint.undo.xrecoff != 0)
|
||||
elog(STOP, "active transaction while database system is shutting down");
|
||||
elog(PANIC, "active transaction while database system is shutting down");
|
||||
#endif
|
||||
|
||||
/*
|
||||
@ -3043,7 +3043,7 @@ CreateCheckPoint(bool shutdown)
|
||||
* recptr = end of actual checkpoint record.
|
||||
*/
|
||||
if (shutdown && !XLByteEQ(checkPoint.redo, ProcLastRecPtr))
|
||||
elog(STOP, "concurrent transaction log activity while database system is shutting down");
|
||||
elog(PANIC, "concurrent transaction log activity while database system is shutting down");
|
||||
|
||||
/*
|
||||
* Select point at which we can truncate the log, which we base on the
|
||||
@ -3297,12 +3297,12 @@ assign_xlog_sync_method(const char *method)
|
||||
if (openLogFile >= 0)
|
||||
{
|
||||
if (pg_fsync(openLogFile) != 0)
|
||||
elog(STOP, "fsync of log file %u, segment %u failed: %m",
|
||||
elog(PANIC, "fsync of log file %u, segment %u failed: %m",
|
||||
openLogId, openLogSeg);
|
||||
if (open_sync_bit != new_sync_bit)
|
||||
{
|
||||
if (close(openLogFile) != 0)
|
||||
elog(STOP, "close of log file %u, segment %u failed: %m",
|
||||
elog(PANIC, "close of log file %u, segment %u failed: %m",
|
||||
openLogId, openLogSeg);
|
||||
openLogFile = -1;
|
||||
}
|
||||
@ -3323,13 +3323,13 @@ issue_xlog_fsync(void)
|
||||
{
|
||||
case SYNC_METHOD_FSYNC:
|
||||
if (pg_fsync(openLogFile) != 0)
|
||||
elog(STOP, "fsync of log file %u, segment %u failed: %m",
|
||||
elog(PANIC, "fsync of log file %u, segment %u failed: %m",
|
||||
openLogId, openLogSeg);
|
||||
break;
|
||||
#ifdef HAVE_FDATASYNC
|
||||
case SYNC_METHOD_FDATASYNC:
|
||||
if (pg_fdatasync(openLogFile) != 0)
|
||||
elog(STOP, "fdatasync of log file %u, segment %u failed: %m",
|
||||
elog(PANIC, "fdatasync of log file %u, segment %u failed: %m",
|
||||
openLogId, openLogSeg);
|
||||
break;
|
||||
#endif
|
||||
@ -3337,7 +3337,7 @@ issue_xlog_fsync(void)
|
||||
/* write synced it already */
|
||||
break;
|
||||
default:
|
||||
elog(STOP, "bogus wal_sync_method %d", sync_method);
|
||||
elog(PANIC, "bogus wal_sync_method %d", sync_method);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -6,7 +6,7 @@
|
||||
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.21 2001/10/25 05:49:22 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.22 2002/03/02 21:39:20 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -249,7 +249,7 @@ _xl_remove_hash_entry(XLogRelDesc *rdesc)
|
||||
hentry = (XLogRelCacheEntry *) hash_search(_xlrelcache,
|
||||
(void *) &(rdesc->reldata.rd_node), HASH_REMOVE, NULL);
|
||||
if (hentry == NULL)
|
||||
elog(STOP, "_xl_remove_hash_entry: file was not found in cache");
|
||||
elog(PANIC, "_xl_remove_hash_entry: file was not found in cache");
|
||||
|
||||
if (rdesc->reldata.rd_fd >= 0)
|
||||
smgrclose(DEFAULT_SMGR, &(rdesc->reldata));
|
||||
@ -346,10 +346,10 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
|
||||
hash_search(_xlrelcache, (void *) &rnode, HASH_ENTER, &found);
|
||||
|
||||
if (hentry == NULL)
|
||||
elog(STOP, "XLogOpenRelation: out of memory for cache");
|
||||
elog(PANIC, "XLogOpenRelation: out of memory for cache");
|
||||
|
||||
if (found)
|
||||
elog(STOP, "XLogOpenRelation: file found on insert into cache");
|
||||
elog(PANIC, "XLogOpenRelation: file found on insert into cache");
|
||||
|
||||
hentry->rdesc = res;
|
||||
|
||||
|
@ -9,7 +9,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootparse.y,v 1.39 2001/09/29 04:02:22 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootparse.y,v 1.40 2002/03/02 21:39:20 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -54,8 +54,7 @@ static void
|
||||
do_start()
|
||||
{
|
||||
StartTransactionCommand();
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "start transaction");
|
||||
elog(DEBUG3, "start transaction");
|
||||
}
|
||||
|
||||
|
||||
@ -63,8 +62,7 @@ static void
|
||||
do_end()
|
||||
{
|
||||
CommitTransactionCommand();
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "commit transaction");
|
||||
elog(DEBUG3, "commit transaction");
|
||||
if (isatty(0))
|
||||
{
|
||||
printf("bootstrap> ");
|
||||
@ -154,15 +152,12 @@ Boot_CreateStmt:
|
||||
{
|
||||
do_start();
|
||||
numattr = 0;
|
||||
if (DebugMode)
|
||||
{
|
||||
if ($2)
|
||||
elog(DEBUG, "creating bootstrap relation %s...",
|
||||
LexIDStr($4));
|
||||
else
|
||||
elog(DEBUG, "creating relation %s...",
|
||||
LexIDStr($4));
|
||||
}
|
||||
if ($2)
|
||||
elog(DEBUG3, "creating bootstrap relation %s...",
|
||||
LexIDStr($4));
|
||||
else
|
||||
elog(DEBUG3, "creating relation %s...",
|
||||
LexIDStr($4));
|
||||
}
|
||||
boot_typelist
|
||||
{
|
||||
@ -179,7 +174,7 @@ Boot_CreateStmt:
|
||||
|
||||
if (reldesc)
|
||||
{
|
||||
elog(DEBUG, "create bootstrap: warning, open relation exists, closing first");
|
||||
elog(DEBUG3, "create bootstrap: warning, open relation exists, closing first");
|
||||
closerel(NULL);
|
||||
}
|
||||
|
||||
@ -187,8 +182,7 @@ Boot_CreateStmt:
|
||||
reldesc = heap_create(LexIDStr($4), tupdesc,
|
||||
false, true, true);
|
||||
reldesc->rd_rel->relhasoids = ! ($3);
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "bootstrap relation created");
|
||||
elog(DEBUG3, "bootstrap relation created");
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -202,8 +196,7 @@ Boot_CreateStmt:
|
||||
! ($3),
|
||||
false,
|
||||
true);
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "relation created with oid %u", id);
|
||||
elog(DEBUG3, "relation created with oid %u", id);
|
||||
}
|
||||
do_end();
|
||||
}
|
||||
@ -213,13 +206,10 @@ Boot_InsertStmt:
|
||||
INSERT_TUPLE optoideq
|
||||
{
|
||||
do_start();
|
||||
if (DebugMode)
|
||||
{
|
||||
if ($2)
|
||||
elog(DEBUG, "inserting row with oid %u...", $2);
|
||||
else
|
||||
elog(DEBUG, "inserting row...");
|
||||
}
|
||||
if ($2)
|
||||
elog(DEBUG3, "inserting row with oid %u...", $2);
|
||||
else
|
||||
elog(DEBUG3, "inserting row...");
|
||||
num_columns_read = 0;
|
||||
}
|
||||
LPAREN boot_tuplelist RPAREN
|
||||
|
@ -8,7 +8,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.121 2002/02/23 01:31:34 petere Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.122 2002/03/02 21:39:21 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -140,8 +140,6 @@ Form_pg_attribute attrtypes[MAXATTR]; /* points to attribute info */
|
||||
static Datum values[MAXATTR]; /* corresponding attribute values */
|
||||
int numattr; /* number of attributes for cur. rel */
|
||||
|
||||
int DebugMode;
|
||||
|
||||
static MemoryContext nogc = NULL; /* special no-gc mem context */
|
||||
|
||||
extern int optind;
|
||||
@ -188,8 +186,8 @@ usage(void)
|
||||
{
|
||||
fprintf(stderr,
|
||||
gettext("Usage:\n"
|
||||
" postgres -boot [-d] [-D datadir] [-F] [-o file] [-x num] dbname\n"
|
||||
" -d debug mode\n"
|
||||
" postgres -boot [-d level] [-D datadir] [-F] [-o file] [-x num] dbname\n"
|
||||
" -d 1-5 debug mode\n"
|
||||
" -D datadir data directory\n"
|
||||
" -F turn off fsync\n"
|
||||
" -o file send debug output to file\n"
|
||||
@ -258,9 +256,17 @@ BootstrapMain(int argc, char *argv[])
|
||||
potential_DataDir = optarg;
|
||||
break;
|
||||
case 'd':
|
||||
DebugMode = true; /* print out debugging info while
|
||||
* parsing */
|
||||
{
|
||||
/* Turn on debugging for the postmaster. */
|
||||
char *debugstr = palloc(strlen("debug") + strlen(optarg) + 1);
|
||||
sprintf(debugstr, "debug%s", optarg);
|
||||
/* We use PGC_S_SESSION because we will reset in backend */
|
||||
SetConfigOption("server_min_messages", debugstr, PGC_POSTMASTER, PGC_S_ARGV);
|
||||
SetConfigOption("client_min_messages", debugstr, PGC_POSTMASTER, PGC_S_ARGV);
|
||||
pfree(debugstr);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 'F':
|
||||
SetConfigOption("fsync", "false", PGC_POSTMASTER, PGC_S_ARGV);
|
||||
break;
|
||||
@ -392,7 +398,7 @@ BootstrapMain(int argc, char *argv[])
|
||||
proc_exit(0); /* done */
|
||||
|
||||
default:
|
||||
elog(STOP, "Unsupported XLOG op %d", xlogop);
|
||||
elog(PANIC, "Unsupported XLOG op %d", xlogop);
|
||||
proc_exit(0);
|
||||
}
|
||||
|
||||
@ -495,9 +501,8 @@ boot_openrel(char *relname)
|
||||
if (reldesc != NULL)
|
||||
closerel(NULL);
|
||||
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "open relation %s, attrsize %d", relname ? relname : "(null)",
|
||||
(int) ATTRIBUTE_TUPLE_SIZE);
|
||||
elog(DEBUG3, "open relation %s, attrsize %d", relname ? relname : "(null)",
|
||||
(int) ATTRIBUTE_TUPLE_SIZE);
|
||||
|
||||
reldesc = heap_openr(relname, NoLock);
|
||||
numattr = reldesc->rd_rel->relnatts;
|
||||
@ -521,14 +526,12 @@ boot_openrel(char *relname)
|
||||
else
|
||||
attrtypes[i]->attisset = false;
|
||||
|
||||
if (DebugMode)
|
||||
{
|
||||
Form_pg_attribute at = attrtypes[i];
|
||||
|
||||
elog(DEBUG, "create attribute %d name %s len %d num %d type %u",
|
||||
elog(DEBUG3, "create attribute %d name %s len %d num %d type %u",
|
||||
i, NameStr(at->attname), at->attlen, at->attnum,
|
||||
at->atttypid
|
||||
);
|
||||
at->atttypid);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -558,8 +561,7 @@ closerel(char *name)
|
||||
elog(ERROR, "no open relation to close");
|
||||
else
|
||||
{
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "close relation %s", relname ? relname : "(null)");
|
||||
elog(DEBUG3, "close relation %s", relname ? relname : "(null)");
|
||||
heap_close(reldesc, NoLock);
|
||||
reldesc = (Relation) NULL;
|
||||
}
|
||||
@ -583,7 +585,7 @@ DefineAttr(char *name, char *type, int attnum)
|
||||
|
||||
if (reldesc != NULL)
|
||||
{
|
||||
elog(DEBUG, "warning: no open relations allowed with 'create' command");
|
||||
elog(LOG, "warning: no open relations allowed with 'create' command");
|
||||
closerel(relname);
|
||||
}
|
||||
|
||||
@ -594,8 +596,7 @@ DefineAttr(char *name, char *type, int attnum)
|
||||
{
|
||||
attrtypes[attnum]->atttypid = Ap->am_oid;
|
||||
namestrcpy(&attrtypes[attnum]->attname, name);
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "column %s %s", NameStr(attrtypes[attnum]->attname), type);
|
||||
elog(DEBUG3, "column %s %s", NameStr(attrtypes[attnum]->attname), type);
|
||||
attrtypes[attnum]->attnum = 1 + attnum; /* fillatt */
|
||||
attlen = attrtypes[attnum]->attlen = Ap->am_typ.typlen;
|
||||
attrtypes[attnum]->attbyval = Ap->am_typ.typbyval;
|
||||
@ -606,8 +607,7 @@ DefineAttr(char *name, char *type, int attnum)
|
||||
{
|
||||
attrtypes[attnum]->atttypid = Procid[typeoid].oid;
|
||||
namestrcpy(&attrtypes[attnum]->attname, name);
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "column %s %s", NameStr(attrtypes[attnum]->attname), type);
|
||||
elog(DEBUG3, "column %s %s", NameStr(attrtypes[attnum]->attname), type);
|
||||
attrtypes[attnum]->attnum = 1 + attnum; /* fillatt */
|
||||
attlen = attrtypes[attnum]->attlen = Procid[typeoid].len;
|
||||
attrtypes[attnum]->attstorage = 'p';
|
||||
@ -655,8 +655,7 @@ InsertOneTuple(Oid objectid)
|
||||
TupleDesc tupDesc;
|
||||
int i;
|
||||
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "inserting row oid %u, %d columns", objectid, numattr);
|
||||
elog(DEBUG3, "inserting row oid %u, %d columns", objectid, numattr);
|
||||
|
||||
tupDesc = CreateTupleDesc(numattr, attrtypes);
|
||||
tuple = heap_formtuple(tupDesc, values, Blanks);
|
||||
@ -666,8 +665,7 @@ InsertOneTuple(Oid objectid)
|
||||
tuple->t_data->t_oid = objectid;
|
||||
heap_insert(reldesc, tuple);
|
||||
heap_freetuple(tuple);
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "row inserted");
|
||||
elog(DEBUG3, "row inserted");
|
||||
|
||||
/*
|
||||
* Reset blanks for next tuple
|
||||
@ -689,15 +687,13 @@ InsertOneValue(char *value, int i)
|
||||
|
||||
AssertArg(i >= 0 || i < MAXATTR);
|
||||
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "inserting column %d value '%s'", i, value);
|
||||
elog(DEBUG3, "inserting column %d value '%s'", i, value);
|
||||
|
||||
if (Typ != (struct typmap **) NULL)
|
||||
{
|
||||
struct typmap *ap;
|
||||
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "Typ != NULL");
|
||||
elog(DEBUG3, "Typ != NULL");
|
||||
app = Typ;
|
||||
while (*app && (*app)->am_oid != reldesc->rd_att->attrs[i]->atttypid)
|
||||
++app;
|
||||
@ -715,8 +711,7 @@ InsertOneValue(char *value, int i)
|
||||
values[i],
|
||||
ObjectIdGetDatum(ap->am_typ.typelem),
|
||||
Int32GetDatum(-1)));
|
||||
if (DebugMode)
|
||||
elog(DEBUG, " -> %s", prt);
|
||||
elog(DEBUG3, " -> %s", prt);
|
||||
pfree(prt);
|
||||
}
|
||||
else
|
||||
@ -728,8 +723,7 @@ InsertOneValue(char *value, int i)
|
||||
}
|
||||
if (typeindex >= n_types)
|
||||
elog(ERROR, "type oid %u not found", attrtypes[i]->atttypid);
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "Typ == NULL, typeindex = %u", typeindex);
|
||||
elog(DEBUG3, "Typ == NULL, typeindex = %u", typeindex);
|
||||
values[i] = OidFunctionCall3(Procid[typeindex].inproc,
|
||||
CStringGetDatum(value),
|
||||
ObjectIdGetDatum(Procid[typeindex].elem),
|
||||
@ -738,12 +732,10 @@ InsertOneValue(char *value, int i)
|
||||
values[i],
|
||||
ObjectIdGetDatum(Procid[typeindex].elem),
|
||||
Int32GetDatum(-1)));
|
||||
if (DebugMode)
|
||||
elog(DEBUG, " -> %s", prt);
|
||||
elog(DEBUG3, " -> %s", prt);
|
||||
pfree(prt);
|
||||
}
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "inserted");
|
||||
elog(DEBUG3, "inserted");
|
||||
}
|
||||
|
||||
/* ----------------
|
||||
@ -753,8 +745,7 @@ InsertOneValue(char *value, int i)
|
||||
void
|
||||
InsertOneNull(int i)
|
||||
{
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "inserting column %d NULL", i);
|
||||
elog(DEBUG3, "inserting column %d NULL", i);
|
||||
Assert(i >= 0 || i < MAXATTR);
|
||||
values[i] = PointerGetDatum(NULL);
|
||||
Blanks[i] = 'n';
|
||||
@ -841,8 +832,7 @@ gettype(char *type)
|
||||
if (strncmp(type, Procid[i].name, NAMEDATALEN) == 0)
|
||||
return i;
|
||||
}
|
||||
if (DebugMode)
|
||||
elog(DEBUG, "external type: %s", type);
|
||||
elog(DEBUG3, "external type: %s", type);
|
||||
rel = heap_openr(TypeRelationName, NoLock);
|
||||
scan = heap_beginscan(rel, 0, SnapshotNow, 0, (ScanKey) NULL);
|
||||
i = 0;
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.54 2002/02/18 23:11:07 petere Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.55 2002/03/02 21:39:22 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* See acl.h.
|
||||
@ -66,11 +66,11 @@ dumpacl(Acl *acl)
|
||||
int i;
|
||||
AclItem *aip;
|
||||
|
||||
elog(DEBUG, "acl size = %d, # acls = %d",
|
||||
elog(LOG, "acl size = %d, # acls = %d",
|
||||
ACL_SIZE(acl), ACL_NUM(acl));
|
||||
aip = ACL_DAT(acl);
|
||||
for (i = 0; i < ACL_NUM(acl); ++i)
|
||||
elog(DEBUG, " acl[%d]: %s", i,
|
||||
elog(LOG, " acl[%d]: %s", i,
|
||||
DatumGetCString(DirectFunctionCall1(aclitemout,
|
||||
PointerGetDatum(aip + i))));
|
||||
}
|
||||
@ -291,7 +291,7 @@ find_function_with_arglist(char *name, List *arguments)
|
||||
Oid argoids[FUNC_MAX_ARGS];
|
||||
int i;
|
||||
int16 argcount;
|
||||
|
||||
|
||||
MemSet(argoids, 0, FUNC_MAX_ARGS * sizeof(Oid));
|
||||
argcount = length(arguments);
|
||||
if (argcount > FUNC_MAX_ARGS)
|
||||
@ -302,9 +302,9 @@ find_function_with_arglist(char *name, List *arguments)
|
||||
{
|
||||
TypeName *t = (TypeName *) lfirst(arguments);
|
||||
char *typnam = TypeNameToInternalName(t);
|
||||
|
||||
|
||||
arguments = lnext(arguments);
|
||||
|
||||
|
||||
if (strcmp(typnam, "opaque") == 0)
|
||||
argoids[i] = InvalidOid;
|
||||
else
|
||||
@ -327,7 +327,7 @@ find_function_with_arglist(char *name, List *arguments)
|
||||
func_error(NULL, name, argcount, argoids, NULL);
|
||||
|
||||
return oid;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
@ -658,7 +658,7 @@ aclcheck(Acl *acl, AclId id, AclIdType idtype, AclMode mode)
|
||||
*/
|
||||
if (!acl)
|
||||
{
|
||||
elog(DEBUG, "aclcheck: null ACL, returning OK");
|
||||
elog(LOG, "aclcheck: null ACL, returning OK");
|
||||
return ACLCHECK_OK;
|
||||
}
|
||||
|
||||
@ -673,7 +673,7 @@ aclcheck(Acl *acl, AclId id, AclIdType idtype, AclMode mode)
|
||||
*/
|
||||
if (num < 1)
|
||||
{
|
||||
elog(DEBUG, "aclcheck: zero-length ACL, returning OK");
|
||||
elog(LOG, "aclcheck: zero-length ACL, returning OK");
|
||||
return ACLCHECK_OK;
|
||||
}
|
||||
|
||||
@ -686,7 +686,7 @@ aclcheck(Acl *acl, AclId id, AclIdType idtype, AclMode mode)
|
||||
if (aidat->ai_mode & mode)
|
||||
{
|
||||
#ifdef ACLDEBUG
|
||||
elog(DEBUG, "aclcheck: using world=%d", aidat->ai_mode);
|
||||
elog(LOG, "aclcheck: using world=%d", aidat->ai_mode);
|
||||
#endif
|
||||
return ACLCHECK_OK;
|
||||
}
|
||||
@ -702,7 +702,7 @@ aclcheck(Acl *acl, AclId id, AclIdType idtype, AclMode mode)
|
||||
if (aip->ai_id == id)
|
||||
{
|
||||
#ifdef ACLDEBUG
|
||||
elog(DEBUG, "aclcheck: found user %u/%d",
|
||||
elog(LOG, "aclcheck: found user %u/%d",
|
||||
aip->ai_id, aip->ai_mode);
|
||||
#endif
|
||||
if (aip->ai_mode & mode)
|
||||
@ -719,7 +719,7 @@ aclcheck(Acl *acl, AclId id, AclIdType idtype, AclMode mode)
|
||||
if (in_group(id, aip->ai_id))
|
||||
{
|
||||
#ifdef ACLDEBUG
|
||||
elog(DEBUG, "aclcheck: found group %u/%d",
|
||||
elog(LOG, "aclcheck: found group %u/%d",
|
||||
aip->ai_id, aip->ai_mode);
|
||||
#endif
|
||||
return ACLCHECK_OK;
|
||||
@ -740,7 +740,7 @@ aclcheck(Acl *acl, AclId id, AclIdType idtype, AclMode mode)
|
||||
if (aip->ai_id == id)
|
||||
{
|
||||
#ifdef ACLDEBUG
|
||||
elog(DEBUG, "aclcheck: found group %u/%d",
|
||||
elog(LOG, "aclcheck: found group %u/%d",
|
||||
aip->ai_id, aip->ai_mode);
|
||||
#endif
|
||||
if (aip->ai_mode & mode)
|
||||
@ -798,7 +798,7 @@ pg_aclcheck(char *relname, Oid userid, AclMode mode)
|
||||
!((Form_pg_shadow) GETSTRUCT(tuple))->usecatupd)
|
||||
{
|
||||
#ifdef ACLDEBUG
|
||||
elog(DEBUG, "pg_aclcheck: catalog update to \"%s\": permission denied",
|
||||
elog(LOG, "pg_aclcheck: catalog update to \"%s\": permission denied",
|
||||
relname);
|
||||
#endif
|
||||
ReleaseSysCache(tuple);
|
||||
@ -811,7 +811,7 @@ pg_aclcheck(char *relname, Oid userid, AclMode mode)
|
||||
if (((Form_pg_shadow) GETSTRUCT(tuple))->usesuper)
|
||||
{
|
||||
#ifdef ACLDEBUG
|
||||
elog(DEBUG, "pg_aclcheck: \"%s\" is superuser",
|
||||
elog(LOG, "pg_aclcheck: \"%s\" is superuser",
|
||||
usename);
|
||||
#endif
|
||||
ReleaseSysCache(tuple);
|
||||
@ -888,7 +888,7 @@ pg_ownercheck(Oid userid,
|
||||
if (((Form_pg_shadow) GETSTRUCT(tuple))->usesuper)
|
||||
{
|
||||
#ifdef ACLDEBUG
|
||||
elog(DEBUG, "pg_ownercheck: user \"%s\" is superuser",
|
||||
elog(LOG, "pg_ownercheck: user \"%s\" is superuser",
|
||||
usename);
|
||||
#endif
|
||||
ReleaseSysCache(tuple);
|
||||
@ -950,7 +950,7 @@ pg_oper_ownercheck(Oid userid, Oid oprid)
|
||||
if (((Form_pg_shadow) GETSTRUCT(tuple))->usesuper)
|
||||
{
|
||||
#ifdef ACLDEBUG
|
||||
elog(DEBUG, "pg_ownercheck: user \"%s\" is superuser",
|
||||
elog(LOG, "pg_ownercheck: user \"%s\" is superuser",
|
||||
usename);
|
||||
#endif
|
||||
ReleaseSysCache(tuple);
|
||||
@ -1001,7 +1001,7 @@ pg_func_ownercheck(Oid userid,
|
||||
if (((Form_pg_shadow) GETSTRUCT(tuple))->usesuper)
|
||||
{
|
||||
#ifdef ACLDEBUG
|
||||
elog(DEBUG, "pg_ownercheck: user \"%s\" is superuser",
|
||||
elog(LOG, "pg_ownercheck: user \"%s\" is superuser",
|
||||
usename);
|
||||
#endif
|
||||
ReleaseSysCache(tuple);
|
||||
@ -1053,7 +1053,7 @@ pg_aggr_ownercheck(Oid userid,
|
||||
if (((Form_pg_shadow) GETSTRUCT(tuple))->usesuper)
|
||||
{
|
||||
#ifdef ACLDEBUG
|
||||
elog(DEBUG, "pg_aggr_ownercheck: user \"%s\" is superuser",
|
||||
elog(LOG, "pg_aggr_ownercheck: user \"%s\" is superuser",
|
||||
usename);
|
||||
#endif
|
||||
ReleaseSysCache(tuple);
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.26 2002/02/18 16:04:14 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.27 2002/03/02 21:39:22 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -105,8 +105,7 @@ typedef struct
|
||||
#define swapInt(a,b) do {int _tmp; _tmp=a; a=b; b=_tmp;} while(0)
|
||||
#define swapDatum(a,b) do {Datum _tmp; _tmp=a; a=b; b=_tmp;} while(0)
|
||||
|
||||
|
||||
static int MESSAGE_LEVEL;
|
||||
static int elevel = -1;
|
||||
|
||||
/* context information for compare_scalars() */
|
||||
static FmgrInfo *datumCmpFn;
|
||||
@ -151,10 +150,10 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
|
||||
HeapTuple tuple;
|
||||
|
||||
if (vacstmt->verbose)
|
||||
MESSAGE_LEVEL = NOTICE;
|
||||
elevel = INFO;
|
||||
else
|
||||
MESSAGE_LEVEL = DEBUG;
|
||||
|
||||
elevel = DEBUG1;
|
||||
|
||||
/*
|
||||
* Begin a transaction for analyzing this relation.
|
||||
*
|
||||
@ -214,7 +213,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
|
||||
return;
|
||||
}
|
||||
|
||||
elog(MESSAGE_LEVEL, "Analyzing %s", RelationGetRelationName(onerel));
|
||||
elog(elevel, "Analyzing %s", RelationGetRelationName(onerel));
|
||||
|
||||
/*
|
||||
* Determine which columns to analyze
|
||||
|
@ -7,7 +7,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.81 2001/10/25 05:49:23 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.82 2002/03/02 21:39:22 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -150,7 +150,7 @@ void
|
||||
Async_Notify(char *relname)
|
||||
{
|
||||
if (Trace_notify)
|
||||
elog(DEBUG, "Async_Notify: %s", relname);
|
||||
elog(LOG, "Async_Notify: %s", relname);
|
||||
|
||||
/* no point in making duplicate entries in the list ... */
|
||||
if (!AsyncExistsPendingNotify(relname))
|
||||
@ -198,7 +198,7 @@ Async_Listen(char *relname, int pid)
|
||||
bool alreadyListener = false;
|
||||
|
||||
if (Trace_notify)
|
||||
elog(DEBUG, "Async_Listen: %s", relname);
|
||||
elog(LOG, "Async_Listen: %s", relname);
|
||||
|
||||
lRel = heap_openr(ListenerRelationName, AccessExclusiveLock);
|
||||
|
||||
@ -300,7 +300,7 @@ Async_Unlisten(char *relname, int pid)
|
||||
}
|
||||
|
||||
if (Trace_notify)
|
||||
elog(DEBUG, "Async_Unlisten %s", relname);
|
||||
elog(LOG, "Async_Unlisten %s", relname);
|
||||
|
||||
lRel = heap_openr(ListenerRelationName, AccessExclusiveLock);
|
||||
|
||||
@ -358,7 +358,7 @@ Async_UnlistenAll(void)
|
||||
ScanKeyData key[1];
|
||||
|
||||
if (Trace_notify)
|
||||
elog(DEBUG, "Async_UnlistenAll");
|
||||
elog(LOG, "Async_UnlistenAll");
|
||||
|
||||
lRel = heap_openr(ListenerRelationName, AccessExclusiveLock);
|
||||
tdesc = RelationGetDescr(lRel);
|
||||
@ -460,7 +460,7 @@ AtCommit_Notify(void)
|
||||
}
|
||||
|
||||
if (Trace_notify)
|
||||
elog(DEBUG, "AtCommit_Notify");
|
||||
elog(LOG, "AtCommit_Notify");
|
||||
|
||||
/* preset data to update notify column to MyProcPid */
|
||||
nulls[0] = nulls[1] = nulls[2] = ' ';
|
||||
@ -492,14 +492,14 @@ AtCommit_Notify(void)
|
||||
*/
|
||||
|
||||
if (Trace_notify)
|
||||
elog(DEBUG, "AtCommit_Notify: notifying self");
|
||||
elog(LOG, "AtCommit_Notify: notifying self");
|
||||
|
||||
NotifyMyFrontEnd(relname, listenerPID);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (Trace_notify)
|
||||
elog(DEBUG, "AtCommit_Notify: notifying pid %d",
|
||||
elog(LOG, "AtCommit_Notify: notifying pid %d",
|
||||
listenerPID);
|
||||
|
||||
/*
|
||||
@ -555,7 +555,7 @@ AtCommit_Notify(void)
|
||||
ClearPendingNotifies();
|
||||
|
||||
if (Trace_notify)
|
||||
elog(DEBUG, "AtCommit_Notify: done");
|
||||
elog(LOG, "AtCommit_Notify: done");
|
||||
}
|
||||
|
||||
/*
|
||||
@ -628,12 +628,12 @@ Async_NotifyHandler(SIGNAL_ARGS)
|
||||
{
|
||||
/* Here, it is finally safe to do stuff. */
|
||||
if (Trace_notify)
|
||||
elog(DEBUG, "Async_NotifyHandler: perform async notify");
|
||||
elog(LOG, "Async_NotifyHandler: perform async notify");
|
||||
|
||||
ProcessIncomingNotify();
|
||||
|
||||
if (Trace_notify)
|
||||
elog(DEBUG, "Async_NotifyHandler: done");
|
||||
elog(LOG, "Async_NotifyHandler: done");
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -700,12 +700,12 @@ EnableNotifyInterrupt(void)
|
||||
if (notifyInterruptOccurred)
|
||||
{
|
||||
if (Trace_notify)
|
||||
elog(DEBUG, "EnableNotifyInterrupt: perform async notify");
|
||||
elog(LOG, "EnableNotifyInterrupt: perform async notify");
|
||||
|
||||
ProcessIncomingNotify();
|
||||
|
||||
if (Trace_notify)
|
||||
elog(DEBUG, "EnableNotifyInterrupt: done");
|
||||
elog(LOG, "EnableNotifyInterrupt: done");
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -756,7 +756,7 @@ ProcessIncomingNotify(void)
|
||||
nulls[Natts_pg_listener];
|
||||
|
||||
if (Trace_notify)
|
||||
elog(DEBUG, "ProcessIncomingNotify");
|
||||
elog(LOG, "ProcessIncomingNotify");
|
||||
|
||||
set_ps_display("async_notify");
|
||||
|
||||
@ -792,7 +792,7 @@ ProcessIncomingNotify(void)
|
||||
/* Notify the frontend */
|
||||
|
||||
if (Trace_notify)
|
||||
elog(DEBUG, "ProcessIncomingNotify: received %s from %d",
|
||||
elog(LOG, "ProcessIncomingNotify: received %s from %d",
|
||||
relname, (int) sourcePID);
|
||||
|
||||
NotifyMyFrontEnd(relname, sourcePID);
|
||||
@ -834,7 +834,7 @@ ProcessIncomingNotify(void)
|
||||
set_ps_display("idle");
|
||||
|
||||
if (Trace_notify)
|
||||
elog(DEBUG, "ProcessIncomingNotify: done");
|
||||
elog(LOG, "ProcessIncomingNotify: done");
|
||||
}
|
||||
|
||||
/*
|
||||
@ -861,7 +861,7 @@ NotifyMyFrontEnd(char *relname, int32 listenerPID)
|
||||
*/
|
||||
}
|
||||
else
|
||||
elog(NOTICE, "NOTIFY for %s", relname);
|
||||
elog(INFO, "NOTIFY for %s", relname);
|
||||
}
|
||||
|
||||
/* Does pendingNotifies include the given relname? */
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.156 2002/02/27 19:34:38 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.157 2002/03/02 21:39:22 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* The PerformAddAttribute() code, like most of the relation
|
||||
@ -1501,8 +1501,7 @@ AlterTableDropConstraint(const char *relationName,
|
||||
constrName);
|
||||
/* Otherwise if more than one constraint deleted, notify */
|
||||
else if (deleted > 1)
|
||||
elog(NOTICE, "Multiple constraints dropped");
|
||||
|
||||
elog(INFO, "Multiple constraints dropped");
|
||||
}
|
||||
|
||||
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.81 2001/10/25 05:49:24 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.82 2002/03/02 21:39:22 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -425,7 +425,7 @@ MergeAttributes(List *schema, List *supers, bool istemp,
|
||||
* Yes, try to merge the two column definitions. They must
|
||||
* have the same type and typmod.
|
||||
*/
|
||||
elog(NOTICE, "CREATE TABLE: merging multiple inherited definitions of attribute \"%s\"",
|
||||
elog(INFO, "CREATE TABLE: merging multiple inherited definitions of attribute \"%s\"",
|
||||
attributeName);
|
||||
def = (ColumnDef *) nth(exist_attno - 1, inhSchema);
|
||||
if (strcmp(def->typename->name, attributeType) != 0 ||
|
||||
@ -564,7 +564,7 @@ MergeAttributes(List *schema, List *supers, bool istemp,
|
||||
* Yes, try to merge the two column definitions. They must
|
||||
* have the same type and typmod.
|
||||
*/
|
||||
elog(NOTICE, "CREATE TABLE: merging attribute \"%s\" with inherited definition",
|
||||
elog(INFO, "CREATE TABLE: merging attribute \"%s\" with inherited definition",
|
||||
attributeName);
|
||||
def = (ColumnDef *) nth(exist_attno - 1, inhSchema);
|
||||
if (strcmp(def->typename->name, attributeType) != 0 ||
|
||||
|
@ -10,7 +10,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.65 2002/02/18 23:11:10 petere Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.66 2002/03/02 21:39:23 momjian Exp $
|
||||
*
|
||||
* DESCRIPTION
|
||||
* The "DefineFoo" routines take the parse tree and pick out the
|
||||
@ -348,12 +348,12 @@ DefineOperator(char *oprName,
|
||||
else if (strcasecmp(defel->defname, "precedence") == 0)
|
||||
{
|
||||
/* NOT IMPLEMENTED (never worked in v4.2) */
|
||||
elog(NOTICE, "CREATE OPERATOR: precedence not implemented");
|
||||
elog(INFO, "CREATE OPERATOR: precedence not implemented");
|
||||
}
|
||||
else if (strcasecmp(defel->defname, "associativity") == 0)
|
||||
{
|
||||
/* NOT IMPLEMENTED (never worked in v4.2) */
|
||||
elog(NOTICE, "CREATE OPERATOR: associativity not implemented");
|
||||
elog(INFO, "CREATE OPERATOR: associativity not implemented");
|
||||
}
|
||||
else if (strcasecmp(defel->defname, "commutator") == 0)
|
||||
commutatorName = defGetString(defel);
|
||||
|
@ -5,7 +5,7 @@
|
||||
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994-5, Regents of the University of California
|
||||
*
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.68 2002/02/26 22:47:04 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.69 2002/03/02 21:39:23 momjian Exp $
|
||||
*
|
||||
*/
|
||||
|
||||
@ -59,7 +59,7 @@ ExplainQuery(Query *query, bool verbose, bool analyze, CommandDest dest)
|
||||
/* rewriter will not cope with utility statements */
|
||||
if (query->commandType == CMD_UTILITY)
|
||||
{
|
||||
elog(NOTICE, "Utility statements have no plan structure");
|
||||
elog(INFO, "Utility statements have no plan structure");
|
||||
return;
|
||||
}
|
||||
|
||||
@ -69,7 +69,7 @@ ExplainQuery(Query *query, bool verbose, bool analyze, CommandDest dest)
|
||||
/* In the case of an INSTEAD NOTHING, tell at least that */
|
||||
if (rewritten == NIL)
|
||||
{
|
||||
elog(NOTICE, "Query rewrites to nothing");
|
||||
elog(INFO, "Query rewrites to nothing");
|
||||
return;
|
||||
}
|
||||
|
||||
@ -94,9 +94,9 @@ ExplainOneQuery(Query *query, bool verbose, bool analyze, CommandDest dest)
|
||||
if (query->commandType == CMD_UTILITY)
|
||||
{
|
||||
if (query->utilityStmt && IsA(query->utilityStmt, NotifyStmt))
|
||||
elog(NOTICE, "QUERY PLAN:\n\nNOTIFY\n");
|
||||
elog(INFO, "QUERY PLAN:\n\nNOTIFY\n");
|
||||
else
|
||||
elog(NOTICE, "QUERY PLAN:\n\nUTILITY\n");
|
||||
elog(INFO, "QUERY PLAN:\n\nUTILITY\n");
|
||||
return;
|
||||
}
|
||||
|
||||
@ -152,7 +152,7 @@ ExplainOneQuery(Query *query, bool verbose, bool analyze, CommandDest dest)
|
||||
s = nodeToString(plan);
|
||||
if (s)
|
||||
{
|
||||
elog(NOTICE, "QUERY DUMP:\n\n%s", s);
|
||||
elog(INFO, "QUERY DUMP:\n\n%s", s);
|
||||
pfree(s);
|
||||
}
|
||||
}
|
||||
@ -165,7 +165,7 @@ ExplainOneQuery(Query *query, bool verbose, bool analyze, CommandDest dest)
|
||||
if (analyze)
|
||||
appendStringInfo(str, "Total runtime: %.2f msec\n",
|
||||
1000.0 * totaltime);
|
||||
elog(NOTICE, "QUERY PLAN:\n\n%s", str->data);
|
||||
elog(INFO, "QUERY PLAN:\n\n%s", str->data);
|
||||
pfree(str->data);
|
||||
pfree(str);
|
||||
}
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.68 2002/01/11 18:16:04 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.69 2002/03/02 21:39:23 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -879,7 +879,7 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record)
|
||||
sequence_magic *sm;
|
||||
|
||||
if (info != XLOG_SEQ_LOG)
|
||||
elog(STOP, "seq_redo: unknown op code %u", info);
|
||||
elog(PANIC, "seq_redo: unknown op code %u", info);
|
||||
|
||||
reln = XLogOpenRelation(true, RM_SEQ_ID, xlrec->node);
|
||||
if (!RelationIsValid(reln))
|
||||
@ -887,7 +887,7 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record)
|
||||
|
||||
buffer = XLogReadBuffer(true, reln, 0);
|
||||
if (!BufferIsValid(buffer))
|
||||
elog(STOP, "seq_redo: can't read block of %u/%u",
|
||||
elog(PANIC, "seq_redo: can't read block of %u/%u",
|
||||
xlrec->node.tblNode, xlrec->node.relNode);
|
||||
|
||||
page = (Page) BufferGetPage(buffer);
|
||||
@ -903,7 +903,7 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record)
|
||||
itemsz = MAXALIGN(itemsz);
|
||||
if (PageAddItem(page, (Item) item, itemsz,
|
||||
FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
|
||||
elog(STOP, "seq_redo: failed to add item to page");
|
||||
elog(PANIC, "seq_redo: failed to add item to page");
|
||||
|
||||
PageSetLSN(page, lsn);
|
||||
PageSetSUI(page, ThisStartUpID);
|
||||
|
@ -7,7 +7,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.102 2002/02/19 20:11:12 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.103 2002/03/02 21:39:23 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -368,7 +368,7 @@ DropTrigger(DropTrigStmt *stmt)
|
||||
elog(ERROR, "DropTrigger: there is no trigger %s on relation %s",
|
||||
stmt->trigname, stmt->relname);
|
||||
if (tgfound > 1)
|
||||
elog(NOTICE, "DropTrigger: found (and deleted) %d triggers %s on relation %s",
|
||||
elog(INFO, "DropTrigger: found (and deleted) %d triggers %s on relation %s",
|
||||
tgfound, stmt->trigname, stmt->relname);
|
||||
|
||||
/*
|
||||
@ -489,7 +489,7 @@ RelationRemoveTriggers(Relation rel)
|
||||
stmt.relname = pstrdup(RelationGetRelationName(refrel));
|
||||
heap_close(refrel, NoLock);
|
||||
|
||||
elog(NOTICE, "DROP TABLE implicitly drops referential integrity trigger from table \"%s\"", stmt.relname);
|
||||
elog(INFO, "DROP TABLE implicitly drops referential integrity trigger from table \"%s\"", stmt.relname);
|
||||
|
||||
DropTrigger(&stmt);
|
||||
|
||||
|
@ -6,7 +6,7 @@
|
||||
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.91 2002/03/01 22:45:08 petere Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.92 2002/03/02 21:39:23 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -547,7 +547,7 @@ AlterUser(AlterUserStmt *stmt)
|
||||
|
||||
/* changes to the flat password file cannot be rolled back */
|
||||
if (IsTransactionBlock() && password)
|
||||
elog(NOTICE, "ALTER USER: password changes cannot be rolled back");
|
||||
elog(INFO, "ALTER USER: password changes cannot be rolled back");
|
||||
|
||||
/*
|
||||
* Scan the pg_shadow relation to be certain the user exists. Note we
|
||||
@ -785,7 +785,7 @@ DropUser(DropUserStmt *stmt)
|
||||
elog(ERROR, "DROP USER: permission denied");
|
||||
|
||||
if (IsTransactionBlock())
|
||||
elog(NOTICE, "DROP USER cannot be rolled back completely");
|
||||
elog(INFO, "DROP USER cannot be rolled back completely");
|
||||
|
||||
/*
|
||||
* Scan the pg_shadow relation to find the usesysid of the user to be
|
||||
|
@ -13,7 +13,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.214 2002/02/19 20:11:12 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.215 2002/03/02 21:39:23 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -108,7 +108,7 @@ typedef struct VRelStats
|
||||
|
||||
static MemoryContext vac_context = NULL;
|
||||
|
||||
static int MESSAGE_LEVEL; /* message level */
|
||||
static int elevel = -1;
|
||||
|
||||
static TransactionId OldestXmin;
|
||||
static TransactionId FreezeLimit;
|
||||
@ -192,10 +192,10 @@ vacuum(VacuumStmt *vacstmt)
|
||||
pgstat_vacuum_tabstat();
|
||||
|
||||
if (vacstmt->verbose)
|
||||
MESSAGE_LEVEL = NOTICE;
|
||||
elevel = INFO;
|
||||
else
|
||||
MESSAGE_LEVEL = DEBUG;
|
||||
|
||||
elevel = DEBUG1;
|
||||
|
||||
/*
|
||||
* Create special memory context for cross-transaction storage.
|
||||
*
|
||||
@ -964,7 +964,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
|
||||
vac_init_rusage(&ru0);
|
||||
|
||||
relname = RelationGetRelationName(onerel);
|
||||
elog(MESSAGE_LEVEL, "--Relation %s--", relname);
|
||||
elog(elevel, "--Relation %s--", relname);
|
||||
|
||||
empty_pages = new_pages = changed_pages = empty_end_pages = 0;
|
||||
num_tuples = tups_vacuumed = nkeep = nunused = 0;
|
||||
@ -1275,7 +1275,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
|
||||
pfree(vtlinks);
|
||||
}
|
||||
|
||||
elog(MESSAGE_LEVEL, "Pages %u: Changed %u, reaped %u, Empty %u, New %u; \
|
||||
elog(elevel, "Pages %u: Changed %u, reaped %u, Empty %u, New %u; \
|
||||
Tup %.0f: Vac %.0f, Keep/VTL %.0f/%u, UnUsed %.0f, MinLen %lu, MaxLen %lu; \
|
||||
Re-using: Free/Avail. Space %.0f/%.0f; EndEmpty/Avail. Pages %u/%u.\n\t%s",
|
||||
nblocks, changed_pages, vacuum_pages->num_pages, empty_pages,
|
||||
@ -1849,7 +1849,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
|
||||
InvalidOffsetNumber, LP_USED);
|
||||
if (newoff == InvalidOffsetNumber)
|
||||
{
|
||||
elog(STOP, "moving chain: failed to add item with len = %lu to page %u",
|
||||
elog(PANIC, "moving chain: failed to add item with len = %lu to page %u",
|
||||
(unsigned long) tuple_len, destvacpage->blkno);
|
||||
}
|
||||
newitemid = PageGetItemId(ToPage, newoff);
|
||||
@ -1972,7 +1972,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
|
||||
InvalidOffsetNumber, LP_USED);
|
||||
if (newoff == InvalidOffsetNumber)
|
||||
{
|
||||
elog(STOP, "failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)",
|
||||
elog(PANIC, "failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)",
|
||||
(unsigned long) tuple_len,
|
||||
cur_page->blkno, (unsigned long) cur_page->free,
|
||||
cur_page->offsets_used, cur_page->offsets_free);
|
||||
@ -2197,7 +2197,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
|
||||
}
|
||||
Assert(num_moved == checked_moved);
|
||||
|
||||
elog(MESSAGE_LEVEL, "Rel %s: Pages: %u --> %u; Tuple(s) moved: %u.\n\t%s",
|
||||
elog(elevel, "Rel %s: Pages: %u --> %u; Tuple(s) moved: %u.\n\t%s",
|
||||
RelationGetRelationName(onerel),
|
||||
nblocks, blkno, num_moved,
|
||||
vac_show_rusage(&ru0));
|
||||
@ -2369,7 +2369,7 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
|
||||
/* truncate relation if there are some empty end-pages */
|
||||
if (vacuum_pages->empty_end_pages > 0)
|
||||
{
|
||||
elog(MESSAGE_LEVEL, "Rel %s: Pages: %u --> %u.",
|
||||
elog(elevel, "Rel %s: Pages: %u --> %u.",
|
||||
RelationGetRelationName(onerel),
|
||||
vacrelstats->rel_pages, relblocks);
|
||||
relblocks = smgrtruncate(DEFAULT_SMGR, onerel, relblocks);
|
||||
@ -2443,7 +2443,7 @@ scan_index(Relation indrel, double num_tuples)
|
||||
stats->num_pages, stats->num_index_tuples,
|
||||
false);
|
||||
|
||||
elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %.0f.\n\t%s",
|
||||
elog(elevel, "Index %s: Pages %u; Tuples %.0f.\n\t%s",
|
||||
RelationGetRelationName(indrel),
|
||||
stats->num_pages, stats->num_index_tuples,
|
||||
vac_show_rusage(&ru0));
|
||||
@ -2497,7 +2497,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
|
||||
stats->num_pages, stats->num_index_tuples,
|
||||
false);
|
||||
|
||||
elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %.0f: Deleted %.0f.\n\t%s",
|
||||
elog(elevel, "Index %s: Pages %u; Tuples %.0f: Deleted %.0f.\n\t%s",
|
||||
RelationGetRelationName(indrel), stats->num_pages,
|
||||
stats->num_index_tuples - keep_tuples, stats->tuples_removed,
|
||||
vac_show_rusage(&ru0));
|
||||
|
@ -31,7 +31,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.11 2002/01/06 00:37:44 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.12 2002/03/02 21:39:23 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -92,7 +92,7 @@ typedef struct LVRelStats
|
||||
} LVRelStats;
|
||||
|
||||
|
||||
static int MESSAGE_LEVEL; /* message level */
|
||||
static int elevel = -1;
|
||||
|
||||
static TransactionId OldestXmin;
|
||||
static TransactionId FreezeLimit;
|
||||
@ -138,12 +138,11 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
|
||||
bool hasindex;
|
||||
BlockNumber possibly_freeable;
|
||||
|
||||
/* initialize */
|
||||
if (vacstmt->verbose)
|
||||
MESSAGE_LEVEL = NOTICE;
|
||||
elevel = INFO;
|
||||
else
|
||||
MESSAGE_LEVEL = DEBUG;
|
||||
|
||||
elevel = DEBUG1;
|
||||
|
||||
vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
|
||||
&OldestXmin, &FreezeLimit);
|
||||
|
||||
@ -208,7 +207,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
|
||||
vac_init_rusage(&ru0);
|
||||
|
||||
relname = RelationGetRelationName(onerel);
|
||||
elog(MESSAGE_LEVEL, "--Relation %s--", relname);
|
||||
elog(elevel, "--Relation %s--", relname);
|
||||
|
||||
empty_pages = changed_pages = 0;
|
||||
num_tuples = tups_vacuumed = nkeep = nunused = 0;
|
||||
@ -430,7 +429,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
|
||||
lazy_scan_index(Irel[i], vacrelstats);
|
||||
}
|
||||
|
||||
elog(MESSAGE_LEVEL, "Pages %u: Changed %u, Empty %u; \
|
||||
elog(elevel, "Pages %u: Changed %u, Empty %u; \
|
||||
Tup %.0f: Vac %.0f, Keep %.0f, UnUsed %.0f.\n\tTotal %s",
|
||||
nblocks, changed_pages, empty_pages,
|
||||
num_tuples, tups_vacuumed, nkeep, nunused,
|
||||
@ -481,8 +480,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
|
||||
npages++;
|
||||
}
|
||||
|
||||
elog(MESSAGE_LEVEL, "Removed %d tuples in %d pages.\n\t%s",
|
||||
tupindex, npages,
|
||||
elog(elevel, "Removed %d tuples in %d pages.\n\t%s", tupindex, npages,
|
||||
vac_show_rusage(&ru0));
|
||||
}
|
||||
|
||||
@ -589,7 +587,7 @@ lazy_scan_index(Relation indrel, LVRelStats *vacrelstats)
|
||||
stats->num_pages, stats->num_index_tuples,
|
||||
false);
|
||||
|
||||
elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %.0f.\n\t%s",
|
||||
elog(elevel, "Index %s: Pages %u; Tuples %.0f.\n\t%s",
|
||||
RelationGetRelationName(indrel),
|
||||
stats->num_pages, stats->num_index_tuples,
|
||||
vac_show_rusage(&ru0));
|
||||
@ -636,7 +634,7 @@ lazy_vacuum_index(Relation indrel, LVRelStats *vacrelstats)
|
||||
stats->num_pages, stats->num_index_tuples,
|
||||
false);
|
||||
|
||||
elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %.0f: Deleted %.0f.\n\t%s",
|
||||
elog(elevel, "Index %s: Pages %u; Tuples %.0f: Deleted %.0f.\n\t%s",
|
||||
RelationGetRelationName(indrel), stats->num_pages,
|
||||
stats->num_index_tuples, stats->tuples_removed,
|
||||
vac_show_rusage(&ru0));
|
||||
@ -746,9 +744,8 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
|
||||
* We keep the exclusive lock until commit (perhaps not necessary)?
|
||||
*/
|
||||
|
||||
elog(MESSAGE_LEVEL, "Truncated %u --> %u pages.\n\t%s",
|
||||
old_rel_pages, new_rel_pages,
|
||||
vac_show_rusage(&ru0));
|
||||
elog(elevel, "Truncated %u --> %u pages.\n\t%s", old_rel_pages,
|
||||
new_rel_pages, vac_show_rusage(&ru0));
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -9,7 +9,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.58 2002/02/23 01:31:35 petere Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.59 2002/03/02 21:39:23 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -295,7 +295,7 @@ show_datestyle(void)
|
||||
strcat(buf, ((EuroDates) ? "European" : "US (NonEuropean)"));
|
||||
strcat(buf, " conventions");
|
||||
|
||||
elog(NOTICE, buf, NULL);
|
||||
elog(INFO, buf, NULL);
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
@ -482,9 +482,9 @@ show_timezone(void)
|
||||
tzn = getenv("TZ");
|
||||
|
||||
if (tzn != NULL)
|
||||
elog(NOTICE, "Time zone is '%s'", tzn);
|
||||
elog(INFO, "Time zone is '%s'", tzn);
|
||||
else
|
||||
elog(NOTICE, "Time zone is unset");
|
||||
elog(INFO, "Time zone is unset");
|
||||
|
||||
return TRUE;
|
||||
} /* show_timezone() */
|
||||
@ -576,9 +576,9 @@ show_XactIsoLevel(void)
|
||||
{
|
||||
|
||||
if (XactIsoLevel == XACT_SERIALIZABLE)
|
||||
elog(NOTICE, "TRANSACTION ISOLATION LEVEL is SERIALIZABLE");
|
||||
elog(INFO, "TRANSACTION ISOLATION LEVEL is SERIALIZABLE");
|
||||
else
|
||||
elog(NOTICE, "TRANSACTION ISOLATION LEVEL is READ COMMITTED");
|
||||
elog(INFO, "TRANSACTION ISOLATION LEVEL is READ COMMITTED");
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
@ -623,7 +623,7 @@ parse_random_seed(List *args)
|
||||
static bool
|
||||
show_random_seed(void)
|
||||
{
|
||||
elog(NOTICE, "Seed for random number generator is unavailable");
|
||||
elog(INFO, "Seed for random number generator is unavailable");
|
||||
return (TRUE);
|
||||
}
|
||||
|
||||
@ -690,7 +690,7 @@ parse_client_encoding(List *args)
|
||||
static bool
|
||||
show_client_encoding(void)
|
||||
{
|
||||
elog(NOTICE, "Current client encoding is '%s'",
|
||||
elog(INFO, "Current client encoding is '%s'",
|
||||
pg_get_client_encoding_name());
|
||||
return TRUE;
|
||||
}
|
||||
@ -727,21 +727,21 @@ set_default_client_encoding(void)
|
||||
static bool
|
||||
parse_server_encoding(List *args)
|
||||
{
|
||||
elog(NOTICE, "SET SERVER_ENCODING is not supported");
|
||||
elog(INFO, "SET SERVER_ENCODING is not supported");
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static bool
|
||||
show_server_encoding(void)
|
||||
{
|
||||
elog(NOTICE, "Current server encoding is '%s'", GetDatabaseEncodingName());
|
||||
elog(INFO, "Current server encoding is '%s'", GetDatabaseEncodingName());
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static bool
|
||||
reset_server_encoding(void)
|
||||
{
|
||||
elog(NOTICE, "RESET SERVER_ENCODING is not supported");
|
||||
elog(INFO, "RESET SERVER_ENCODING is not supported");
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
@ -814,7 +814,7 @@ GetPGVariable(const char *name)
|
||||
{
|
||||
const char *val = GetConfigOption(name);
|
||||
|
||||
elog(NOTICE, "%s is %s", name, val);
|
||||
elog(INFO, "%s is %s", name, val);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -6,7 +6,7 @@
|
||||
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Id: execAmi.c,v 1.61 2002/02/19 20:11:13 tgl Exp $
|
||||
* $Id: execAmi.c,v 1.62 2002/03/02 21:39:24 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -201,7 +201,7 @@ ExecMarkPos(Plan *node)
|
||||
|
||||
default:
|
||||
/* don't make hard error unless caller asks to restore... */
|
||||
elog(DEBUG, "ExecMarkPos: node type %d not supported",
|
||||
elog(LOG, "ExecMarkPos: node type %d not supported",
|
||||
nodeTag(node));
|
||||
break;
|
||||
}
|
||||
|
@ -27,7 +27,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.150 2002/02/27 19:34:48 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.151 2002/03/02 21:39:25 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -1095,7 +1095,7 @@ lnext: ;
|
||||
break;
|
||||
|
||||
default:
|
||||
elog(DEBUG, "ExecutePlan: unknown operation in queryDesc");
|
||||
elog(LOG, "ExecutePlan: unknown operation in queryDesc");
|
||||
result = NULL;
|
||||
break;
|
||||
}
|
||||
|
@ -7,7 +7,7 @@
|
||||
* Copyright (c) 2001, PostgreSQL Global Development Group
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/instrument.c,v 1.2 2001/10/25 05:49:28 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/instrument.c,v 1.3 2002/03/02 21:39:25 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -37,7 +37,7 @@ InstrStartNode(Instrumentation *instr)
|
||||
return;
|
||||
|
||||
if (instr->starttime.tv_sec != 0 || instr->starttime.tv_usec != 0)
|
||||
elog(DEBUG, "InstrStartTimer called twice in a row");
|
||||
elog(LOG, "InstrStartTimer called twice in a row");
|
||||
else
|
||||
gettimeofday(&instr->starttime, NULL);
|
||||
}
|
||||
@ -53,7 +53,7 @@ InstrStopNode(Instrumentation *instr, bool returnedTuple)
|
||||
|
||||
if (instr->starttime.tv_sec == 0 && instr->starttime.tv_usec == 0)
|
||||
{
|
||||
elog(DEBUG, "InstrStopNode without start");
|
||||
elog(LOG, "InstrStopNode without start");
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -46,7 +46,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.78 2001/10/25 05:49:28 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.79 2002/03/02 21:39:25 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -757,7 +757,7 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
|
||||
* So, just make a debug note, and force numaggs positive so that
|
||||
* palloc()s below don't choke.
|
||||
*/
|
||||
elog(DEBUG, "ExecInitAgg: could not find any aggregate functions");
|
||||
elog(LOG, "ExecInitAgg: could not find any aggregate functions");
|
||||
numaggs = 1;
|
||||
}
|
||||
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.43 2001/10/25 05:49:28 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.44 2002/03/02 21:39:25 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -290,7 +290,7 @@ ExecProcAppend(Append *node)
|
||||
subnode = (Plan *) nth(whichplan, appendplans);
|
||||
|
||||
if (subnode == NULL)
|
||||
elog(DEBUG, "ExecProcAppend: subnode is NULL");
|
||||
elog(LOG, "ExecProcAppend: subnode is NULL");
|
||||
|
||||
/*
|
||||
* get a tuple from the subplan
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.75 2002/02/25 20:07:02 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.76 2002/03/02 21:39:25 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -691,9 +691,7 @@ pam_passwd_conv_proc(int num_msg, const struct pam_message ** msg, struct pam_re
|
||||
|
||||
initStringInfo(&buf);
|
||||
pq_getstr(&buf);
|
||||
if (DebugLvl > 5)
|
||||
fprintf(stderr, "received PAM packet with len=%d, pw=%s\n",
|
||||
len, buf.data);
|
||||
elog(DEBUG5, "received PAM packet with len=%d, pw=%s\n", len, buf.data);
|
||||
|
||||
if (strlen(buf.data) == 0)
|
||||
{
|
||||
@ -856,9 +854,8 @@ recv_and_check_password_packet(Port *port)
|
||||
return STATUS_EOF;
|
||||
}
|
||||
|
||||
if (DebugLvl > 5) /* this is probably a BAD idea... */
|
||||
fprintf(stderr, "received password packet with len=%d, pw=%s\n",
|
||||
len, buf.data);
|
||||
elog(DEBUG5, "received password packet with len=%d, pw=%s\n",
|
||||
len, buf.data);
|
||||
|
||||
result = checkPassword(port, port->user, buf.data);
|
||||
pfree(buf.data);
|
||||
|
@ -9,7 +9,7 @@
|
||||
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Header: /cvsroot/pgsql/src/backend/libpq/crypt.c,v 1.42 2001/11/05 17:46:25 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/libpq/crypt.c,v 1.43 2002/03/02 21:39:26 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -65,7 +65,7 @@ crypt_openpwdfile(void)
|
||||
pwdfile = AllocateFile(filename, "r");
|
||||
|
||||
if (pwdfile == NULL && errno != ENOENT)
|
||||
elog(DEBUG, "could not open %s: %m", filename);
|
||||
elog(LOG, "could not open %s: %m", filename);
|
||||
|
||||
pfree(filename);
|
||||
|
||||
|
@ -29,7 +29,7 @@
|
||||
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Id: pqcomm.c,v 1.126 2001/12/04 20:57:22 tgl Exp $
|
||||
* $Id: pqcomm.c,v 1.127 2002/03/02 21:39:26 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -507,15 +507,15 @@ pq_recvbuf(void)
|
||||
* Careful: an elog() that tries to write to the client
|
||||
* would cause recursion to here, leading to stack overflow
|
||||
* and core dump! This message must go *only* to the postmaster
|
||||
* log. elog(DEBUG) is presently safe.
|
||||
* log. elog(LOG) is presently safe.
|
||||
*/
|
||||
elog(DEBUG, "pq_recvbuf: recv() failed: %m");
|
||||
elog(LOG, "pq_recvbuf: recv() failed: %m");
|
||||
return EOF;
|
||||
}
|
||||
if (r == 0)
|
||||
{
|
||||
/* as above, only write to postmaster log */
|
||||
elog(DEBUG, "pq_recvbuf: unexpected EOF on client connection");
|
||||
elog(LOG, "pq_recvbuf: unexpected EOF on client connection");
|
||||
return EOF;
|
||||
}
|
||||
/* r contains number of bytes read, so just incr length */
|
||||
@ -680,7 +680,7 @@ pq_flush(void)
|
||||
* Careful: an elog() that tries to write to the client
|
||||
* would cause recursion to here, leading to stack overflow
|
||||
* and core dump! This message must go *only* to the postmaster
|
||||
* log. elog(DEBUG) is presently safe.
|
||||
* log. elog(LOG) is presently safe.
|
||||
*
|
||||
* If a client disconnects while we're in the midst of output,
|
||||
* we might write quite a bit of data before we get to a safe
|
||||
@ -689,7 +689,7 @@ pq_flush(void)
|
||||
if (errno != last_reported_send_errno)
|
||||
{
|
||||
last_reported_send_errno = errno;
|
||||
elog(DEBUG, "pq_flush: send() failed: %m");
|
||||
elog(LOG, "pq_flush: send() failed: %m");
|
||||
}
|
||||
|
||||
/*
|
||||
@ -723,7 +723,7 @@ pq_eof(void)
|
||||
if (res < 0)
|
||||
{
|
||||
/* can log to postmaster log only */
|
||||
elog(DEBUG, "pq_eof: recv() failed: %m");
|
||||
elog(LOG, "pq_eof: recv() failed: %m");
|
||||
return EOF;
|
||||
}
|
||||
if (res == 0)
|
||||
|
@ -3,7 +3,7 @@
|
||||
* geqo_erx.c
|
||||
* edge recombination crossover [ER]
|
||||
*
|
||||
* $Id: geqo_erx.c,v 1.16 2001/10/25 05:49:31 momjian Exp $
|
||||
* $Id: geqo_erx.c,v 1.17 2002/03/02 21:39:26 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -419,7 +419,7 @@ edge_failure(Gene *gene, int index, Edge *edge_table, int num_gene)
|
||||
}
|
||||
}
|
||||
|
||||
elog(DEBUG, "edge_failure(1): no edge found via random decision and total_edges == 4");
|
||||
elog(LOG, "edge_failure(1): no edge found via random decision and total_edges == 4");
|
||||
}
|
||||
|
||||
else
|
||||
@ -444,7 +444,7 @@ edge_failure(Gene *gene, int index, Edge *edge_table, int num_gene)
|
||||
}
|
||||
}
|
||||
|
||||
elog(DEBUG, "edge_failure(2): no edge found via random decision and remainig edges");
|
||||
elog(LOG, "edge_failure(2): no edge found via random decision and remainig edges");
|
||||
}
|
||||
|
||||
/*
|
||||
@ -462,7 +462,7 @@ edge_failure(Gene *gene, int index, Edge *edge_table, int num_gene)
|
||||
if (edge_table[i].unused_edges >= 0)
|
||||
return (Gene) i;
|
||||
|
||||
elog(DEBUG, "edge_failure(3): no edge found via looking for the last ununsed point");
|
||||
elog(LOG, "edge_failure(3): no edge found via looking for the last ununsed point");
|
||||
}
|
||||
|
||||
|
||||
|
@ -7,7 +7,7 @@
|
||||
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Id: geqo_main.c,v 1.29 2001/10/25 05:49:31 momjian Exp $
|
||||
* $Id: geqo_main.c,v 1.30 2002/03/02 21:39:26 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -120,30 +120,30 @@ geqo(Query *root, int number_of_rels, List *initial_rels)
|
||||
daddy = alloc_chromo(pool->string_length);
|
||||
|
||||
#if defined (ERX)
|
||||
elog(DEBUG, "geqo_main: using edge recombination crossover [ERX]");
|
||||
elog(LOG, "geqo_main: using edge recombination crossover [ERX]");
|
||||
/* allocate edge table memory */
|
||||
edge_table = alloc_edge_table(pool->string_length);
|
||||
#elif defined(PMX)
|
||||
elog(DEBUG, "geqo_main: using partially matched crossover [PMX]");
|
||||
elog(LOG, "geqo_main: using partially matched crossover [PMX]");
|
||||
/* allocate chromosome kid memory */
|
||||
kid = alloc_chromo(pool->string_length);
|
||||
#elif defined(CX)
|
||||
elog(DEBUG, "geqo_main: using cycle crossover [CX]");
|
||||
elog(LOG, "geqo_main: using cycle crossover [CX]");
|
||||
/* allocate city table memory */
|
||||
kid = alloc_chromo(pool->string_length);
|
||||
city_table = alloc_city_table(pool->string_length);
|
||||
#elif defined(PX)
|
||||
elog(DEBUG, "geqo_main: using position crossover [PX]");
|
||||
elog(LOG, "geqo_main: using position crossover [PX]");
|
||||
/* allocate city table memory */
|
||||
kid = alloc_chromo(pool->string_length);
|
||||
city_table = alloc_city_table(pool->string_length);
|
||||
#elif defined(OX1)
|
||||
elog(DEBUG, "geqo_main: using order crossover [OX1]");
|
||||
elog(LOG, "geqo_main: using order crossover [OX1]");
|
||||
/* allocate city table memory */
|
||||
kid = alloc_chromo(pool->string_length);
|
||||
city_table = alloc_city_table(pool->string_length);
|
||||
#elif defined(OX2)
|
||||
elog(DEBUG, "geqo_main: using order crossover [OX2]");
|
||||
elog(LOG, "geqo_main: using order crossover [OX2]");
|
||||
/* allocate city table memory */
|
||||
kid = alloc_chromo(pool->string_length);
|
||||
city_table = alloc_city_table(pool->string_length);
|
||||
@ -214,18 +214,18 @@ geqo(Query *root, int number_of_rels, List *initial_rels)
|
||||
|
||||
#if defined(ERX) && defined(GEQO_DEBUG)
|
||||
if (edge_failures != 0)
|
||||
elog(DEBUG, "[GEQO] failures: %d, average: %d",
|
||||
elog(LOG, "[GEQO] failures: %d, average: %d",
|
||||
edge_failures, (int) generation / edge_failures);
|
||||
else
|
||||
elog(DEBUG, "[GEQO] No edge failures detected.");
|
||||
elog(LOG, "[GEQO] No edge failures detected.");
|
||||
#endif
|
||||
|
||||
|
||||
#if defined(CX) && defined(GEQO_DEBUG)
|
||||
if (mutations != 0)
|
||||
elog(DEBUG, "[GEQO] mutations: %d, generations: %d", mutations, generation);
|
||||
elog(LOG, "[GEQO] mutations: %d, generations: %d", mutations, generation);
|
||||
else
|
||||
elog(DEBUG, "[GEQO] No mutations processed.");
|
||||
elog(LOG, "[GEQO] No mutations processed.");
|
||||
#endif
|
||||
|
||||
|
||||
|
@ -9,7 +9,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.112 2001/10/25 05:49:32 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.113 2002/03/02 21:39:26 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -1299,7 +1299,7 @@ pred_test_simple_clause(Expr *predicate, Node *clause)
|
||||
if (!HeapTupleIsValid(tuple))
|
||||
{
|
||||
/* this probably shouldn't fail? */
|
||||
elog(DEBUG, "pred_test_simple_clause: unknown test_op");
|
||||
elog(LOG, "pred_test_simple_clause: unknown test_op");
|
||||
return false;
|
||||
}
|
||||
aform = (Form_pg_amop) GETSTRUCT(tuple);
|
||||
@ -1327,7 +1327,7 @@ pred_test_simple_clause(Expr *predicate, Node *clause)
|
||||
|
||||
if (isNull)
|
||||
{
|
||||
elog(DEBUG, "pred_test_simple_clause: null test result");
|
||||
elog(LOG, "pred_test_simple_clause: null test result");
|
||||
return false;
|
||||
}
|
||||
return DatumGetBool(test_result);
|
||||
|
@ -6,7 +6,7 @@
|
||||
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Header: /cvsroot/pgsql/src/backend/parser/analyze.c,v 1.215 2002/02/26 22:47:08 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/parser/analyze.c,v 1.216 2002/03/02 21:39:27 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -849,7 +849,7 @@ transformColumnDefinition(ParseState *pstate, CreateStmtContext *cxt,
|
||||
sequence->istemp = cxt->istemp;
|
||||
sequence->options = NIL;
|
||||
|
||||
elog(NOTICE, "%s will create implicit sequence '%s' for SERIAL column '%s.%s'",
|
||||
elog(INFO, "%s will create implicit sequence '%s' for SERIAL column '%s.%s'",
|
||||
cxt->stmtType, sequence->seqname, cxt->relname, column->colname);
|
||||
|
||||
cxt->blist = lappend(cxt->blist, sequence);
|
||||
@ -1264,7 +1264,7 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
|
||||
elog(ERROR, "%s: failed to make implicit index name",
|
||||
cxt->stmtType);
|
||||
|
||||
elog(NOTICE, "%s / %s%s will create implicit index '%s' for table '%s'",
|
||||
elog(INFO, "%s / %s%s will create implicit index '%s' for table '%s'",
|
||||
cxt->stmtType,
|
||||
(strcmp(cxt->stmtType, "ALTER TABLE") == 0) ? "ADD " : "",
|
||||
(index->primary ? "PRIMARY KEY" : "UNIQUE"),
|
||||
@ -1288,7 +1288,7 @@ transformFKConstraints(ParseState *pstate, CreateStmtContext *cxt)
|
||||
if (cxt->fkconstraints == NIL)
|
||||
return;
|
||||
|
||||
elog(NOTICE, "%s will create implicit trigger(s) for FOREIGN KEY check(s)",
|
||||
elog(INFO, "%s will create implicit trigger(s) for FOREIGN KEY check(s)",
|
||||
cxt->stmtType);
|
||||
|
||||
foreach(fkclist, cxt->fkconstraints)
|
||||
@ -2710,7 +2710,7 @@ transformTypeRef(ParseState *pstate, TypeName *tn)
|
||||
elog(ERROR, "unsupported expression in %%TYPE");
|
||||
v = (Var *) n;
|
||||
tyn = typeidTypeName(v->vartype);
|
||||
elog(NOTICE, "%s.%s%%TYPE converted to %s", tn->name, tn->attrname, tyn);
|
||||
elog(INFO, "%s.%s%%TYPE converted to %s", tn->name, tn->attrname, tyn);
|
||||
tn->name = tyn;
|
||||
tn->typmod = v->vartypmod;
|
||||
tn->attrname = NULL;
|
||||
|
@ -11,7 +11,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/parser/gram.y,v 2.282 2002/03/01 22:45:12 petere Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/parser/gram.y,v 2.283 2002/03/02 21:39:27 momjian Exp $
|
||||
*
|
||||
* HISTORY
|
||||
* AUTHOR DATE MAJOR EVENT
|
||||
@ -1338,7 +1338,7 @@ columnDef: ColId Typename ColQualList opt_collate
|
||||
n->constraints = $3;
|
||||
|
||||
if ($4 != NULL)
|
||||
elog(NOTICE,"CREATE TABLE / COLLATE %s not yet implemented"
|
||||
elog(INFO,"CREATE TABLE / COLLATE %s not yet implemented"
|
||||
"; clause ignored", $4);
|
||||
|
||||
$$ = (Node *)n;
|
||||
@ -2336,7 +2336,7 @@ direction: FORWARD { $$ = FORWARD; }
|
||||
| RELATIVE { $$ = RELATIVE; }
|
||||
| ABSOLUTE
|
||||
{
|
||||
elog(NOTICE,"FETCH / ABSOLUTE not supported, using RELATIVE");
|
||||
elog(INFO,"FETCH / ABSOLUTE not supported, using RELATIVE");
|
||||
$$ = RELATIVE;
|
||||
}
|
||||
;
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/parser/parse_relation.c,v 1.60 2001/11/05 17:46:26 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/parser/parse_relation.c,v 1.61 2002/03/02 21:39:28 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -1041,7 +1041,7 @@ warnAutoRange(ParseState *pstate, char *refname)
|
||||
}
|
||||
}
|
||||
if (foundInFromCl)
|
||||
elog(NOTICE, "Adding missing FROM-clause entry%s for table \"%s\"",
|
||||
elog(INFO, "Adding missing FROM-clause entry%s for table \"%s\"",
|
||||
pstate->parentParseState != NULL ? " in subquery" : "",
|
||||
refname);
|
||||
}
|
||||
|
@ -37,7 +37,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/postmaster/postmaster.c,v 1.268 2002/03/02 20:46:12 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/postmaster/postmaster.c,v 1.269 2002/03/02 21:39:28 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
*
|
||||
@@ -436,13 +436,15 @@ PostmasterMain(int argc, char *argv[])
				potential_DataDir = optarg;
				break;
			case 'd':

				/*
				 * Turn on debugging for the postmaster and the backend
				 * servers descended from it.
				 */
				SetConfigOption("debug_level", optarg, PGC_POSTMASTER, PGC_S_ARGV);
				{
					/* Turn on debugging for the postmaster. */
					char	   *debugstr = palloc(strlen("debug") + strlen(optarg) + 1);
					sprintf(debugstr, "debug%s", optarg);
					/* We use PGC_S_SESSION because we will reset in backend */
					SetConfigOption("server_min_messages", debugstr, PGC_POSTMASTER, PGC_S_SESSION);
					pfree(debugstr);
					break;
				}
			case 'F':
				SetConfigOption("fsync", "false", PGC_POSTMASTER, PGC_S_ARGV);
				break;
|
||||
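Both -d code paths touched by this hunk build the new GUC value the same way: prepend "debug" to the numeric level and hand the result to SetConfigOption() as server_min_messages. A minimal standalone sketch of that mapping follows; the helper name and the malloc/free memory handling are illustrative only, not taken from the commit (the backend uses palloc/pfree).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical helper: convert a -d argument ("1".."5") into the string
 * expected by the server_min_messages GUC ("debug1".."debug5").  Mirrors
 * the sprintf(debugstr, "debug%s", optarg) pattern shown above.
 */
static char *
debug_level_to_guc(const char *level)
{
	char	   *debugstr = malloc(strlen("debug") + strlen(level) + 1);

	if (debugstr != NULL)
		sprintf(debugstr, "debug%s", level);
	return debugstr;			/* caller frees */
}

int
main(void)
{
	char	   *val = debug_level_to_guc("3");

	if (val != NULL)
	{
		printf("server_min_messages = %s\n", val);	/* prints "debug3" */
		free(val);
	}
	return 0;
}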
@@ -580,17 +582,15 @@ PostmasterMain(int argc, char *argv[])
#endif

	/* For debugging: display postmaster environment */
	if (DebugLvl > 2)
	{
		extern char **environ;
		char	  **p;

		fprintf(stderr, "%s: PostmasterMain: initial environ dump:\n",
				progname);
		fprintf(stderr, "-----------------------------------------\n");
		elog(DEBUG2, "%s: PostmasterMain: initial environ dump:", progname);
		elog(DEBUG2, "-----------------------------------------");
		for (p = environ; *p; ++p)
			fprintf(stderr, "\t%s\n", *p);
		fprintf(stderr, "-----------------------------------------\n");
			elog(DEBUG2, "\t%s", *p);
		elog(DEBUG2, "-----------------------------------------");
	}
|
||||
|
||||
/*
|
||||
@ -932,7 +932,7 @@ ServerLoop(void)
|
||||
PG_SETMASK(&BlockSig);
|
||||
if (errno == EINTR || errno == EWOULDBLOCK)
|
||||
continue;
|
||||
elog(DEBUG, "ServerLoop: select failed: %m");
|
||||
elog(LOG, "ServerLoop: select failed: %m");
|
||||
return STATUS_ERROR;
|
||||
}
|
||||
|
||||
@ -1058,7 +1058,7 @@ ProcessStartupPacket(Port *port, bool SSLdone)
|
||||
|
||||
if (pq_getbytes((char *) &len, 4) == EOF)
|
||||
{
|
||||
elog(DEBUG, "incomplete startup packet");
|
||||
elog(LOG, "incomplete startup packet");
|
||||
return STATUS_ERROR;
|
||||
}
|
||||
|
||||
@ -1072,7 +1072,7 @@ ProcessStartupPacket(Port *port, bool SSLdone)
|
||||
|
||||
if (pq_getbytes(buf, len) == EOF)
|
||||
{
|
||||
elog(DEBUG, "incomplete startup packet");
|
||||
elog(LOG, "incomplete startup packet");
|
||||
return STATUS_ERROR;
|
||||
}
|
||||
|
||||
@ -1105,7 +1105,7 @@ ProcessStartupPacket(Port *port, bool SSLdone)
|
||||
#endif
|
||||
if (send(port->sock, &SSLok, 1, 0) != 1)
|
||||
{
|
||||
elog(DEBUG, "failed to send SSL negotiation response: %s",
|
||||
elog(LOG, "failed to send SSL negotiation response: %s",
|
||||
strerror(errno));
|
||||
return STATUS_ERROR; /* close the connection */
|
||||
}
|
||||
@ -1117,7 +1117,7 @@ ProcessStartupPacket(Port *port, bool SSLdone)
|
||||
!SSL_set_fd(port->ssl, port->sock) ||
|
||||
SSL_accept(port->ssl) <= 0)
|
||||
{
|
||||
elog(DEBUG, "failed to initialize SSL connection: %s (%m)",
|
||||
elog(LOG, "failed to initialize SSL connection: %s (%m)",
|
||||
SSLerrmessage());
|
||||
return STATUS_ERROR;
|
||||
}
|
||||
@ -1215,8 +1215,7 @@ processCancelRequest(Port *port, void *pkt)
|
||||
|
||||
if (backendPID == CheckPointPID)
|
||||
{
|
||||
if (DebugLvl)
|
||||
elog(DEBUG, "processCancelRequest: CheckPointPID in cancel request for process %d", backendPID);
|
||||
elog(DEBUG1, "processCancelRequest: CheckPointPID in cancel request for process %d", backendPID);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1230,25 +1229,20 @@ processCancelRequest(Port *port, void *pkt)
|
||||
if (bp->cancel_key == cancelAuthCode)
|
||||
{
|
||||
/* Found a match; signal that backend to cancel current op */
|
||||
if (DebugLvl)
|
||||
elog(DEBUG, "processing cancel request: sending SIGINT to process %d",
|
||||
backendPID);
|
||||
elog(DEBUG1, "processing cancel request: sending SIGINT to process %d",
|
||||
backendPID);
|
||||
kill(bp->pid, SIGINT);
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Right PID, wrong key: no way, Jose */
|
||||
if (DebugLvl)
|
||||
elog(DEBUG, "bad key in cancel request for process %d",
|
||||
backendPID);
|
||||
}
|
||||
elog(DEBUG1, "bad key in cancel request for process %d",
|
||||
backendPID);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* No matching backend */
|
||||
if (DebugLvl)
|
||||
elog(DEBUG, "bad pid in cancel request for process %d", backendPID);
|
||||
elog(DEBUG1, "bad pid in cancel request for process %d", backendPID);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1291,7 +1285,7 @@ ConnCreate(int serverFd)
|
||||
|
||||
if (!(port = (Port *) calloc(1, sizeof(Port))))
|
||||
{
|
||||
elog(DEBUG, "ConnCreate: malloc failed");
|
||||
elog(LOG, "ConnCreate: malloc failed");
|
||||
SignalChildren(SIGQUIT);
|
||||
ExitPostmaster(1);
|
||||
}
|
||||
@ -1412,8 +1406,7 @@ pmdie(SIGNAL_ARGS)
|
||||
|
||||
PG_SETMASK(&BlockSig);
|
||||
|
||||
if (DebugLvl >= 1)
|
||||
elog(DEBUG, "pmdie %d", postgres_signal_arg);
|
||||
elog(DEBUG1, "pmdie %d", postgres_signal_arg);
|
||||
|
||||
switch (postgres_signal_arg)
|
||||
{
|
||||
@ -1427,7 +1420,7 @@ pmdie(SIGNAL_ARGS)
|
||||
if (Shutdown >= SmartShutdown)
|
||||
break;
|
||||
Shutdown = SmartShutdown;
|
||||
elog(DEBUG, "smart shutdown request");
|
||||
elog(LOG, "smart shutdown request");
|
||||
if (DLGetHead(BackendList)) /* let reaper() handle this */
|
||||
break;
|
||||
|
||||
@ -1439,7 +1432,7 @@ pmdie(SIGNAL_ARGS)
|
||||
break;
|
||||
if (ShutdownPID > 0)
|
||||
{
|
||||
elog(REALLYFATAL, "shutdown process %d already running",
|
||||
elog(PANIC, "shutdown process %d already running",
|
||||
(int) ShutdownPID);
|
||||
abort();
|
||||
}
|
||||
@ -1457,13 +1450,13 @@ pmdie(SIGNAL_ARGS)
|
||||
*/
|
||||
if (Shutdown >= FastShutdown)
|
||||
break;
|
||||
elog(DEBUG, "fast shutdown request");
|
||||
elog(LOG, "fast shutdown request");
|
||||
if (DLGetHead(BackendList)) /* let reaper() handle this */
|
||||
{
|
||||
Shutdown = FastShutdown;
|
||||
if (!FatalError)
|
||||
{
|
||||
elog(DEBUG, "aborting any active transactions");
|
||||
elog(LOG, "aborting any active transactions");
|
||||
SignalChildren(SIGTERM);
|
||||
}
|
||||
break;
|
||||
@ -1483,7 +1476,7 @@ pmdie(SIGNAL_ARGS)
|
||||
break;
|
||||
if (ShutdownPID > 0)
|
||||
{
|
||||
elog(REALLYFATAL, "shutdown process %d already running",
|
||||
elog(PANIC, "shutdown process %d already running",
|
||||
(int) ShutdownPID);
|
||||
abort();
|
||||
}
|
||||
@ -1499,7 +1492,7 @@ pmdie(SIGNAL_ARGS)
|
||||
* abort all children with SIGQUIT and exit without attempt to
|
||||
* properly shutdown data base system.
|
||||
*/
|
||||
elog(DEBUG, "immediate shutdown request");
|
||||
elog(LOG, "immediate shutdown request");
|
||||
if (ShutdownPID > 0)
|
||||
kill(ShutdownPID, SIGQUIT);
|
||||
if (StartupPID > 0)
|
||||
@ -1534,8 +1527,7 @@ reaper(SIGNAL_ARGS)
|
||||
|
||||
PG_SETMASK(&BlockSig);
|
||||
|
||||
if (DebugLvl)
|
||||
elog(DEBUG, "reaping dead processes");
|
||||
elog(DEBUG1, "reaping dead processes");
|
||||
#ifdef HAVE_WAITPID
|
||||
while ((pid = waitpid(-1, &status, WNOHANG)) > 0)
|
||||
{
|
||||
@ -1578,7 +1570,7 @@ reaper(SIGNAL_ARGS)
|
||||
{
|
||||
LogChildExit(gettext("startup process"),
|
||||
pid, exitstatus);
|
||||
elog(DEBUG, "aborting startup due to startup process failure");
|
||||
elog(LOG, "aborting startup due to startup process failure");
|
||||
ExitPostmaster(1);
|
||||
}
|
||||
StartupPID = 0;
|
||||
@ -1587,7 +1579,7 @@ reaper(SIGNAL_ARGS)
|
||||
{
|
||||
if (ShutdownPID > 0)
|
||||
{
|
||||
elog(STOP, "startup process %d died while shutdown process %d already running",
|
||||
elog(PANIC, "startup process %d died while shutdown process %d already running",
|
||||
pid, (int) ShutdownPID);
|
||||
abort();
|
||||
}
|
||||
@ -1619,7 +1611,7 @@ reaper(SIGNAL_ARGS)
|
||||
*/
|
||||
if (DLGetHead(BackendList) || StartupPID > 0 || ShutdownPID > 0)
|
||||
goto reaper_done;
|
||||
elog(DEBUG, "all server processes terminated; reinitializing shared memory and semaphores");
|
||||
elog(LOG, "all server processes terminated; reinitializing shared memory and semaphores");
|
||||
|
||||
shmem_exit(0);
|
||||
reset_shared(PostPortNumber);
|
||||
@ -1657,8 +1649,7 @@ CleanupProc(int pid,
|
||||
*next;
|
||||
Backend *bp;
|
||||
|
||||
if (DebugLvl)
|
||||
LogChildExit(gettext("child process"), pid, exitstatus);
|
||||
LogChildExit(gettext("child process"), pid, exitstatus);
|
||||
|
||||
/*
|
||||
* If a backend dies in an ugly way (i.e. exit status not 0) then we
|
||||
@ -1704,7 +1695,7 @@ CleanupProc(int pid,
|
||||
if (!FatalError)
|
||||
{
|
||||
LogChildExit(gettext("server process"), pid, exitstatus);
|
||||
elog(DEBUG, "terminating any other active server processes");
|
||||
elog(LOG, "terminating any other active server processes");
|
||||
}
|
||||
|
||||
curr = DLGetHead(BackendList);
|
||||
@ -1725,10 +1716,8 @@ CleanupProc(int pid,
|
||||
*/
|
||||
if (!FatalError)
|
||||
{
|
||||
if (DebugLvl)
|
||||
elog(DEBUG, "CleanupProc: sending %s to process %d",
|
||||
(SendStop ? "SIGSTOP" : "SIGQUIT"),
|
||||
(int) bp->pid);
|
||||
elog(DEBUG1, "CleanupProc: sending %s to process %d",
|
||||
(SendStop ? "SIGSTOP" : "SIGQUIT"), (int) bp->pid);
|
||||
kill(bp->pid, (SendStop ? SIGSTOP : SIGQUIT));
|
||||
}
|
||||
}
|
||||
@ -1771,13 +1760,13 @@ LogChildExit(const char *procname, int pid, int exitstatus)
|
||||
* describing a child process, such as "server process"
|
||||
*/
|
||||
if (WIFEXITED(exitstatus))
|
||||
elog(DEBUG, "%s (pid %d) exited with exit code %d",
|
||||
elog(LOG, "%s (pid %d) exited with exit code %d",
|
||||
procname, pid, WEXITSTATUS(exitstatus));
|
||||
else if (WIFSIGNALED(exitstatus))
|
||||
elog(DEBUG, "%s (pid %d) was terminated by signal %d",
|
||||
elog(LOG, "%s (pid %d) was terminated by signal %d",
|
||||
procname, pid, WTERMSIG(exitstatus));
|
||||
else
|
||||
elog(DEBUG, "%s (pid %d) exited with unexpected status %d",
|
||||
elog(LOG, "%s (pid %d) exited with unexpected status %d",
|
||||
procname, pid, exitstatus);
|
||||
}
|
||||
|
||||
@ -1799,10 +1788,8 @@ SignalChildren(int signal)
|
||||
|
||||
if (bp->pid != MyProcPid)
|
||||
{
|
||||
if (DebugLvl >= 1)
|
||||
elog(DEBUG, "SignalChildren: sending signal %d to process %d",
|
||||
signal, (int) bp->pid);
|
||||
|
||||
elog(DEBUG1, "SignalChildren: sending signal %d to process %d",
|
||||
signal, (int) bp->pid);
|
||||
kill(bp->pid, signal);
|
||||
}
|
||||
|
||||
@ -1838,7 +1825,7 @@ BackendStartup(Port *port)
|
||||
bn = (Backend *) malloc(sizeof(Backend));
|
||||
if (!bn)
|
||||
{
|
||||
elog(DEBUG, "out of memory; connection startup aborted");
|
||||
elog(LOG, "out of memory; connection startup aborted");
|
||||
return STATUS_ERROR;
|
||||
}
|
||||
|
||||
@ -1887,7 +1874,7 @@ BackendStartup(Port *port)
|
||||
status = DoBackend(port);
|
||||
if (status != 0)
|
||||
{
|
||||
elog(DEBUG, "connection startup failed");
|
||||
elog(LOG, "connection startup failed");
|
||||
proc_exit(status);
|
||||
}
|
||||
else
|
||||
@ -1904,16 +1891,15 @@ BackendStartup(Port *port)
|
||||
beos_backend_startup_failed();
|
||||
#endif
|
||||
free(bn);
|
||||
elog(DEBUG, "connection startup failed (fork failure): %s",
|
||||
elog(LOG, "connection startup failed (fork failure): %s",
|
||||
strerror(save_errno));
|
||||
report_fork_failure_to_client(port, save_errno);
|
||||
return STATUS_ERROR;
|
||||
}
|
||||
|
||||
/* in parent, normal */
|
||||
if (DebugLvl >= 1)
|
||||
elog(DEBUG, "BackendStartup: forked pid=%d socket=%d",
|
||||
(int) pid, port->sock);
|
||||
elog(DEBUG1, "BackendStartup: forked pid=%d socket=%d", (int) pid,
|
||||
port->sock);
|
||||
|
||||
/*
|
||||
* Everything's been successful, it's safe to add this backend to our
|
||||
@ -2007,7 +1993,6 @@ DoBackend(Port *port)
|
||||
char *remote_host;
|
||||
char *av[ARGV_SIZE * 2];
|
||||
int ac = 0;
|
||||
char debugbuf[ARGV_SIZE];
|
||||
char protobuf[ARGV_SIZE];
|
||||
char dbbuf[ARGV_SIZE];
|
||||
char optbuf[ARGV_SIZE];
|
||||
@ -2151,7 +2136,7 @@ DoBackend(Port *port)
|
||||
PG_SETMASK(&BlockSig);
|
||||
|
||||
if (Log_connections)
|
||||
elog(DEBUG, "connection: host=%s user=%s database=%s",
|
||||
elog(LOG, "connection: host=%s user=%s database=%s",
|
||||
remote_host, port->user, port->database);
|
||||
|
||||
/*
|
||||
@ -2174,19 +2159,6 @@ DoBackend(Port *port)
|
||||
|
||||
av[ac++] = "postgres";
|
||||
|
||||
/*
|
||||
* Pass the requested debugging level along to the backend. Level one
|
||||
* debugging in the postmaster traces postmaster connection activity,
|
||||
* and levels two and higher are passed along to the backend. This
|
||||
* allows us to watch only the postmaster or the postmaster and the
|
||||
* backend.
|
||||
*/
|
||||
if (DebugLvl > 1)
|
||||
{
|
||||
sprintf(debugbuf, "-d%d", DebugLvl);
|
||||
av[ac++] = debugbuf;
|
||||
}
|
||||
|
||||
/*
|
||||
* Pass any backend switches specified with -o in the postmaster's own
|
||||
* command line. We assume these are secure. (It's OK to mangle
|
||||
@@ -2244,14 +2216,10 @@ DoBackend(Port *port)
	/*
	 * Debug: print arguments being passed to backend
	 */
	if (DebugLvl > 1)
	{
		fprintf(stderr, "%s child[%d]: starting with (",
				progname, MyProcPid);
		for (i = 0; i < ac; ++i)
			fprintf(stderr, "%s ", av[i]);
		fprintf(stderr, ")\n");
	}
	elog(DEBUG2, "%s child[%d]: starting with (", progname, MyProcPid);
	for (i = 0; i < ac; ++i)
		elog(DEBUG2, "%s ", av[i]);
	elog(DEBUG2, ")\n");
|
||||
|
||||
return (PostgresMain(ac, av, port->user));
|
||||
}
|
||||
@ -2584,8 +2552,6 @@ SSDataBase(int xlop)
|
||||
/* Set up command-line arguments for subprocess */
|
||||
av[ac++] = "postgres";
|
||||
|
||||
av[ac++] = "-d";
|
||||
|
||||
sprintf(nbbuf, "-B%d", NBuffers);
|
||||
av[ac++] = nbbuf;
|
||||
|
||||
@ -2614,16 +2580,16 @@ SSDataBase(int xlop)
|
||||
switch (xlop)
|
||||
{
|
||||
case BS_XLOG_STARTUP:
|
||||
elog(DEBUG, "could not launch startup process (fork failure): %s",
|
||||
elog(LOG, "could not launch startup process (fork failure): %s",
|
||||
strerror(errno));
|
||||
break;
|
||||
case BS_XLOG_CHECKPOINT:
|
||||
elog(DEBUG, "could not launch checkpoint process (fork failure): %s",
|
||||
elog(LOG, "could not launch checkpoint process (fork failure): %s",
|
||||
strerror(errno));
|
||||
break;
|
||||
case BS_XLOG_SHUTDOWN:
|
||||
default:
|
||||
elog(DEBUG, "could not launch shutdown process (fork failure): %s",
|
||||
elog(LOG, "could not launch shutdown process (fork failure): %s",
|
||||
strerror(errno));
|
||||
break;
|
||||
}
|
||||
@ -2646,7 +2612,7 @@ SSDataBase(int xlop)
|
||||
{
|
||||
if (!(bn = (Backend *) malloc(sizeof(Backend))))
|
||||
{
|
||||
elog(DEBUG, "CheckPointDataBase: malloc failed");
|
||||
elog(LOG, "CheckPointDataBase: malloc failed");
|
||||
ExitPostmaster(1);
|
||||
}
|
||||
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.120 2001/11/10 23:51:14 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.121 2002/03/02 21:39:28 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -443,7 +443,7 @@ BufferAlloc(Relation reln,
|
||||
*/
|
||||
if (buf->flags & BM_JUST_DIRTIED)
|
||||
{
|
||||
elog(STOP, "BufferAlloc: content of block %u (%u/%u) changed while flushing",
|
||||
elog(PANIC, "BufferAlloc: content of block %u (%u/%u) changed while flushing",
|
||||
buf->tag.blockNum,
|
||||
buf->tag.rnode.tblNode, buf->tag.rnode.relNode);
|
||||
}
|
||||
@ -804,7 +804,7 @@ BufferSync()
|
||||
}
|
||||
|
||||
if (status == SM_FAIL) /* disk failure ?! */
|
||||
elog(STOP, "BufferSync: cannot write %u for %u/%u",
|
||||
elog(PANIC, "BufferSync: cannot write %u for %u/%u",
|
||||
bufHdr->tag.blockNum,
|
||||
bufHdr->tag.rnode.tblNode, bufHdr->tag.rnode.relNode);
|
||||
|
||||
@ -1371,7 +1371,7 @@ PrintBufferDescs()
|
||||
LWLockAcquire(BufMgrLock, LW_EXCLUSIVE);
|
||||
for (i = 0; i < NBuffers; ++i, ++buf)
|
||||
{
|
||||
elog(DEBUG, "[%02d] (freeNext=%d, freePrev=%d, rel=%u/%u, \
|
||||
elog(LOG, "[%02d] (freeNext=%d, freePrev=%d, rel=%u/%u, \
|
||||
blockNum=%u, flags=0x%x, refcount=%d %ld)",
|
||||
i, buf->freeNext, buf->freePrev,
|
||||
buf->tag.rnode.tblNode, buf->tag.rnode.relNode,
|
||||
@ -1566,7 +1566,7 @@ FlushRelationBuffers(Relation rel, BlockNumber firstDelBlock)
|
||||
(char *) MAKE_PTR(bufHdr->data));
|
||||
|
||||
if (status == SM_FAIL) /* disk failure ?! */
|
||||
elog(STOP, "FlushRelationBuffers: cannot write %u for %u/%u",
|
||||
elog(PANIC, "FlushRelationBuffers: cannot write %u for %u/%u",
|
||||
bufHdr->tag.blockNum,
|
||||
bufHdr->tag.rnode.tblNode,
|
||||
bufHdr->tag.rnode.relNode);
|
||||
|
@ -7,7 +7,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.88 2002/02/10 22:56:31 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.89 2002/03/02 21:39:29 momjian Exp $
|
||||
*
|
||||
* NOTES:
|
||||
*
|
||||
@ -270,7 +270,7 @@ tryAgain:
|
||||
{
|
||||
int save_errno = errno;
|
||||
|
||||
DO_DB(elog(DEBUG, "BasicOpenFile: not enough descs, retry, er= %d",
|
||||
DO_DB(elog(LOG, "BasicOpenFile: not enough descs, retry, er= %d",
|
||||
errno));
|
||||
errno = 0;
|
||||
if (ReleaseLruFile())
|
||||
@ -304,7 +304,7 @@ pg_nofile(void)
|
||||
#else
|
||||
no_files = (long) max_files_per_process;
|
||||
#endif
|
||||
elog(DEBUG, "pg_nofile: sysconf(_SC_OPEN_MAX) failed; using %ld",
|
||||
elog(LOG, "pg_nofile: sysconf(_SC_OPEN_MAX) failed; using %ld",
|
||||
no_files);
|
||||
}
|
||||
#else /* !HAVE_SYSCONF */
|
||||
@ -353,7 +353,7 @@ _dump_lru(void)
|
||||
sprintf(buf + strlen(buf), "%d ", mru);
|
||||
}
|
||||
sprintf(buf + strlen(buf), "LEAST");
|
||||
elog(DEBUG, buf);
|
||||
elog(LOG, buf);
|
||||
}
|
||||
#endif /* FDDEBUG */
|
||||
|
||||
@ -364,7 +364,7 @@ Delete(File file)
|
||||
|
||||
Assert(file != 0);
|
||||
|
||||
DO_DB(elog(DEBUG, "Delete %d (%s)",
|
||||
DO_DB(elog(LOG, "Delete %d (%s)",
|
||||
file, VfdCache[file].fileName));
|
||||
DO_DB(_dump_lru());
|
||||
|
||||
@ -383,7 +383,7 @@ LruDelete(File file)
|
||||
|
||||
Assert(file != 0);
|
||||
|
||||
DO_DB(elog(DEBUG, "LruDelete %d (%s)",
|
||||
DO_DB(elog(LOG, "LruDelete %d (%s)",
|
||||
file, VfdCache[file].fileName));
|
||||
|
||||
vfdP = &VfdCache[file];
|
||||
@ -399,14 +399,14 @@ LruDelete(File file)
|
||||
if (vfdP->fdstate & FD_DIRTY)
|
||||
{
|
||||
if (pg_fsync(vfdP->fd))
|
||||
elog(DEBUG, "LruDelete: failed to fsync %s: %m",
|
||||
elog(LOG, "LruDelete: failed to fsync %s: %m",
|
||||
vfdP->fileName);
|
||||
vfdP->fdstate &= ~FD_DIRTY;
|
||||
}
|
||||
|
||||
/* close the file */
|
||||
if (close(vfdP->fd))
|
||||
elog(DEBUG, "LruDelete: failed to close %s: %m",
|
||||
elog(LOG, "LruDelete: failed to close %s: %m",
|
||||
vfdP->fileName);
|
||||
|
||||
--nfile;
|
||||
@ -420,7 +420,7 @@ Insert(File file)
|
||||
|
||||
Assert(file != 0);
|
||||
|
||||
DO_DB(elog(DEBUG, "Insert %d (%s)",
|
||||
DO_DB(elog(LOG, "Insert %d (%s)",
|
||||
file, VfdCache[file].fileName));
|
||||
DO_DB(_dump_lru());
|
||||
|
||||
@ -441,7 +441,7 @@ LruInsert(File file)
|
||||
|
||||
Assert(file != 0);
|
||||
|
||||
DO_DB(elog(DEBUG, "LruInsert %d (%s)",
|
||||
DO_DB(elog(LOG, "LruInsert %d (%s)",
|
||||
file, VfdCache[file].fileName));
|
||||
|
||||
vfdP = &VfdCache[file];
|
||||
@ -463,12 +463,12 @@ LruInsert(File file)
|
||||
vfdP->fileMode);
|
||||
if (vfdP->fd < 0)
|
||||
{
|
||||
DO_DB(elog(DEBUG, "RE_OPEN FAILED: %d", errno));
|
||||
DO_DB(elog(LOG, "RE_OPEN FAILED: %d", errno));
|
||||
return vfdP->fd;
|
||||
}
|
||||
else
|
||||
{
|
||||
DO_DB(elog(DEBUG, "RE_OPEN SUCCESS"));
|
||||
DO_DB(elog(LOG, "RE_OPEN SUCCESS"));
|
||||
++nfile;
|
||||
}
|
||||
|
||||
@ -494,7 +494,7 @@ LruInsert(File file)
|
||||
static bool
|
||||
ReleaseLruFile(void)
|
||||
{
|
||||
DO_DB(elog(DEBUG, "ReleaseLruFile. Opened %d", nfile));
|
||||
DO_DB(elog(LOG, "ReleaseLruFile. Opened %d", nfile));
|
||||
|
||||
if (nfile > 0)
|
||||
{
|
||||
@ -515,7 +515,7 @@ AllocateVfd(void)
|
||||
Index i;
|
||||
File file;
|
||||
|
||||
DO_DB(elog(DEBUG, "AllocateVfd. Size %d", SizeVfdCache));
|
||||
DO_DB(elog(LOG, "AllocateVfd. Size %d", SizeVfdCache));
|
||||
|
||||
if (SizeVfdCache == 0)
|
||||
{
|
||||
@ -587,7 +587,7 @@ FreeVfd(File file)
|
||||
{
|
||||
Vfd *vfdP = &VfdCache[file];
|
||||
|
||||
DO_DB(elog(DEBUG, "FreeVfd: %d (%s)",
|
||||
DO_DB(elog(LOG, "FreeVfd: %d (%s)",
|
||||
file, vfdP->fileName ? vfdP->fileName : ""));
|
||||
|
||||
if (vfdP->fileName != NULL)
|
||||
@ -637,7 +637,7 @@ FileAccess(File file)
|
||||
{
|
||||
int returnValue;
|
||||
|
||||
DO_DB(elog(DEBUG, "FileAccess %d (%s)",
|
||||
DO_DB(elog(LOG, "FileAccess %d (%s)",
|
||||
file, VfdCache[file].fileName));
|
||||
|
||||
/*
|
||||
@ -691,7 +691,7 @@ fileNameOpenFile(FileName fileName,
|
||||
if (fileName == NULL)
|
||||
elog(ERROR, "fileNameOpenFile: NULL fname");
|
||||
|
||||
DO_DB(elog(DEBUG, "fileNameOpenFile: %s %x %o",
|
||||
DO_DB(elog(LOG, "fileNameOpenFile: %s %x %o",
|
||||
fileName, fileFlags, fileMode));
|
||||
|
||||
file = AllocateVfd();
|
||||
@ -711,7 +711,7 @@ fileNameOpenFile(FileName fileName,
|
||||
return -1;
|
||||
}
|
||||
++nfile;
|
||||
DO_DB(elog(DEBUG, "fileNameOpenFile: success %d",
|
||||
DO_DB(elog(LOG, "fileNameOpenFile: success %d",
|
||||
vfdP->fd));
|
||||
|
||||
Insert(file);
|
||||
@ -830,7 +830,7 @@ FileClose(File file)
|
||||
|
||||
Assert(FileIsValid(file));
|
||||
|
||||
DO_DB(elog(DEBUG, "FileClose: %d (%s)",
|
||||
DO_DB(elog(LOG, "FileClose: %d (%s)",
|
||||
file, VfdCache[file].fileName));
|
||||
|
||||
vfdP = &VfdCache[file];
|
||||
@ -844,14 +844,14 @@ FileClose(File file)
|
||||
if (vfdP->fdstate & FD_DIRTY)
|
||||
{
|
||||
if (pg_fsync(vfdP->fd))
|
||||
elog(DEBUG, "FileClose: failed to fsync %s: %m",
|
||||
elog(LOG, "FileClose: failed to fsync %s: %m",
|
||||
vfdP->fileName);
|
||||
vfdP->fdstate &= ~FD_DIRTY;
|
||||
}
|
||||
|
||||
/* close the file */
|
||||
if (close(vfdP->fd))
|
||||
elog(DEBUG, "FileClose: failed to close %s: %m",
|
||||
elog(LOG, "FileClose: failed to close %s: %m",
|
||||
vfdP->fileName);
|
||||
|
||||
--nfile;
|
||||
@ -866,7 +866,7 @@ FileClose(File file)
|
||||
/* reset flag so that die() interrupt won't cause problems */
|
||||
vfdP->fdstate &= ~FD_TEMPORARY;
|
||||
if (unlink(vfdP->fileName))
|
||||
elog(DEBUG, "FileClose: failed to unlink %s: %m",
|
||||
elog(LOG, "FileClose: failed to unlink %s: %m",
|
||||
vfdP->fileName);
|
||||
}
|
||||
|
||||
@ -884,7 +884,7 @@ FileUnlink(File file)
|
||||
{
|
||||
Assert(FileIsValid(file));
|
||||
|
||||
DO_DB(elog(DEBUG, "FileUnlink: %d (%s)",
|
||||
DO_DB(elog(LOG, "FileUnlink: %d (%s)",
|
||||
file, VfdCache[file].fileName));
|
||||
|
||||
/* force FileClose to delete it */
|
||||
@ -900,7 +900,7 @@ FileRead(File file, char *buffer, int amount)
|
||||
|
||||
Assert(FileIsValid(file));
|
||||
|
||||
DO_DB(elog(DEBUG, "FileRead: %d (%s) %ld %d %p",
|
||||
DO_DB(elog(LOG, "FileRead: %d (%s) %ld %d %p",
|
||||
file, VfdCache[file].fileName,
|
||||
VfdCache[file].seekPos, amount, buffer));
|
||||
|
||||
@ -921,7 +921,7 @@ FileWrite(File file, char *buffer, int amount)
|
||||
|
||||
Assert(FileIsValid(file));
|
||||
|
||||
DO_DB(elog(DEBUG, "FileWrite: %d (%s) %ld %d %p",
|
||||
DO_DB(elog(LOG, "FileWrite: %d (%s) %ld %d %p",
|
||||
file, VfdCache[file].fileName,
|
||||
VfdCache[file].seekPos, amount, buffer));
|
||||
|
||||
@ -947,7 +947,7 @@ FileSeek(File file, long offset, int whence)
|
||||
{
|
||||
Assert(FileIsValid(file));
|
||||
|
||||
DO_DB(elog(DEBUG, "FileSeek: %d (%s) %ld %ld %d",
|
||||
DO_DB(elog(LOG, "FileSeek: %d (%s) %ld %ld %d",
|
||||
file, VfdCache[file].fileName,
|
||||
VfdCache[file].seekPos, offset, whence));
|
||||
|
||||
@ -1005,7 +1005,7 @@ long
|
||||
FileTell(File file)
|
||||
{
|
||||
Assert(FileIsValid(file));
|
||||
DO_DB(elog(DEBUG, "FileTell %d (%s)",
|
||||
DO_DB(elog(LOG, "FileTell %d (%s)",
|
||||
file, VfdCache[file].fileName));
|
||||
return VfdCache[file].seekPos;
|
||||
}
|
||||
@ -1018,7 +1018,7 @@ FileTruncate(File file, long offset)
|
||||
|
||||
Assert(FileIsValid(file));
|
||||
|
||||
DO_DB(elog(DEBUG, "FileTruncate %d (%s)",
|
||||
DO_DB(elog(LOG, "FileTruncate %d (%s)",
|
||||
file, VfdCache[file].fileName));
|
||||
|
||||
FileSync(file);
|
||||
@ -1117,7 +1117,7 @@ FileMarkDirty(File file)
|
||||
{
|
||||
Assert(FileIsValid(file));
|
||||
|
||||
DO_DB(elog(DEBUG, "FileMarkDirty: %d (%s)",
|
||||
DO_DB(elog(LOG, "FileMarkDirty: %d (%s)",
|
||||
file, VfdCache[file].fileName));
|
||||
|
||||
VfdCache[file].fdstate |= FD_DIRTY;
|
||||
@ -1147,7 +1147,7 @@ AllocateFile(char *name, char *mode)
|
||||
{
|
||||
FILE *file;
|
||||
|
||||
DO_DB(elog(DEBUG, "AllocateFile: Allocated %d", numAllocatedFiles));
|
||||
DO_DB(elog(LOG, "AllocateFile: Allocated %d", numAllocatedFiles));
|
||||
|
||||
if (numAllocatedFiles >= MAX_ALLOCATED_FILES)
|
||||
elog(ERROR, "AllocateFile: too many private FDs demanded");
|
||||
@ -1164,7 +1164,7 @@ TryAgain:
|
||||
{
|
||||
int save_errno = errno;
|
||||
|
||||
DO_DB(elog(DEBUG, "AllocateFile: not enough descs, retry, er= %d",
|
||||
DO_DB(elog(LOG, "AllocateFile: not enough descs, retry, er= %d",
|
||||
errno));
|
||||
errno = 0;
|
||||
if (ReleaseLruFile())
|
||||
@ -1180,7 +1180,7 @@ FreeFile(FILE *file)
|
||||
{
|
||||
int i;
|
||||
|
||||
DO_DB(elog(DEBUG, "FreeFile: Allocated %d", numAllocatedFiles));
|
||||
DO_DB(elog(LOG, "FreeFile: Allocated %d", numAllocatedFiles));
|
||||
|
||||
/* Remove file from list of allocated files, if it's present */
|
||||
for (i = numAllocatedFiles; --i >= 0;)
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.75 2001/11/05 17:46:28 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.76 2002/03/02 21:39:29 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
*
|
||||
@ -131,8 +131,7 @@ proc_exit(int code)
|
||||
InterruptHoldoffCount = 1;
|
||||
CritSectionCount = 0;
|
||||
|
||||
if (DebugLvl > 1)
|
||||
elog(DEBUG, "proc_exit(%d)", code);
|
||||
elog(DEBUG2, "proc_exit(%d)", code);
|
||||
|
||||
/* do our shared memory exits first */
|
||||
shmem_exit(code);
|
||||
@ -150,8 +149,7 @@ proc_exit(int code)
|
||||
(*on_proc_exit_list[on_proc_exit_index].function) (code,
|
||||
on_proc_exit_list[on_proc_exit_index].arg);
|
||||
|
||||
if (DebugLvl > 1)
|
||||
elog(DEBUG, "exit(%d)", code);
|
||||
elog(DEBUG2, "exit(%d)", code);
|
||||
exit(code);
|
||||
}
|
||||
|
||||
@ -164,8 +162,7 @@ proc_exit(int code)
|
||||
void
|
||||
shmem_exit(int code)
|
||||
{
|
||||
if (DebugLvl > 1)
|
||||
elog(DEBUG, "shmem_exit(%d)", code);
|
||||
elog(DEBUG2, "shmem_exit(%d)", code);
|
||||
|
||||
/*
|
||||
* call all the registered callbacks.
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.45 2001/11/04 19:55:31 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.46 2002/03/02 21:39:29 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -65,8 +65,7 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int maxBackends)
|
||||
/* might as well round it off to a multiple of a typical page size */
|
||||
size += 8192 - (size % 8192);
|
||||
|
||||
if (DebugLvl > 1)
|
||||
fprintf(stderr, "invoking IpcMemoryCreate(size=%d)\n", size);
|
||||
elog(DEBUG2, "invoking IpcMemoryCreate(size=%d)", size);
|
||||
|
||||
/*
|
||||
* Create the shmem segment
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.43 2001/10/25 05:49:42 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.44 2002/03/02 21:39:29 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -66,7 +66,7 @@ SendSharedInvalidMessage(SharedInvalidationMessage *msg)
|
||||
insertOK = SIInsertDataEntry(shmInvalBuffer, msg);
|
||||
LWLockRelease(SInvalLock);
|
||||
if (!insertOK)
|
||||
elog(DEBUG, "SendSharedInvalidMessage: SI buffer overflow");
|
||||
elog(LOG, "SendSharedInvalidMessage: SI buffer overflow");
|
||||
}
|
||||
|
||||
/*
|
||||
@ -108,7 +108,7 @@ ReceiveSharedInvalidMessages(
|
||||
if (getResult < 0)
|
||||
{
|
||||
/* got a reset message */
|
||||
elog(DEBUG, "ReceiveSharedInvalidMessages: cache state reset");
|
||||
elog(LOG, "ReceiveSharedInvalidMessages: cache state reset");
|
||||
resetFunction();
|
||||
}
|
||||
else
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.43 2001/11/05 17:46:28 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.44 2002/03/02 21:39:29 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -119,7 +119,7 @@ SIBackendInit(SISeg *segP)
|
||||
MyBackendId = (stateP - &segP->procState[0]) + 1;
|
||||
|
||||
#ifdef INVALIDDEBUG
|
||||
elog(DEBUG, "SIBackendInit: backend id %d", MyBackendId);
|
||||
elog(LOG, "SIBackendInit: backend id %d", MyBackendId);
|
||||
#endif /* INVALIDDEBUG */
|
||||
|
||||
/* mark myself active, with all extant messages already read */
|
||||
@ -218,8 +218,7 @@ SIInsertDataEntry(SISeg *segP, SharedInvalidationMessage *data)
|
||||
if (numMsgs == (MAXNUMMESSAGES * 70 / 100) &&
|
||||
IsUnderPostmaster)
|
||||
{
|
||||
if (DebugLvl >= 1)
|
||||
elog(DEBUG, "SIInsertDataEntry: table is 70%% full, signaling postmaster");
|
||||
elog(DEBUG1, "SIInsertDataEntry: table is 70%% full, signaling postmaster");
|
||||
SendPostmasterSignal(PMSIGNAL_WAKEN_CHILDREN);
|
||||
}
|
||||
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.104 2001/11/05 17:46:28 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.105 2002/03/02 21:39:29 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* Outside modules can create a lock table and acquire/release
|
||||
@ -107,7 +107,7 @@ inline static void
|
||||
LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
|
||||
{
|
||||
if (LOCK_DEBUG_ENABLED(lock))
|
||||
elog(DEBUG,
|
||||
elog(LOG,
|
||||
"%s: lock(%lx) tbl(%d) rel(%u) db(%u) obj(%u) grantMask(%x) "
|
||||
"req(%d,%d,%d,%d,%d,%d,%d)=%d "
|
||||
"grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
|
||||
@ -133,7 +133,7 @@ HOLDER_PRINT(const char *where, const HOLDER *holderP)
|
||||
&& (((LOCK *) MAKE_PTR(holderP->tag.lock))->tag.relId >= (Oid) Trace_lock_oidmin))
|
||||
|| (Trace_lock_table && (((LOCK *) MAKE_PTR(holderP->tag.lock))->tag.relId == Trace_lock_table))
|
||||
)
|
||||
elog(DEBUG,
|
||||
elog(LOG,
|
||||
"%s: holder(%lx) lock(%lx) tbl(%d) proc(%lx) xid(%u) hold(%d,%d,%d,%d,%d,%d,%d)=%d",
|
||||
where, MAKE_OFFSET(holderP), holderP->tag.lock,
|
||||
HOLDER_LOCKMETHOD(*(holderP)),
|
||||
@ -461,7 +461,7 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
|
||||
|
||||
#ifdef LOCK_DEBUG
|
||||
if (lockmethod == USER_LOCKMETHOD && Trace_userlocks)
|
||||
elog(DEBUG, "LockAcquire: user lock [%u] %s",
|
||||
elog(LOG, "LockAcquire: user lock [%u] %s",
|
||||
locktag->objId.blkno, lock_mode_names[lockmode]);
|
||||
#endif
|
||||
|
||||
@ -582,7 +582,7 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
|
||||
{
|
||||
if (i >= (int) lockmode)
|
||||
break; /* safe: we have a lock >= req level */
|
||||
elog(DEBUG, "Deadlock risk: raising lock level"
|
||||
elog(LOG, "Deadlock risk: raising lock level"
|
||||
" from %s to %s on object %u/%u/%u",
|
||||
lock_mode_names[i], lock_mode_names[lockmode],
|
||||
lock->tag.relId, lock->tag.dbId, lock->tag.objId.blkno);
|
||||
@ -1000,7 +1000,7 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
|
||||
|
||||
#ifdef LOCK_DEBUG
|
||||
if (lockmethod == USER_LOCKMETHOD && Trace_userlocks)
|
||||
elog(DEBUG, "LockRelease: user lock tag [%u] %d", locktag->objId.blkno, lockmode);
|
||||
elog(LOG, "LockRelease: user lock tag [%u] %d", locktag->objId.blkno, lockmode);
|
||||
#endif
|
||||
|
||||
/* ???????? This must be changed when short term locks will be used */
|
||||
@ -1196,7 +1196,7 @@ LockReleaseAll(LOCKMETHOD lockmethod, PROC *proc,
|
||||
|
||||
#ifdef LOCK_DEBUG
|
||||
if (lockmethod == USER_LOCKMETHOD ? Trace_userlocks : Trace_locks)
|
||||
elog(DEBUG, "LockReleaseAll: lockmethod=%d, pid=%d",
|
||||
elog(LOG, "LockReleaseAll: lockmethod=%d, pid=%d",
|
||||
lockmethod, proc->pid);
|
||||
#endif
|
||||
|
||||
@ -1341,7 +1341,7 @@ next_item:
|
||||
|
||||
#ifdef LOCK_DEBUG
|
||||
if (lockmethod == USER_LOCKMETHOD ? Trace_userlocks : Trace_locks)
|
||||
elog(DEBUG, "LockReleaseAll: done");
|
||||
elog(LOG, "LockReleaseAll: done");
|
||||
#endif
|
||||
|
||||
return TRUE;
|
||||
@ -1460,7 +1460,7 @@ DumpAllLocks(void)
|
||||
LOCK_PRINT("DumpAllLocks", lock, 0);
|
||||
}
|
||||
else
|
||||
elog(DEBUG, "DumpAllLocks: holder->tag.lock = NULL");
|
||||
elog(LOG, "DumpAllLocks: holder->tag.lock = NULL");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -15,7 +15,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lwlock.c,v 1.8 2002/01/07 16:33:00 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lwlock.c,v 1.9 2002/03/02 21:39:29 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -68,7 +68,7 @@ inline static void
|
||||
PRINT_LWDEBUG(const char *where, LWLockId lockid, const volatile LWLock *lock)
|
||||
{
|
||||
if (Trace_lwlocks)
|
||||
elog(DEBUG, "%s(%d): excl %d shared %d head %p rOK %d",
|
||||
elog(LOG, "%s(%d): excl %d shared %d head %p rOK %d",
|
||||
where, (int) lockid,
|
||||
(int) lock->exclusive, lock->shared, lock->head,
|
||||
(int) lock->releaseOK);
|
||||
@ -78,8 +78,7 @@ inline static void
|
||||
LOG_LWDEBUG(const char *where, LWLockId lockid, const char *msg)
|
||||
{
|
||||
if (Trace_lwlocks)
|
||||
elog(DEBUG, "%s(%d): %s",
|
||||
where, (int) lockid, msg);
|
||||
elog(LOG, "%s(%d): %s", where, (int) lockid, msg);
|
||||
}
|
||||
|
||||
#else /* not LOCK_DEBUG */
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.117 2001/12/28 18:16:43 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.118 2002/03/02 21:39:29 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -211,7 +211,7 @@ InitProcess(void)
|
||||
* we are a backend, we inherit this by fork() from the postmaster).
|
||||
*/
|
||||
if (procglobal == NULL)
|
||||
elog(STOP, "InitProcess: Proc Header uninitialized");
|
||||
elog(PANIC, "InitProcess: Proc Header uninitialized");
|
||||
|
||||
if (MyProc != NULL)
|
||||
elog(ERROR, "InitProcess: you already exist");
|
||||
@ -300,7 +300,7 @@ InitDummyProcess(void)
|
||||
* inherit this by fork() from the postmaster).
|
||||
*/
|
||||
if (ProcGlobal == NULL || DummyProc == NULL)
|
||||
elog(STOP, "InitDummyProcess: Proc Header uninitialized");
|
||||
elog(PANIC, "InitDummyProcess: Proc Header uninitialized");
|
||||
|
||||
if (MyProc != NULL)
|
||||
elog(ERROR, "InitDummyProcess: you already exist");
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.89 2001/10/28 06:25:51 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.90 2002/03/02 21:39:30 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -571,7 +571,7 @@ mdblindwrt(RelFileNode rnode,
|
||||
|
||||
if (lseek(fd, seekpos, SEEK_SET) != seekpos)
|
||||
{
|
||||
elog(DEBUG, "mdblindwrt: lseek(%ld) failed: %m", seekpos);
|
||||
elog(LOG, "mdblindwrt: lseek(%ld) failed: %m", seekpos);
|
||||
close(fd);
|
||||
return SM_FAIL;
|
||||
}
|
||||
@ -585,13 +585,13 @@ mdblindwrt(RelFileNode rnode,
|
||||
/* if write didn't set errno, assume problem is no disk space */
|
||||
if (errno == 0)
|
||||
errno = ENOSPC;
|
||||
elog(DEBUG, "mdblindwrt: write() failed: %m");
|
||||
elog(LOG, "mdblindwrt: write() failed: %m");
|
||||
status = SM_FAIL;
|
||||
}
|
||||
|
||||
if (close(fd) < 0)
|
||||
{
|
||||
elog(DEBUG, "mdblindwrt: close() failed: %m");
|
||||
elog(LOG, "mdblindwrt: close() failed: %m");
|
||||
status = SM_FAIL;
|
||||
}
|
||||
|
||||
@ -1085,7 +1085,7 @@ _mdfd_blind_getseg(RelFileNode rnode, BlockNumber blkno)
|
||||
/* call fd.c to allow other FDs to be closed if needed */
|
||||
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, 0600);
|
||||
if (fd < 0)
|
||||
elog(DEBUG, "_mdfd_blind_getseg: couldn't open %s: %m", path);
|
||||
elog(LOG, "_mdfd_blind_getseg: couldn't open %s: %m", path);
|
||||
|
||||
pfree(path);
|
||||
|
||||
|
@ -11,7 +11,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/smgr/smgr.c,v 1.54 2001/10/25 05:49:43 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/storage/smgr/smgr.c,v 1.55 2002/03/02 21:39:30 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -582,7 +582,7 @@ smgrsync()
|
||||
if (smgrsw[i].smgr_sync)
|
||||
{
|
||||
if ((*(smgrsw[i].smgr_sync)) () == SM_FAIL)
|
||||
elog(STOP, "storage sync failed on %s: %m",
|
||||
elog(PANIC, "storage sync failed on %s: %m",
|
||||
DatumGetCString(DirectFunctionCall1(smgrout,
|
||||
Int16GetDatum(i))));
|
||||
}
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/tcop/postgres.c,v 1.251 2002/03/01 22:45:13 petere Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/tcop/postgres.c,v 1.252 2002/03/02 21:39:31 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* this is the "main" module of the postgres backend and
|
||||
@ -370,7 +370,7 @@ pg_parse_query(char *query_string, Oid *typev, int nargs)
|
||||
List *raw_parsetree_list;
|
||||
|
||||
if (Debug_print_query)
|
||||
elog(DEBUG, "query: %s", query_string);
|
||||
elog(LOG, "query: %s", query_string);
|
||||
|
||||
if (Show_parser_stats)
|
||||
ResetUsage();
|
||||
@ -429,11 +429,11 @@ pg_analyze_and_rewrite(Node *parsetree)
|
||||
{
|
||||
if (Debug_pretty_print)
|
||||
{
|
||||
elog(DEBUG, "parse tree:");
|
||||
elog(LOG, "parse tree:");
|
||||
nodeDisplay(querytree);
|
||||
}
|
||||
else
|
||||
elog(DEBUG, "parse tree: %s", nodeToString(querytree));
|
||||
elog(LOG, "parse tree: %s", nodeToString(querytree));
|
||||
}
|
||||
|
||||
if (querytree->commandType == CMD_UTILITY)
|
||||
@ -473,7 +473,7 @@ pg_analyze_and_rewrite(Node *parsetree)
|
||||
{
|
||||
if (Debug_pretty_print)
|
||||
{
|
||||
elog(DEBUG, "rewritten parse tree:");
|
||||
elog(LOG, "rewritten parse tree:");
|
||||
foreach(list_item, querytree_list)
|
||||
{
|
||||
querytree = (Query *) lfirst(list_item);
|
||||
@ -483,11 +483,11 @@ pg_analyze_and_rewrite(Node *parsetree)
|
||||
}
|
||||
else
|
||||
{
|
||||
elog(DEBUG, "rewritten parse tree:");
|
||||
elog(LOG, "rewritten parse tree:");
|
||||
foreach(list_item, querytree_list)
|
||||
{
|
||||
querytree = (Query *) lfirst(list_item);
|
||||
elog(DEBUG, "%s", nodeToString(querytree));
|
||||
elog(LOG, "%s", nodeToString(querytree));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -541,11 +541,11 @@ pg_plan_query(Query *querytree)
|
||||
{
|
||||
if (Debug_pretty_print)
|
||||
{
|
||||
elog(DEBUG, "plan:");
|
||||
elog(LOG, "plan:");
|
||||
nodeDisplay(plan);
|
||||
}
|
||||
else
|
||||
elog(DEBUG, "plan: %s", nodeToString(plan));
|
||||
elog(LOG, "plan: %s", nodeToString(plan));
|
||||
}
|
||||
|
||||
return plan;
|
||||
@ -759,9 +759,8 @@ pg_exec_query_string(char *query_string, /* string to execute */
|
||||
* process utility functions (create, destroy, etc..)
|
||||
*/
|
||||
if (Debug_print_query)
|
||||
elog(DEBUG, "ProcessUtility: %s", query_string);
|
||||
else if (DebugLvl > 1)
|
||||
elog(DEBUG, "ProcessUtility");
|
||||
elog(LOG, "ProcessUtility: %s", query_string);
|
||||
else elog(DEBUG2, "ProcessUtility");
|
||||
|
||||
if (querytree->originalQuery)
|
||||
{
|
||||
@ -805,8 +804,7 @@ pg_exec_query_string(char *query_string, /* string to execute */
|
||||
}
|
||||
else
|
||||
{
|
||||
if (DebugLvl > 1)
|
||||
elog(DEBUG, "ProcessQuery");
|
||||
elog(DEBUG2, "ProcessQuery");
|
||||
|
||||
if (querytree->originalQuery)
|
||||
{
|
||||
@ -916,8 +914,7 @@ pg_exec_query_string(char *query_string, /* string to execute */
|
||||
static void
|
||||
start_xact_command(void)
|
||||
{
|
||||
if (DebugLvl >= 1)
|
||||
elog(DEBUG, "StartTransactionCommand");
|
||||
elog(DEBUG1, "StartTransactionCommand");
|
||||
StartTransactionCommand();
|
||||
}
|
||||
|
||||
@ -928,8 +925,7 @@ finish_xact_command(void)
|
||||
DeferredTriggerEndQuery();
|
||||
|
||||
/* Now commit the command */
|
||||
if (DebugLvl >= 1)
|
||||
elog(DEBUG, "CommitTransactionCommand");
|
||||
elog(DEBUG1, "CommitTransactionCommand");
|
||||
|
||||
CommitTransactionCommand();
|
||||
|
||||
@@ -1130,7 +1126,7 @@ usage(char *progname)
#endif
	printf(" -B NBUFFERS number of shared buffers (default %d)\n", DEF_NBUFFERS);
	printf(" -c NAME=VALUE set run-time parameter\n");
	printf(" -d 1-5 debugging level\n");
	printf(" -d 1-5,0 debugging level (0 is off)\n");
	printf(" -D DATADIR database directory\n");
	printf(" -e use European date format\n");
	printf(" -E echo query before execution\n");
|
||||
@@ -1281,17 +1277,37 @@ PostgresMain(int argc, char *argv[], const char *username)
				break;

			case 'd':			/* debug level */
				SetConfigOption("debug_level", optarg, ctx, gucsource);
				if (DebugLvl >= 1)
					SetConfigOption("log_connections", "true", ctx, gucsource);
				if (DebugLvl >= 2)
					SetConfigOption("debug_print_query", "true", ctx, gucsource);
				if (DebugLvl >= 3)
					SetConfigOption("debug_print_parse", "true", ctx, gucsource);
				if (DebugLvl >= 4)
					SetConfigOption("debug_print_plan", "true", ctx, gucsource);
				if (DebugLvl >= 5)
					SetConfigOption("debug_print_rewritten", "true", ctx, gucsource);
				{
					/* Set server debugging level. */
					if (atoi(optarg) != 0)
					{
						char	   *debugstr = palloc(strlen("debug") + strlen(optarg) + 1);

						sprintf(debugstr, "debug%s", optarg);
						SetConfigOption("server_min_messages", debugstr, ctx, gucsource);
						pfree(debugstr);
						/*
						 * -d is not the same as setting client_min_messages
						 * because it enables other output options.
						 */
						if (atoi(optarg) >= 1)
							SetConfigOption("log_connections", "true", ctx, gucsource);
						if (atoi(optarg) >= 2)
							SetConfigOption("debug_print_query", "true", ctx, gucsource);
						if (atoi(optarg) >= 3)
							SetConfigOption("debug_print_parse", "true", ctx, gucsource);
						if (atoi(optarg) >= 4)
							SetConfigOption("debug_print_plan", "true", ctx, gucsource);
						if (atoi(optarg) >= 5)
							SetConfigOption("debug_print_rewritten", "true", ctx, gucsource);
					}
					else
						/*
						 * -d 0 allows user to prevent postmaster debug from
						 * propogating to backend.
						 */
						SetConfigOption("server_min_messages", "notice", PGC_POSTMASTER, PGC_S_ARGV);
				}
||||
break;
|
||||
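As the new comment in this hunk notes, -d on the backend is broader than just lowering server_min_messages. Under the new scheme, starting a backend with -d 2 is roughly equivalent to setting the following parameters by hand (an illustration of this hunk's logic, not an excerpt from the documentation):

	server_min_messages = debug2
	log_connections = true
	debug_print_query = true

and -d 0 simply resets server_min_messages to notice, so a debug level set on the postmaster does not carry over to the backend.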
|
||||
case 'E':
|
||||
@ -1682,8 +1698,7 @@ PostgresMain(int argc, char *argv[], const char *username)
|
||||
* putting it inside InitPostgres() instead. In particular, anything
|
||||
* that involves database access should be there, not here.
|
||||
*/
|
||||
if (DebugLvl > 1)
|
||||
elog(DEBUG, "InitPostgres");
|
||||
elog(DEBUG2, "InitPostgres");
|
||||
InitPostgres(DBName, username);
|
||||
|
||||
SetProcessingMode(NormalProcessing);
|
||||
@ -1707,7 +1722,7 @@ PostgresMain(int argc, char *argv[], const char *username)
|
||||
if (!IsUnderPostmaster)
|
||||
{
|
||||
puts("\nPOSTGRES backend interactive interface ");
|
||||
puts("$Revision: 1.251 $ $Date: 2002/03/01 22:45:13 $\n");
|
||||
puts("$Revision: 1.252 $ $Date: 2002/03/02 21:39:31 $\n");
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1765,8 +1780,7 @@ PostgresMain(int argc, char *argv[], const char *username)
|
||||
MemoryContextSwitchTo(ErrorContext);
|
||||
|
||||
/* Do the recovery */
|
||||
if (DebugLvl >= 1)
|
||||
elog(DEBUG, "AbortCurrentTransaction");
|
||||
elog(DEBUG1, "AbortCurrentTransaction");
|
||||
AbortCurrentTransaction();
|
||||
|
||||
/*
|
||||
@ -2086,7 +2100,7 @@ ShowUsage(const char *title)
|
||||
if (str.data[str.len-1] == '\n')
|
||||
str.data[--str.len] = '\0';
|
||||
|
||||
elog(DEBUG, "%s\n%s", title, str.data);
|
||||
elog(LOG, "%s\n%s", title, str.data);
|
||||
|
||||
pfree(str.data);
|
||||
}
|
||||
@ -2108,10 +2122,10 @@ assertTest(int val)
|
||||
if (assert_enabled)
|
||||
{
|
||||
/* val != 0 should be trapped by previous Assert */
|
||||
elog(NOTICE, "Assert test successfull (val = %d)", val);
|
||||
elog(INFO, "Assert test successfull (val = %d)", val);
|
||||
}
|
||||
else
|
||||
elog(NOTICE, "Assert checking is disabled (val = %d)", val);
|
||||
elog(INFO, "Assert checking is disabled (val = %d)", val);
|
||||
|
||||
return val;
|
||||
}
|
||||
@ -2383,7 +2397,7 @@ CreateCommandTag(Node *parsetree)
|
||||
break;
|
||||
|
||||
default:
|
||||
elog(DEBUG, "CreateCommandTag: unknown parse node type %d",
|
||||
elog(LOG, "CreateCommandTag: unknown parse node type %d",
|
||||
nodeTag(parsetree));
|
||||
tag = "???";
|
||||
break;
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/adt/acl.c,v 1.67 2002/02/18 23:11:22 petere Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/adt/acl.c,v 1.68 2002/03/02 21:39:32 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -122,7 +122,7 @@ aclparse(const char *s, AclItem *aip, unsigned *modechg)
|
||||
Assert(s && aip && modechg);
|
||||
|
||||
#ifdef ACLDEBUG
|
||||
elog(DEBUG, "aclparse: input = '%s'", s);
|
||||
elog(LOG, "aclparse: input = '%s'", s);
|
||||
#endif
|
||||
aip->ai_idtype = ACL_IDTYPE_UID;
|
||||
s = getid(s, name);
|
||||
@ -204,7 +204,7 @@ aclparse(const char *s, AclItem *aip, unsigned *modechg)
|
||||
}
|
||||
|
||||
#ifdef ACLDEBUG
|
||||
elog(DEBUG, "aclparse: correctly read [%x %d %x], modechg=%x",
|
||||
elog(LOG, "aclparse: correctly read [%x %d %x], modechg=%x",
|
||||
aip->ai_idtype, aip->ai_id, aip->ai_mode, *modechg);
|
||||
#endif
|
||||
return s;
|
||||
|
@ -8,7 +8,7 @@
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/adt/format_type.c,v 1.23 2001/11/19 19:51:20 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/adt/format_type.c,v 1.24 2002/03/02 21:39:32 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -294,7 +294,7 @@ format_type_internal(Oid type_oid, int32 typemod,
|
||||
fieldstr = "";
|
||||
break;
|
||||
default:
|
||||
elog(DEBUG, "Invalid INTERVAL typmod 0x%x", typemod);
|
||||
elog(LOG, "Invalid INTERVAL typmod 0x%x", typemod);
|
||||
fieldstr = "";
|
||||
break;
|
||||
}
|
||||
|
@ -4,7 +4,7 @@
|
||||
* The PostgreSQL locale utils.
|
||||
*
|
||||
*
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/adt/pg_locale.c,v 1.13 2001/11/05 17:46:29 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/adt/pg_locale.c,v 1.14 2002/03/02 21:39:32 momjian Exp $
|
||||
*
|
||||
* Portions Copyright (c) 1999-2000, PostgreSQL Global Development Group
|
||||
*
|
||||
@ -86,7 +86,7 @@ static void
|
||||
PGLC_debug_lc(PG_LocaleCategories *lc)
|
||||
{
|
||||
#ifdef LC_MESSAGES
|
||||
elog(DEBUG, "CURRENT LOCALE ENVIRONMENT:\n\nLANG: \t%s\nLC_CTYPE:\t%s\nLC_NUMERIC:\t%s\nLC_TIME:\t%s\nLC_COLLATE:\t%s\nLC_MONETARY:\t%s\nLC_MESSAGES:\t%s\n",
|
||||
elog(LOG, "CURRENT LOCALE ENVIRONMENT:\n\nLANG: \t%s\nLC_CTYPE:\t%s\nLC_NUMERIC:\t%s\nLC_TIME:\t%s\nLC_COLLATE:\t%s\nLC_MONETARY:\t%s\nLC_MESSAGES:\t%s\n",
|
||||
lc->lang,
|
||||
lc->lc_ctype,
|
||||
lc->lc_numeric,
|
||||
@ -95,7 +95,7 @@ PGLC_debug_lc(PG_LocaleCategories *lc)
|
||||
lc->lc_monetary,
|
||||
lc->lc_messages);
|
||||
#else
|
||||
elog(DEBUG, "CURRENT LOCALE ENVIRONMENT:\n\nLANG: \t%s\nLC_CTYPE:\t%s\nLC_NUMERIC:\t%s\nLC_TIME:\t%s\nLC_COLLATE:\t%s\nLC_MONETARY:\t%s\n",
|
||||
elog(LOG, "CURRENT LOCALE ENVIRONMENT:\n\nLANG: \t%s\nLC_CTYPE:\t%s\nLC_NUMERIC:\t%s\nLC_TIME:\t%s\nLC_COLLATE:\t%s\nLC_MONETARY:\t%s\n",
|
||||
lc->lang,
|
||||
lc->lc_ctype,
|
||||
lc->lc_numeric,
|
||||
|
@ -15,7 +15,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/adt/selfuncs.c,v 1.104 2002/03/01 04:09:25 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/adt/selfuncs.c,v 1.105 2002/03/02 21:39:32 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -3169,7 +3169,7 @@ locale_is_like_safe(void)
|
||||
return (bool) result;
|
||||
localeptr = setlocale(LC_COLLATE, NULL);
|
||||
if (!localeptr)
|
||||
elog(STOP, "Invalid LC_COLLATE setting");
|
||||
elog(PANIC, "Invalid LC_COLLATE setting");
|
||||
|
||||
/*
|
||||
* Currently we accept only "C" and "POSIX" (do any systems still
|
||||
|
src/backend/utils/cache/catcache.c
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.88 2002/02/25 04:06:50 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.89 2002/03/02 21:39:32 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -146,14 +146,14 @@ CatCachePrintStats(void)
|
||||
long cc_hits = 0;
|
||||
long cc_newloads = 0;
|
||||
|
||||
elog(DEBUG, "Catcache stats dump: %d/%d tuples in catcaches",
|
||||
elog(LOG, "Catcache stats dump: %d/%d tuples in catcaches",
|
||||
CacheHdr->ch_ntup, CacheHdr->ch_maxtup);
|
||||
|
||||
for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
|
||||
{
|
||||
if (cache->cc_ntup == 0 && cache->cc_searches == 0)
|
||||
continue; /* don't print unused caches */
|
||||
elog(DEBUG, "Catcache %s/%s: %d tup, %ld srch, %ld hits, %ld loads, %ld not found",
|
||||
elog(LOG, "Catcache %s/%s: %d tup, %ld srch, %ld hits, %ld loads, %ld not found",
|
||||
cache->cc_relname,
|
||||
cache->cc_indname,
|
||||
cache->cc_ntup,
|
||||
@ -165,7 +165,7 @@ CatCachePrintStats(void)
|
||||
cc_hits += cache->cc_hits;
|
||||
cc_newloads += cache->cc_newloads;
|
||||
}
|
||||
elog(DEBUG, "Catcache totals: %d tup, %ld srch, %ld hits, %ld loads, %ld not found",
|
||||
elog(LOG, "Catcache totals: %d tup, %ld srch, %ld hits, %ld loads, %ld not found",
|
||||
CacheHdr->ch_ntup,
|
||||
cc_searches,
|
||||
cc_hits,
|
||||
@ -208,17 +208,17 @@ CreateCacheMemoryContext(void)
|
||||
*/
|
||||
#ifdef CACHEDEBUG
|
||||
#define CatalogCacheInitializeCache_DEBUG1 \
|
||||
elog(DEBUG, "CatalogCacheInitializeCache: cache @%p %s", cache, \
|
||||
elog(LOG, "CatalogCacheInitializeCache: cache @%p %s", cache, \
|
||||
cache->cc_relname)
|
||||
|
||||
#define CatalogCacheInitializeCache_DEBUG2 \
|
||||
do { \
|
||||
if (cache->cc_key[i] > 0) { \
|
||||
elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
|
||||
elog(LOG, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
|
||||
i+1, cache->cc_nkeys, cache->cc_key[i], \
|
||||
tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
|
||||
} else { \
|
||||
elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d", \
|
||||
elog(LOG, "CatalogCacheInitializeCache: load %d/%d w/%d", \
|
||||
i+1, cache->cc_nkeys, cache->cc_key[i]); \
|
||||
} \
|
||||
} while(0)
|
||||
@ -270,7 +270,7 @@ CatalogCacheInitializeCache(CatCache *cache)
|
||||
|
||||
heap_close(relation, NoLock);
|
||||
|
||||
CACHE3_elog(DEBUG, "CatalogCacheInitializeCache: %s, %d keys",
|
||||
CACHE3_elog(LOG, "CatalogCacheInitializeCache: %s, %d keys",
|
||||
cache->cc_relname, cache->cc_nkeys);
|
||||
|
||||
/*
|
||||
@ -306,7 +306,7 @@ CatalogCacheInitializeCache(CatCache *cache)
|
||||
/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
|
||||
cache->cc_skey[i].sk_attno = cache->cc_key[i];
|
||||
|
||||
CACHE4_elog(DEBUG, "CatalogCacheInit %s %d %p",
|
||||
CACHE4_elog(LOG, "CatalogCacheInit %s %d %p",
|
||||
cache->cc_relname,
|
||||
i,
|
||||
cache);
|
||||
@ -350,7 +350,7 @@ CatalogCacheComputeHashIndex(CatCache *cache, ScanKey cur_skey)
|
||||
{
|
||||
uint32 hashIndex = 0;
|
||||
|
||||
CACHE4_elog(DEBUG, "CatalogCacheComputeHashIndex %s %d %p",
|
||||
CACHE4_elog(LOG, "CatalogCacheComputeHashIndex %s %d %p",
|
||||
cache->cc_relname,
|
||||
cache->cc_nkeys,
|
||||
cache);
|
||||
@ -491,7 +491,7 @@ CatalogCacheIdInvalidate(int cacheId,
|
||||
* sanity checks
|
||||
*/
|
||||
Assert(ItemPointerIsValid(pointer));
|
||||
CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: called");
|
||||
CACHE1_elog(LOG, "CatalogCacheIdInvalidate: called");
|
||||
|
||||
/*
|
||||
* inspect caches to find the proper cache
|
||||
@ -521,7 +521,7 @@ CatalogCacheIdInvalidate(int cacheId,
|
||||
ct->dead = true;
|
||||
else
|
||||
CatCacheRemoveCTup(ccp, ct);
|
||||
CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: invalidated");
|
||||
CACHE1_elog(LOG, "CatalogCacheIdInvalidate: invalidated");
|
||||
/* could be multiple matches, so keep looking! */
|
||||
}
|
||||
}
|
||||
@ -623,12 +623,12 @@ ResetCatalogCaches(void)
|
||||
{
|
||||
CatCache *cache;
|
||||
|
||||
CACHE1_elog(DEBUG, "ResetCatalogCaches called");
|
||||
CACHE1_elog(LOG, "ResetCatalogCaches called");
|
||||
|
||||
for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
|
||||
ResetCatalogCache(cache);
|
||||
|
||||
CACHE1_elog(DEBUG, "end of ResetCatalogCaches call");
|
||||
CACHE1_elog(LOG, "end of ResetCatalogCaches call");
|
||||
}
|
||||
|
||||
/*
|
||||
@ -656,7 +656,7 @@ CatalogCacheFlushRelation(Oid relId)
|
||||
{
|
||||
CatCache *cache;
|
||||
|
||||
CACHE2_elog(DEBUG, "CatalogCacheFlushRelation called for %u", relId);
|
||||
CACHE2_elog(LOG, "CatalogCacheFlushRelation called for %u", relId);
|
||||
|
||||
for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
|
||||
{
|
||||
@ -716,7 +716,7 @@ CatalogCacheFlushRelation(Oid relId)
|
||||
}
|
||||
}
|
||||
|
||||
CACHE1_elog(DEBUG, "end of CatalogCacheFlushRelation call");
|
||||
CACHE1_elog(LOG, "end of CatalogCacheFlushRelation call");
|
||||
}
|
||||
|
||||
/*
|
||||
@ -730,7 +730,7 @@ CatalogCacheFlushRelation(Oid relId)
|
||||
#ifdef CACHEDEBUG
|
||||
#define InitCatCache_DEBUG1 \
|
||||
do { \
|
||||
elog(DEBUG, "InitCatCache: rel=%s id=%d nkeys=%d size=%d\n", \
|
||||
elog(LOG, "InitCatCache: rel=%s id=%d nkeys=%d size=%d\n", \
|
||||
cp->cc_relname, cp->id, cp->cc_nkeys, cp->cc_size); \
|
||||
} while(0)
|
||||
|
||||
@ -958,7 +958,7 @@ SearchCatCache(CatCache *cache,
|
||||
DLMoveToFront(&ct->cache_elem);
|
||||
|
||||
#ifdef CACHEDEBUG
|
||||
CACHE3_elog(DEBUG, "SearchCatCache(%s): found in bucket %d",
|
||||
CACHE3_elog(LOG, "SearchCatCache(%s): found in bucket %d",
|
||||
cache->cc_relname, hash);
|
||||
#endif /* CACHEDEBUG */
|
||||
|
||||
@ -1006,7 +1006,7 @@ SearchCatCache(CatCache *cache,
|
||||
Buffer buffer;
|
||||
int i;
|
||||
|
||||
CACHE2_elog(DEBUG, "SearchCatCache(%s): performing index scan",
|
||||
CACHE2_elog(LOG, "SearchCatCache(%s): performing index scan",
|
||||
cache->cc_relname);
|
||||
|
||||
/*
|
||||
@ -1045,7 +1045,7 @@ SearchCatCache(CatCache *cache,
|
||||
{
|
||||
HeapScanDesc sd;
|
||||
|
||||
CACHE2_elog(DEBUG, "SearchCatCache(%s): performing heap scan",
|
||||
CACHE2_elog(LOG, "SearchCatCache(%s): performing heap scan",
|
||||
cache->cc_relname);
|
||||
|
||||
sd = heap_beginscan(relation, 0, SnapshotNow,
|
||||
@ -1081,7 +1081,7 @@ SearchCatCache(CatCache *cache,
|
||||
* Finish initializing the CatCTup header, and add it to the linked
|
||||
* lists.
|
||||
*/
|
||||
CACHE1_elog(DEBUG, "SearchCatCache: found tuple");
|
||||
CACHE1_elog(LOG, "SearchCatCache: found tuple");
|
||||
|
||||
ct->ct_magic = CT_MAGIC;
|
||||
ct->my_cache = cache;
|
||||
@ -1112,7 +1112,7 @@ SearchCatCache(CatCache *cache,
|
||||
|
||||
if (oldct->refcount == 0)
|
||||
{
|
||||
CACHE2_elog(DEBUG, "SearchCatCache(%s): Overflow, LRU removal",
|
||||
CACHE2_elog(LOG, "SearchCatCache(%s): Overflow, LRU removal",
|
||||
cache->cc_relname);
|
||||
CatCacheRemoveCTup(oldct->my_cache, oldct);
|
||||
break;
|
||||
@ -1120,9 +1120,9 @@ SearchCatCache(CatCache *cache,
|
||||
}
|
||||
}
|
||||
|
||||
CACHE4_elog(DEBUG, "SearchCatCache(%s): Contains %d/%d tuples",
|
||||
CACHE4_elog(LOG, "SearchCatCache(%s): Contains %d/%d tuples",
|
||||
cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
|
||||
CACHE3_elog(DEBUG, "SearchCatCache(%s): put in bucket %d",
|
||||
CACHE3_elog(LOG, "SearchCatCache(%s): put in bucket %d",
|
||||
cache->cc_relname, hash);
|
||||
|
||||
return &ct->tuple;
|
||||
@ -1194,7 +1194,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
|
||||
{
|
||||
CatCache *ccp;
|
||||
|
||||
CACHE1_elog(DEBUG, "PrepareToInvalidateCacheTuple: called");
|
||||
CACHE1_elog(LOG, "PrepareToInvalidateCacheTuple: called");
|
||||
|
||||
/*
|
||||
* sanity checks
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/error/elog.c,v 1.91 2001/11/05 17:46:30 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/error/elog.c,v 1.92 2002/03/02 21:39:33 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -38,6 +38,16 @@
|
||||
#include "mb/pg_wchar.h"
|
||||
#endif
|
||||
|
||||
#define DEFAULT_SERVER_MIN_MESSAGES_STR "notice"
|
||||
int server_min_messages;
|
||||
char *server_min_messages_str = NULL;
|
||||
const char server_min_messages_str_default[] = DEFAULT_SERVER_MIN_MESSAGES_STR;
|
||||
|
||||
#define DEFAULT_CLIENT_MIN_MESSAGES_STR "info"
|
||||
int client_min_messages;
|
||||
char *client_min_messages_str = NULL;
|
||||
const char client_min_messages_str_default[] = DEFAULT_CLIENT_MIN_MESSAGES_STR;
|
||||
|
||||
#ifdef ENABLE_SYSLOG
|
||||
/*
|
||||
* 0 = only stdout/stderr
|
||||
@ -109,7 +119,7 @@ elog(int lev, const char *fmt,...)
|
||||
* Note that we use malloc() not palloc() because we want to retain
|
||||
* control if we run out of memory. palloc() would recursively call
|
||||
* elog(ERROR), which would be all right except if we are working on a
|
||||
* FATAL or REALLYFATAL error. We'd lose track of the fatal condition
|
||||
* FATAL or PANIC error. We'd lose track of the fatal condition
|
||||
* and report a mere ERROR to outer loop, which would be a Bad Thing.
|
||||
* So, we substitute an appropriate message in-place, without
|
||||
* downgrading the level if it's above ERROR.
|
||||
@ -127,10 +137,47 @@ elog(int lev, const char *fmt,...)
	const char *cp;
	char	   *bp;
	size_t		space_needed;
	bool		output_to_server = false;
	bool		output_to_client = false;

	/* size of the prefix needed for timestamp and pid, if enabled */
	size_t		timestamp_size;

	/* Check for old elog calls. Codes were renumbered in 7.3. 2002-02-24 */
	if (lev < DEBUG5)
		elog(FATAL, "Pre-7.3 object file made an elog() call. Recompile.");

	if (Use_syslog <= 1 || whereToSendOutput == Debug)
	{
		if (lev == LOG)
		{
			if (server_min_messages == LOG)
				output_to_server = true;
			else if (server_min_messages < FATAL)
				output_to_server = true;
		}
		/* lev != LOG */
		else
		{
			if (server_min_messages == LOG)
			{
				if (lev >= FATAL)
					output_to_server = true;
			}
			/* Neither is LOG */
			else if (lev >= server_min_messages)
				output_to_server = true;
		}
	}

	if (lev >= client_min_messages && whereToSendOutput == Remote)
		output_to_client = true;

	/* optimization to prevent work for messages that would never be output */
	if (lev < ERROR && Use_syslog < 1 &&
		output_to_server == false && output_to_client == false)
		return;

	/* Save error str before calling any function that might change errno */
	errorstr = useful_strerror(errno);

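The hunk above decides independently whether a message is written to the server log and whether it is sent to the client. LOG needs a special case because it sits just below FATAL in the server_min_messages ordering, while the client side is a plain threshold. A condensed restatement of the server-side test, as a standalone sketch (not the actual elog.c code), assuming the 7.3 integer level constants:

    /* Sketch only: same decision as the block above. */
    static bool
    server_wants(int lev, int server_min_messages)
    {
        if (lev == LOG)
            /* LOG is logged unless the threshold is FATAL or PANIC */
            return server_min_messages == LOG || server_min_messages < FATAL;
        if (server_min_messages == LOG)
            /* with threshold LOG, only FATAL and PANIC also qualify */
            return lev >= FATAL;
        return lev >= server_min_messages;
    }
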
@ -142,13 +189,13 @@ elog(int lev, const char *fmt,...)
|
||||
lev = FATAL;
|
||||
|
||||
/*
|
||||
* If we are inside a critical section, all errors become REALLYFATAL
|
||||
* If we are inside a critical section, all errors become PANIC
|
||||
* errors. See miscadmin.h.
|
||||
*/
|
||||
if (lev == ERROR || lev == FATAL)
|
||||
{
|
||||
if (CritSectionCount > 0)
|
||||
lev = REALLYFATAL;
|
||||
lev = PANIC;
|
||||
}
|
||||
|
||||
prefix = elog_message_prefix(lev);
|
||||
@ -167,12 +214,15 @@ elog(int lev, const char *fmt,...)
|
||||
* vsnprintf won't know what to do with %m). To keep space
|
||||
* calculation simple, we only allow one %m.
|
||||
*/
|
||||
space_needed = timestamp_size + strlen(prefix)
|
||||
+ strlen(fmt) + strlen(errorstr) + 1;
|
||||
space_needed = timestamp_size + strlen(prefix) +
|
||||
strlen(fmt) + strlen(errorstr) + 1;
|
||||
|
||||
if (copy_lineno)
|
||||
{
|
||||
/* translator: This string will be truncated at 31 characters. */
|
||||
/*
|
||||
* Prints the failure line of the COPY. Wow, what a hack! bjm
|
||||
* Translators: Error message will be truncated at 31 characters.
|
||||
*/
|
||||
snprintf(copylineno_buf, 32, gettext("copy: line %d, "), copy_lineno);
|
||||
space_needed += strlen(copylineno_buf);
|
||||
}
|
||||
@ -184,7 +234,7 @@ elog(int lev, const char *fmt,...)
|
||||
{
|
||||
/* We're up against it, convert to out-of-memory error */
|
||||
fmt_buf = fmt_fixedbuf;
|
||||
if (lev != FATAL && lev != REALLYFATAL)
|
||||
if (lev != FATAL && lev != PANIC)
|
||||
{
|
||||
lev = ERROR;
|
||||
prefix = elog_message_prefix(lev);
|
||||
@ -213,7 +263,7 @@ elog(int lev, const char *fmt,...)
|
||||
if (copy_lineno)
|
||||
{
|
||||
strcat(fmt_buf, copylineno_buf);
|
||||
if (lev == ERROR || lev == FATAL || lev == REALLYFATAL)
|
||||
if (lev == ERROR || lev == FATAL || lev == PANIC)
|
||||
copy_lineno = 0;
|
||||
}
|
||||
|
||||
@ -281,7 +331,7 @@ elog(int lev, const char *fmt,...)
|
||||
{
|
||||
/* We're up against it, convert to out-of-memory error */
|
||||
msg_buf = msg_fixedbuf;
|
||||
if (lev != FATAL && lev != REALLYFATAL)
|
||||
if (lev != FATAL && lev != PANIC)
|
||||
{
|
||||
lev = ERROR;
|
||||
prefix = elog_message_prefix(lev);
|
||||
@ -309,9 +359,17 @@ elog(int lev, const char *fmt,...)
|
||||
|
||||
switch (lev)
|
||||
{
|
||||
case DEBUG:
|
||||
case DEBUG1:
|
||||
case DEBUG2:
|
||||
case DEBUG3:
|
||||
case DEBUG4:
|
||||
case DEBUG5:
|
||||
syslog_level = LOG_DEBUG;
|
||||
break;
|
||||
case LOG:
|
||||
case INFO:
|
||||
syslog_level = LOG_INFO;
|
||||
break;
|
||||
case NOTICE:
|
||||
syslog_level = LOG_NOTICE;
|
||||
break;
|
||||
@ -321,7 +379,7 @@ elog(int lev, const char *fmt,...)
|
||||
case FATAL:
|
||||
syslog_level = LOG_ERR;
|
||||
break;
|
||||
case REALLYFATAL:
|
||||
case PANIC:
|
||||
default:
|
||||
syslog_level = LOG_CRIT;
|
||||
break;
|
||||
@ -334,11 +392,12 @@ elog(int lev, const char *fmt,...)
|
||||
/* syslog doesn't want a trailing newline, but other destinations do */
|
||||
strcat(msg_buf, "\n");
|
||||
|
||||
/* write to terminal */
|
||||
if (Use_syslog <= 1 || whereToSendOutput == Debug)
|
||||
/* Write to server logs or server terminal */
|
||||
if (output_to_server)
|
||||
write(2, msg_buf, strlen(msg_buf));
|
||||
|
||||
if (lev > DEBUG && whereToSendOutput == Remote)
|
||||
/* Should we output to the client too? */
|
||||
if (output_to_client)
|
||||
{
|
||||
/* Send IPC message to the front-end program */
|
||||
MemoryContext oldcxt;
|
||||
@ -351,7 +410,7 @@ elog(int lev, const char *fmt,...)
|
||||
*/
|
||||
oldcxt = MemoryContextSwitchTo(ErrorContext);
|
||||
|
||||
if (lev == NOTICE)
|
||||
if (lev <= NOTICE)
|
||||
/* exclude the timestamp from msg sent to frontend */
|
||||
send_notice_to_frontend(msg_buf + timestamp_size);
|
||||
else
|
||||
@ -414,7 +473,7 @@ elog(int lev, const char *fmt,...)
|
||||
* Guard against infinite loop from elog() during error recovery.
|
||||
*/
|
||||
if (InError)
|
||||
elog(REALLYFATAL, "elog: error during error recovery, giving up!");
|
||||
elog(PANIC, "elog: error during error recovery, giving up!");
|
||||
InError = true;
|
||||
|
||||
/*
|
||||
@ -423,7 +482,7 @@ elog(int lev, const char *fmt,...)
|
||||
siglongjmp(Warn_restart, 1);
|
||||
}
|
||||
|
||||
if (lev == FATAL || lev == REALLYFATAL)
|
||||
if (lev == FATAL || lev == PANIC)
|
||||
{
|
||||
/*
|
||||
* Serious crash time. Postmaster will observe nonzero process
|
||||
@ -673,10 +732,10 @@ send_message_to_frontend(int type, const char *msg)
|
||||
{
|
||||
StringInfoData buf;
|
||||
|
||||
AssertArg(type == NOTICE || type == ERROR);
|
||||
AssertArg(type <= ERROR);
|
||||
|
||||
pq_beginmessage(&buf);
|
||||
pq_sendbyte(&buf, type == NOTICE ? 'N' : 'E');
|
||||
pq_sendbyte(&buf, type != ERROR ? 'N' : 'E'); /* N is INFO or NOTICE */
|
||||
pq_sendstring(&buf, msg);
|
||||
pq_endmessage(&buf);
|
||||
|
||||
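The frontend protocol still distinguishes only notice-class ('N') and error ('E') messages, so after the renumbering every level at or below NOTICE that reaches this routine travels as 'N'. A one-line restatement of the dispatch byte chosen above (sketch only):

    /* 'type' is an elog level <= ERROR, per the AssertArg above */
    char dispatch = (type != ERROR) ? 'N' : 'E';    /* 'N' covers INFO and NOTICE */
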
@ -731,9 +790,19 @@ elog_message_prefix(int lev)
|
||||
|
||||
switch (lev)
|
||||
{
|
||||
case DEBUG:
|
||||
case DEBUG1:
|
||||
case DEBUG2:
|
||||
case DEBUG3:
|
||||
case DEBUG4:
|
||||
case DEBUG5:
|
||||
prefix = gettext("DEBUG: ");
|
||||
break;
|
||||
case LOG:
|
||||
prefix = gettext("LOG: ");
|
||||
break;
|
||||
case INFO:
|
||||
prefix = gettext("INFO: ");
|
||||
break;
|
||||
case NOTICE:
|
||||
prefix = gettext("NOTICE: ");
|
||||
break;
|
||||
@ -741,13 +810,112 @@ elog_message_prefix(int lev)
|
||||
prefix = gettext("ERROR: ");
|
||||
break;
|
||||
case FATAL:
|
||||
prefix = gettext("FATAL 1: ");
|
||||
prefix = gettext("FATAL: ");
|
||||
break;
|
||||
case REALLYFATAL:
|
||||
prefix = gettext("FATAL 2: ");
|
||||
case PANIC:
|
||||
prefix = gettext("PANIC: ");
|
||||
break;
|
||||
}
|
||||
|
||||
Assert(prefix != NULL);
|
||||
return prefix;
|
||||
}
|
||||
|
||||
|
||||
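With the renamed levels, elog_message_prefix() now emits one distinct prefix per severity instead of the old "FATAL 1:" / "FATAL 2:" pair. A small illustration with hypothetical message texts:

    elog(LOG, "checkpoint starting");     /* log line begins with "LOG:" */
    elog(FATAL, "out of memory");         /* log line begins with "FATAL:" */
    elog(PANIC, "could not recover");     /* log line begins with "PANIC:" */
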
/*
|
||||
* GUC support routines
|
||||
*/
|
||||
|
||||
bool
|
||||
check_server_min_messages(const char *lev)
|
||||
{
|
||||
if (strcasecmp(lev, "debug") == 0 ||
|
||||
strcasecmp(lev, "debug1") == 0 ||
|
||||
strcasecmp(lev, "debug2") == 0 ||
|
||||
strcasecmp(lev, "debug3") == 0 ||
|
||||
strcasecmp(lev, "debug4") == 0 ||
|
||||
strcasecmp(lev, "debug5") == 0 ||
|
||||
strcasecmp(lev, "log") == 0 ||
|
||||
strcasecmp(lev, "info") == 0 ||
|
||||
strcasecmp(lev, "notice") == 0 ||
|
||||
strcasecmp(lev, "error") == 0 ||
|
||||
strcasecmp(lev, "fatal") == 0 ||
|
||||
strcasecmp(lev, "panic") == 0)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
void
|
||||
assign_server_min_messages(const char *lev)
|
||||
{
|
||||
if (strcasecmp(lev, "debug1") == 0)
|
||||
server_min_messages = DEBUG1;
|
||||
else if (strcasecmp(lev, "debug2") == 0)
|
||||
server_min_messages = DEBUG2;
|
||||
else if (strcasecmp(lev, "debug3") == 0)
|
||||
server_min_messages = DEBUG3;
|
||||
else if (strcasecmp(lev, "debug4") == 0)
|
||||
server_min_messages = DEBUG4;
|
||||
else if (strcasecmp(lev, "debug5") == 0)
|
||||
server_min_messages = DEBUG5;
|
||||
else if (strcasecmp(lev, "log") == 0)
|
||||
server_min_messages = LOG;
|
||||
else if (strcasecmp(lev, "info") == 0)
|
||||
server_min_messages = INFO;
|
||||
else if (strcasecmp(lev, "notice") == 0)
|
||||
server_min_messages = NOTICE;
|
||||
else if (strcasecmp(lev, "error") == 0)
|
||||
server_min_messages = ERROR;
|
||||
else if (strcasecmp(lev, "fatal") == 0)
|
||||
server_min_messages = FATAL;
|
||||
else if (strcasecmp(lev, "panic") == 0)
|
||||
server_min_messages = PANIC;
|
||||
else
|
||||
/* Can't get here unless guc.c screwed up */
|
||||
elog(ERROR, "bogus server_min_messages %s", lev);
|
||||
}
|
||||
|
||||
bool
|
||||
check_client_min_messages(const char *lev)
|
||||
{
|
||||
if (strcasecmp(lev, "debug") == 0 ||
|
||||
strcasecmp(lev, "debug1") == 0 ||
|
||||
strcasecmp(lev, "debug2") == 0 ||
|
||||
strcasecmp(lev, "debug3") == 0 ||
|
||||
strcasecmp(lev, "debug4") == 0 ||
|
||||
strcasecmp(lev, "debug5") == 0 ||
|
||||
strcasecmp(lev, "log") == 0 ||
|
||||
strcasecmp(lev, "info") == 0 ||
|
||||
strcasecmp(lev, "notice") == 0 ||
|
||||
strcasecmp(lev, "error") == 0)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
void
|
||||
assign_client_min_messages(const char *lev)
|
||||
{
|
||||
if (strcasecmp(lev, "debug1") == 0)
|
||||
client_min_messages = DEBUG1;
|
||||
else if (strcasecmp(lev, "debug2") == 0)
|
||||
client_min_messages = DEBUG2;
|
||||
else if (strcasecmp(lev, "debug3") == 0)
|
||||
client_min_messages = DEBUG3;
|
||||
else if (strcasecmp(lev, "debug4") == 0)
|
||||
client_min_messages = DEBUG4;
|
||||
else if (strcasecmp(lev, "debug5") == 0)
|
||||
client_min_messages = DEBUG5;
|
||||
else if (strcasecmp(lev, "log") == 0)
|
||||
client_min_messages = LOG;
|
||||
else if (strcasecmp(lev, "info") == 0)
|
||||
client_min_messages = INFO;
|
||||
else if (strcasecmp(lev, "notice") == 0)
|
||||
client_min_messages = NOTICE;
|
||||
else if (strcasecmp(lev, "error") == 0)
|
||||
client_min_messages = ERROR;
|
||||
else
|
||||
/* Can't get here unless guc.c screwed up */
|
||||
elog(ERROR, "bogus client_min_messages %s", lev);
|
||||
}
|
||||
|
||||
|
||||
|
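These two pairs of GUC hooks validate the lowercase level names and translate them into the integer constants used by elog(). A hypothetical caller (guc.c does this with more bookkeeping) would use them roughly like this:

    /* Sketch only: validate, then apply, a new server_min_messages value. */
    const char *newval = "debug2";

    if (!check_server_min_messages(newval))
        elog(ERROR, "invalid value for server_min_messages: %s", newval);
    assign_server_min_messages(newval);    /* sets server_min_messages = DEBUG2 */
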
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.54 2001/10/25 05:49:48 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.55 2002/03/02 21:39:33 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -399,8 +399,7 @@ find_in_dynamic_libpath(const char *basename)
|
||||
sprintf(full, "%s/%s", mangled, basename);
|
||||
pfree(mangled);
|
||||
|
||||
if (DebugLvl > 1)
|
||||
elog(DEBUG, "find_in_dynamic_libpath: trying %s", full);
|
||||
elog(DEBUG2, "find_in_dynamic_libpath: trying %s", full);
|
||||
|
||||
if (file_exists(full))
|
||||
return full;
|
||||
|
@ -9,7 +9,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.40 2001/10/28 06:25:54 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.41 2002/03/02 21:39:33 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -933,7 +933,7 @@ hash_corrupted(HTAB *hashp)
|
||||
* systemwide restart. Otherwise, just shut down this one backend.
|
||||
*/
|
||||
if (hashp->isshared)
|
||||
elog(STOP, "Hash table '%s' corrupted", hashp->tabname);
|
||||
elog(PANIC, "Hash table '%s' corrupted", hashp->tabname);
|
||||
else
|
||||
elog(FATAL, "Hash table '%s' corrupted", hashp->tabname);
|
||||
}
|
||||
|
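Here the old STOP call becomes PANIC; as the surrounding comment notes, corruption of shared state forces a system-wide restart, while damage confined to one backend stays at FATAL. A sketch of that convention (the is_shared flag is hypothetical):

    if (is_shared)
        elog(PANIC, "shared hash table corrupted");   /* restart all backends */
    else
        elog(FATAL, "hash table corrupted");          /* abort only this backend */
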
@ -7,7 +7,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/init/Attic/findbe.c,v 1.26 2002/02/08 16:30:11 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/init/Attic/findbe.c,v 1.27 2002/03/02 21:39:33 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -61,17 +61,13 @@ ValidateBinary(char *path)
|
||||
*/
|
||||
if (stat(path, &buf) < 0)
|
||||
{
|
||||
if (DebugLvl > 1)
|
||||
fprintf(stderr, "ValidateBinary: can't stat \"%s\"\n",
|
||||
path);
|
||||
elog(DEBUG2, "ValidateBinary: can't stat \"%s\"", path);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((buf.st_mode & S_IFMT) != S_IFREG)
|
||||
{
|
||||
if (DebugLvl > 1)
|
||||
fprintf(stderr, "ValidateBinary: \"%s\" is not a regular file\n",
|
||||
path);
|
||||
elog(DEBUG2, "ValidateBinary: \"%s\" is not a regular file", path);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -92,9 +88,8 @@ ValidateBinary(char *path)
|
||||
{
|
||||
is_r = buf.st_mode & S_IRUSR;
|
||||
is_x = buf.st_mode & S_IXUSR;
|
||||
if (DebugLvl > 1 && !(is_r && is_x))
|
||||
fprintf(stderr, "ValidateBinary: \"%s\" is not user read/execute\n",
|
||||
path);
|
||||
if (!(is_r && is_x))
|
||||
elog(DEBUG2, "ValidateBinary: \"%s\" is not user read/execute", path);
|
||||
return is_x ? (is_r ? 0 : -2) : -1;
|
||||
}
|
||||
pwp = getpwuid(euid);
|
||||
@ -119,17 +114,17 @@ ValidateBinary(char *path)
|
||||
{
|
||||
is_r = buf.st_mode & S_IRGRP;
|
||||
is_x = buf.st_mode & S_IXGRP;
|
||||
if (DebugLvl > 1 && !(is_r && is_x))
|
||||
fprintf(stderr, "ValidateBinary: \"%s\" is not group read/execute\n",
|
||||
path);
|
||||
if (!(is_r && is_x))
|
||||
elog(DEBUG2, "ValidateBinary: \"%s\" is not group read/execute",
|
||||
path);
|
||||
return is_x ? (is_r ? 0 : -2) : -1;
|
||||
}
|
||||
}
|
||||
is_r = buf.st_mode & S_IROTH;
|
||||
is_x = buf.st_mode & S_IXOTH;
|
||||
if (DebugLvl > 1 && !(is_r && is_x))
|
||||
fprintf(stderr, "ValidateBinary: \"%s\" is not other read/execute\n",
|
||||
path);
|
||||
if (!(is_r && is_x))
|
||||
elog(DEBUG2, "ValidateBinary: \"%s\" is not other read/execute",
|
||||
path);
|
||||
return is_x ? (is_r ? 0 : -2) : -1;
|
||||
}
|
||||
|
||||
@ -177,9 +172,7 @@ FindExec(char *full_path, const char *argv0, const char *binary_name)
|
||||
if (ValidateBinary(buf) == 0)
|
||||
{
|
||||
strncpy(full_path, buf, MAXPGPATH);
|
||||
if (DebugLvl)
|
||||
fprintf(stderr, "FindExec: found \"%s\" using argv[0]\n",
|
||||
full_path);
|
||||
elog(DEBUG1, "FindExec: found \"%s\" using argv[0]", full_path);
|
||||
return 0;
|
||||
}
|
||||
fprintf(stderr, "FindExec: invalid binary \"%s\"\n",
|
||||
@ -193,8 +186,7 @@ FindExec(char *full_path, const char *argv0, const char *binary_name)
|
||||
*/
|
||||
if ((p = getenv("PATH")) && *p)
|
||||
{
|
||||
if (DebugLvl)
|
||||
fprintf(stderr, "FindExec: searching PATH ...\n");
|
||||
elog(DEBUG1, "FindExec: searching PATH ...");
|
||||
path = strdup(p); /* make a modifiable copy */
|
||||
for (startp = path, endp = strchr(path, ':');
|
||||
startp && *startp;
|
||||
@ -215,9 +207,8 @@ FindExec(char *full_path, const char *argv0, const char *binary_name)
|
||||
{
|
||||
case 0: /* found ok */
|
||||
strncpy(full_path, buf, MAXPGPATH);
|
||||
if (DebugLvl)
|
||||
fprintf(stderr, "FindExec: found \"%s\" using PATH\n",
|
||||
full_path);
|
||||
elog(DEBUG1, "FindExec: found \"%s\" using PATH",
|
||||
full_path);
|
||||
free(path);
|
||||
return 0;
|
||||
case -1: /* wasn't even a candidate, keep looking */
|
||||
|
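Throughout findbe.c the explicit DebugLvl checks wrapped around fprintf(stderr, ...) are replaced by plain elog() calls at DEBUG1/DEBUG2, leaving the filtering to server_min_messages. The pattern, side by side, taken from the ValidateBinary() hunk above:

    /* pre-7.3 style */
    if (DebugLvl > 1)
        fprintf(stderr, "ValidateBinary: can't stat \"%s\"\n", path);

    /* 7.3 style: elog() applies the threshold and appends the newline itself */
    elog(DEBUG2, "ValidateBinary: can't stat \"%s\"", path);
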
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/init/globals.c,v 1.62 2001/10/25 05:49:51 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/init/globals.c,v 1.63 2002/03/02 21:39:33 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* Globals used all over the place should be declared here and not
|
||||
@ -69,8 +69,6 @@ Oid MyDatabaseId = InvalidOid;
|
||||
|
||||
bool IsUnderPostmaster = false;
|
||||
|
||||
int DebugLvl = 0;
|
||||
|
||||
int DateStyle = USE_ISO_DATES;
|
||||
bool EuroDates = false;
|
||||
bool HasCTZSet = false;
|
||||
|
@ -8,7 +8,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/init/miscinit.c,v 1.83 2002/03/01 22:45:15 petere Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/init/miscinit.c,v 1.84 2002/03/02 21:39:33 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -817,13 +817,13 @@ RecordSharedMemoryInLockFile(IpcMemoryKey shmKey, IpcMemoryId shmId)
|
||||
fd = open(directoryLockFile, O_RDWR | PG_BINARY, 0);
|
||||
if (fd < 0)
|
||||
{
|
||||
elog(DEBUG, "Failed to rewrite %s: %m", directoryLockFile);
|
||||
elog(LOG, "Failed to rewrite %s: %m", directoryLockFile);
|
||||
return;
|
||||
}
|
||||
len = read(fd, buffer, sizeof(buffer) - 100);
|
||||
if (len <= 0)
|
||||
{
|
||||
elog(DEBUG, "Failed to read %s: %m", directoryLockFile);
|
||||
elog(LOG, "Failed to read %s: %m", directoryLockFile);
|
||||
close(fd);
|
||||
return;
|
||||
}
|
||||
@ -836,7 +836,7 @@ RecordSharedMemoryInLockFile(IpcMemoryKey shmKey, IpcMemoryId shmId)
|
||||
if (ptr == NULL ||
|
||||
(ptr = strchr(ptr + 1, '\n')) == NULL)
|
||||
{
|
||||
elog(DEBUG, "Bogus data in %s", directoryLockFile);
|
||||
elog(LOG, "Bogus data in %s", directoryLockFile);
|
||||
close(fd);
|
||||
return;
|
||||
}
|
||||
@ -861,7 +861,7 @@ RecordSharedMemoryInLockFile(IpcMemoryKey shmKey, IpcMemoryId shmId)
|
||||
/* if write didn't set errno, assume problem is no disk space */
|
||||
if (errno == 0)
|
||||
errno = ENOSPC;
|
||||
elog(DEBUG, "Failed to write %s: %m", directoryLockFile);
|
||||
elog(LOG, "Failed to write %s: %m", directoryLockFile);
|
||||
close(fd);
|
||||
return;
|
||||
}
|
||||
|
@ -4,7 +4,7 @@
|
||||
*
|
||||
* Copyright 2000 by PostgreSQL Global Development Group
|
||||
*
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/misc/guc-file.l,v 1.10 2002/02/23 01:31:36 petere Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/misc/guc-file.l,v 1.11 2002/03/02 21:39:33 momjian Exp $
|
||||
*/
|
||||
|
||||
%{
|
||||
@ -137,7 +137,7 @@ ProcessConfigFile(GucContext context)
|
||||
Assert(context == PGC_POSTMASTER || context == PGC_BACKEND
|
||||
|| context == PGC_SIGHUP);
|
||||
Assert(DataDir);
|
||||
elevel = (context == PGC_SIGHUP) ? DEBUG : ERROR;
|
||||
elevel = (context == PGC_SIGHUP) ? DEBUG3 : ERROR;
|
||||
|
||||
/*
|
||||
* Open file
|
||||
|
@ -4,7 +4,7 @@
|
||||
* Support for grand unified configuration scheme, including SET
|
||||
* command, configuration file, and command line options.
|
||||
*
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/misc/guc.c,v 1.60 2002/03/01 22:45:16 petere Exp $
|
||||
* $Header: /cvsroot/pgsql/src/backend/utils/misc/guc.c,v 1.61 2002/03/02 21:39:34 momjian Exp $
|
||||
*
|
||||
* Copyright 2000 by PostgreSQL Global Development Group
|
||||
* Written by Peter Eisentraut <peter_e@gmx.net>.
|
||||
@ -39,6 +39,7 @@
|
||||
#include "utils/array.h"
|
||||
#include "utils/builtins.h"
|
||||
#include "utils/datetime.h"
|
||||
#include "utils/elog.h"
|
||||
#include "pgstat.h"
|
||||
|
||||
|
||||
@ -432,11 +433,6 @@ static struct config_int
|
||||
1000, 25, INT_MAX, NULL, NULL
|
||||
},
|
||||
|
||||
{
|
||||
"debug_level", PGC_USERSET, PGC_S_DEFAULT, &DebugLvl,
|
||||
0, 0, 16, NULL, NULL
|
||||
},
|
||||
|
||||
#ifdef LOCK_DEBUG
|
||||
{
|
||||
"trace_lock_oidmin", PGC_SUSET, PGC_S_DEFAULT, &Trace_lock_oidmin,
|
||||
@ -556,6 +552,12 @@ static struct config_real
|
||||
static struct config_string
|
||||
ConfigureNamesString[] =
|
||||
{
|
||||
{
|
||||
"client_min_messages", PGC_USERSET, PGC_S_DEFAULT, &client_min_messages_str,
|
||||
client_min_messages_str_default, check_client_min_messages,
|
||||
assign_client_min_messages
|
||||
},
|
||||
|
||||
{
|
||||
"default_transaction_isolation", PGC_USERSET, PGC_S_DEFAULT, &default_iso_level_string,
|
||||
"read committed", check_defaultxactisolevel, assign_defaultxactisolevel
|
||||
@ -571,6 +573,12 @@ static struct config_string
|
||||
PG_KRB_SRVTAB, NULL, NULL
|
||||
},
|
||||
|
||||
{
|
||||
"server_min_messages", PGC_USERSET, PGC_S_DEFAULT, &server_min_messages_str,
|
||||
server_min_messages_str_default, check_server_min_messages,
|
||||
assign_server_min_messages
|
||||
},
|
||||
|
||||
#ifdef ENABLE_SYSLOG
|
||||
{
|
||||
"syslog_facility", PGC_POSTMASTER, PGC_S_DEFAULT, &Syslog_facility,
|
||||
@ -886,7 +894,7 @@ set_config_option(const char *name, const char *value,
|
||||
bool makeDefault;
|
||||
|
||||
if (context == PGC_SIGHUP)
|
||||
elevel = DEBUG;
|
||||
elevel = DEBUG1;
|
||||
else if (guc_session_init)
|
||||
elevel = NOTICE;
|
||||
else
|
||||
@ -901,9 +909,8 @@ set_config_option(const char *name, const char *value,
|
||||
|
||||
if (record->source > source)
|
||||
{
|
||||
if (DebugLvl > 1)
|
||||
elog(DEBUG, "setting %s refused because previous source is higher",
|
||||
name);
|
||||
elog(DEBUG2, "setting %s refused because previous source is higher",
|
||||
name);
|
||||
return false;
|
||||
}
|
||||
makeDefault = source < PGC_S_SESSION;
|
||||
|
@ -108,8 +108,15 @@


#
# Debug display
# Message display
#

#server_min_messages = log    # Values, in order of decreasing detail:
                              #   debug5, debug4, debug3, debug2, debug1,
                              #   info, notice, error, log, fatal, panic
#client_min_messages = info   # Values, in order of decreasing detail:
                              #   debug5, debug4, debug3, debug2, debug1,
                              #   log, info, notice, error
#silent_mode = false

#log_connections = false

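Note that the two sample settings document different orderings: log sits between error and fatal for the server, but between debug1 and info for the client, and the client list stops at error. For reference, the same orderings as C string arrays (illustration only, not backend code):

    static const char *server_order[] = {      /* most detailed first */
        "debug5", "debug4", "debug3", "debug2", "debug1",
        "info", "notice", "error", "log", "fatal", "panic"
    };
    static const char *client_order[] = {
        "debug5", "debug4", "debug3", "debug2", "debug1",
        "log", "info", "notice", "error"
    };
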
@ -27,7 +27,7 @@
|
||||
# Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
|
||||
# Portions Copyright (c) 1994, Regents of the University of California
|
||||
#
|
||||
# $Header: /cvsroot/pgsql/src/bin/initdb/Attic/initdb.sh,v 1.144 2002/03/01 22:45:16 petere Exp $
|
||||
# $Header: /cvsroot/pgsql/src/bin/initdb/Attic/initdb.sh,v 1.145 2002/03/02 21:39:34 momjian Exp $
|
||||
#
|
||||
#-------------------------------------------------------------------------
|
||||
|
||||
@ -452,7 +452,7 @@ PGSQL_OPT="-F -D$PGDATA"
|
||||
|
||||
if [ "$debug" = yes ]
|
||||
then
|
||||
BACKEND_TALK_ARG="-d"
|
||||
BACKEND_TALK_ARG="-d 5"
|
||||
else
|
||||
PGSQL_OPT="$PGSQL_OPT -o /dev/null"
|
||||
fi
|
||||
|
@ -3,7 +3,7 @@
|
||||
*
|
||||
* Copyright 2000 by PostgreSQL Global Development Group
|
||||
*
|
||||
* $Header: /cvsroot/pgsql/src/bin/psql/tab-complete.c,v 1.41 2001/11/05 17:46:31 momjian Exp $
|
||||
* $Header: /cvsroot/pgsql/src/bin/psql/tab-complete.c,v 1.42 2002/03/02 21:39:34 momjian Exp $
|
||||
*/
|
||||
|
||||
/*----------------------------------------------------------------------
|
||||
@ -229,6 +229,8 @@ psql_completion(char *text, int start, int end)
|
||||
"ksqo",
|
||||
"geqo",
|
||||
"fsync",
|
||||
"server_min_messages",
|
||||
"client_min_messages",
|
||||
"debug_assertions",
|
||||
"debug_print_query",
|
||||
"debug_print_parse",
|
||||
@ -252,7 +254,6 @@ psql_completion(char *text, int start, int end)
|
||||
"geqo_random_seed",
|
||||
"sort_mem",
|
||||
"vacuum_mem",
|
||||
"debug_level",
|
||||
"max_expr_depth",
|
||||
"commit_delay",
|
||||
"commit_siblings",
|
||||
|
@ -7,7 +7,7 @@
|
||||
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Id: bootstrap.h,v 1.26 2001/11/05 17:46:31 momjian Exp $
|
||||
* $Id: bootstrap.h,v 1.27 2002/03/02 21:39:34 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -34,8 +34,6 @@ typedef struct hashnode
|
||||
extern Relation reldesc;
|
||||
extern Form_pg_attribute attrtypes[MAXATTR];
|
||||
extern int numattr;
|
||||
extern int DebugMode;
|
||||
|
||||
extern int BootstrapMain(int ac, char *av[]);
|
||||
|
||||
extern void index_register(char *heap, char *ind, IndexInfo *indexInfo);
|
||||
|
@ -12,7 +12,7 @@
|
||||
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Id: miscadmin.h,v 1.99 2002/02/18 23:11:31 petere Exp $
|
||||
* $Id: miscadmin.h,v 1.100 2002/03/02 21:39:34 momjian Exp $
|
||||
*
|
||||
* NOTES
|
||||
* some of the information in this file should be moved to
|
||||
@ -129,8 +129,6 @@ extern DLLIMPORT Oid MyDatabaseId;
|
||||
|
||||
extern bool IsUnderPostmaster;
|
||||
|
||||
extern int DebugLvl;
|
||||
|
||||
/* Date/Time Configuration
|
||||
*
|
||||
* Constants to pass info from runtime environment:
|
||||
|
@ -7,7 +7,7 @@
|
||||
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
* $Id: elog.h,v 1.30 2001/11/05 17:46:36 momjian Exp $
|
||||
* $Id: elog.h,v 1.31 2002/03/02 21:39:35 momjian Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -15,15 +15,25 @@
#define ELOG_H

/* Error level codes */
#define NOTICE 0		/* random info, sent to frontend */
#define ERROR (-1)		/* user error - return to known state */
#define FATAL 1			/* fatal error - abort process */
#define REALLYFATAL 2	/* take down the other backends with me */
#define DEBUG (-2)		/* debug message */
#define DEBUG5 10		/* sent only to server logs, label DEBUG */
#define DEBUG4 11		/* logs in decreasing detail */
#define DEBUG3 12
#define DEBUG2 13
#define DEBUG1 14
#define LOG 15			/* sent only to server logs by default,
						 * label LOG. */
#define INFO 16			/* sent only to client by default, for
						 * informative messages that are part of
						 * normal query operation. */
#define NOTICE 17		/* sent to client and server by default,
						 * important messages, for unusual cases that
						 * should be reported but are not serious
						 * enough to abort the query. */
#define ERROR 18		/* user error - return to known state */
#define FATAL 19		/* fatal error - abort process */
#define PANIC 20		/* take down the other backends with me */

/* temporary nonsense... */
#define STOP REALLYFATAL
#define LOG DEBUG
/*#define DEBUG DEBUG5*/	/* Backward compatibility with pre-7.3 */

/* Configurable parameters */
#ifdef ENABLE_SYSLOG
@ -32,6 +42,10 @@ extern int Use_syslog;
extern bool Log_timestamp;
extern bool Log_pid;

extern char *server_min_messages_str;
extern char *client_min_messages_str;
extern const char server_min_messages_str_default[];
extern const char client_min_messages_str_default[];

extern void
elog(int lev, const char *fmt,...)
@ -41,4 +55,9 @@ __attribute__((format(printf, 2, 3)));

extern int DebugFileOpen(void);

extern bool check_server_min_messages(const char *lev);
extern void assign_server_min_messages(const char *lev);
extern bool check_client_min_messages(const char *lev);
extern void assign_client_min_messages(const char *lev);

#endif   /* ELOG_H */

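Because the new codes are consecutive integers from DEBUG5 (10) up to PANIC (20), both min-messages filters reduce to integer comparisons. A minimal standalone sketch (not backend code) of the client-side threshold, using the compiled-in default of INFO:

    #include <stdio.h>

    enum {                       /* values copied from the definitions above */
        DEBUG5 = 10, DEBUG4, DEBUG3, DEBUG2, DEBUG1,
        LOG, INFO, NOTICE, ERROR, FATAL, PANIC
    };

    static int client_min_messages = INFO;     /* default is "info" */

    static int sent_to_client(int lev)
    {
        /* the threshold part of elog()'s client-output test */
        return lev >= client_min_messages;
    }

    int main(void)
    {
        printf("NOTICE: %d\n", sent_to_client(NOTICE));   /* 1: sent */
        printf("DEBUG2: %d\n", sent_to_client(DEBUG2));   /* 0: suppressed */
        return 0;
    }
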
@ -13,7 +13,7 @@
int
elog_DEBUG(void)
{
	return DEBUG;
	return LOG;
}
|
||||
|
||||
int
|
||||
|
@ -4,7 +4,7 @@
|
||||
* procedural language
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/pl/plpgsql/src/gram.y,v 1.29 2001/11/29 22:57:37 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/pl/plpgsql/src/gram.y,v 1.30 2002/03/02 21:39:35 momjian Exp $
|
||||
*
|
||||
* This software is copyrighted by Jan Wieck - Hamburg.
|
||||
*
|
||||
@ -1207,7 +1207,7 @@ raise_level : K_EXCEPTION
|
||||
}
|
||||
| K_DEBUG
|
||||
{
|
||||
$$ = DEBUG;
|
||||
$$ = DEBUG5;
|
||||
}
|
||||
;
|
||||
|
||||
|
@ -29,7 +29,7 @@
|
||||
* MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/pl/plpython/plpython.c,v 1.13 2001/11/16 18:04:31 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/pl/plpython/plpython.c,v 1.14 2002/03/02 21:39:35 momjian Exp $
|
||||
*
|
||||
*********************************************************************
|
||||
*/
|
||||
@ -407,7 +407,7 @@ plpython_call_handler(PG_FUNCTION_ARGS)
|
||||
}
|
||||
else
|
||||
PLy_restart_in_progress += 1;
|
||||
if (proc)
|
||||
if (proc)
|
||||
{
|
||||
Py_DECREF(proc->me);
|
||||
}
|
||||
@ -2501,7 +2501,7 @@ PLy_init_plpy(void)
|
||||
* New RExec methods
|
||||
*/
|
||||
|
||||
PyObject*
|
||||
PyObject*
|
||||
PLy_r_open(PyObject *self, PyObject* args)
|
||||
{
|
||||
PyErr_SetString(PyExc_IOError, "can't open files in restricted mode");
|
||||
@ -2559,7 +2559,7 @@ PLy_init_safe_interp(void)
|
||||
rexec_dict = ((PyClassObject*)rexec)->cl_dict;
|
||||
|
||||
/*
|
||||
* tweak the list of permitted modules, posix and sys functions
|
||||
* tweak the list of permitted modules, posix and sys functions
|
||||
*/
|
||||
PyDict_SetItemString(rexec_dict, "ok_builtin_modules", PLy_importable_modules);
|
||||
PyDict_SetItemString(rexec_dict, "ok_posix_names", PLy_ok_posix_names);
|
||||
@ -2596,7 +2596,7 @@ populate_methods(PyObject *klass, PyMethodDef *methods)
|
||||
|
||||
for ( ; methods->ml_name; ++methods) {
|
||||
|
||||
/* get a wrapper for the built-in function */
|
||||
/* get a wrapper for the built-in function */
|
||||
PyObject *func = PyCFunction_New(methods, NULL);
|
||||
PyObject *meth;
|
||||
int status;
|
||||
@ -2604,14 +2604,14 @@ populate_methods(PyObject *klass, PyMethodDef *methods)
|
||||
if (!func)
|
||||
return -1;
|
||||
|
||||
/* turn the function into an unbound method */
|
||||
/* turn the function into an unbound method */
|
||||
if (!(meth = PyMethod_New(func, NULL, klass))) {
|
||||
Py_DECREF(func);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* add method to dictionary */
|
||||
status = PyDict_SetItemString( ((PyClassObject*)klass)->cl_dict,
|
||||
status = PyDict_SetItemString( ((PyClassObject*)klass)->cl_dict,
|
||||
methods->ml_name, meth);
|
||||
Py_DECREF(meth);
|
||||
Py_DECREF(func);
|
||||
@ -2632,7 +2632,7 @@ static PyObject *PLy_log(int, PyObject *, PyObject *);
|
||||
PyObject *
|
||||
PLy_debug(PyObject * self, PyObject * args)
|
||||
{
|
||||
return PLy_log(DEBUG, self, args);
|
||||
return PLy_log(LOG, self, args);
|
||||
}
|
||||
|
||||
PyObject *
|
||||
@ -2690,7 +2690,7 @@ PLy_log(volatile int level, PyObject * self, PyObject * args)
|
||||
}
|
||||
|
||||
/*
|
||||
* ok, this is a NOTICE, or DEBUG message
|
||||
* ok, this is a NOTICE, or LOG message
|
||||
*
|
||||
* but just in case DON'T long jump out of the interpreter!
|
||||
*/
|
||||
@ -2732,9 +2732,9 @@ PLy_log(volatile int level, PyObject * self, PyObject * args)
|
||||
|
||||
char *PLy_procedure_name(PLyProcedure *proc)
|
||||
{
|
||||
if ( proc == NULL )
|
||||
return "<unknown procedure>";
|
||||
return proc->proname;
|
||||
if ( proc == NULL )
|
||||
return "<unknown procedure>";
|
||||
return proc->proname;
|
||||
}
|
||||
|
||||
/* output a python traceback/exception via the postgresql elog
|
||||
|
@ -31,7 +31,7 @@
|
||||
* ENHANCEMENTS, OR MODIFICATIONS.
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $Header: /cvsroot/pgsql/src/pl/tcl/pltcl.c,v 1.49 2002/01/24 19:31:36 tgl Exp $
|
||||
* $Header: /cvsroot/pgsql/src/pl/tcl/pltcl.c,v 1.50 2002/03/02 21:39:35 momjian Exp $
|
||||
*
|
||||
**********************************************************************/
|
||||
|
||||
@ -1268,7 +1268,7 @@ pltcl_elog(ClientData cdata, Tcl_Interp *interp,
|
||||
else if (strcmp(argv[1], "FATAL") == 0)
|
||||
level = FATAL;
|
||||
else if (strcmp(argv[1], "DEBUG") == 0)
|
||||
level = DEBUG;
|
||||
level = DEBUG1;
|
||||
else
|
||||
{
|
||||
Tcl_AppendResult(interp, "Unknown elog level '", argv[1],
|
||||
|
@ -271,10 +271,10 @@ SELECT unique1 FROM tenk1 WHERE unique1 < 5;
|
||||
|
||||
-- FOREIGN KEY CONSTRAINT adding TEST
|
||||
CREATE TABLE tmp2 (a int primary key);
|
||||
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'tmp2_pkey' for table 'tmp2'
|
||||
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'tmp2_pkey' for table 'tmp2'
|
||||
CREATE TABLE tmp3 (a int, b int);
|
||||
CREATE TABLE tmp4 (a int, b int, unique(a,b));
|
||||
NOTICE: CREATE TABLE / UNIQUE will create implicit index 'tmp4_a_key' for table 'tmp4'
|
||||
INFO: CREATE TABLE / UNIQUE will create implicit index 'tmp4_a_key' for table 'tmp4'
|
||||
CREATE TABLE tmp5 (a int, b int);
|
||||
-- Insert rows into tmp2 (pktable)
|
||||
INSERT INTO tmp2 values (1);
|
||||
@ -287,47 +287,47 @@ INSERT INTO tmp3 values (1,20);
|
||||
INSERT INTO tmp3 values (5,50);
|
||||
-- Try (and fail) to add constraint due to invalid source columns
|
||||
ALTER TABLE tmp3 add constraint tmpconstr foreign key(c) references tmp2 match full;
|
||||
NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
ERROR: ALTER TABLE: column "c" referenced in foreign key constraint does not exist
|
||||
-- Try (and fail) to add constraint due to invalid destination columns explicitly given
|
||||
ALTER TABLE tmp3 add constraint tmpconstr foreign key(a) references tmp2(b) match full;
|
||||
NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
ERROR: UNIQUE constraint matching given keys for referenced table "tmp2" not found
|
||||
-- Try (and fail) to add constraint due to invalid data
|
||||
ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full;
|
||||
NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
ERROR: tmpconstr referential integrity violation - key referenced from tmp3 not found in tmp2
|
||||
-- Delete failing row
|
||||
DELETE FROM tmp3 where a=5;
|
||||
-- Try (and succeed)
|
||||
ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full;
|
||||
NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
-- Try (and fail) to create constraint from tmp5(a) to tmp4(a) - unique constraint on
|
||||
-- tmp4 is a,b
|
||||
ALTER TABLE tmp5 add constraint tmpconstr foreign key(a) references tmp4(a) match full;
|
||||
NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
ERROR: UNIQUE constraint matching given keys for referenced table "tmp4" not found
|
||||
DROP TABLE tmp5;
|
||||
DROP TABLE tmp4;
|
||||
DROP TABLE tmp3;
|
||||
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "tmp2"
|
||||
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "tmp2"
|
||||
INFO: DROP TABLE implicitly drops referential integrity trigger from table "tmp2"
|
||||
INFO: DROP TABLE implicitly drops referential integrity trigger from table "tmp2"
|
||||
DROP TABLE tmp2;
|
||||
-- Foreign key adding test with mixed types
|
||||
-- Note: these tables are TEMP to avoid name conflicts when this test
|
||||
-- is run in parallel with foreign_key.sql.
|
||||
CREATE TEMP TABLE PKTABLE (ptest1 int PRIMARY KEY);
|
||||
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
|
||||
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
|
||||
CREATE TEMP TABLE FKTABLE (ftest1 text);
|
||||
-- This next should fail, because text=int does not exist
|
||||
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
|
||||
NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
|
||||
You will have to retype this query using an explicit cast
|
||||
-- This should also fail for the same reason, but here we
|
||||
-- give the column name
|
||||
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable(ptest1);
|
||||
NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
|
||||
You will have to retype this query using an explicit cast
|
||||
-- This should succeed, even though they are different types
|
||||
@ -335,21 +335,21 @@ ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
|
||||
DROP TABLE FKTABLE;
|
||||
CREATE TEMP TABLE FKTABLE (ftest1 varchar);
|
||||
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
|
||||
NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
-- As should this
|
||||
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable(ptest1);
|
||||
NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
DROP TABLE pktable;
|
||||
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
|
||||
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
|
||||
INFO: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
|
||||
INFO: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
|
||||
DROP TABLE fktable;
|
||||
CREATE TEMP TABLE PKTABLE (ptest1 int, ptest2 text,
|
||||
PRIMARY KEY(ptest1, ptest2));
|
||||
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
|
||||
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
|
||||
-- This should fail, because we just chose really odd types
|
||||
CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 datetime);
|
||||
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) references pktable;
|
||||
NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
ERROR: Unable to identify an operator '=' for types 'cidr' and 'integer'
|
||||
You will have to retype this query using an explicit cast
|
||||
-- Again, so should this...
|
||||
@ -357,7 +357,7 @@ DROP TABLE FKTABLE;
|
||||
CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 datetime);
|
||||
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2)
|
||||
references pktable(ptest1, ptest2);
|
||||
NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
ERROR: Unable to identify an operator '=' for types 'cidr' and 'integer'
|
||||
You will have to retype this query using an explicit cast
|
||||
-- This fails because we mixed up the column ordering
|
||||
@ -365,13 +365,13 @@ DROP TABLE FKTABLE;
|
||||
CREATE TEMP TABLE FKTABLE (ftest1 int, ftest2 text);
|
||||
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2)
|
||||
references pktable(ptest2, ptest1);
|
||||
NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
ERROR: Unable to identify an operator '=' for types 'integer' and 'text'
|
||||
You will have to retype this query using an explicit cast
|
||||
-- As does this...
|
||||
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest2, ftest1)
|
||||
references pktable(ptest1, ptest2);
|
||||
NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
|
||||
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
|
||||
You will have to retype this query using an explicit cast
|
||||
-- temp tables should go away by themselves, need not drop them.
|
||||
@ -452,7 +452,7 @@ drop table atacc1;
|
||||
create table atacc1 ( test int );
|
||||
-- add a unique constraint
|
||||
alter table atacc1 add constraint atacc_test1 unique (test);
|
||||
NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_test1' for table 'atacc1'
|
||||
INFO: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_test1' for table 'atacc1'
|
||||
-- insert first value
|
||||
insert into atacc1 (test) values (2);
|
||||
-- should fail
|
||||
@ -462,7 +462,7 @@ ERROR: Cannot insert a duplicate key into unique index atacc_test1
|
||||
insert into atacc1 (test) values (4);
|
||||
-- try adding a unique oid constraint
|
||||
alter table atacc1 add constraint atacc_oid1 unique(oid);
|
||||
NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_oid1' for table 'atacc1'
|
||||
INFO: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_oid1' for table 'atacc1'
|
||||
drop table atacc1;
|
||||
-- let's do one where the unique constraint fails when added
|
||||
create table atacc1 ( test int );
|
||||
@ -471,7 +471,7 @@ insert into atacc1 (test) values (2);
|
||||
insert into atacc1 (test) values (2);
|
||||
-- add a unique constraint (fails)
|
||||
alter table atacc1 add constraint atacc_test1 unique (test);
|
||||
NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_test1' for table 'atacc1'
|
||||
INFO: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_test1' for table 'atacc1'
|
||||
ERROR: Cannot create unique index. Table contains non-unique values
|
||||
insert into atacc1 (test) values (3);
|
||||
drop table atacc1;
|
||||
@ -486,7 +486,7 @@ drop table atacc1;
|
||||
create table atacc1 ( test int, test2 int);
|
||||
-- add a unique constraint
|
||||
alter table atacc1 add constraint atacc_test1 unique (test, test2);
|
||||
NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_test1' for table 'atacc1'
|
||||
INFO: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_test1' for table 'atacc1'
|
||||
-- insert initial value
|
||||
insert into atacc1 (test,test2) values (4,4);
|
||||
-- should fail
|
||||
@ -499,9 +499,9 @@ insert into atacc1 (test,test2) values (5,5);
|
||||
drop table atacc1;
|
||||
-- lets do some naming tests
|
||||
create table atacc1 (test int, test2 int, unique(test));
|
||||
NOTICE: CREATE TABLE / UNIQUE will create implicit index 'atacc1_test_key' for table 'atacc1'
|
||||
INFO: CREATE TABLE / UNIQUE will create implicit index 'atacc1_test_key' for table 'atacc1'
|
||||
alter table atacc1 add unique (test2);
|
||||
NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc1_test2_key' for table 'atacc1'
|
||||
INFO: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc1_test2_key' for table 'atacc1'
|
||||
-- should fail for @@ second one @@
|
||||
insert into atacc1 (test2, test) values (3, 3);
|
||||
insert into atacc1 (test2, test) values (2, 3);
|
||||
|
@ -136,8 +136,8 @@ INSERT INTO iportaltest (i, d, p)
|
||||
--- test creation of SERIAL column
|
||||
---
|
||||
CREATE TABLE serialTest (f1 text, f2 serial);
|
||||
NOTICE: CREATE TABLE will create implicit sequence 'serialtest_f2_seq' for SERIAL column 'serialtest.f2'
|
||||
NOTICE: CREATE TABLE / UNIQUE will create implicit index 'serialtest_f2_key' for table 'serialtest'
|
||||
INFO: CREATE TABLE will create implicit sequence 'serialtest_f2_seq' for SERIAL column 'serialtest.f2'
|
||||
INFO: CREATE TABLE / UNIQUE will create implicit index 'serialtest_f2_key' for table 'serialtest'
|
||||
INSERT INTO serialTest VALUES ('foo');
|
||||
INSERT INTO serialTest VALUES ('bar');
|
||||
INSERT INTO serialTest VALUES ('force', 100);
|
||||
|
@ -81,9 +81,9 @@ CREATE TABLE student (
|
||||
CREATE TABLE stud_emp (
|
||||
percent int4
|
||||
) INHERITS (emp, student);
|
||||
NOTICE: CREATE TABLE: merging multiple inherited definitions of attribute "name"
|
||||
NOTICE: CREATE TABLE: merging multiple inherited definitions of attribute "age"
|
||||
NOTICE: CREATE TABLE: merging multiple inherited definitions of attribute "location"
|
||||
INFO: CREATE TABLE: merging multiple inherited definitions of attribute "name"
|
||||
INFO: CREATE TABLE: merging multiple inherited definitions of attribute "age"
|
||||
INFO: CREATE TABLE: merging multiple inherited definitions of attribute "location"
|
||||
CREATE TABLE city (
|
||||
name name,
|
||||
location box,
|
||||
@ -135,8 +135,8 @@ CREATE TABLE c_star (
|
||||
CREATE TABLE d_star (
|
||||
d float8
|
||||
) INHERITS (b_star, c_star);
|
||||
NOTICE: CREATE TABLE: merging multiple inherited definitions of attribute "class"
|
||||
NOTICE: CREATE TABLE: merging multiple inherited definitions of attribute "a"
|
||||
INFO: CREATE TABLE: merging multiple inherited definitions of attribute "class"
|
||||
INFO: CREATE TABLE: merging multiple inherited definitions of attribute "a"
|
||||
CREATE TABLE e_star (
|
||||
e int2
|
||||
) INHERITS (c_star);
|
||||
|
@ -6,9 +6,9 @@
-- First test, check and cascade
--
CREATE TABLE PKTABLE ( ptest1 int PRIMARY KEY, ptest2 text );
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int REFERENCES PKTABLE MATCH FULL ON DELETE CASCADE ON UPDATE CASCADE, ftest2 int );
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert test data into PKTABLE
INSERT INTO PKTABLE VALUES (1, 'Test1');
INSERT INTO PKTABLE VALUES (2, 'Test2');
@ -56,16 +56,16 @@ SELECT * FROM FKTABLE;
(3 rows)

DROP TABLE PKTABLE;
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
DROP TABLE FKTABLE;
--
-- check set NULL and table constraint on multiple columns
--
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) );
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, CONSTRAINT constrname FOREIGN KEY(ftest1, ftest2)
REFERENCES PKTABLE MATCH FULL ON DELETE SET NULL ON UPDATE SET NULL);
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert test data into PKTABLE
INSERT INTO PKTABLE VALUES (1, 2, 'Test1');
INSERT INTO PKTABLE VALUES (1, 3, 'Test1-2');
@ -139,16 +139,16 @@ SELECT * FROM FKTABLE;
(5 rows)

DROP TABLE PKTABLE;
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
DROP TABLE FKTABLE;
--
-- check set default and table constraint on multiple columns
--
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) );
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int DEFAULT -1, ftest2 int DEFAULT -2, ftest3 int, CONSTRAINT constrname2 FOREIGN KEY(ftest1, ftest2)
REFERENCES PKTABLE MATCH FULL ON DELETE SET DEFAULT ON UPDATE SET DEFAULT);
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert a value in PKTABLE for default
INSERT INTO PKTABLE VALUES (-1, -2, 'The Default!');
-- Insert test data into PKTABLE
@ -224,15 +224,15 @@ SELECT * FROM FKTABLE;
(5 rows)

DROP TABLE PKTABLE;
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
DROP TABLE FKTABLE;
--
-- First test, check with no on delete or on update
--
CREATE TABLE PKTABLE ( ptest1 int PRIMARY KEY, ptest2 text );
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int REFERENCES PKTABLE MATCH FULL, ftest2 int );
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert test data into PKTABLE
INSERT INTO PKTABLE VALUES (1, 'Test1');
INSERT INTO PKTABLE VALUES (2, 'Test2');
@ -299,15 +299,15 @@ SELECT * FROM PKTABLE;
(4 rows)

DROP TABLE PKTABLE;
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
DROP TABLE FKTABLE;
-- MATCH unspecified
-- Base test restricting update/delete
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE);
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert Primary Key values
INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
@ -363,16 +363,16 @@ SELECT * from FKTABLE;
(5 rows)

DROP TABLE FKTABLE;
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
DROP TABLE PKTABLE;
-- cascade update/delete
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE
ON DELETE CASCADE ON UPDATE CASCADE);
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert Primary Key values
INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
@ -462,16 +462,16 @@ SELECT * from FKTABLE;
(4 rows)

DROP TABLE FKTABLE;
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
DROP TABLE PKTABLE;
-- set null update / set default delete
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int DEFAULT 0, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE
ON DELETE SET DEFAULT ON UPDATE SET NULL);
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert Primary Key values
INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
@ -568,16 +568,16 @@ SELECT * from FKTABLE;
(6 rows)

DROP TABLE FKTABLE;
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
DROP TABLE PKTABLE;
-- set default update / set null delete
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int DEFAULT 0, ftest2 int DEFAULT -1, ftest3 int, ftest4 int, CONSTRAINT constrname3
FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE
ON DELETE SET NULL ON UPDATE SET DEFAULT);
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert Primary Key values
INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
@ -687,16 +687,16 @@ SELECT * from FKTABLE;
(7 rows)

DROP TABLE FKTABLE;
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
DROP TABLE PKTABLE;
CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE_FAIL1 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (ftest2) REFERENCES PKTABLE);
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: CREATE TABLE: column "ftest2" referenced in foreign key constraint does not exist
CREATE TABLE FKTABLE_FAIL2 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (ftest1) REFERENCES PKTABLE(ptest2));
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: UNIQUE constraint matching given keys for referenced table "pktable" not found
DROP TABLE FKTABLE_FAIL1;
ERROR: table "fktable_fail1" does not exist
@ -705,9 +705,9 @@ ERROR: table "fktable_fail2" does not exist
DROP TABLE PKTABLE;
-- Test for referencing column number smaller than referenced constraint
CREATE TABLE PKTABLE (ptest1 int, ptest2 int, UNIQUE(ptest1, ptest2));
NOTICE: CREATE TABLE / UNIQUE will create implicit index 'pktable_ptest1_key' for table 'pktable'
INFO: CREATE TABLE / UNIQUE will create implicit index 'pktable_ptest1_key' for table 'pktable'
CREATE TABLE FKTABLE_FAIL1 (ftest1 int REFERENCES pktable(ptest1));
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: UNIQUE constraint matching given keys for referenced table "pktable" not found
DROP TABLE FKTABLE_FAIL1;
ERROR: table "fktable_fail1" does not exist
@ -717,105 +717,105 @@ DROP TABLE PKTABLE;
--
-- Basic one column, two table setup
CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-- This next should fail, because text=int does not exist
CREATE TABLE FKTABLE (ftest1 text REFERENCES pktable);
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
-- This should also fail for the same reason, but here we
-- give the column name
CREATE TABLE FKTABLE (ftest1 text REFERENCES pktable(ptest1));
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
-- This should succeed, even though they are different types
-- because varchar=int does exist
CREATE TABLE FKTABLE (ftest1 varchar REFERENCES pktable);
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
DROP TABLE FKTABLE;
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
-- As should this
CREATE TABLE FKTABLE (ftest1 varchar REFERENCES pktable(ptest1));
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
DROP TABLE FKTABLE;
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
DROP TABLE PKTABLE;
-- Two columns, two tables
CREATE TABLE PKTABLE (ptest1 int, ptest2 text, PRIMARY KEY(ptest1, ptest2));
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-- This should fail, because we just chose really odd types
CREATE TABLE FKTABLE (ftest1 cidr, ftest2 datetime, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable);
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'cidr' and 'integer'
You will have to retype this query using an explicit cast
-- Again, so should this...
CREATE TABLE FKTABLE (ftest1 cidr, ftest2 datetime, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest1, ptest2));
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'cidr' and 'integer'
You will have to retype this query using an explicit cast
-- This fails because we mixed up the column ordering
CREATE TABLE FKTABLE (ftest1 int, ftest2 text, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable);
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
-- As does this...
CREATE TABLE FKTABLE (ftest1 int, ftest2 text, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable(ptest1, ptest2));
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
-- And again..
CREATE TABLE FKTABLE (ftest1 int, ftest2 text, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest2, ptest1));
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'integer' and 'text'
You will have to retype this query using an explicit cast
-- This works...
CREATE TABLE FKTABLE (ftest1 int, ftest2 text, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable(ptest2, ptest1));
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
DROP TABLE FKTABLE;
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
-- As does this
CREATE TABLE FKTABLE (ftest1 int, ftest2 text, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest1, ptest2));
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
DROP TABLE FKTABLE;
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
DROP TABLE PKTABLE;
-- Two columns, same table
-- Make sure this still works...
CREATE TABLE PKTABLE (ptest1 int, ptest2 text, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
ptest4) REFERENCES pktable(ptest1, ptest2));
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
DROP TABLE PKTABLE;
-- And this,
CREATE TABLE PKTABLE (ptest1 int, ptest2 text, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
ptest4) REFERENCES pktable);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
DROP TABLE PKTABLE;
-- This shouldn't (mixed up columns)
CREATE TABLE PKTABLE (ptest1 int, ptest2 text, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
ptest4) REFERENCES pktable(ptest2, ptest1));
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'integer' and 'text'
You will have to retype this query using an explicit cast
-- Nor should this... (same reason, we have 4,3 referencing 1,2 which mismatches types
CREATE TABLE PKTABLE (ptest1 int, ptest2 text, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest4,
ptest3) REFERENCES pktable(ptest1, ptest2));
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
-- Not this one either... Same as the last one except we didn't defined the columns being referenced.
CREATE TABLE PKTABLE (ptest1 int, ptest2 text, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest4,
ptest3) REFERENCES pktable);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
--
@ -823,10 +823,10 @@ ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
-- Basic 2 table case: 1 column of matching types.
create table pktable_base (base1 int not null);
create table pktable (ptest1 int, primary key(base1), unique(base1, ptest1)) inherits (pktable_base);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
NOTICE: CREATE TABLE / UNIQUE will create implicit index 'pktable_base1_key' for table 'pktable'
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE / UNIQUE will create implicit index 'pktable_base1_key' for table 'pktable'
create table fktable (ftest1 int references pktable(base1));
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- now some ins, upd, del
insert into pktable(base1) values (1);
insert into pktable(base1) values (2);
@ -849,12 +849,12 @@ update pktable set base1=base1*4 where base1<3;
delete from pktable where base1>3;
-- cleanup
drop table fktable;
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
delete from pktable;
-- Now 2 columns 2 tables, matching types
create table fktable (ftest1 int, ftest2 int, foreign key(ftest1, ftest2) references pktable(base1, ptest1));
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- now some ins, upd, del
insert into pktable(base1, ptest1) values (1, 1);
insert into pktable(base1, ptest1) values (2, 2);
@ -877,16 +877,16 @@ update pktable set base1=base1*4 where base1<3;
delete from pktable where base1>3;
-- cleanup
drop table fktable;
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
drop table pktable;
drop table pktable_base;
-- Now we'll do one all in 1 table with 2 columns of matching types
create table pktable_base(base1 int not null, base2 int);
create table pktable(ptest1 int, ptest2 int, primary key(base1, ptest1), foreign key(base2, ptest2) references
pktable(base1, ptest1)) inherits (pktable_base);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
insert into pktable (base1, ptest1, base2, ptest2) values (1, 1, 1, 1);
insert into pktable (base1, ptest1, base2, ptest2) values (2, 1, 1, 1);
insert into pktable (base1, ptest1, base2, ptest2) values (2, 2, 2, 1);
@ -908,27 +908,27 @@ drop table pktable_base;
-- 2 columns (2 tables), mismatched types
create table pktable_base(base1 int not null);
create table pktable(ptest1 text, primary key(base1, ptest1)) inherits (pktable_base);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-- just generally bad types (with and without column references on the referenced table)
create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable);
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'cidr' and 'integer'
You will have to retype this query using an explicit cast
create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable(base1, ptest1));
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'cidr' and 'integer'
You will have to retype this query using an explicit cast
-- let's mix up which columns reference which
create table fktable(ftest1 int, ftest2 text, foreign key(ftest2, ftest1) references pktable);
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
create table fktable(ftest1 int, ftest2 text, foreign key(ftest2, ftest1) references pktable(base1, ptest1));
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
create table fktable(ftest1 int, ftest2 text, foreign key(ftest1, ftest2) references pktable(ptest1, base1));
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'integer' and 'text'
You will have to retype this query using an explicit cast
drop table pktable;
@ -937,26 +937,26 @@ drop table pktable_base;
create table pktable_base(base1 int not null, base2 int);
create table pktable(ptest1 text, ptest2 text[], primary key(base1, ptest1), foreign key(base2, ptest2) references
pktable(base1, ptest1)) inherits (pktable_base);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text[]' and 'text'
You will have to retype this query using an explicit cast
create table pktable(ptest1 text, ptest2 text, primary key(base1, ptest1), foreign key(base2, ptest2) references
pktable(ptest1, base1)) inherits (pktable_base);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'integer' and 'text'
You will have to retype this query using an explicit cast
create table pktable(ptest1 text, ptest2 text, primary key(base1, ptest1), foreign key(ptest2, base2) references
pktable(base1, ptest1)) inherits (pktable_base);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
create table pktable(ptest1 text, ptest2 text, primary key(base1, ptest1), foreign key(ptest2, base2) references
pktable(base1, ptest1)) inherits (pktable_base);
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
drop table pktable;
@ -2340,7 +2340,7 @@ DROP TABLE TEMP_TIMESTAMP;
--
SET DateStyle TO 'US,Postgres';
SHOW DateStyle;
NOTICE: DateStyle is Postgres with US (NonEuropean) conventions
INFO: DateStyle is Postgres with US (NonEuropean) conventions
SELECT '' AS "64", d1 AS us_postgres FROM TIMESTAMP_TBL;
 64 | us_postgres
----+-----------------------------
@ -2506,7 +2506,7 @@ SELECT '' AS seven, f1 AS us_iso FROM ABSTIME_TBL;

SET DateStyle TO 'US,SQL';
SHOW DateStyle;
NOTICE: DateStyle is SQL with US (NonEuropean) conventions
INFO: DateStyle is SQL with US (NonEuropean) conventions
SELECT '' AS "64", d1 AS us_sql FROM TIMESTAMP_TBL;
 64 | us_sql
----+------------------------
@ -2590,7 +2590,7 @@ SELECT '' AS seven, f1 AS us_sql FROM ABSTIME_TBL;

SET DateStyle TO 'European,Postgres';
SHOW DateStyle;
NOTICE: DateStyle is Postgres with European conventions
INFO: DateStyle is Postgres with European conventions
INSERT INTO TIMESTAMP_TBL VALUES('13/06/1957');
SELECT count(*) as one FROM TIMESTAMP_TBL WHERE d1 = 'Jun 13 1957';
 one
@ -2682,7 +2682,7 @@ SELECT '' AS seven, f1 AS european_postgres FROM ABSTIME_TBL;

SET DateStyle TO 'European,ISO';
SHOW DateStyle;
NOTICE: DateStyle is ISO with European conventions
INFO: DateStyle is ISO with European conventions
SELECT '' AS "65", d1 AS european_iso FROM TIMESTAMP_TBL;
 65 | european_iso
----+------------------------
@ -2767,7 +2767,7 @@ SELECT '' AS seven, f1 AS european_iso FROM ABSTIME_TBL;

SET DateStyle TO 'European,SQL';
SHOW DateStyle;
NOTICE: DateStyle is SQL with European conventions
INFO: DateStyle is SQL with European conventions
SELECT '' AS "65", d1 AS european_sql FROM TIMESTAMP_TBL;
 65 | european_sql
----+------------------------
@ -5,8 +5,8 @@ CREATE TABLE a (aa TEXT);
CREATE TABLE b (bb TEXT) INHERITS (a);
CREATE TABLE c (cc TEXT) INHERITS (a);
CREATE TABLE d (dd TEXT) INHERITS (b,c,a);
NOTICE: CREATE TABLE: merging multiple inherited definitions of attribute "aa"
NOTICE: CREATE TABLE: merging multiple inherited definitions of attribute "aa"
INFO: CREATE TABLE: merging multiple inherited definitions of attribute "aa"
INFO: CREATE TABLE: merging multiple inherited definitions of attribute "aa"
INSERT INTO a(aa) VALUES('aaa');
INSERT INTO a(aa) VALUES('aaaa');
INSERT INTO a(aa) VALUES('aaaaa');