/*-------------------------------------------------------------------------
 *
 * pg_buffercache_pages.c
 *    display some contents of the buffer cache
 *
 *    contrib/pg_buffercache/pg_buffercache_pages.c
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/htup_details.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "storage/buf_internals.h"
#include "storage/bufmgr.h"

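/*
 * Width of a result row: version 1.0 of the extension's SQL definition has 8
 * output columns; later versions add pinning_backends, for 9 in total.
 */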
#define NUM_BUFFERCACHE_PAGES_MIN_ELEM 8
#define NUM_BUFFERCACHE_PAGES_ELEM 9

PG_MODULE_MAGIC;

/*
 * Record structure holding the cache data to be exposed.
 */
typedef struct
{
    uint32      bufferid;
    Oid         relfilenode;
    Oid         reltablespace;
    Oid         reldatabase;
    ForkNumber  forknum;
    BlockNumber blocknum;
    bool        isvalid;
    bool        isdirty;
    uint16      usagecount;

    /*
     * An int32 is sufficiently large, as MAX_BACKENDS prevents a buffer from
     * being pinned by too many backends and each backend will only pin once
     * because of bufmgr.c's PrivateRefCount infrastructure.
     */
    int32       pinning_backends;
} BufferCachePagesRec;

/*
 * Function context for data persisting over repeated calls.
 */
typedef struct
{
    TupleDesc   tupdesc;
    BufferCachePagesRec *record;
} BufferCachePagesContext;

/*
 * Function returning data from the shared buffer cache - buffer number,
 * relation node/tablespace/database/blocknum and dirty indicator.
 */
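/*
 * The extension's SQL script normally wraps this set-returning function in
 * the pg_buffercache view, roughly like the sketch below (the exact column
 * list depends on the installed extension version):
 *
 *    CREATE VIEW pg_buffercache AS
 *        SELECT P.* FROM pg_buffercache_pages() AS P
 *        (bufferid integer, relfilenode oid, reltablespace oid,
 *         reldatabase oid, relforknumber int2, relblocknumber int8,
 *         isdirty bool, usagecount int2, pinning_backends int4);
 */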
PG_FUNCTION_INFO_V1(pg_buffercache_pages);

Datum
pg_buffercache_pages(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;
    Datum       result;
    MemoryContext oldcontext;
    BufferCachePagesContext *fctx;  /* User function context. */
    TupleDesc   tupledesc;
    TupleDesc   expected_tupledesc;
    HeapTuple   tuple;

    if (SRF_IS_FIRSTCALL())
    {
        int         i;

        funcctx = SRF_FIRSTCALL_INIT();

        /* Switch context when allocating stuff to be used in later calls */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

        /* Create a user function context for cross-call persistence */
        fctx = (BufferCachePagesContext *) palloc(sizeof(BufferCachePagesContext));

        /*
         * To smoothly support upgrades from version 1.0 of this extension,
         * transparently handle the (non-)existence of the pinning_backends
         * column.  We unfortunately have to look up the call's actual result
         * type for that; we can't just use the result type implied by the
         * function definition, since that could crash when somebody uses the
         * old (or even wrong) function definition.
         */
        if (get_call_result_type(fcinfo, NULL, &expected_tupledesc) != TYPEFUNC_COMPOSITE)
            elog(ERROR, "return type must be a row type");

        if (expected_tupledesc->natts < NUM_BUFFERCACHE_PAGES_MIN_ELEM ||
            expected_tupledesc->natts > NUM_BUFFERCACHE_PAGES_ELEM)
            elog(ERROR, "incorrect number of output arguments");

        /* Construct a tuple descriptor for the result rows. */
        tupledesc = CreateTemplateTupleDesc(expected_tupledesc->natts);
        TupleDescInitEntry(tupledesc, (AttrNumber) 1, "bufferid",
                           INT4OID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 2, "relfilenode",
                           OIDOID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 3, "reltablespace",
                           OIDOID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 4, "reldatabase",
                           OIDOID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 5, "relforknumber",
                           INT2OID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 6, "relblocknumber",
                           INT8OID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 7, "isdirty",
                           BOOLOID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 8, "usage_count",
                           INT2OID, -1, 0);

        if (expected_tupledesc->natts == NUM_BUFFERCACHE_PAGES_ELEM)
            TupleDescInitEntry(tupledesc, (AttrNumber) 9, "pinning_backends",
                               INT4OID, -1, 0);

        fctx->tupdesc = BlessTupleDesc(tupledesc);

        /* Allocate NBuffers worth of BufferCachePagesRec records. */
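        /*
         * With very large shared_buffers settings this array can exceed the
         * 1 GB cap of a plain palloc(), hence the "huge" allocation.
         */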
        fctx->record = (BufferCachePagesRec *)
            MemoryContextAllocHuge(CurrentMemoryContext,
                                   sizeof(BufferCachePagesRec) * NBuffers);

        /* Set max calls and remember the user function context. */
        funcctx->max_calls = NBuffers;
        funcctx->user_fctx = fctx;

        /* Return to original context when allocating transient memory */
        MemoryContextSwitchTo(oldcontext);
2006-07-23 07:07:58 +04:00
|
|
|
/*
|
2015-05-20 16:18:11 +03:00
|
|
|
* Scan through all the buffers, saving the relevant fields in the
|
2006-07-23 07:07:58 +04:00
|
|
|
* fctx->record structure.
|
2016-09-29 13:16:30 +03:00
|
|
|
*
|
|
|
|
* We don't hold the partition locks, so we don't get a consistent
|
|
|
|
* snapshot across all buffers, but we do grab the buffer header
|
|
|
|
* locks, so the information of each buffer is self-consistent.
|
2006-07-23 07:07:58 +04:00
|
|
|
*/
|
        for (i = 0; i < NBuffers; i++)
        {
            BufferDesc *bufHdr;
            uint32      buf_state;

            bufHdr = GetBufferDescriptor(i);
            /* Lock each buffer header before inspecting. */
            buf_state = LockBufHdr(bufHdr);

            fctx->record[i].bufferid = BufferDescriptorGetBuffer(bufHdr);
            fctx->record[i].relfilenode = bufHdr->tag.rnode.relNode;
            fctx->record[i].reltablespace = bufHdr->tag.rnode.spcNode;
            fctx->record[i].reldatabase = bufHdr->tag.rnode.dbNode;
            fctx->record[i].forknum = bufHdr->tag.forkNum;
            fctx->record[i].blocknum = bufHdr->tag.blockNum;
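            /*
             * Usage count and pin count are both packed into the single
             * atomic buf_state word; the BUF_STATE_GET_* macros extract the
             * individual fields.
             */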
            fctx->record[i].usagecount = BUF_STATE_GET_USAGECOUNT(buf_state);
            fctx->record[i].pinning_backends = BUF_STATE_GET_REFCOUNT(buf_state);

            if (buf_state & BM_DIRTY)
                fctx->record[i].isdirty = true;
            else
                fctx->record[i].isdirty = false;

            /* Note if the buffer is valid, and has storage created */
            if ((buf_state & BM_VALID) && (buf_state & BM_TAG_VALID))
                fctx->record[i].isvalid = true;
            else
                fctx->record[i].isvalid = false;

            UnlockBufHdr(bufHdr, buf_state);
        }
    }

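    /* Per-call phase: emit one saved buffer record per call until done. */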
    funcctx = SRF_PERCALL_SETUP();

    /* Get the saved state */
    fctx = funcctx->user_fctx;

    if (funcctx->call_cntr < funcctx->max_calls)
    {
        uint32      i = funcctx->call_cntr;
        Datum       values[NUM_BUFFERCACHE_PAGES_ELEM];
        bool        nulls[NUM_BUFFERCACHE_PAGES_ELEM];

        values[0] = Int32GetDatum(fctx->record[i].bufferid);
        nulls[0] = false;

        /*
         * Set all fields except the bufferid to null if the buffer is unused
         * or not valid.
         */
        if (fctx->record[i].blocknum == InvalidBlockNumber ||
            fctx->record[i].isvalid == false)
        {
            nulls[1] = true;
            nulls[2] = true;
            nulls[3] = true;
            nulls[4] = true;
            nulls[5] = true;
            nulls[6] = true;
            nulls[7] = true;
            /* unused for v1.0 callers, but the array is always long enough */
            nulls[8] = true;
        }
        else
        {
            values[1] = ObjectIdGetDatum(fctx->record[i].relfilenode);
            nulls[1] = false;
            values[2] = ObjectIdGetDatum(fctx->record[i].reltablespace);
            nulls[2] = false;
            values[3] = ObjectIdGetDatum(fctx->record[i].reldatabase);
            nulls[3] = false;
            values[4] = ObjectIdGetDatum(fctx->record[i].forknum);
            nulls[4] = false;
            values[5] = Int64GetDatum((int64) fctx->record[i].blocknum);
            nulls[5] = false;
            values[6] = BoolGetDatum(fctx->record[i].isdirty);
            nulls[6] = false;
            values[7] = Int16GetDatum(fctx->record[i].usagecount);
            nulls[7] = false;
            /* unused for v1.0 callers, but the array is always long enough */
            values[8] = Int32GetDatum(fctx->record[i].pinning_backends);
            nulls[8] = false;
        }

        /* Build and return the tuple. */
        tuple = heap_form_tuple(fctx->tupdesc, values, nulls);
        result = HeapTupleGetDatum(tuple);

        SRF_RETURN_NEXT(funcctx, result);
    }
    else
        SRF_RETURN_DONE(funcctx);
}