Diffstat (limited to 'src/backend/storage/buffer')
 src/backend/storage/buffer/buf_init.c  |  15
 src/backend/storage/buffer/buf_table.c |   4
 src/backend/storage/buffer/bufmgr.c    |  85
 src/backend/storage/buffer/freelist.c  | 196
 src/backend/storage/buffer/localbuf.c  |  10
 5 files changed, 159 insertions(+), 151 deletions(-)
diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c
index 86d17ad6dce..5b727c9a222 100644
--- a/src/backend/storage/buffer/buf_init.c
+++ b/src/backend/storage/buffer/buf_init.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_init.c,v 1.67 2004/08/29 04:12:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_init.c,v 1.68 2004/08/29 05:06:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,7 +59,7 @@ long int LocalBufferFlushCount;
*
* IO_IN_PROGRESS -- this is a flag in the buffer descriptor.
* It must be set when an IO is initiated and cleared at
- * the end of the IO. It is there to make sure that one
+ * the end of the IO. It is there to make sure that one
* process doesn't start to use a buffer while another is
* faulting it in. see IOWait/IOSignal.
*
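The IO_IN_PROGRESS protocol described in this comment can be shown in miniature with POSIX threads standing in for PostgreSQL's locks. A minimal sketch, assuming a simplified descriptor (FakeBufDesc, start_buffer_io, and terminate_buffer_io are invented names; the real code uses the io_in_progress lock and IOWait/IOSignal):

#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-in for a buffer descriptor; the mutex and condition
 * variable are assumed initialized with pthread_mutex_init()/
 * pthread_cond_init(). */
typedef struct
{
    bool        io_in_progress;
    pthread_mutex_t lock;
    pthread_cond_t io_done;
} FakeBufDesc;

/* Claim the buffer for IO, first waiting out any IO already underway,
 * so one process can't use a page another is still faulting in. */
static void
start_buffer_io(FakeBufDesc *buf)
{
    pthread_mutex_lock(&buf->lock);
    while (buf->io_in_progress)
        pthread_cond_wait(&buf->io_done, &buf->lock);   /* cf. IOWait */
    buf->io_in_progress = true;
    pthread_mutex_unlock(&buf->lock);
}

/* Clear the flag at the end of the IO and wake any waiters. */
static void
terminate_buffer_io(FakeBufDesc *buf)
{
    pthread_mutex_lock(&buf->lock);
    buf->io_in_progress = false;
    pthread_cond_broadcast(&buf->io_done);              /* cf. IOSignal */
    pthread_mutex_unlock(&buf->lock);
}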
@@ -112,9 +112,9 @@ InitBufferPool(void)
char *block;
/*
- * It's probably not really necessary to grab the lock --- if there's
- * anyone else attached to the shmem at this point, we've got
- * problems.
+ * It's probably not really necessary to grab the lock --- if
+ * there's anyone else attached to the shmem at this point, we've
+ * got problems.
*/
LWLockAcquire(BufMgrLock, LW_EXCLUSIVE);
@@ -122,8 +122,9 @@ InitBufferPool(void)
block = BufferBlocks;
/*
- * link the buffers into a single linked list. This will become the
- * LIFO list of unused buffers returned by StrategyGetBuffer().
+ * link the buffers into a single linked list. This will become
+ * the LIFO list of unused buffers returned by
+ * StrategyGetBuffer().
*/
for (i = 0; i < NBuffers; block += BLCKSZ, buf++, i++)
{
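The list built in this loop is threaded through the descriptors by array index rather than by pointer. A self-contained sketch of the same idea (MiniBufDesc, bufNext, and the -1 terminator are illustrative, not the real BufferDesc layout):

#include <stdio.h>

#define N_BUFFERS 8             /* stand-in for NBuffers */

typedef struct
{
    int         buf_id;
    int         bufNext;        /* index of next free buffer; -1 ends the list */
} MiniBufDesc;

int
main(void)
{
    MiniBufDesc bufs[N_BUFFERS];
    int         i;

    /* Link each descriptor to its successor, as InitBufferPool() does;
     * the strategy code later pops buffers off the head of this list. */
    for (i = 0; i < N_BUFFERS; i++)
    {
        bufs[i].buf_id = i;
        bufs[i].bufNext = (i == N_BUFFERS - 1) ? -1 : i + 1;
    }

    for (i = 0; i != -1; i = bufs[i].bufNext)
        printf("free buffer %d\n", bufs[i].buf_id);
    return 0;
}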
diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c
index 9292b436f83..ddc329a4089 100644
--- a/src/backend/storage/buffer/buf_table.c
+++ b/src/backend/storage/buffer/buf_table.c
@@ -4,7 +4,7 @@
* routines for finding buffers in the buffer pool.
*
* NOTE: these days, what this table actually provides is a mapping from
- * BufferTags to CDB indexes, not directly to buffers. The function names
+ * BufferTags to CDB indexes, not directly to buffers. The function names
* are thus slight misnomers.
*
* Note: all routines in this file assume that the BufMgrLock is held
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_table.c,v 1.36 2004/08/29 04:12:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_table.c,v 1.37 2004/08/29 05:06:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
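The note above is the whole point of buf_table.c: tags resolve to CDB indexes, and the CDB in turn knows which buffer, if any, holds the data. A toy version of that mapping, using linear probing rather than PostgreSQL's dynahash (MiniBufferTag, tag_insert, tag_lookup, and the table size are assumptions):

#include <stdbool.h>
#include <string.h>

#define TABLE_SIZE 64           /* illustrative; must exceed the entry count */

typedef struct
{
    unsigned    rel;            /* stand-in for the relation part of a tag */
    unsigned    block;          /* block number within the relation */
} MiniBufferTag;

typedef struct
{
    bool        used;
    MiniBufferTag tag;
    int         cdb_id;         /* a CDB index, not a buffer pointer */
} TagEntry;

static TagEntry tag_table[TABLE_SIZE];

static unsigned
tag_hash(const MiniBufferTag *tag)
{
    return (tag->rel * 2654435761u) ^ tag->block;
}

/* Record that a tag is tracked by the given CDB (cf. BufTableInsert). */
static void
tag_insert(const MiniBufferTag *tag, int cdb_id)
{
    unsigned    h = tag_hash(tag) % TABLE_SIZE;

    while (tag_table[h].used)
        h = (h + 1) % TABLE_SIZE;
    tag_table[h].used = true;
    tag_table[h].tag = *tag;
    tag_table[h].cdb_id = cdb_id;
}

/* Return the CDB index for a tag, or -1 if absent (cf. BufTableLookup). */
static int
tag_lookup(const MiniBufferTag *tag)
{
    unsigned    h = tag_hash(tag) % TABLE_SIZE;

    while (tag_table[h].used)
    {
        if (memcmp(&tag_table[h].tag, tag, sizeof(*tag)) == 0)
            return tag_table[h].cdb_id;
        h = (h + 1) % TABLE_SIZE;
    }
    return -1;
}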
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index ba79effe5db..53c01c844a5 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.174 2004/08/29 04:12:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.175 2004/08/29 05:06:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,7 +58,7 @@
bool zero_damaged_pages = false;
#ifdef NOT_USED
-bool ShowPinTrace = false;
+bool ShowPinTrace = false;
#endif
long NDirectFileRead; /* some I/O's are direct file access.
@@ -143,6 +143,7 @@ ReadBufferInternal(Relation reln, BlockNumber blockNum,
{
ReadBufferCount++;
pgstat_count_buffer_read(&reln->pgstat_info, reln);
+
/*
* lookup the buffer. IO_IN_PROGRESS is set if the requested
* block is not currently in memory.
@@ -174,11 +175,11 @@ ReadBufferInternal(Relation reln, BlockNumber blockNum,
* it, if it's a shared buffer.
*
* Note: if smgrextend fails, we will end up with a buffer that is
- * allocated but not marked BM_VALID. P_NEW will still select the same
- * block number (because the relation didn't get any longer on disk)
- * and so future attempts to extend the relation will find the same
- * buffer (if it's not been recycled) but come right back here to try
- * smgrextend again.
+ * allocated but not marked BM_VALID. P_NEW will still select the
+ * same block number (because the relation didn't get any longer on
+ * disk) and so future attempts to extend the relation will find the
+ * same buffer (if it's not been recycled) but come right back here to
+ * try smgrextend again.
*/
Assert(!(bufHdr->flags & BM_VALID));
@@ -196,10 +197,11 @@ ReadBufferInternal(Relation reln, BlockNumber blockNum,
if (!PageHeaderIsValid((PageHeader) MAKE_PTR(bufHdr->data)))
{
/*
- * During WAL recovery, the first access to any data page should
- * overwrite the whole page from the WAL; so a clobbered page
- * header is not reason to fail. Hence, when InRecovery we may
- * always act as though zero_damaged_pages is ON.
+ * During WAL recovery, the first access to any data page
+ * should overwrite the whole page from the WAL; so a
+ * clobbered page header is no reason to fail. Hence, when
+ * InRecovery we may always act as though zero_damaged_pages
+ * is ON.
*/
if (zero_damaged_pages || InRecovery)
{
@@ -212,8 +214,8 @@ ReadBufferInternal(Relation reln, BlockNumber blockNum,
else
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("invalid page header in block %u of relation \"%s\"",
- blockNum, RelationGetRelationName(reln))));
+ errmsg("invalid page header in block %u of relation \"%s\"",
+ blockNum, RelationGetRelationName(reln))));
}
}
@@ -348,9 +350,9 @@ BufferAlloc(Relation reln,
* if someone were writing it.
*
* Note: it's okay to grab the io_in_progress lock while holding
- * BufMgrLock. All code paths that acquire this lock pin the
- * buffer first; since no one had it pinned (it just came off the
- * free list), no one else can have the lock.
+ * BufMgrLock. All code paths that acquire this lock pin the
+ * buffer first; since no one had it pinned (it just came off
+ * the free list), no one else can have the lock.
*/
StartBufferIO(buf, false);
@@ -364,23 +366,23 @@ BufferAlloc(Relation reln,
/*
* Somebody could have allocated another buffer for the same
- * block we are about to read in. While we flush out the
- * dirty buffer, we don't hold the lock and someone could have
+ * block we are about to read in. While we flush out the dirty
+ * buffer, we don't hold the lock and someone could have
* allocated another buffer for the same block. The problem is
* we haven't yet inserted the new tag into the buffer table.
* So we need to check here. -ay 3/95
*
- * Another reason we have to do this is to update cdb_found_index,
- * since the CDB could have disappeared from B1/B2 list while
- * we were writing.
+ * Another reason we have to do this is to update
+ * cdb_found_index, since the CDB could have disappeared from
+ * B1/B2 list while we were writing.
*/
buf2 = StrategyBufferLookup(&newTag, true, &cdb_found_index);
if (buf2 != NULL)
{
/*
- * Found it. Someone has already done what we were about to
- * do. We'll just handle this as if it were found in the
- * buffer pool in the first place. First, give up the
+ * Found it. Someone has already done what we were about
+ * to do. We'll just handle this as if it were found in
+ * the buffer pool in the first place. First, give up the
* buffer we were planning to use.
*/
TerminateBufferIO(buf, 0);
@@ -404,8 +406,9 @@ BufferAlloc(Relation reln,
if (!(buf->flags & BM_VALID))
{
/*
- * If we get here, previous attempts to read the buffer
- * must have failed ... but we shall bravely try again.
+ * If we get here, previous attempts to read the
+ * buffer must have failed ... but we shall
+ * bravely try again.
*/
*foundPtr = FALSE;
StartBufferIO(buf, true);
@@ -441,8 +444,8 @@ BufferAlloc(Relation reln,
/*
* Tell the buffer replacement strategy that we are replacing the
- * buffer content. Then rename the buffer. Clearing BM_VALID here
- * is necessary, clearing the dirtybits is just paranoia.
+ * buffer content. Then rename the buffer. Clearing BM_VALID here is
+ * necessary, clearing the dirtybits is just paranoia.
*/
StrategyReplaceBuffer(buf, &newTag, cdb_found_index, cdb_replace_index);
buf->tag = newTag;
@@ -685,9 +688,9 @@ BufferSync(int percent, int maxpages)
NBuffers);
/*
- * If called by the background writer, we are usually asked to
- * only write out some portion of dirty buffers now, to prevent
- * the IO storm at checkpoint time.
+ * If called by the background writer, we are usually asked to only
+ * write out some portion of dirty buffers now, to prevent an IO
+ * storm at checkpoint time.
*/
if (percent > 0)
{
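The throttling this comment describes amounts to clamping the dirty-buffer count twice: once by the requested percentage and once by an absolute page cap. A standalone sketch of that arithmetic (the function name and the round-up rule are assumptions, not the exact bufmgr.c code):

static int
clamp_dirty_count(int num_dirty, int percent, int maxpages)
{
    /* write only the requested fraction, rounded up so that percent > 0
     * always writes at least one page when any are dirty */
    if (percent > 0 && num_dirty > 0)
    {
        int         limit = (num_dirty * percent + 99) / 100;

        if (num_dirty > limit)
            num_dirty = limit;
    }
    /* and never more than the absolute cap, if one was given */
    if (maxpages > 0 && num_dirty > maxpages)
        num_dirty = maxpages;
    return num_dirty;
}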
@@ -702,8 +705,8 @@ BufferSync(int percent, int maxpages)
/*
* Loop over buffers to be written. Note the BufMgrLock is held at
- * loop top, but is released and reacquired within FlushBuffer,
- * so we aren't holding it long.
+ * loop top, but is released and reacquired within FlushBuffer, so we
+ * aren't holding it long.
*/
for (i = 0; i < num_buffer_dirty; i++)
{
@@ -712,8 +715,8 @@ BufferSync(int percent, int maxpages)
/*
* Check it is still the same page and still needs writing.
*
- * We can check bufHdr->cntxDirty here *without* holding any lock
- * on buffer context as long as we set this flag in access methods
+ * We can check bufHdr->cntxDirty here *without* holding any lock on
+ * buffer context as long as we set this flag in access methods
* *before* logging changes with XLogInsert(): if someone will set
* cntxDirty just after our check we don't worry because of our
* checkpoint.redo points before log record for upcoming changes
@@ -860,7 +863,7 @@ AtEOXact_Buffers(bool isCommit)
if (isCommit)
elog(WARNING,
"buffer refcount leak: [%03d] "
- "(rel=%u/%u/%u, blockNum=%u, flags=0x%x, refcount=%u %d)",
+ "(rel=%u/%u/%u, blockNum=%u, flags=0x%x, refcount=%u %d)",
i,
buf->tag.rnode.spcNode, buf->tag.rnode.dbNode,
buf->tag.rnode.relNode,
@@ -1009,12 +1012,12 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)
XLogFlush(recptr);
/*
- * Now it's safe to write buffer to disk. Note that no one else
- * should have been able to write it while we were busy with
- * locking and log flushing because caller has set the IO flag.
+ * Now it's safe to write buffer to disk. Note that no one else should
+ * have been able to write it while we were busy with locking and log
+ * flushing because caller has set the IO flag.
*
- * It would be better to clear BM_JUST_DIRTIED right here, but we'd
- * have to reacquire the BufMgrLock and it doesn't seem worth it.
+ * It would be better to clear BM_JUST_DIRTIED right here, but we'd have
+ * to reacquire the BufMgrLock and it doesn't seem worth it.
*/
smgrwrite(reln,
buf->tag.blockNum,
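The ordering enforced in FlushBuffer (XLogFlush before smgrwrite) is the write-ahead-log rule: the log must be durable up to the page's last-change LSN before the page itself may hit disk. A minimal sketch of just that ordering, with invented stand-ins (Lsn, flush_log, write_page; log_flushed_upto models how far the WAL has been fsync'd):

typedef unsigned long long Lsn;

static Lsn  log_flushed_upto;   /* WAL known durable up to here */

extern void flush_log(Lsn upto);                        /* stand-in for XLogFlush() */
extern void write_page(unsigned blkno, const char *page);   /* cf. smgrwrite() */

static void
flush_buffer(unsigned blkno, const char *page, Lsn page_lsn)
{
    /* force the log out first: a data page must never reach disk
     * ahead of the WAL records describing its latest change */
    if (page_lsn > log_flushed_upto)
    {
        flush_log(page_lsn);
        log_flushed_upto = page_lsn;
    }
    write_page(blkno, page);    /* now the data write is safe */
}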
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index 9e956c8d1dd..f562d12ccef 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.46 2004/08/29 04:12:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.47 2004/08/29 05:06:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,15 +26,15 @@
/* GUC variable: time in seconds between statistics reports */
-int DebugSharedBuffers = 0;
+int DebugSharedBuffers = 0;
/* Pointers to shared state */
-static BufferStrategyControl *StrategyControl = NULL;
-static BufferStrategyCDB *StrategyCDB = NULL;
+static BufferStrategyControl *StrategyControl = NULL;
+static BufferStrategyCDB *StrategyCDB = NULL;
/* Backend-local state about whether currently vacuuming */
-static bool strategy_hint_vacuum = false;
-static TransactionId strategy_vacuum_xid;
+static bool strategy_hint_vacuum = false;
+static TransactionId strategy_vacuum_xid;
#define T1_TARGET (StrategyControl->target_T1_size)
@@ -47,7 +47,7 @@ static TransactionId strategy_vacuum_xid;
/*
* Macro to remove a CDB from whichever list it currently is on
*/
-#define STRAT_LIST_REMOVE(cdb) \
+#define STRAT_LIST_REMOVE(cdb) \
do { \
Assert((cdb)->list >= 0 && (cdb)->list < STRAT_NUM_LISTS); \
if ((cdb)->prev < 0) \
@@ -71,7 +71,7 @@ do { \
if (StrategyControl->listTail[(l)] < 0) \
{ \
(cdb)->prev = (cdb)->next = -1; \
- StrategyControl->listHead[(l)] = \
+ StrategyControl->listHead[(l)] = \
StrategyControl->listTail[(l)] = \
((cdb) - StrategyCDB); \
} \
@@ -79,9 +79,9 @@ do { \
{ \
(cdb)->next = -1; \
(cdb)->prev = StrategyControl->listTail[(l)]; \
- StrategyCDB[StrategyControl->listTail[(l)]].next = \
+ StrategyCDB[StrategyControl->listTail[(l)]].next = \
((cdb) - StrategyCDB); \
- StrategyControl->listTail[(l)] = \
+ StrategyControl->listTail[(l)] = \
((cdb) - StrategyCDB); \
} \
StrategyControl->listSize[(l)]++; \
@@ -97,7 +97,7 @@ do { \
if (StrategyControl->listHead[(l)] < 0) \
{ \
(cdb)->prev = (cdb)->next = -1; \
- StrategyControl->listHead[(l)] = \
+ StrategyControl->listHead[(l)] = \
StrategyControl->listTail[(l)] = \
((cdb) - StrategyCDB); \
} \
@@ -105,9 +105,9 @@ do { \
{ \
(cdb)->prev = -1; \
(cdb)->next = StrategyControl->listHead[(l)]; \
- StrategyCDB[StrategyControl->listHead[(l)]].prev = \
+ StrategyCDB[StrategyControl->listHead[(l)]].prev = \
((cdb) - StrategyCDB); \
- StrategyControl->listHead[(l)] = \
+ StrategyControl->listHead[(l)] = \
((cdb) - StrategyCDB); \
} \
StrategyControl->listSize[(l)]++; \
@@ -125,9 +125,15 @@ StrategyStatsDump(void)
if (StrategyControl->stat_report + DebugSharedBuffers < now)
{
- long all_hit, b1_hit, t1_hit, t2_hit, b2_hit;
- int id, t1_clean, t2_clean;
- ErrorContextCallback *errcxtold;
+ long all_hit,
+ b1_hit,
+ t1_hit,
+ t2_hit,
+ b2_hit;
+ int id,
+ t1_clean,
+ t2_clean;
+ ErrorContextCallback *errcxtold;
id = StrategyControl->listHead[STRAT_LIST_T1];
t1_clean = 0;
@@ -149,9 +155,7 @@ StrategyStatsDump(void)
}
if (StrategyControl->num_lookup == 0)
- {
all_hit = b1_hit = t1_hit = t2_hit = b2_hit = 0;
- }
else
{
b1_hit = (StrategyControl->num_hit[STRAT_LIST_B1] * 100 /
@@ -202,7 +206,7 @@ BufferDesc *
StrategyBufferLookup(BufferTag *tagPtr, bool recheck,
int *cdb_found_index)
{
- BufferStrategyCDB *cdb;
+ BufferStrategyCDB *cdb;
/* Optional stats printout */
if (DebugSharedBuffers > 0)
@@ -235,8 +239,8 @@ StrategyBufferLookup(BufferTag *tagPtr, bool recheck,
StrategyControl->num_hit[cdb->list]++;
/*
- * If this is a T2 hit, we simply move the CDB to the
- * T2 MRU position and return the found buffer.
+ * If this is a T2 hit, we simply move the CDB to the T2 MRU position
+ * and return the found buffer.
*
* A CDB in T2 cannot have t1_vacuum set, so we needn't check. However,
* if the current process is VACUUM then it doesn't promote to MRU.
@@ -253,12 +257,12 @@ StrategyBufferLookup(BufferTag *tagPtr, bool recheck,
}
/*
- * If this is a T1 hit, we move the buffer to the T2 MRU only if another
- * transaction had read it into T1, *and* neither transaction is a VACUUM.
- * This is required because any UPDATE or DELETE in PostgreSQL does
- * multiple ReadBuffer(), first during the scan, later during the
- * heap_update() or heap_delete(). Otherwise move to T1 MRU. VACUUM
- * doesn't even get to make that happen.
+ * If this is a T1 hit, we move the buffer to the T2 MRU only if
+ * another transaction had read it into T1, *and* neither transaction
+ * is a VACUUM. This is required because any UPDATE or DELETE in
+ * PostgreSQL does multiple ReadBuffer(), first during the scan, later
+ * during the heap_update() or heap_delete(). Otherwise move to T1
+ * MRU. VACUUM doesn't even get to make that happen.
*/
if (cdb->list == STRAT_LIST_T1)
{
@@ -274,10 +278,11 @@ StrategyBufferLookup(BufferTag *tagPtr, bool recheck,
{
STRAT_LIST_REMOVE(cdb);
STRAT_MRU_INSERT(cdb, STRAT_LIST_T1);
+
/*
- * If a non-VACUUM process references a page recently loaded
- * by VACUUM, clear the stigma; the state will now be the
- * same as if this process loaded it originally.
+ * If a non-VACUUM process references a page recently
+ * loaded by VACUUM, clear the stigma; the state will now
+ * be the same as if this process loaded it originally.
*/
if (cdb->t1_vacuum)
{
@@ -297,9 +302,9 @@ StrategyBufferLookup(BufferTag *tagPtr, bool recheck,
* adjust the T1target.
*
* Now for this really to end up as a B1 or B2 cache hit, we must have
- * been flushing for quite some time as the block not only must have been
- * read, but also traveled through the queue and evicted from the T cache
- * again already.
+ * been flushing for quite some time as the block not only must have
+ * been read, but also traveled through the queue and evicted from the
+ * T cache again already.
*
* VACUUM re-reads shouldn't adjust the target either.
*/
@@ -307,26 +312,26 @@ StrategyBufferLookup(BufferTag *tagPtr, bool recheck,
return NULL;
/*
- * Adjust the target size of the T1 cache depending on if this is
- * a B1 or B2 hit.
+ * Adjust the target size of the T1 cache depending on whether this
+ * is a B1 or B2 hit.
*/
switch (cdb->list)
{
case STRAT_LIST_B1:
+
/*
- * B1 hit means that the T1 cache is probably too
- * small. Adjust the T1 target size and continue
- * below.
+ * B1 hit means that the T1 cache is probably too small.
+ * Adjust the T1 target size and continue below.
*/
T1_TARGET = Min(T1_TARGET + Max(B2_LENGTH / B1_LENGTH, 1),
NBuffers);
break;
case STRAT_LIST_B2:
- /*
- * B2 hit means that the T2 cache is probably too
- * small. Adjust the T1 target size and continue
- * below.
+
+ /*
+ * B2 hit means that the T2 cache is probably too small.
+ * Adjust the T1 target size and continue below.
*/
T1_TARGET = Max(T1_TARGET - Max(B1_LENGTH / B2_LENGTH, 1), 0);
break;
@@ -337,8 +342,8 @@ StrategyBufferLookup(BufferTag *tagPtr, bool recheck,
}
/*
- * Even though we had seen the block in the past, its data is
- * not currently in memory ... cache miss to the bufmgr.
+ * Even though we had seen the block in the past, its data is not
+ * currently in memory ... cache miss to the bufmgr.
*/
return NULL;
}
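The two T1_TARGET updates above are ARC's adaptation loop in full: a B1 hit means recency was undervalued, so the T1 target grows; a B2 hit means frequency was undervalued, so it shrinks; each step is sized by the ratio of the ghost lists. A minimal sketch of just that arithmetic (the names are illustrative, and Min/Max are expanded by hand):

#include <stdbool.h>

static int  t1_target;          /* target size of T1, 0 .. n_buffers */

static int
imax(int a, int b)
{
    return (a > b) ? a : b;
}

static int
imin(int a, int b)
{
    return (a < b) ? a : b;
}

/*
 * Apply the ghost-list feedback from StrategyBufferLookup().  A B1 hit
 * implies B1 is nonempty (and likewise B2 for a B2 hit), so the
 * divisions below are safe.
 */
static void
adapt_t1_target(bool b1_hit, int b1_len, int b2_len, int n_buffers)
{
    if (b1_hit)
        /* T1 was probably too small: grow it, faster when B2 >> B1 */
        t1_target = imin(t1_target + imax(b2_len / b1_len, 1), n_buffers);
    else
        /* B2 hit: T2 was probably too small, so shrink the T1 target */
        t1_target = imax(t1_target - imax(b1_len / b2_len, 1), 0);
}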
@@ -349,7 +354,7 @@ StrategyBufferLookup(BufferTag *tagPtr, bool recheck,
*
* Called by the bufmgr to get the next candidate buffer to use in
* BufferAlloc(). The only hard requirement BufferAlloc() has is that
- * this buffer must not currently be pinned.
+ * this buffer must not currently be pinned.
*
* *cdb_replace_index is set to the index of the candidate CDB, or -1 if
* none (meaning we are using a previously free buffer). This is not
@@ -359,8 +364,8 @@ StrategyBufferLookup(BufferTag *tagPtr, bool recheck,
BufferDesc *
StrategyGetBuffer(int *cdb_replace_index)
{
- int cdb_id;
- BufferDesc *buf;
+ int cdb_id;
+ BufferDesc *buf;
if (StrategyControl->listFreeBuffers < 0)
{
@@ -452,12 +457,12 @@ StrategyGetBuffer(int *cdb_replace_index)
/* There is a completely free buffer available - take it */
/*
- * Note: This code uses the side effect that a free buffer
- * can never be pinned or dirty and therefore the call to
+ * Note: This code uses the side effect that a free buffer can
+ * never be pinned or dirty and therefore the call to
* StrategyReplaceBuffer() will happen without the bufmgr
- * releasing the bufmgr-lock in the meantime. That means,
- * that there will never be any reason to recheck. Otherwise
- * we would leak shared buffers here!
+ * releasing the bufmgr-lock in the meantime. That means that
+ * there will never be any reason to recheck. Otherwise we would
+ * leak shared buffers here!
*/
*cdb_replace_index = -1;
buf = &BufferDescriptors[StrategyControl->listFreeBuffers];
@@ -493,8 +498,8 @@ void
StrategyReplaceBuffer(BufferDesc *buf, BufferTag *newTag,
int cdb_found_index, int cdb_replace_index)
{
- BufferStrategyCDB *cdb_found;
- BufferStrategyCDB *cdb_replace;
+ BufferStrategyCDB *cdb_found;
+ BufferStrategyCDB *cdb_replace;
if (cdb_found_index >= 0)
{
@@ -504,7 +509,7 @@ StrategyReplaceBuffer(BufferDesc *buf, BufferTag *newTag,
/* Assert that the buffer remembered in cdb_found is the one */
/* the buffer manager is currently faulting in */
Assert(BUFFERTAGS_EQUAL(cdb_found->buf_tag, *newTag));
-
+
if (cdb_replace_index >= 0)
{
/* We are satisfying it with an evicted T buffer */
@@ -512,17 +517,18 @@ StrategyReplaceBuffer(BufferDesc *buf, BufferTag *newTag,
/* Assert that the buffer remembered in cdb_replace is */
/* the one the buffer manager has just evicted */
- Assert(cdb_replace->list == STRAT_LIST_T1 ||
+ Assert(cdb_replace->list == STRAT_LIST_T1 ||
cdb_replace->list == STRAT_LIST_T2);
Assert(cdb_replace->buf_id == buf->buf_id);
Assert(BUFFERTAGS_EQUAL(cdb_replace->buf_tag, buf->tag));
/*
- * Under normal circumstances we move the evicted T list entry to
- * the corresponding B list. However, T1 entries that exist only
- * because of VACUUM are just thrown into the unused list instead.
- * We don't expect them to be touched again by the VACUUM, and if
- * we put them into B1 then VACUUM would skew T1_target adjusting.
+ * Under normal circumstances we move the evicted T list entry
+ * to the corresponding B list. However, T1 entries that
+ * exist only because of VACUUM are just thrown into the
+ * unused list instead. We don't expect them to be touched
+ * again by the VACUUM, and if we put them into B1 then VACUUM
+ * would skew T1_target adjusting.
*/
if (cdb_replace->t1_vacuum)
{
@@ -560,8 +566,8 @@ StrategyReplaceBuffer(BufferDesc *buf, BufferTag *newTag,
else
{
/*
- * This was a complete cache miss, so we need to create
- * a new CDB. The goal is to keep T1len+B1len <= c.
+ * This was a complete cache miss, so we need to create a new CDB.
+ * The goal is to keep T1len+B1len <= c.
*/
if (B1_LENGTH > 0 && (T1_LENGTH + B1_LENGTH) >= NBuffers)
{
@@ -600,12 +606,12 @@ StrategyReplaceBuffer(BufferDesc *buf, BufferTag *newTag,
if (cdb_replace_index >= 0)
{
/*
- * The buffer was formerly in a T list, move its CDB
- * to the corresponding B list
+ * The buffer was formerly in a T list, move its CDB to the
+ * corresponding B list
*/
cdb_replace = &StrategyCDB[cdb_replace_index];
- Assert(cdb_replace->list == STRAT_LIST_T1 ||
+ Assert(cdb_replace->list == STRAT_LIST_T1 ||
cdb_replace->list == STRAT_LIST_T2);
Assert(cdb_replace->buf_id == buf->buf_id);
Assert(BUFFERTAGS_EQUAL(cdb_replace->buf_tag, buf->tag));
@@ -651,9 +657,9 @@ StrategyReplaceBuffer(BufferDesc *buf, BufferTag *newTag,
STRAT_MRU_INSERT(cdb_found, STRAT_LIST_T1);
/*
- * Remember the Xid when this buffer went onto T1 to avoid
- * a single UPDATE promoting a newcomer straight into T2.
- * Also remember if it was loaded for VACUUM.
+ * Remember the Xid when this buffer went onto T1 to avoid a
+ * single UPDATE promoting a newcomer straight into T2. Also
+ * remember if it was loaded for VACUUM.
*/
cdb_found->t1_xid = GetCurrentTransactionId();
cdb_found->t1_vacuum = strategy_hint_vacuum;
@@ -671,8 +677,8 @@ StrategyReplaceBuffer(BufferDesc *buf, BufferTag *newTag,
void
StrategyInvalidateBuffer(BufferDesc *buf)
{
- int cdb_id;
- BufferStrategyCDB *cdb;
+ int cdb_id;
+ BufferStrategyCDB *cdb;
/* The buffer cannot be dirty or pinned */
Assert(!(buf->flags & BM_DIRTY) || !(buf->flags & BM_VALID));
@@ -687,15 +693,15 @@ StrategyInvalidateBuffer(BufferDesc *buf)
cdb = &StrategyCDB[cdb_id];
/*
- * Remove the CDB from the hashtable and the ARC queue it is
- * currently on.
+ * Remove the CDB from the hashtable and the ARC queue it is currently
+ * on.
*/
BufTableDelete(&(cdb->buf_tag));
STRAT_LIST_REMOVE(cdb);
/*
- * Clear out the CDB's buffer tag and association with the buffer
- * and add it to the list of unused CDB's
+ * Clear out the CDB's buffer tag and association with the buffer and
+ * add it to the list of unused CDBs
*/
CLEAR_BUFFERTAG(cdb->buf_tag);
cdb->buf_id = -1;
@@ -703,9 +709,9 @@ StrategyInvalidateBuffer(BufferDesc *buf)
StrategyControl->listUnusedCDB = cdb_id;
/*
- * Clear out the buffer's tag and add it to the list of
- * currently unused buffers. We must do this to ensure that linear
- * scans of the buffer array don't think the buffer is valid.
+ * Clear out the buffer's tag and add it to the list of currently
+ * unused buffers. We must do this to ensure that linear scans of the
+ * buffer array don't think the buffer is valid.
*/
CLEAR_BUFFERTAG(buf->tag);
buf->flags &= ~(BM_VALID | BM_DIRTY);
@@ -743,17 +749,17 @@ int
StrategyDirtyBufferList(BufferDesc **buffers, BufferTag *buftags,
int max_buffers)
{
- int num_buffer_dirty = 0;
- int cdb_id_t1;
- int cdb_id_t2;
- int buf_id;
- BufferDesc *buf;
+ int num_buffer_dirty = 0;
+ int cdb_id_t1;
+ int cdb_id_t2;
+ int buf_id;
+ BufferDesc *buf;
/*
- * Traverse the T1 and T2 list LRU to MRU in "parallel"
- * and add all dirty buffers found in that order to the list.
- * The ARC strategy keeps all used buffers including pinned ones
- * in the T1 or T2 list. So we cannot miss any dirty buffers.
+ * Traverse the T1 and T2 list LRU to MRU in "parallel" and add all
+ * dirty buffers found in that order to the list. The ARC strategy
+ * keeps all used buffers including pinned ones in the T1 or T2 list.
+ * So we cannot miss any dirty buffers.
*/
cdb_id_t1 = StrategyControl->listHead[STRAT_LIST_T1];
cdb_id_t2 = StrategyControl->listHead[STRAT_LIST_T2];
@@ -815,8 +821,8 @@ StrategyDirtyBufferList(BufferDesc **buffers, BufferTag *buftags,
void
StrategyInitialize(bool init)
{
- bool found;
- int i;
+ bool found;
+ int i;
/*
* Initialize the shared CDB lookup hashtable
@@ -841,14 +847,14 @@ StrategyInitialize(bool init)
Assert(init);
/*
- * Grab the whole linked list of free buffers for our strategy.
- * We assume it was previously set up by InitBufferPool().
+ * Grab the whole linked list of free buffers for our strategy. We
+ * assume it was previously set up by InitBufferPool().
*/
StrategyControl->listFreeBuffers = 0;
/*
- * We start off with a target T1 list size of
- * half the available cache blocks.
+ * We start off with a target T1 list size of half the available
+ * cache blocks.
*/
StrategyControl->target_T1_size = NBuffers / 2;
@@ -862,7 +868,7 @@ StrategyInitialize(bool init)
StrategyControl->listSize[i] = 0;
StrategyControl->num_hit[i] = 0;
}
- StrategyControl->num_lookup = 0;
+ StrategyControl->num_lookup = 0;
StrategyControl->stat_report = 0;
/*
@@ -879,7 +885,5 @@ StrategyInitialize(bool init)
StrategyControl->listUnusedCDB = 0;
}
else
- {
Assert(!init);
- }
}
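The "parallel" LRU-to-MRU walk that StrategyDirtyBufferList describes alternates one step down T1 with one step down T2, so dirty pages come out roughly in eviction order. A reduced sketch over index-linked lists (MiniCdb, the dirty flag, and collect_dirty are invented for illustration):

#include <stdbool.h>

#define INVALID_CDB (-1)

typedef struct
{
    int         next;           /* next entry toward the MRU end; -1 ends the list */
    bool        dirty;
} MiniCdb;

/*
 * Walk two index-linked lists from their LRU heads in alternation,
 * collecting dirty entries until both lists are exhausted or the
 * output array is full.  Returns the number collected.
 */
static int
collect_dirty(const MiniCdb *cdbs, int head_t1, int head_t2,
              int *out, int max_out)
{
    int         n = 0;

    while ((head_t1 != INVALID_CDB || head_t2 != INVALID_CDB) && n < max_out)
    {
        if (head_t1 != INVALID_CDB)
        {
            if (cdbs[head_t1].dirty)
                out[n++] = head_t1;
            head_t1 = cdbs[head_t1].next;
        }
        if (head_t2 != INVALID_CDB && n < max_out)
        {
            if (cdbs[head_t2].dirty)
                out[n++] = head_t2;
            head_t2 = cdbs[head_t2].next;
        }
    }
    return n;
}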
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 703bd9adf5b..6ccc18ddad3 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.58 2004/08/29 04:12:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.59 2004/08/29 05:06:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,7 +39,7 @@ static int nextFreeLocalBuf = 0;
* allocate a local buffer. We do round robin allocation for now.
*
* API is similar to bufmgr.c's BufferAlloc, except that we do not need
- * to have the BufMgrLock since this is all local. Also, IO_IN_PROGRESS
+ * to have the BufMgrLock since this is all local. Also, IO_IN_PROGRESS
* does not get set.
*/
BufferDesc *
@@ -64,7 +64,7 @@ LocalBufferAlloc(Relation reln, BlockNumber blockNum, bool *foundPtr)
LocalRefCount[i]++;
ResourceOwnerRememberBuffer(CurrentResourceOwner,
- BufferDescriptorGetBuffer(bufHdr));
+ BufferDescriptorGetBuffer(bufHdr));
if (bufHdr->flags & BM_VALID)
*foundPtr = TRUE;
else
@@ -92,7 +92,7 @@ LocalBufferAlloc(Relation reln, BlockNumber blockNum, bool *foundPtr)
bufHdr = &LocalBufferDescriptors[b];
LocalRefCount[b]++;
ResourceOwnerRememberBuffer(CurrentResourceOwner,
- BufferDescriptorGetBuffer(bufHdr));
+ BufferDescriptorGetBuffer(bufHdr));
nextFreeLocalBuf = (b + 1) % NLocBuffer;
break;
}
@@ -245,7 +245,7 @@ AtEOXact_LocalBuffers(bool isCommit)
"local buffer leak: [%03d] (rel=%u/%u/%u, blockNum=%u, flags=0x%x, refcount=%u %d)",
i,
buf->tag.rnode.spcNode, buf->tag.rnode.dbNode,
- buf->tag.rnode.relNode, buf->tag.blockNum, buf->flags,
+ buf->tag.rnode.relNode, buf->tag.blockNum, buf->flags,
buf->refcount, LocalRefCount[i]);
LocalRefCount[i] = 0;
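The round-robin allocation that localbuf.c mentions is a cursor that wraps around the local buffer array, skipping entries that are still pinned, which is exactly the role nextFreeLocalBuf plays above. A condensed sketch (the array size, refcount array, and -1 failure return are simplifications):

#define N_LOC_BUFFER 64                 /* stand-in for NLocBuffer */

static int  loc_ref_count[N_LOC_BUFFER];    /* cf. LocalRefCount[] */
static int  next_free_local_buf = 0;        /* cf. nextFreeLocalBuf */

/*
 * Pick the next unpinned local buffer, advancing the round-robin
 * cursor.  Returns its index, or -1 if every local buffer is pinned.
 */
static int
local_buffer_alloc(void)
{
    int         tries;

    for (tries = 0; tries < N_LOC_BUFFER; tries++)
    {
        int         b = next_free_local_buf;

        next_free_local_buf = (b + 1) % N_LOC_BUFFER;
        if (loc_ref_count[b] == 0)
        {
            loc_ref_count[b]++;         /* pin it for the caller */
            return b;
        }
    }
    return -1;
}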