Diffstat (limited to 'src/backend/access')
 src/backend/access/heap/heapam.c | 36
 src/backend/access/heap/hio.c    | 27
 2 files changed, 48 insertions(+), 15 deletions(-)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 74f76c1d16a..843b2909ef2 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.194 2005/06/08 15:50:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.195 2005/06/20 18:37:01 tgl Exp $
*
*
* INTERFACE ROUTINES
@@ -1034,9 +1034,20 @@ heap_get_latest_tid(Relation relation,
*
* The new tuple is stamped with current transaction ID and the specified
* command ID.
+ *
+ * If use_wal is false, the new tuple is not logged in WAL, even for a
+ * non-temp relation. Safe usage of this behavior requires that we arrange
+ * that all new tuples go into new pages not containing any tuples from other
+ * transactions, that the relation gets fsync'd before commit, and that the
+ * transaction emits at least one WAL record to ensure RecordTransactionCommit
+ * will decide to WAL-log the commit.
+ *
+ * use_fsm is passed directly to RelationGetBufferForTuple, which see for
+ * more info.
*/
Oid
-heap_insert(Relation relation, HeapTuple tup, CommandId cid)
+heap_insert(Relation relation, HeapTuple tup, CommandId cid,
+ bool use_wal, bool use_fsm)
{
TransactionId xid = GetCurrentTransactionId();
Buffer buffer;
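A usage sketch for the new parameters (illustrative only; the wrapper name is hypothetical and not part of this patch). It assumes the caller satisfies the constraints in the header comment above: the relation is new in this transaction, all insertions go into fresh pages, and the relation is fsync'd before commit.

/* Hypothetical wrapper for bulk loading into a just-created relation. */
static Oid
bulk_heap_insert(Relation rel, HeapTuple tup)
{
	/* use_wal = false, use_fsm = false: skip WAL and FSM lookups */
	return heap_insert(rel, tup, GetCurrentCommandId(), false, false);
}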
@@ -1086,7 +1097,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
heap_tuple_toast_attrs(relation, tup, NULL);
/* Find buffer to insert this tuple into */
- buffer = RelationGetBufferForTuple(relation, tup->t_len, InvalidBuffer);
+ buffer = RelationGetBufferForTuple(relation, tup->t_len,
+ InvalidBuffer, use_fsm);
/* NO EREPORT(ERROR) from here till changes are logged */
START_CRIT_SECTION();
@@ -1096,7 +1108,12 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
pgstat_count_heap_insert(&relation->pgstat_info);
/* XLOG stuff */
- if (!relation->rd_istemp)
+ if (relation->rd_istemp)
+ {
+ /* No XLOG record, but still need to flag that XID exists on disk */
+ MyXactMadeTempRelUpdate = true;
+ }
+ else if (use_wal)
{
xl_heap_insert xlrec;
xl_heap_header xlhdr;
@@ -1151,11 +1168,6 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
}
- else
- {
- /* No XLOG record, but still need to flag that XID exists on disk */
- MyXactMadeTempRelUpdate = true;
- }
END_CRIT_SECTION();
@@ -1183,7 +1195,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
Oid
simple_heap_insert(Relation relation, HeapTuple tup)
{
- return heap_insert(relation, tup, GetCurrentCommandId());
+ return heap_insert(relation, tup, GetCurrentCommandId(), true, true);
}
/*
@@ -1743,7 +1755,7 @@ l2:
{
/* Assume there's no chance to put newtup on same page. */
newbuf = RelationGetBufferForTuple(relation, newtup->t_len,
- buffer);
+ buffer, true);
}
else
{
@@ -1760,7 +1772,7 @@ l2:
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
newbuf = RelationGetBufferForTuple(relation, newtup->t_len,
- buffer);
+ buffer, true);
}
else
{
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 583bb209336..fc1b0afd21e 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.56 2005/05/07 21:32:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.57 2005/06/20 18:37:01 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,12 +79,26 @@ RelationPutHeapTuple(Relation relation,
* happen if space is freed in that page after heap_update finds there's not
* enough there). In that case, the page will be pinned and locked only once.
*
+ * If use_fsm is true (the normal case), we use FSM to help us find free
+ * space. If use_fsm is false, we always append a new empty page to the
+ * end of the relation if the tuple won't fit on the current target page.
+ * This can save some cycles when we know the relation is new and doesn't
+ * contain useful amounts of free space.
+ *
+ * The use_fsm = false case is also useful for non-WAL-logged additions to a
+ * relation, if the caller holds exclusive lock and is careful to invalidate
+ * relation->rd_targblock before the first insertion --- that ensures that
+ * all insertions will occur into newly added pages and not be intermixed
+ * with tuples from other transactions. That way, a crash can't risk losing
+ * any committed data of other transactions. (See heap_insert's comments
+ * for additional constraints needed for safe usage of this behavior.)
+ *
* ereport(ERROR) is allowed here, so this routine *must* be called
* before any (unlogged) changes are made in buffer pool.
*/
Buffer
RelationGetBufferForTuple(Relation relation, Size len,
- Buffer otherBuffer)
+ Buffer otherBuffer, bool use_fsm)
{
Buffer buffer = InvalidBuffer;
Page pageHeader;
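A sketch of the caller-side preparation the comment above calls for (hedged: the function name is hypothetical, taking the lock via LockRelation is one illustrative way to meet the exclusive-lock requirement, and error handling is elided):

/* Hypothetical setup before a run of use_fsm = false insertions. */
static void
prepare_for_unlogged_inserts(Relation rel)
{
	LockRelation(rel, ExclusiveLock);		/* keep other backends out */
	rel->rd_targblock = InvalidBlockNumber;	/* forget any cached target page */
}

After this, every RelationGetBufferForTuple(rel, len, InvalidBuffer, false) call extends the relation, so no page containing other transactions' tuples is ever written.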
@@ -121,11 +135,14 @@ RelationGetBufferForTuple(Relation relation, Size len,
* on each page that proves not to be suitable.) If the FSM has no
* record of a page with enough free space, we give up and extend the
* relation.
+ *
+ * When use_fsm is false, we either put the tuple onto the existing
+ * target page or extend the relation.
*/
targetBlock = relation->rd_targblock;
- if (targetBlock == InvalidBlockNumber)
+ if (targetBlock == InvalidBlockNumber && use_fsm)
{
/*
* We have no cached target page, so ask the FSM for an initial
@@ -209,6 +226,10 @@ RelationGetBufferForTuple(Relation relation, Size len,
ReleaseBuffer(buffer);
}
+ /* Without FSM, always fall out of the loop and extend */
+ if (!use_fsm)
+ break;
+
/*
* Update FSM as to condition of this page, and ask for another
* page to try.
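Taken together, these hunks reduce the use_fsm = false path to at most one try of the cached target page, followed by relation extension. Condensed (a paraphrase of the loop above, not a verbatim excerpt; locking and error handling elided):

/* Condensed control flow when use_fsm is false (paraphrase). */
targetBlock = relation->rd_targblock;		/* FSM is never consulted */
if (targetBlock != InvalidBlockNumber)
{
	/* try the cached target page exactly once */
	if (len <= PageGetFreeSpace(pageHeader))
		return buffer;			/* it fits: done */
	ReleaseBuffer(buffer);			/* it doesn't: give up on it */
}
/* fall through: append a fresh, empty page to the relation */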