Diffstat (limited to 'src/include')
-rw-r--r--  src/include/access/tableam.h     28
-rw-r--r--  src/include/executor/tuptable.h  38
2 files changed, 9 insertions, 57 deletions
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index 7159365e652..50ae053f461 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -530,8 +530,7 @@ typedef struct TableAmRoutine
Snapshot crosscheck,
bool wait,
TM_FailureData *tmfd,
- bool changingPart,
- LazyTupleTableSlot *lockedSlot);
+ bool changingPart);
/* see table_tuple_update() for reference about parameters */
TM_Result (*tuple_update) (Relation rel,
@@ -543,8 +542,7 @@ typedef struct TableAmRoutine
bool wait,
TM_FailureData *tmfd,
LockTupleMode *lockmode,
- TU_UpdateIndexes *update_indexes,
- LazyTupleTableSlot *lockedSlot);
+ TU_UpdateIndexes *update_indexes);
/* see table_tuple_lock() for reference about parameters */
TM_Result (*tuple_lock) (Relation rel,
@@ -1459,7 +1457,7 @@ table_multi_insert(Relation rel, TupleTableSlot **slots, int nslots,
}
/*
- * Delete a tuple (or lock last tuple version if lockedSlot is given).
+ * Delete a tuple.
*
* NB: do not call this directly unless prepared to deal with
* concurrent-update conditions. Use simple_table_tuple_delete instead.
@@ -1475,8 +1473,6 @@ table_multi_insert(Relation rel, TupleTableSlot **slots, int nslots,
* tmfd - filled in failure cases (see below)
* changingPart - true iff the tuple is being moved to another partition
* table due to an update of the partition key. Otherwise, false.
- * lockedSlot - lazy slot to save the locked tuple if should lock the last
- * row version during the concurrent update. NULL if not needed.
*
* Normal, successful return value is TM_Ok, which means we did actually
* delete it. Failure return codes are TM_SelfModified, TM_Updated, and
@@ -1489,17 +1485,15 @@ table_multi_insert(Relation rel, TupleTableSlot **slots, int nslots,
static inline TM_Result
table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid,
Snapshot snapshot, Snapshot crosscheck, bool wait,
- TM_FailureData *tmfd, bool changingPart,
- LazyTupleTableSlot *lockedSlot)
+ TM_FailureData *tmfd, bool changingPart)
{
return rel->rd_tableam->tuple_delete(rel, tid, cid,
snapshot, crosscheck,
- wait, tmfd, changingPart,
- lockedSlot);
+ wait, tmfd, changingPart);
}
/*
- * Update a tuple (or lock last tuple version if lockedSlot is given).
+ * Update a tuple.
*
* NB: do not call this directly unless you are prepared to deal with
* concurrent-update conditions. Use simple_table_tuple_update instead.
@@ -1517,9 +1511,7 @@ table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid,
* lockmode - filled with lock mode acquired on tuple
* update_indexes - in success cases this is set to true if new index entries
* are required for this tuple
- * lockedSlot - lazy slot to save the locked tuple if should lock the last
- * row version during the concurrent update. NULL if not needed.
-
+ *
* Normal, successful return value is TM_Ok, which means we did actually
* update it. Failure return codes are TM_SelfModified, TM_Updated, and
* TM_BeingModified (the last only possible if wait == false).
@@ -1538,14 +1530,12 @@ static inline TM_Result
table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot,
CommandId cid, Snapshot snapshot, Snapshot crosscheck,
bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode,
- TU_UpdateIndexes *update_indexes,
- LazyTupleTableSlot *lockedSlot)
+ TU_UpdateIndexes *update_indexes)
{
return rel->rd_tableam->tuple_update(rel, otid, slot,
cid, snapshot, crosscheck,
wait, tmfd,
- lockmode, update_indexes,
- lockedSlot);
+ lockmode, update_indexes);
}
/*
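For context, a minimal sketch of how a caller looks after this revert; the variables rel, tid, otid, slot and estate are hypothetical and not taken from this diff, only the wrapper signatures shown above are.

	/*
	 * Illustrative sketch only: rel, tid, otid, slot and estate are
	 * hypothetical caller-side variables, not part of this diff.
	 */
	TM_FailureData tmfd;
	LockTupleMode lockmode;
	TU_UpdateIndexes update_indexes;
	TM_Result result;

	/* Delete: the trailing LazyTupleTableSlot argument is gone. */
	result = table_tuple_delete(rel, tid,
								estate->es_output_cid,
								estate->es_snapshot,
								estate->es_crosscheck_snapshot,
								true /* wait */ ,
								&tmfd,
								false /* changingPart */ );

	/* Update: likewise back to the original parameter list. */
	result = table_tuple_update(rel, otid, slot,
								estate->es_output_cid,
								estate->es_snapshot,
								estate->es_crosscheck_snapshot,
								true /* wait */ ,
								&tmfd, &lockmode, &update_indexes);
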
diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h
index 2e13ecc3ffe..ff64b7cb98f 100644
--- a/src/include/executor/tuptable.h
+++ b/src/include/executor/tuptable.h
@@ -300,44 +300,6 @@ typedef struct MinimalTupleTableSlot
#define TupIsNull(slot) \
((slot) == NULL || TTS_EMPTY(slot))
-/*----------
- * LazyTupleTableSlot -- a lazy version of TupleTableSlot.
- *
- * Sometimes caller might need to pass to the function a slot, which most
- * likely will reain undemanded. Preallocating such slot would be a waste of
- * resources in the majority of cases. Lazy slot is aimed to resolve this
- * problem. It is basically a promise to allocate the slot once it's needed.
- * Once callee needs the slot, it could get it using LAZY_TTS_EVAL(lazySlot)
- * macro.
- */
-typedef struct
-{
- TupleTableSlot *slot; /* cached slot or NULL if not yet allocated */
- TupleTableSlot *(*getSlot) (void *arg); /* callback for slot allocation */
- void *getSlotArg; /* argument for the callback above */
-} LazyTupleTableSlot;
-
-/*
- * A constructor for the lazy slot.
- */
-#define MAKE_LAZY_TTS(lazySlot, callback, arg) \
- do { \
- (lazySlot)->slot = NULL; \
- (lazySlot)->getSlot = callback; \
- (lazySlot)->getSlotArg = arg; \
- } while (false)
-
-/*
- * Macro for lazy slot evaluation. NULL lazy slot evaluates to NULL slot.
- * Cached version is used if present. Use the callback otherwise.
- */
-#define LAZY_TTS_EVAL(lazySlot) \
- ((lazySlot) ? \
- ((lazySlot)->slot ? \
- (lazySlot)->slot : \
- ((lazySlot)->slot = (lazySlot)->getSlot((lazySlot)->getSlotArg))) : \
- NULL)
-
/* in executor/execTuples.c */
extern TupleTableSlot *MakeTupleTableSlot(TupleDesc tupleDesc,
const TupleTableSlotOps *tts_ops);
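
For reference, a sketch of how the removed lazy-slot API was intended to be used; the callback get_locked_slot and the surrounding function are hypothetical, only the MAKE_LAZY_TTS and LAZY_TTS_EVAL macros come from the code deleted above.

	/* Hypothetical callback: allocate a slot only when first demanded. */
	static TupleTableSlot *
	get_locked_slot(void *arg)
	{
		return table_slot_create((Relation) arg, NULL);
	}

	/* Hypothetical caller of the removed API. */
	static void
	lazy_slot_example(Relation rel)
	{
		LazyTupleTableSlot lazySlot;
		TupleTableSlot *slot;

		/* A promise to allocate; no slot is created yet. */
		MAKE_LAZY_TTS(&lazySlot, get_locked_slot, rel);

		/* Only a callee that actually needs the slot pays for it;
		 * the first evaluation allocates, later ones reuse the cache. */
		slot = LAZY_TTS_EVAL(&lazySlot);
		(void) slot;
	}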