Diffstat (limited to 'src/backend/commands/copy.c')
-rw-r--r--  src/backend/commands/copy.c  24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index b00891ffd2b..a8ff3049094 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -90,7 +90,7 @@ typedef enum EolType
*/
typedef enum CopyInsertMethod
{
- CIM_SINGLE, /* use table_insert or fdw routine */
+ CIM_SINGLE, /* use table_tuple_insert or fdw routine */
CIM_MULTI, /* always use table_multi_insert */
CIM_MULTI_CONDITIONAL /* use table_multi_insert only if valid */
} CopyInsertMethod;
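
The enum above selects between the single-row and batched insert paths. As a
minimal sketch (not the actual copy.c control flow), the three values map onto
table AM calls roughly as follows; rel, slot, slots, nslots, mycid, ti_options,
and bistate stand in for state that CopyFrom() maintains:

    switch (insertMethod)
    {
        case CIM_SINGLE:
            /* insert one tuple at a time via the renamed wrapper */
            table_tuple_insert(rel, slot, mycid, ti_options, bistate);
            break;

        case CIM_MULTI:
        case CIM_MULTI_CONDITIONAL:
            /* buffer tuples into slots[], then flush them in one call */
            table_multi_insert(rel, slots, nslots, mycid, ti_options, bistate);
            break;
    }
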
@@ -2664,7 +2664,7 @@ CopyFrom(CopyState cstate)
PartitionTupleRouting *proute = NULL;
ErrorContextCallback errcallback;
CommandId mycid = GetCurrentCommandId(true);
- int ti_options = 0; /* start with default table_insert options */
+ int ti_options = 0; /* start with default options for insert */
BulkInsertState bistate = NULL;
CopyInsertMethod insertMethod;
CopyMultiInsertInfo multiInsertInfo = {0}; /* pacify compiler */
@@ -2737,11 +2737,11 @@ CopyFrom(CopyState cstate)
* FSM for free space is a waste of time, even if we must use WAL because
* of archiving. This could possibly be wrong, but it's unlikely.
*
- * The comments for table_insert and RelationGetBufferForTuple specify that
- * skipping WAL logging is only safe if we ensure that our tuples do not
- * go into pages containing tuples from any other transactions --- but this
- * must be the case if we have a new table or new relfilenode, so we need
- * no additional work to enforce that.
+ * The comments for table_tuple_insert and RelationGetBufferForTuple
+ * specify that skipping WAL logging is only safe if we ensure that our
+ * tuples do not go into pages containing tuples from any other
+ * transactions --- but this must be the case if we have a new table or
+ * new relfilenode, so we need no additional work to enforce that.
*
* We currently don't support this optimization if the COPY target is a
* partitioned table as we currently only lazily initialize partition
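
For reference, a sketch of the test this comment documents, assuming the
PG12-era relcache fields and TABLE_INSERT_* option flags (surrounding checks
in CopyFrom() omitted):

    if (cstate->rel->rd_createSubid != InvalidSubTransactionId ||
        cstate->rel->rd_newRelfilenodeSubid != InvalidSubTransactionId)
    {
        /* table or relfilenode is new in this (sub)transaction */
        ti_options |= TABLE_INSERT_SKIP_FSM;
        if (!XLogIsNeeded())
            ti_options |= TABLE_INSERT_SKIP_WAL;
    }
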
@@ -2888,9 +2888,9 @@ CopyFrom(CopyState cstate)
/*
* It's generally more efficient to prepare a bunch of tuples for
* insertion, and insert them in one table_multi_insert() call, than call
- * table_insert() separately for every tuple. However, there are a number
- * of reasons why we might not be able to do this. These are explained
- * below.
+ * table_tuple_insert() separately for every tuple. However, there are a
+ * number of reasons why we might not be able to do this. These are
+ * explained below.
*/
if (resultRelInfo->ri_TrigDesc != NULL &&
(resultRelInfo->ri_TrigDesc->trig_insert_before_row ||
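
The condition above is cut off at the hunk boundary; a sketch of the shape of
the test, with trig_insert_instead_row assumed as the second disjunct (the
real function also rules out several other cases, e.g. volatile default
expressions and foreign-table targets):

    if (resultRelInfo->ri_TrigDesc != NULL &&
        (resultRelInfo->ri_TrigDesc->trig_insert_before_row ||
         resultRelInfo->ri_TrigDesc->trig_insert_instead_row))
    {
        /*
         * BEFORE/INSTEAD OF row triggers can modify or redirect each
         * tuple, so rows must go through table_tuple_insert() one at a
         * time rather than being buffered for table_multi_insert().
         */
        insertMethod = CIM_SINGLE;
    }
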
@@ -3286,8 +3286,8 @@ CopyFrom(CopyState cstate)
else
{
/* OK, store the tuple and create index entries for it */
- table_insert(resultRelInfo->ri_RelationDesc, myslot,
- mycid, ti_options, bistate);
+ table_tuple_insert(resultRelInfo->ri_RelationDesc,
+ myslot, mycid, ti_options, bistate);
if (resultRelInfo->ri_NumIndices > 0)
recheckIndexes = ExecInsertIndexTuples(myslot,
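
The excerpt ends mid-call. As a self-contained sketch of the division of labor
in this branch (the ExecInsertIndexTuples() arguments shown are the PG12-era
ones and are an assumption here, since the diff truncates the call):

    /* store the tuple via the table AM; this does not touch indexes */
    table_tuple_insert(resultRelInfo->ri_RelationDesc, myslot,
                       mycid, ti_options, bistate);

    /* index entries are created separately, one call per row */
    if (resultRelInfo->ri_NumIndices > 0)
        recheckIndexes = ExecInsertIndexTuples(myslot, estate,
                                               false, NULL, NIL);
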