Diffstat (limited to 'src/backend/access')
-rw-r--r--  src/backend/access/gin/ginfast.c        |  6 +++---
-rw-r--r--  src/backend/access/gin/ginget.c         |  8 ++++----
-rw-r--r--  src/backend/access/nbtree/nbtree.c      | 12 ++++++------
-rw-r--r--  src/backend/access/nbtree/nbtutils.c    |  4 ++--
-rw-r--r--  src/backend/access/transam/xlog.c       |  2 +-
-rw-r--r--  src/backend/access/transam/xlogreader.c |  6 +++---
6 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 5f624cf6fac..e32807e62ac 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -247,9 +247,9 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
 	metapage = BufferGetPage(metabuffer);
 
 	/*
-	 * An insertion to the pending list could logically belong anywhere in
-	 * the tree, so it conflicts with all serializable scans. All scans
-	 * acquire a predicate lock on the metabuffer to represent that.
+	 * An insertion to the pending list could logically belong anywhere in the
+	 * tree, so it conflicts with all serializable scans. All scans acquire a
+	 * predicate lock on the metabuffer to represent that.
 	 */
 	CheckForSerializableConflictIn(index, NULL, metabuffer);
 
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index ef3cd7dbe2a..8466d947eab 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -235,8 +235,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 			LockBuffer(stack->buffer, GIN_UNLOCK);
 
 			/*
-			 * Acquire predicate lock on the posting tree. We already hold
-			 * a lock on the entry page, but insertions to the posting tree
+			 * Acquire predicate lock on the posting tree. We already hold a
+			 * lock on the entry page, but insertions to the posting tree
 			 * don't check for conflicts on that level.
 			 */
 			PredicateLockPage(btree->index, rootPostingTree, snapshot);
@@ -1766,8 +1766,8 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
 	*ntids = 0;
 
 	/*
-	 * Acquire predicate lock on the metapage, to conflict with any
-	 * fastupdate insertions.
+	 * Acquire predicate lock on the metapage, to conflict with any fastupdate
+	 * insertions.
 	 */
 	PredicateLockPage(scan->indexRelation, GIN_METAPAGE_BLKNO,
 					  scan->xs_snapshot);
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index cdd0403e1d8..e8725fbbe1e 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -820,10 +820,10 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
 
 		/*
 		 * If table receives enough insertions and no cleanup was performed,
-		 * then index would appear have stale statistics. If scale factor
-		 * is set, we avoid that by performing cleanup if the number of
-		 * inserted tuples exceeds vacuum_cleanup_index_scale_factor fraction
-		 * of original tuples count.
+		 * then index would appear have stale statistics. If scale factor is
+		 * set, we avoid that by performing cleanup if the number of inserted
+		 * tuples exceeds vacuum_cleanup_index_scale_factor fraction of
+		 * original tuples count.
 		 */
 		relopts = (StdRdOptions *) info->index->rd_options;
 		cleanup_scale_factor = (relopts &&
@@ -873,8 +873,8 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 					 &oldestBtpoXact);
 
 		/*
-		 * Update cleanup-related information in metapage. This information
-		 * is used only for cleanup but keeping them up to date can avoid
+		 * Update cleanup-related information in metapage. This information is
+		 * used only for cleanup but keeping them up to date can avoid
 		 * unnecessary cleanup even after bulkdelete.
 		 */
 		_bt_update_meta_cleanup_info(info->index, oldestBtpoXact,
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index acb944357a3..4528e87c833 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -2196,8 +2196,8 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
 		 * non-zero, or when there is no explicit representation and the
 		 * tuple is evidently not a pre-pg_upgrade tuple.
 		 *
-		 * Prior to v11, downlinks always had P_HIKEY as their offset.
-		 * Use that to decide if the tuple is a pre-v11 tuple.
+		 * Prior to v11, downlinks always had P_HIKEY as their offset. Use
+		 * that to decide if the tuple is a pre-v11 tuple.
 		 */
 		return BTreeTupleGetNAtts(itup, rel) == 0 ||
 			((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 1a419aa49bf..dcfef365916 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -4512,7 +4512,7 @@ ReadControlFile(void)
 					 errmsg("could not read from control file: %m")));
 		else
 			ereport(PANIC,
-				(errmsg("could not read from control file: read %d bytes, expected %d", r, (int) sizeof(ControlFileData))));
+					(errmsg("could not read from control file: read %d bytes, expected %d", r, (int) sizeof(ControlFileData))));
 	}
 	pgstat_report_wait_end();
 
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index 1b000a2ef1d..dd96cef8f01 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -829,9 +829,9 @@ XLogReaderValidatePageHeader(XLogReaderState *state, XLogRecPtr recptr,
 	}
 
 	/*
-	 * Check that the address on the page agrees with what we expected.
-	 * This check typically fails when an old WAL segment is recycled,
-	 * and hasn't yet been overwritten with new data yet.
+	 * Check that the address on the page agrees with what we expected. This
+	 * check typically fails when an old WAL segment is recycled, and hasn't
+	 * yet been overwritten with new data yet.
 	 */
 	if (hdr->xlp_pageaddr != recaddr)
 	{
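Editorial note: the GIN hunks above all concern one serializability protocol. An insertion through the fast-update path could logically land anywhere in the index, so scans take a predicate lock on the metapage (or on a posting-tree root), and insertions report conflicts against those locks. Below is a minimal sketch of the two halves using the calls visible in the diff; the wrapper names scan_side() and insert_side() are hypothetical, and buffer acquisition, locking, and error handling are elided.

/*
 * Sketch of the GIN pending-list predicate-lock protocol. Assumes
 * PostgreSQL internal headers; scan_side/insert_side are illustrative
 * names, not functions from the tree.
 */
#include "postgres.h"
#include "access/gin_private.h"
#include "access/relscan.h"
#include "storage/predicate.h"

/* Reader half: lock the metapage so later fastupdate insertions conflict. */
static void
scan_side(IndexScanDesc scan)
{
	PredicateLockPage(scan->indexRelation, GIN_METAPAGE_BLKNO,
					  scan->xs_snapshot);
	/* ... scan the pending list ... */
}

/* Writer half: a pending-list tuple could belong anywhere in the tree,
 * so it must conflict with every scan that locked the metapage. */
static void
insert_side(Relation index, Buffer metabuffer)
{
	CheckForSerializableConflictIn(index, NULL, metabuffer);
	/* ... append the tuple to the pending list ... */
}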
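The _bt_vacuum_needs_cleanup comment in the nbtree.c hunk describes a threshold rule: trigger index cleanup when the tuples inserted since the last cleanup exceed vacuum_cleanup_index_scale_factor as a fraction of the tuple count recorded in the metapage. A hedged restatement of that arithmetic as standalone C; the variable names are illustrative, not the function's actual locals.

#include <stdbool.h>

/* Cleanup decision sketched from the comment above: with no recorded
 * baseline we clean up; otherwise we clean up once the current heap
 * tuple count has grown past the configured fraction of the baseline. */
static bool
needs_cleanup(double prev_heap_tuples,	/* count stored at last cleanup */
			  double curr_heap_tuples,	/* count reported by this vacuum */
			  double scale_factor)		/* vacuum_cleanup_index_scale_factor */
{
	if (prev_heap_tuples < 0)
		return true;
	return curr_heap_tuples > (1.0 + scale_factor) * prev_heap_tuples;
}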
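Finally, the xlogreader.c comment documents a common end-of-WAL heuristic: a recycled segment still carries page headers from its previous life, so the page address stored on disk will not match the address the reader computed. The check reduces to a single comparison; a sketch with a hypothetical wrapper name.

#include "access/xlog_internal.h"

/* Does the on-disk page claim the address we expect? A mismatch usually
 * means a recycled WAL segment whose old contents haven't been
 * overwritten yet, i.e. we have read past the end of valid WAL. */
static bool
page_addr_matches(XLogPageHeader hdr, XLogRecPtr recaddr)
{
	return hdr->xlp_pageaddr == recaddr;
}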