Diffstat (limited to 'src/backend/utils/cache/catcache.c')
-rw-r--r--  src/backend/utils/cache/catcache.c  21
1 file changed, 11 insertions, 10 deletions
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index d17b6b0ba58..954b435bffa 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -836,9 +836,10 @@ RehashCatCache(CatCache *cp)
for (i = 0; i < cp->cc_nbuckets; i++)
{
dlist_mutable_iter iter;
+
dlist_foreach_modify(iter, &cp->cc_bucket[i])
{
- CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
+ CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);

dlist_delete(iter.cur);
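
The hunk above is the core of RehashCatCache: every cached tuple is unlinked from its old bucket and pushed onto the bucket it hashes to under the new bucket count (HASH_INDEX is a mask in the real code, which relies on power-of-two bucket counts). Below is a minimal standalone sketch of the same relink-into-new-buckets pattern; the Node type, the rehash() helper, and the use of plain modulo are illustrative assumptions, not PostgreSQL code.

#include <stdio.h>
#include <stdlib.h>

typedef struct Node
{
    unsigned hash;              /* hash of the entry's key, computed once */
    struct Node *next;          /* singly linked bucket chain */
} Node;

/* Relink every node from oldtab (nold buckets) into newtab (nnew buckets). */
static void
rehash(Node **oldtab, int nold, Node **newtab, int nnew)
{
    for (int i = 0; i < nold; i++)
    {
        Node *n = oldtab[i];

        while (n != NULL)
        {
            Node *next = n->next;           /* save before relinking */
            int bucket = n->hash % nnew;    /* bucket under the new size */

            n->next = newtab[bucket];
            newtab[bucket] = n;
            n = next;
        }
        oldtab[i] = NULL;
    }
}

int
main(void)
{
    Node *oldtab[4] = {NULL};
    Node *newtab[8] = {NULL};

    /* put a few toy entries into the small table */
    for (unsigned k = 0; k < 10; k++)
    {
        Node *n = malloc(sizeof(Node));

        if (n == NULL)
            return 1;
        n->hash = k * 2654435761u;  /* toy multiplicative hash */
        n->next = oldtab[n->hash % 4];
        oldtab[n->hash % 4] = n;
    }

    rehash(oldtab, 4, newtab, 8);

    for (int i = 0; i < 8; i++)
    {
        int len = 0;

        for (Node *n = newtab[i]; n != NULL; n = n->next)
            len++;
        printf("bucket %d: %d entries\n", i, len);
    }
    return 0;
}

Only the list links change during the rehash; the entries themselves stay at their original addresses, which is also what lets the catcache grow its bucket array while callers still hold pointers to individual entries.
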
@@ -856,7 +857,7 @@ RehashCatCache(CatCache *cp)
* CatalogCacheInitializeCache
*
* This function does final initialization of a catcache: obtain the tuple
- * descriptor and set up the hash and equality function links. We assume
+ * descriptor and set up the hash and equality function links. We assume
* that the relcache entry can be opened at this point!
*/
#ifdef CACHEDEBUG
@@ -1081,7 +1082,7 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
* if necessary (on the first access to a particular cache).
*
* The result is NULL if not found, or a pointer to a HeapTuple in
- * the cache. The caller must not modify the tuple, and must call
+ * the cache. The caller must not modify the tuple, and must call
* ReleaseCatCache() when done with it.
*
* The search key values should be expressed as Datums of the key columns'
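
In practice most backend code reaches this contract through the syscache wrappers (SearchSysCache1 and friends, which call SearchCatCache and ReleaseCatCache underneath): test the result with HeapTupleIsValid, treat the tuple as read-only, and release it exactly once. The sketch below follows that pattern; get_relkind_example and the choice of the RELOID cache are illustrative, and it assumes a backend or extension build rather than a standalone program.

#include "postgres.h"

#include "access/htup_details.h"
#include "catalog/pg_class.h"
#include "utils/syscache.h"

/* Illustrative lookup: fetch a pg_class row by OID and read one field. */
static char
get_relkind_example(Oid relid)
{
    HeapTuple   tup;
    char        relkind = '\0';

    tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
    if (HeapTupleIsValid(tup))      /* a NULL result means "no such row" */
    {
        Form_pg_class form = (Form_pg_class) GETSTRUCT(tup);

        relkind = form->relkind;    /* read-only access to the cached tuple */
        ReleaseSysCache(tup);       /* pairs with every successful search */
    }
    return relkind;
}
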
@@ -1214,8 +1215,8 @@ SearchCatCache(CatCache *cache,
* the relation --- for example, due to shared-cache-inval messages being
* processed during heap_open(). This is OK. It's even possible for one
* of those lookups to find and enter the very same tuple we are trying to
- * fetch here. If that happens, we will enter a second copy of the tuple
- * into the cache. The first copy will never be referenced again, and
+ * fetch here. If that happens, we will enter a second copy of the tuple
+ * into the cache. The first copy will never be referenced again, and
* will eventually age out of the cache, so there's no functional problem.
* This case is rare enough that it's not worth expending extra cycles to
* detect.
@@ -1254,7 +1255,7 @@ SearchCatCache(CatCache *cache,
*
* In bootstrap mode, we don't build negative entries, because the cache
* invalidation mechanism isn't alive and can't clear them if the tuple
- * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
+ * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
* cache inval for that.)
*/
if (ct == NULL)
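
A negative entry records that a key was looked up and not found, so later lookups of the same key can answer "not there" without another catalog scan; that is what bootstrap mode skips here, because nothing would invalidate such an entry if the row were created later. The toy cache below illustrates the general idea only; the Entry layout, the slow_lookup stand-in, and the fixed table size are invented for the example and do not mirror catcache.c's structures.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct
{
    char key[32];
    bool used;          /* slot holds a remembered outcome */
    bool negative;      /* true: key is known to be absent */
    int  value;         /* meaningful only when !negative */
} Entry;

static Entry cache_tab[8];

/* Stand-in for the expensive backing lookup (think: a catalog scan). */
static int
slow_lookup(const char *key, bool *found)
{
    *found = (strcmp(key, "exists") == 0);
    return *found ? 42 : 0;
}

static int
cached_lookup(const char *key, bool *found)
{
    unsigned h = 0;
    Entry *e;

    for (const char *p = key; *p != '\0'; p++)
        h = h * 31 + (unsigned char) *p;
    e = &cache_tab[h % 8];

    if (e->used && strcmp(e->key, key) == 0)
    {
        *found = !e->negative;      /* a hit may be a negative entry */
        return e->negative ? 0 : e->value;
    }

    /* miss: do the real lookup and remember the outcome either way */
    snprintf(e->key, sizeof(e->key), "%s", key);
    e->value = slow_lookup(key, found);
    e->negative = !*found;
    e->used = true;
    return e->value;
}

int
main(void)
{
    bool found;

    cached_lookup("missing", &found);   /* misses, stores a negative entry */
    cached_lookup("missing", &found);   /* answered from the negative entry */
    printf("second lookup: found=%d\n", (int) found);
    return 0;
}
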
@@ -1584,7 +1585,7 @@ SearchCatCacheList(CatCache *cache,
/*
* We are now past the last thing that could trigger an elog before we
* have finished building the CatCList and remembering it in the
- * resource owner. So it's OK to fall out of the PG_TRY, and indeed
+ * resource owner. So it's OK to fall out of the PG_TRY, and indeed
* we'd better do so before we start marking the members as belonging
* to the list.
*/
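
The ordering constraint described in this comment, i.e. finish everything that can elog before leaving PG_TRY and only then do the bookkeeping that the catch block could not undo, is an instance of the standard backend error-recovery pattern. The sketch below shows that general shape; guarded_build_example and its comments are illustrative and are not the actual SearchCatCacheList code.

#include "postgres.h"

/* Generic shape of the PG_TRY error-recovery pattern referenced above. */
static void
guarded_build_example(void)
{
    PG_TRY();
    {
        /*
         * Steps that may elog(ERROR) go here; any partial state must still
         * be revertible by the catch block at this point.
         */
    }
    PG_CATCH();
    {
        /* undo whatever was built so far, then propagate the error */
        PG_RE_THROW();
    }
    PG_END_TRY();

    /*
     * Only after falling out of PG_TRY do the steps that the catch block
     * would not know how to revert, such as handing out references.
     */
}
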
@@ -1673,7 +1674,7 @@ ReleaseCatCacheList(CatCList *list)
/*
* CatalogCacheCreateEntry
* Create a new CatCTup entry, copying the given HeapTuple and other
- * supplied data into it. The new entry initially has refcount 0.
+ * supplied data into it. The new entry initially has refcount 0.
*/
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
@@ -1724,8 +1725,8 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
CacheHdr->ch_ntup++;

/*
- * If the hash table has become too full, enlarge the buckets array.
- * Quite arbitrarily, we enlarge when fill factor > 2.
+ * If the hash table has become too full, enlarge the buckets array. Quite
+ * arbitrarily, we enlarge when fill factor > 2.
*/
if (cache->cc_ntup > cache->cc_nbuckets * 2)
RehashCatCache(cache);
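
For the hunk above: cc_ntup counts the entries in this cache and cc_nbuckets the size of its bucket array, so the test fires once the average hash chain holds more than two entries, and RehashCatCache then grows the bucket array (doubling it, in the actual code). The toy program below just traces when that policy would trigger as entries are added; the starting size of 128 buckets is an arbitrary choice for the demo, not a value taken from catcache.c.

#include <stdio.h>

int
main(void)
{
    int nbuckets = 128;     /* arbitrary starting size for the demo */

    for (int ntup = 1; ntup <= 2000; ntup++)
    {
        /* same test as in CatalogCacheCreateEntry: fill factor > 2 */
        if (ntup > nbuckets * 2)
        {
            nbuckets *= 2;  /* mirror a doubling rehash */
            printf("at %d tuples: grow to %d buckets (fill factor %.2f)\n",
                   ntup, nbuckets, (double) ntup / nbuckets);
        }
    }
    return 0;
}

Doubling keeps the amortized cost of rehashing proportional to the number of insertions, while the fill-factor cap keeps average lookup chains short.
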