author     Tom Lane <tgl@sss.pgh.pa.us>    2000-01-31 04:35:57 +0000
committer  Tom Lane <tgl@sss.pgh.pa.us>    2000-01-31 04:35:57 +0000
commit     a152ebeec6142fbdaaaecd0922041b2f70745851
tree       4e2287020e98b488752cd286f212a4aab8e98a5e /src/backend/utils/cache
parent     ca0f1435ecd6f273428c6eefbc37c0210daa735d
Fix problems seen in parallel regress tests when SI buffer overruns (causing
syscache and relcache flushes). Relcache entry rebuild now preserves
original tupledesc, rewrite rules, and triggers if possible, so that pointers
to these things remain valid --- if these things change while relcache entry
has positive refcount, we elog(ERROR) to avoid later crash. Arrange for
xact-local rels to be rebuilt when an SI inval message is seen for them,
so that they are updated by CommandCounterIncrement the same as regular rels.
(This is useful because of Hiroshi's recent changes to process our own SI
messages at CommandCounterIncrement time.) This allows simplification of
some routines that previously hacked around the lack of an automatic update.
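
For reference, the core of the new rebuild path in RelationClearRelation() looks like this (a condensed sketch of the relcache.c hunk below; the rewrite-rule and trigger substructures get the same keep-if-equal treatment as the tupledesc, and the surrounding memory-context and refcount bookkeeping is elided):

    /* Condensed sketch -- see the full relcache.c hunk below. */
    TupleDesc old_att = relation->rd_att;   /* saved before the rebuild */
    bool      relDescChanged = false;

    /* ... RelationBuildDesc() has just refilled the entry in place ... */

    if (equalTupleDescs(old_att, relation->rd_att))
    {
        /* tupledesc unchanged: keep the old copy, so that pointers
         * cached elsewhere in the backend remain valid */
        FreeTupleDesc(relation->rd_att);
        relation->rd_att = old_att;
    }
    else
    {
        FreeTupleDesc(old_att);
        relDescChanged = true;
    }

    /* ... rd_rules (equalRuleLocks/FreeRuleLock) and trigdesc
     * (equalTriggerDescs/FreeTriggerDesc) are handled the same way ... */

    if (relDescChanged && !RelationHasReferenceCountZero(relation))
        elog(ERROR, "RelationClearRelation: relation %u modified while in use",
             RelationGetRelid(relation));
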
catcache now keeps its own copy of tupledesc for its relation, rather than
depending on the relcache's copy; this avoids needing to reinitialize catcache
during a cache flush, which saves some cycles and eliminates nasty circularity
problems that occur if a cache flush happens while trying to initialize a
catcache.
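
The corresponding change in CatalogCacheInitializeCache() is small: instead of borrowing the relcache's tuple descriptor, the catcache keeps a private copy (taken from the catcache.c hunk below):

    /* copy the tupledesc so a relcache flush cannot invalidate it */
    tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
    cache->cc_tupdesc = tupdesc;

This is what lets SystemCacheRelationFlushed() get away with a plain ResetSystemCache() call, rather than marking the affected cache's own state invalid.
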
Eliminate a number of permanent memory leaks that used to happen during
catcache or relcache flush; not least of which was that catcache never
freed any cached tuples! (Rule parsetree storage is still leaked, however;
will fix that separately.)
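
In particular, catcache tuple removal now releases the cached tuple along with both linked-list elements, and the entries themselves are palloc'd in CacheCxt rather than malloc'd (condensed from CatCacheRemoveCTup() in the catcache.c hunk below):

    /* We need to zap both linked-list elements as well as the tuple */
    ct = (CatCTup *) DLE_VAL(elt);
    other_elt = ct->ct_node;
    other_ct = (CatCTup *) DLE_VAL(other_elt);

    heap_freetuple(ct->ct_tup);   /* previously the tuple was never freed */

    DLRemove(other_elt);
    DLFreeElem(other_elt);
    pfree(other_ct);              /* was free(); entries now live in CacheCxt */

    DLRemove(elt);
    DLFreeElem(elt);
    pfree(ct);

    --cache->cc_ntup;
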
Nothing done yet about code that uses tuples retrieved by SearchSysCache
for longer than is safe.
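
Until that is addressed, a caller that needs a syscache tuple to survive a possible cache flush has to make its own copy. A minimal illustration (not part of this patch, with the key-argument list abbreviated):

    HeapTuple tup = SearchSysCache(cache, key1, key2, 0, 0);

    if (HeapTupleIsValid(tup))
        tup = heap_copytuple(tup);   /* private copy, safe across cache flushes */
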
Diffstat (limited to 'src/backend/utils/cache')
-rw-r--r--  src/backend/utils/cache/catcache.c | 118
-rw-r--r--  src/backend/utils/cache/inval.c    |   4
-rw-r--r--  src/backend/utils/cache/rel.c      |  36
-rw-r--r--  src/backend/utils/cache/relcache.c | 421
4 files changed, 316 insertions(+), 263 deletions(-)
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c index e5e22a7ab31..f96a3956053 100644 --- a/src/backend/utils/cache/catcache.c +++ b/src/backend/utils/cache/catcache.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.58 2000/01/26 05:57:17 momjian Exp $ + * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.59 2000/01/31 04:35:51 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -35,9 +35,6 @@ static long comphash(long l, char *v); /* ---------------- * variables, macros and other stuff - * - * note CCSIZE allocates 51 buckets .. one was already allocated in - * the catcache structure. * ---------------- */ @@ -64,17 +61,20 @@ GlobalMemory CacheCxt; /* context in which caches are allocated */ /* ---------------- - * EQPROC is used in CatalogCacheInitializeCache - * XXX this should be replaced by catalog lookups soon + * EQPROC is used in CatalogCacheInitializeCache to find the equality + * functions for system types that are used as cache key fields. + * + * XXX this should be replaced by catalog lookups, + * but that seems to pose considerable risk of circularity... * ---------------- */ -static long eqproc[] = { - F_BOOLEQ, 0l, F_CHAREQ, F_NAMEEQ, 0l, - F_INT2EQ, F_KEYFIRSTEQ, F_INT4EQ, 0l, F_TEXTEQ, - F_OIDEQ, 0l, 0l, 0l, F_OIDVECTOREQ +static const Oid eqproc[] = { + F_BOOLEQ, InvalidOid, F_CHAREQ, F_NAMEEQ, InvalidOid, + F_INT2EQ, F_KEYFIRSTEQ, F_INT4EQ, F_OIDEQ, F_TEXTEQ, + F_OIDEQ, InvalidOid, InvalidOid, InvalidOid, F_OIDVECTOREQ }; -#define EQPROC(SYSTEMTYPEOID) eqproc[(SYSTEMTYPEOID)-16] +#define EQPROC(SYSTEMTYPEOID) eqproc[(SYSTEMTYPEOID)-BOOLOID] /* ---------------------------------------------------------------- * internal support functions @@ -169,12 +169,13 @@ CatalogCacheInitializeCache(struct catcache * cache, } /* ---------------- - * initialize the cache's relation id + * initialize the cache's relation id and tuple descriptor * ---------------- */ Assert(RelationIsValid(relation)); cache->relationId = RelationGetRelid(relation); - tupdesc = cache->cc_tupdesc = RelationGetDescr(relation); + tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation)); + cache->cc_tupdesc = tupdesc; CACHE3_elog(DEBUG, "CatalogCacheInitializeCache: relid %u, %d keys", cache->relationId, cache->cc_nkeys); @@ -254,22 +255,6 @@ CatalogCacheInitializeCache(struct catcache * cache, MemoryContextSwitchTo(oldcxt); } -/* -------------------------------- - * CatalogCacheSetId - * - * XXX temporary function - * -------------------------------- - */ -#ifdef NOT_USED -void -CatalogCacheSetId(CatCache *cacheInOutP, int id) -{ - Assert(id == InvalidCatalogCacheId || id >= 0); - cacheInOutP->id = id; -} - -#endif - /* ---------------- * comphash * Compute a hash value, somehow. @@ -369,10 +354,12 @@ CatalogCacheComputeTupleHashIndex(struct catcache * cacheInOutP, Relation relation, HeapTuple tuple) { - bool isNull = '\0'; + bool isNull = false; + /* XXX is this really needed? 
*/ if (cacheInOutP->relationId == InvalidOid) CatalogCacheInitializeCache(cacheInOutP, relation); + switch (cacheInOutP->cc_nkeys) { case 4: @@ -417,8 +404,7 @@ CatalogCacheComputeTupleHashIndex(struct catcache * cacheInOutP, break; default: elog(FATAL, "CCComputeTupleHashIndex: %d cc_nkeys", - cacheInOutP->cc_nkeys - ); + cacheInOutP->cc_nkeys); break; } @@ -427,6 +413,8 @@ CatalogCacheComputeTupleHashIndex(struct catcache * cacheInOutP, /* -------------------------------- * CatCacheRemoveCTup + * + * NB: assumes caller has switched to CacheCxt * -------------------------------- */ static void @@ -436,19 +424,24 @@ CatCacheRemoveCTup(CatCache *cache, Dlelem *elt) CatCTup *other_ct; Dlelem *other_elt; - if (elt) - ct = (CatCTup *) DLE_VAL(elt); - else + if (!elt) /* probably-useless safety check */ return; + /* We need to zap both linked-list elements as well as the tuple */ + + ct = (CatCTup *) DLE_VAL(elt); other_elt = ct->ct_node; other_ct = (CatCTup *) DLE_VAL(other_elt); + + heap_freetuple(ct->ct_tup); + DLRemove(other_elt); DLFreeElem(other_elt); - free(other_ct); + pfree(other_ct); DLRemove(elt); DLFreeElem(elt); - free(ct); + pfree(ct); + --cache->cc_ntup; } @@ -529,7 +522,6 @@ CatalogCacheIdInvalidate(int cacheId, /* XXX */ * ---------------- */ MemoryContextSwitchTo(oldcxt); - /* sendpm('I', "Invalidated tuple"); */ } /* ---------------------------------------------------------------- @@ -615,34 +607,26 @@ ResetSystemCache() * * A special case occurs when relId is itself one of the cacheable system * tables --- although those'll never be dropped, they can get flushed from - * the relcache (VACUUM causes this, for example). In that case we need to - * force the next SearchSysCache() call to reinitialize the cache itself, - * because we have info (such as cc_tupdesc) that is pointing at the about- - * to-be-deleted relcache entry. + * the relcache (VACUUM causes this, for example). In that case we need + * to flush all cache entries from that table. The brute-force method + * currently used takes care of that quite handily. (At one point we + * also tried to force re-execution of CatalogCacheInitializeCache for + * the cache(s) on that table. This is a bad idea since it leads to all + * kinds of trouble if a cache flush occurs while loading cache entries. + * We now avoid the need to do it by copying cc_tupdesc out of the relcache, + * rather than relying on the relcache to keep a tupdesc for us. Of course + * this assumes the tupdesc of a cachable system table will not change...) * -------------------------------- */ void SystemCacheRelationFlushed(Oid relId) { - struct catcache *cache; - /* * XXX Ideally we'd search the caches and just zap entries that actually - * refer to the indicated relation. For now, we take the brute-force - * approach: just flush the caches entirely. + * refer to or come from the indicated relation. For now, we take the + * brute-force approach: just flush the caches entirely. */ ResetSystemCache(); - - /* - * If relcache is dropping a system relation's cache entry, mark the - * associated cache structures invalid, so we can rebuild them from - * scratch (not just repopulate them) next time they are used. - */ - for (cache = Caches; PointerIsValid(cache); cache = cache->cc_next) - { - if (cache->relationId == relId) - cache->relationId = InvalidOid; - } } /* -------------------------------- @@ -715,11 +699,11 @@ InitSysCache(char *relname, { /* * We can only do this optimization because the number of hash - * buckets never changes. 
Without it, we call malloc() too much. + * buckets never changes. Without it, we call palloc() too much. * We could move this to dllist.c, but the way we do this is not - * dynamic/portabl, so why allow other routines to use it. + * dynamic/portable, so why allow other routines to use it. */ - Dllist *cache_begin = malloc((NCCBUCK + 1) * sizeof(Dllist)); + Dllist *cache_begin = palloc((NCCBUCK + 1) * sizeof(Dllist)); for (i = 0; i <= NCCBUCK; ++i) { @@ -927,7 +911,7 @@ SearchSysCache(struct catcache * cache, MemoryContext oldcxt; /* ---------------- - * sanity checks + * one-time startup overhead * ---------------- */ if (cache->relationId == InvalidOid) @@ -946,7 +930,7 @@ SearchSysCache(struct catcache * cache, * resolve self referencing informtion */ if ((ntp = SearchSelfReferences(cache))) - return heap_copytuple(ntp); + return ntp; /* ---------------- * find the hash bucket in which to look for the tuple @@ -995,10 +979,8 @@ SearchSysCache(struct catcache * cache, DLMoveToFront(elt); #ifdef CACHEDEBUG - relation = heap_open(cache->relationId, NoLock); CACHE3_elog(DEBUG, "SearchSysCache(%s): found in bucket %d", - RelationGetRelationName(relation), hash); - heap_close(relation, NoLock); + cache->cc_relname, hash); #endif /* CACHEDEBUG */ return ct->ct_tup; @@ -1020,9 +1002,7 @@ SearchSysCache(struct catcache * cache, */ if (cache->busy) - { elog(ERROR, "SearchSysCache: recursive use of cache %d", cache->id); - } cache->busy = true; /* ---------------- @@ -1140,10 +1120,10 @@ SearchSysCache(struct catcache * cache, * it easier to remove something from both the cache bucket and * the lru list at the same time */ - nct = (CatCTup *) malloc(sizeof(CatCTup)); + nct = (CatCTup *) palloc(sizeof(CatCTup)); nct->ct_tup = ntp; elt = DLNewElem(nct); - nct2 = (CatCTup *) malloc(sizeof(CatCTup)); + nct2 = (CatCTup *) palloc(sizeof(CatCTup)); nct2->ct_tup = ntp; lru_elt = DLNewElem(nct2); nct2->ct_node = elt; diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c index 473978bd410..17071f8e235 100644 --- a/src/backend/utils/cache/inval.c +++ b/src/backend/utils/cache/inval.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $Header: /cvsroot/pgsql/src/backend/utils/cache/inval.c,v 1.33 2000/01/29 19:51:59 tgl Exp $ + * $Header: /cvsroot/pgsql/src/backend/utils/cache/inval.c,v 1.34 2000/01/31 04:35:52 tgl Exp $ * * Note - this code is real crufty... * @@ -557,7 +557,7 @@ static void ResetSystemCaches() { ResetSystemCache(); - RelationCacheInvalidate(true); + RelationCacheInvalidate(); } /* -------------------------------- diff --git a/src/backend/utils/cache/rel.c b/src/backend/utils/cache/rel.c index d24c1ca0c9c..50edb422468 100644 --- a/src/backend/utils/cache/rel.c +++ b/src/backend/utils/cache/rel.c @@ -8,11 +8,10 @@ * * * IDENTIFICATION - * $Header: /cvsroot/pgsql/src/backend/utils/cache/Attic/rel.c,v 1.7 2000/01/26 05:57:17 momjian Exp $ + * $Header: /cvsroot/pgsql/src/backend/utils/cache/Attic/rel.c,v 1.8 2000/01/31 04:35:52 tgl Exp $ * *------------------------------------------------------------------------- */ -/* #define RELREFDEBUG 1 */ #include "postgres.h" #include "access/istrat.h" @@ -21,45 +20,22 @@ /* * RelationIsValid is now a macro in rel.h -cim 4/27/91 * - * Many of the RelationGet...() functions are now macros in rel.h + * All of the RelationGet...() functions are now macros in rel.h * -mer 3/2/92 */ /* - * RelationGetIndexStrategy - * Returns index strategy for a relation. - * - * Note: - * Assumes relation descriptor is valid. 
- * Assumes relation descriptor is for an index relation. - */ -IndexStrategy -RelationGetIndexStrategy(Relation relation) -{ - return relation->rd_istrat; -} - -/* * RelationSetIndexSupport * Sets index strategy and support info for a relation. * + * This routine saves two pointers -- one to the IndexStrategy, and + * one to the RegProcs that support the indexed access method. + * * Note: - * Assumes relation descriptor is a valid pointer to sufficient space. + * Assumes relation descriptor is valid. * Assumes index strategy is valid. Assumes support is valid if non- * NULL. */ -/* ---------------- - * RelationSetIndexSupport - * - * This routine saves two pointers -- one to the IndexStrategy, and - * one to the RegProcs that support the indexed access method. These - * pointers are stored in the space following the attribute data in the - * reldesc. - * - * NEW: the index strategy and support are now stored in real fields - * at the end of the structure - jolly - * ---------------- - */ void RelationSetIndexSupport(Relation relation, IndexStrategy strategy, diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 4a6c86d84d9..7c993d3d73f 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.88 2000/01/29 19:51:59 tgl Exp $ + * $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.89 2000/01/31 04:35:52 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -20,7 +20,6 @@ * RelationIdGetRelation - get a reldesc by relation id * RelationNameGetRelation - get a reldesc by relation name * RelationClose - close an open relation - * RelationRebuildRelation - rebuild relation information * * NOTES * This file is in the process of being cleaned up @@ -37,7 +36,6 @@ #include "postgres.h" -#include "utils/builtins.h" #include "access/genam.h" #include "access/heapam.h" #include "access/istrat.h" @@ -52,50 +50,45 @@ #include "catalog/pg_rewrite.h" #include "catalog/pg_type.h" #include "catalog/pg_variable.h" +#include "commands/trigger.h" #include "lib/hasht.h" #include "miscadmin.h" +#include "storage/bufmgr.h" #include "storage/smgr.h" +#include "utils/builtins.h" #include "utils/catcache.h" #include "utils/relcache.h" #include "utils/temprel.h" -static void RelationClearRelation(Relation relation, bool rebuildIt); -static void RelationFlushRelation(Relation *relationPtr, - bool onlyFlushReferenceCountZero); -static Relation RelationNameCacheGetRelation(const char *relationName); -static void RelationCacheAbortWalker(Relation *relationPtr, - int dummy); -static void init_irels(void); -static void write_irels(void); - -/* ---------------- - * externs - * ---------------- - */ -extern bool AMI_OVERRIDE; /* XXX style */ -extern GlobalMemory CacheCxt; /* from utils/cache/catcache.c */ - /* ---------------- * hardcoded tuple descriptors. 
see lib/backend/catalog/pg_attribute.h * ---------------- */ -FormData_pg_attribute Desc_pg_class[Natts_pg_class] = {Schema_pg_class}; -FormData_pg_attribute Desc_pg_attribute[Natts_pg_attribute] = {Schema_pg_attribute}; -FormData_pg_attribute Desc_pg_proc[Natts_pg_proc] = {Schema_pg_proc}; -FormData_pg_attribute Desc_pg_type[Natts_pg_type] = {Schema_pg_type}; -FormData_pg_attribute Desc_pg_variable[Natts_pg_variable] = {Schema_pg_variable}; -FormData_pg_attribute Desc_pg_log[Natts_pg_log] = {Schema_pg_log}; +static FormData_pg_attribute Desc_pg_class[Natts_pg_class] = {Schema_pg_class}; +static FormData_pg_attribute Desc_pg_attribute[Natts_pg_attribute] = {Schema_pg_attribute}; +static FormData_pg_attribute Desc_pg_proc[Natts_pg_proc] = {Schema_pg_proc}; +static FormData_pg_attribute Desc_pg_type[Natts_pg_type] = {Schema_pg_type}; +static FormData_pg_attribute Desc_pg_variable[Natts_pg_variable] = {Schema_pg_variable}; +static FormData_pg_attribute Desc_pg_log[Natts_pg_log] = {Schema_pg_log}; /* ---------------- - * global variables + * Hash tables that index the relation cache * * Relations are cached two ways, by name and by id, * thus there are two hash tables for referencing them. * ---------------- */ -HTAB *RelationNameCache; -HTAB *RelationIdCache; +static HTAB *RelationNameCache; +static HTAB *RelationIdCache; + +/* + * newlyCreatedRelns - + * relations created during this transaction. We need to keep track of + * these. + */ +static List *newlyCreatedRelns = NULL; + /* ---------------- * RelationBuildDescInfo exists so code can be shared @@ -207,8 +200,17 @@ do { \ } while(0) /* non-export function prototypes */ + +static void RelationClearRelation(Relation relation, bool rebuildIt); +static void RelationFlushRelation(Relation *relationPtr, + int skipLocalRelations); +static Relation RelationNameCacheGetRelation(const char *relationName); +static void RelationCacheAbortWalker(Relation *relationPtr, int dummy); +static void init_irels(void); +static void write_irels(void); + static void formrdesc(char *relationName, u_int natts, - FormData_pg_attribute *att); + FormData_pg_attribute *att); static HeapTuple ScanPgRelation(RelationBuildDescInfo buildinfo); static HeapTuple scan_pg_rel_seq(RelationBuildDescInfo buildinfo); @@ -227,16 +229,6 @@ static void IndexedAccessMethodInitialize(Relation relation); static void AttrDefaultFetch(Relation relation); static void RelCheckFetch(Relation relation); -extern void RelationBuildTriggers(Relation relation); -extern void FreeTriggerDesc(Relation relation); - -/* - * newlyCreatedRelns - - * relations created during this transaction. We need to keep track of - * these. 
- */ -static List *newlyCreatedRelns = NULL; - /* ---------------------------------------------------------------- * RelationIdGetRelation() and RelationNameGetRelation() * support functions @@ -632,22 +624,20 @@ RelationBuildRuleLock(Relation relation) ObjectIdGetDatum(RelationGetRelid(relation))); /* ---------------- - * open pg_attribute and begin a scan + * open pg_rewrite and begin a scan * ---------------- */ pg_rewrite_desc = heap_openr(RewriteRelationName, AccessShareLock); pg_rewrite_scan = heap_beginscan(pg_rewrite_desc, 0, SnapshotNow, 1, &key); pg_rewrite_tupdesc = RelationGetDescr(pg_rewrite_desc); - /* ---------------- - * add attribute data to relation->rd_att - * ---------------- - */ while (HeapTupleIsValid(pg_rewrite_tuple = heap_getnext(pg_rewrite_scan, 0))) { bool isnull; Datum ruleaction; - Datum rule_evqual_string; + Datum rule_evqual; + char *ruleaction_str; + char *rule_evqual_str; RewriteRule *rule; rule = (RewriteRule *) palloc(sizeof(RewriteRule)); @@ -665,24 +655,27 @@ RelationBuildRuleLock(Relation relation) &isnull); ruleaction = heap_getattr(pg_rewrite_tuple, - Anum_pg_rewrite_ev_action, pg_rewrite_tupdesc, + Anum_pg_rewrite_ev_action, + pg_rewrite_tupdesc, &isnull); - rule_evqual_string = heap_getattr(pg_rewrite_tuple, - Anum_pg_rewrite_ev_qual, pg_rewrite_tupdesc, - &isnull); - - ruleaction = PointerGetDatum(textout((text *) DatumGetPointer(ruleaction))); - rule_evqual_string = PointerGetDatum(textout((text *) DatumGetPointer(rule_evqual_string))); - - rule->actions = (List *) stringToNode(DatumGetPointer(ruleaction)); - rule->qual = (Node *) stringToNode(DatumGetPointer(rule_evqual_string)); - - rules[numlocks++] = rule; - if (numlocks == maxlocks) + ruleaction_str = textout((text *) DatumGetPointer(ruleaction)); + rule->actions = (List *) stringToNode(ruleaction_str); + pfree(ruleaction_str); + + rule_evqual = heap_getattr(pg_rewrite_tuple, + Anum_pg_rewrite_ev_qual, + pg_rewrite_tupdesc, + &isnull); + rule_evqual_str = textout((text *) DatumGetPointer(rule_evqual)); + rule->qual = (Node *) stringToNode(rule_evqual_str); + pfree(rule_evqual_str); + + if (numlocks >= maxlocks) { maxlocks *= 2; rules = (RewriteRule **) repalloc(rules, sizeof(RewriteRule *) * maxlocks); } + rules[numlocks++] = rule; } /* ---------------- @@ -701,7 +694,91 @@ RelationBuildRuleLock(Relation relation) rulelock->rules = rules; relation->rd_rules = rulelock; - return; +} + +/* -------------------------------- + * FreeRuleLock + * + * Release the storage used for a set of rewrite rules. + * + * Probably this should be in the rules code someplace... + * -------------------------------- + */ +static void +FreeRuleLock(RuleLock *rlock) +{ + int i; + + if (rlock == NULL) + return; + for (i = 0; i < rlock->numLocks; i++) + { + RewriteRule *rule = rlock->rules[i]; + +#if 0 /* does freefuncs.c still work? Not sure */ + freeObject(rule->actions); + freeObject(rule->qual); +#endif + pfree(rule); + } + pfree(rlock->rules); + pfree(rlock); +} + +/* -------------------------------- + * equalRuleLocks + * + * Determine whether two RuleLocks are equivalent + * + * Probably this should be in the rules code someplace... 
+ * -------------------------------- + */ +static bool +equalRuleLocks(RuleLock *rlock1, RuleLock *rlock2) +{ + int i, + j; + + if (rlock1 != NULL) + { + if (rlock2 == NULL) + return false; + if (rlock1->numLocks != rlock2->numLocks) + return false; + for (i = 0; i < rlock1->numLocks; i++) + { + RewriteRule *rule1 = rlock1->rules[i]; + RewriteRule *rule2 = NULL; + + /* + * We can't assume that the rules are always read from + * pg_rewrite in the same order; so use the rule OIDs to + * identify the rules to compare. (We assume here that the + * same OID won't appear twice in either ruleset.) + */ + for (j = 0; j < rlock2->numLocks; j++) + { + rule2 = rlock2->rules[j]; + if (rule1->ruleId == rule2->ruleId) + break; + } + if (j >= rlock2->numLocks) + return false; + if (rule1->event != rule2->event) + return false; + if (rule1->attrno != rule2->attrno) + return false; + if (rule1->isInstead != rule2->isInstead) + return false; + if (! equal(rule1->qual, rule2->qual)) + return false; + if (! equal(rule1->actions, rule2->actions)) + return false; + } + } + else if (rlock2 != NULL) + return false; + return true; } @@ -800,7 +877,7 @@ RelationBuildDesc(RelationBuildDescInfo buildinfo, * ---------------- */ if (OidIsValid(relam)) - relation->rd_am = (Form_pg_am) AccessMethodObjectIdGetForm(relam); + relation->rd_am = AccessMethodObjectIdGetForm(relam); /* ---------------- * initialize the tuple descriptor (relation->rd_att). @@ -1213,6 +1290,9 @@ RelationClose(Relation relation) * usually used when we are notified of a change to an open relation * (one with refcount > 0). However, this routine just does whichever * it's told to do; callers must determine which they want. + * + * If we detect a change in the relation's TupleDesc or trigger data + * while rebuilding, we complain unless refcount is 0. * -------------------------------- */ static void @@ -1252,26 +1332,53 @@ RelationClearRelation(Relation relation, bool rebuildIt) /* Clear out catcache's entries for this relation */ SystemCacheRelationFlushed(RelationGetRelid(relation)); - /* Free all the subsidiary data structures of the relcache entry */ - FreeTupleDesc(relation->rd_att); - FreeTriggerDesc(relation); - pfree(RelationGetForm(relation)); + /* + * Free all the subsidiary data structures of the relcache entry. + * We cannot free rd_att if we are trying to rebuild the entry, + * however, because pointers to it may be cached in various places. + * The trigger manager might also have pointers into the trigdesc, + * and the rule manager might have pointers into the rewrite rules. + * So to begin with, we can only get rid of these fields: + */ + if (relation->rd_am) + pfree(relation->rd_am); + if (relation->rd_rel) + pfree(relation->rd_rel); + if (relation->rd_istrat) + pfree(relation->rd_istrat); + if (relation->rd_support) + pfree(relation->rd_support); /* * If we're really done with the relcache entry, blow it away. * But if someone is still using it, reconstruct the whole deal * without moving the physical RelationData record (so that the - * someone's pointer is still valid). Must preserve ref count - * and myxactonly flag, too. + * someone's pointer is still valid). */ if (! 
rebuildIt) { + /* ok to zap remaining substructure */ + FreeTupleDesc(relation->rd_att); + FreeRuleLock(relation->rd_rules); + FreeTriggerDesc(relation->trigdesc); pfree(relation); } else { - uint16 old_refcnt = relation->rd_refcnt; - bool old_myxactonly = relation->rd_myxactonly; + /* + * When rebuilding an open relcache entry, must preserve ref count + * and myxactonly flag. Also attempt to preserve the tupledesc, + * rewrite rules, and trigger substructures in place. + * Furthermore we save/restore rd_nblocks (in case it is a local + * relation) *and* call RelationGetNumberOfBlocks (in case it isn't). + */ + uint16 old_refcnt = relation->rd_refcnt; + bool old_myxactonly = relation->rd_myxactonly; + TupleDesc old_att = relation->rd_att; + RuleLock *old_rules = relation->rd_rules; + TriggerDesc *old_trigdesc = relation->trigdesc; + int old_nblocks = relation->rd_nblocks; + bool relDescChanged = false; RelationBuildDescInfo buildinfo; buildinfo.infotype = INFO_RELID; @@ -1280,12 +1387,54 @@ RelationClearRelation(Relation relation, bool rebuildIt) if (RelationBuildDesc(buildinfo, relation) != relation) { /* Should only get here if relation was deleted */ + FreeTupleDesc(old_att); + FreeRuleLock(old_rules); + FreeTriggerDesc(old_trigdesc); pfree(relation); elog(ERROR, "RelationClearRelation: relation %u deleted while still in use", buildinfo.i.info_id); } RelationSetReferenceCount(relation, old_refcnt); relation->rd_myxactonly = old_myxactonly; + if (equalTupleDescs(old_att, relation->rd_att)) + { + FreeTupleDesc(relation->rd_att); + relation->rd_att = old_att; + } + else + { + FreeTupleDesc(old_att); + relDescChanged = true; + } + if (equalRuleLocks(old_rules, relation->rd_rules)) + { + FreeRuleLock(relation->rd_rules); + relation->rd_rules = old_rules; + } + else + { + FreeRuleLock(old_rules); + relDescChanged = true; + } + if (equalTriggerDescs(old_trigdesc, relation->trigdesc)) + { + FreeTriggerDesc(relation->trigdesc); + relation->trigdesc = old_trigdesc; + } + else + { + FreeTriggerDesc(old_trigdesc); + relDescChanged = true; + } + relation->rd_nblocks = old_nblocks; + /* this is kind of expensive, but I think we must do it in case + * relation has been truncated... + */ + relation->rd_nblocks = RelationGetNumberOfBlocks(relation); + + if (relDescChanged && ! RelationHasReferenceCountZero(relation)) + elog(ERROR, "RelationClearRelation: relation %u modified while in use", + buildinfo.i.info_id); } MemoryContextSwitchTo(oldcxt); @@ -1295,32 +1444,40 @@ RelationClearRelation(Relation relation, bool rebuildIt) * RelationFlushRelation * * Rebuild the relation if it is open (refcount > 0), else blow it away. - * Setting onlyFlushReferenceCountZero to FALSE overrides refcount check. - * This is currently only used to process SI invalidation notifications. + * If skipLocalRelations is TRUE, xact-local relations are ignored + * (which is useful when processing SI cache reset, since xact-local + * relations could not be targets of notifications from other backends). + * * The peculiar calling convention (pointer to pointer to relation) * is needed so that we can use this routine as a hash table walker. * -------------------------------- */ static void RelationFlushRelation(Relation *relationPtr, - bool onlyFlushReferenceCountZero) + int skipLocalRelations) { Relation relation = *relationPtr; + bool rebuildIt; - /* - * Do nothing to transaction-local relations, since they cannot be - * subjects of SI notifications from other backends. 
- */ if (relation->rd_myxactonly) - return; + { + if (skipLocalRelations) + return; /* don't touch local rels if so commanded */ + /* + * Local rels should always be rebuilt, not flushed; the relcache + * entry must live until RelationPurgeLocalRelation(). + */ + rebuildIt = true; + } + else + { + /* + * Nonlocal rels can be dropped from the relcache if not open. + */ + rebuildIt = ! RelationHasReferenceCountZero(relation); + } - /* - * Zap it. Rebuild if it has nonzero ref count and we did not get - * the override flag. - */ - RelationClearRelation(relation, - (onlyFlushReferenceCountZero && - ! RelationHasReferenceCountZero(relation))); + RelationClearRelation(relation, rebuildIt); } /* -------------------------------- @@ -1374,20 +1531,15 @@ RelationForgetRelation(Oid rid) } /* -------------------------------- - * RelationRebuildRelation - - * - * Force a relcache entry to be rebuilt from catalog entries. - * This is needed, eg, after modifying an attribute of the rel. - * -------------------------------- - */ -void -RelationRebuildRelation(Relation relation) -{ - RelationClearRelation(relation, true); -} - -/* -------------------------------- * RelationIdInvalidateRelationCacheByRelationId + * + * This routine is invoked for SI cache flush messages. + * + * We used to skip local relations, on the grounds that they could + * not be targets of cross-backend SI update messages; but it seems + * safer to process them, so that our *own* SI update messages will + * have the same effects during CommandCounterIncrement for both + * local and nonlocal relations. * -------------------------------- */ void @@ -1397,36 +1549,8 @@ RelationIdInvalidateRelationCacheByRelationId(Oid relationId) RelationIdCacheLookup(relationId, relation); - /* - * "local" relations are invalidated by RelationPurgeLocalRelation. - * (This is to make LocalBufferSync's life easier: want the descriptor - * to hang around for a while. In fact, won't we want this for - * BufferSync also? But I'll leave it for now since I don't want to - * break anything.) - ay 3/95 - */ - if (PointerIsValid(relation) && !relation->rd_myxactonly) - { -#if 1 - /* - * Seems safest just to NEVER flush rels with positive refcounts. - * I think the code only had that proviso as a rather lame method of - * cleaning up unused relcache entries that had dangling refcounts - * (following elog(ERROR) with an open rel). Now we rely on - * RelationCacheAbort to clean up dangling refcounts, so there's no - * good reason to ever risk flushing a rel with positive refcount. - * IMHO anyway --- tgl 1/29/00. - */ - RelationFlushRelation(&relation, true); -#else - /* - * The boolean onlyFlushReferenceCountZero in RelationFlushReln() - * should be set to true when we are incrementing the command - * counter and to false when we are starting a new xaction. This - * can be determined by checking the current xaction status. 
- */ - RelationFlushRelation(&relation, CurrentXactInProgress()); -#endif - } + if (PointerIsValid(relation)) + RelationFlushRelation(&relation, false); } #if NOT_USED @@ -1448,7 +1572,7 @@ RelationFlushIndexes(Relation *r, if (relation->rd_rel->relkind == RELKIND_INDEX && /* XXX style */ (!OidIsValid(accessMethodId) || relation->rd_rel->relam == accessMethodId)) - RelationFlushRelation(&relation, true); + RelationFlushRelation(&relation, false); } #endif @@ -1477,37 +1601,19 @@ RelationIdInvalidateRelationCacheByAccessMethodId(Oid accessMethodId) /* * RelationCacheInvalidate - * - * Will blow away either all the cached relation descriptors or - * those that have a zero reference count. - * - * CAUTION: this is only called with onlyFlushReferenceCountZero=true - * at present, so that relation descriptors with positive refcounts - * are rebuilt rather than clobbered. It would only be safe to use a - * "false" parameter in a totally idle backend with no open relations. + * Blow away cached relation descriptors that have zero reference counts, + * and rebuild those with positive reference counts. * * This is currently used only to recover from SI message buffer overflow, - * so we do not blow away transaction-local relations; they cannot be - * targets of SI updates. + * so we do not touch transaction-local relations; they cannot be targets + * of cross-backend SI updates (and our own updates now go through a + * separate linked list that isn't limited by the SI message buffer size). */ void -RelationCacheInvalidate(bool onlyFlushReferenceCountZero) +RelationCacheInvalidate(void) { HashTableWalk(RelationNameCache, (HashtFunc) RelationFlushRelation, - onlyFlushReferenceCountZero); - - if (!onlyFlushReferenceCountZero) - { - /* - * Debugging check: what's left should be transaction-local relations - * plus nailed-in reldescs. There should be 6 hardwired heaps - * + 3 hardwired indices == 9 total. - */ - int numRels = length(newlyCreatedRelns) + 9; - - Assert(RelationNameCache->hctl->nkeys == numRels); - Assert(RelationIdCache->hctl->nkeys == numRels); - } + (int) true); } /* @@ -1672,8 +1778,6 @@ RelationInitialize(void) * initialize the cache with pre-made relation descriptors * for some of the more important system relations. These * relations should always be in the cache. - * - * NB: if you change this list, fix the count in RelationCacheInvalidate! * ---------------- */ formrdesc(RelationRelationName, Natts_pg_class, Desc_pg_class); @@ -2008,7 +2112,7 @@ init_irels(void) } /* oh, for god's sake... */ -#define SMD(i) strat[0].strategyMapData[i].entry[0] +#define SMD(i) strat->strategyMapData[i].entry[0] /* have to reinit the function pointers in the strategy maps */ for (i = 0; i < am->amstrategies * relform->relnatts; i++) @@ -2038,11 +2142,6 @@ init_irels(void) write_irels(); return; } - - /* - * p += sizeof(IndexStrategy); ((RegProcedure **) p) = support; - */ - ird->rd_support = support; RelationInitLockInfo(ird); @@ -2085,8 +2184,6 @@ write_irels(void) * relation searches -- a necessary step, since we're trying to * instantiate the index relation descriptors here. Once we have the * descriptors, nail them into cache so we never lose them. - * - * NB: if you change this list, fix the count in RelationCacheInvalidate! */ oldmode = GetProcessingMode(); |