author     Bruce Momjian <bruce@momjian.us>	2013-05-29 16:58:43 -0400
committer  Bruce Momjian <bruce@momjian.us>	2013-05-29 16:58:43 -0400
commit     9af4159fce6654aa0e081b00d02bca40b978745c (patch)
tree       3aa507fc6cc67ed3d9f6ceec4d65d1e56cc08e1a /src/backend/utils
parent     07ab261ef3a9575a4a2bd3045b222d7b3dee2c46 (diff)
download   postgresql-9af4159fce6654aa0e081b00d02bca40b978745c.tar.gz
           postgresql-9af4159fce6654aa0e081b00d02bca40b978745c.zip
pgindent run for release 9.3
This is the first run of the Perl-based pgindent script. Also update pgindent instructions.
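Nearly every hunk below is a mechanical re-indentation of this kind: comments re-filled to the standard line width, continuation lines of function calls re-aligned, and trailing comments pushed to a fixed column. As a rough illustrative sketch (this fragment is not taken from the patch; the names are made up), the layout pgindent enforces looks like:

#include <stdio.h>

/*
 * Comments are re-filled so that each line stays within the standard
 * width, which is why so many hunks below only re-wrap comment text.
 */
static void
report(const char *what, const char *detail)
{
	printf("%s: %s\n", what, detail);	/* trailing comments end up at a fixed column */
}

int
main(void)
{
	/* Continuation lines of a call are re-aligned under the first argument. */
	report("could not identify an equality operator",
		   "continuation line aligned by pgindent");
	return 0;
}

The substantive code is unchanged by the run; only whitespace and comment layout move.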
Diffstat (limited to 'src/backend/utils')
-rw-r--r--  src/backend/utils/adt/array_typanalyze.c          4
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c                12
-rw-r--r--  src/backend/utils/adt/date.c                       4
-rw-r--r--  src/backend/utils/adt/datetime.c                   5
-rw-r--r--  src/backend/utils/adt/formatting.c                11
-rw-r--r--  src/backend/utils/adt/json.c                       2
-rw-r--r--  src/backend/utils/adt/jsonfuncs.c                 10
-rw-r--r--  src/backend/utils/adt/misc.c                      10
-rw-r--r--  src/backend/utils/adt/numeric.c                    2
-rw-r--r--  src/backend/utils/adt/pg_locale.c                 10
-rw-r--r--  src/backend/utils/adt/pseudotypes.c                2
-rw-r--r--  src/backend/utils/adt/rangetypes.c                10
-rw-r--r--  src/backend/utils/adt/rangetypes_gist.c           41
-rw-r--r--  src/backend/utils/adt/rangetypes_selfuncs.c      165
-rw-r--r--  src/backend/utils/adt/rangetypes_spgist.c         74
-rw-r--r--  src/backend/utils/adt/rangetypes_typanalyze.c     50
-rw-r--r--  src/backend/utils/adt/regproc.c                    5
-rw-r--r--  src/backend/utils/adt/ri_triggers.c               44
-rw-r--r--  src/backend/utils/adt/ruleutils.c                 18
-rw-r--r--  src/backend/utils/adt/selfuncs.c                  80
-rw-r--r--  src/backend/utils/adt/timestamp.c                 12
-rw-r--r--  src/backend/utils/adt/tsquery_rewrite.c            2
-rw-r--r--  src/backend/utils/adt/varlena.c                    6
-rw-r--r--  src/backend/utils/adt/xml.c                        7
-rw-r--r--  src/backend/utils/cache/catcache.c                16
-rw-r--r--  src/backend/utils/cache/evtcache.c                42
-rw-r--r--  src/backend/utils/cache/plancache.c               14
-rw-r--r--  src/backend/utils/cache/relcache.c               193
-rw-r--r--  src/backend/utils/cache/syscache.c                 4
-rw-r--r--  src/backend/utils/error/elog.c                     4
-rw-r--r--  src/backend/utils/hash/dynahash.c                 12
-rw-r--r--  src/backend/utils/init/miscinit.c                 16
-rw-r--r--  src/backend/utils/init/postinit.c                  6
-rw-r--r--  src/backend/utils/mb/mbutils.c                     6
-rw-r--r--  src/backend/utils/mb/wchar.c                      20
-rw-r--r--  src/backend/utils/misc/guc.c                       4
-rw-r--r--  src/backend/utils/resowner/resowner.c             10
-rw-r--r--  src/backend/utils/sort/tuplestore.c                4
-rw-r--r--  src/backend/utils/time/tqual.c                    59
39 files changed, 513 insertions, 483 deletions
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index fca47d2e257..ae7bb8a8b81 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -388,8 +388,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* If element type is pass-by-reference, we must copy it into
- * palloc'd space, so that we can release the array below.
- * (We do this so that the space needed for element values is
+ * palloc'd space, so that we can release the array below. (We
+ * do this so that the space needed for element values is
* limited by the size of the hashtable; if we kept all the
* array values around, it could be much more.)
*/
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index f53a0d248a6..1d61d5c7c8d 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -5187,7 +5187,7 @@ array_unnest(PG_FUNCTION_ARGS)
*
* Find all array entries matching (not distinct from) search/search_isnull,
* and delete them if remove is true, else replace them with
- * replace/replace_isnull. Comparisons are done using the specified
+ * replace/replace_isnull. Comparisons are done using the specified
* collation. fcinfo is passed only for caching purposes.
*/
static ArrayType *
@@ -5250,8 +5250,8 @@ array_replace_internal(ArrayType *array,
if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(element_type))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(element_type))));
fcinfo->flinfo->fn_extra = (void *) typentry;
}
typlen = typentry->typlen;
@@ -5259,7 +5259,7 @@ array_replace_internal(ArrayType *array,
typalign = typentry->typalign;
/*
- * Detoast values if they are toasted. The replacement value must be
+ * Detoast values if they are toasted. The replacement value must be
* detoasted for insertion into the result array, while detoasting the
* search value only once saves cycles.
*/
@@ -5370,8 +5370,8 @@ array_replace_internal(ArrayType *array,
if (!AllocSizeIsValid(nbytes))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("array size exceeds the maximum allowed (%d)",
- (int) MaxAllocSize)));
+ errmsg("array size exceeds the maximum allowed (%d)",
+ (int) MaxAllocSize)));
}
nresult++;
}
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index 5dd27c4d650..8677520cb6f 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -2699,8 +2699,8 @@ timetz_izone(PG_FUNCTION_ARGS)
if (zone->month != 0 || zone->day != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("interval time zone \"%s\" must not include months or days",
- DatumGetCString(DirectFunctionCall1(interval_out,
+ errmsg("interval time zone \"%s\" must not include months or days",
+ DatumGetCString(DirectFunctionCall1(interval_out,
PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index 59805047b20..7a08b9279d9 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -945,6 +945,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
break;
case DTK_TIME:
+
/*
* This might be an ISO time following a "t" field.
*/
@@ -2180,7 +2181,7 @@ DecodeDate(char *str, int fmask, int *tmask, bool *is2digits,
str++;
if (*str == '\0')
- return DTERR_BAD_FORMAT; /* end of string after separator */
+ return DTERR_BAD_FORMAT; /* end of string after separator */
field[nf] = str;
if (isdigit((unsigned char) *str))
@@ -2894,7 +2895,7 @@ DecodeInterval(char **field, int *ftype, int nf, int range,
Assert(*field[i] == '-' || *field[i] == '+');
/*
- * Check for signed hh:mm or hh:mm:ss. If so, process exactly
+ * Check for signed hh:mm or hh:mm:ss. If so, process exactly
* like DTK_TIME case above, plus handling the sign.
*/
if (strchr(field[i] + 1, ':') != NULL &&
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 81e3329ef60..7b854062f0d 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1045,7 +1045,6 @@ suff_search(char *str, KeySuffix *suf, int type)
static void
NUMDesc_prepare(NUMDesc *num, FormatNode *n)
{
-
if (n->type != NODE_TYPE_ACTION)
return;
@@ -2535,7 +2534,7 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
strcpy(s, str_toupper_z(localized_full_months[tm->tm_mon - 1], collid));
else
sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
- asc_toupper_z(months_full[tm->tm_mon - 1]));
+ asc_toupper_z(months_full[tm->tm_mon - 1]));
s += strlen(s);
break;
case DCH_Month:
@@ -3561,17 +3560,17 @@ do_to_timestamp(text *date_txt, text *fmt,
}
else
/* find century year for dates ending in "00" */
- tm->tm_year = tmfc.cc * 100 + ((tmfc.cc >= 0) ? 0 : 1);
+ tm->tm_year = tmfc.cc * 100 + ((tmfc.cc >= 0) ? 0 : 1);
}
else
- /* If a 4-digit year is provided, we use that and ignore CC. */
+ /* If a 4-digit year is provided, we use that and ignore CC. */
{
tm->tm_year = tmfc.year;
if (tmfc.bc && tm->tm_year > 0)
tm->tm_year = -(tm->tm_year - 1);
}
}
- else if (tmfc.cc) /* use first year of century */
+ else if (tmfc.cc) /* use first year of century */
{
if (tmfc.bc)
tmfc.cc = -tmfc.cc;
@@ -3606,7 +3605,7 @@ do_to_timestamp(text *date_txt, text *fmt,
if (tmfc.w)
tmfc.dd = (tmfc.w - 1) * 7 + 1;
if (tmfc.d)
- tm->tm_wday = tmfc.d - 1; /* convert to native numbering */
+ tm->tm_wday = tmfc.d - 1; /* convert to native numbering */
if (tmfc.dd)
tm->tm_mday = tmfc.dd;
if (tmfc.ddd)
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index 507c91ff97b..aaf99bddf27 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -46,7 +46,7 @@ typedef enum /* contexts of JSON parser */
JSON_PARSE_OBJECT_NEXT, /* saw object value, expecting ',' or '}' */
JSON_PARSE_OBJECT_COMMA, /* saw object ',', expecting next label */
JSON_PARSE_END /* saw the end of a document, expect nothing */
-} JsonParseContext;
+} JsonParseContext;
static inline void json_lex(JsonLexContext *lex);
static inline void json_lex_string(JsonLexContext *lex);
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 03378a3ea9b..dd625a4e47f 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -96,7 +96,7 @@ typedef enum
JSON_SEARCH_OBJECT = 1,
JSON_SEARCH_ARRAY,
JSON_SEARCH_PATH
-} JsonSearch;
+} JsonSearch;
/* state for json_object_keys */
typedef struct okeysState
@@ -682,10 +682,10 @@ get_array_start(void *state)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("cannot extract field from a non-object")));
- /*
- * initialize array count for this nesting level
- * Note: the lex_level seen by array_start is one less than that seen by
- * the elements of the array.
+
+ /*
+ * initialize array count for this nesting level Note: the lex_level seen
+ * by array_start is one less than that seen by the elements of the array.
*/
if (_state->search_type == JSON_SEARCH_PATH &&
lex_level < _state->npath)
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 4e38d7c06c2..829ce59888c 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -95,11 +95,11 @@ pg_signal_backend(int pid, int sig)
/*
* BackendPidGetProc returns NULL if the pid isn't valid; but by the time
- * we reach kill(), a process for which we get a valid proc here might have
- * terminated on its own. There's no way to acquire a lock on an arbitrary
- * process to prevent that. But since so far all the callers of this
- * mechanism involve some request for ending the process anyway, that it
- * might end on its own first is not a problem.
+ * we reach kill(), a process for which we get a valid proc here might
+ * have terminated on its own. There's no way to acquire a lock on an
+ * arbitrary process to prevent that. But since so far all the callers of
+ * this mechanism involve some request for ending the process anyway, that
+ * it might end on its own first is not a problem.
*/
if (proc == NULL)
{
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index b343b5fe0f6..b4d639428ac 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -3402,7 +3402,7 @@ init_var_from_num(Numeric num, NumericVar *dest)
dest->sign = NUMERIC_SIGN(num);
dest->dscale = NUMERIC_DSCALE(num);
dest->digits = NUMERIC_DIGITS(num);
- dest->buf = NULL; /* digits array is not palloc'd */
+ dest->buf = NULL; /* digits array is not palloc'd */
}
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 890aa198167..7081b00500b 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -718,13 +718,13 @@ cache_locale_time(void)
* Convert a Windows setlocale() argument to a Unix-style one.
*
* Regardless of platform, we install message catalogs under a Unix-style
- * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings
+ * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings
* following that style will elicit localized interface strings.
*
* Before Visual Studio 2012 (msvcr110.dll), Windows setlocale() accepted "C"
* (but not "c") and strings of the form <Language>[_<Country>][.<CodePage>],
* case-insensitive. setlocale() returns the fully-qualified form; for
- * example, setlocale("thaI") returns "Thai_Thailand.874". Internally,
+ * example, setlocale("thaI") returns "Thai_Thailand.874". Internally,
* setlocale() and _create_locale() select a "locale identifier"[1] and store
* it in an undocumented _locale_t field. From that LCID, we can retrieve the
* ISO 639 language and the ISO 3166 country. Character encoding does not
@@ -735,12 +735,12 @@ cache_locale_time(void)
* Studio 2012, setlocale() accepts locale names in addition to the strings it
* accepted historically. It does not standardize them; setlocale("Th-tH")
* returns "Th-tH". setlocale(category, "") still returns a traditional
- * string. Furthermore, msvcr110.dll changed the undocumented _locale_t
+ * string. Furthermore, msvcr110.dll changed the undocumented _locale_t
* content to carry locale names instead of locale identifiers.
*
* MinGW headers declare _create_locale(), but msvcrt.dll lacks that symbol.
* IsoLocaleName() always fails in a MinGW-built postgres.exe, so only
- * Unix-style values of the lc_messages GUC can elicit localized messages. In
+ * Unix-style values of the lc_messages GUC can elicit localized messages. In
* particular, every lc_messages setting that initdb can select automatically
* will yield only C-locale messages. XXX This could be fixed by running the
* fully-qualified locale name through a lookup table.
@@ -784,7 +784,7 @@ IsoLocaleName(const char *winlocname)
* need not standardize letter case here. So long as we do not ship
* message catalogs for which it would matter, we also need not
* translate the script/variant portion, e.g. uz-Cyrl-UZ to
- * uz_UZ@cyrillic. Simply replace the hyphen with an underscore.
+ * uz_UZ@cyrillic. Simply replace the hyphen with an underscore.
*
* Note that the locale name can be less-specific than the value we
* would derive under earlier Visual Studio releases. For example,
diff --git a/src/backend/utils/adt/pseudotypes.c b/src/backend/utils/adt/pseudotypes.c
index fe9d18d0f44..04650d8ba4a 100644
--- a/src/backend/utils/adt/pseudotypes.c
+++ b/src/backend/utils/adt/pseudotypes.c
@@ -293,7 +293,7 @@ trigger_out(PG_FUNCTION_ARGS)
/*
- * event_trigger_in - input routine for pseudo-type event_trigger.
+ * event_trigger_in - input routine for pseudo-type event_trigger.
*/
Datum
event_trigger_in(PG_FUNCTION_ARGS)
diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c
index 84a4aca16c0..cd5c5f6621c 100644
--- a/src/backend/utils/adt/rangetypes.c
+++ b/src/backend/utils/adt/rangetypes.c
@@ -737,7 +737,7 @@ bounds_adjacent(TypeCacheEntry *typcache, RangeBound boundA, RangeBound boundB)
cmp = range_cmp_bound_values(typcache, &boundA, &boundB);
if (cmp < 0)
{
- RangeType *r;
+ RangeType *r;
/*
* Bounds do not overlap; see if there are points in between.
@@ -764,7 +764,7 @@ bounds_adjacent(TypeCacheEntry *typcache, RangeBound boundA, RangeBound boundB)
else if (cmp == 0)
return boundA.inclusive != boundB.inclusive;
else
- return false; /* bounds overlap */
+ return false; /* bounds overlap */
}
/* adjacent to (but not overlapping)? (internal version) */
@@ -1877,7 +1877,7 @@ range_parse_flags(const char *flags_str)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid range bound flags"),
- errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
+ errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
switch (flags_str[0])
{
@@ -1890,7 +1890,7 @@ range_parse_flags(const char *flags_str)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid range bound flags"),
- errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
+ errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
}
switch (flags_str[1])
@@ -1904,7 +1904,7 @@ range_parse_flags(const char *flags_str)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid range bound flags"),
- errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
+ errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
}
return flags;
diff --git a/src/backend/utils/adt/rangetypes_gist.c b/src/backend/utils/adt/rangetypes_gist.c
index ea1251a5e65..464b37fe1fd 100644
--- a/src/backend/utils/adt/rangetypes_gist.c
+++ b/src/backend/utils/adt/rangetypes_gist.c
@@ -677,6 +677,7 @@ range_gist_same(PG_FUNCTION_ARGS)
else
{
TypeCacheEntry *typcache;
+
typcache = range_get_typcache(fcinfo, RangeTypeGetOid(r1));
*result = range_eq_internal(typcache, r1, r2);
@@ -781,36 +782,36 @@ range_gist_consistent_int(TypeCacheEntry *typcache, StrategyNumber strategy,
if (RangeIsEmpty(key) || RangeIsEmpty(DatumGetRangeType(query)))
return false;
return (!range_overright_internal(typcache, key,
- DatumGetRangeType(query)));
+ DatumGetRangeType(query)));
case RANGESTRAT_OVERLEFT:
if (RangeIsEmpty(key) || RangeIsEmpty(DatumGetRangeType(query)))
return false;
return (!range_after_internal(typcache, key,
- DatumGetRangeType(query)));
+ DatumGetRangeType(query)));
case RANGESTRAT_OVERLAPS:
return range_overlaps_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_OVERRIGHT:
if (RangeIsEmpty(key) || RangeIsEmpty(DatumGetRangeType(query)))
return false;
return (!range_before_internal(typcache, key,
- DatumGetRangeType(query)));
+ DatumGetRangeType(query)));
case RANGESTRAT_AFTER:
if (RangeIsEmpty(key) || RangeIsEmpty(DatumGetRangeType(query)))
return false;
return (!range_overleft_internal(typcache, key,
- DatumGetRangeType(query)));
+ DatumGetRangeType(query)));
case RANGESTRAT_ADJACENT:
if (RangeIsEmpty(key) || RangeIsEmpty(DatumGetRangeType(query)))
return false;
if (range_adjacent_internal(typcache, key,
- DatumGetRangeType(query)))
+ DatumGetRangeType(query)))
return true;
return range_overlaps_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_CONTAINS:
return range_contains_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_CONTAINED_BY:
/*
@@ -821,7 +822,7 @@ range_gist_consistent_int(TypeCacheEntry *typcache, StrategyNumber strategy,
if (RangeIsOrContainsEmpty(key))
return true;
return range_overlaps_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_CONTAINS_ELEM:
return range_contains_elem_internal(typcache, key, query);
case RANGESTRAT_EQ:
@@ -833,10 +834,10 @@ range_gist_consistent_int(TypeCacheEntry *typcache, StrategyNumber strategy,
if (RangeIsEmpty(DatumGetRangeType(query)))
return RangeIsOrContainsEmpty(key);
return range_contains_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
default:
elog(ERROR, "unrecognized range strategy: %d", strategy);
- return false; /* keep compiler quiet */
+ return false; /* keep compiler quiet */
}
}
@@ -851,35 +852,35 @@ range_gist_consistent_leaf(TypeCacheEntry *typcache, StrategyNumber strategy,
{
case RANGESTRAT_BEFORE:
return range_before_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_OVERLEFT:
return range_overleft_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_OVERLAPS:
return range_overlaps_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_OVERRIGHT:
return range_overright_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_AFTER:
return range_after_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_ADJACENT:
return range_adjacent_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_CONTAINS:
return range_contains_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_CONTAINED_BY:
return range_contained_by_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_CONTAINS_ELEM:
return range_contains_elem_internal(typcache, key, query);
case RANGESTRAT_EQ:
return range_eq_internal(typcache, key, DatumGetRangeType(query));
default:
elog(ERROR, "unrecognized range strategy: %d", strategy);
- return false; /* keep compiler quiet */
+ return false; /* keep compiler quiet */
}
}
diff --git a/src/backend/utils/adt/rangetypes_selfuncs.c b/src/backend/utils/adt/rangetypes_selfuncs.c
index c450c6a1580..074d326b121 100644
--- a/src/backend/utils/adt/rangetypes_selfuncs.c
+++ b/src/backend/utils/adt/rangetypes_selfuncs.c
@@ -42,19 +42,19 @@ static float8 get_position(TypeCacheEntry *typcache, RangeBound *value,
RangeBound *hist1, RangeBound *hist2);
static float8 get_len_position(double value, double hist1, double hist2);
static float8 get_distance(TypeCacheEntry *typcache, RangeBound *bound1,
- RangeBound *bound2);
+ RangeBound *bound2);
static int length_hist_bsearch(Datum *length_hist_values,
int length_hist_nvalues, double value, bool equal);
static double calc_length_hist_frac(Datum *length_hist_values,
- int length_hist_nvalues, double length1, double length2, bool equal);
+ int length_hist_nvalues, double length1, double length2, bool equal);
static double calc_hist_selectivity_contained(TypeCacheEntry *typcache,
RangeBound *lower, RangeBound *upper,
RangeBound *hist_lower, int hist_nvalues,
- Datum *length_hist_values, int length_hist_nvalues);
+ Datum *length_hist_values, int length_hist_nvalues);
static double calc_hist_selectivity_contains(TypeCacheEntry *typcache,
RangeBound *lower, RangeBound *upper,
RangeBound *hist_lower, int hist_nvalues,
- Datum *length_hist_values, int length_hist_nvalues);
+ Datum *length_hist_values, int length_hist_nvalues);
/*
* Returns a default selectivity estimate for given operator, when we don't
@@ -73,6 +73,7 @@ default_range_selectivity(Oid operator)
return 0.005;
case OID_RANGE_CONTAINS_ELEM_OP:
+
/*
* "range @> elem" is more or less identical to a scalar
* inequality "A >= b AND A <= c".
@@ -162,8 +163,8 @@ rangesel(PG_FUNCTION_ARGS)
*
* If the operator is "range @> element", the constant should be of the
* element type of the range column. Convert it to a range that includes
- * only that single point, so that we don't need special handling for
- * that in what follows.
+ * only that single point, so that we don't need special handling for that
+ * in what follows.
*/
if (operator == OID_RANGE_CONTAINS_ELEM_OP)
{
@@ -171,7 +172,9 @@ rangesel(PG_FUNCTION_ARGS)
if (((Const *) other)->consttype == typcache->rngelemtype->type_id)
{
- RangeBound lower, upper;
+ RangeBound lower,
+ upper;
+
lower.inclusive = true;
lower.val = ((Const *) other)->constvalue;
lower.infinite = false;
@@ -193,8 +196,8 @@ rangesel(PG_FUNCTION_ARGS)
/*
* If we got a valid constant on one side of the operator, proceed to
- * estimate using statistics. Otherwise punt and return a default
- * constant estimate.
+ * estimate using statistics. Otherwise punt and return a default constant
+ * estimate.
*/
if (constrange)
selec = calc_rangesel(typcache, &vardata, constrange, operator);
@@ -214,7 +217,8 @@ calc_rangesel(TypeCacheEntry *typcache, VariableStatData *vardata,
{
double hist_selec;
double selec;
- float4 empty_frac, null_frac;
+ float4 empty_frac,
+ null_frac;
/*
* First look up the fraction of NULLs and empty ranges from pg_statistic.
@@ -231,13 +235,13 @@ calc_rangesel(TypeCacheEntry *typcache, VariableStatData *vardata,
/* Try to get fraction of empty ranges */
if (get_attstatsslot(vardata->statsTuple,
vardata->atttype, vardata->atttypmod,
- STATISTIC_KIND_RANGE_LENGTH_HISTOGRAM, InvalidOid,
+ STATISTIC_KIND_RANGE_LENGTH_HISTOGRAM, InvalidOid,
NULL,
NULL, NULL,
&numbers, &nnumbers))
{
if (nnumbers != 1)
- elog(ERROR, "invalid empty fraction statistic"); /* shouldn't happen */
+ elog(ERROR, "invalid empty fraction statistic"); /* shouldn't happen */
empty_frac = numbers[0];
}
else
@@ -250,8 +254,8 @@ calc_rangesel(TypeCacheEntry *typcache, VariableStatData *vardata,
{
/*
* No stats are available. Follow through the calculations below
- * anyway, assuming no NULLs and no empty ranges. This still allows
- * us to give a better-than-nothing estimate based on whether the
+ * anyway, assuming no NULLs and no empty ranges. This still allows us
+ * to give a better-than-nothing estimate based on whether the
* constant is an empty range or not.
*/
null_frac = 0.0;
@@ -278,6 +282,7 @@ calc_rangesel(TypeCacheEntry *typcache, VariableStatData *vardata,
case OID_RANGE_CONTAINED_OP:
case OID_RANGE_LESS_EQUAL_OP:
case OID_RANGE_GREATER_EQUAL_OP:
+
/*
* these return true when both args are empty, false if only
* one is empty
@@ -293,7 +298,7 @@ calc_rangesel(TypeCacheEntry *typcache, VariableStatData *vardata,
case OID_RANGE_CONTAINS_ELEM_OP:
default:
elog(ERROR, "unexpected operator %u", operator);
- selec = 0.0; /* keep compiler quiet */
+ selec = 0.0; /* keep compiler quiet */
break;
}
}
@@ -406,7 +411,7 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
/* Extract the bounds of the constant value. */
range_deserialize(typcache, constval, &const_lower, &const_upper, &empty);
- Assert (!empty);
+ Assert(!empty);
/*
* Calculate selectivity comparing the lower or upper bound of the
@@ -415,6 +420,7 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
switch (operator)
{
case OID_RANGE_LESS_OP:
+
/*
* The regular b-tree comparison operators (<, <=, >, >=) compare
* the lower bounds first, and the upper bounds for values with
@@ -476,11 +482,13 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
case OID_RANGE_OVERLAP_OP:
case OID_RANGE_CONTAINS_ELEM_OP:
+
/*
* A && B <=> NOT (A << B OR A >> B).
*
- * Since A << B and A >> B are mutually exclusive events we can sum
- * their probabilities to find probability of (A << B OR A >> B).
+ * Since A << B and A >> B are mutually exclusive events we can
+ * sum their probabilities to find probability of (A << B OR A >>
+ * B).
*
* "range @> elem" is equivalent to "range && [elem,elem]". The
* caller already constructed the singular range from the element
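Restating the identity from the comment in the hunk above (an editorial note, not part of the patch), with P(.) denoting the fraction of rows satisfying a condition:

$$ P(A \mathbin{\&\&} B) \;=\; 1 - P(A \ll B \ \lor\ A \gg B) \;=\; 1 - \bigl(P(A \ll B) + P(A \gg B)\bigr), $$

since the strictly-left-of and strictly-right-of events cannot both hold. The hist_selec computation in the next hunk adds the two one-sided estimates and then takes the complement.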
@@ -491,15 +499,15 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
nhist, false);
hist_selec +=
(1.0 - calc_hist_selectivity_scalar(typcache, &const_upper, hist_lower,
- nhist, true));
+ nhist, true));
hist_selec = 1.0 - hist_selec;
break;
case OID_RANGE_CONTAINS_OP:
hist_selec =
calc_hist_selectivity_contains(typcache, &const_lower,
- &const_upper, hist_lower, nhist,
- length_hist_values, length_nhist);
+ &const_upper, hist_lower, nhist,
+ length_hist_values, length_nhist);
break;
case OID_RANGE_CONTAINED_OP:
@@ -517,20 +525,20 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
{
hist_selec =
1.0 - calc_hist_selectivity_scalar(typcache, &const_lower,
- hist_lower, nhist, false);
+ hist_lower, nhist, false);
}
else
{
hist_selec =
calc_hist_selectivity_contained(typcache, &const_lower,
- &const_upper, hist_lower, nhist,
- length_hist_values, length_nhist);
+ &const_upper, hist_lower, nhist,
+ length_hist_values, length_nhist);
}
break;
default:
elog(ERROR, "unknown range operator %u", operator);
- hist_selec = -1.0; /* keep compiler quiet */
+ hist_selec = -1.0; /* keep compiler quiet */
break;
}
@@ -546,7 +554,7 @@ static double
calc_hist_selectivity_scalar(TypeCacheEntry *typcache, RangeBound *constbound,
RangeBound *hist, int hist_nvalues, bool equal)
{
- Selectivity selec;
+ Selectivity selec;
int index;
/*
@@ -576,7 +584,7 @@ calc_hist_selectivity_scalar(TypeCacheEntry *typcache, RangeBound *constbound,
*/
static int
rbound_bsearch(TypeCacheEntry *typcache, RangeBound *value, RangeBound *hist,
- int hist_length, bool equal)
+ int hist_length, bool equal)
{
int lower = -1,
upper = hist_length - 1,
@@ -613,7 +621,7 @@ length_hist_bsearch(Datum *length_hist_values, int length_hist_nvalues,
while (lower < upper)
{
- double middleval;
+ double middleval;
middle = (lower + upper + 1) / 2;
@@ -659,7 +667,7 @@ get_position(TypeCacheEntry *typcache, RangeBound *value, RangeBound *hist1,
hist2->val,
hist1->val));
if (bin_width <= 0.0)
- return 0.5; /* zero width bin */
+ return 0.5; /* zero width bin */
position = DatumGetFloat8(FunctionCall2Coll(
&typcache->rng_subdiff_finfo,
@@ -724,9 +732,8 @@ get_len_position(double value, double hist1, double hist2)
else if (is_infinite(hist1) && !is_infinite(hist2))
{
/*
- * Lower bin boundary is -infinite, upper is finite.
- * Return 1.0 to indicate the value is infinitely far from the lower
- * bound.
+ * Lower bin boundary is -infinite, upper is finite. Return 1.0 to
+ * indicate the value is infinitely far from the lower bound.
*/
return 1.0;
}
@@ -740,8 +747,8 @@ get_len_position(double value, double hist1, double hist2)
/*
* If both bin boundaries are infinite, they should be equal to each
* other, and the value should also be infinite and equal to both
- * bounds. (But don't Assert that, to avoid crashing unnecessarily
- * if the caller messes up)
+ * bounds. (But don't Assert that, to avoid crashing unnecessarily if
+ * the caller messes up)
*
* Assume the value to lie in the middle of the infinite bounds.
*/
@@ -755,7 +762,7 @@ get_len_position(double value, double hist1, double hist2)
static float8
get_distance(TypeCacheEntry *typcache, RangeBound *bound1, RangeBound *bound2)
{
- bool has_subdiff = OidIsValid(typcache->rng_subdiff_finfo.fn_oid);
+ bool has_subdiff = OidIsValid(typcache->rng_subdiff_finfo.fn_oid);
if (!bound1->infinite && !bound2->infinite)
{
@@ -797,7 +804,10 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
double length1, double length2, bool equal)
{
double frac;
- double A, B, PA, PB;
+ double A,
+ B,
+ PA,
+ PB;
double pos;
int i;
double area;
@@ -805,7 +815,7 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
Assert(length2 >= length1);
if (length2 < 0.0)
- return 0.0; /* shouldn't happen, but doesn't hurt to check */
+ return 0.0; /* shouldn't happen, but doesn't hurt to check */
/* All lengths in the table are <= infinite. */
if (is_infinite(length2) && equal)
@@ -815,25 +825,25 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
* The average of a function between A and B can be calculated by the
* formula:
*
- * B
- * 1 /
- * ------- | P(x)dx
- * B - A /
- * A
+ * B
+ * 1 /
+ * ------- | P(x)dx
+ * B - A /
+ * A
*
* The geometrical interpretation of the integral is the area under the
* graph of P(x). P(x) is defined by the length histogram. We calculate
* the area in a piecewise fashion, iterating through the length histogram
* bins. Each bin is a trapezoid:
*
- * P(x2)
- * /|
- * / |
+ * P(x2)
+ * /|
+ * / |
* P(x1)/ |
- * | |
- * | |
- * ---+---+--
- * x1 x2
+ * | |
+ * | |
+ * ---+---+--
+ * x1 x2
*
* where x1 and x2 are the boundaries of the current histogram, and P(x1)
* and P(x1) are the cumulative fraction of tuples at the boundaries.
@@ -845,7 +855,7 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
* boundary to calculate P(x1). Likewise for the last bin: we use linear
* interpolation to calculate P(x2). For the bins in between, x1 and x2
* lie on histogram bin boundaries, so P(x1) and P(x2) are simply:
- * P(x1) = (bin index) / (number of bins)
+ * P(x1) = (bin index) / (number of bins)
* P(x2) = (bin index + 1 / (number of bins)
*/
@@ -870,9 +880,9 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
B = length1;
/*
- * In the degenerate case that length1 == length2, simply return P(length1).
- * This is not merely an optimization: if length1 == length2, we'd divide
- * by zero later on.
+ * In the degenerate case that length1 == length2, simply return
+ * P(length1). This is not merely an optimization: if length1 == length2,
+ * we'd divide by zero later on.
*/
if (length2 == length1)
return PB;
@@ -885,32 +895,34 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
area = 0.0;
for (; i < length_hist_nvalues - 1; i++)
{
- double bin_upper = DatumGetFloat8(length_hist_values[i + 1]);
+ double bin_upper = DatumGetFloat8(length_hist_values[i + 1]);
/* check if we've reached the last bin */
if (!(bin_upper < length2 || (equal && bin_upper <= length2)))
break;
/* the upper bound of previous bin is the lower bound of this bin */
- A = B; PA = PB;
+ A = B;
+ PA = PB;
B = bin_upper;
PB = (double) i / (double) (length_hist_nvalues - 1);
/*
* Add the area of this trapezoid to the total. The point of the
- * if-check is to avoid NaN, in the corner case that PA == PB == 0, and
- * B - A == Inf. The area of a zero-height trapezoid (PA == PB == 0) is
- * zero, regardless of the width (B - A).
+ * if-check is to avoid NaN, in the corner case that PA == PB == 0,
+ * and B - A == Inf. The area of a zero-height trapezoid (PA == PB ==
+ * 0) is zero, regardless of the width (B - A).
*/
if (PA > 0 || PB > 0)
area += 0.5 * (PB + PA) * (B - A);
}
/* Last bin */
- A = B; PA = PB;
+ A = B;
+ PA = PB;
- B = length2; /* last bin ends at the query upper bound */
+ B = length2; /* last bin ends at the query upper bound */
if (i >= length_hist_nvalues - 1)
pos = 0.0;
else
@@ -953,8 +965,8 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
static double
calc_hist_selectivity_contained(TypeCacheEntry *typcache,
RangeBound *lower, RangeBound *upper,
- RangeBound *hist_lower, int hist_nvalues,
- Datum *length_hist_values, int length_hist_nvalues)
+ RangeBound *hist_lower, int hist_nvalues,
+ Datum *length_hist_values, int length_hist_nvalues)
{
int i,
upper_index;
@@ -1013,9 +1025,10 @@ calc_hist_selectivity_contained(TypeCacheEntry *typcache,
if (range_cmp_bounds(typcache, &hist_lower[i], lower) < 0)
{
dist = get_distance(typcache, lower, upper);
+
/*
- * Subtract from bin_width the portion of this bin that we want
- * to ignore.
+ * Subtract from bin_width the portion of this bin that we want to
+ * ignore.
*/
bin_width -= get_position(typcache, lower, &hist_lower[i],
&hist_lower[i + 1]);
@@ -1035,8 +1048,8 @@ calc_hist_selectivity_contained(TypeCacheEntry *typcache,
prev_dist, dist, true);
/*
- * Add the fraction of tuples in this bin, with a suitable length,
- * to the total.
+ * Add the fraction of tuples in this bin, with a suitable length, to
+ * the total.
*/
sum_frac += length_hist_frac * bin_width / (double) (hist_nvalues - 1);
@@ -1063,7 +1076,7 @@ static double
calc_hist_selectivity_contains(TypeCacheEntry *typcache,
RangeBound *lower, RangeBound *upper,
RangeBound *hist_lower, int hist_nvalues,
- Datum *length_hist_values, int length_hist_nvalues)
+ Datum *length_hist_values, int length_hist_nvalues)
{
int i,
lower_index;
@@ -1083,17 +1096,17 @@ calc_hist_selectivity_contains(TypeCacheEntry *typcache,
*/
if (lower_index >= 0 && lower_index < hist_nvalues - 1)
lower_bin_width = get_position(typcache, lower, &hist_lower[lower_index],
- &hist_lower[lower_index + 1]);
+ &hist_lower[lower_index + 1]);
else
lower_bin_width = 0.0;
/*
* Loop through all the lower bound bins, smaller than the query lower
- * bound. In the loop, dist and prev_dist are the distance of the "current"
- * bin's lower and upper bounds from the constant upper bound. We begin
- * from query lower bound, and walk backwards, so the first bin's upper
- * bound is the query lower bound, and its distance to the query upper
- * bound is the length of the query range.
+ * bound. In the loop, dist and prev_dist are the distance of the
+ * "current" bin's lower and upper bounds from the constant upper bound.
+ * We begin from query lower bound, and walk backwards, so the first bin's
+ * upper bound is the query lower bound, and its distance to the query
+ * upper bound is the length of the query range.
*
* bin_width represents the width of the current bin. Normally it is 1.0,
* meaning a full width bin, except for the first bin, which is only
@@ -1108,9 +1121,9 @@ calc_hist_selectivity_contains(TypeCacheEntry *typcache,
double length_hist_frac;
/*
- * dist -- distance from upper bound of query range to current
- * value of lower bound histogram or lower bound of query range (if
- * we've reach it).
+ * dist -- distance from upper bound of query range to current value
+ * of lower bound histogram or lower bound of query range (if we've
+ * reach it).
*/
dist = get_distance(typcache, &hist_lower[i], upper);
diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c
index 9a7f20d9f37..0d47854974e 100644
--- a/src/backend/utils/adt/rangetypes_spgist.c
+++ b/src/backend/utils/adt/rangetypes_spgist.c
@@ -151,8 +151,8 @@ spg_range_quad_choose(PG_FUNCTION_ARGS)
/*
* A node with no centroid divides ranges purely on whether they're empty
- * or not. All empty ranges go to child node 0, all non-empty ranges go
- * to node 1.
+ * or not. All empty ranges go to child node 0, all non-empty ranges go to
+ * node 1.
*/
if (!in->hasPrefix)
{
@@ -307,8 +307,8 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
/*
* For adjacent search we need also previous centroid (if any) to improve
- * the precision of the consistent check. In this case needPrevious flag is
- * set and centroid is passed into reconstructedValues. This is not the
+ * the precision of the consistent check. In this case needPrevious flag
+ * is set and centroid is passed into reconstructedValues. This is not the
* intended purpose of reconstructedValues (because we already have the
* full value available at the leaf), but it's a convenient place to store
* state while traversing the tree.
@@ -370,18 +370,20 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_CONTAINS:
+
/*
- * All ranges contain an empty range. Only non-empty ranges
- * can contain a non-empty range.
+ * All ranges contain an empty range. Only non-empty
+ * ranges can contain a non-empty range.
*/
if (!empty)
which &= (1 << 2);
break;
case RANGESTRAT_CONTAINED_BY:
+
/*
- * Only an empty range is contained by an empty range. Both
- * empty and non-empty ranges can be contained by a
+ * Only an empty range is contained by an empty range.
+ * Both empty and non-empty ranges can be contained by a
* non-empty range.
*/
if (empty)
@@ -438,11 +440,13 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
upper;
bool empty;
RangeType *range = NULL;
+
/* Restrictions on range bounds according to scan strategy */
RangeBound *minLower = NULL,
*maxLower = NULL,
*minUpper = NULL,
*maxUpper = NULL;
+
/* Are the restrictions on range bounds inclusive? */
bool inclusive = true;
bool strictEmpty = true;
@@ -482,9 +486,9 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
/*
* Most strategies are handled by forming a bounding box from the
- * search key, defined by a minLower, maxLower, minUpper, maxUpper.
- * Some modify 'which' directly, to specify exactly which quadrants
- * need to be visited.
+ * search key, defined by a minLower, maxLower, minUpper,
+ * maxUpper. Some modify 'which' directly, to specify exactly
+ * which quadrants need to be visited.
*
* For most strategies, nothing matches an empty search key, and
* an empty range never matches a non-empty key. If a strategy
@@ -494,6 +498,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
switch (strategy)
{
case RANGESTRAT_BEFORE:
+
/*
* Range A is before range B if upper bound of A is lower
* than lower bound of B.
@@ -503,6 +508,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_OVERLEFT:
+
/*
* Range A is overleft to range B if upper bound of A is
* less or equal to upper bound of B.
@@ -511,6 +517,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_OVERLAPS:
+
/*
* Non-empty ranges overlap, if lower bound of each range
* is lower or equal to upper bound of the other range.
@@ -520,6 +527,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_OVERRIGHT:
+
/*
* Range A is overright to range B if lower bound of A is
* greater or equal to lower bound of B.
@@ -528,6 +536,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_AFTER:
+
/*
* Range A is after range B if lower bound of A is greater
* than upper bound of B.
@@ -538,12 +547,13 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
case RANGESTRAT_ADJACENT:
if (empty)
- break; /* Skip to strictEmpty check. */
+ break; /* Skip to strictEmpty check. */
/*
* which1 is bitmask for possibility to be adjacent with
* lower bound of argument. which2 is bitmask for
- * possibility to be adjacent with upper bound of argument.
+ * possibility to be adjacent with upper bound of
+ * argument.
*/
which1 = which2 = (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
@@ -622,9 +632,9 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
/*
* For a range's lower bound to be adjacent to the
* argument's upper bound, it will be found along the
- * line adjacent to (and just right of)
- * X=upper. Therefore, if the argument's upper bound is
- * less than (and not adjacent to) the centroid's upper
+ * line adjacent to (and just right of) X=upper.
+ * Therefore, if the argument's upper bound is less
+ * than (and not adjacent to) the centroid's upper
* bound, the line falls in quadrants 3 and 4; if
* greater or equal to, the line falls in quadrants 1
* and 2.
@@ -649,6 +659,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_CONTAINS:
+
/*
* Non-empty range A contains non-empty range B if lower
* bound of A is lower or equal to lower bound of range B
@@ -682,6 +693,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_EQ:
+
/*
* Equal range can be only in the same quadrant where
* argument would be placed to.
@@ -717,10 +729,10 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
if (minLower)
{
/*
- * If the centroid's lower bound is less than or equal to
- * the minimum lower bound, anything in the 3rd and 4th
- * quadrants will have an even smaller lower bound, and thus
- * can't match.
+ * If the centroid's lower bound is less than or equal to the
+ * minimum lower bound, anything in the 3rd and 4th quadrants
+ * will have an even smaller lower bound, and thus can't
+ * match.
*/
if (range_cmp_bounds(typcache, &centroidLower, minLower) <= 0)
which &= (1 << 1) | (1 << 2) | (1 << 5);
@@ -731,9 +743,9 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
* If the centroid's lower bound is greater than the maximum
* lower bound, anything in the 1st and 2nd quadrants will
* also have a greater than or equal lower bound, and thus
- * can't match. If the centroid's lower bound is equal to
- * the maximum lower bound, we can still exclude the 1st and
- * 2nd quadrants if we're looking for a value strictly greater
+ * can't match. If the centroid's lower bound is equal to the
+ * maximum lower bound, we can still exclude the 1st and 2nd
+ * quadrants if we're looking for a value strictly greater
* than the maximum.
*/
int cmp;
@@ -745,10 +757,10 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
if (minUpper)
{
/*
- * If the centroid's upper bound is less than or equal to
- * the minimum upper bound, anything in the 2nd and 3rd
- * quadrants will have an even smaller upper bound, and thus
- * can't match.
+ * If the centroid's upper bound is less than or equal to the
+ * minimum upper bound, anything in the 2nd and 3rd quadrants
+ * will have an even smaller upper bound, and thus can't
+ * match.
*/
if (range_cmp_bounds(typcache, &centroidUpper, minUpper) <= 0)
which &= (1 << 1) | (1 << 4) | (1 << 5);
@@ -759,9 +771,9 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
* If the centroid's upper bound is greater than the maximum
* upper bound, anything in the 1st and 4th quadrants will
* also have a greater than or equal upper bound, and thus
- * can't match. If the centroid's upper bound is equal to
- * the maximum upper bound, we can still exclude the 1st and
- * 4th quadrants if we're looking for a value strictly greater
+ * can't match. If the centroid's upper bound is equal to the
+ * maximum upper bound, we can still exclude the 1st and 4th
+ * quadrants if we're looking for a value strictly greater
* than the maximum.
*/
int cmp;
@@ -848,7 +860,7 @@ spg_range_quad_leaf_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_ADJACENT:
res = range_adjacent_internal(typcache, leafRange,
- DatumGetRangeType(keyDatum));
+ DatumGetRangeType(keyDatum));
break;
case RANGESTRAT_CONTAINS:
res = range_contains_internal(typcache, leafRange,
diff --git a/src/backend/utils/adt/rangetypes_typanalyze.c b/src/backend/utils/adt/rangetypes_typanalyze.c
index e111f8ff979..114bce015c6 100644
--- a/src/backend/utils/adt/rangetypes_typanalyze.c
+++ b/src/backend/utils/adt/rangetypes_typanalyze.c
@@ -29,8 +29,8 @@
#include "utils/builtins.h"
#include "utils/rangetypes.h"
-static int float8_qsort_cmp(const void *a1, const void *a2);
-static int range_bound_qsort_cmp(const void *a1, const void *a2, void *arg);
+static int float8_qsort_cmp(const void *a1, const void *a2);
+static int range_bound_qsort_cmp(const void *a1, const void *a2, void *arg);
static void compute_range_stats(VacAttrStats *stats,
AnalyzeAttrFetchFunc fetchfunc, int samplerows, double totalrows);
@@ -48,7 +48,7 @@ range_typanalyze(PG_FUNCTION_ARGS)
typcache = range_get_typcache(fcinfo, stats->attrtypid);
if (attr->attstattarget < 0)
- attr->attstattarget = default_statistics_target;
+ attr->attstattarget = default_statistics_target;
stats->compute_stats = compute_range_stats;
stats->extra_data = typcache;
@@ -81,9 +81,9 @@ float8_qsort_cmp(const void *a1, const void *a2)
static int
range_bound_qsort_cmp(const void *a1, const void *a2, void *arg)
{
- RangeBound *b1 = (RangeBound *)a1;
- RangeBound *b2 = (RangeBound *)a2;
- TypeCacheEntry *typcache = (TypeCacheEntry *)arg;
+ RangeBound *b1 = (RangeBound *) a1;
+ RangeBound *b2 = (RangeBound *) a2;
+ TypeCacheEntry *typcache = (TypeCacheEntry *) arg;
return range_cmp_bounds(typcache, b1, b2);
}
@@ -106,7 +106,8 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
int num_bins = stats->attr->attstattarget;
int num_hist;
float8 *lengths;
- RangeBound *lowers, *uppers;
+ RangeBound *lowers,
+ *uppers;
double total_width = 0;
/* Allocate memory to hold range bounds and lengths of the sample ranges. */
@@ -163,9 +164,9 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
* and lower bound values.
*/
length = DatumGetFloat8(FunctionCall2Coll(
- &typcache->rng_subdiff_finfo,
- typcache->rng_collation,
- upper.val, lower.val));
+ &typcache->rng_subdiff_finfo,
+ typcache->rng_collation,
+ upper.val, lower.val));
}
else
{
@@ -227,13 +228,13 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* The object of this loop is to construct ranges from first and
* last entries in lowers[] and uppers[] along with evenly-spaced
- * values in between. So the i'th value is a range of
- * lowers[(i * (nvals - 1)) / (num_hist - 1)] and
- * uppers[(i * (nvals - 1)) / (num_hist - 1)]. But computing that
- * subscript directly risks integer overflow when the stats target
- * is more than a couple thousand. Instead we add
- * (nvals - 1) / (num_hist - 1) to pos at each step, tracking the
- * integral and fractional parts of the sum separately.
+ * values in between. So the i'th value is a range of lowers[(i *
+ * (nvals - 1)) / (num_hist - 1)] and uppers[(i * (nvals - 1)) /
+ * (num_hist - 1)]. But computing that subscript directly risks
+ * integer overflow when the stats target is more than a couple
+ * thousand. Instead we add (nvals - 1) / (num_hist - 1) to pos
+ * at each step, tracking the integral and fractional parts of the
+ * sum separately.
*/
delta = (non_empty_cnt - 1) / (num_hist - 1);
deltafrac = (non_empty_cnt - 1) % (num_hist - 1);
@@ -242,7 +243,7 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
for (i = 0; i < num_hist; i++)
{
bound_hist_values[i] = PointerGetDatum(range_serialize(
- typcache, &lowers[pos], &uppers[pos], false));
+ typcache, &lowers[pos], &uppers[pos], false));
pos += delta;
posfrac += deltafrac;
if (posfrac >= (num_hist - 1))
@@ -281,10 +282,10 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
* The object of this loop is to copy the first and last lengths[]
* entries along with evenly-spaced values in between. So the i'th
* value is lengths[(i * (nvals - 1)) / (num_hist - 1)]. But
- * computing that subscript directly risks integer overflow when the
- * stats target is more than a couple thousand. Instead we add
- * (nvals - 1) / (num_hist - 1) to pos at each step, tracking the
- * integral and fractional parts of the sum separately.
+ * computing that subscript directly risks integer overflow when
+ * the stats target is more than a couple thousand. Instead we
+ * add (nvals - 1) / (num_hist - 1) to pos at each step, tracking
+ * the integral and fractional parts of the sum separately.
*/
delta = (non_empty_cnt - 1) / (num_hist - 1);
deltafrac = (non_empty_cnt - 1) % (num_hist - 1);
@@ -342,9 +343,10 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/* We found only nulls; assume the column is entirely null */
stats->stats_valid = true;
stats->stanullfrac = 1.0;
- stats->stawidth = 0; /* "unknown" */
- stats->stadistinct = 0.0; /* "unknown" */
+ stats->stawidth = 0; /* "unknown" */
+ stats->stadistinct = 0.0; /* "unknown" */
}
+
/*
* We don't need to bother cleaning up any of our temporary palloc's. The
* hashtable should also go away, as it used a child memory context.
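The re-wrapped comments above describe how compute_range_stats picks evenly spaced histogram entries without ever forming the product i * (nvals - 1), which could overflow for large statistics targets. A minimal standalone sketch of that pos/posfrac technique (illustrative values and output; not PostgreSQL source):

#include <stdio.h>

int
main(void)
{
	int			nvals = 1000003;	/* e.g. number of sampled bounds */
	int			num_hist = 101; /* e.g. number of histogram entries wanted */
	int			delta = (nvals - 1) / (num_hist - 1);
	int			deltafrac = (nvals - 1) % (num_hist - 1);
	int			pos = 0;
	int			posfrac = 0;

	for (int i = 0; i < num_hist; i++)
	{
		printf("hist[%d] takes subscript %d\n", i, pos);
		pos += delta;			/* integer part of the step */
		posfrac += deltafrac;	/* fractional part, scaled by (num_hist - 1) */
		if (posfrac >= num_hist - 1)
		{
			/* fractional part overflowed into a whole step; carry it */
			pos++;
			posfrac -= num_hist - 1;
		}
	}
	return 0;
}

After num_hist - 1 steps the accumulated carries bring pos exactly to nvals - 1, so the last histogram entry always comes from the last sample, matching the pos/posfrac bookkeeping shown in the hunks above.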
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index 700247e4741..0d1ff61bf9f 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -319,7 +319,7 @@ format_procedure_qualified(Oid procedure_oid)
* Routine to produce regprocedure names; see format_procedure above.
*
* force_qualify says whether to schema-qualify; if true, the name is always
- * qualified regardless of search_path visibility. Otherwise the name is only
+ * qualified regardless of search_path visibility. Otherwise the name is only
* qualified if the function is not in path.
*/
static char *
@@ -698,7 +698,8 @@ format_operator_internal(Oid operator_oid, bool force_qualify)
/*
* Would this oper be found (given the right args) by regoperatorin?
- * If not, or if caller explicitely requests it, we need to qualify it.
+ * If not, or if caller explicitely requests it, we need to qualify
+ * it.
*/
if (force_qualify || !OperatorIsVisible(operator_oid))
{
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 43228447ea4..65edc1fb04e 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -81,8 +81,8 @@
#define RI_PLAN_RESTRICT_UPD_CHECKREF 6
#define RI_PLAN_SETNULL_DEL_DOUPDATE 7
#define RI_PLAN_SETNULL_UPD_DOUPDATE 8
-#define RI_PLAN_SETDEFAULT_DEL_DOUPDATE 9
-#define RI_PLAN_SETDEFAULT_UPD_DOUPDATE 10
+#define RI_PLAN_SETDEFAULT_DEL_DOUPDATE 9
+#define RI_PLAN_SETDEFAULT_UPD_DOUPDATE 10
#define MAX_QUOTED_NAME_LEN (NAMEDATALEN*2+3)
#define MAX_QUOTED_REL_NAME_LEN (MAX_QUOTED_NAME_LEN*2)
@@ -135,7 +135,7 @@ typedef struct RI_ConstraintInfo
typedef struct RI_QueryKey
{
Oid constr_id; /* OID of pg_constraint entry */
- int32 constr_queryno; /* query type ID, see RI_PLAN_XXX above */
+ int32 constr_queryno; /* query type ID, see RI_PLAN_XXX above */
} RI_QueryKey;
@@ -403,7 +403,7 @@ RI_FKey_check(TriggerData *trigdata)
/* ----------
* The query string built is
* SELECT 1 FROM ONLY <pktable> x WHERE pkatt1 = $1 [AND ...]
- * FOR KEY SHARE OF x
+ * FOR KEY SHARE OF x
* The type id's for the $ parameters are those of the
* corresponding FK attributes.
* ----------
@@ -539,7 +539,7 @@ ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
/* ----------
* The query string built is
* SELECT 1 FROM ONLY <pktable> x WHERE pkatt1 = $1 [AND ...]
- * FOR KEY SHARE OF x
+ * FOR KEY SHARE OF x
* The type id's for the $ parameters are those of the
* PK attributes themselves.
* ----------
@@ -697,8 +697,8 @@ ri_restrict_del(TriggerData *trigdata, bool is_no_action)
}
/*
- * If another PK row now exists providing the old key values,
- * we should not do anything. However, this check should only be
+ * If another PK row now exists providing the old key values, we
+ * should not do anything. However, this check should only be
* made in the NO ACTION case; in RESTRICT cases we don't wish to
* allow another row to be substituted.
*/
@@ -729,7 +729,7 @@ ri_restrict_del(TriggerData *trigdata, bool is_no_action)
/* ----------
* The query string built is
* SELECT 1 FROM ONLY <fktable> x WHERE $1 = fkatt1 [AND ...]
- * FOR KEY SHARE OF x
+ * FOR KEY SHARE OF x
* The type id's for the $ parameters are those of the
* corresponding PK attributes.
* ----------
@@ -921,8 +921,8 @@ ri_restrict_upd(TriggerData *trigdata, bool is_no_action)
}
/*
- * If another PK row now exists providing the old key values,
- * we should not do anything. However, this check should only be
+ * If another PK row now exists providing the old key values, we
+ * should not do anything. However, this check should only be
* made in the NO ACTION case; in RESTRICT cases we don't wish to
* allow another row to be substituted.
*/
@@ -1850,7 +1850,7 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
* believe no check is necessary. So we need to do another lookup
* now and in case a reference still exists, abort the operation.
* That is already implemented in the NO ACTION trigger, so just
- * run it. (This recheck is only needed in the SET DEFAULT case,
+ * run it. (This recheck is only needed in the SET DEFAULT case,
* since CASCADE would remove such rows, while SET NULL is certain
* to result in rows that satisfy the FK constraint.)
*/
@@ -2041,7 +2041,7 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
* believe no check is necessary. So we need to do another lookup
* now and in case a reference still exists, abort the operation.
* That is already implemented in the NO ACTION trigger, so just
- * run it. (This recheck is only needed in the SET DEFAULT case,
+ * run it. (This recheck is only needed in the SET DEFAULT case,
* since CASCADE must change the FK key values, while SET NULL is
* certain to result in rows that satisfy the FK constraint.)
*/
@@ -2150,6 +2150,7 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
switch (riinfo->confmatchtype)
{
case FKCONSTR_MATCH_SIMPLE:
+
/*
* If any new key value is NULL, the row must satisfy the
* constraint, so no check is needed.
@@ -2176,6 +2177,7 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
return true;
case FKCONSTR_MATCH_FULL:
+
/*
* If all new key values are NULL, the row must satisfy the
* constraint, so no check is needed. On the other hand, if only
@@ -2449,7 +2451,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
/*
* The columns to look at in the result tuple are 1..N, not whatever
- * they are in the fk_rel. Hack up riinfo so that the subroutines
+ * they are in the fk_rel. Hack up riinfo so that the subroutines
* called here will behave properly.
*
* In addition to this, we have to pass the correct tupdesc to
@@ -2676,8 +2678,8 @@ ri_BuildQueryKey(RI_QueryKey *key, const RI_ConstraintInfo *riinfo,
int32 constr_queryno)
{
/*
- * We assume struct RI_QueryKey contains no padding bytes, else we'd
- * need to use memset to clear them.
+ * We assume struct RI_QueryKey contains no padding bytes, else we'd need
+ * to use memset to clear them.
*/
key->constr_id = riinfo->constraint_id;
key->constr_queryno = constr_queryno;
@@ -2812,14 +2814,14 @@ ri_LoadConstraintInfo(Oid constraintOid)
elog(ERROR, "cache lookup failed for constraint %u", constraintOid);
conForm = (Form_pg_constraint) GETSTRUCT(tup);
- if (conForm->contype != CONSTRAINT_FOREIGN) /* should not happen */
+ if (conForm->contype != CONSTRAINT_FOREIGN) /* should not happen */
elog(ERROR, "constraint %u is not a foreign key constraint",
constraintOid);
/* And extract data */
Assert(riinfo->constraint_id == constraintOid);
riinfo->oidHashValue = GetSysCacheHashValue1(CONSTROID,
- ObjectIdGetDatum(constraintOid));
+ ObjectIdGetDatum(constraintOid));
memcpy(&riinfo->conname, &conForm->conname, sizeof(NameData));
riinfo->pk_relid = conForm->confrelid;
riinfo->fk_relid = conForm->conrelid;
@@ -3020,10 +3022,10 @@ ri_PerformCheck(const RI_ConstraintInfo *riinfo,
/*
* The values for the query are taken from the table on which the trigger
- * is called - it is normally the other one with respect to query_rel.
- * An exception is ri_Check_Pk_Match(), which uses the PK table for both
- * (and sets queryno to RI_PLAN_CHECK_LOOKUPPK_FROM_PK). We might
- * eventually need some less klugy way to determine this.
+ * is called - it is normally the other one with respect to query_rel. An
+ * exception is ri_Check_Pk_Match(), which uses the PK table for both (and
+ * sets queryno to RI_PLAN_CHECK_LOOKUPPK_FROM_PK). We might eventually
+ * need some less klugy way to determine this.
*/
if (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK)
{
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 043baf3c790..a1ed7813f24 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -1258,7 +1258,7 @@ pg_get_constraintdef(PG_FUNCTION_ARGS)
prettyFlags = PRETTYFLAG_INDENT;
PG_RETURN_TEXT_P(string_to_text(pg_get_constraintdef_worker(constraintId,
false,
- prettyFlags)));
+ prettyFlags)));
}
Datum
@@ -1271,7 +1271,7 @@ pg_get_constraintdef_ext(PG_FUNCTION_ARGS)
prettyFlags = pretty ? PRETTYFLAG_PAREN | PRETTYFLAG_INDENT : PRETTYFLAG_INDENT;
PG_RETURN_TEXT_P(string_to_text(pg_get_constraintdef_worker(constraintId,
false,
- prettyFlags)));
+ prettyFlags)));
}
/* Internal version that returns a palloc'd C string; no pretty-printing */
@@ -4229,19 +4229,19 @@ get_select_query_def(Query *query, deparse_context *context,
{
case LCS_FORKEYSHARE:
appendContextKeyword(context, " FOR KEY SHARE",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
break;
case LCS_FORSHARE:
appendContextKeyword(context, " FOR SHARE",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
break;
case LCS_FORNOKEYUPDATE:
appendContextKeyword(context, " FOR NO KEY UPDATE",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
break;
case LCS_FORUPDATE:
appendContextKeyword(context, " FOR UPDATE",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
break;
}
@@ -5340,8 +5340,8 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
* If it's an unnamed join, look at the expansion of the alias variable.
* If it's a simple reference to one of the input vars, then recursively
- * print the name of that var instead. When it's not a simple reference,
- * we have to just print the unqualified join column name. (This can only
+ * print the name of that var instead. When it's not a simple reference,
+ * we have to just print the unqualified join column name. (This can only
* happen with columns that were merged by USING or NATURAL clauses in a
* FULL JOIN; we took pains previously to make the unqualified column name
* unique in such cases.)
@@ -8550,7 +8550,7 @@ generate_relation_name(Oid relid, List *namespaces)
* means a FuncExpr and not some other way of calling the function), then
* was_variadic must specify whether VARIADIC appeared in the original call,
* and *use_variadic_p will be set to indicate whether to print VARIADIC in
- * the output. For non-FuncExpr cases, was_variadic should be FALSE and
+ * the output. For non-FuncExpr cases, was_variadic should be FALSE and
* use_variadic_p can be NULL.
*
* The result includes all necessary quoting and schema-prefixing.
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 0d5cafba962..da66f347078 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -194,10 +194,10 @@ static Selectivity prefix_selectivity(PlannerInfo *root,
VariableStatData *vardata,
Oid vartype, Oid opfamily, Const *prefixcon);
static Selectivity like_selectivity(const char *patt, int pattlen,
- bool case_insensitive);
+ bool case_insensitive);
static Selectivity regex_selectivity(const char *patt, int pattlen,
- bool case_insensitive,
- int fixed_prefix_len);
+ bool case_insensitive,
+ int fixed_prefix_len);
static Datum string_to_datum(const char *str, Oid datatype);
static Const *string_to_const(const char *str, Oid datatype);
static Const *string_to_bytea_const(const char *str, size_t str_len);
@@ -1123,7 +1123,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
Pattern_Prefix_Status pstatus;
Const *patt;
Const *prefix = NULL;
- Selectivity rest_selec = 0;
+ Selectivity rest_selec = 0;
double result;
/*
@@ -1214,7 +1214,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* Pull out any fixed prefix implied by the pattern, and estimate the
- * fractional selectivity of the remainder of the pattern. Unlike many of
+ * fractional selectivity of the remainder of the pattern. Unlike many of
* the other functions in this file, we use the pattern operator's actual
* collation for this step. This is not because we expect the collation
* to make a big difference in the selectivity estimate (it seldom would),
@@ -1867,17 +1867,17 @@ scalararraysel(PlannerInfo *root,
s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
clause->inputcollid,
PointerGetDatum(root),
- ObjectIdGetDatum(operator),
+ ObjectIdGetDatum(operator),
PointerGetDatum(args),
Int16GetDatum(jointype),
- PointerGetDatum(sjinfo)));
+ PointerGetDatum(sjinfo)));
else
s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
clause->inputcollid,
PointerGetDatum(root),
- ObjectIdGetDatum(operator),
+ ObjectIdGetDatum(operator),
PointerGetDatum(args),
- Int32GetDatum(varRelid)));
+ Int32GetDatum(varRelid)));
if (useOr)
{
@@ -1934,17 +1934,17 @@ scalararraysel(PlannerInfo *root,
s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
clause->inputcollid,
PointerGetDatum(root),
- ObjectIdGetDatum(operator),
+ ObjectIdGetDatum(operator),
PointerGetDatum(args),
Int16GetDatum(jointype),
- PointerGetDatum(sjinfo)));
+ PointerGetDatum(sjinfo)));
else
s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
clause->inputcollid,
PointerGetDatum(root),
- ObjectIdGetDatum(operator),
+ ObjectIdGetDatum(operator),
PointerGetDatum(args),
- Int32GetDatum(varRelid)));
+ Int32GetDatum(varRelid)));
if (useOr)
{
@@ -5293,7 +5293,7 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
if (rest_selec != NULL)
{
- char *patt = TextDatumGetCString(patt_const->constvalue);
+ char *patt = TextDatumGetCString(patt_const->constvalue);
*rest_selec = regex_selectivity(patt, strlen(patt),
case_insensitive,
@@ -5315,7 +5315,7 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
}
else
{
- char *patt = TextDatumGetCString(patt_const->constvalue);
+ char *patt = TextDatumGetCString(patt_const->constvalue);
*rest_selec = regex_selectivity(patt, strlen(patt),
case_insensitive,
@@ -5928,7 +5928,7 @@ string_to_bytea_const(const char *str, size_t str_len)
* genericcostestimate is a general-purpose estimator that can be used for
* most index types. In some cases we use genericcostestimate as the base
* code and then incorporate additional index-type-specific knowledge in
- * the type-specific calling function. To avoid code duplication, we make
+ * the type-specific calling function. To avoid code duplication, we make
* genericcostestimate return a number of intermediate values as well as
* its preliminary estimates of the output cost values. The GenericCosts
* struct includes all these values.
@@ -5941,15 +5941,15 @@ typedef struct
{
/* These are the values the cost estimator must return to the planner */
Cost indexStartupCost; /* index-related startup cost */
- Cost indexTotalCost; /* total index-related scan cost */
- Selectivity indexSelectivity; /* selectivity of index */
+ Cost indexTotalCost; /* total index-related scan cost */
+ Selectivity indexSelectivity; /* selectivity of index */
double indexCorrelation; /* order correlation of index */
/* Intermediate values we obtain along the way */
- double numIndexPages; /* number of leaf pages visited */
- double numIndexTuples; /* number of leaf tuples visited */
+ double numIndexPages; /* number of leaf pages visited */
+ double numIndexTuples; /* number of leaf tuples visited */
double spc_random_page_cost; /* relevant random_page_cost value */
- double num_sa_scans; /* # indexscans from ScalarArrayOps */
+ double num_sa_scans; /* # indexscans from ScalarArrayOps */
} GenericCosts;
static void
@@ -5963,7 +5963,7 @@ genericcostestimate(PlannerInfo *root,
List *indexOrderBys = path->indexorderbys;
Cost indexStartupCost;
Cost indexTotalCost;
- Selectivity indexSelectivity;
+ Selectivity indexSelectivity;
double indexCorrelation;
double numIndexPages;
double numIndexTuples;
@@ -6048,7 +6048,7 @@ genericcostestimate(PlannerInfo *root,
*
* In practice access to upper index levels is often nearly free because
* those tend to stay in cache under load; moreover, the cost involved is
- * highly dependent on index type. We therefore ignore such costs here
+ * highly dependent on index type. We therefore ignore such costs here
* and leave it to the caller to add a suitable charge if needed.
*/
if (index->pages > 1 && index->tuples > 1)
@@ -6570,7 +6570,7 @@ hashcostestimate(PG_FUNCTION_ARGS)
* because the hash AM makes sure that's always one page.
*
* Likewise, we could consider charging some CPU for each index tuple in
- * the bucket, if we knew how many there were. But the per-tuple cost is
+ * the bucket, if we knew how many there were. But the per-tuple cost is
* just a hash value comparison, not a general datatype-dependent
* comparison, so any such charge ought to be quite a bit less than
* cpu_operator_cost; which makes it probably not worth worrying about.
@@ -6617,7 +6617,7 @@ gistcostestimate(PG_FUNCTION_ARGS)
* Although this computation isn't really expensive enough to require
* caching, we might as well use index->tree_height to cache it.
*/
- if (index->tree_height < 0) /* unknown? */
+ if (index->tree_height < 0) /* unknown? */
{
if (index->pages > 1) /* avoid computing log(0) */
index->tree_height = (int) (log(index->pages) / log(100.0));
@@ -6626,9 +6626,9 @@ gistcostestimate(PG_FUNCTION_ARGS)
}
/*
- * Add a CPU-cost component to represent the costs of initial descent.
- * We just use log(N) here not log2(N) since the branching factor isn't
- * necessarily two anyway. As for btree, charge once per SA scan.
+ * Add a CPU-cost component to represent the costs of initial descent. We
+ * just use log(N) here not log2(N) since the branching factor isn't
+ * necessarily two anyway. As for btree, charge once per SA scan.
*/
if (index->tuples > 1) /* avoid computing log(0) */
{
@@ -6679,7 +6679,7 @@ spgcostestimate(PG_FUNCTION_ARGS)
* Although this computation isn't really expensive enough to require
* caching, we might as well use index->tree_height to cache it.
*/
- if (index->tree_height < 0) /* unknown? */
+ if (index->tree_height < 0) /* unknown? */
{
if (index->pages > 1) /* avoid computing log(0) */
index->tree_height = (int) (log(index->pages) / log(100.0));
@@ -6688,9 +6688,9 @@ spgcostestimate(PG_FUNCTION_ARGS)
}
/*
- * Add a CPU-cost component to represent the costs of initial descent.
- * We just use log(N) here not log2(N) since the branching factor isn't
- * necessarily two anyway. As for btree, charge once per SA scan.
+ * Add a CPU-cost component to represent the costs of initial descent. We
+ * just use log(N) here not log2(N) since the branching factor isn't
+ * necessarily two anyway. As for btree, charge once per SA scan.
*/
if (index->tuples > 1) /* avoid computing log(0) */
{
@@ -6801,14 +6801,14 @@ gincost_pattern(IndexOptInfo *index, int indexcol,
collation = DEFAULT_COLLATION_OID;
OidFunctionCall7Coll(extractProcOid,
- collation,
- query,
- PointerGetDatum(&nentries),
- UInt16GetDatum(strategy_op),
- PointerGetDatum(&partial_matches),
- PointerGetDatum(&extra_data),
- PointerGetDatum(&nullFlags),
- PointerGetDatum(&searchMode));
+ collation,
+ query,
+ PointerGetDatum(&nentries),
+ UInt16GetDatum(strategy_op),
+ PointerGetDatum(&partial_matches),
+ PointerGetDatum(&extra_data),
+ PointerGetDatum(&nullFlags),
+ PointerGetDatum(&searchMode));
if (nentries <= 0 && searchMode == GIN_SEARCH_MODE_DEFAULT)
{
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 60f29533b71..94b2a3608a6 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -1296,7 +1296,7 @@ GetCurrentTimestamp(void)
int64
GetCurrentIntegerTimestamp(void)
{
- int64 result;
+ int64 result;
struct timeval tp;
gettimeofday(&tp, NULL);
@@ -3759,7 +3759,7 @@ interval_trunc(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("interval units \"%s\" not supported "
- "because months usually have fractional weeks",
+ "because months usually have fractional weeks",
lowunits)));
else
ereport(ERROR,
@@ -4608,8 +4608,8 @@ timestamp_izone(PG_FUNCTION_ARGS)
if (zone->month != 0 || zone->day != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("interval time zone \"%s\" must not include months or days",
- DatumGetCString(DirectFunctionCall1(interval_out,
+ errmsg("interval time zone \"%s\" must not include months or days",
+ DatumGetCString(DirectFunctionCall1(interval_out,
PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
@@ -4781,8 +4781,8 @@ timestamptz_izone(PG_FUNCTION_ARGS)
if (zone->month != 0 || zone->day != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("interval time zone \"%s\" must not include months or days",
- DatumGetCString(DirectFunctionCall1(interval_out,
+ errmsg("interval time zone \"%s\" must not include months or days",
+ DatumGetCString(DirectFunctionCall1(interval_out,
PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
diff --git a/src/backend/utils/adt/tsquery_rewrite.c b/src/backend/utils/adt/tsquery_rewrite.c
index 6d3f618e8fd..a301f8fc180 100644
--- a/src/backend/utils/adt/tsquery_rewrite.c
+++ b/src/backend/utils/adt/tsquery_rewrite.c
@@ -46,7 +46,6 @@ addone(int *counters, int last, int total)
static QTNode *
findeq(QTNode *node, QTNode *ex, QTNode *subs, bool *isfind)
{
-
if ((node->sign & ex->sign) != ex->sign ||
node->valnode->type != ex->valnode->type)
return node;
@@ -196,7 +195,6 @@ dofindsubquery(QTNode *root, QTNode *ex, QTNode *subs, bool *isfind)
static QTNode *
dropvoidsubtree(QTNode *root)
{
-
if (!root)
return NULL;
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index bb85faf1a7b..56349e7e2aa 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -4245,7 +4245,7 @@ text_format(PG_FUNCTION_ARGS)
/*
* Get the appropriate typOutput function, reusing previous one if
- * same type as previous argument. That's particularly useful in the
+ * same type as previous argument. That's particularly useful in the
* variadic-array case, but often saves work even for ordinary calls.
*/
if (typid != prev_type)
@@ -4274,8 +4274,8 @@ text_format(PG_FUNCTION_ARGS)
/* should not get here, because of previous check */
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized conversion type specifier \"%c\"",
- *cp)));
+ errmsg("unrecognized conversion type specifier \"%c\"",
+ *cp)));
break;
}
}
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 9c5daec31e9..25ab79b1979 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -1499,7 +1499,7 @@ xml_pstrdup(const char *string)
/*
* xmlPgEntityLoader --- entity loader callback function
*
- * Silently prevent any external entity URL from being loaded. We don't want
+ * Silently prevent any external entity URL from being loaded. We don't want
* to throw an error, so instead make the entity appear to expand to an empty
* string.
*
@@ -1609,6 +1609,7 @@ xml_errorHandler(void *data, xmlErrorPtr error)
case XML_FROM_NONE:
case XML_FROM_MEMORY:
case XML_FROM_IO:
+
/*
* Suppress warnings about undeclared entities. We need to do
* this to avoid problems due to not loading DTD definitions.
@@ -2002,8 +2003,8 @@ map_sql_value_to_xml_value(Datum value, Oid type, bool xml_escape_strings)
char *str;
/*
- * Flatten domains; the special-case treatments below should apply
- * to, eg, domains over boolean not just boolean.
+ * Flatten domains; the special-case treatments below should apply to,
+ * eg, domains over boolean not just boolean.
*/
type = getBaseType(type);
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 25f50e56670..cc91406582b 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -291,7 +291,7 @@ CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
static void
CatCachePrintStats(int code, Datum arg)
{
- slist_iter iter;
+ slist_iter iter;
long cc_searches = 0;
long cc_hits = 0;
long cc_neg_hits = 0;
@@ -444,7 +444,7 @@ CatCacheRemoveCList(CatCache *cache, CatCList *cl)
void
CatalogCacheIdInvalidate(int cacheId, uint32 hashValue)
{
- slist_iter cache_iter;
+ slist_iter cache_iter;
CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: called");
@@ -554,12 +554,12 @@ AtEOXact_CatCache(bool isCommit)
#ifdef USE_ASSERT_CHECKING
if (assert_enabled)
{
- slist_iter cache_iter;
+ slist_iter cache_iter;
slist_foreach(cache_iter, &CacheHdr->ch_caches)
{
CatCache *ccp = slist_container(CatCache, cc_next, cache_iter.cur);
- dlist_iter iter;
+ dlist_iter iter;
int i;
/* Check CatCLists */
@@ -649,7 +649,7 @@ ResetCatalogCache(CatCache *cache)
void
ResetCatalogCaches(void)
{
- slist_iter iter;
+ slist_iter iter;
CACHE1_elog(DEBUG2, "ResetCatalogCaches called");
@@ -679,7 +679,7 @@ ResetCatalogCaches(void)
void
CatalogCacheFlushCatalog(Oid catId)
{
- slist_iter iter;
+ slist_iter iter;
CACHE2_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
@@ -1343,7 +1343,7 @@ SearchCatCacheList(CatCache *cache,
{
ScanKeyData cur_skey[CATCACHE_MAXKEYS];
uint32 lHashValue;
- dlist_iter iter;
+ dlist_iter iter;
CatCList *cl;
CatCTup *ct;
List *volatile ctlist;
@@ -1789,7 +1789,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
HeapTuple newtuple,
void (*function) (int, uint32, Oid))
{
- slist_iter iter;
+ slist_iter iter;
Oid reloid;
CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
diff --git a/src/backend/utils/cache/evtcache.c b/src/backend/utils/cache/evtcache.c
index bbd3ae369d3..2180f2abcc1 100644
--- a/src/backend/utils/cache/evtcache.c
+++ b/src/backend/utils/cache/evtcache.c
@@ -40,7 +40,7 @@ typedef enum
typedef struct
{
- EventTriggerEvent event;
+ EventTriggerEvent event;
List *triggerlist;
} EventTriggerCacheEntry;
@@ -51,7 +51,7 @@ static EventTriggerCacheStateType EventTriggerCacheState = ETCS_NEEDS_REBUILD;
static void BuildEventTriggerCache(void);
static void InvalidateEventCacheCallback(Datum arg,
int cacheid, uint32 hashvalue);
-static int DecodeTextArrayToCString(Datum array, char ***cstringp);
+static int DecodeTextArrayToCString(Datum array, char ***cstringp);
/*
* Search the event cache by trigger event.
@@ -77,12 +77,12 @@ EventCacheLookup(EventTriggerEvent event)
static void
BuildEventTriggerCache(void)
{
- HASHCTL ctl;
- HTAB *cache;
- MemoryContext oldcontext;
- Relation rel;
- Relation irel;
- SysScanDesc scan;
+ HASHCTL ctl;
+ HTAB *cache;
+ MemoryContext oldcontext;
+ Relation rel;
+ Relation irel;
+ SysScanDesc scan;
if (EventTriggerCacheContext != NULL)
{
@@ -96,8 +96,8 @@ BuildEventTriggerCache(void)
else
{
/*
- * This is our first time attempting to build the cache, so we need
- * to set up the memory context and register a syscache callback to
+ * This is our first time attempting to build the cache, so we need to
+ * set up the memory context and register a syscache callback to
* capture future invalidation events.
*/
if (CacheMemoryContext == NULL)
@@ -129,24 +129,24 @@ BuildEventTriggerCache(void)
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
/*
- * Prepare to scan pg_event_trigger in name order. We use an MVCC
- * snapshot to avoid getting inconsistent results if the table is
- * being concurrently updated.
+ * Prepare to scan pg_event_trigger in name order. We use an MVCC
+ * snapshot to avoid getting inconsistent results if the table is being
+ * concurrently updated.
*/
rel = relation_open(EventTriggerRelationId, AccessShareLock);
irel = index_open(EventTriggerNameIndexId, AccessShareLock);
scan = systable_beginscan_ordered(rel, irel, GetLatestSnapshot(), 0, NULL);
/*
- * Build a cache item for each pg_event_trigger tuple, and append each
- * one to the appropriate cache entry.
+ * Build a cache item for each pg_event_trigger tuple, and append each one
+ * to the appropriate cache entry.
*/
for (;;)
{
- HeapTuple tup;
- Form_pg_event_trigger form;
+ HeapTuple tup;
+ Form_pg_event_trigger form;
char *evtevent;
- EventTriggerEvent event;
+ EventTriggerEvent event;
EventTriggerCacheItem *item;
Datum evttags;
bool evttags_isnull;
@@ -257,9 +257,9 @@ static void
InvalidateEventCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
{
/*
- * If the cache isn't valid, then there might be a rebuild in progress,
- * so we can't immediately blow it away. But it's advantageous to do
- * this when possible, so as to immediately free memory.
+ * If the cache isn't valid, then there might be a rebuild in progress, so
+ * we can't immediately blow it away. But it's advantageous to do this
+ * when possible, so as to immediately free memory.
*/
if (EventTriggerCacheState == ETCS_VALID)
{
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index c4960d597e0..26cae97d955 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -216,7 +216,7 @@ CreateCachedPlan(Node *raw_parse_tree,
* in that context.
*
* A one-shot plan cannot be saved or copied, since we make no effort to
- * preserve the raw parse tree unmodified. There is also no support for
+ * preserve the raw parse tree unmodified. There is also no support for
* invalidation, so plan use must be completed in the current transaction,
* and DDL that might invalidate the querytree_list must be avoided as well.
*
@@ -373,9 +373,9 @@ CompleteCachedPlan(CachedPlanSource *plansource,
&plansource->invalItems);
/*
- * Also save the current search_path in the query_context. (This
+ * Also save the current search_path in the query_context. (This
* should not generate much extra cruft either, since almost certainly
- * the path is already valid.) Again, we don't really need this for
+ * the path is already valid.) Again, we don't really need this for
* one-shot plans; and we *must* skip this for transaction control
* commands, because this could result in catalog accesses.
*/
@@ -554,9 +554,9 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
/*
* For one-shot plans, we do not support revalidation checking; it's
* assumed the query is parsed, planned, and executed in one transaction,
- * so that no lock re-acquisition is necessary. Also, there is never
- * any need to revalidate plans for transaction control commands (and
- * we mustn't risk any catalog accesses when handling those).
+ * so that no lock re-acquisition is necessary. Also, there is never any
+ * need to revalidate plans for transaction control commands (and we
+ * mustn't risk any catalog accesses when handling those).
*/
if (plansource->is_oneshot || IsTransactionStmtPlan(plansource))
{
@@ -725,7 +725,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
&plansource->invalItems);
/*
- * Also save the current search_path in the query_context. (This should
+ * Also save the current search_path in the query_context. (This should
* not generate much extra cruft either, since almost certainly the path
* is already valid.)
*/
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 7888d387234..f1140385883 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -2313,7 +2313,7 @@ AtEOXact_RelationCache(bool isCommit)
* For simplicity, eoxact_list[] entries are not deleted till end of
* top-level transaction, even though we could remove them at
* subtransaction end in some cases, or remove relations from the list if
- * they are cleared for other reasons. Therefore we should expect the
+ * they are cleared for other reasons. Therefore we should expect the
* case that list entries are not found in the hashtable; if not, there's
* nothing to do for them.
*/
@@ -2354,66 +2354,66 @@ AtEOXact_RelationCache(bool isCommit)
static void
AtEOXact_cleanup(Relation relation, bool isCommit)
{
- /*
- * The relcache entry's ref count should be back to its normal
- * not-in-a-transaction state: 0 unless it's nailed in cache.
- *
- * In bootstrap mode, this is NOT true, so don't check it --- the
- * bootstrap code expects relations to stay open across start/commit
- * transaction calls. (That seems bogus, but it's not worth fixing.)
- *
- * Note: ideally this check would be applied to every relcache entry,
- * not just those that have eoxact work to do. But it's not worth
- * forcing a scan of the whole relcache just for this. (Moreover,
- * doing so would mean that assert-enabled testing never tests the
- * hash_search code path above, which seems a bad idea.)
- */
+ /*
+ * The relcache entry's ref count should be back to its normal
+ * not-in-a-transaction state: 0 unless it's nailed in cache.
+ *
+ * In bootstrap mode, this is NOT true, so don't check it --- the
+ * bootstrap code expects relations to stay open across start/commit
+ * transaction calls. (That seems bogus, but it's not worth fixing.)
+ *
+ * Note: ideally this check would be applied to every relcache entry, not
+ * just those that have eoxact work to do. But it's not worth forcing a
+ * scan of the whole relcache just for this. (Moreover, doing so would
+ * mean that assert-enabled testing never tests the hash_search code path
+ * above, which seems a bad idea.)
+ */
#ifdef USE_ASSERT_CHECKING
- if (!IsBootstrapProcessingMode())
- {
- int expected_refcnt;
+ if (!IsBootstrapProcessingMode())
+ {
+ int expected_refcnt;
- expected_refcnt = relation->rd_isnailed ? 1 : 0;
- Assert(relation->rd_refcnt == expected_refcnt);
- }
+ expected_refcnt = relation->rd_isnailed ? 1 : 0;
+ Assert(relation->rd_refcnt == expected_refcnt);
+ }
#endif
- /*
- * Is it a relation created in the current transaction?
- *
- * During commit, reset the flag to zero, since we are now out of the
- * creating transaction. During abort, simply delete the relcache
- * entry --- it isn't interesting any longer. (NOTE: if we have
- * forgotten the new-ness of a new relation due to a forced cache
- * flush, the entry will get deleted anyway by shared-cache-inval
- * processing of the aborted pg_class insertion.)
- */
- if (relation->rd_createSubid != InvalidSubTransactionId)
+ /*
+ * Is it a relation created in the current transaction?
+ *
+ * During commit, reset the flag to zero, since we are now out of the
+ * creating transaction. During abort, simply delete the relcache entry
+ * --- it isn't interesting any longer. (NOTE: if we have forgotten the
+ * new-ness of a new relation due to a forced cache flush, the entry will
+ * get deleted anyway by shared-cache-inval processing of the aborted
+ * pg_class insertion.)
+ */
+ if (relation->rd_createSubid != InvalidSubTransactionId)
+ {
+ if (isCommit)
+ relation->rd_createSubid = InvalidSubTransactionId;
+ else
{
- if (isCommit)
- relation->rd_createSubid = InvalidSubTransactionId;
- else
- {
- RelationClearRelation(relation, false);
- return;
- }
+ RelationClearRelation(relation, false);
+ return;
}
+ }
- /*
- * Likewise, reset the hint about the relfilenode being new.
- */
- relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
+ /*
+ * Likewise, reset the hint about the relfilenode being new.
+ */
+ relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
- /*
- * Flush any temporary index list.
- */
- if (relation->rd_indexvalid == 2)
- {
- list_free(relation->rd_indexlist);
- relation->rd_indexlist = NIL;
- relation->rd_oidindex = InvalidOid;
- relation->rd_indexvalid = 0;
- }
+ /*
+ * Flush any temporary index list.
+ */
+ if (relation->rd_indexvalid == 2)
+ {
+ list_free(relation->rd_indexlist);
+ relation->rd_indexlist = NIL;
+ relation->rd_oidindex = InvalidOid;
+ relation->rd_indexvalid = 0;
+ }
}
/*
@@ -2474,45 +2474,44 @@ static void
AtEOSubXact_cleanup(Relation relation, bool isCommit,
SubTransactionId mySubid, SubTransactionId parentSubid)
{
- /*
- * Is it a relation created in the current subtransaction?
- *
- * During subcommit, mark it as belonging to the parent, instead.
- * During subabort, simply delete the relcache entry.
- */
- if (relation->rd_createSubid == mySubid)
+ /*
+ * Is it a relation created in the current subtransaction?
+ *
+ * During subcommit, mark it as belonging to the parent, instead. During
+ * subabort, simply delete the relcache entry.
+ */
+ if (relation->rd_createSubid == mySubid)
+ {
+ if (isCommit)
+ relation->rd_createSubid = parentSubid;
+ else
{
- if (isCommit)
- relation->rd_createSubid = parentSubid;
- else
- {
- RelationClearRelation(relation, false);
- return;
- }
+ RelationClearRelation(relation, false);
+ return;
}
+ }
- /*
- * Likewise, update or drop any new-relfilenode-in-subtransaction
- * hint.
- */
- if (relation->rd_newRelfilenodeSubid == mySubid)
- {
- if (isCommit)
- relation->rd_newRelfilenodeSubid = parentSubid;
- else
- relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
- }
+ /*
+ * Likewise, update or drop any new-relfilenode-in-subtransaction hint.
+ */
+ if (relation->rd_newRelfilenodeSubid == mySubid)
+ {
+ if (isCommit)
+ relation->rd_newRelfilenodeSubid = parentSubid;
+ else
+ relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
+ }
- /*
- * Flush any temporary index list.
- */
- if (relation->rd_indexvalid == 2)
- {
- list_free(relation->rd_indexlist);
- relation->rd_indexlist = NIL;
- relation->rd_oidindex = InvalidOid;
- relation->rd_indexvalid = 0;
- }
+ /*
+ * Flush any temporary index list.
+ */
+ if (relation->rd_indexvalid == 2)
+ {
+ list_free(relation->rd_indexlist);
+ relation->rd_indexlist = NIL;
+ relation->rd_oidindex = InvalidOid;
+ relation->rd_indexvalid = 0;
+ }
}
@@ -2699,8 +2698,8 @@ RelationBuildLocalRelation(const char *relname,
RelationCacheInsert(rel);
/*
- * Flag relation as needing eoxact cleanup (to clear rd_createSubid).
- * We can't do this before storing relid in it.
+ * Flag relation as needing eoxact cleanup (to clear rd_createSubid). We
+ * can't do this before storing relid in it.
*/
EOXactListAdd(rel);
@@ -3847,8 +3846,8 @@ RelationGetIndexAttrBitmap(Relation relation, bool keyAttrs)
/* Can this index be referenced by a foreign key? */
isKey = indexInfo->ii_Unique &&
- indexInfo->ii_Expressions == NIL &&
- indexInfo->ii_Predicate == NIL;
+ indexInfo->ii_Expressions == NIL &&
+ indexInfo->ii_Predicate == NIL;
/* Collect simple attribute references */
for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
@@ -3861,7 +3860,7 @@ RelationGetIndexAttrBitmap(Relation relation, bool keyAttrs)
attrnum - FirstLowInvalidHeapAttributeNumber);
if (isKey)
uindexattrs = bms_add_member(uindexattrs,
- attrnum - FirstLowInvalidHeapAttributeNumber);
+ attrnum - FirstLowInvalidHeapAttributeNumber);
}
}
@@ -4030,7 +4029,7 @@ errtable(Relation rel)
get_namespace_name(RelationGetNamespace(rel)));
err_generic_string(PG_DIAG_TABLE_NAME, RelationGetRelationName(rel));
- return 0; /* return value does not matter */
+ return 0; /* return value does not matter */
}
/*
@@ -4061,7 +4060,7 @@ errtablecol(Relation rel, int attnum)
* given directly rather than extracted from the relation's catalog data.
*
* Don't use this directly unless errtablecol() is inconvenient for some
- * reason. This might possibly be needed during intermediate states in ALTER
+ * reason. This might possibly be needed during intermediate states in ALTER
* TABLE, for instance.
*/
int
@@ -4070,7 +4069,7 @@ errtablecolname(Relation rel, const char *colname)
errtable(rel);
err_generic_string(PG_DIAG_COLUMN_NAME, colname);
- return 0; /* return value does not matter */
+ return 0; /* return value does not matter */
}
/*
@@ -4083,7 +4082,7 @@ errtableconstraint(Relation rel, const char *conname)
errtable(rel);
err_generic_string(PG_DIAG_CONSTRAINT_NAME, conname);
- return 0; /* return value does not matter */
+ return 0; /* return value does not matter */
}
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index bfc3c86aa8c..ecb0f96d467 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -382,7 +382,7 @@ static const struct cachedesc cacheinfo[] = {
},
256
},
- {EventTriggerRelationId, /* EVENTTRIGGERNAME */
+ {EventTriggerRelationId, /* EVENTTRIGGERNAME */
EventTriggerNameIndexId,
1,
{
@@ -393,7 +393,7 @@ static const struct cachedesc cacheinfo[] = {
},
8
},
- {EventTriggerRelationId, /* EVENTTRIGGEROID */
+ {EventTriggerRelationId, /* EVENTTRIGGEROID */
EventTriggerOidIndexId,
1,
{
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index f8cf190e652..e9eb3d5be8c 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -741,7 +741,7 @@ errcode_for_socket_access(void)
StringInfoData buf; \
/* Internationalize the error format string */ \
if (!in_error_recursion_trouble()) \
- fmt = dngettext((domain), fmt_singular, fmt_plural, n); \
+ fmt = dngettext((domain), fmt_singular, fmt_plural, n); \
else \
fmt = (n == 1 ? fmt_singular : fmt_plural); \
/* Expand %m in format string */ \
@@ -1151,7 +1151,7 @@ err_generic_string(int field, const char *str)
break;
}
- return 0; /* return value does not matter */
+ return 0; /* return value does not matter */
}
/*
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 5454befe152..7c3f9206e5e 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -1042,9 +1042,9 @@ hash_update_hash_key(HTAB *hashp,
hashp->tabname);
/*
- * Lookup the existing element using its saved hash value. We need to
- * do this to be able to unlink it from its hash chain, but as a side
- * benefit we can verify the validity of the passed existingEntry pointer.
+ * Lookup the existing element using its saved hash value. We need to do
+ * this to be able to unlink it from its hash chain, but as a side benefit
+ * we can verify the validity of the passed existingEntry pointer.
*/
bucket = calc_bucket(hctl, existingElement->hashvalue);
@@ -1074,8 +1074,8 @@ hash_update_hash_key(HTAB *hashp,
oldPrevPtr = prevBucketPtr;
/*
- * Now perform the equivalent of a HASH_ENTER operation to locate the
- * hash chain we want to put the entry into.
+ * Now perform the equivalent of a HASH_ENTER operation to locate the hash
+ * chain we want to put the entry into.
*/
newhashvalue = hashp->hash(newKeyPtr, hashp->keysize);
@@ -1119,7 +1119,7 @@ hash_update_hash_key(HTAB *hashp,
/*
* If old and new hash values belong to the same bucket, we need not
* change any chain links, and indeed should not since this simplistic
- * update will corrupt the list if currBucket is the last element. (We
+ * update will corrupt the list if currBucket is the last element. (We
* cannot fall out earlier, however, since we need to scan the bucket to
* check for duplicate keys.)
*/
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 493e91ca610..cb78caf8ebd 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -498,8 +498,8 @@ void
InitializeSessionUserIdStandalone(void)
{
/*
- * This function should only be called in single-user mode, in
- * autovacuum workers, and in background workers.
+ * This function should only be called in single-user mode, in autovacuum
+ * workers, and in background workers.
*/
AssertState(!IsUnderPostmaster || IsAutoVacuumWorkerProcess() || IsBackgroundWorker);
@@ -894,7 +894,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Successfully created the file, now fill it. See comment in miscadmin.h
- * about the contents. Note that we write the same first five lines into
+ * about the contents. Note that we write the same first five lines into
* both datadir and socket lockfiles; although more stuff may get added to
* the datadir lockfile later.
*/
@@ -948,9 +948,9 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * Arrange to unlink the lock file(s) at proc_exit. If this is the
- * first one, set up the on_proc_exit function to do it; then add this
- * lock file to the list of files to unlink.
+ * Arrange to unlink the lock file(s) at proc_exit. If this is the first
+ * one, set up the on_proc_exit function to do it; then add this lock file
+ * to the list of files to unlink.
*/
if (lock_files == NIL)
on_proc_exit(UnlinkLockFiles, 0);
@@ -1077,8 +1077,8 @@ AddToDataDirLockFile(int target_line, const char *str)
srcbuffer[len] = '\0';
/*
- * Advance over lines we are not supposed to rewrite, then copy them
- * to destbuffer.
+ * Advance over lines we are not supposed to rewrite, then copy them to
+ * destbuffer.
*/
srcptr = srcbuffer;
for (lineno = 1; lineno < target_line; lineno++)
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 5b52bd27973..e0abff1145a 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -203,9 +203,9 @@ PerformAuthentication(Port *port)
{
/*
* It is ok to continue if we fail to load the IDENT file, although it
- * means that you cannot log in using any of the authentication methods
- * that need a user name mapping. load_ident() already logged the
- * details of error to the log.
+ * means that you cannot log in using any of the authentication
+ * methods that need a user name mapping. load_ident() already logged
+ * the details of error to the log.
*/
}
#endif
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index 287ff808fc1..4582219af73 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -714,14 +714,14 @@ pg_encoding_mb2wchar_with_len(int encoding,
int
pg_wchar2mb(const pg_wchar *from, char *to)
{
- return (*pg_wchar_table[DatabaseEncoding->encoding].wchar2mb_with_len) (from, (unsigned char *)to, pg_wchar_strlen(from));
+ return (*pg_wchar_table[DatabaseEncoding->encoding].wchar2mb_with_len) (from, (unsigned char *) to, pg_wchar_strlen(from));
}
/* convert a wchar string to a multibyte with a limited length */
int
pg_wchar2mb_with_len(const pg_wchar *from, char *to, int len)
{
- return (*pg_wchar_table[DatabaseEncoding->encoding].wchar2mb_with_len) (from, (unsigned char *)to, len);
+ return (*pg_wchar_table[DatabaseEncoding->encoding].wchar2mb_with_len) (from, (unsigned char *) to, len);
}
/* same, with any encoding */
@@ -729,7 +729,7 @@ int
pg_encoding_wchar2mb_with_len(int encoding,
const pg_wchar *from, char *to, int len)
{
- return (*pg_wchar_table[encoding].wchar2mb_with_len) (from, (unsigned char *)to, len);
+ return (*pg_wchar_table[encoding].wchar2mb_with_len) (from, (unsigned char *) to, len);
}
/* returns the byte length of a multibyte character */
diff --git a/src/backend/utils/mb/wchar.c b/src/backend/utils/mb/wchar.c
index 2fc17feb5eb..45bc3c1604b 100644
--- a/src/backend/utils/mb/wchar.c
+++ b/src/backend/utils/mb/wchar.c
@@ -98,7 +98,7 @@ pg_euc2wchar_with_len(const unsigned char *from, pg_wchar *to, int len)
*to |= *from++;
len -= 2;
}
- else /* must be ASCII */
+ else /* must be ASCII */
{
*to = *from++;
len--;
@@ -513,7 +513,7 @@ pg_wchar2utf_with_len(const pg_wchar *from, unsigned char *to, int len)
while (len > 0 && *from)
{
- int char_len;
+ int char_len;
unicode_to_utf8(*from, to);
char_len = pg_utf_mblen(to);
@@ -1721,7 +1721,7 @@ pg_eucjp_increment(unsigned char *charptr, int length)
*-------------------------------------------------------------------
*/
pg_wchar_tbl pg_wchar_table[] = {
- {pg_ascii2wchar_with_len, pg_wchar2single_with_len, pg_ascii_mblen, pg_ascii_dsplen, pg_ascii_verifier, 1}, /* PG_SQL_ASCII */
+ {pg_ascii2wchar_with_len, pg_wchar2single_with_len, pg_ascii_mblen, pg_ascii_dsplen, pg_ascii_verifier, 1}, /* PG_SQL_ASCII */
{pg_eucjp2wchar_with_len, pg_wchar2euc_with_len, pg_eucjp_mblen, pg_eucjp_dsplen, pg_eucjp_verifier, 3}, /* PG_EUC_JP */
{pg_euccn2wchar_with_len, pg_wchar2euc_with_len, pg_euccn_mblen, pg_euccn_dsplen, pg_euccn_verifier, 2}, /* PG_EUC_CN */
{pg_euckr2wchar_with_len, pg_wchar2euc_with_len, pg_euckr_mblen, pg_euckr_dsplen, pg_euckr_verifier, 3}, /* PG_EUC_KR */
@@ -1756,13 +1756,13 @@ pg_wchar_tbl pg_wchar_table[] = {
{pg_latin12wchar_with_len, pg_wchar2single_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* PG_WIN1255 */
{pg_latin12wchar_with_len, pg_wchar2single_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* PG_WIN1257 */
{pg_latin12wchar_with_len, pg_wchar2single_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* PG_KOI8U */
- {0, 0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2}, /* PG_SJIS */
- {0, 0, pg_big5_mblen, pg_big5_dsplen, pg_big5_verifier, 2}, /* PG_BIG5 */
- {0, 0, pg_gbk_mblen, pg_gbk_dsplen, pg_gbk_verifier, 2}, /* PG_GBK */
- {0, 0, pg_uhc_mblen, pg_uhc_dsplen, pg_uhc_verifier, 2}, /* PG_UHC */
- {0, 0, pg_gb18030_mblen, pg_gb18030_dsplen, pg_gb18030_verifier, 4}, /* PG_GB18030 */
- {0, 0, pg_johab_mblen, pg_johab_dsplen, pg_johab_verifier, 3}, /* PG_JOHAB */
- {0, 0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2} /* PG_SHIFT_JIS_2004 */
+ {0, 0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2}, /* PG_SJIS */
+ {0, 0, pg_big5_mblen, pg_big5_dsplen, pg_big5_verifier, 2}, /* PG_BIG5 */
+ {0, 0, pg_gbk_mblen, pg_gbk_dsplen, pg_gbk_verifier, 2}, /* PG_GBK */
+ {0, 0, pg_uhc_mblen, pg_uhc_dsplen, pg_uhc_verifier, 2}, /* PG_UHC */
+ {0, 0, pg_gb18030_mblen, pg_gb18030_dsplen, pg_gb18030_verifier, 4}, /* PG_GB18030 */
+ {0, 0, pg_johab_mblen, pg_johab_dsplen, pg_johab_verifier, 3}, /* PG_JOHAB */
+ {0, 0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2} /* PG_SHIFT_JIS_2004 */
};
/* returns the byte length of a word for mule internal code */
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 22ba35fef93..ea16c64619f 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -814,8 +814,8 @@ static struct config_bool ConfigureNamesBool[] =
gettext_noop("Detection of a checksum failure normally causes PostgreSQL to "
"report an error, aborting the current transaction. Setting "
"ignore_checksum_failure to true causes the system to ignore the failure "
- "(but still report a warning), and continue processing. This "
- "behavior could cause crashes or other serious problems. Only "
+ "(but still report a warning), and continue processing. This "
+ "behavior could cause crashes or other serious problems. Only "
"has an effect if checksums are enabled."),
GUC_NOT_IN_SAMPLE
},
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index 6c3f965151a..e7ec3931f12 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -62,8 +62,8 @@ typedef struct ResourceOwnerData
int maxbuffers; /* currently allocated array size */
/* We can remember up to MAX_RESOWNER_LOCKS references to local locks. */
- int nlocks; /* number of owned locks */
- LOCALLOCK *locks[MAX_RESOWNER_LOCKS]; /* list of owned locks */
+ int nlocks; /* number of owned locks */
+ LOCALLOCK *locks[MAX_RESOWNER_LOCKS]; /* list of owned locks */
/* We have built-in support for remembering catcache references */
int ncatrefs; /* number of owned catcache pins */
@@ -641,10 +641,10 @@ ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
* the entry.
*/
void
-ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK * locallock)
+ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
{
if (owner->nlocks > MAX_RESOWNER_LOCKS)
- return; /* we have already overflowed */
+ return; /* we have already overflowed */
if (owner->nlocks < MAX_RESOWNER_LOCKS)
owner->locks[owner->nlocks] = locallock;
@@ -664,7 +664,7 @@ ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
int i;
if (owner->nlocks > MAX_RESOWNER_LOCKS)
- return; /* we have overflowed */
+ return; /* we have overflowed */
Assert(owner->nlocks > 0);
for (i = owner->nlocks - 1; i >= 0; i--)
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index 57d0d3f5e8b..ea9bc04823d 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -575,8 +575,8 @@ grow_memtuples(Tuplestorestate *state)
* strategy and instead increase as much as we safely can.
*
* To stay within allowedMem, we can't increase memtupsize by more
- * than availMem / sizeof(void *) elements. In practice, we want
- * to increase it by considerably less, because we need to leave some
+ * than availMem / sizeof(void *) elements. In practice, we want to
+ * increase it by considerably less, because we need to leave some
* space for the tuples to which the new array slots will refer. We
* assume the new tuples will be about the same size as the tuples
* we've already seen, and thus we can extrapolate from the space
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index 24384b49890..ab4020a710b 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -214,12 +214,12 @@ HeapTupleSatisfiesSelf(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
return true;
- if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
return true;
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
xmax = HeapTupleGetUpdateXid(tuple);
if (!TransactionIdIsValid(xmax))
@@ -270,7 +270,7 @@ HeapTupleSatisfiesSelf(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
return true;
@@ -405,12 +405,12 @@ HeapTupleSatisfiesNow(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
return true;
- if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
return true;
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
xmax = HeapTupleGetUpdateXid(tuple);
if (!TransactionIdIsValid(xmax))
@@ -464,7 +464,7 @@ HeapTupleSatisfiesNow(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
return true;
@@ -682,12 +682,12 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
return HeapTupleMayBeUpdated;
- if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
return HeapTupleMayBeUpdated;
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
xmax = HeapTupleGetUpdateXid(tuple);
if (!TransactionIdIsValid(xmax))
@@ -699,9 +699,11 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
else
{
if (HeapTupleHeaderGetCmax(tuple) >= curcid)
- return HeapTupleSelfUpdated; /* updated after scan started */
+ return HeapTupleSelfUpdated; /* updated after scan
+ * started */
else
- return HeapTupleInvisible; /* updated before scan started */
+ return HeapTupleInvisible; /* updated before scan
+ * started */
}
}
@@ -746,14 +748,13 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
{
/*
- * If it's only locked but neither EXCL_LOCK nor KEYSHR_LOCK
- * is set, it cannot possibly be running. Otherwise need to
- * check.
+ * If it's only locked but neither EXCL_LOCK nor KEYSHR_LOCK is
+ * set, it cannot possibly be running. Otherwise need to check.
*/
if ((tuple->t_infomask & (HEAP_XMAX_EXCL_LOCK |
HEAP_XMAX_KEYSHR_LOCK)) &&
@@ -777,9 +778,9 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
if (TransactionIdIsCurrentTransactionId(xmax))
{
if (HeapTupleHeaderGetCmax(tuple) >= curcid)
- return HeapTupleSelfUpdated; /* updated after scan started */
+ return HeapTupleSelfUpdated; /* updated after scan started */
else
- return HeapTupleInvisible; /* updated before scan started */
+ return HeapTupleInvisible; /* updated before scan started */
}
if (MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple)))
@@ -902,12 +903,12 @@ HeapTupleSatisfiesDirty(HeapTupleHeader tuple, Snapshot snapshot,
if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
return true;
- if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
return true;
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
xmax = HeapTupleGetUpdateXid(tuple);
if (!TransactionIdIsValid(xmax))
@@ -962,7 +963,7 @@ HeapTupleSatisfiesDirty(HeapTupleHeader tuple, Snapshot snapshot,
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
return true;
@@ -1094,12 +1095,12 @@ HeapTupleSatisfiesMVCC(HeapTupleHeader tuple, Snapshot snapshot,
if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
return true;
- if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
return true;
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
xmax = HeapTupleGetUpdateXid(tuple);
if (!TransactionIdIsValid(xmax))
@@ -1111,7 +1112,7 @@ HeapTupleSatisfiesMVCC(HeapTupleHeader tuple, Snapshot snapshot,
else if (HeapTupleHeaderGetCmax(tuple) >= snapshot->curcid)
return true; /* updated after scan started */
else
- return false; /* updated before scan started */
+ return false; /* updated before scan started */
}
if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmax(tuple)))
@@ -1156,7 +1157,7 @@ HeapTupleSatisfiesMVCC(HeapTupleHeader tuple, Snapshot snapshot,
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
/* already checked above */
Assert(!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
@@ -1354,9 +1355,9 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
}
/*
- * We don't really care whether xmax did commit, abort or crash.
- * We know that xmax did lock the tuple, but it did not and will
- * never actually update it.
+ * We don't really care whether xmax did commit, abort or crash. We
+ * know that xmax did lock the tuple, but it did not and will never
+ * actually update it.
*/
return HEAPTUPLE_LIVE;
@@ -1629,7 +1630,7 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
bool
HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
{
- TransactionId xmax;
+ TransactionId xmax;
/* if there's no valid Xmax, then there's obviously no update either */
if (tuple->t_infomask & HEAP_XMAX_INVALID)
@@ -1643,8 +1644,8 @@ HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
return true;
/*
- * if HEAP_XMAX_LOCK_ONLY is not set and not a multi, then this
- * must necessarily have been updated
+ * if HEAP_XMAX_LOCK_ONLY is not set and not a multi, then this must
+ * necessarily have been updated
*/
if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
return false;