Diffstat (limited to 'src/backend/access/common/indextuple.c')
-rw-r--r--  src/backend/access/common/indextuple.c | 142
1 file changed, 89 insertions, 53 deletions
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index c83e34834ca..471d28c28c4 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.81 2007/02/27 23:48:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.82 2007/04/06 04:21:41 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -38,6 +38,7 @@ index_form_tuple(TupleDesc tupleDescriptor,
char *tp; /* tuple pointer */
IndexTuple tuple; /* return tuple */
Size size,
+ data_size,
hoff;
int i;
unsigned short infomask = 0;
@@ -74,9 +75,9 @@ index_form_tuple(TupleDesc tupleDescriptor,
*/
if (VARATT_IS_EXTERNAL(values[i]))
{
- untoasted_values[i] = PointerGetDatum(
- heap_tuple_fetch_attr(
- (varattrib *) DatumGetPointer(values[i])));
+ untoasted_values[i] =
+ PointerGetDatum(heap_tuple_fetch_attr((struct varlena *)
+ DatumGetPointer(values[i])));
untoasted_free[i] = true;
}
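The cast change above tracks the 8.3 varvarlena work: heap_tuple_fetch_attr() now takes a struct varlena * rather than the old varattrib *. The resulting per-attribute detoast pattern looks roughly like the following; this is a minimal backend-context sketch, and fetch_if_external is a hypothetical helper name, not a function in the tree.

    #include "postgres.h"
    #include "access/tuptoaster.h"

    /* Hypothetical helper: fetch an out-of-line TOAST value, if any.
     * Sets *need_free when the result is a freshly palloc'd copy. */
    static Datum
    fetch_if_external(Datum value, bool *need_free)
    {
        struct varlena *val = (struct varlena *) DatumGetPointer(value);

        *need_free = false;
        if (VARATT_IS_EXTERNAL(val))
        {
            val = heap_tuple_fetch_attr(val);   /* palloc'd copy */
            *need_free = true;
        }
        return PointerGetDatum(val);
    }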
@@ -84,8 +85,8 @@ index_form_tuple(TupleDesc tupleDescriptor,
* If value is above size target, and is of a compressible datatype,
* try to compress it in-line.
*/
- if (VARSIZE(untoasted_values[i]) > TOAST_INDEX_TARGET &&
- !VARATT_IS_EXTENDED(untoasted_values[i]) &&
+ if (!VARATT_IS_EXTENDED(untoasted_values[i]) &&
+ VARSIZE(untoasted_values[i]) > TOAST_INDEX_TARGET &&
(att->attstorage == 'x' || att->attstorage == 'm'))
{
Datum cvalue = toast_compress_datum(untoasted_values[i]);
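Swapping the order of the two tests is not cosmetic: with variable-length varlena headers, VARSIZE() assumes a full 4-byte length word, which is only guaranteed once VARATT_IS_EXTENDED() has ruled out external, compressed, and short-header forms. A hedged sketch of the resulting guard pattern, in which try_compress_inline is an illustrative name:

    #include "postgres.h"
    #include "access/tuptoaster.h"

    /* Illustrative helper: attempt in-line compression of an index datum.
     * Returns the compressed datum, or the original datum if compression
     * is not applicable or did not help. */
    static Datum
    try_compress_inline(Datum value, char attstorage)
    {
        /* VARATT_IS_EXTENDED must be tested before VARSIZE: VARSIZE reads
         * a 4-byte header that extended (toasted or short-header) values
         * don't have. */
        if (!VARATT_IS_EXTENDED(DatumGetPointer(value)) &&
            VARSIZE(DatumGetPointer(value)) > TOAST_INDEX_TARGET &&
            (attstorage == 'x' || attstorage == 'm'))
        {
            Datum cvalue = toast_compress_datum(value);

            if (DatumGetPointer(cvalue) != NULL)
                return cvalue;      /* compression succeeded */
        }
        return value;
    }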
@@ -116,12 +117,13 @@ index_form_tuple(TupleDesc tupleDescriptor,
hoff = IndexInfoFindDataOffset(infomask);
#ifdef TOAST_INDEX_HACK
- size = hoff + heap_compute_data_size(tupleDescriptor,
- untoasted_values, isnull);
+ data_size = heap_compute_data_size(tupleDescriptor,
+ untoasted_values, isnull);
#else
- size = hoff + heap_compute_data_size(tupleDescriptor,
- values, isnull);
+ data_size = heap_compute_data_size(tupleDescriptor,
+ values, isnull);
#endif
+ size = hoff + data_size;
size = MAXALIGN(size); /* be conservative */
tp = (char *) palloc0(size);
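Factoring out data_size lets it be added to the header offset here and also handed to heap_fill_tuple() below as a cross-check. As a worked example, assuming 8-byte maximal alignment and no null bitmap (so hoff = MAXALIGN(sizeof(IndexTupleData)) = 8):

    Size hoff = 8;                  /* IndexInfoFindDataOffset(infomask) */
    Size data_size = 13;            /* heap_compute_data_size(...) result */
    Size size = hoff + data_size;   /* 21 */
    size = MAXALIGN(size);          /* rounds up to 24 */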
@@ -135,6 +137,7 @@ index_form_tuple(TupleDesc tupleDescriptor,
#endif
isnull,
(char *) tp + hoff,
+ data_size,
&tupmask,
(hasnull ? (bits8 *) tp + sizeof(IndexTupleData) : NULL));
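heap_fill_tuple() gained the data_size parameter in this same patch series so that it can verify it writes exactly as many bytes as the caller sized the buffer for. A standalone model of that defensive check, with simplified types and illustrative names rather than the actual backend code:

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    static void
    fill_data(char *dest, size_t expected_size,
              const char *const *vals, const size_t *lens, int nvals)
    {
        char   *p = dest;
        int     i;

        for (i = 0; i < nvals; i++)
        {
            memcpy(p, vals[i], lens[i]);
            p += lens[i];
        }
        /* the filler must consume exactly the precomputed data size */
        assert((size_t) (p - dest) == expected_size);
    }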
@@ -201,17 +204,14 @@ nocache_index_getattr(IndexTuple tup,
bool *isnull)
{
Form_pg_attribute *att = tupleDesc->attrs;
- char *tp; /* ptr to att in tuple */
- bits8 *bp = NULL; /* ptr to null bitmask in tuple */
- bool slow = false; /* do we have to walk nulls? */
+ char *tp; /* ptr to data part of tuple */
+ bits8 *bp = NULL; /* ptr to null bitmap in tuple */
+ bool slow = false; /* do we have to walk attrs? */
int data_off; /* tuple data offset */
+ int off; /* current offset within data */
(void) isnull; /* not used */
- /*
- * sanity checks
- */
-
/* ----------------
* Three cases:
*
@@ -237,7 +237,7 @@ nocache_index_getattr(IndexTuple tup,
{
#ifdef IN_MACRO
/* This is handled in the macro */
- if (att[attnum]->attcacheoff != -1)
+ if (att[attnum]->attcacheoff >= 0)
{
return fetchatt(att[attnum],
(char *) tup + data_off +
@@ -295,21 +295,28 @@ nocache_index_getattr(IndexTuple tup,
tp = (char *) tup + data_off;
- /*
- * now check for any non-fixed length attrs before our attribute
- */
if (!slow)
{
- if (att[attnum]->attcacheoff != -1)
+ /*
+ * If we get here, there are no nulls up to and including the target
+ * attribute. If we have a cached offset, we can use it.
+ */
+ if (att[attnum]->attcacheoff >= 0)
{
return fetchatt(att[attnum],
tp + att[attnum]->attcacheoff);
}
- else if (IndexTupleHasVarwidths(tup))
+
+ /*
+ * Otherwise, check for non-fixed-length attrs up to and including
+ * target. If there aren't any, it's safe to cheaply initialize
+ * the cached offsets for these attrs.
+ */
+ if (IndexTupleHasVarwidths(tup))
{
int j;
- for (j = 0; j < attnum; j++)
+ for (j = 0; j <= attnum; j++)
{
if (att[j]->attlen <= 0)
{
@@ -320,80 +327,109 @@ nocache_index_getattr(IndexTuple tup,
}
}
- /*
- * If slow is false, and we got here, we know that we have a tuple with no
- * nulls or var-widths before the target attribute. If possible, we also
- * want to initialize the remainder of the attribute cached offset values.
- */
if (!slow)
{
+ int natts = tupleDesc->natts;
int j = 1;
- long off;
/*
- * need to set cache for some atts
+ * If we get here, we have a tuple with no nulls or var-widths up to
+ * and including the target attribute, so we can use the cached offset
+ * ... only we don't have it yet, or we'd not have got here. Since
+ * it's cheap to compute offsets for fixed-width columns, we take the
+ * opportunity to initialize the cached offsets for *all* the leading
+ * fixed-width columns, in hope of avoiding future visits to this
+ * routine.
*/
-
att[0]->attcacheoff = 0;
- while (j < attnum && att[j]->attcacheoff > 0)
+ /* we might have set some offsets in the slow path previously */
+ while (j < natts && att[j]->attcacheoff > 0)
j++;
off = att[j - 1]->attcacheoff + att[j - 1]->attlen;
- for (; j <= attnum; j++)
+ for (; j < natts; j++)
{
- off = att_align(off, att[j]->attalign);
+ if (att[j]->attlen <= 0)
+ break;
+
+ off = att_align_nominal(off, att[j]->attalign);
att[j]->attcacheoff = off;
off += att[j]->attlen;
}
- return fetchatt(att[attnum], tp + att[attnum]->attcacheoff);
+ Assert(j > attnum);
+
+ off = att[attnum]->attcacheoff;
}
else
{
bool usecache = true;
- int off = 0;
int i;
/*
- * Now we know that we have to walk the tuple CAREFULLY.
+ * Now we know that we have to walk the tuple CAREFULLY. But we
+ * still might be able to cache some offsets for next time.
+ *
+ * Note - This loop is a little tricky. For each non-null attribute,
+ * we have to first account for alignment padding before the attr,
+ * then advance over the attr based on its length. Nulls have no
+ * storage and no alignment padding either. We can use/set
+ * attcacheoff until we reach either a null or a var-width attribute.
*/
-
- for (i = 0; i < attnum; i++)
+ off = 0;
+ for (i = 0; ; i++) /* loop exit is at "break" */
{
- if (IndexTupleHasNulls(tup))
+ if (IndexTupleHasNulls(tup) && att_isnull(i, bp))
{
- if (att_isnull(i, bp))
- {
- usecache = false;
- continue;
- }
+ usecache = false;
+ continue; /* this cannot be the target att */
}
/* If we know the next offset, we can skip the rest */
- if (usecache && att[i]->attcacheoff != -1)
+ if (usecache && att[i]->attcacheoff >= 0)
off = att[i]->attcacheoff;
+ else if (att[i]->attlen == -1)
+ {
+ /*
+ * We can only cache the offset for a varlena attribute
+ * if the offset is already suitably aligned, so that there
+ * would be no pad bytes in any case: then the offset will
+ * be valid for either an aligned or unaligned value.
+ */
+ if (usecache &&
+ off == att_align_nominal(off, att[i]->attalign))
+ att[i]->attcacheoff = off;
+ else
+ {
+ off = att_align_pointer(off, att[i]->attalign, -1,
+ tp + off);
+ usecache = false;
+ }
+ }
else
{
- off = att_align(off, att[i]->attalign);
+ /* not varlena, so safe to use att_align_nominal */
+ off = att_align_nominal(off, att[i]->attalign);
if (usecache)
att[i]->attcacheoff = off;
}
- off = att_addlength(off, att[i]->attlen, tp + off);
+ if (i == attnum)
+ break;
+
+ off = att_addlength_pointer(off, att[i]->attlen, tp + off);
if (usecache && att[i]->attlen <= 0)
usecache = false;
}
-
- off = att_align(off, att[attnum]->attalign);
-
- return fetchatt(att[attnum], tp + off);
}
+
+ return fetchatt(att[attnum], tp + off);
}
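The heart of the rewritten slow path is the split between att_align_nominal(), which assumes a full 4-byte varlena header and may therefore insert pad bytes, and att_align_pointer(), which inspects the data to detect a pad-free single-byte header. As the comment above explains, a varlena column's offset can be cached only when nominal alignment would change nothing, so the cached value is correct for both header forms. A standalone model of that rule, where ALIGN4 stands in for the real macros from access/tupmacs.h:

    #include <stdbool.h>
    #include <stdio.h>

    #define ALIGN4(off) (((off) + 3) & ~3)  /* "nominal" 4-byte alignment */

    static bool
    can_cache_varlena_offset(int off)
    {
        /* cacheable only when no pad bytes would be inserted either way */
        return off == ALIGN4(off);
    }

    int
    main(void)
    {
        printf("%d -> %s\n", 8, can_cache_varlena_offset(8) ? "cache" : "skip");
        printf("%d -> %s\n", 10, can_cache_varlena_offset(10) ? "cache" : "skip");
        return 0;
    }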
/*