-rw-r--r--  src/backend/access/hash/hash_xlog.c      4
-rw-r--r--  src/backend/access/hash/hashinsert.c     6
-rw-r--r--  src/backend/access/hash/hashovfl.c       2
-rw-r--r--  src/backend/access/hash/hashpage.c       2
-rw-r--r--  src/backend/access/heap/hio.c            4
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c    6
-rw-r--r--  src/backend/access/nbtree/nbtsort.c      2
-rw-r--r--  src/backend/access/nbtree/nbtxlog.c     32
-rw-r--r--  src/include/access/itup.h                3
9 files changed, 33 insertions(+), 28 deletions(-)
diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c
index b38208e61d5..ab5aaff1566 100644
--- a/src/backend/access/hash/hash_xlog.c
+++ b/src/backend/access/hash/hash_xlog.c
@@ -558,7 +558,7 @@ hash_xlog_move_page_contents(XLogReaderState *record)
Size itemsz;
OffsetNumber l;
- itemsz = IndexTupleDSize(*itup);
+ itemsz = IndexTupleSize(itup);
itemsz = MAXALIGN(itemsz);
data += itemsz;
@@ -686,7 +686,7 @@ hash_xlog_squeeze_page(XLogReaderState *record)
Size itemsz;
OffsetNumber l;
- itemsz = IndexTupleDSize(*itup);
+ itemsz = IndexTupleSize(itup);
itemsz = MAXALIGN(itemsz);
data += itemsz;
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index f668dcff0f6..f121286b8ca 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -55,7 +55,7 @@ _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel)
hashkey = _hash_get_indextuple_hashkey(itup);
/* compute item size too */
- itemsz = IndexTupleDSize(*itup);
+ itemsz = IndexTupleSize(itup);
itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we
* need to be consistent */
@@ -222,7 +222,7 @@ restart_insert:
XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);
XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
- XLogRegisterBufData(0, (char *) itup, IndexTupleDSize(*itup));
+ XLogRegisterBufData(0, (char *) itup, IndexTupleSize(itup));
recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INSERT);
@@ -309,7 +309,7 @@ _hash_pgaddmultitup(Relation rel, Buffer buf, IndexTuple *itups,
{
Size itemsize;
- itemsize = IndexTupleDSize(*itups[i]);
+ itemsize = IndexTupleSize(itups[i]);
itemsize = MAXALIGN(itemsize);
/* Find where to insert the tuple (preserving page's hashkey ordering) */
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index 2033b2f7f97..b170b46d86b 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -891,7 +891,7 @@ readpage:
itup = (IndexTuple) PageGetItem(rpage,
PageGetItemId(rpage, roffnum));
- itemsz = IndexTupleDSize(*itup);
+ itemsz = IndexTupleSize(itup);
itemsz = MAXALIGN(itemsz);
/*
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index e3c8721d295..3859e3bd838 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -1173,7 +1173,7 @@ _hash_splitbucket(Relation rel,
* the current page in the new bucket, we must allocate a new
* overflow page and place the tuple on that page instead.
*/
- itemsz = IndexTupleDSize(*new_itup);
+ itemsz = IndexTupleSize(new_itup);
itemsz = MAXALIGN(itemsz);
if (PageGetFreeSpaceForMultipleTuples(npage, nitups + 1) < (all_tups_size + itemsz))
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 0d7bc683399..42e75ec0b67 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -67,9 +67,9 @@ RelationPutHeapTuple(Relation relation,
if (!token)
{
ItemId itemId = PageGetItemId(pageHeader, offnum);
- Item item = PageGetItem(pageHeader, itemId);
+ HeapTupleHeader item = (HeapTupleHeader) PageGetItem(pageHeader, itemId);
- ((HeapTupleHeader) item)->t_ctid = tuple->t_self;
+ item->t_ctid = tuple->t_self;
}
}
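The hio.c change above is the same kind of cleanup that the nbtxlog.c hunks further down apply to Item locals: give the variable its real type where it is fetched from the page, instead of carrying a generic Item (a char *) and casting at every use. A minimal standalone illustration of the two styles, with cut-down stand-ins rather than the real heap headers:

#include <stdio.h>

typedef char *Item;				/* generic, untyped item pointer */

/*
 * Cut-down stand-in for HeapTupleHeaderData; the real t_ctid is an
 * ItemPointerData, not an int.
 */
typedef struct
{
	int			t_ctid;
} HeapTupleHeaderData;

typedef HeapTupleHeaderData *HeapTupleHeader;

static Item
get_item(HeapTupleHeaderData *stored)
{
	/* PageGetItem() similarly hands back an untyped Item */
	return (Item) stored;
}

int
main(void)
{
	HeapTupleHeaderData page_item = {0};

	/* Old style: keep the generic pointer, cast at the point of use. */
	Item		item = get_item(&page_item);
	((HeapTupleHeader) item)->t_ctid = 1;

	/*
	 * New style (what this commit switches to): cast once where the
	 * pointer is obtained, then access members with type checking.
	 */
	HeapTupleHeader tuphdr = (HeapTupleHeader) get_item(&page_item);
	tuphdr->t_ctid = 2;

	printf("%d\n", page_item.t_ctid);
	return 0;
}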
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 51059c0c7d5..2fe98673531 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -558,7 +558,7 @@ _bt_findinsertloc(Relation rel,
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
- itemsz = IndexTupleDSize(*newtup);
+ itemsz = IndexTupleSize(newtup);
itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we
* need to be consistent */
@@ -755,7 +755,7 @@ _bt_insertonpg(Relation rel,
elog(ERROR, "cannot insert to incompletely split page %u",
BufferGetBlockNumber(buf));
- itemsz = IndexTupleDSize(*itup);
+ itemsz = IndexTupleSize(itup);
itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we
* need to be consistent */
@@ -914,7 +914,7 @@ _bt_insertonpg(Relation rel,
sizeof(IndexTupleData));
}
else
- XLogRegisterBufData(0, (char *) itup, IndexTupleDSize(*itup));
+ XLogRegisterBufData(0, (char *) itup, IndexTupleSize(itup));
recptr = XLogInsert(RM_BTREE_ID, xlinfo);
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 521ae6e5f77..f0c276b52a1 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -813,7 +813,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
last_off = state->btps_lastoff;
pgspc = PageGetFreeSpace(npage);
- itupsz = IndexTupleDSize(*itup);
+ itupsz = IndexTupleSize(itup);
itupsz = MAXALIGN(itupsz);
/*
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index bed1dd2a098..233c3965d95 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -51,9 +51,15 @@ _bt_restore_page(Page page, char *from, int len)
i = 0;
while (from < end)
{
- /* Need to copy tuple header due to alignment considerations */
+ /*
+ * As we step through the items, 'from' won't always be properly
+ * aligned, so we need to use memcpy(). Further, we use Item (which
+ * is just a char*) here for our items array for the same reason;
+ * wouldn't want the compiler or anyone thinking that an item is
+ * aligned when it isn't.
+ */
memcpy(&itupdata, from, sizeof(IndexTupleData));
- itemsz = IndexTupleDSize(itupdata);
+ itemsz = IndexTupleSize(&itupdata);
itemsz = MAXALIGN(itemsz);
items[i] = (Item) from;
@@ -205,7 +211,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
BTPageOpaque ropaque;
char *datapos;
Size datalen;
- Item left_hikey = NULL;
+ IndexTuple left_hikey = NULL;
Size left_hikeysz = 0;
BlockNumber leftsib;
BlockNumber rightsib;
@@ -248,7 +254,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
{
ItemId hiItemId = PageGetItemId(rpage, P_FIRSTDATAKEY(ropaque));
- left_hikey = PageGetItem(rpage, hiItemId);
+ left_hikey = (IndexTuple) PageGetItem(rpage, hiItemId);
left_hikeysz = ItemIdGetLength(hiItemId);
}
@@ -272,7 +278,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
Page lpage = (Page) BufferGetPage(lbuf);
BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
OffsetNumber off;
- Item newitem = NULL;
+ IndexTuple newitem = NULL;
Size newitemsz = 0;
Page newlpage;
OffsetNumber leftoff;
@@ -281,7 +287,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
if (onleft)
{
- newitem = (Item) datapos;
+ newitem = (IndexTuple) datapos;
newitemsz = MAXALIGN(IndexTupleSize(newitem));
datapos += newitemsz;
datalen -= newitemsz;
@@ -290,7 +296,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
/* Extract left hikey and its size (assuming 16-bit alignment) */
if (!isleaf)
{
- left_hikey = (Item) datapos;
+ left_hikey = (IndexTuple) datapos;
left_hikeysz = MAXALIGN(IndexTupleSize(left_hikey));
datapos += left_hikeysz;
datalen -= left_hikeysz;
@@ -301,7 +307,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
/* Set high key */
leftoff = P_HIKEY;
- if (PageAddItem(newlpage, left_hikey, left_hikeysz,
+ if (PageAddItem(newlpage, (Item) left_hikey, left_hikeysz,
P_HIKEY, false, false) == InvalidOffsetNumber)
elog(PANIC, "failed to add high key to left page after split");
leftoff = OffsetNumberNext(leftoff);
@@ -310,12 +316,12 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
{
ItemId itemid;
Size itemsz;
- Item item;
+ IndexTuple item;
/* add the new item if it was inserted on left page */
if (onleft && off == xlrec->newitemoff)
{
- if (PageAddItem(newlpage, newitem, newitemsz, leftoff,
+ if (PageAddItem(newlpage, (Item) newitem, newitemsz, leftoff,
false, false) == InvalidOffsetNumber)
elog(ERROR, "failed to add new item to left page after split");
leftoff = OffsetNumberNext(leftoff);
@@ -323,8 +329,8 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
itemid = PageGetItemId(lpage, off);
itemsz = ItemIdGetLength(itemid);
- item = PageGetItem(lpage, itemid);
- if (PageAddItem(newlpage, item, itemsz, leftoff,
+ item = (IndexTuple) PageGetItem(lpage, itemid);
+ if (PageAddItem(newlpage, (Item) item, itemsz, leftoff,
false, false) == InvalidOffsetNumber)
elog(ERROR, "failed to add old item to left page after split");
leftoff = OffsetNumberNext(leftoff);
@@ -333,7 +339,7 @@ btree_xlog_split(bool onleft, XLogReaderState *record)
/* cope with possibility that newitem goes at the end */
if (onleft && off == xlrec->newitemoff)
{
- if (PageAddItem(newlpage, newitem, newitemsz, leftoff,
+ if (PageAddItem(newlpage, (Item) newitem, newitemsz, leftoff,
false, false) == InvalidOffsetNumber)
elog(ERROR, "failed to add new item to left page after split");
leftoff = OffsetNumberNext(leftoff);
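The new comment in _bt_restore_page describes why the tuple header is memcpy'd rather than read in place: the packed tuple stream in the WAL record is not MAXALIGNed, so 'from' may point at any byte offset. A minimal standalone sketch of that pattern, using cut-down stand-ins for IndexTupleData and the size macro rather than the real itup.h:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Cut-down stand-ins for PostgreSQL's IndexTupleData and size macro. */
typedef struct
{
	uint16_t	t_info;			/* low-order bits hold the tuple size */
} IndexTupleData;

#define INDEX_SIZE_MASK 0x1FFF
#define IndexTupleSize(itup) ((size_t) ((itup)->t_info & INDEX_SIZE_MASK))

int
main(void)
{
	char		buf[16] = {0};
	const char *from = buf;
	const char *end;
	IndexTupleData hdr;

	/* Build a packed stream: a 3-byte "tuple" followed by a 5-byte one. */
	hdr.t_info = 3;
	memcpy(buf, &hdr, sizeof(hdr));
	hdr.t_info = 5;
	memcpy(buf + 3, &hdr, sizeof(hdr));	/* second header is misaligned */
	end = buf + 3 + 5;

	while (from < end)
	{
		/*
		 * 'from' may not be suitably aligned for IndexTupleData, so copy
		 * the header into an aligned local before reading t_info; the
		 * real code then MAXALIGNs the size before advancing.
		 */
		memcpy(&hdr, from, sizeof(IndexTupleData));
		printf("tuple of %zu bytes\n", IndexTupleSize(&hdr));
		from += IndexTupleSize(&hdr);
	}
	return 0;
}

On strict-alignment platforms a plain ((IndexTuple) from)->t_info read could fault; copying the fixed-size header into an aligned local is the portable way to get at the size field.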
diff --git a/src/include/access/itup.h b/src/include/access/itup.h
index 0ffa91d6861..9be3442c66d 100644
--- a/src/include/access/itup.h
+++ b/src/include/access/itup.h
@@ -67,8 +67,7 @@ typedef IndexAttributeBitMapData * IndexAttributeBitMap;
#define INDEX_VAR_MASK 0x4000
#define INDEX_NULL_MASK 0x8000
-#define IndexTupleSize(itup) ((Size) (((IndexTuple) (itup))->t_info & INDEX_SIZE_MASK))
-#define IndexTupleDSize(itup) ((Size) ((itup).t_info & INDEX_SIZE_MASK))
+#define IndexTupleSize(itup) ((Size) ((itup)->t_info & INDEX_SIZE_MASK))
#define IndexTupleHasNulls(itup) ((((IndexTuple) (itup))->t_info & INDEX_NULL_MASK))
#define IndexTupleHasVarwidths(itup) ((((IndexTuple) (itup))->t_info & INDEX_VAR_MASK))
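Since the itup.h change is what every other hunk follows from, a compilable sketch of the before and after may be useful. It uses cut-down local definitions (size_t in place of Size, a minimal IndexTupleData) rather than the real header:

#include <stdint.h>
#include <stdio.h>

/* Cut-down stand-ins for the itup.h definitions, for illustration only. */
typedef struct IndexTupleData
{
	uint16_t	t_info;			/* size in the low bits, flags in the high bits */
} IndexTupleData;

typedef IndexTupleData *IndexTuple;

#define INDEX_SIZE_MASK 0x1FFF

/* Removed macro: took the struct itself, so callers dereferenced. */
#define IndexTupleDSize(itup) ((size_t) ((itup).t_info & INDEX_SIZE_MASK))

/* Surviving macro, now without the (IndexTuple) cast: takes a pointer. */
#define IndexTupleSize(itup) ((size_t) ((itup)->t_info & INDEX_SIZE_MASK))

int
main(void)
{
	IndexTupleData tup = {.t_info = 24};
	IndexTuple	itup = &tup;

	/* Old call sites wrote IndexTupleDSize(*itup) ... */
	printf("%zu\n", IndexTupleDSize(*itup));
	/* ... new call sites write IndexTupleSize(itup); same value. */
	printf("%zu\n", IndexTupleSize(itup));
	return 0;
}

Dropping the (IndexTuple) cast from IndexTupleSize() also explains the nbtxlog.c changes above: locals that used to be declared as the generic Item type must now be (and are) declared as IndexTuple, with the Item cast moved to the PageAddItem() calls that genuinely need it.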