Diffstat (limited to 'src/backend/access/hash/hashinsert.c')
-rw-r--r--  src/backend/access/hash/hashinsert.c  29
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 860376cd481..7637c3566cb 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.37 2005/08/10 21:36:45 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.38 2005/10/15 02:49:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,8 +50,8 @@ _hash_doinsert(Relation rel, HashItem hitem)
bool isnull;
/*
- * Compute the hash key for the item. We do this first so as not to
- * need to hold any locks while running the hash function.
+ * Compute the hash key for the item. We do this first so as not to need
+ * to hold any locks while running the hash function.
*/
itup = &(hitem->hash_itup);
if (rel->rd_rel->relnatts != 1)
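The comment reflowed in this hunk is about ordering: the hash key is computed before any page or bucket locks are taken, because running the hash function can be arbitrarily expensive (it may call datatype support code). A standalone sketch of that ordering, using hypothetical demo_* helpers rather than the real hash AM API (PostgreSQL's own hashing goes through the datatype's hash support function, e.g. hash_any() for byte strings):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-in for an index key; not a PostgreSQL type. */
typedef struct
{
    const unsigned char *data;
    size_t               len;
} DemoKey;

/* Illustrative FNV-1a hash; purely a placeholder for the real hash function. */
static uint32_t
demo_hash_bytes(const unsigned char *p, size_t len)
{
    uint32_t    h = 2166136261u;

    for (size_t i = 0; i < len; i++)
    {
        h ^= p[i];
        h *= 16777619u;
    }
    return h;
}

static uint32_t
demo_compute_hashkey(const DemoKey *key)
{
    /* No buffer or bucket lock is held at this point: hashing can be slow. */
    return demo_hash_bytes(key->data, key->len);
}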
@@ -64,12 +64,12 @@ _hash_doinsert(Relation rel, HashItem hitem)
itemsz = IndexTupleDSize(hitem->hash_itup)
+ (sizeof(HashItemData) - sizeof(IndexTupleData));
- itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but
- * we need to be consistent */
+ itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but we
+ * need to be consistent */
/*
- * Acquire shared split lock so we can compute the target bucket
- * safely (see README).
+ * Acquire shared split lock so we can compute the target bucket safely
+ * (see README).
*/
_hash_getlock(rel, 0, HASH_SHARE);
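The MAXALIGN call in the hunk above rounds the tuple size up to the platform's maximum alignment, so the size computed here matches what PageAddItem will later consume. A simplified stand-in for that rounding (the real macro is driven by MAXIMUM_ALIGNOF from configure; the 8 below is an assumption for illustration):

#include <stddef.h>

#define DEMO_ALIGNOF   8        /* assumed maximum alignment boundary */
#define DEMO_MAXALIGN(LEN) \
    (((size_t) (LEN) + (DEMO_ALIGNOF - 1)) & ~(size_t) (DEMO_ALIGNOF - 1))

/* Example: DEMO_MAXALIGN(29) == 32, DEMO_MAXALIGN(32) == 32. */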
@@ -79,9 +79,9 @@ _hash_doinsert(Relation rel, HashItem hitem)
_hash_checkpage(rel, (Page) metap, LH_META_PAGE);
/*
- * Check whether the item can fit on a hash page at all. (Eventually,
- * we ought to try to apply TOAST methods if not.) Note that at this
- * point, itemsz doesn't include the ItemId.
+ * Check whether the item can fit on a hash page at all. (Eventually, we
+ * ought to try to apply TOAST methods if not.) Note that at this point,
+ * itemsz doesn't include the ItemId.
*/
if (itemsz > HashMaxItemSize((Page) metap))
ereport(ERROR,
@@ -89,7 +89,7 @@ _hash_doinsert(Relation rel, HashItem hitem)
errmsg("index row size %lu exceeds hash maximum %lu",
(unsigned long) itemsz,
(unsigned long) HashMaxItemSize((Page) metap)),
- errhint("Values larger than a buffer page cannot be indexed.")));
+ errhint("Values larger than a buffer page cannot be indexed.")));
/*
* Compute the target bucket number, and convert to block number.
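The bucket computation announced by the comment above masks the 32-bit hash key down to a bucket that actually exists, using the masks kept in the metapage. A minimal sketch of that scheme; the function name here is hypothetical, but the masking mirrors what the hash AM's hashkey-to-bucket helper does:

#include <stdint.h>

typedef uint32_t DemoBucket;

/*
 * maxbucket is the highest bucket currently allocated; highmask and
 * lowmask are (2^k - 1) masks bracketing it.  A key that would land in a
 * not-yet-created bucket falls back to the smaller mask, i.e. to the
 * bucket that has not been split yet.
 */
static DemoBucket
demo_hashkey2bucket(uint32_t hashkey, DemoBucket maxbucket,
                    uint32_t highmask, uint32_t lowmask)
{
    DemoBucket  bucket = hashkey & highmask;

    if (bucket > maxbucket)
        bucket = bucket & lowmask;

    return bucket;
}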
@@ -105,8 +105,7 @@ _hash_doinsert(Relation rel, HashItem hitem)
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
/*
- * Acquire share lock on target bucket; then we can release split
- * lock.
+ * Acquire share lock on target bucket; then we can release split lock.
*/
_hash_getlock(rel, blkno, HASH_SHARE);
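The comment just reflowed captures a lock-ordering rule: the share lock on the target bucket is taken while the shared split lock is still held, and only then is the split lock released, so a concurrent bucket split cannot invalidate the bucket choice in between. A condensed, compilable sketch of that ordering with hypothetical stubs (the real code pairs _hash_getlock with a matching drop on the relation's lmgr locks):

/* Hypothetical lock identifiers and stubs, standing in for lmgr locks. */
#define DEMO_SPLIT_LOCK    0
#define DEMO_BUCKET_LOCK   1

static void demo_getlock(int which)  { (void) which; }
static void demo_droplock(int which) { (void) which; }

static void
demo_lock_target_bucket(void)
{
    demo_getlock(DEMO_SPLIT_LOCK);   /* bucket count can't change under us */
    /* ... read the metapage, pick the target bucket and its block ... */
    demo_getlock(DEMO_BUCKET_LOCK);  /* pin the chosen bucket first ... */
    demo_droplock(DEMO_SPLIT_LOCK);  /* ... then allow splits to proceed */
}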
@@ -130,8 +129,8 @@ _hash_doinsert(Relation rel, HashItem hitem)
if (BlockNumberIsValid(nextblkno))
{
/*
- * ovfl page exists; go get it. if it doesn't have room,
- * we'll find out next pass through the loop test above.
+ * ovfl page exists; go get it. if it doesn't have room, we'll
+ * find out next pass through the loop test above.
*/
_hash_relbuf(rel, buf);
buf = _hash_getbuf(rel, nextblkno, HASH_WRITE);
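This last hunk sits inside the insertion loop: when the current page in the bucket lacks room, the code follows the overflow chain, and at the end of the chain a new overflow page is added, until a page with enough free space turns up. A self-contained sketch of that walk over a hypothetical in-memory chain, rather than the buffer-manager-based loop in the real file:

#include <stdlib.h>
#include <stddef.h>

typedef struct DemoPage
{
    size_t           freespace;     /* bytes still available on this page */
    struct DemoPage *next;          /* next overflow page, or NULL */
} DemoPage;

/* Hypothetical stand-in for the overflow-page allocator (_hash_addovflpage). */
static DemoPage *
demo_add_ovfl_page(DemoPage *tail)
{
    DemoPage   *p = calloc(1, sizeof(DemoPage));

    p->freespace = 8192;            /* assumed usable space on a fresh page */
    tail->next = p;
    return p;
}

/*
 * Walk the bucket's page chain until a page with room for itemsz bytes is
 * found, mirroring the "doesn't have room ... next pass through the loop"
 * comment in the diff.
 */
static DemoPage *
demo_find_page_with_room(DemoPage *page, size_t itemsz)
{
    while (page->freespace < itemsz)
    {
        if (page->next != NULL)
            page = page->next;                  /* ovfl page exists; go get it */
        else
            page = demo_add_ovfl_page(page);    /* chain exhausted; extend it */
    }
    return page;
}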