path: root/src/bitvec.c
author    larrybr <larrybr@noemail.net>  2023-06-07 08:40:31 +0000
committer larrybr <larrybr@noemail.net>  2023-06-07 08:40:31 +0000
commit    bc91738e665a6d1d18917da69df3eccbfc6e92c4 (patch)
tree      a2c2fbf1b78809d559489e7cec08007910963744 /src/bitvec.c
parent    25e87ed1baa41de59ca37a5cda65a2104916632a (diff)
download  sqlite-bc91738e665a6d1d18917da69df3eccbfc6e92c4.tar.gz
          sqlite-bc91738e665a6d1d18917da69df3eccbfc6e92c4.zip
Add a C-source spell-checking facility. make misspell (on Nix)
FossilOrigin-Name: 26c1bb4bd9e9f56613c3aa87407a7f562fd4ebde5bfd6dece02078001d9a45f8
Diffstat (limited to 'src/bitvec.c')
-rw-r--r--  src/bitvec.c  22
1 file changed, 11 insertions, 11 deletions
diff --git a/src/bitvec.c b/src/bitvec.c
index 9393428d9..13f87d567 100644
--- a/src/bitvec.c
+++ b/src/bitvec.c
@@ -17,8 +17,8 @@
** property. Usually only a few pages are meet either condition.
** So the bitmap is usually sparse and has low cardinality.
** But sometimes (for example when during a DROP of a large table) most
-** or all of the pages in a database can get journalled. In those cases,
-** the bitmap becomes dense with high cardinality. The algorithm needs
+** or all of the pages in a database can get journalled. In those cases,
+** the bitmap becomes dense with high cardinality. The algorithm needs
** to handle both cases well.
**
** The size of the bitmap is fixed when the object is created.
@@ -39,13 +39,13 @@
/* Size of the Bitvec structure in bytes. */
#define BITVEC_SZ 512
-/* Round the union size down to the nearest pointer boundary, since that's how
+/* Round the union size down to the nearest pointer boundary, since that's how
** it will be aligned within the Bitvec struct. */
#define BITVEC_USIZE \
(((BITVEC_SZ-(3*sizeof(u32)))/sizeof(Bitvec*))*sizeof(Bitvec*))
-/* Type of the array "element" for the bitmap representation.
-** Should be a power of 2, and ideally, evenly divide into BITVEC_USIZE.
+/* Type of the array "element" for the bitmap representation.
+** Should be a power of 2, and ideally, evenly divide into BITVEC_USIZE.
** Setting this to the "natural word" size of your CPU may improve
** performance. */
#define BITVEC_TELEM u8
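For concreteness, here is a minimal sketch of the size arithmetic in the hunk above, reproduced outside of SQLite. The pointer and u32 sizes are assumptions (a 64-bit build with 8-byte pointers and 4-byte u32), not something the diff itself states:

/* Illustrative only: mirrors the BITVEC_USIZE computation, with
** sizeof(void*) standing in for sizeof(Bitvec*). */
#include <stdio.h>
typedef unsigned int u32;               /* assumption: 4-byte u32          */
#define BITVEC_SZ 512
#define BITVEC_USIZE \
  (((BITVEC_SZ-(3*sizeof(u32)))/sizeof(void*))*sizeof(void*))
int main(void){
  /* 64-bit: ((512-12)/8)*8 = 496 bytes; 32-bit: ((512-12)/4)*4 = 500. */
  printf("BITVEC_USIZE = %zu\n", (size_t)BITVEC_USIZE);
  return 0;
}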
@@ -58,12 +58,12 @@
/* Number of u32 values in hash table. */
#define BITVEC_NINT (BITVEC_USIZE/sizeof(u32))
-/* Maximum number of entries in hash table before
+/* Maximum number of entries in hash table before
** sub-dividing and re-hashing. */
#define BITVEC_MXHASH (BITVEC_NINT/2)
/* Hashing function for the aHash representation.
-** Empirical testing showed that the *37 multiplier
-** (an arbitrary prime)in the hash function provided
+** Empirical testing showed that the *37 multiplier
+** (an arbitrary prime)in the hash function provided
** no fewer collisions than the no-op *1. */
#define BITVEC_HASH(X) (((X)*1)%BITVEC_NINT)
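Carrying the 64-bit figures from the sketch above (an assumption, not part of the diff): BITVEC_NINT = 496/4 = 124 u32 slots, BITVEC_MXHASH = 124/2 = 62 entries before the structure sub-divides and re-hashes, and BITVEC_HASH(X) reduces to X % 124 because the *1 multiplier is a no-op.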
@@ -109,7 +109,7 @@ struct Bitvec {
/*
** Create a new bitmap object able to handle bits between 0 and iSize,
-** inclusive. Return a pointer to the new object. Return NULL if
+** inclusive. Return a pointer to the new object. Return NULL if
** malloc fails.
*/
Bitvec *sqlite3BitvecCreate(u32 iSize){
@@ -188,7 +188,7 @@ int sqlite3BitvecSet(Bitvec *p, u32 i){
h = BITVEC_HASH(i++);
/* if there wasn't a hash collision, and this doesn't */
/* completely fill the hash, then just add it without */
- /* worring about sub-dividing and re-hashing. */
+ /* worrying about sub-dividing and re-hashing. */
if( !p->u.aHash[h] ){
if (p->nSet<(BITVEC_NINT-1)) {
goto bitvec_set_end;
@@ -365,7 +365,7 @@ int sqlite3BitvecBuiltinTest(int sz, int *aOp){
break;
}
case 3:
- case 4:
+ case 4:
default: {
nx = 2;
sqlite3_randomness(sizeof(i), &i);
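For context on the functions this diff touches, a hedged sketch of a typical Bitvec lifecycle follows. The sqlite3Bitvec* routines are internal (declared in sqliteInt.h), so this only compiles inside the SQLite tree or a test harness, and the size and bit index here are made up for illustration:

/* Illustrative sketch only: assumes the internal Bitvec API is visible. */
static int exampleBitvecUse(void){
  Bitvec *pBits = sqlite3BitvecCreate(1000);  /* bitmap covering 1000 bits */
  int seen = 0;
  if( pBits==0 ) return SQLITE_NOMEM;
  if( sqlite3BitvecSet(pBits, 42)==SQLITE_OK ){
    seen = sqlite3BitvecTest(pBits, 42);      /* non-zero: bit 42 is set   */
  }
  sqlite3BitvecDestroy(pBits);
  return seen ? SQLITE_OK : SQLITE_ERROR;
}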