Diffstat (limited to 'src/btree.c')
-rw-r--r-- | src/btree.c | 205
1 file changed, 147 insertions, 58 deletions
diff --git a/src/btree.c b/src/btree.c
index 907e37f1e..c752b0771 100644
--- a/src/btree.c
+++ b/src/btree.c
@@ -151,8 +151,47 @@ int corruptPageError(int lineno, MemPage *p){
 # define SQLITE_CORRUPT_PAGE(pMemPage) SQLITE_CORRUPT_PGNO(pMemPage->pgno)
 #endif
 
+/* Default value for SHARED_LOCK_TRACE macro if shared-cache is disabled
+** or if the lock tracking is disabled.  This is always the value for
+** release builds.
+*/
+#define SHARED_LOCK_TRACE(X,MSG,TAB,TYPE)  /*no-op*/
+
 #ifndef SQLITE_OMIT_SHARED_CACHE
+
+#if 0
+/*   ^----  Change to 1 and recompile to enable shared-lock tracing
+**          for debugging purposes.
+**
+** Print all shared-cache locks on a BtShared.  Debugging use only.
+*/
+static void sharedLockTrace(
+  BtShared *pBt,
+  const char *zMsg,
+  int iRoot,
+  int eLockType
+){
+  BtLock *pLock;
+  if( iRoot>0 ){
+    printf("%s-%p %u%s:", zMsg, pBt, iRoot, eLockType==READ_LOCK?"R":"W");
+  }else{
+    printf("%s-%p:", zMsg, pBt);
+  }
+  for(pLock=pBt->pLock; pLock; pLock=pLock->pNext){
+    printf(" %p/%u%s", pLock->pBtree, pLock->iTable,
+           pLock->eLock==READ_LOCK ? "R" : "W");
+    while( pLock->pNext && pLock->pBtree==pLock->pNext->pBtree ){
+      pLock = pLock->pNext;
+      printf(",%u%s", pLock->iTable, pLock->eLock==READ_LOCK ? "R" : "W");
+    }
+  }
+  printf("\n");
+  fflush(stdout);
+}
+#undef SHARED_LOCK_TRACE
+#define SHARED_LOCK_TRACE(X,MSG,TAB,TYPE)  sharedLockTrace(X,MSG,TAB,TYPE)
+#endif /* Shared-lock tracing */
+
 #ifdef SQLITE_DEBUG
 /*
 **** This function is only used as part of an assert() statement. ***
@@ -229,6 +268,8 @@ static int hasSharedCacheTableLock(
     iTab = iRoot;
   }
 
+  SHARED_LOCK_TRACE(pBtree->pBt,"hasLock",iRoot,eLockType);
+
   /* Search for the required lock. Either a write-lock on root-page iTab, a
   ** write-lock on the schema table, or (if the client is reading) a
   ** read-lock on iTab will suffice. Return 1 if any of these are found.
   */
@@ -362,6 +403,8 @@ static int setSharedCacheTableLock(Btree *p, Pgno iTable, u8 eLock){
   BtLock *pLock = 0;
   BtLock *pIter;
 
+  SHARED_LOCK_TRACE(pBt,"setLock", iTable, eLock);
+
   assert( sqlite3BtreeHoldsMutex(p) );
   assert( eLock==READ_LOCK || eLock==WRITE_LOCK );
   assert( p->db!=0 );
@@ -429,6 +472,8 @@ static void clearAllSharedCacheTableLocks(Btree *p){
   assert( p->sharable || 0==*ppIter );
   assert( p->inTrans>0 );
 
+  SHARED_LOCK_TRACE(pBt, "clearAllLocks", 0, 0);
+
   while( *ppIter ){
     BtLock *pLock = *ppIter;
     assert( (pBt->btsFlags & BTS_EXCLUSIVE)==0 || pBt->pWriter==pLock->pBtree );
@@ -467,6 +512,9 @@ static void clearAllSharedCacheTableLocks(Btree *p){
 */
 static void downgradeAllSharedCacheTableLocks(Btree *p){
   BtShared *pBt = p->pBt;
+
+  SHARED_LOCK_TRACE(pBt, "downgradeLocks", 0, 0);
+
   if( pBt->pWriter==p ){
     BtLock *pLock;
     pBt->pWriter = 0;
@@ -5080,9 +5128,12 @@ static int accessPayload(
       if( pCur->aOverflow==0
        || nOvfl*(int)sizeof(Pgno) > sqlite3MallocSize(pCur->aOverflow)
       ){
-        Pgno *aNew = (Pgno*)sqlite3Realloc(
-            pCur->aOverflow, nOvfl*2*sizeof(Pgno)
-        );
+        Pgno *aNew;
+        if( sqlite3FaultSim(413) ){
+          aNew = 0;
+        }else{
+          aNew = (Pgno*)sqlite3Realloc(pCur->aOverflow, nOvfl*2*sizeof(Pgno));
+        }
         if( aNew==0 ){
           return SQLITE_NOMEM_BKPT;
         }else{
@@ -5092,6 +5143,12 @@ static int accessPayload(
       memset(pCur->aOverflow, 0, nOvfl*sizeof(Pgno));
       pCur->curFlags |= BTCF_ValidOvfl;
     }else{
+      /* Sanity check the validity of the overflow page cache */
+      assert( pCur->aOverflow[0]==nextPage
+           || pCur->aOverflow[0]==0
+           || CORRUPT_DB );
+      assert( pCur->aOverflow[0]!=0 || pCur->aOverflow[offset/ovflSize]==0 );
+
       /* If the overflow page-list cache has been allocated and the
       ** entry for the first required overflow page is valid, skip
       ** directly to it.
@@ -5573,6 +5630,23 @@ int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){
   return rc;
 }
 
+#ifdef SQLITE_DEBUG
+/* The cursor is CURSOR_VALID and has BTCF_AtLast set.  Verify that
+** these flags are true for a consistent database.
+**
+** This routine is called from within assert() statements only.
+** It is an internal verification routine and does not appear in production
+** builds.
+*/
+static int cursorIsAtLastEntry(BtCursor *pCur){
+  int ii;
+  for(ii=0; ii<pCur->iPage; ii++){
+    if( pCur->aiIdx[ii]!=pCur->apPage[ii]->nCell ) return 0;
+  }
+  return pCur->ix==pCur->pPage->nCell-1 && pCur->pPage->leaf!=0;
+}
+#endif
+
 /* Move the cursor to the last entry in the table.  Return SQLITE_OK
 ** on success.  Set *pRes to 0 if the cursor actually points to something
 ** or set *pRes to 1 if the table is empty.
@@ -5601,18 +5675,7 @@ int sqlite3BtreeLast(BtCursor *pCur, int *pRes){
 
   /* If the cursor already points to the last entry, this is a no-op. */
   if( CURSOR_VALID==pCur->eState && (pCur->curFlags & BTCF_AtLast)!=0 ){
-#ifdef SQLITE_DEBUG
-    /* This block serves to assert() that the cursor really does point
-    ** to the last entry in the b-tree.
-    */
-    int ii;
-    for(ii=0; ii<pCur->iPage; ii++){
-      assert( pCur->aiIdx[ii]==pCur->apPage[ii]->nCell );
-    }
-    assert( pCur->ix==pCur->pPage->nCell-1 || CORRUPT_DB );
-    testcase( pCur->ix!=pCur->pPage->nCell-1 );
-    /* ^-- dbsqlfuzz b92b72e4de80b5140c30ab71372ca719b8feb618 */
-    assert( pCur->pPage->leaf );
-#endif
+    assert( cursorIsAtLastEntry(pCur) || CORRUPT_DB );
     *pRes = 0;
     return SQLITE_OK;
   }
@@ -5665,6 +5728,7 @@ int sqlite3BtreeTableMoveto(
     }
     if( pCur->info.nKey<intKey ){
       if( (pCur->curFlags & BTCF_AtLast)!=0 ){
+        assert( cursorIsAtLastEntry(pCur) || CORRUPT_DB );
         *pRes = -1;
         return SQLITE_OK;
       }
@@ -6131,10 +6195,10 @@ i64 sqlite3BtreeRowCountEst(BtCursor *pCur){
   assert( cursorOwnsBtShared(pCur) );
   assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
 
-  /* Currently this interface is only called by the OP_IfSmaller
-  ** opcode, and it that case the cursor will always be valid and
-  ** will always point to a leaf node. */
-  if( NEVER(pCur->eState!=CURSOR_VALID) ) return -1;
+  /* Currently this interface is only called by the OP_IfSizeBetween
+  ** opcode and the OP_Count opcode with P3=1.  In either case,
+  ** the cursor will always be valid unless the btree is empty. */
+  if( pCur->eState!=CURSOR_VALID ) return 0;
   if( NEVER(pCur->pPage->leaf==0) ) return -1;
 
   n = pCur->pPage->nCell;
@@ -6280,7 +6344,10 @@ static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur){
   }
 
   pPage = pCur->pPage;
-  assert( pPage->isInit );
+  if( sqlite3FaultSim(412) ) pPage->isInit = 0;
+  if( !pPage->isInit ){
+    return SQLITE_CORRUPT_BKPT;
+  }
   if( !pPage->leaf ){
     int idx = pCur->ix;
     rc = moveToChild(pCur, get4byte(findCell(pPage, idx)));
@@ -6953,7 +7020,10 @@ static int fillInCell(
   n = nHeader + nPayload;
   testcase( n==3 );
   testcase( n==4 );
-  if( n<4 ) n = 4;
+  if( n<4 ){
+    n = 4;
+    pPayload[nPayload] = 0;
+  }
   *pnSize = n;
   assert( nSrc<=nPayload );
   testcase( nSrc<nPayload );
@@ -7497,7 +7567,8 @@ static int rebuildPage(
     if( j>(u32)usableSize ){ j = 0; }
     memcpy(&pTmp[j], &aData[j], usableSize - j);
 
-    for(k=0; ALWAYS(k<NB*2) && pCArray->ixNx[k]<=i; k++){}
+    assert( pCArray->ixNx[NB*2-1]>i );
+    for(k=0; pCArray->ixNx[k]<=i; k++){}
     pSrcEnd = pCArray->apEnd[k];
 
     pData = pEnd;
@@ -7580,7 +7651,8 @@ static int pageInsertArray(
   u8 *pEnd;                       /* Maximum extent of cell data */
   assert( CORRUPT_DB || pPg->hdrOffset==0 );    /* Never called on page 1 */
   if( iEnd<=iFirst ) return 0;
-  for(k=0; ALWAYS(k<NB*2) && pCArray->ixNx[k]<=i ; k++){}
+  assert( pCArray->ixNx[NB*2-1]>i );
+  for(k=0; pCArray->ixNx[k]<=i ; k++){}
   pEnd = pCArray->apEnd[k];
   while( 1 /*Exit by break*/ ){
     int sz, rc;
@@ -7865,6 +7937,7 @@ static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){
     b.szCell = &szCell;
     b.apEnd[0] = pPage->aDataEnd;
     b.ixNx[0] = 2;
+    b.ixNx[NB*2-1] = 0x7fffffff;
     rc = rebuildPage(&b, 0, 1, pNew);
     if( NEVER(rc) ){
       releasePage(pNew);
@@ -8100,7 +8173,9 @@ static int balance_nonroot(
   CellArray b;                 /* Parsed information on cells being balanced */
 
   memset(abDone, 0, sizeof(abDone));
-  memset(&b, 0, sizeof(b));
+  assert( sizeof(b) - sizeof(b.ixNx) == offsetof(CellArray,ixNx) );
+  memset(&b, 0, sizeof(b)-sizeof(b.ixNx[0]));
+  b.ixNx[NB*2-1] = 0x7fffffff;
   pBt = pParent->pBt;
   assert( sqlite3_mutex_held(pBt->mutex) );
   assert( sqlite3PagerIswriteable(pParent->pDbPage) );
@@ -8259,7 +8334,7 @@ static int balance_nonroot(
     ** table-interior, index-leaf, or index-interior).
     */
     if( pOld->aData[0]!=apOld[0]->aData[0] ){
-      rc = SQLITE_CORRUPT_BKPT;
+      rc = SQLITE_CORRUPT_PAGE(pOld);
       goto balance_cleanup;
     }
@@ -8283,7 +8358,7 @@ static int balance_nonroot(
     memset(&b.szCell[b.nCell], 0, sizeof(b.szCell[0])*(limit+pOld->nOverflow));
     if( pOld->nOverflow>0 ){
       if( NEVER(limit<pOld->aiOvfl[0]) ){
-        rc = SQLITE_CORRUPT_BKPT;
+        rc = SQLITE_CORRUPT_PAGE(pOld);
         goto balance_cleanup;
       }
       limit = pOld->aiOvfl[0];
@@ -8691,7 +8766,8 @@ static int balance_nonroot(
       iOvflSpace += sz;
       assert( sz<=pBt->maxLocal+23 );
       assert( iOvflSpace <= (int)pBt->pageSize );
-      for(k=0; ALWAYS(k<NB*2) && b.ixNx[k]<=j; k++){}
+      assert( b.ixNx[NB*2-1]>j );
+      for(k=0; b.ixNx[k]<=j; k++){}
       pSrcEnd = b.apEnd[k];
       if( SQLITE_OVERFLOW(pSrcEnd, pCell, pCell+sz) ){
         rc = SQLITE_CORRUPT_BKPT;
@@ -8926,7 +9002,7 @@ static int anotherValidCursor(BtCursor *pCur){
      && pOther->eState==CURSOR_VALID
      && pOther->pPage==pCur->pPage
     ){
-      return SQLITE_CORRUPT_BKPT;
+      return SQLITE_CORRUPT_PAGE(pCur->pPage);
     }
   }
   return SQLITE_OK;
@@ -8986,7 +9062,7 @@ static int balance(BtCursor *pCur){
       /* The page being written is not a root page, and there is currently
       ** more than one reference to it.  This only happens if the page is one
       ** of its own ancestor pages.  Corruption. */
-      rc = SQLITE_CORRUPT_BKPT;
+      rc = SQLITE_CORRUPT_PAGE(pPage);
     }else{
       MemPage * const pParent = pCur->apPage[iPage-1];
       int const iIdx = pCur->aiIdx[iPage-1];
@@ -9150,7 +9226,7 @@ static SQLITE_NOINLINE int btreeOverwriteOverflowCell(
     rc = btreeGetPage(pBt, ovflPgno, &pPage, 0);
     if( rc ) return rc;
     if( sqlite3PagerPageRefcount(pPage->pDbPage)!=1 || pPage->isInit ){
-      rc = SQLITE_CORRUPT_BKPT;
+      rc = SQLITE_CORRUPT_PAGE(pPage);
     }else{
       if( iOffset+ovflPageSize<(u32)nTotal ){
         ovflPgno = get4byte(pPage->aData);
@@ -9178,7 +9254,7 @@ static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){
   if( pCur->info.pPayload + pCur->info.nLocal > pPage->aDataEnd
    || pCur->info.pPayload < pPage->aData + pPage->cellOffset
   ){
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PAGE(pPage);
   }
   if( pCur->info.nLocal==nTotal ){
     /* The entire cell is local */
@@ -9259,7 +9335,7 @@ int sqlite3BtreeInsert(
     ** Which can only happen if the SQLITE_NoSchemaError flag was set when
     ** the schema was loaded. This cannot be asserted though, as a user might
     ** set the flag, load the schema, and then unset the flag.  */
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PGNO(pCur->pgnoRoot);
     }
   }
@@ -9382,7 +9458,7 @@ int sqlite3BtreeInsert(
   if( pPage->nFree<0 ){
     if( NEVER(pCur->eState>CURSOR_INVALID) ){
       /* ^^^^^--- due to the moveToRoot() call above */
-      rc = SQLITE_CORRUPT_BKPT;
+      rc = SQLITE_CORRUPT_PAGE(pPage);
     }else{
       rc = btreeComputeFreeSpace(pPage);
     }
@@ -9399,7 +9475,10 @@ int sqlite3BtreeInsert(
   if( flags & BTREE_PREFORMAT ){
     rc = SQLITE_OK;
     szNew = p->pBt->nPreformatSize;
-    if( szNew<4 ) szNew = 4;
+    if( szNew<4 ){
+      szNew = 4;
+      newCell[3] = 0;
+    }
     if( ISAUTOVACUUM(p->pBt) && szNew>pPage->maxLocal ){
       CellInfo info;
       pPage->xParseCell(pPage, newCell, &info);
@@ -9421,7 +9500,7 @@ int sqlite3BtreeInsert(
     CellInfo info;
     assert( idx>=0 );
     if( idx>=pPage->nCell ){
-      return SQLITE_CORRUPT_BKPT;
+      return SQLITE_CORRUPT_PAGE(pPage);
     }
     rc = sqlite3PagerWrite(pPage->pDbPage);
     if( rc ){
@@ -9448,10 +9527,10 @@ int sqlite3BtreeInsert(
       ** necessary to add the PTRMAP_OVERFLOW1 pointer-map entry.
       */
       assert( rc==SQLITE_OK ); /* clearCell never fails when nLocal==nPayload */
       if( oldCell < pPage->aData+pPage->hdrOffset+10 ){
-        return SQLITE_CORRUPT_BKPT;
+        return SQLITE_CORRUPT_PAGE(pPage);
       }
       if( oldCell+szNew > pPage->aDataEnd ){
-        return SQLITE_CORRUPT_BKPT;
+        return SQLITE_CORRUPT_PAGE(pPage);
       }
       memcpy(oldCell, newCell, szNew);
       return SQLITE_OK;
@@ -9461,7 +9540,7 @@ int sqlite3BtreeInsert(
   }else if( loc<0 && pPage->nCell>0 ){
     assert( pPage->leaf );
     idx = ++pCur->ix;
-    pCur->curFlags &= ~BTCF_ValidNKey;
+    pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
   }else{
     assert( pPage->leaf );
   }
@@ -9491,7 +9570,7 @@ int sqlite3BtreeInsert(
   */
   if( pPage->nOverflow ){
     assert( rc==SQLITE_OK );
-    pCur->curFlags &= ~(BTCF_ValidNKey);
+    pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
     rc = balance(pCur);
 
     /* Must make sure nOverflow is reset to zero even if the balance()
@@ -9553,7 +9632,7 @@ int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 iKey){
   nIn = pSrc->info.nLocal;
   aIn = pSrc->info.pPayload;
   if( aIn+nIn>pSrc->pPage->aDataEnd ){
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PAGE(pSrc->pPage);
   }
   nRem = pSrc->info.nPayload;
   if( nIn==nRem && nIn<pDest->pPage->maxLocal ){
@@ -9578,7 +9657,7 @@ int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 iKey){
 
     if( nRem>nIn ){
       if( aIn+nIn+4>pSrc->pPage->aDataEnd ){
-        return SQLITE_CORRUPT_BKPT;
+        return SQLITE_CORRUPT_PAGE(pSrc->pPage);
       }
       ovflIn = get4byte(&pSrc->info.pPayload[nIn]);
     }
@@ -9674,7 +9753,7 @@ int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){
       assert( rc!=SQLITE_OK || CORRUPT_DB || pCur->eState==CURSOR_VALID );
       if( rc || pCur->eState!=CURSOR_VALID ) return rc;
     }else{
-      return SQLITE_CORRUPT_BKPT;
+      return SQLITE_CORRUPT_PGNO(pCur->pgnoRoot);
     }
   }
   assert( pCur->eState==CURSOR_VALID );
@@ -9683,14 +9762,14 @@ int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){
   iCellIdx = pCur->ix;
   pPage = pCur->pPage;
   if( pPage->nCell<=iCellIdx ){
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PAGE(pPage);
   }
   pCell = findCell(pPage, iCellIdx);
   if( pPage->nFree<0 && btreeComputeFreeSpace(pPage) ){
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PAGE(pPage);
   }
   if( pCell<&pPage->aCellIdx[pPage->nCell] ){
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PAGE(pPage);
   }
 
   /* If the BTREE_SAVEPOSITION bit is on, then the cursor position must
@@ -9781,7 +9860,7 @@ int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){
       n = pCur->pPage->pgno;
     }
     pCell = findCell(pLeaf, pLeaf->nCell-1);
-    if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_BKPT;
+    if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_PAGE(pLeaf);
     nCell = pLeaf->xCellSize(pLeaf, pCell);
     assert( MX_CELL_SIZE(pBt) >= nCell );
     pTmp = pBt->pTmpSpace;
@@ -9897,7 +9976,7 @@ static int btreeCreateTable(Btree *p, Pgno *piTable, int createTabFlags){
     */
     sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &pgnoRoot);
     if( pgnoRoot>btreePagecount(pBt) ){
-      return SQLITE_CORRUPT_BKPT;
+      return SQLITE_CORRUPT_PGNO(pgnoRoot);
     }
     pgnoRoot++;
@@ -9945,7 +10024,7 @@ static int btreeCreateTable(Btree *p, Pgno *piTable, int createTabFlags){
     }
     rc = ptrmapGet(pBt, pgnoRoot, &eType, &iPtrPage);
     if( eType==PTRMAP_ROOTPAGE || eType==PTRMAP_FREEPAGE ){
-      rc = SQLITE_CORRUPT_BKPT;
+      rc = SQLITE_CORRUPT_PGNO(pgnoRoot);
     }
     if( rc!=SQLITE_OK ){
       releasePage(pRoot);
@@ -10035,14 +10114,14 @@ static int clearDatabasePage(
 
   assert( sqlite3_mutex_held(pBt->mutex) );
   if( pgno>btreePagecount(pBt) ){
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PGNO(pgno);
   }
   rc = getAndInitPage(pBt, pgno, &pPage, 0);
   if( rc ) return rc;
   if( (pBt->openFlags & BTREE_SINGLE)==0
    && sqlite3PagerPageRefcount(pPage->pDbPage) != (1 + (pgno==1))
   ){
-    rc = SQLITE_CORRUPT_BKPT;
+    rc = SQLITE_CORRUPT_PAGE(pPage);
     goto cleardatabasepage_out;
   }
   hdr = pPage->hdrOffset;
@@ -10146,7 +10225,7 @@ static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){
   assert( p->inTrans==TRANS_WRITE );
   assert( iTable>=2 );
   if( iTable>btreePagecount(pBt) ){
-    return SQLITE_CORRUPT_BKPT;
+    return SQLITE_CORRUPT_PGNO(iTable);
   }
 
   rc = sqlite3BtreeClearTable(p, iTable, 0);
@@ -10740,6 +10819,9 @@ static int checkTreePage(
   ** number of cells on the page. */
   nCell = get2byte(&data[hdr+3]);
   assert( pPage->nCell==nCell );
+  if( pPage->leaf || pPage->intKey==0 ){
+    pCheck->nRow += nCell;
+  }
 
   /* EVIDENCE-OF: R-23882-45353 The cell pointer array of a b-tree page
   ** immediately follows the b-tree page header. */
@@ -10851,6 +10933,7 @@ static int checkTreePage(
       btreeHeapInsert(heap, (pc<<16)|(pc+size-1));
     }
   }
+  assert( heap!=0 );
 
   /* Add the freeblocks to the min-heap
   **
   ** EVIDENCE-OF: R-20690-50594 The second field of the b-tree page header
@@ -10950,6 +11033,7 @@ int sqlite3BtreeIntegrityCheck(
   sqlite3 *db,  /* Database connection that is running the check */
   Btree *p,     /* The btree to be checked */
   Pgno *aRoot,  /* An array of root pages numbers for individual trees */
+  Mem *aCnt,    /* Memory cells to write counts for each tree to */
   int nRoot,    /* Number of entries in aRoot[] */
   int mxErr,    /* Stop reporting errors after this many */
   int *pnErr,   /* OUT: Write number of errors seen to this variable */
@@ -10963,7 +11047,9 @@ int sqlite3BtreeIntegrityCheck(
   int bPartial = 0;            /* True if not checking all btrees */
   int bCkFreelist = 1;         /* True to scan the freelist */
   VVA_ONLY( int nRef );
+
   assert( nRoot>0 );
+  assert( aCnt!=0 );
 
   /* aRoot[0]==0 means this is a partial check */
   if( aRoot[0]==0 ){
@@ -11036,15 +11122,18 @@ int sqlite3BtreeIntegrityCheck(
     testcase( pBt->db->flags & SQLITE_CellSizeCk );
     pBt->db->flags &= ~(u64)SQLITE_CellSizeCk;
     for(i=0; (int)i<nRoot && sCheck.mxErr; i++){
-      i64 notUsed;
-      if( aRoot[i]==0 ) continue;
+      sCheck.nRow = 0;
+      if( aRoot[i] ){
+        i64 notUsed;
 #ifndef SQLITE_OMIT_AUTOVACUUM
-      if( pBt->autoVacuum && aRoot[i]>1 && !bPartial ){
-        checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0);
-      }
+        if( pBt->autoVacuum && aRoot[i]>1 && !bPartial ){
+          checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0);
+        }
 #endif
-      sCheck.v0 = aRoot[i];
-      checkTreePage(&sCheck, aRoot[i], &notUsed, LARGEST_INT64);
+        sCheck.v0 = aRoot[i];
+        checkTreePage(&sCheck, aRoot[i], &notUsed, LARGEST_INT64);
+      }
+      sqlite3MemSetArrayInt64(aCnt, i, sCheck.nRow);
     }
     pBt->db->flags = savedDbFlags;
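A note on the ixNx[] loops above: the patch drops the ALWAYS(k<NB*2) bound and instead relies on the last ixNx[] slot holding a sentinel (0x7fffffff) that is larger than any cell index, so the scan is guaranteed to stop before running off the end of the array. The following is a minimal standalone sketch of that sentinel-terminated scan; the names, array size, and values are hypothetical, not SQLite's CellArray.

#include <assert.h>
#include <stdio.h>

#define NSLOT 6   /* stand-in for NB*2 in btree.c */

int main(void){
  /* Hypothetical segment boundaries; the scan finds which segment an
  ** index falls into, like the ixNx[] loops in the diff above. */
  int ixNx[NSLOT] = {2, 5, 9, 14, 20, 0};
  int i = 7;      /* index being located */
  int k;

  /* Install the sentinel in the final slot.  Because it is larger than
  ** any possible index, the loop below needs no k<NSLOT bound. */
  ixNx[NSLOT-1] = 0x7fffffff;

  assert( ixNx[NSLOT-1]>i );    /* mirrors the assert() added in the diff */
  for(k=0; ixNx[k]<=i; k++){}   /* stops at the first slot greater than i */

  printf("index %d falls in segment %d\n", i, k);  /* prints segment 2 */
  return 0;
}

The trade-off is the same one the diff makes in balance_nonroot(): the caller must guarantee the sentinel is installed (here explicitly, there via b.ixNx[NB*2-1] = 0x7fffffff after the shortened memset) before the unbounded scan runs.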