46 files changed, 4084 insertions, 1073 deletions
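This change set merges the multi-threaded sorter work into the sessions branch: it adds src/threads.c to the build files, introduces the SQLITE_LIMIT_WORKER_THREADS limit and the "PRAGMA threads" pragma, and refactors the pcache fetch path. As a minimal sketch (not part of the diff itself), the snippet below shows how the new per-connection worker-thread limit introduced here could be exercised; the ":memory:" filename and the value 4 are arbitrary illustration choices.

```c
/*
** Sketch only: exercises the SQLITE_LIMIT_WORKER_THREADS limit and the
** "PRAGMA threads" pragma that this change set introduces.  The database
** name and the requested thread count are illustrative assumptions.
*/
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

  /* Raise the per-connection worker-thread cap.  The value is clamped to
  ** the SQLITE_MAX_WORKER_THREADS compile-time hard limit; the prior
  ** setting is returned. */
  int prev = sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, 4);
  printf("previous worker-thread limit: %d\n", prev);

  /* The same setting is also reachable from SQL via the new pragma. */
  sqlite3_exec(db, "PRAGMA threads=4;", 0, 0, 0);

  sqlite3_close(db);
  return 0;
}
```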
diff --git a/Makefile.in b/Makefile.in index 3292da774..09a368227 100644 --- a/Makefile.in +++ b/Makefile.in @@ -180,7 +180,7 @@ LIBOBJS0 = alter.lo analyze.lo attach.lo auth.lo \ pager.lo parse.lo pcache.lo pcache1.lo pragma.lo prepare.lo printf.lo \ random.lo resolve.lo rowset.lo rtree.lo \ sqlite3session.lo select.lo status.lo \ - table.lo tokenize.lo trigger.lo \ + table.lo threads.lo tokenize.lo trigger.lo \ update.lo util.lo vacuum.lo \ vdbe.lo vdbeapi.lo vdbeaux.lo vdbeblob.lo vdbemem.lo vdbesort.lo \ vdbetrace.lo wal.lo walker.lo where.lo utf.lo vtab.lo @@ -266,6 +266,7 @@ SRC = \ $(TOP)/src/sqliteInt.h \ $(TOP)/src/sqliteLimit.h \ $(TOP)/src/table.c \ + $(TOP)/src/threads.c \ $(TOP)/src/tclsqlite.c \ $(TOP)/src/tokenize.c \ $(TOP)/src/trigger.c \ @@ -747,6 +748,9 @@ status.lo: $(TOP)/src/status.c $(HDR) table.lo: $(TOP)/src/table.c $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/table.c +threads.lo: $(TOP)/src/threads.c $(HDR) + $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/threads.c + tokenize.lo: $(TOP)/src/tokenize.c keywordhash.h $(HDR) $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/tokenize.c diff --git a/Makefile.msc b/Makefile.msc index d246cf03c..33f42b45c 100644 --- a/Makefile.msc +++ b/Makefile.msc @@ -647,7 +647,7 @@ LIBOBJS0 = vdbe.lo parse.lo alter.lo analyze.lo attach.lo auth.lo \ pager.lo pcache.lo pcache1.lo pragma.lo prepare.lo printf.lo \ random.lo resolve.lo rowset.lo rtree.lo \ sqlite3session.lo select.lo status.lo \ - table.lo tokenize.lo trigger.lo \ + table.lo threads.lo tokenize.lo trigger.lo \ update.lo util.lo vacuum.lo \ vdbeapi.lo vdbeaux.lo vdbeblob.lo vdbemem.lo vdbesort.lo \ vdbetrace.lo wal.lo walker.lo where.lo utf.lo vtab.lo @@ -744,6 +744,7 @@ SRC = \ $(TOP)\src\sqliteInt.h \ $(TOP)\src\sqliteLimit.h \ $(TOP)\src\table.c \ + $(TOP)\src\threads.c \ $(TOP)\src\tclsqlite.c \ $(TOP)\src\tokenize.c \ $(TOP)\src\trigger.c \ @@ -1242,6 +1243,9 @@ status.lo: $(TOP)\src\status.c $(HDR) table.lo: $(TOP)\src\table.c $(HDR) $(LTCOMPILE) -c $(TOP)\src\table.c +threads.lo: $(TOP)\src\threads.c $(HDR) + $(LTCOMPILE) -c $(TOP)\src\threads.c + tokenize.lo: $(TOP)\src\tokenize.c keywordhash.h $(HDR) $(LTCOMPILE) -c $(TOP)\src\tokenize.c @@ -67,7 +67,7 @@ LIBOBJ+= vdbe.o parse.o \ notify.o opcodes.o os.o os_unix.o os_win.o \ pager.o pcache.o pcache1.o pragma.o prepare.o printf.o \ random.o resolve.o rowset.o rtree.o select.o status.o \ - table.o tokenize.o trigger.o \ + table.o threads.o tokenize.o trigger.o \ update.o util.o vacuum.o \ vdbeapi.o vdbeaux.o vdbeblob.o vdbemem.o vdbesort.o \ vdbetrace.o wal.o walker.o where.o utf.o vtab.o @@ -149,6 +149,7 @@ SRC = \ $(TOP)/src/sqliteLimit.h \ $(TOP)/src/table.c \ $(TOP)/src/tclsqlite.c \ + $(TOP)/src/threads.c \ $(TOP)/src/tokenize.c \ $(TOP)/src/trigger.c \ $(TOP)/src/utf.c \ @@ -321,6 +322,7 @@ TESTSRC2 = \ $(TOP)/src/pcache.c \ $(TOP)/src/pcache1.c \ $(TOP)/src/select.c \ + $(TOP)/src/threads.c \ $(TOP)/src/tokenize.c \ $(TOP)/src/utf.c \ $(TOP)/src/util.c \ @@ -1,9 +1,9 @@ -C Merge\srecent\sperformance\senhancements\sand\sthe\sCAST\soperator\senhancements\ninto\sthe\ssessions\sbranch. -D 2014-08-26T02:15:07.293 +C Merge\sthe\slatest\strunk\schanges,\sincluding\sthe\smulti-threaded\ssorter,\sinto\nthe\ssessions\sbranch. 
+D 2014-09-02T15:49:47.703 F Makefile.arm-wince-mingw32ce-gcc d6df77f1f48d690bd73162294bbba7f59507c72f -F Makefile.in d5ad373b7a23525414b8843b3084cf90c560d92f +F Makefile.in dd5f245aa8c741bc65845747203c8ce2f3fb6c83 F Makefile.linux-gcc 91d710bdc4998cb015f39edf3cb314ec4f4d7e23 -F Makefile.msc f1bbf555916b6e60887d86cea62f27e6a26cdb24 +F Makefile.msc 35808af7f8d999176ed5b38fb482a87a129ee3e1 F Makefile.vxworks 034289efa9d591b04b1a73598623119c306cbba0 F README.md 64f270c43c38c46de749e419c22f0ae2f4499fe8 F VERSION 53a0b870e7f16d3b06623c31d233a304c163a6af @@ -163,7 +163,7 @@ F ext/session/test_session.c a252fb669d3a1b3552ee7b87fe610debc0afeb7b F install-sh 9d4de14ab9fb0facae2f48780b874848cbf2f895 x F ltmain.sh 3ff0879076df340d2e23ae905484d8c15d5fdea8 F magic.txt 8273bf49ba3b0c8559cb2774495390c31fd61c60 -F main.mk a9987b6655610e663e9f055e0f1646227f45cddd +F main.mk 4dfbd8fbc91ee5732554b31a205960241c4fc059 F mkopcodec.awk c2ff431854d702cdd2d779c9c0d1f58fa16fa4ea F mkopcodeh.awk c6b3fa301db6ef7ac916b14c60868aeaec1337b5 F mkso.sh fd21c06b063bb16a5d25deea1752c2da6ac3ed83 @@ -178,22 +178,22 @@ F sqlite.pc.in 42b7bf0d02e08b9e77734a47798d1a55a9e0716b F sqlite3.1 3d8b83c91651f53472ca17599dae3457b8b89494 F sqlite3.pc.in 48fed132e7cb71ab676105d2a4dc77127d8c1f3a F src/alter.c b00900877f766f116f9e16116f1ccacdc21d82f1 -F src/analyze.c f98a351908da29f7b44741cfeb9eb20dda648ba0 +F src/analyze.c f00f06e6ef66c61b41f154889fe7caf5ed55a0ce F src/attach.c 3801129015ef59d76bf23c95ef9b0069d18a0c52 F src/auth.c 523da7fb4979469955d822ff9298352d6b31de34 F src/backup.c a31809c65623cc41849b94d368917f8bb66e6a7e F src/bitvec.c 19a4ba637bd85f8f63fc8c9bae5ade9fb05ec1cb F src/btmutex.c ec9d3f1295dafeb278c3830211cc5584132468f4 -F src/btree.c 4737cb5bdb2eb8989cb292f6ff921f7ff45f0c46 +F src/btree.c 2a483a8045118faa99867a8679da42754b532318 F src/btree.h a79aa6a71e7f1055f01052b7f821bd1c2dce95c8 F src/btreeInt.h cf180d86b2e9e418f638d65baa425c4c69c0e0e3 -F src/build.c 058e3aadb1376521ff291735237edf4c10f438fb +F src/build.c c26b233dcdb1e2c8f468d49236c266f9f3de96d8 F src/callback.c b97d0695ffcf6a8710ee445ffe56ee387d4d8a6f F src/complete.c dc1d136c0feee03c2f7550bafc0d29075e36deac F src/ctime.c 0231df905e2c4abba4483ee18ffc05adc321df2a F src/date.c 593c744b2623971e45affd0bde347631bdfa4625 F src/delete.c cb7a757eb829ebb046c66f6399435c6636fe1314 -F src/expr.c 358634f4ddeeb4e69643cb6db5819104a7834c60 +F src/expr.c e1691ab0fe6be7247ef073b0038fb8ecd9944fad F src/fault.c 160a0c015b6c2629d3899ed2daf63d75754a32bb F src/fkey.c 8d81a780ad78d16ec9082585758a8f1d6bf02ca3 F src/func.c bbb724b74ed96ca42675a7274646a71dd52bcda7 @@ -206,7 +206,7 @@ F src/journal.c b4124532212b6952f42eb2c12fa3c25701d8ba8d F src/legacy.c 87c92f4a08e2f70220e3b22a9c3b2482d36a134a F src/lempar.c cdf0a000315332fc9b50b62f3b5e22e080a0952b F src/loadext.c 31c2122b7dd05a179049bbf163fd4839f181cbab -F src/main.c f375a8d81960b5ff3a7a765e1367b0d0cd90f222 +F src/main.c 20a0c78f2b9f66766402d2a6563ffe047c64a8be F src/malloc.c 954de5f998c23237e04474a3f2159bf483bba65a F src/mem0.c 6a55ebe57c46ca1a7d98da93aaa07f99f1059645 F src/mem1.c c0c990fcaddff810ea277b4fb5d9138603dd5d4b @@ -224,32 +224,32 @@ F src/os.c 1b147e4cf7cc39e618115c14a086aed44bc91ace F src/os.h 60d419395e32a8029fa380a80a3da2e9030f635e F src/os_common.h 92815ed65f805560b66166e3583470ff94478f04 F src/os_setup.h c9d4553b5aaa6f73391448b265b89bed0b890faa -F src/os_unix.c bd7df3094a60915c148517504c76df4fca24e542 -F src/os_win.c d067fce558a5032e6e6afe62899e5397bf63cf3e +F src/os_unix.c 8525ca79457c5b4673a5fda2774ee39fe155f40f +F 
src/os_win.c 2aa8aa7780d7cf03e912d2088ab2ec5c32f33dc5 F src/os_win.h 09e751b20bbc107ffbd46e13555dc73576d88e21 -F src/pager.c 53cc5e9d73afb74add79f49755c8ee240fbdbef7 +F src/pager.c 3e732d2bbdd8d8d95fed0c5ae7e718d73153c4c5 F src/pager.h ffd5607f7b3e4590b415b007a4382f693334d428 F src/parse.y 22d6a074e5f5a7258947a1dc55a9bf946b765dd0 -F src/pcache.c da602c5447051705cab41604bf3276815eb569d0 -F src/pcache.h a5e4f5d9f5d592051d91212c5949517971ae6222 +F src/pcache.c 3b3791297e8977002e56b4a9b8916f2039abad9b +F src/pcache.h 9b559127b83f84ff76d735c8262f04853be0c59a F src/pcache1.c c5af6403a55178c9d1c09e4f77b0f9c88822762c -F src/pragma.c d10ef67c4de79f78188b965b4b7988aff1d66f2e +F src/pragma.c 14bcdb504128a476cce5bbc086d5226c5e46c225 F src/prepare.c 3842c1dfc0b053458e3adcf9f6efc48e03e3fe3d F src/printf.c 00986c86ddfffefc2fd3c73667ff51b3b9709c74 F src/random.c d10c1f85b6709ca97278428fd5db5bbb9c74eece F src/resolve.c 0ea356d32a5e884add23d1b9b4e8736681dd5697 F src/rowset.c a9c9aae3234b44a6d7c6f5a3cadf90dce1e627be -F src/select.c ea48e891406ccdf748f3eb02893e056d134a0fea -F src/shell.c 34be9dc9e7b96081488acebecae6cd92632397a6 -F src/sqlite.h.in 021a1f5c50e83060675d994a6014fd409e611d9e +F src/select.c 89e569b263535662f54b537eb9118b2c554ae7aa +F src/shell.c ec6d5f630ed617dc80cbc35d9e45fe47f07923db +F src/sqlite.h.in 49c501f66e0d6591ebe7588edddf0c4b06c8b9e9 F src/sqlite3.rc 992c9f5fb8285ae285d6be28240a7e8d3a7f2bad F src/sqlite3ext.h 886f5a34de171002ad46fae8c36a7d8051c190fc -F src/sqliteInt.h 2ecc10cd81d38e8617e24ed425e6ec792195b0f0 +F src/sqliteInt.h 465b40ebe68a1a4127e33740550ac53976172652 F src/sqliteLimit.h 164b0e6749d31e0daa1a4589a169d31c0dec7b3d F src/status.c 7ac05a5c7017d0b9f0b4bcd701228b784f987158 F src/table.c 2cd62736f845d82200acfa1287e33feb3c15d62e F src/tclsqlite.c 30d8f4ba516061832cfe10d7c71d84e17bff1918 -F src/test1.c 14409a611e9c27c6c522c610bbff5561f05c1558 +F src/test1.c 363a5089230a92cf0aaa7a2945da7f2bf3b0a8d3 F src/test2.c 98049e51a17dc62606a99a9eb95ee477f9996712 F src/test3.c 1c0e5d6f080b8e33c1ce8b3078e7013fdbcd560c F src/test4.c 9b32d22f5f150abe23c1830e2057c4037c45b3df @@ -262,7 +262,7 @@ F src/test_async.c 21e11293a2f72080eda70e1124e9102044531cd8 F src/test_autoext.c dea8a01a7153b9adc97bd26161e4226329546e12 F src/test_backup.c 3875e899222b651e18b662f86e0e50daa946344e F src/test_btree.c 2e9978eca99a9a4bfa8cae949efb00886860a64f -F src/test_config.c f0252240543895769a6bf037c206a4af74cf1e3c +F src/test_config.c a65043d01ad3bd2dfe9a3aa7e39a9935b069f6aa F src/test_demovfs.c 69b2085076654ebc18014cbc6386f04409c959a9 F src/test_devsym.c e7498904e72ba7491d142d5c83b476c4e76993bc F src/test_fs.c ced436e3d4b8e4681328409b8081051ce614e28f @@ -273,7 +273,7 @@ F src/test_intarray.c db4614c2262a06abc4409dc048d59c580c38320f F src/test_intarray.h 9dc57417fb65bc7835cc18548852cc08cc062202 F src/test_journal.c f5c0a05b7b3d5930db769b5ee6c3766dc2221a64 F src/test_loadext.c a5251f956ab6af21e138dc1f9c0399394a510cb4 -F src/test_malloc.c 1ff5b1243d96124c9a180f3b89424820a1f337f3 +F src/test_malloc.c 5368fb1de77246da1ae0ff59cba0d30cb0e5812f F src/test_multiplex.c ca90057438b63bf0840ebb84d0ef050624519a76 F src/test_multiplex.h c08e4e8f8651f0c5e0509b138ff4d5b43ed1f5d3 F src/test_mutex.c 293042d623ebba969160f471a82aa1551626454f @@ -294,26 +294,27 @@ F src/test_thread.c 1e133a40b50e9c035b00174035b846e7eef481cb F src/test_vfs.c f84075a388527892ff184988f43b69ce69b8083c F src/test_vfstrace.c bab9594adc976cbe696ff3970728830b4c5ed698 F src/test_wsd.c 41cadfd9d97fe8e3e4e44f61a4a8ccd6f7ca8fe9 +F src/threads.c 
22dded4283dc4b25422f6444cdcb8d6b1ea0b5ff F src/tokenize.c ae45399d6252b4d736af43bee1576ce7bff86aec F src/trigger.c 4bddd12803275aa98f1c7ce0118fceb02b2167f6 F src/update.c b0f38fda25d532343d54b7dc49f55ab73e92ca45 F src/utf.c 77abb5e6d27f3d236e50f7c8fff1d00e15262359 F src/util.c 068dcd26354a3898ccc64ad5c4bdb95a7a15d33a F src/vacuum.c 3728d74919d4fb1356f9e9a13e27773db60b7179 -F src/vdbe.c 5e576d164e6cfb8ef1c6324c3d6eca56322fd656 +F src/vdbe.c 591bd4a84f1c37d004f545f8e805b7e597afc87f F src/vdbe.h ca3b6df299adce6e2f499c57e42ae54f142ae823 -F src/vdbeInt.h 6a50eb240aac711ac609f624daa225781711c6fd -F src/vdbeapi.c ce34bb80571f72463dbd45380e3eae65ddc52018 -F src/vdbeaux.c 236cc1afd82ab542fc752ed760471ac816cb7902 +F src/vdbeInt.h 872d39f632bfd12897a6ab184ad4c1df5c38eb7a +F src/vdbeapi.c 6b14e76648bbd10a95a2f3963ee09a8d06658f5e +F src/vdbeaux.c 3118c164becbbf5a017d04826bbd93559ab9e190 F src/vdbeblob.c d65b01f439df63911ac3d7a9a85c15503965f2c3 -F src/vdbemem.c 4e08ea087aea367dae7c45129b75487e0056e819 -F src/vdbesort.c f7f5563bf7d4695ca8f3203f3bf9de96d04ed0b3 +F src/vdbemem.c 921d5468a68ac06f369810992e84ca22cc730a62 +F src/vdbesort.c 02646a9f86421776ae5d7594f620f9ed669d3698 F src/vdbetrace.c 6f52bc0c51e144b7efdcfb2a8f771167a8816767 F src/vtab.c 019dbfd0406a7447c990e1f7bd1dfcdb8895697f F src/wal.c 264df50a1b33124130b23180ded2e2c5663c652a F src/wal.h df01efe09c5cb8c8e391ff1715cca294f89668a4 F src/walker.c 11edb74d587bc87b33ca96a5173e3ec1b8389e45 -F src/where.c 4e2770a1914b8ce30f3e44ad954b720eca3b5efd +F src/where.c d9eae96b2cbbe4842eac3ee156ccd1b933d802c4 F src/whereInt.h 923820bee9726033a501a08d2fc69b9c1ee4feb3 F test/8_3_names.test ebbb5cd36741350040fd28b432ceadf495be25b2 F test/aggerror.test a867e273ef9e3d7919f03ef4f0e8c0d2767944f2 @@ -642,7 +643,7 @@ F test/index3.test 55a90cff99834305e8141df7afaef39674b57062 F test/index4.test ab92e736d5946840236cd61ac3191f91a7856bf6 F test/index5.test fc07c14193c0430814e7a08b5da46888ee795c33 F test/index6.test fb370966ac3cd0989053dd5385757b5c3e24ab6a -F test/index7.test a3baf9a625bda7fd49471e99aeae04095fbfeecf +F test/index7.test 917cf1e1c7439bb155abbeabec511b28945e157b F test/indexedby.test b2f22f3e693a53813aa3f50b812eb609ba6df1ec F test/indexfault.test 31d4ab9a7d2f6e9616933eb079722362a883eb1d F test/init.test 15c823093fdabbf7b531fe22cf037134d09587a7 @@ -693,7 +694,7 @@ F test/lock_common.tcl 0c270b121d40959fa2f3add382200c27045b3d95 F test/lookaside.test 93f07bac140c5bb1d49f3892d2684decafdc7af2 F test/main.test 39c4bb8a157f57298ed1659d6df89d9f35aaf2c8 F test/make-where7.tcl 05c16b5d4f5d6512881dfec560cb793915932ef9 -F test/malloc.test 4eb83876dfe4915766c179b687b8640437f14abf +F test/malloc.test 96939d2d1a6f39667bbebe5bc27c6525f2ab614e F test/malloc3.test e3b32c724b5a124b57cb0ed177f675249ad0c66a F test/malloc4.test 957337613002b7058a85116493a262f679f3a261 F test/malloc5.test fafce0aa9157060445cd1a56ad50fc79d82f28c3 @@ -701,7 +702,7 @@ F test/malloc6.test 2f039d9821927eacae43e1831f815e157659a151 F test/malloc7.test 7c68a32942858bc715284856c5507446bba88c3a F test/malloc8.test 9b7a3f8cb9cf0b12fff566e80a980b1767bd961d F test/malloc9.test 2307c6ee3703b0a21391f3ea92388b4b73f9105e -F test/mallocA.test 1ba0367fb5434e7bc2fa4afcb30b14174d91b160 +F test/mallocA.test c049224adeb0244b8f6eb770c1fa6ac40f9b3518 F test/mallocAll.test 98f1be74bc9f49a858bc4f361fc58e26486798be F test/mallocB.test bc475ab850cda896142ab935bbfbc74c24e51ed6 F test/mallocC.test 3dffe16532f109293ce1ccecd0c31dca55ef08c4 @@ -771,7 +772,7 @@ F test/pagesize.test 1dd51367e752e742f58e861e65ed7390603827a0 F 
test/pcache.test b09104b03160aca0d968d99e8cd2c5b1921a993d F test/pcache2.test a83efe2dec0d392f814bfc998def1d1833942025 F test/percentile.test b98fc868d71eb5619d42a1702e9ab91718cbed54 -F test/permutations.test bf568516e21758f2961a4612f29dc422d3ab75c1 +F test/permutations.test 89f594fdba922586d46c3e0a7ab4990b5a7f8da7 F test/pragma.test 19d0241a007bcdd77fc2606ec60fc60357e7fc8b F test/pragma2.test aea7b3d82c76034a2df2b38a13745172ddc0bc13 F test/printf.test ec9870c4dce8686a37818e0bf1aba6e6a1863552 @@ -854,7 +855,11 @@ F test/skipscan3.test ec5bab3f81c7038b43450e7b3062e04a198bdbb5 F test/skipscan5.test d8b9692b702745a0e41c23f9da6beac81df01196 F test/soak.test 0b5b6375c9f4110c828070b826b3b4b0bb65cd5f F test/softheap1.test 40562fe6cac6d9827b7b42b86d45aedf12c15e24 -F test/sort.test 0e4456e729e5a92a625907c63dcdedfbe72c5dc5 +F test/sort.test 15e1d3014abc3f6d4357ed81b93b82117aefd235 +F test/sort2.test 269f4f50c6e468cc32b302ae7ff0add8338ec6de +F test/sort3.test 6178ade30810ac9166fcdf14b7065e49c0f534e2 +F test/sort4.test 6c37d85f7cd28d50cce222fcab84ccd771e105cb +F test/sortfault.test b8e35177f97438b930ee87c9419ca2599e8073e1 F test/speed1.test f2974a91d79f58507ada01864c0e323093065452 F test/speed1p.explain d841e650a04728b39e6740296b852dccdca9b2cb F test/speed1p.test b180e98609c7677382cf618c0ec9b69f789033a8 @@ -863,7 +868,7 @@ F test/speed3.test d32043614c08c53eafdc80f33191d5bd9b920523 F test/speed4.test abc0ad3399dcf9703abed2fff8705e4f8e416715 F test/speed4p.explain 6b5f104ebeb34a038b2f714150f51d01143e59aa F test/speed4p.test 0e51908951677de5a969b723e03a27a1c45db38b -F test/speedtest1.c d29c8048beb7ea9254191f3fde9414709166a920 +F test/speedtest1.c 83f6b3318f7ee60e52b978b5a5e5dd7e83dfb7ee F test/spellfix.test 24f676831acddd2f4056a598fd731a72c6311f49 F test/sqllimits1.test b1aae27cc98eceb845e7f7adf918561256e31298 F test/stat.test 76fd746b85459e812a0193410fb599f0531f22de @@ -883,7 +888,7 @@ F test/tclsqlite.test a7308276aad2e6c0bfb5b0414424dd0d9cc0cad7 F test/tempdb.test 19d0f66e2e3eeffd68661a11c83ba5e6ace9128c F test/temptable.test d2c9b87a54147161bcd1822e30c1d1cd891e5b30 F test/temptrigger.test 8ec228b0db5d7ebc4ee9b458fc28cb9e7873f5e1 -F test/tester.tcl eac48cc21d519ac33a4fbaa3b425d178861fe741 +F test/tester.tcl 655afed0715958ec50fd575549e6c4e57311ff18 F test/thread001.test 9f22fd3525a307ff42a326b6bc7b0465be1745a5 F test/thread002.test e630504f8a06c00bf8bbe68528774dd96aeb2e58 F test/thread003.test ee4c9efc3b86a6a2767516a37bd64251272560a7 @@ -1137,7 +1142,7 @@ F test/whereF.test 5b2ba0dbe8074aa13e416b37c753991f0a2492d7 F test/whereG.test 69f5ec4b15760a8c860f80e2d55525669390aab3 F test/whereH.test e4b07f7a3c2f5d31195cd33710054c78667573b2 F test/whereI.test 1d89199697919d4930be05a71e7fe620f114e622 -F test/whereJ.test 35a40a50d0e13aa6b0de7cc5d4b204e5f9f9669f +F test/whereJ.test 8880784c211c459595f734a35bcc5f2061fce987 F test/wherelimit.test 5e9fd41e79bb2b2d588ed999d641d9c965619b31 F test/wild001.test bca33f499866f04c24510d74baf1e578d4e44b1c F test/win32heap.test ea19770974795cff26e11575e12d422dbd16893c @@ -1170,10 +1175,10 @@ F tool/logest.c eef612f8adf4d0993dafed0416064cf50d5d33c6 F tool/mkautoconfamal.sh 5dc5010e2e748a9e1bba67baca5956a2c2deda7b F tool/mkkeywordhash.c dfff09dbbfaf950e89af294f48f902181b144670 F tool/mkopts.tcl 66ac10d240cc6e86abd37dc908d50382f84ff46e -F tool/mkpragmatab.tcl 78a77b2c554d534c6f2dc903130186ed15715460 +F tool/mkpragmatab.tcl cce51d8f60c7f145d8fccabe6b5dfdedf31c5f5c F tool/mkspeedsql.tcl a1a334d288f7adfe6e996f2e712becf076745c97 -F tool/mksqlite3c-noext.tcl 
1712d3d71256ca1f297046619c89e77a4d7c8f6d -F tool/mksqlite3c.tcl d2a3f219da0d86cea1e7e7101f819e187385ea1d +F tool/mksqlite3c-noext.tcl 88a1e3b0c769773fb7a9ebb363ffc603a4ac21d8 +F tool/mksqlite3c.tcl a2f61b529778ffe620531352c03b5cdc6fd03c0a F tool/mksqlite3h.tcl 2d0f1b3768f8d000b7881217d5fd4c776eb27467 F tool/mksqlite3internalh.tcl b6514145a7d5321b47e64e19b8116cc44f973eb1 F tool/mkvsix.tcl 52a4c613707ac34ae9c226e5ccc69cb948556105 @@ -1205,7 +1210,7 @@ F tool/vdbe_profile.tcl 67746953071a9f8f2f668b73fe899074e2c6d8c1 F tool/warnings-clang.sh f6aa929dc20ef1f856af04a730772f59283631d4 F tool/warnings.sh 0abfd78ceb09b7f7c27c688c8e3fe93268a13b32 F tool/win/sqlite.vsix deb315d026cc8400325c5863eef847784a219a2f -P 0b9e2c3269695713b538561d999c68097db70f0c af364cce9da0961593ef876b646197f82df08ad5 -R 7c6d04c5134339ec2be8d6e1d0b0aa83 +P 08ae974ac80fabe53f515bbbd93ccf55de8ee671 9779c7a9eb1e2bd36e9286331a9314f064014d80 +R 11d01aea9d4869c03771e6bed22886b5 U drh -Z 9ac7a7803f5fd67d6db4d00d2f3fd4ea +Z 5223b9b83d8a079e78fcc2500add2ce8 diff --git a/manifest.uuid b/manifest.uuid index 796f45da2..89f6e3564 100644 --- a/manifest.uuid +++ b/manifest.uuid @@ -1 +1 @@ -08ae974ac80fabe53f515bbbd93ccf55de8ee671
\ No newline at end of file +d4cce2c71e64ab7b6a65a81b88b69445ed859351
\ No newline at end of file diff --git a/src/analyze.c b/src/analyze.c index f9c03dc84..9920c32a8 100644 --- a/src/analyze.c +++ b/src/analyze.c @@ -387,8 +387,9 @@ static void stat4Destructor(void *pOld){ ** original WITHOUT ROWID table as N==K as a special case. ** ** This routine allocates the Stat4Accum object in heap memory. The return -** value is a pointer to the the Stat4Accum object encoded as a blob (i.e. -** the size of the blob is sizeof(void*) bytes). +** value is a pointer to the the Stat4Accum object. The datatype of the +** return value is BLOB, but it is really just a pointer to the Stat4Accum +** object. */ static void statInit( sqlite3_context *context, @@ -466,8 +467,11 @@ static void statInit( } #endif - /* Return a pointer to the allocated object to the caller */ - sqlite3_result_blob(context, p, sizeof(p), stat4Destructor); + /* Return a pointer to the allocated object to the caller. Note that + ** only the pointer (the 2nd parameter) matters. The size of the object + ** (given by the 3rd parameter) is never used and can be any positive + ** value. */ + sqlite3_result_blob(context, p, sizeof(*p), stat4Destructor); } static const FuncDef statInitFuncdef = { 2+IsStat34, /* nArg */ @@ -793,7 +797,7 @@ static const FuncDef statPushFuncdef = { ** Implementation of the stat_get(P,J) SQL function. This routine is ** used to query statistical information that has been gathered into ** the Stat4Accum object by prior calls to stat_push(). The P parameter -** is a BLOB which is decoded into a pointer to the Stat4Accum objects. +** has type BLOB but it is really just a pointer to the Stat4Accum object. ** The content to returned is determined by the parameter J ** which is one of the STAT_GET_xxxx values defined above. ** diff --git a/src/btree.c b/src/btree.c index 384bab218..a04302225 100644 --- a/src/btree.c +++ b/src/btree.c @@ -4513,17 +4513,16 @@ static int moveToRightmost(BtCursor *pCur){ assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); - while( rc==SQLITE_OK && !(pPage = pCur->apPage[pCur->iPage])->leaf ){ + while( !(pPage = pCur->apPage[pCur->iPage])->leaf ){ pgno = get4byte(&pPage->aData[pPage->hdrOffset+8]); pCur->aiIdx[pCur->iPage] = pPage->nCell; rc = moveToChild(pCur, pgno); + if( rc ) return rc; } - if( rc==SQLITE_OK ){ - pCur->aiIdx[pCur->iPage] = pPage->nCell-1; - pCur->info.nSize = 0; - pCur->curFlags &= ~BTCF_ValidNKey; - } - return rc; + pCur->aiIdx[pCur->iPage] = pPage->nCell-1; + assert( pCur->info.nSize==0 ); + assert( (pCur->curFlags & BTCF_ValidNKey)==0 ); + return SQLITE_OK; } /* Move the cursor to the first entry in the table. 
Return SQLITE_OK @@ -4654,7 +4653,7 @@ int sqlite3BtreeMovetoUnpacked( if( pIdxKey ){ xRecordCompare = sqlite3VdbeFindCompare(pIdxKey); - pIdxKey->isCorrupt = 0; + pIdxKey->errCode = 0; assert( pIdxKey->default_rc==1 || pIdxKey->default_rc==0 || pIdxKey->default_rc==-1 @@ -4778,7 +4777,10 @@ int sqlite3BtreeMovetoUnpacked( c = xRecordCompare(nCell, pCellKey, pIdxKey, 0); sqlite3_free(pCellKey); } - assert( pIdxKey->isCorrupt==0 || c==0 ); + assert( + (pIdxKey->errCode!=SQLITE_CORRUPT || c==0) + && (pIdxKey->errCode!=SQLITE_NOMEM || pCur->pBtree->db->mallocFailed) + ); if( c<0 ){ lwr = idx+1; }else if( c>0 ){ @@ -4788,7 +4790,7 @@ int sqlite3BtreeMovetoUnpacked( *pRes = 0; rc = SQLITE_OK; pCur->aiIdx[pCur->iPage] = (u16)idx; - if( pIdxKey->isCorrupt ) rc = SQLITE_CORRUPT; + if( pIdxKey->errCode ) rc = SQLITE_CORRUPT; goto moveto_finish; } if( lwr>upr ) break; @@ -4843,6 +4845,12 @@ int sqlite3BtreeEof(BtCursor *pCur){ ** was already pointing to the last entry in the database before ** this routine was called, then set *pRes=1. ** +** The main entry point is sqlite3BtreeNext(). That routine is optimized +** for the common case of merely incrementing the cell counter BtCursor.aiIdx +** to the next cell on the current page. The (slower) btreeNext() helper +** routine is called when it is necessary to move to a different page or +** to restore the cursor. +** ** The calling function will set *pRes to 0 or 1. The initial *pRes value ** will be 1 if the cursor being stepped corresponds to an SQL index and ** if this routine could have been skipped if that SQL index had been @@ -4852,20 +4860,18 @@ int sqlite3BtreeEof(BtCursor *pCur){ ** SQLite btree implementation does not. (Note that the comdb2 btree ** implementation does use this hint, however.) */ -int sqlite3BtreeNext(BtCursor *pCur, int *pRes){ +static SQLITE_NOINLINE int btreeNext(BtCursor *pCur, int *pRes){ int rc; int idx; MemPage *pPage; assert( cursorHoldsMutex(pCur) ); - assert( pRes!=0 ); - assert( *pRes==0 || *pRes==1 ); assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID ); + assert( *pRes==0 ); if( pCur->eState!=CURSOR_VALID ){ - invalidateOverflowCache(pCur); + assert( (pCur->curFlags & BTCF_ValidOvfl)==0 ); rc = restoreCursorPosition(pCur); if( rc!=SQLITE_OK ){ - *pRes = 0; return rc; } if( CURSOR_INVALID==pCur->eState ){ @@ -4877,7 +4883,6 @@ int sqlite3BtreeNext(BtCursor *pCur, int *pRes){ pCur->eState = CURSOR_VALID; if( pCur->skipNext>0 ){ pCur->skipNext = 0; - *pRes = 0; return SQLITE_OK; } pCur->skipNext = 0; @@ -4895,18 +4900,11 @@ int sqlite3BtreeNext(BtCursor *pCur, int *pRes){ ** page into more than one b-tree structure. 
*/ testcase( idx>pPage->nCell ); - pCur->info.nSize = 0; - pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); if( idx>=pPage->nCell ){ if( !pPage->leaf ){ rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8])); - if( rc ){ - *pRes = 0; - return rc; - } - rc = moveToLeftmost(pCur); - *pRes = 0; - return rc; + if( rc ) return rc; + return moveToLeftmost(pCur); } do{ if( pCur->iPage==0 ){ @@ -4917,29 +4915,52 @@ int sqlite3BtreeNext(BtCursor *pCur, int *pRes){ moveToParent(pCur); pPage = pCur->apPage[pCur->iPage]; }while( pCur->aiIdx[pCur->iPage]>=pPage->nCell ); - *pRes = 0; if( pPage->intKey ){ - rc = sqlite3BtreeNext(pCur, pRes); + return sqlite3BtreeNext(pCur, pRes); }else{ - rc = SQLITE_OK; + return SQLITE_OK; } - return rc; } + if( pPage->leaf ){ + return SQLITE_OK; + }else{ + return moveToLeftmost(pCur); + } +} +int sqlite3BtreeNext(BtCursor *pCur, int *pRes){ + MemPage *pPage; + assert( cursorHoldsMutex(pCur) ); + assert( pRes!=0 ); + assert( *pRes==0 || *pRes==1 ); + assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID ); + pCur->info.nSize = 0; + pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); *pRes = 0; + if( pCur->eState!=CURSOR_VALID ) return btreeNext(pCur, pRes); + pPage = pCur->apPage[pCur->iPage]; + if( (++pCur->aiIdx[pCur->iPage])>=pPage->nCell ){ + pCur->aiIdx[pCur->iPage]--; + return btreeNext(pCur, pRes); + } if( pPage->leaf ){ return SQLITE_OK; + }else{ + return moveToLeftmost(pCur); } - rc = moveToLeftmost(pCur); - return rc; } - /* ** Step the cursor to the back to the previous entry in the database. If ** successful then set *pRes=0. If the cursor ** was already pointing to the first entry in the database before ** this routine was called, then set *pRes=1. ** +** The main entry point is sqlite3BtreePrevious(). That routine is optimized +** for the common case of merely decrementing the cell counter BtCursor.aiIdx +** to the previous cell on the current page. The (slower) btreePrevious() helper +** routine is called when it is necessary to move to a different page or +** to restore the cursor. +** ** The calling function will set *pRes to 0 or 1. The initial *pRes value ** will be 1 if the cursor being stepped corresponds to an SQL index and ** if this routine could have been skipped if that SQL index had been @@ -4949,22 +4970,21 @@ int sqlite3BtreeNext(BtCursor *pCur, int *pRes){ ** SQLite btree implementation does not. (Note that the comdb2 btree ** implementation does use this hint, however.) 
*/ -int sqlite3BtreePrevious(BtCursor *pCur, int *pRes){ +static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur, int *pRes){ int rc; MemPage *pPage; assert( cursorHoldsMutex(pCur) ); assert( pRes!=0 ); - assert( *pRes==0 || *pRes==1 ); + assert( *pRes==0 ); assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID ); - pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidOvfl); + assert( (pCur->curFlags & (BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey))==0 ); + assert( pCur->info.nSize==0 ); if( pCur->eState!=CURSOR_VALID ){ - if( ALWAYS(pCur->eState>=CURSOR_REQUIRESEEK) ){ - rc = btreeRestoreCursorPosition(pCur); - if( rc!=SQLITE_OK ){ - *pRes = 0; - return rc; - } + assert( pCur->eState>=CURSOR_REQUIRESEEK ); + rc = btreeRestoreCursorPosition(pCur); + if( rc!=SQLITE_OK ){ + return rc; } if( CURSOR_INVALID==pCur->eState ){ *pRes = 1; @@ -4975,7 +4995,6 @@ int sqlite3BtreePrevious(BtCursor *pCur, int *pRes){ pCur->eState = CURSOR_VALID; if( pCur->skipNext<0 ){ pCur->skipNext = 0; - *pRes = 0; return SQLITE_OK; } pCur->skipNext = 0; @@ -4987,10 +5006,7 @@ int sqlite3BtreePrevious(BtCursor *pCur, int *pRes){ if( !pPage->leaf ){ int idx = pCur->aiIdx[pCur->iPage]; rc = moveToChild(pCur, get4byte(findCell(pPage, idx))); - if( rc ){ - *pRes = 0; - return rc; - } + if( rc ) return rc; rc = moveToRightmost(pCur); }else{ while( pCur->aiIdx[pCur->iPage]==0 ){ @@ -5001,8 +5017,8 @@ int sqlite3BtreePrevious(BtCursor *pCur, int *pRes){ } moveToParent(pCur); } - pCur->info.nSize = 0; - pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); + assert( pCur->info.nSize==0 ); + assert( (pCur->curFlags & (BTCF_ValidNKey|BTCF_ValidOvfl))==0 ); pCur->aiIdx[pCur->iPage]--; pPage = pCur->apPage[pCur->iPage]; @@ -5012,9 +5028,25 @@ int sqlite3BtreePrevious(BtCursor *pCur, int *pRes){ rc = SQLITE_OK; } } - *pRes = 0; return rc; } +int sqlite3BtreePrevious(BtCursor *pCur, int *pRes){ + assert( cursorHoldsMutex(pCur) ); + assert( pRes!=0 ); + assert( *pRes==0 || *pRes==1 ); + assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID ); + *pRes = 0; + pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey); + pCur->info.nSize = 0; + if( pCur->eState!=CURSOR_VALID + || pCur->aiIdx[pCur->iPage]==0 + || pCur->apPage[pCur->iPage]->leaf==0 + ){ + return btreePrevious(pCur, pRes); + } + pCur->aiIdx[pCur->iPage]--; + return SQLITE_OK; +} /* ** Allocate a new page from the database file. diff --git a/src/build.c b/src/build.c index af776cb3d..0921d6d25 100644 --- a/src/build.c +++ b/src/build.c @@ -2679,7 +2679,7 @@ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){ /* Open the sorter cursor if we are to use one. */ iSorter = pParse->nTab++; - sqlite3VdbeAddOp4(v, OP_SorterOpen, iSorter, 0, 0, (char*) + sqlite3VdbeAddOp4(v, OP_SorterOpen, iSorter, 0, pIndex->nKeyCol, (char*) sqlite3KeyInfoRef(pKey), P4_KEYINFO); /* Open the table. 
Loop through all rows of the table, inserting index @@ -3028,7 +3028,7 @@ Index *sqlite3CreateIndex( pParse->checkSchema = 1; goto exit_create_index; } - assert( pTab->nCol<=0x7fff && j<=0x7fff ); + assert( j<=0x7fff ); pIndex->aiColumn[i] = (i16)j; if( pListItem->pExpr ){ int nColl; diff --git a/src/expr.c b/src/expr.c index fabdae2fc..1a2465f7e 100644 --- a/src/expr.c +++ b/src/expr.c @@ -1912,6 +1912,7 @@ int sqlite3CodeSubselect( sqlite3SelectDestInit(&dest, 0, ++pParse->nMem); if( pExpr->op==TK_SELECT ){ dest.eDest = SRT_Mem; + dest.iSdst = dest.iSDParm; sqlite3VdbeAddOp2(v, OP_Null, 0, dest.iSDParm); VdbeComment((v, "Init subquery result")); }else{ diff --git a/src/main.c b/src/main.c index fa218efd4..948ffebde 100644 --- a/src/main.c +++ b/src/main.c @@ -2099,6 +2099,7 @@ static const int aHardLimit[] = { SQLITE_MAX_LIKE_PATTERN_LENGTH, SQLITE_MAX_VARIABLE_NUMBER, /* IMP: R-38091-32352 */ SQLITE_MAX_TRIGGER_DEPTH, + SQLITE_MAX_WORKER_THREADS, }; /* @@ -2134,6 +2135,9 @@ static const int aHardLimit[] = { #if SQLITE_MAX_TRIGGER_DEPTH<1 # error SQLITE_MAX_TRIGGER_DEPTH must be at least 1 #endif +#if SQLITE_MAX_WORKER_THREADS<0 || SQLITE_MAX_WORKER_THREADS>50 +# error SQLITE_MAX_WORKER_THREADS must be between 0 and 50 +#endif /* @@ -2167,7 +2171,8 @@ int sqlite3_limit(sqlite3 *db, int limitId, int newLimit){ SQLITE_MAX_LIKE_PATTERN_LENGTH ); assert( aHardLimit[SQLITE_LIMIT_VARIABLE_NUMBER]==SQLITE_MAX_VARIABLE_NUMBER); assert( aHardLimit[SQLITE_LIMIT_TRIGGER_DEPTH]==SQLITE_MAX_TRIGGER_DEPTH ); - assert( SQLITE_LIMIT_TRIGGER_DEPTH==(SQLITE_N_LIMIT-1) ); + assert( aHardLimit[SQLITE_LIMIT_WORKER_THREADS]==SQLITE_MAX_WORKER_THREADS ); + assert( SQLITE_LIMIT_WORKER_THREADS==(SQLITE_N_LIMIT-1) ); if( limitId<0 || limitId>=SQLITE_N_LIMIT ){ @@ -2514,10 +2519,12 @@ static int openDatabase( assert( sizeof(db->aLimit)==sizeof(aHardLimit) ); memcpy(db->aLimit, aHardLimit, sizeof(db->aLimit)); + db->aLimit[SQLITE_LIMIT_WORKER_THREADS] = SQLITE_DEFAULT_WORKER_THREADS; db->autoCommit = 1; db->nextAutovac = -1; db->szMmap = sqlite3GlobalConfig.szMmap; db->nextPagesize = 0; + db->nMaxSorterMmap = 0x7FFFFFFF; db->flags |= SQLITE_ShortColNames | SQLITE_EnableTrigger | SQLITE_CacheSpill #if !defined(SQLITE_DEFAULT_AUTOMATIC_INDEX) || SQLITE_DEFAULT_AUTOMATIC_INDEX | SQLITE_AutoIndex @@ -3382,6 +3389,13 @@ int sqlite3_test_control(int op, ...){ break; } + /* sqlite3_test_control(SQLITE_TESTCTRL_SORTER_MMAP, db, nMax); */ + case SQLITE_TESTCTRL_SORTER_MMAP: { + sqlite3 *db = va_arg(ap, sqlite3*); + db->nMaxSorterMmap = va_arg(ap, int); + break; + } + /* sqlite3_test_control(SQLITE_TESTCTRL_ISINIT); ** ** Return SQLITE_OK if SQLite has been initialized and SQLITE_ERROR if @@ -3391,7 +3405,6 @@ int sqlite3_test_control(int op, ...){ if( sqlite3GlobalConfig.isInit==0 ) rc = SQLITE_ERROR; break; } - } va_end(ap); #endif /* SQLITE_OMIT_BUILTIN_TEST */ diff --git a/src/os_unix.c b/src/os_unix.c index b1a0bedcf..f63afc6bc 100644 --- a/src/os_unix.c +++ b/src/os_unix.c @@ -5885,7 +5885,7 @@ static int unixDelete( if( osUnlink(zPath)==(-1) ){ if( errno==ENOENT #if OS_VXWORKS - || errno==0x380003 + || osAccess(zPath,0)!=0 #endif ){ rc = SQLITE_IOERR_DELETE_NOENT; diff --git a/src/os_win.c b/src/os_win.c index b9f13becd..46bf88e38 100644 --- a/src/os_win.c +++ b/src/os_win.c @@ -943,11 +943,7 @@ static struct win_syscall { #define osWaitForSingleObject ((DWORD(WINAPI*)(HANDLE, \ DWORD))aSyscall[63].pCurrent) -#if SQLITE_OS_WINRT { "WaitForSingleObjectEx", (SYSCALL)WaitForSingleObjectEx, 0 }, -#else - { 
"WaitForSingleObjectEx", (SYSCALL)0, 0 }, -#endif #define osWaitForSingleObjectEx ((DWORD(WINAPI*)(HANDLE,DWORD, \ BOOL))aSyscall[64].pCurrent) @@ -1290,6 +1286,13 @@ void sqlite3_win32_sleep(DWORD milliseconds){ #endif } +DWORD sqlite3Win32Wait(HANDLE hObject){ + DWORD rc; + while( (rc = osWaitForSingleObjectEx(hObject, INFINITE, + TRUE))==WAIT_IO_COMPLETION ){} + return rc; +} + /* ** Return true (non-zero) if we are running under WinNT, Win2K, WinXP, ** or WinCE. Return false (zero) for Win95, Win98, or WinME. @@ -1317,19 +1320,24 @@ void sqlite3_win32_sleep(DWORD milliseconds){ ** based on the NT kernel. */ int sqlite3_win32_is_nt(void){ -#if defined(SQLITE_WIN32_GETVERSIONEX) && SQLITE_WIN32_GETVERSIONEX +#if SQLITE_OS_WINRT + /* + ** NOTE: The WinRT sub-platform is always assumed to be based on the NT + ** kernel. + */ + return 1; +#elif defined(SQLITE_WIN32_GETVERSIONEX) && SQLITE_WIN32_GETVERSIONEX if( osInterlockedCompareExchange(&sqlite3_os_type, 0, 0)==0 ){ -#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) && \ - defined(NTDDI_VERSION) && NTDDI_VERSION >= NTDDI_WIN8 - OSVERSIONINFOW sInfo; +#if defined(SQLITE_WIN32_HAS_ANSI) + OSVERSIONINFOA sInfo; sInfo.dwOSVersionInfoSize = sizeof(sInfo); - osGetVersionExW(&sInfo); + osGetVersionExA(&sInfo); osInterlockedCompareExchange(&sqlite3_os_type, (sInfo.dwPlatformId == VER_PLATFORM_WIN32_NT) ? 2 : 1, 0); -#elif defined(SQLITE_WIN32_HAS_ANSI) - OSVERSIONINFOA sInfo; +#elif defined(SQLITE_WIN32_HAS_WIDE) + OSVERSIONINFOW sInfo; sInfo.dwOSVersionInfoSize = sizeof(sInfo); - osGetVersionExA(&sInfo); + osGetVersionExW(&sInfo); osInterlockedCompareExchange(&sqlite3_os_type, (sInfo.dwPlatformId == VER_PLATFORM_WIN32_NT) ? 2 : 1, 0); #endif @@ -1338,6 +1346,10 @@ int sqlite3_win32_is_nt(void){ #elif SQLITE_TEST return osInterlockedCompareExchange(&sqlite3_os_type, 2, 2)==2; #else + /* + ** NOTE: All sub-platforms where the GetVersionEx[AW] functions are + ** deprecated are always assumed to be based on the NT kernel. + */ return 1; #endif } diff --git a/src/pager.c b/src/pager.c index 8930ce862..3ef54d98e 100644 --- a/src/pager.c +++ b/src/pager.c @@ -3622,7 +3622,7 @@ int sqlite3PagerSetPagesize(Pager *pPager, u32 *pPageSize, int nReserve){ pPager->pageSize = pageSize; sqlite3PageFree(pPager->pTmpSpace); pPager->pTmpSpace = pNew; - sqlite3PcacheSetPageSize(pPager->pPCache, pageSize); + rc = sqlite3PcacheSetPageSize(pPager->pPCache, pageSize); } } @@ -4385,7 +4385,7 @@ static int pagerStress(void *p, PgHdr *pPg){ ** ** Spilling is also prohibited when in an error state since that could ** lead to database corruption. In the current implementaton it - ** is impossible for sqlite3PcacheFetch() to be called with createFlag==1 + ** is impossible for sqlite3PcacheFetch() to be called with createFlag==3 ** while in the error state, hence it is impossible for this routine to ** be called in the error state. Nevertheless, we include a NEVER() ** test for the error state as a safeguard against future changes. @@ -4721,22 +4721,23 @@ act_like_temp_file: testcase( rc!=SQLITE_OK ); } - /* If an error occurred in either of the blocks above, free the - ** Pager structure and close the file. + /* Initialize the PCache object. */ + if( rc==SQLITE_OK ){ + assert( nExtra<1000 ); + nExtra = ROUND8(nExtra); + rc = sqlite3PcacheOpen(szPageDflt, nExtra, !memDb, + !memDb?pagerStress:0, (void *)pPager, pPager->pPCache); + } + + /* If an error occurred above, free the Pager structure and close the file. 
*/ if( rc!=SQLITE_OK ){ - assert( !pPager->pTmpSpace ); sqlite3OsClose(pPager->fd); + sqlite3PageFree(pPager->pTmpSpace); sqlite3_free(pPager); return rc; } - /* Initialize the PCache object. */ - assert( nExtra<1000 ); - nExtra = ROUND8(nExtra); - sqlite3PcacheOpen(szPageDflt, nExtra, !memDb, - !memDb?pagerStress:0, (void *)pPager, pPager->pPCache); - PAGERTRACE(("OPEN %d %s\n", FILEHANDLEID(pPager->fd), pPager->zFilename)); IOTRACE(("OPEN %p %s\n", pPager, pPager->zFilename)) @@ -5285,7 +5286,6 @@ int sqlite3PagerAcquire( if( pPager->errCode!=SQLITE_OK ){ rc = pPager->errCode; }else{ - if( bMmapOk && pagerUseWal(pPager) ){ rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame); if( rc!=SQLITE_OK ) goto pager_acquire_err; @@ -5300,7 +5300,7 @@ int sqlite3PagerAcquire( if( rc==SQLITE_OK && pData ){ if( pPager->eState>PAGER_READER ){ - (void)sqlite3PcacheFetch(pPager->pPCache, pgno, 0, &pPg); + pPg = sqlite3PagerLookup(pPager, pgno); } if( pPg==0 ){ rc = pagerAcquireMapPage(pPager, pgno, pData, &pPg); @@ -5318,7 +5318,16 @@ int sqlite3PagerAcquire( } } - rc = sqlite3PcacheFetch(pPager->pPCache, pgno, 1, ppPage); + { + sqlite3_pcache_page *pBase; + pBase = sqlite3PcacheFetch(pPager->pPCache, pgno, 3); + if( pBase==0 ){ + rc = sqlite3PcacheFetchStress(pPager->pPCache, pgno, &pBase); + if( rc!=SQLITE_OK ) goto pager_acquire_err; + } + pPg = *ppPage = sqlite3PcacheFetchFinish(pPager->pPCache, pgno, pBase); + if( pPg==0 ) rc = SQLITE_NOMEM; + } } if( rc!=SQLITE_OK ){ @@ -5415,12 +5424,12 @@ pager_acquire_err: ** has ever happened. */ DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno){ - PgHdr *pPg = 0; + sqlite3_pcache_page *pPage; assert( pPager!=0 ); assert( pgno!=0 ); assert( pPager->pPCache!=0 ); - sqlite3PcacheFetch(pPager->pPCache, pgno, 0, &pPg); - return pPg; + pPage = sqlite3PcacheFetch(pPager->pPCache, pgno, 0); + return sqlite3PcacheFetchFinish(pPager->pPCache, pgno, pPage); } /* diff --git a/src/pcache.c b/src/pcache.c index 2e4b5d78b..eabfadd4b 100644 --- a/src/pcache.c +++ b/src/pcache.c @@ -144,6 +144,17 @@ static void pcacheUnpin(PgHdr *p){ } } +/* +** Compute the number of pages of cache requested. +*/ +static int numberOfCachePages(PCache *p){ + if( p->szCache>=0 ){ + return p->szCache; + }else{ + return (int)((-1024*(i64)p->szCache)/(p->szPage+p->szExtra)); + } +} + /*************************************************** General Interfaces ****** ** ** Initialize and shutdown the page cache subsystem. Neither of these @@ -176,7 +187,7 @@ int sqlite3PcacheSize(void){ return sizeof(PCache); } ** The caller discovers how much space needs to be allocated by ** calling sqlite3PcacheSize(). */ -void sqlite3PcacheOpen( +int sqlite3PcacheOpen( int szPage, /* Size of every page */ int szExtra, /* Extra space associated with each page */ int bPurgeable, /* True if pages are on backing store */ @@ -185,76 +196,75 @@ void sqlite3PcacheOpen( PCache *p /* Preallocated space for the PCache */ ){ memset(p, 0, sizeof(PCache)); - p->szPage = szPage; + p->szPage = 1; p->szExtra = szExtra; p->bPurgeable = bPurgeable; p->eCreate = 2; p->xStress = xStress; p->pStress = pStress; p->szCache = 100; + return sqlite3PcacheSetPageSize(p, szPage); } /* ** Change the page size for PCache object. The caller must ensure that there ** are no outstanding page references when this function is called. 
*/ -void sqlite3PcacheSetPageSize(PCache *pCache, int szPage){ +int sqlite3PcacheSetPageSize(PCache *pCache, int szPage){ assert( pCache->nRef==0 && pCache->pDirty==0 ); - if( pCache->pCache ){ - sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); - pCache->pCache = 0; + if( pCache->szPage ){ + sqlite3_pcache *pNew; + pNew = sqlite3GlobalConfig.pcache2.xCreate( + szPage, pCache->szExtra + sizeof(PgHdr), pCache->bPurgeable + ); + if( pNew==0 ) return SQLITE_NOMEM; + sqlite3GlobalConfig.pcache2.xCachesize(pNew, numberOfCachePages(pCache)); + if( pCache->pCache ){ + sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); + } + pCache->pCache = pNew; pCache->pPage1 = 0; + pCache->szPage = szPage; } - pCache->szPage = szPage; -} - -/* -** Compute the number of pages of cache requested. -*/ -static int numberOfCachePages(PCache *p){ - if( p->szCache>=0 ){ - return p->szCache; - }else{ - return (int)((-1024*(i64)p->szCache)/(p->szPage+p->szExtra)); - } + return SQLITE_OK; } /* ** Try to obtain a page from the cache. -*/ -int sqlite3PcacheFetch( +** +** This routine returns a pointer to an sqlite3_pcache_page object if +** such an object is already in cache, or if a new one is created. +** This routine returns a NULL pointer if the object was not in cache +** and could not be created. +** +** The createFlags should be 0 to check for existing pages and should +** be 3 (not 1, but 3) to try to create a new page. +** +** If the createFlag is 0, then NULL is always returned if the page +** is not already in the cache. If createFlag is 1, then a new page +** is created only if that can be done without spilling dirty pages +** and without exceeding the cache size limit. +** +** The caller needs to invoke sqlite3PcacheFetchFinish() to properly +** initialize the sqlite3_pcache_page object and convert it into a +** PgHdr object. The sqlite3PcacheFetch() and sqlite3PcacheFetchFinish() +** routines are split this way for performance reasons. When separated +** they can both (usually) operate without having to push values to +** the stack on entry and pop them back off on exit, which saves a +** lot of pushing and popping. +*/ +sqlite3_pcache_page *sqlite3PcacheFetch( PCache *pCache, /* Obtain the page from this cache */ Pgno pgno, /* Page number to obtain */ - int createFlag, /* If true, create page if it does not exist already */ - PgHdr **ppPage /* Write the page here */ + int createFlag /* If true, create page if it does not exist already */ ){ - sqlite3_pcache_page *pPage; - PgHdr *pPgHdr = 0; int eCreate; assert( pCache!=0 ); - assert( createFlag==1 || createFlag==0 ); + assert( pCache->pCache!=0 ); + assert( createFlag==3 || createFlag==0 ); assert( pgno>0 ); - /* If the pluggable cache (sqlite3_pcache*) has not been allocated, - ** allocate it now. - */ - if( !pCache->pCache ){ - sqlite3_pcache *p; - if( !createFlag ){ - *ppPage = 0; - return SQLITE_OK; - } - p = sqlite3GlobalConfig.pcache2.xCreate( - pCache->szPage, pCache->szExtra + sizeof(PgHdr), pCache->bPurgeable - ); - if( !p ){ - return SQLITE_NOMEM; - } - sqlite3GlobalConfig.pcache2.xCachesize(p, numberOfCachePages(pCache)); - pCache->pCache = p; - } - /* eCreate defines what to do if the page does not exist. ** 0 Do not allocate a new page. (createFlag==0) ** 1 Allocate a new page if doing so is inexpensive. @@ -262,71 +272,120 @@ int sqlite3PcacheFetch( ** 2 Allocate a new page even it doing so is difficult. ** (createFlag==1 AND !(bPurgeable AND pDirty) */ - eCreate = createFlag==0 ? 
0 : pCache->eCreate; - assert( (createFlag*(1+(!pCache->bPurgeable||!pCache->pDirty)))==eCreate ); - pPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate); - if( !pPage && eCreate==1 ){ - PgHdr *pPg; - - /* Find a dirty page to write-out and recycle. First try to find a - ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC - ** cleared), but if that is not possible settle for any other - ** unreferenced dirty page. - */ - expensive_assert( pcacheCheckSynced(pCache) ); - for(pPg=pCache->pSynced; - pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC)); - pPg=pPg->pDirtyPrev - ); - pCache->pSynced = pPg; - if( !pPg ){ - for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev); - } - if( pPg ){ - int rc; + eCreate = createFlag & pCache->eCreate; + assert( eCreate==0 || eCreate==1 || eCreate==2 ); + assert( createFlag==0 || pCache->eCreate==eCreate ); + assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) ); + return sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate); +} + +/* +** If the sqlite3PcacheFetch() routine is unable to allocate a new +** page because new clean pages are available for reuse and the cache +** size limit has been reached, then this routine can be invoked to +** try harder to allocate a page. This routine might invoke the stress +** callback to spill dirty pages to the journal. It will then try to +** allocate the new page and will only fail to allocate a new page on +** an OOM error. +** +** This routine should be invoked only after sqlite3PcacheFetch() fails. +*/ +int sqlite3PcacheFetchStress( + PCache *pCache, /* Obtain the page from this cache */ + Pgno pgno, /* Page number to obtain */ + sqlite3_pcache_page **ppPage /* Write result here */ +){ + PgHdr *pPg; + if( pCache->eCreate==2 ) return 0; + + + /* Find a dirty page to write-out and recycle. First try to find a + ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC + ** cleared), but if that is not possible settle for any other + ** unreferenced dirty page. + */ + expensive_assert( pcacheCheckSynced(pCache) ); + for(pPg=pCache->pSynced; + pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC)); + pPg=pPg->pDirtyPrev + ); + pCache->pSynced = pPg; + if( !pPg ){ + for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev); + } + if( pPg ){ + int rc; #ifdef SQLITE_LOG_CACHE_SPILL - sqlite3_log(SQLITE_FULL, - "spill page %d making room for %d - cache used: %d/%d", - pPg->pgno, pgno, - sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache), - numberOfCachePages(pCache)); + sqlite3_log(SQLITE_FULL, + "spill page %d making room for %d - cache used: %d/%d", + pPg->pgno, pgno, + sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache), + numberOfCachePages(pCache)); #endif - rc = pCache->xStress(pCache->pStress, pPg); - if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){ - return rc; - } + rc = pCache->xStress(pCache->pStress, pPg); + if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){ + return rc; } - - pPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, 2); } + *ppPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, 2); + return *ppPage==0 ? 
SQLITE_NOMEM : SQLITE_OK; +} - if( pPage ){ - pPgHdr = (PgHdr *)pPage->pExtra; - - if( !pPgHdr->pPage ){ - memset(pPgHdr, 0, sizeof(PgHdr)); - pPgHdr->pPage = pPage; - pPgHdr->pData = pPage->pBuf; - pPgHdr->pExtra = (void *)&pPgHdr[1]; - memset(pPgHdr->pExtra, 0, pCache->szExtra); - pPgHdr->pCache = pCache; - pPgHdr->pgno = pgno; - } - assert( pPgHdr->pCache==pCache ); - assert( pPgHdr->pgno==pgno ); - assert( pPgHdr->pData==pPage->pBuf ); - assert( pPgHdr->pExtra==(void *)&pPgHdr[1] ); +/* +** This is a helper routine for sqlite3PcacheFetchFinish() +** +** In the uncommon case where the page being fetched has not been +** initialized, this routine is invoked to do the initialization. +** This routine is broken out into a separate function since it +** requires extra stack manipulation that can be avoided in the common +** case. +*/ +static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit( + PCache *pCache, /* Obtain the page from this cache */ + Pgno pgno, /* Page number obtained */ + sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */ +){ + PgHdr *pPgHdr; + assert( pPage!=0 ); + pPgHdr = (PgHdr*)pPage->pExtra; + assert( pPgHdr->pPage==0 ); + memset(pPgHdr, 0, sizeof(PgHdr)); + pPgHdr->pPage = pPage; + pPgHdr->pData = pPage->pBuf; + pPgHdr->pExtra = (void *)&pPgHdr[1]; + memset(pPgHdr->pExtra, 0, pCache->szExtra); + pPgHdr->pCache = pCache; + pPgHdr->pgno = pgno; + return sqlite3PcacheFetchFinish(pCache,pgno,pPage); +} - if( 0==pPgHdr->nRef ){ - pCache->nRef++; - } - pPgHdr->nRef++; - if( pgno==1 ){ - pCache->pPage1 = pPgHdr; - } +/* +** This routine converts the sqlite3_pcache_page object returned by +** sqlite3PcacheFetch() into an initialized PgHdr object. This routine +** must be called after sqlite3PcacheFetch() in order to get a usable +** result. +*/ +PgHdr *sqlite3PcacheFetchFinish( + PCache *pCache, /* Obtain the page from this cache */ + Pgno pgno, /* Page number obtained */ + sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */ +){ + PgHdr *pPgHdr; + + if( pPage==0 ) return 0; + pPgHdr = (PgHdr *)pPage->pExtra; + + if( !pPgHdr->pPage ){ + return pcacheFetchFinishWithInit(pCache, pgno, pPage); + } + if( 0==pPgHdr->nRef ){ + pCache->nRef++; } - *ppPage = pPgHdr; - return (pPgHdr==0 && eCreate) ? SQLITE_NOMEM : SQLITE_OK; + pPgHdr->nRef++; + if( pgno==1 ){ + pCache->pPage1 = pPgHdr; + } + return pPgHdr; } /* @@ -471,9 +530,8 @@ void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){ ** Close a cache. */ void sqlite3PcacheClose(PCache *pCache){ - if( pCache->pCache ){ - sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); - } + assert( pCache->pCache!=0 ); + sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); } /* @@ -582,11 +640,8 @@ int sqlite3PcachePageRefcount(PgHdr *p){ ** Return the total number of pages in the cache. */ int sqlite3PcachePagecount(PCache *pCache){ - int nPage = 0; - if( pCache->pCache ){ - nPage = sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache); - } - return nPage; + assert( pCache->pCache!=0 ); + return sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache); } #ifdef SQLITE_TEST @@ -602,20 +657,18 @@ int sqlite3PcacheGetCachesize(PCache *pCache){ ** Set the suggested cache-size value. 
*/ void sqlite3PcacheSetCachesize(PCache *pCache, int mxPage){ + assert( pCache->pCache!=0 ); pCache->szCache = mxPage; - if( pCache->pCache ){ - sqlite3GlobalConfig.pcache2.xCachesize(pCache->pCache, - numberOfCachePages(pCache)); - } + sqlite3GlobalConfig.pcache2.xCachesize(pCache->pCache, + numberOfCachePages(pCache)); } /* ** Free up as much memory as possible from the page cache. */ void sqlite3PcacheShrink(PCache *pCache){ - if( pCache->pCache ){ - sqlite3GlobalConfig.pcache2.xShrink(pCache->pCache); - } + assert( pCache->pCache!=0 ); + sqlite3GlobalConfig.pcache2.xShrink(pCache->pCache); } #if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG) diff --git a/src/pcache.h b/src/pcache.h index f4d4ad71c..dd9bfc745 100644 --- a/src/pcache.h +++ b/src/pcache.h @@ -68,7 +68,7 @@ void sqlite3PCacheBufferSetup(void *, int sz, int n); ** Under memory stress, invoke xStress to try to make pages clean. ** Only clean and unpinned pages can be reclaimed. */ -void sqlite3PcacheOpen( +int sqlite3PcacheOpen( int szPage, /* Size of every page */ int szExtra, /* Extra space associated with each page */ int bPurgeable, /* True if pages are on backing store */ @@ -78,7 +78,7 @@ void sqlite3PcacheOpen( ); /* Modify the page-size after the cache has been created. */ -void sqlite3PcacheSetPageSize(PCache *, int); +int sqlite3PcacheSetPageSize(PCache *, int); /* Return the size in bytes of a PCache object. Used to preallocate ** storage space. @@ -88,7 +88,9 @@ int sqlite3PcacheSize(void); /* One release per successful fetch. Page is pinned until released. ** Reference counted. */ -int sqlite3PcacheFetch(PCache*, Pgno, int createFlag, PgHdr**); +sqlite3_pcache_page *sqlite3PcacheFetch(PCache*, Pgno, int createFlag); +int sqlite3PcacheFetchStress(PCache*, Pgno, sqlite3_pcache_page**); +PgHdr *sqlite3PcacheFetchFinish(PCache*, Pgno, sqlite3_pcache_page *pPage); void sqlite3PcacheRelease(PgHdr*); void sqlite3PcacheDrop(PgHdr*); /* Remove page from cache */ diff --git a/src/pragma.c b/src/pragma.c index 9ed5e13eb..12446125f 100644 --- a/src/pragma.c +++ b/src/pragma.c @@ -61,14 +61,15 @@ #define PragTyp_TABLE_INFO 30 #define PragTyp_TEMP_STORE 31 #define PragTyp_TEMP_STORE_DIRECTORY 32 -#define PragTyp_WAL_AUTOCHECKPOINT 33 -#define PragTyp_WAL_CHECKPOINT 34 -#define PragTyp_ACTIVATE_EXTENSIONS 35 -#define PragTyp_HEXKEY 36 -#define PragTyp_KEY 37 -#define PragTyp_REKEY 38 -#define PragTyp_LOCK_STATUS 39 -#define PragTyp_PARSER_TRACE 40 +#define PragTyp_THREADS 33 +#define PragTyp_WAL_AUTOCHECKPOINT 34 +#define PragTyp_WAL_CHECKPOINT 35 +#define PragTyp_ACTIVATE_EXTENSIONS 36 +#define PragTyp_HEXKEY 37 +#define PragTyp_KEY 38 +#define PragTyp_REKEY 39 +#define PragTyp_LOCK_STATUS 40 +#define PragTyp_PARSER_TRACE 41 #define PragFlag_NeedSchema 0x01 static const struct sPragmaNames { const char *const zName; /* Name of pragma */ @@ -418,6 +419,10 @@ static const struct sPragmaNames { /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif + { /* zName: */ "threads", + /* ePragTyp: */ PragTyp_THREADS, + /* ePragFlag: */ 0, + /* iArg: */ 0 }, #if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) { /* zName: */ "user_version", /* ePragTyp: */ PragTyp_HEADER_VALUE, @@ -465,7 +470,7 @@ static const struct sPragmaNames { /* iArg: */ SQLITE_WriteSchema|SQLITE_RecoveryMode }, #endif }; -/* Number of pragmas: 56 on by default, 69 total. */ +/* Number of pragmas: 57 on by default, 70 total. */ /* End of the automatically generated pragma table. 
***************************************************************************/ @@ -2273,6 +2278,26 @@ void sqlite3Pragma( break; } + /* + ** PRAGMA threads + ** PRAGMA threads = N + ** + ** Configure the maximum number of worker threads. Return the new + ** maximum, which might be less than requested. + */ + case PragTyp_THREADS: { + sqlite3_int64 N; + if( zRight + && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK + && N>=0 + ){ + sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, (int)(N&0x7fffffff)); + } + returnSingleInt(pParse, "threads", + sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, -1)); + break; + } + #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) /* ** Report the current state of file logs for all databases diff --git a/src/select.c b/src/select.c index 932874d8f..5508c2e69 100644 --- a/src/select.c +++ b/src/select.c @@ -455,28 +455,43 @@ static KeyInfo *keyInfoFromExprList( ); /* -** Insert code into "v" that will push the record in register regData -** into the sorter. +** Generate code that will push the record in registers regData +** through regData+nData-1 onto the sorter. */ static void pushOntoSorter( Parse *pParse, /* Parser context */ SortCtx *pSort, /* Information about the ORDER BY clause */ Select *pSelect, /* The whole SELECT statement */ - int regData /* Register holding data to be sorted */ + int regData, /* First register holding data to be sorted */ + int nData, /* Number of elements in the data array */ + int nPrefixReg /* No. of reg prior to regData available for use */ ){ - Vdbe *v = pParse->pVdbe; - int nExpr = pSort->pOrderBy->nExpr; - int regRecord = ++pParse->nMem; - int regBase = pParse->nMem+1; - int nOBSat = pSort->nOBSat; - int op; + Vdbe *v = pParse->pVdbe; /* Stmt under construction */ + int bSeq = ((pSort->sortFlags & SORTFLAG_UseSorter)==0); + int nExpr = pSort->pOrderBy->nExpr; /* No. 
of ORDER BY terms */ + int nBase = nExpr + bSeq + nData; /* Fields in sorter record */ + int regBase; /* Regs for sorter record */ + int regRecord = ++pParse->nMem; /* Assembled sorter record */ + int nOBSat = pSort->nOBSat; /* ORDER BY terms to skip */ + int op; /* Opcode to add sorter record to sorter */ + + assert( bSeq==0 || bSeq==1 ); + if( nPrefixReg ){ + assert( nPrefixReg==nExpr+bSeq ); + regBase = regData - nExpr - bSeq; + }else{ + regBase = pParse->nMem + 1; + pParse->nMem += nBase; + } + sqlite3ExprCodeExprList(pParse, pSort->pOrderBy, regBase, SQLITE_ECEL_DUP); + if( bSeq ){ + sqlite3VdbeAddOp2(v, OP_Sequence, pSort->iECursor, regBase+nExpr); + } + if( nPrefixReg==0 ){ + sqlite3VdbeAddOp3(v, OP_Move, regData, regBase+nExpr+bSeq, nData); + } - pParse->nMem += nExpr+2; /* nExpr+2 registers allocated at regBase */ - sqlite3ExprCacheClear(pParse); - sqlite3ExprCodeExprList(pParse, pSort->pOrderBy, regBase, 0); - sqlite3VdbeAddOp2(v, OP_Sequence, pSort->iECursor, regBase+nExpr); - sqlite3ExprCodeMove(pParse, regData, regBase+nExpr+1, 1); - sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase+nOBSat, nExpr+2-nOBSat,regRecord); + sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase+nOBSat, nBase-nOBSat, regRecord); if( nOBSat>0 ){ int regPrevKey; /* The first nOBSat columns of the previous row */ int addrFirst; /* Address of the OP_IfNot opcode */ @@ -487,12 +502,17 @@ static void pushOntoSorter( regPrevKey = pParse->nMem+1; pParse->nMem += pSort->nOBSat; - nKey = nExpr - pSort->nOBSat + 1; - addrFirst = sqlite3VdbeAddOp1(v, OP_IfNot, regBase+nExpr); VdbeCoverage(v); + nKey = nExpr - pSort->nOBSat + bSeq; + if( bSeq ){ + addrFirst = sqlite3VdbeAddOp1(v, OP_IfNot, regBase+nExpr); + }else{ + addrFirst = sqlite3VdbeAddOp1(v, OP_SequenceTest, pSort->iECursor); + } + VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_Compare, regPrevKey, regBase, pSort->nOBSat); pOp = sqlite3VdbeGetOp(v, pSort->addrSortIndex); if( pParse->db->mallocFailed ) return; - pOp->p2 = nKey + 1; + pOp->p2 = nKey + nData; pKI = pOp->p4.pKeyInfo; memset(pKI->aSortOrder, 0, pKI->nField); /* Makes OP_Jump below testable */ sqlite3VdbeChangeP4(v, -1, (char*)pKI, P4_KEYINFO); @@ -626,6 +646,7 @@ static void selectInnerLoop( int eDest = pDest->eDest; /* How to dispose of results */ int iParm = pDest->iSDParm; /* First argument to disposal method */ int nResultCol; /* Number of result columns */ + int nPrefixReg = 0; /* Number of extra registers before regResult */ assert( v ); assert( pEList!=0 ); @@ -641,6 +662,11 @@ static void selectInnerLoop( nResultCol = pEList->nExpr; if( pDest->iSdst==0 ){ + if( pSort ){ + nPrefixReg = pSort->pOrderBy->nExpr; + if( !(pSort->sortFlags & SORTFLAG_UseSorter) ) nPrefixReg++; + pParse->nMem += nPrefixReg; + } pDest->iSdst = pParse->nMem+1; pParse->nMem += nResultCol; }else if( pDest->iSdst+nResultCol > pParse->nMem ){ @@ -757,10 +783,10 @@ static void selectInnerLoop( case SRT_DistFifo: case SRT_Table: case SRT_EphemTab: { - int r1 = sqlite3GetTempReg(pParse); + int r1 = sqlite3GetTempRange(pParse, nPrefixReg+1); testcase( eDest==SRT_Table ); testcase( eDest==SRT_EphemTab ); - sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1); + sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1+nPrefixReg); #ifndef SQLITE_OMIT_CTE if( eDest==SRT_DistFifo ){ /* If the destination is DistFifo, then cursor (iParm+1) is open @@ -775,7 +801,7 @@ static void selectInnerLoop( } #endif if( pSort ){ - pushOntoSorter(pParse, pSort, p, r1); + pushOntoSorter(pParse, pSort, p, r1+nPrefixReg, 1, nPrefixReg); }else{ int 
r2 = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp2(v, OP_NewRowid, iParm, r2); @@ -783,7 +809,7 @@ static void selectInnerLoop( sqlite3VdbeChangeP5(v, OPFLAG_APPEND); sqlite3ReleaseTempReg(pParse, r2); } - sqlite3ReleaseTempReg(pParse, r1); + sqlite3ReleaseTempRange(pParse, r1, nPrefixReg+1); break; } @@ -801,7 +827,7 @@ static void selectInnerLoop( ** ORDER BY in this case since the order of entries in the set ** does not matter. But there might be a LIMIT clause, in which ** case the order does matter */ - pushOntoSorter(pParse, pSort, p, regResult); + pushOntoSorter(pParse, pSort, p, regResult, 1, nPrefixReg); }else{ int r1 = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult,1,r1, &pDest->affSdst, 1); @@ -827,9 +853,9 @@ static void selectInnerLoop( case SRT_Mem: { assert( nResultCol==1 ); if( pSort ){ - pushOntoSorter(pParse, pSort, p, regResult); + pushOntoSorter(pParse, pSort, p, regResult, 1, nPrefixReg); }else{ - sqlite3ExprCodeMove(pParse, regResult, iParm, 1); + assert( regResult==iParm ); /* The LIMIT clause will jump out of the loop for us */ } break; @@ -841,10 +867,7 @@ static void selectInnerLoop( testcase( eDest==SRT_Coroutine ); testcase( eDest==SRT_Output ); if( pSort ){ - int r1 = sqlite3GetTempReg(pParse); - sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1); - pushOntoSorter(pParse, pSort, p, r1); - sqlite3ReleaseTempReg(pParse, r1); + pushOntoSorter(pParse, pSort, p, regResult, nResultCol, nPrefixReg); }else if( eDest==SRT_Coroutine ){ sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm); }else{ @@ -1124,46 +1147,62 @@ static void generateSortTail( int addr; int addrOnce = 0; int iTab; - int pseudoTab = 0; ExprList *pOrderBy = pSort->pOrderBy; int eDest = pDest->eDest; int iParm = pDest->iSDParm; int regRow; int regRowid; int nKey; + int iSortTab; /* Sorter cursor to read from */ + int nSortData; /* Trailing values to read from sorter */ + u8 p5; /* p5 parameter for 1st OP_Column */ + int i; + int bSeq; /* True if sorter record includes seq. no. 
*/ +#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS + struct ExprList_item *aOutEx = p->pEList->a; +#endif if( pSort->labelBkOut ){ sqlite3VdbeAddOp2(v, OP_Gosub, pSort->regReturn, pSort->labelBkOut); sqlite3VdbeAddOp2(v, OP_Goto, 0, addrBreak); sqlite3VdbeResolveLabel(v, pSort->labelBkOut); - addrOnce = sqlite3CodeOnce(pParse); VdbeCoverage(v); } iTab = pSort->iECursor; - regRow = sqlite3GetTempReg(pParse); if( eDest==SRT_Output || eDest==SRT_Coroutine ){ - pseudoTab = pParse->nTab++; - sqlite3VdbeAddOp3(v, OP_OpenPseudo, pseudoTab, regRow, nColumn); regRowid = 0; + regRow = pDest->iSdst; + nSortData = nColumn; }else{ regRowid = sqlite3GetTempReg(pParse); + regRow = sqlite3GetTempReg(pParse); + nSortData = 1; } nKey = pOrderBy->nExpr - pSort->nOBSat; if( pSort->sortFlags & SORTFLAG_UseSorter ){ int regSortOut = ++pParse->nMem; - int ptab2 = pParse->nTab++; - sqlite3VdbeAddOp3(v, OP_OpenPseudo, ptab2, regSortOut, nKey+2); + iSortTab = pParse->nTab++; + if( pSort->labelBkOut ){ + addrOnce = sqlite3CodeOnce(pParse); VdbeCoverage(v); + } + sqlite3VdbeAddOp3(v, OP_OpenPseudo, iSortTab, regSortOut, nKey+1+nSortData); if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce); addr = 1 + sqlite3VdbeAddOp2(v, OP_SorterSort, iTab, addrBreak); VdbeCoverage(v); codeOffset(v, p->iOffset, addrContinue); sqlite3VdbeAddOp2(v, OP_SorterData, iTab, regSortOut); - sqlite3VdbeAddOp3(v, OP_Column, ptab2, nKey+1, regRow); - sqlite3VdbeChangeP5(v, OPFLAG_CLEARCACHE); + p5 = OPFLAG_CLEARCACHE; + bSeq = 0; }else{ - if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce); addr = 1 + sqlite3VdbeAddOp2(v, OP_Sort, iTab, addrBreak); VdbeCoverage(v); codeOffset(v, p->iOffset, addrContinue); - sqlite3VdbeAddOp3(v, OP_Column, iTab, nKey+1, regRow); + iSortTab = iTab; + p5 = 0; + bSeq = 1; + } + for(i=0; i<nSortData; i++){ + sqlite3VdbeAddOp3(v, OP_Column, iSortTab, nKey+bSeq+i, regRow+i); + if( i==0 ) sqlite3VdbeChangeP5(v, p5); + VdbeComment((v, "%s", aOutEx[i].zName ? aOutEx[i].zName : aOutEx[i].zSpan)); } switch( eDest ){ case SRT_Table: @@ -1192,17 +1231,9 @@ static void generateSortTail( } #endif default: { - int i; assert( eDest==SRT_Output || eDest==SRT_Coroutine ); testcase( eDest==SRT_Output ); testcase( eDest==SRT_Coroutine ); - for(i=0; i<nColumn; i++){ - assert( regRow!=pDest->iSdst+i ); - sqlite3VdbeAddOp3(v, OP_Column, pseudoTab, i, pDest->iSdst+i); - if( i==0 ){ - sqlite3VdbeChangeP5(v, OPFLAG_CLEARCACHE); - } - } if( eDest==SRT_Output ){ sqlite3VdbeAddOp2(v, OP_ResultRow, pDest->iSdst, nColumn); sqlite3ExprCacheAffinityChange(pParse, pDest->iSdst, nColumn); @@ -1212,9 +1243,10 @@ static void generateSortTail( break; } } - sqlite3ReleaseTempReg(pParse, regRow); - sqlite3ReleaseTempReg(pParse, regRowid); - + if( regRowid ){ + sqlite3ReleaseTempReg(pParse, regRow); + sqlite3ReleaseTempReg(pParse, regRowid); + } /* The bottom of the loop */ sqlite3VdbeResolveLabel(v, addrContinue); @@ -4755,8 +4787,9 @@ int sqlite3Select( sSort.iECursor = pParse->nTab++; sSort.addrSortIndex = sqlite3VdbeAddOp4(v, OP_OpenEphemeral, - sSort.iECursor, sSort.pOrderBy->nExpr+2, 0, - (char*)pKeyInfo, P4_KEYINFO); + sSort.iECursor, sSort.pOrderBy->nExpr+1+pEList->nExpr, 0, + (char*)pKeyInfo, P4_KEYINFO + ); }else{ sSort.addrSortIndex = -1; } @@ -4887,7 +4920,7 @@ int sqlite3Select( sNC.pSrcList = pTabList; sNC.pAggInfo = &sAggInfo; sAggInfo.mnReg = pParse->nMem+1; - sAggInfo.nSortingColumn = pGroupBy ? pGroupBy->nExpr+1 : 0; + sAggInfo.nSortingColumn = pGroupBy ? 
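[Editorial note] Taken together, the select.c changes above settle on a single layout for a sorter entry: the remaining ORDER BY expressions come first, then an optional sequence counter (present only when the external merge sorter is not in use), then the nData payload columns that generateSortTail() now reads back with OP_Column at offset nKey+bSeq+i. The nOBSat leading ORDER BY terms are omitted from the OP_MakeRecord call because their order is already guaranteed. A small hedged helper that mirrors the offset arithmetic; the names below are illustrative, not SQLite code:

#include <assert.h>

/* Hedged sketch of the sorter-record geometry used by pushOntoSorter()
** and generateSortTail() above.  Field order within one record:
**
**    [ nExpr-nOBSat ORDER BY keys ][ bSeq sequence ][ nData payload columns ]
*/
typedef struct SorterLayout {
  int nExpr;   /* ORDER BY expressions */
  int bSeq;    /* 1 if a sequence counter is included, else 0 */
  int nData;   /* payload (result) columns */
  int nOBSat;  /* ORDER BY terms already satisfied by an index */
} SorterLayout;

static int recordWidth(const SorterLayout *p){
  assert( p->bSeq==0 || p->bSeq==1 );
  return p->nExpr + p->bSeq + p->nData - p->nOBSat;   /* == nBase - nOBSat */
}

static int payloadColumn(const SorterLayout *p, int i){
  /* Column index passed to OP_Column when reading payload column i back */
  return (p->nExpr - p->nOBSat) + p->bSeq + i;        /* == nKey + bSeq + i */
}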
pGroupBy->nExpr : 0; sAggInfo.pGroupBy = pGroupBy; sqlite3ExprAnalyzeAggList(&sNC, pEList); sqlite3ExprAnalyzeAggList(&sNC, sSort.pOrderBy); @@ -4980,8 +5013,8 @@ int sqlite3Select( groupBySort = 1; nGroupBy = pGroupBy->nExpr; - nCol = nGroupBy + 1; - j = nGroupBy+1; + nCol = nGroupBy; + j = nGroupBy; for(i=0; i<sAggInfo.nColumn; i++){ if( sAggInfo.aCol[i].iSorterColumn>=j ){ nCol++; @@ -4991,8 +5024,7 @@ int sqlite3Select( regBase = sqlite3GetTempRange(pParse, nCol); sqlite3ExprCacheClear(pParse); sqlite3ExprCodeExprList(pParse, pGroupBy, regBase, 0); - sqlite3VdbeAddOp2(v, OP_Sequence, sAggInfo.sortingIdx,regBase+nGroupBy); - j = nGroupBy+1; + j = nGroupBy; for(i=0; i<sAggInfo.nColumn; i++){ struct AggInfo_col *pCol = &sAggInfo.aCol[i]; if( pCol->iSorterColumn>=j ){ diff --git a/src/shell.c b/src/shell.c index 932f561bd..c7089fa8f 100644 --- a/src/shell.c +++ b/src/shell.c @@ -475,6 +475,7 @@ struct ShellState { int mode; /* An output mode setting */ int writableSchema; /* True if PRAGMA writable_schema=ON */ int showHeader; /* True to show column names in List or Column mode */ + unsigned shellFlgs; /* Various flags */ char *zDestTable; /* Name of destination table when MODE_Insert */ char separator[20]; /* Separator character for MODE_List */ char newline[20]; /* Record separator in MODE_Csv */ @@ -499,6 +500,13 @@ struct ShellState { }; /* +** These are the allowed shellFlgs values +*/ +#define SHFLG_Scratch 0x00001 /* The --scratch option is used */ +#define SHFLG_Pagecache 0x00002 /* The --pagecache option is used */ +#define SHFLG_Lookaside 0x00004 /* Lookaside memory is used */ + +/* ** These are the allowed modes. */ #define MODE_Line 0 /* One column per line. Blank line between records */ @@ -1114,21 +1122,19 @@ static int display_stats( iHiwtr = iCur = -1; sqlite3_status(SQLITE_STATUS_MALLOC_COUNT, &iCur, &iHiwtr, bReset); fprintf(pArg->out, "Number of Outstanding Allocations: %d (max %d)\n", iCur, iHiwtr); -/* -** Not currently used by the CLI. -** iHiwtr = iCur = -1; -** sqlite3_status(SQLITE_STATUS_PAGECACHE_USED, &iCur, &iHiwtr, bReset); -** fprintf(pArg->out, "Number of Pcache Pages Used: %d (max %d) pages\n", iCur, iHiwtr); -*/ + if( pArg->shellFlgs & SHFLG_Pagecache ){ + iHiwtr = iCur = -1; + sqlite3_status(SQLITE_STATUS_PAGECACHE_USED, &iCur, &iHiwtr, bReset); + fprintf(pArg->out, "Number of Pcache Pages Used: %d (max %d) pages\n", iCur, iHiwtr); + } iHiwtr = iCur = -1; sqlite3_status(SQLITE_STATUS_PAGECACHE_OVERFLOW, &iCur, &iHiwtr, bReset); fprintf(pArg->out, "Number of Pcache Overflow Bytes: %d (max %d) bytes\n", iCur, iHiwtr); -/* -** Not currently used by the CLI. 
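[Editorial note] The GROUP BY path above no longer appends an OP_Sequence column to the sorting-index record (nCol and j now start at nGroupBy rather than nGroupBy+1), so each entry is one field shorter. One way to inspect the program this code generates is to run EXPLAIN over an aggregate query and look for the SorterOpen/SorterInsert/SorterSort opcodes. A hedged, self-contained sketch; the query and table are arbitrary examples:

#include <stdio.h>
#include <sqlite3.h>

/* Dump the VDBE program for a GROUP BY that cannot use an index. */
int main(void){
  sqlite3 *db; sqlite3_stmt *p;
  if( sqlite3_open(":memory:", &db) ) return 1;
  sqlite3_exec(db, "CREATE TABLE t1(a,b);", 0, 0, 0);
  sqlite3_prepare_v2(db,
      "EXPLAIN SELECT b, count(*) FROM t1 GROUP BY b", -1, &p, 0);
  while( sqlite3_step(p)==SQLITE_ROW ){
    printf("%4d %-13s %4d %4d %4d\n",
        sqlite3_column_int(p, 0),                       /* addr   */
        (const char*)sqlite3_column_text(p, 1),         /* opcode */
        sqlite3_column_int(p, 2),                       /* p1     */
        sqlite3_column_int(p, 3),                       /* p2     */
        sqlite3_column_int(p, 4));                      /* p3     */
  }
  sqlite3_finalize(p);
  sqlite3_close(db);
  return 0;
}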
-** iHiwtr = iCur = -1; -** sqlite3_status(SQLITE_STATUS_SCRATCH_USED, &iCur, &iHiwtr, bReset); -** fprintf(pArg->out, "Number of Scratch Allocations Used: %d (max %d)\n", iCur, iHiwtr); -*/ + if( pArg->shellFlgs & SHFLG_Scratch ){ + iHiwtr = iCur = -1; + sqlite3_status(SQLITE_STATUS_SCRATCH_USED, &iCur, &iHiwtr, bReset); + fprintf(pArg->out, "Number of Scratch Allocations Used: %d (max %d)\n", iCur, iHiwtr); + } iHiwtr = iCur = -1; sqlite3_status(SQLITE_STATUS_SCRATCH_OVERFLOW, &iCur, &iHiwtr, bReset); fprintf(pArg->out, "Number of Scratch Overflow Bytes: %d (max %d) bytes\n", iCur, iHiwtr); @@ -1149,15 +1155,17 @@ static int display_stats( } if( pArg && pArg->out && db ){ - iHiwtr = iCur = -1; - sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_USED, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Lookaside Slots Used: %d (max %d)\n", iCur, iHiwtr); - sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_HIT, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Successful lookaside attempts: %d\n", iHiwtr); - sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Lookaside failures due to size: %d\n", iHiwtr); - sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL, &iCur, &iHiwtr, bReset); - fprintf(pArg->out, "Lookaside failures due to OOM: %d\n", iHiwtr); + if( pArg->shellFlgs & SHFLG_Lookaside ){ + iHiwtr = iCur = -1; + sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_USED, &iCur, &iHiwtr, bReset); + fprintf(pArg->out, "Lookaside Slots Used: %d (max %d)\n", iCur, iHiwtr); + sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_HIT, &iCur, &iHiwtr, bReset); + fprintf(pArg->out, "Successful lookaside attempts: %d\n", iHiwtr); + sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE, &iCur, &iHiwtr, bReset); + fprintf(pArg->out, "Lookaside failures due to size: %d\n", iHiwtr); + sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL, &iCur, &iHiwtr, bReset); + fprintf(pArg->out, "Lookaside failures due to OOM: %d\n", iHiwtr); + } iHiwtr = iCur = -1; sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED, &iCur, &iHiwtr, bReset); fprintf(pArg->out, "Pager Heap Usage: %d bytes\n", iCur); iHiwtr = iCur = -1; @@ -4062,12 +4070,15 @@ static const char zOptions[] = " -interactive force interactive I/O\n" " -line set output mode to 'line'\n" " -list set output mode to 'list'\n" + " -lookaside SIZE N use N entries of SZ bytes for lookaside memory\n" " -mmap N default mmap size set to N\n" #ifdef SQLITE_ENABLE_MULTIPLEX " -multiplex enable the multiplexor VFS\n" #endif " -newline SEP set newline character(s) for CSV\n" " -nullvalue TEXT set text string for NULL values. Default ''\n" + " -pagecache SIZE N use N slots of SZ bytes each for page cache memory\n" + " -scratch SIZE N use N slots of SZ bytes each for scratch memory\n" " -separator SEP set output field separator. 
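[Editorial note] The gates added above mean the CLI only prints page-cache, scratch and lookaside statistics when the matching command-line option was given, since those counters only carry meaningful values when the corresponding static buffers have been configured. A minimal hedged sketch of reading the same counters from application code (the helper name is made up):

#include <stdio.h>
#include <sqlite3.h>

/* Query the same global and per-connection counters that display_stats()
** prints.  The final argument of 0 leaves the high-water marks unreset. */
static void printMemStats(sqlite3 *db){
  int cur = 0, hi = 0;
  sqlite3_status(SQLITE_STATUS_PAGECACHE_USED, &cur, &hi, 0);
  printf("pcache pages used:     %d (max %d)\n", cur, hi);
  sqlite3_status(SQLITE_STATUS_SCRATCH_USED, &cur, &hi, 0);
  printf("scratch slots used:    %d (max %d)\n", cur, hi);
  sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_USED, &cur, &hi, 0);
  printf("lookaside slots used:  %d (max %d)\n", cur, hi);
}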
Default: '|'\n" " -stats print memory stats before each finalize\n" " -version show SQLite version\n" @@ -4098,11 +4109,12 @@ static void main_init(ShellState *data) { memcpy(data->separator,"|", 2); memcpy(data->newline,"\r\n", 3); data->showHeader = 0; + data->shellFlgs = SHFLG_Lookaside; sqlite3_config(SQLITE_CONFIG_URI, 1); sqlite3_config(SQLITE_CONFIG_LOG, shellLog, data); + sqlite3_config(SQLITE_CONFIG_MULTITHREAD); sqlite3_snprintf(sizeof(mainPrompt), mainPrompt,"sqlite> "); sqlite3_snprintf(sizeof(continuePrompt), continuePrompt," ...> "); - sqlite3_config(SQLITE_CONFIG_SINGLETHREAD); } /* @@ -4211,6 +4223,33 @@ int main(int argc, char **argv){ if( szHeap>0x7fff0000 ) szHeap = 0x7fff0000; sqlite3_config(SQLITE_CONFIG_HEAP, malloc((int)szHeap), (int)szHeap, 64); #endif + }else if( strcmp(z,"-scratch")==0 ){ + int n, sz; + sz = (int)integerValue(cmdline_option_value(argc,argv,++i)); + if( sz>400000 ) sz = 400000; + if( sz<2500 ) sz = 2500; + n = (int)integerValue(cmdline_option_value(argc,argv,++i)); + if( n>10 ) n = 10; + if( n<1 ) n = 1; + sqlite3_config(SQLITE_CONFIG_SCRATCH, malloc(n*sz+1), sz, n); + data.shellFlgs |= SHFLG_Scratch; + }else if( strcmp(z,"-pagecache")==0 ){ + int n, sz; + sz = (int)integerValue(cmdline_option_value(argc,argv,++i)); + if( sz>70000 ) sz = 70000; + if( sz<800 ) sz = 800; + n = (int)integerValue(cmdline_option_value(argc,argv,++i)); + if( n<10 ) n = 10; + sqlite3_config(SQLITE_CONFIG_PAGECACHE, malloc(n*sz+1), sz, n); + data.shellFlgs |= SHFLG_Pagecache; + }else if( strcmp(z,"-lookaside")==0 ){ + int n, sz; + sz = (int)integerValue(cmdline_option_value(argc,argv,++i)); + if( sz<0 ) sz = 0; + n = (int)integerValue(cmdline_option_value(argc,argv,++i)); + if( n<0 ) n = 0; + sqlite3_config(SQLITE_CONFIG_LOOKASIDE, sz, n); + if( sz*n==0 ) data.shellFlgs &= ~SHFLG_Lookaside; #ifdef SQLITE_ENABLE_VFSTRACE }else if( strcmp(z,"-vfstrace")==0 ){ extern int vfstrace_register( @@ -4326,6 +4365,12 @@ int main(int argc, char **argv){ stdin_is_interactive = 0; }else if( strcmp(z,"-heap")==0 ){ i++; + }else if( strcmp(z,"-scratch")==0 ){ + i+=2; + }else if( strcmp(z,"-pagecache")==0 ){ + i+=2; + }else if( strcmp(z,"-lookaside")==0 ){ + i+=2; }else if( strcmp(z,"-mmap")==0 ){ i++; }else if( strcmp(z,"-vfs")==0 ){ diff --git a/src/sqlite.h.in b/src/sqlite.h.in index c85ec1dd5..2d8f1b6e4 100644 --- a/src/sqlite.h.in +++ b/src/sqlite.h.in @@ -3073,6 +3073,10 @@ int sqlite3_limit(sqlite3*, int id, int newVal); ** ** [[SQLITE_LIMIT_TRIGGER_DEPTH]] ^(<dt>SQLITE_LIMIT_TRIGGER_DEPTH</dt> ** <dd>The maximum depth of recursion for triggers.</dd>)^ +** +** [[SQLITE_LIMIT_WORKER_THREADS]] ^(<dt>SQLITE_LIMIT_WORKER_THREADS</dt> +** <dd>The maximum number of auxiliary worker threads that a single +** [prepared statement] may start.</dd>)^ ** </dl> */ #define SQLITE_LIMIT_LENGTH 0 @@ -3086,6 +3090,7 @@ int sqlite3_limit(sqlite3*, int id, int newVal); #define SQLITE_LIMIT_LIKE_PATTERN_LENGTH 8 #define SQLITE_LIMIT_VARIABLE_NUMBER 9 #define SQLITE_LIMIT_TRIGGER_DEPTH 10 +#define SQLITE_LIMIT_WORKER_THREADS 11 /* ** CAPI3REF: Compiling An SQL Statement @@ -6160,7 +6165,8 @@ int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_VDBE_COVERAGE 21 #define SQLITE_TESTCTRL_BYTEORDER 22 #define SQLITE_TESTCTRL_ISINIT 23 -#define SQLITE_TESTCTRL_LAST 23 +#define SQLITE_TESTCTRL_SORTER_MMAP 24 +#define SQLITE_TESTCTRL_LAST 24 /* ** CAPI3REF: SQLite Runtime Status diff --git a/src/sqliteInt.h b/src/sqliteInt.h index 695531ab3..248e51337 100644 --- a/src/sqliteInt.h +++ b/src/sqliteInt.h 
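[Editorial note] For reference, the new -pagecache, -scratch and -lookaside shell options boil down to sqlite3_config() calls that must be issued before the library is initialized, and main_init() now selects SQLITE_CONFIG_MULTITHREAD instead of SQLITE_CONFIG_SINGLETHREAD so that the worker-thread sorter is usable from the shell. A hedged sketch of the equivalent application-side configuration; the sizes and counts below are arbitrary examples, not recommendations:

#include <stdlib.h>
#include <sqlite3.h>

/* Configure static memory pools and threading mode before first use. */
static int configureMemory(void){
  int rc;
  rc = sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
  if( rc ) return rc;
  /* like "-pagecache 4096 100": 100 slots of 4096 bytes each */
  rc = sqlite3_config(SQLITE_CONFIG_PAGECACHE, malloc(100*4096), 4096, 100);
  if( rc ) return rc;
  /* like "-scratch 25000 2": 2 slots of 25000 bytes each */
  rc = sqlite3_config(SQLITE_CONFIG_SCRATCH, malloc(2*25000), 25000, 2);
  if( rc ) return rc;
  /* like "-lookaside 200 500": 500 lookaside buffers of 200 bytes each */
  return sqlite3_config(SQLITE_CONFIG_LOOKASIDE, 200, 500);
}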
@@ -435,6 +435,27 @@ #endif /* +** If no value has been provided for SQLITE_MAX_WORKER_THREADS, or if +** SQLITE_TEMP_STORE is set to 3 (never use temporary files), set it +** to zero. +*/ +#if SQLITE_TEMP_STORE==3 || SQLITE_THREADSAFE==0 +# undef SQLITE_MAX_WORKER_THREADS +# define SQLITE_MAX_WORKER_THREADS 0 +#endif +#ifndef SQLITE_MAX_WORKER_THREADS +# define SQLITE_MAX_WORKER_THREADS 8 +#endif +#ifndef SQLITE_DEFAULT_WORKER_THREADS +# define SQLITE_DEFAULT_WORKER_THREADS 0 +#endif +#if SQLITE_DEFAULT_WORKER_THREADS>SQLITE_MAX_WORKER_THREADS +# undef SQLITE_MAX_WORKER_THREADS +# define SQLITE_MAX_WORKER_THREADS SQLITE_DEFAULT_WORKER_THREADS +#endif + + +/* ** GCC does not define the offsetof() macro so we'll have to do it ** ourselves. */ @@ -818,6 +839,7 @@ typedef struct PrintfArguments PrintfArguments; typedef struct RowSet RowSet; typedef struct Savepoint Savepoint; typedef struct Select Select; +typedef struct SQLiteThread SQLiteThread; typedef struct SelectDest SelectDest; typedef struct SrcList SrcList; typedef struct StrAccum StrAccum; @@ -920,7 +942,7 @@ struct Schema { ** The number of different kinds of things that can be limited ** using the sqlite3_limit() interface. */ -#define SQLITE_N_LIMIT (SQLITE_LIMIT_TRIGGER_DEPTH+1) +#define SQLITE_N_LIMIT (SQLITE_LIMIT_WORKER_THREADS+1) /* ** Lookaside malloc is a set of fixed-size buffers that can be used @@ -997,6 +1019,7 @@ struct sqlite3 { int nChange; /* Value returned by sqlite3_changes() */ int nTotalChange; /* Value returned by sqlite3_total_changes() */ int aLimit[SQLITE_N_LIMIT]; /* Limits */ + int nMaxSorterMmap; /* Maximum size of regions mapped by sorter */ struct sqlite3InitInfo { /* Information used during initialization */ int newTnum; /* Rootpage of table being initialized */ u8 iDb; /* Which db file is being initialized */ @@ -1667,7 +1690,7 @@ struct UnpackedRecord { KeyInfo *pKeyInfo; /* Collation and sort-order information */ u16 nField; /* Number of entries in apMem[] */ i8 default_rc; /* Comparison result if keys are equal */ - u8 isCorrupt; /* Corruption detected by xRecordCompare() */ + u8 errCode; /* Error detected by xRecordCompare (CORRUPT or NOMEM) */ Mem *aMem; /* Values */ int r1; /* Value to return if (lhs > rhs) */ int r2; /* Value to return if (rhs < lhs) */ @@ -3715,4 +3738,12 @@ SQLITE_EXTERN void (*sqlite3IoTrace)(const char*,...); #define MEMTYPE_PCACHE 0x08 /* Page cache allocations */ #define MEMTYPE_DB 0x10 /* Uses sqlite3DbMalloc, not sqlite_malloc */ +/* +** Threading interface +*/ +#if SQLITE_MAX_WORKER_THREADS>0 +int sqlite3ThreadCreate(SQLiteThread**,void*(*)(void*),void*); +int sqlite3ThreadJoin(SQLiteThread*, void**); +#endif + #endif /* _SQLITEINT_H_ */ diff --git a/src/test1.c b/src/test1.c index 56487f6ab..34faeaadf 100644 --- a/src/test1.c +++ b/src/test1.c @@ -2718,6 +2718,46 @@ bad_args: } /* +** Usage: add_test_utf16bin_collate <db ptr> +** +** Add a utf-16 collation sequence named "utf16bin" to the database +** handle. This collation sequence compares arguments in the same way as the +** built-in collation "binary". +*/ +static int test_utf16bin_collate_func( + void *pCtx, + int nA, const void *zA, + int nB, const void *zB +){ + int nCmp = (nA>nB ? 
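[Editorial note] The preprocessor block above forces SQLITE_MAX_WORKER_THREADS to zero when temporary files are disallowed (SQLITE_TEMP_STORE==3) or the build is single-threaded, and otherwise defaults it to 8, with SQLITE_DEFAULT_WORKER_THREADS supplying the per-connection starting value. At runtime the per-statement ceiling is adjusted through the new SQLITE_LIMIT_WORKER_THREADS limit added in sqlite.h.in above. A minimal hedged sketch:

#include <sqlite3.h>

/* Raise the cap on auxiliary worker threads that a single prepared
** statement on this connection may start.  Passing a negative value
** instead would query the current setting without changing it. */
static void allowWorkerThreads(sqlite3 *db, int nThread){
  int prev = sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, nThread);
  (void)prev;   /* previous limit; capped by SQLITE_MAX_WORKER_THREADS */
}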
nB : nA); + int res = memcmp(zA, zB, nCmp); + if( res==0 ) res = nA - nB; + return res; +} +static int test_utf16bin_collate( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + sqlite3 *db; + int rc; + + if( objc!=2 ) goto bad_args; + if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; + + rc = sqlite3_create_collation(db, "utf16bin", SQLITE_UTF16, 0, + test_utf16bin_collate_func + ); + if( sqlite3TestErrCode(interp, db, rc) ) return TCL_ERROR; + return TCL_OK; + +bad_args: + Tcl_WrongNumArgs(interp, 1, objv, "DB"); + return TCL_ERROR; +} + +/* ** When the collation needed callback is invoked, record the name of ** the requested collating function here. The recorded name is linked ** to a TCL variable and used to make sure that the requested collation @@ -5895,6 +5935,7 @@ static int test_test_control( int i; } aVerb[] = { { "SQLITE_TESTCTRL_LOCALTIME_FAULT", SQLITE_TESTCTRL_LOCALTIME_FAULT }, + { "SQLITE_TESTCTRL_SORTER_MMAP", SQLITE_TESTCTRL_SORTER_MMAP }, }; int iVerb; int iFlag; @@ -5922,6 +5963,19 @@ static int test_test_control( sqlite3_test_control(SQLITE_TESTCTRL_LOCALTIME_FAULT, val); break; } + + case SQLITE_TESTCTRL_SORTER_MMAP: { + int val; + sqlite3 *db; + if( objc!=4 ){ + Tcl_WrongNumArgs(interp, 2, objv, "DB LIMIT"); + return TCL_ERROR; + } + if( getDbPointer(interp, Tcl_GetString(objv[2]), &db) ) return TCL_ERROR; + if( Tcl_GetIntFromObj(interp, objv[3], &val) ) return TCL_ERROR; + sqlite3_test_control(SQLITE_TESTCTRL_SORTER_MMAP, db, val); + break; + } } Tcl_ResetResult(interp); @@ -6335,6 +6389,113 @@ static int tclLoadStaticExtensionCmd( return TCL_OK; } +/* +** sorter_test_fakeheap BOOL +** +*/ +static int sorter_test_fakeheap( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + int bArg; + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "BOOL"); + return TCL_ERROR; + } + + if( Tcl_GetBooleanFromObj(interp, objv[1], &bArg) ){ + return TCL_ERROR; + } + + if( bArg ){ + if( sqlite3GlobalConfig.pHeap==0 ){ + sqlite3GlobalConfig.pHeap = SQLITE_INT_TO_PTR(-1); + } + }else{ + if( sqlite3GlobalConfig.pHeap==SQLITE_INT_TO_PTR(-1) ){ + sqlite3GlobalConfig.pHeap = 0; + } + } + + Tcl_ResetResult(interp); + return TCL_OK; +} + +/* +** sorter_test_sort4_helper DB SQL1 NSTEP SQL2 +** +** Compile SQL statement $SQL1 and step it $NSTEP times. For each row, +** check that the leftmost and rightmost columns returned are both integers, +** and that both contain the same value. +** +** Then execute statement $SQL2. Check that the statement returns the same +** set of integers in the same order as in the previous step (using $SQL1). 
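[Editorial note] The test collation above is a byte-wise memcmp() with a shorter-string tiebreak, i.e. the UTF-16 analogue of the built-in BINARY collation. Registering the same collation from ordinary application code uses the public API; a hedged sketch with made-up helper names:

#include <string.h>
#include <sqlite3.h>

/* Byte-wise UTF-16 comparator, equivalent to the test harness version. */
static int utf16binCmp(void *pArg, int nA, const void *zA,
                                   int nB, const void *zB){
  int n = nA<nB ? nA : nB;
  int res = memcmp(zA, zB, n);
  return res ? res : nA-nB;
}

static int registerUtf16bin(sqlite3 *db){
  return sqlite3_create_collation(db, "utf16bin", SQLITE_UTF16, 0, utf16binCmp);
}

/* After registration:  SELECT x FROM t1 ORDER BY x COLLATE utf16bin; */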
+*/ +static int sorter_test_sort4_helper( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + const char *zSql1; + const char *zSql2; + int nStep; + int iStep; + int iCksum1 = 0; + int iCksum2 = 0; + int rc; + int iB; + sqlite3 *db; + sqlite3_stmt *pStmt; + + if( objc!=5 ){ + Tcl_WrongNumArgs(interp, 1, objv, "DB SQL1 NSTEP SQL2"); + return TCL_ERROR; + } + + if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; + zSql1 = Tcl_GetString(objv[2]); + if( Tcl_GetIntFromObj(interp, objv[3], &nStep) ) return TCL_ERROR; + zSql2 = Tcl_GetString(objv[4]); + + rc = sqlite3_prepare_v2(db, zSql1, -1, &pStmt, 0); + if( rc!=SQLITE_OK ) goto sql_error; + + iB = sqlite3_column_count(pStmt)-1; + for(iStep=0; iStep<nStep && SQLITE_ROW==sqlite3_step(pStmt); iStep++){ + int a = sqlite3_column_int(pStmt, 0); + if( a!=sqlite3_column_int(pStmt, iB) ){ + Tcl_AppendResult(interp, "data error: (a!=b)", 0); + return TCL_ERROR; + } + + iCksum1 += (iCksum1 << 3) + a; + } + rc = sqlite3_finalize(pStmt); + if( rc!=SQLITE_OK ) goto sql_error; + + rc = sqlite3_prepare_v2(db, zSql2, -1, &pStmt, 0); + if( rc!=SQLITE_OK ) goto sql_error; + for(iStep=0; SQLITE_ROW==sqlite3_step(pStmt); iStep++){ + int a = sqlite3_column_int(pStmt, 0); + iCksum2 += (iCksum2 << 3) + a; + } + rc = sqlite3_finalize(pStmt); + if( rc!=SQLITE_OK ) goto sql_error; + + if( iCksum1!=iCksum2 ){ + Tcl_AppendResult(interp, "checksum mismatch", 0); + return TCL_ERROR; + } + + return TCL_OK; + sql_error: + Tcl_AppendResult(interp, "sql error: ", sqlite3_errmsg(db), 0); + return TCL_ERROR; +} + /* ** Register commands with the TCL interpreter. @@ -6537,6 +6698,7 @@ int Sqlitetest1_Init(Tcl_Interp *interp){ { "add_test_collate", test_collate, 0 }, { "add_test_collate_needed", test_collate_needed, 0 }, { "add_test_function", test_function, 0 }, + { "add_test_utf16bin_collate", test_utf16bin_collate, 0 }, #endif { "sqlite3_test_errstr", test_errstr, 0 }, { "tcl_variable_type", tcl_variable_type, 0 }, @@ -6570,6 +6732,8 @@ int Sqlitetest1_Init(Tcl_Interp *interp){ { "getrusage", test_getrusage }, #endif { "load_static_extension", tclLoadStaticExtensionCmd }, + { "sorter_test_fakeheap", sorter_test_fakeheap }, + { "sorter_test_sort4_helper", sorter_test_sort4_helper }, }; static int bitmask_size = sizeof(Bitmask)*8; int i; diff --git a/src/test_config.c b/src/test_config.c index 74f3c2e7c..00843146b 100644 --- a/src/test_config.c +++ b/src/test_config.c @@ -103,6 +103,10 @@ static void set_options(Tcl_Interp *interp){ Tcl_SetVar2(interp, "sqlite_options", "mmap", "0", TCL_GLOBAL_ONLY); #endif + Tcl_SetVar2(interp, "sqlite_options", "worker_threads", + STRINGVALUE(SQLITE_MAX_WORKER_THREADS), TCL_GLOBAL_ONLY + ); + #if 1 /* def SQLITE_MEMDEBUG */ Tcl_SetVar2(interp, "sqlite_options", "memdebug", "1", TCL_GLOBAL_ONLY); #else diff --git a/src/test_malloc.c b/src/test_malloc.c index e3cfcaa9f..900a8ac40 100644 --- a/src/test_malloc.c +++ b/src/test_malloc.c @@ -1253,6 +1253,7 @@ static int test_config_cis( return TCL_OK; } + /* ** Usage: sqlite3_dump_memsys3 FILENAME ** sqlite3_dump_memsys5 FILENAME diff --git a/src/threads.c b/src/threads.c new file mode 100644 index 000000000..213a129c9 --- /dev/null +++ b/src/threads.c @@ -0,0 +1,262 @@ +/* +** 2012 July 21 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. 
+** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file presents a simple cross-platform threading interface for +** use internally by SQLite. +** +** A "thread" can be created using sqlite3ThreadCreate(). This thread +** runs independently of its creator until it is joined using +** sqlite3ThreadJoin(), at which point it terminates. +** +** Threads do not have to be real. It could be that the work of the +** "thread" is done by the main thread at either the sqlite3ThreadCreate() +** or sqlite3ThreadJoin() call. This is, in fact, what happens in +** single threaded systems. Nothing in SQLite requires multiple threads. +** This interface exists so that applications that want to take advantage +** of multiple cores can do so, while also allowing applications to stay +** single-threaded if desired. +*/ +#include "sqliteInt.h" + +#if SQLITE_MAX_WORKER_THREADS>0 + +/********************************* Unix Pthreads ****************************/ +#if SQLITE_OS_UNIX && defined(SQLITE_MUTEX_PTHREADS) && SQLITE_THREADSAFE>0 + +#define SQLITE_THREADS_IMPLEMENTED 1 /* Prevent the single-thread code below */ +#include <pthread.h> + +/* A running thread */ +struct SQLiteThread { + pthread_t tid; /* Thread ID */ + int done; /* Set to true when thread finishes */ + void *pOut; /* Result returned by the thread */ + void *(*xTask)(void*); /* The thread routine */ + void *pIn; /* Argument to the thread */ +}; + +/* Create a new thread */ +int sqlite3ThreadCreate( + SQLiteThread **ppThread, /* OUT: Write the thread object here */ + void *(*xTask)(void*), /* Routine to run in a separate thread */ + void *pIn /* Argument passed into xTask() */ +){ + SQLiteThread *p; + int rc; + + assert( ppThread!=0 ); + assert( xTask!=0 ); + /* This routine is never used in single-threaded mode */ + assert( sqlite3GlobalConfig.bCoreMutex!=0 ); + + *ppThread = 0; + p = sqlite3Malloc(sizeof(*p)); + if( p==0 ) return SQLITE_NOMEM; + memset(p, 0, sizeof(*p)); + p->xTask = xTask; + p->pIn = pIn; + if( sqlite3FaultSim(200) ){ + rc = 1; + }else{ + rc = pthread_create(&p->tid, 0, xTask, pIn); + } + if( rc ){ + p->done = 1; + p->pOut = xTask(pIn); + } + *ppThread = p; + return SQLITE_OK; +} + +/* Get the results of the thread */ +int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){ + int rc; + + assert( ppOut!=0 ); + if( NEVER(p==0) ) return SQLITE_NOMEM; + if( p->done ){ + *ppOut = p->pOut; + rc = SQLITE_OK; + }else{ + rc = pthread_join(p->tid, ppOut) ? 
SQLITE_ERROR : SQLITE_OK; + } + sqlite3_free(p); + return rc; +} + +#endif /* SQLITE_OS_UNIX && defined(SQLITE_MUTEX_PTHREADS) */ +/******************************** End Unix Pthreads *************************/ + + +/********************************* Win32 Threads ****************************/ +#if SQLITE_OS_WIN && !SQLITE_OS_WINRT && SQLITE_THREADSAFE>0 + +#define SQLITE_THREADS_IMPLEMENTED 1 /* Prevent the single-thread code below */ +#include <process.h> + +/* A running thread */ +struct SQLiteThread { + uintptr_t tid; /* The thread handle */ + unsigned id; /* The thread identifier */ + void *(*xTask)(void*); /* The routine to run as a thread */ + void *pIn; /* Argument to xTask */ + void *pResult; /* Result of xTask */ +}; + +/* Thread procedure Win32 compatibility shim */ +static unsigned __stdcall sqlite3ThreadProc( + void *pArg /* IN: Pointer to the SQLiteThread structure */ +){ + SQLiteThread *p = (SQLiteThread *)pArg; + + assert( p!=0 ); +#if 0 + /* + ** This assert appears to trigger spuriously on certain + ** versions of Windows, possibly due to _beginthreadex() + ** and/or CreateThread() not fully setting their thread + ** ID parameter before starting the thread. + */ + assert( p->id==GetCurrentThreadId() ); +#endif + assert( p->xTask!=0 ); + p->pResult = p->xTask(p->pIn); + + _endthreadex(0); + return 0; /* NOT REACHED */ +} + +/* Create a new thread */ +int sqlite3ThreadCreate( + SQLiteThread **ppThread, /* OUT: Write the thread object here */ + void *(*xTask)(void*), /* Routine to run in a separate thread */ + void *pIn /* Argument passed into xTask() */ +){ + SQLiteThread *p; + + assert( ppThread!=0 ); + assert( xTask!=0 ); + *ppThread = 0; + p = sqlite3Malloc(sizeof(*p)); + if( p==0 ) return SQLITE_NOMEM; + if( sqlite3GlobalConfig.bCoreMutex==0 ){ + memset(p, 0, sizeof(*p)); + }else{ + p->xTask = xTask; + p->pIn = pIn; + p->tid = _beginthreadex(0, 0, sqlite3ThreadProc, p, 0, &p->id); + if( p->tid==0 ){ + memset(p, 0, sizeof(*p)); + } + } + if( p->xTask==0 ){ + p->id = GetCurrentThreadId(); + p->pResult = xTask(pIn); + } + *ppThread = p; + return SQLITE_OK; +} + +DWORD sqlite3Win32Wait(HANDLE hObject); /* os_win.c */ + +/* Get the results of the thread */ +int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){ + DWORD rc; + BOOL bRc; + + assert( ppOut!=0 ); + if( NEVER(p==0) ) return SQLITE_NOMEM; + if( p->xTask==0 ){ + assert( p->id==GetCurrentThreadId() ); + rc = WAIT_OBJECT_0; + assert( p->tid==0 ); + }else{ + assert( p->id!=0 && p->id!=GetCurrentThreadId() ); + rc = sqlite3Win32Wait((HANDLE)p->tid); + assert( rc!=WAIT_IO_COMPLETION ); + bRc = CloseHandle((HANDLE)p->tid); + assert( bRc ); + } + if( rc==WAIT_OBJECT_0 ) *ppOut = p->pResult; + sqlite3_free(p); + return (rc==WAIT_OBJECT_0) ? SQLITE_OK : SQLITE_ERROR; +} + +#endif /* SQLITE_OS_WIN && !SQLITE_OS_WINRT */ +/******************************** End Win32 Threads *************************/ + + +/********************************* Single-Threaded **************************/ +#ifndef SQLITE_THREADS_IMPLEMENTED +/* +** This implementation does not actually create a new thread. 
It does the +** work of the thread in the main thread, when either the thread is created +** or when it is joined +*/ + +/* A running thread */ +struct SQLiteThread { + void *(*xTask)(void*); /* The routine to run as a thread */ + void *pIn; /* Argument to xTask */ + void *pResult; /* Result of xTask */ +}; + +/* Create a new thread */ +int sqlite3ThreadCreate( + SQLiteThread **ppThread, /* OUT: Write the thread object here */ + void *(*xTask)(void*), /* Routine to run in a separate thread */ + void *pIn /* Argument passed into xTask() */ +){ + SQLiteThread *p; + + assert( ppThread!=0 ); + assert( xTask!=0 ); + *ppThread = 0; + p = sqlite3Malloc(sizeof(*p)); + if( p==0 ) return SQLITE_NOMEM; + if( (SQLITE_PTR_TO_INT(p)/17)&1 ){ + p->xTask = xTask; + p->pIn = pIn; + }else{ + p->xTask = 0; + p->pResult = xTask(pIn); + } + *ppThread = p; + return SQLITE_OK; +} + +/* Get the results of the thread */ +int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){ + + assert( ppOut!=0 ); + if( NEVER(p==0) ) return SQLITE_NOMEM; + if( p->xTask ){ + *ppOut = p->xTask(p->pIn); + }else{ + *ppOut = p->pResult; + } + sqlite3_free(p); + +#if defined(SQLITE_TEST) + { + void *pTstAlloc = sqlite3Malloc(10); + if (!pTstAlloc) return SQLITE_NOMEM; + sqlite3_free(pTstAlloc); + } +#endif + + return SQLITE_OK; +} + +#endif /* !defined(SQLITE_THREADS_IMPLEMENTED) */ +/****************************** End Single-Threaded *************************/ +#endif /* SQLITE_MAX_WORKER_THREADS>0 */ diff --git a/src/vdbe.c b/src/vdbe.c index 0f76cff82..36f05afbc 100644 --- a/src/vdbe.c +++ b/src/vdbe.c @@ -1175,7 +1175,7 @@ case OP_Move: { assert( pIn1<=&aMem[(p->nMem-p->nCursor)] ); assert( memIsValid(pIn1) ); memAboutToChange(p, pOut); - VdbeMemReleaseExtern(pOut); + sqlite3VdbeMemRelease(pOut); zMalloc = pOut->zMalloc; memcpy(pOut, pIn1, sizeof(Mem)); #ifdef SQLITE_DEBUG @@ -1555,8 +1555,8 @@ case OP_Function: { apVal = p->apArg; assert( apVal || n==0 ); assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) ); - pOut = &aMem[pOp->p3]; - memAboutToChange(p, pOut); + ctx.pOut = &aMem[pOp->p3]; + memAboutToChange(p, ctx.pOut); assert( n==0 || (pOp->p2>0 && pOp->p2+n<=(p->nMem-p->nCursor)+1) ); assert( pOp->p3<pOp->p2 || pOp->p3>=pOp->p2+n ); @@ -1572,16 +1572,7 @@ case OP_Function: { ctx.pFunc = pOp->p4.pFunc; ctx.iOp = pc; ctx.pVdbe = p; - - /* The output cell may already have a buffer allocated. Move - ** the pointer to ctx.s so in case the user-function can use - ** the already allocated buffer instead of allocating a new one. - */ - memcpy(&ctx.s, pOut, sizeof(Mem)); - pOut->flags = MEM_Null; - pOut->xDel = 0; - pOut->zMalloc = 0; - MemSetTypeFlag(&ctx.s, MEM_Null); + MemSetTypeFlag(ctx.pOut, MEM_Null); ctx.fErrorOrAux = 0; if( ctx.pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){ @@ -1594,43 +1585,23 @@ case OP_Function: { (*ctx.pFunc->xFunc)(&ctx, n, apVal); /* IMP: R-24505-23230 */ lastRowid = db->lastRowid; - if( db->mallocFailed ){ - /* Even though a malloc() has failed, the implementation of the - ** user function may have called an sqlite3_result_XXX() function - ** to return a value. The following call releases any resources - ** associated with such a value. 
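[Editorial note] All three threads.c back-ends (pthreads, Win32, and the single-threaded fallback) expose the same two-call contract: create, then join exactly once, which also frees the SQLiteThread object. In builds without real threads the work simply runs inside one of those two calls, so callers must not assume the task runs concurrently. A hedged usage sketch against the internal interface; the worker routine and its payload are hypothetical:

#include "sqliteInt.h"

/* Stand-in for real background work. */
static void *workerMain(void *pArg){
  int *pSum = (int*)pArg;
  *pSum = 1 + 2 + 3;
  return pArg;
}

static int runOneWorker(void){
  SQLiteThread *pThread = 0;
  void *pRet = 0;
  int sum = 0;
  int rc = sqlite3ThreadCreate(&pThread, workerMain, &sum);
  if( rc==SQLITE_OK ){
    /* ... the creating thread may do unrelated work here ... */
    rc = sqlite3ThreadJoin(pThread, &pRet);   /* also frees pThread */
  }
  return rc;   /* by now sum==6 whether or not a real thread was used */
}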
- */ - sqlite3VdbeMemRelease(&ctx.s); - goto no_mem; - } - /* If the function returned an error, throw an exception */ if( ctx.fErrorOrAux ){ if( ctx.isError ){ - sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3_value_text(&ctx.s)); + sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3_value_text(ctx.pOut)); rc = ctx.isError; } sqlite3VdbeDeleteAuxData(p, pc, pOp->p1); } /* Copy the result of the function into register P3 */ - sqlite3VdbeChangeEncoding(&ctx.s, encoding); - assert( pOut->flags==MEM_Null ); - memcpy(pOut, &ctx.s, sizeof(Mem)); - if( sqlite3VdbeMemTooBig(pOut) ){ + sqlite3VdbeChangeEncoding(ctx.pOut, encoding); + if( sqlite3VdbeMemTooBig(ctx.pOut) ){ goto too_big; } -#if 0 - /* The app-defined function has done something that as caused this - ** statement to expire. (Perhaps the function called sqlite3_exec() - ** with a CREATE TABLE statement.) - */ - if( p->expired ) rc = SQLITE_ABORT; -#endif - - REGISTER_TRACE(pOp->p3, pOut); - UPDATE_MAX_BLOBSIZE(pOut); + REGISTER_TRACE(pOp->p3, ctx.pOut); + UPDATE_MAX_BLOBSIZE(ctx.pOut); break; } @@ -1779,6 +1750,7 @@ case OP_RealAffinity: { /* in1 */ #ifndef SQLITE_OMIT_CAST /* Opcode: Cast P1 P2 * * * +** Synopsis: affinity(r[P1]) ** ** Force the value in register P1 to be the type defined by P2. ** @@ -3400,11 +3372,15 @@ case OP_OpenEphemeral: { break; } -/* Opcode: SorterOpen P1 P2 * P4 * +/* Opcode: SorterOpen P1 P2 P3 P4 * ** ** This opcode works like OP_OpenEphemeral except that it opens ** a transient index that is specifically designed to sort large ** tables using an external merge-sort algorithm. +** +** If argument P3 is non-zero, then it indicates that the sorter may +** assume that a stable sort considering the first P3 fields of each +** key is sufficient to produce the required results. */ case OP_SorterOpen: { VdbeCursor *pCx; @@ -3416,7 +3392,25 @@ case OP_SorterOpen: { pCx->pKeyInfo = pOp->p4.pKeyInfo; assert( pCx->pKeyInfo->db==db ); assert( pCx->pKeyInfo->enc==ENC(db) ); - rc = sqlite3VdbeSorterInit(db, pCx); + rc = sqlite3VdbeSorterInit(db, pOp->p3, pCx); + break; +} + +/* Opcode: SequenceTest P1 P2 * * * +** Synopsis: if( cursor[P1].ctr++ ) pc = P2 +** +** P1 is a sorter cursor. If the sequence counter is currently zero, jump +** to P2. Regardless of whether or not the jump is taken, increment the +** the sequence value. 
+*/ +case OP_SequenceTest: { + VdbeCursor *pC; + assert( pOp->p1>=0 && pOp->p1<p->nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC->pSorter ); + if( (pC->seqCount++)==0 ){ + pc = pOp->p2 - 1; + } break; } @@ -4316,6 +4310,7 @@ case OP_SorterCompare: { assert( pOp->p4type==P4_INT32 ); pIn3 = &aMem[pOp->p3]; nKeyCol = pOp->p4.i; + res = 0; rc = sqlite3VdbeSorterCompare(pC, pIn3, nKeyCol, &res); VdbeBranchTaken(res!=0,2); if( res ){ @@ -4580,7 +4575,7 @@ case OP_Rewind: { /* jump */ pC->seekOp = OP_Rewind; #endif if( isSorter(pC) ){ - rc = sqlite3VdbeSorterRewind(db, pC, &res); + rc = sqlite3VdbeSorterRewind(pC, &res); }else{ pCrsr = pC->pCursor; assert( pCrsr ); @@ -4758,7 +4753,7 @@ case OP_IdxInsert: { /* in2 */ rc = ExpandBlob(pIn2); if( rc==SQLITE_OK ){ if( isSorter(pC) ){ - rc = sqlite3VdbeSorterWrite(db, pC, pIn2); + rc = sqlite3VdbeSorterWrite(pC, pIn2); }else{ nKey = pIn2->n; zKey = pIn2->z; @@ -5671,6 +5666,7 @@ case OP_AggStep: { int i; Mem *pMem; Mem *pRec; + Mem t; sqlite3_context ctx; sqlite3_value **apVal; @@ -5688,11 +5684,12 @@ case OP_AggStep: { assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) ); ctx.pMem = pMem = &aMem[pOp->p3]; pMem->n++; - ctx.s.flags = MEM_Null; - ctx.s.z = 0; - ctx.s.zMalloc = 0; - ctx.s.xDel = 0; - ctx.s.db = db; + t.flags = MEM_Null; + t.z = 0; + t.zMalloc = 0; + t.xDel = 0; + t.db = db; + ctx.pOut = &t; ctx.isError = 0; ctx.pColl = 0; ctx.skipFlag = 0; @@ -5704,7 +5701,7 @@ case OP_AggStep: { } (ctx.pFunc->xStep)(&ctx, n, apVal); /* IMP: R-24505-23230 */ if( ctx.isError ){ - sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3_value_text(&ctx.s)); + sqlite3SetString(&p->zErrMsg, db, "%s", sqlite3_value_text(&t)); rc = ctx.isError; } if( ctx.skipFlag ){ @@ -5712,9 +5709,7 @@ case OP_AggStep: { i = pOp[-1].p1; if( i ) sqlite3VdbeMemSetInt64(&aMem[i], 1); } - - sqlite3VdbeMemRelease(&ctx.s); - + sqlite3VdbeMemRelease(&t); break; } @@ -6164,27 +6159,14 @@ case OP_VColumn: { pModule = pVtab->pModule; assert( pModule->xColumn ); memset(&sContext, 0, sizeof(sContext)); - - /* The output cell may already have a buffer allocated. Move - ** the current contents to sContext.s so in case the user-function - ** can use the already allocated buffer instead of allocating a - ** new one. - */ - sqlite3VdbeMemMove(&sContext.s, pDest); - MemSetTypeFlag(&sContext.s, MEM_Null); - + sContext.pOut = pDest; + MemSetTypeFlag(pDest, MEM_Null); rc = pModule->xColumn(pCur->pVtabCursor, &sContext, pOp->p2); sqlite3VtabImportErrmsg(p, pVtab); if( sContext.isError ){ rc = sContext.isError; } - - /* Copy the result of the function to the P3 register. We - ** do this regardless of whether or not an error occurred to ensure any - ** dynamic allocation in sContext.s (a Mem struct) is released. - */ - sqlite3VdbeChangeEncoding(&sContext.s, encoding); - sqlite3VdbeMemMove(pDest, &sContext.s); + sqlite3VdbeChangeEncoding(pDest, encoding); REGISTER_TRACE(pOp->p3, pDest); UPDATE_MAX_BLOBSIZE(pDest); diff --git a/src/vdbeInt.h b/src/vdbeInt.h index 54dd63f54..3cb8ddca4 100644 --- a/src/vdbeInt.h +++ b/src/vdbeInt.h @@ -266,8 +266,8 @@ struct AuxData { ** (Mem) which are only defined there. */ struct sqlite3_context { + Mem *pOut; /* The return value is stored here */ FuncDef *pFunc; /* Pointer to function information. 
MUST BE FIRST */ - Mem s; /* The return value is stored here */ Mem *pMem; /* Memory cell used to store aggregate context */ CollSeq *pColl; /* Collating sequence */ Vdbe *pVdbe; /* The VM that owns this context */ @@ -462,13 +462,13 @@ void sqlite3VdbePreUpdateHook( Vdbe *, VdbeCursor *, int, const char*, Table *, i64, int); int sqlite3VdbeTransferError(Vdbe *p); -int sqlite3VdbeSorterInit(sqlite3 *, VdbeCursor *); +int sqlite3VdbeSorterInit(sqlite3 *, int, VdbeCursor *); void sqlite3VdbeSorterReset(sqlite3 *, VdbeSorter *); void sqlite3VdbeSorterClose(sqlite3 *, VdbeCursor *); int sqlite3VdbeSorterRowkey(const VdbeCursor *, Mem *); int sqlite3VdbeSorterNext(sqlite3 *, const VdbeCursor *, int *); -int sqlite3VdbeSorterRewind(sqlite3 *, const VdbeCursor *, int *); -int sqlite3VdbeSorterWrite(sqlite3 *, const VdbeCursor *, Mem *); +int sqlite3VdbeSorterRewind(const VdbeCursor *, int *); +int sqlite3VdbeSorterWrite(const VdbeCursor *, Mem *); int sqlite3VdbeSorterCompare(const VdbeCursor *, Mem *, int, int *); #if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0 diff --git a/src/vdbeapi.c b/src/vdbeapi.c index 85cbd7203..9a8178e1b 100644 --- a/src/vdbeapi.c +++ b/src/vdbeapi.c @@ -223,7 +223,7 @@ static void setResultStrOrError( u8 enc, /* Encoding of z. 0 for BLOBs */ void (*xDel)(void*) /* Destructor function */ ){ - if( sqlite3VdbeMemSetStr(&pCtx->s, z, n, enc, xDel)==SQLITE_TOOBIG ){ + if( sqlite3VdbeMemSetStr(pCtx->pOut, z, n, enc, xDel)==SQLITE_TOOBIG ){ sqlite3_result_error_toobig(pCtx); } } @@ -234,38 +234,38 @@ void sqlite3_result_blob( void (*xDel)(void *) ){ assert( n>=0 ); - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); setResultStrOrError(pCtx, z, n, 0, xDel); } void sqlite3_result_double(sqlite3_context *pCtx, double rVal){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - sqlite3VdbeMemSetDouble(&pCtx->s, rVal); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + sqlite3VdbeMemSetDouble(pCtx->pOut, rVal); } void sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); pCtx->isError = SQLITE_ERROR; pCtx->fErrorOrAux = 1; - sqlite3VdbeMemSetStr(&pCtx->s, z, n, SQLITE_UTF8, SQLITE_TRANSIENT); + sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF8, SQLITE_TRANSIENT); } #ifndef SQLITE_OMIT_UTF16 void sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); pCtx->isError = SQLITE_ERROR; pCtx->fErrorOrAux = 1; - sqlite3VdbeMemSetStr(&pCtx->s, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT); + sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT); } #endif void sqlite3_result_int(sqlite3_context *pCtx, int iVal){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - sqlite3VdbeMemSetInt64(&pCtx->s, (i64)iVal); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + sqlite3VdbeMemSetInt64(pCtx->pOut, (i64)iVal); } void sqlite3_result_int64(sqlite3_context *pCtx, i64 iVal){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - sqlite3VdbeMemSetInt64(&pCtx->s, iVal); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + sqlite3VdbeMemSetInt64(pCtx->pOut, iVal); } void sqlite3_result_null(sqlite3_context *pCtx){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - sqlite3VdbeMemSetNull(&pCtx->s); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + 
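[Editorial note] The aggregate path gets the same treatment in OP_AggStep above and in sqlite3VdbeMemFinalize() further down, where a stack-allocated Mem t temporarily stands in for ctx.pOut. From the application's point of view nothing changes: aggregates still use sqlite3_aggregate_context() and sqlite3_result_*(). A hedged sketch with made-up names:

#include <sqlite3.h>

typedef struct SumSqCtx { double sum; } SumSqCtx;

/* xStep: accumulate the sum of squares of the first argument. */
static void sumsqStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  SumSqCtx *p = (SumSqCtx*)sqlite3_aggregate_context(ctx, sizeof(*p));
  if( p ){
    double v = sqlite3_value_double(argv[0]);
    p->sum += v*v;
  }
}

/* xFinal: report the accumulated total. */
static void sumsqFinal(sqlite3_context *ctx){
  SumSqCtx *p = (SumSqCtx*)sqlite3_aggregate_context(ctx, 0);
  sqlite3_result_double(ctx, p ? p->sum : 0.0);
}

static int registerSumSq(sqlite3 *db){
  return sqlite3_create_function(db, "sumsq", 1, SQLITE_UTF8, 0,
                                 0, sumsqStep, sumsqFinal);
}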
sqlite3VdbeMemSetNull(pCtx->pOut); } void sqlite3_result_text( sqlite3_context *pCtx, @@ -273,7 +273,7 @@ void sqlite3_result_text( int n, void (*xDel)(void *) ){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); setResultStrOrError(pCtx, z, n, SQLITE_UTF8, xDel); } #ifndef SQLITE_OMIT_UTF16 @@ -283,7 +283,7 @@ void sqlite3_result_text16( int n, void (*xDel)(void *) ){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); setResultStrOrError(pCtx, z, n, SQLITE_UTF16NATIVE, xDel); } void sqlite3_result_text16be( @@ -292,7 +292,7 @@ void sqlite3_result_text16be( int n, void (*xDel)(void *) ){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); setResultStrOrError(pCtx, z, n, SQLITE_UTF16BE, xDel); } void sqlite3_result_text16le( @@ -301,43 +301,43 @@ void sqlite3_result_text16le( int n, void (*xDel)(void *) ){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); setResultStrOrError(pCtx, z, n, SQLITE_UTF16LE, xDel); } #endif /* SQLITE_OMIT_UTF16 */ void sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - sqlite3VdbeMemCopy(&pCtx->s, pValue); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + sqlite3VdbeMemCopy(pCtx->pOut, pValue); } void sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - sqlite3VdbeMemSetZeroBlob(&pCtx->s, n); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + sqlite3VdbeMemSetZeroBlob(pCtx->pOut, n); } void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){ pCtx->isError = errCode; pCtx->fErrorOrAux = 1; - if( pCtx->s.flags & MEM_Null ){ - sqlite3VdbeMemSetStr(&pCtx->s, sqlite3ErrStr(errCode), -1, + if( pCtx->pOut->flags & MEM_Null ){ + sqlite3VdbeMemSetStr(pCtx->pOut, sqlite3ErrStr(errCode), -1, SQLITE_UTF8, SQLITE_STATIC); } } /* Force an SQLITE_TOOBIG error. */ void sqlite3_result_error_toobig(sqlite3_context *pCtx){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); pCtx->isError = SQLITE_TOOBIG; pCtx->fErrorOrAux = 1; - sqlite3VdbeMemSetStr(&pCtx->s, "string or blob too big", -1, + sqlite3VdbeMemSetStr(pCtx->pOut, "string or blob too big", -1, SQLITE_UTF8, SQLITE_STATIC); } /* An SQLITE_NOMEM error. 
*/ void sqlite3_result_error_nomem(sqlite3_context *pCtx){ - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); - sqlite3VdbeMemSetNull(&pCtx->s); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + sqlite3VdbeMemSetNull(pCtx->pOut); pCtx->isError = SQLITE_NOMEM; pCtx->fErrorOrAux = 1; - pCtx->s.db->mallocFailed = 1; + pCtx->pOut->db->mallocFailed = 1; } /* @@ -568,7 +568,7 @@ void *sqlite3_user_data(sqlite3_context *p){ */ sqlite3 *sqlite3_context_db_handle(sqlite3_context *p){ assert( p && p->pFunc ); - return p->s.db; + return p->pOut->db; } /* @@ -578,7 +578,7 @@ sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context *p){ Vdbe *v = p->pVdbe; int rc; if( v->iCurrentTime==0 ){ - rc = sqlite3OsCurrentTimeInt64(p->s.db->pVfs, &v->iCurrentTime); + rc = sqlite3OsCurrentTimeInt64(p->pOut->db->pVfs, &v->iCurrentTime); if( rc ) v->iCurrentTime = 0; } return v->iCurrentTime; @@ -635,7 +635,7 @@ static SQLITE_NOINLINE void *createAggContext(sqlite3_context *p, int nByte){ */ void *sqlite3_aggregate_context(sqlite3_context *p, int nByte){ assert( p && p->pFunc && p->pFunc->xStep ); - assert( sqlite3_mutex_held(p->s.db->mutex) ); + assert( sqlite3_mutex_held(p->pOut->db->mutex) ); testcase( nByte<0 ); if( (p->pMem->flags & MEM_Agg)==0 ){ return createAggContext(p, nByte); @@ -651,7 +651,7 @@ void *sqlite3_aggregate_context(sqlite3_context *p, int nByte){ void *sqlite3_get_auxdata(sqlite3_context *pCtx, int iArg){ AuxData *pAuxData; - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); for(pAuxData=pCtx->pVdbe->pAuxData; pAuxData; pAuxData=pAuxData->pNext){ if( pAuxData->iOp==pCtx->iOp && pAuxData->iArg==iArg ) break; } @@ -673,7 +673,7 @@ void sqlite3_set_auxdata( AuxData *pAuxData; Vdbe *pVdbe = pCtx->pVdbe; - assert( sqlite3_mutex_held(pCtx->s.db->mutex) ); + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); if( iArg<0 ) goto failed; for(pAuxData=pVdbe->pAuxData; pAuxData; pAuxData=pAuxData->pNext){ diff --git a/src/vdbeaux.c b/src/vdbeaux.c index 40ad6e1dc..da42db9be 100644 --- a/src/vdbeaux.c +++ b/src/vdbeaux.c @@ -3187,10 +3187,14 @@ void sqlite3VdbeRecordUnpack( ** sqlite3VdbeSerialGet() and sqlite3MemCompare() functions. It is used ** in assert() statements to ensure that the optimized code in ** sqlite3VdbeRecordCompare() returns results with these two primitives. +** +** Return true if the result of comparison is equivalent to desiredResult. +** Return false if there is a disagreement. */ static int vdbeRecordCompareDebug( int nKey1, const void *pKey1, /* Left key */ - const UnpackedRecord *pPKey2 /* Right key */ + const UnpackedRecord *pPKey2, /* Right key */ + int desiredResult /* Correct answer */ ){ u32 d1; /* Offset into aKey[] of next data element */ u32 idx1; /* Offset into aKey[] of next header element */ @@ -3202,6 +3206,7 @@ static int vdbeRecordCompareDebug( Mem mem1; pKeyInfo = pPKey2->pKeyInfo; + if( pKeyInfo->db==0 ) return 1; mem1.enc = pKeyInfo->enc; mem1.db = pKeyInfo->db; /* mem1.flags = 0; // Will be initialized by sqlite3VdbeSerialGet() */ @@ -3252,7 +3257,7 @@ static int vdbeRecordCompareDebug( if( pKeyInfo->aSortOrder[i] ){ rc = -rc; /* Invert the result for DESC sort order. */ } - return rc; + goto debugCompareEnd; } i++; }while( idx1<szHdr1 && i<pPKey2->nField ); @@ -3266,7 +3271,15 @@ static int vdbeRecordCompareDebug( /* rc==0 here means that one of the keys ran out of fields and ** all the fields up to that point were equal. Return the the default_rc ** value. 
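[Editorial note] None of the vdbeapi.c changes above alter the public contract: sqlite3_result_*() still sets the function's return value, it just lands directly in the destination register (ctx.pOut) instead of a scratch Mem that was later copied. A hedged reminder of what an application-defined scalar function looks like, unchanged by this refactoring:

#include <sqlite3.h>

/* half(X) returns X/2 as a floating-point value. */
static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
}

static int registerHalf(sqlite3 *db){
  return sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0,
                                 halfFunc, 0, 0);
}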
*/ - return pPKey2->default_rc; + rc = pPKey2->default_rc; + +debugCompareEnd: + if( desiredResult==0 && rc==0 ) return 1; + if( desiredResult<0 && rc<0 ) return 1; + if( desiredResult>0 && rc>0 ) return 1; + if( CORRUPT_DB ) return 1; + if( pKeyInfo->db->mallocFailed ) return 1; + return 0; } #endif @@ -3279,7 +3292,8 @@ static int vdbeRecordCompareDebug( static int vdbeCompareMemString( const Mem *pMem1, const Mem *pMem2, - const CollSeq *pColl + const CollSeq *pColl, + u8 *prcErr /* If an OOM occurs, set to SQLITE_NOMEM */ ){ if( pMem1->enc==pColl->enc ){ /* The strings are already in the correct encoding. Call the @@ -3302,6 +3316,7 @@ static int vdbeCompareMemString( rc = pColl->xCmp(pColl->pUser, n1, v1, n2, v2); sqlite3VdbeMemRelease(&c1); sqlite3VdbeMemRelease(&c2); + if( (v1==0 || v2==0) && prcErr ) *prcErr = SQLITE_NOMEM; return rc; } } @@ -3384,7 +3399,7 @@ int sqlite3MemCompare(const Mem *pMem1, const Mem *pMem2, const CollSeq *pColl){ assert( !pColl || pColl->xCmp ); if( pColl ){ - return vdbeCompareMemString(pMem1, pMem2, pColl); + return vdbeCompareMemString(pMem1, pMem2, pColl, 0); } /* If a NULL pointer was passed as the collate function, fall through ** to the blob case and use memcmp(). */ @@ -3456,8 +3471,10 @@ static i64 vdbeRecordDecodeInt(u32 serial_type, const u8 *aKey){ ** fields that appear in both keys are equal, then pPKey2->default_rc is ** returned. ** -** If database corruption is discovered, set pPKey2->isCorrupt to non-zero -** and return 0. +** If database corruption is discovered, set pPKey2->errCode to +** SQLITE_CORRUPT and return 0. If an OOM error is encountered, +** pPKey2->errCode is set to SQLITE_NOMEM and, if it is not NULL, the +** malloc-failed flag set on database handle (pPKey2->pKeyInfo->db). */ int sqlite3VdbeRecordCompare( int nKey1, const void *pKey1, /* Left key */ @@ -3488,7 +3505,7 @@ int sqlite3VdbeRecordCompare( idx1 = getVarint32(aKey1, szHdr1); d1 = szHdr1; if( d1>(unsigned)nKey1 ){ - pPKey2->isCorrupt = (u8)SQLITE_CORRUPT_BKPT; + pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT; return 0; /* Corruption */ } i = 0; @@ -3567,14 +3584,16 @@ int sqlite3VdbeRecordCompare( testcase( (d1+mem1.n)==(unsigned)nKey1 ); testcase( (d1+mem1.n+1)==(unsigned)nKey1 ); if( (d1+mem1.n) > (unsigned)nKey1 ){ - pPKey2->isCorrupt = (u8)SQLITE_CORRUPT_BKPT; + pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT; return 0; /* Corruption */ }else if( pKeyInfo->aColl[i] ){ mem1.enc = pKeyInfo->enc; mem1.db = pKeyInfo->db; mem1.flags = MEM_Str; mem1.z = (char*)&aKey1[d1]; - rc = vdbeCompareMemString(&mem1, pRhs, pKeyInfo->aColl[i]); + rc = vdbeCompareMemString( + &mem1, pRhs, pKeyInfo->aColl[i], &pPKey2->errCode + ); }else{ int nCmp = MIN(mem1.n, pRhs->n); rc = memcmp(&aKey1[d1], pRhs->z, nCmp); @@ -3594,7 +3613,7 @@ int sqlite3VdbeRecordCompare( testcase( (d1+nStr)==(unsigned)nKey1 ); testcase( (d1+nStr+1)==(unsigned)nKey1 ); if( (d1+nStr) > (unsigned)nKey1 ){ - pPKey2->isCorrupt = (u8)SQLITE_CORRUPT_BKPT; + pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT; return 0; /* Corruption */ }else{ int nCmp = MIN(nStr, pRhs->n); @@ -3614,11 +3633,7 @@ int sqlite3VdbeRecordCompare( if( pKeyInfo->aSortOrder[i] ){ rc = -rc; } - assert( CORRUPT_DB - || (rc<0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)<0) - || (rc>0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)>0) - || pKeyInfo->db->mallocFailed - ); + assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, rc) ); assert( mem1.zMalloc==0 ); /* See comment below */ return rc; } @@ -3638,7 +3653,7 @@ int sqlite3VdbeRecordCompare( ** all the 
fields up to that point were equal. Return the the default_rc ** value. */ assert( CORRUPT_DB - || pPKey2->default_rc==vdbeRecordCompareDebug(nKey1, pKey1, pPKey2) + || vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, pPKey2->default_rc) || pKeyInfo->db->mallocFailed ); return pPKey2->default_rc; @@ -3737,11 +3752,7 @@ static int vdbeRecordCompareInt( res = pPKey2->default_rc; } - assert( (res==0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)==0) - || (res<0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)<0) - || (res>0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)>0) - || CORRUPT_DB - ); + assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, res) ); return res; } @@ -3775,7 +3786,7 @@ static int vdbeRecordCompareString( nStr = (serial_type-12) / 2; if( (szHdr + nStr) > nKey1 ){ - pPKey2->isCorrupt = (u8)SQLITE_CORRUPT_BKPT; + pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT; return 0; /* Corruption */ } nCmp = MIN( pPKey2->aMem[0].n, nStr ); @@ -3801,9 +3812,7 @@ static int vdbeRecordCompareString( } } - assert( (res==0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)==0) - || (res<0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)<0) - || (res>0 && vdbeRecordCompareDebug(nKey1, pKey1, pPKey2)>0) + assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, res) || CORRUPT_DB || pPKey2->pKeyInfo->db->mallocFailed ); diff --git a/src/vdbemem.c b/src/vdbemem.c index e4012593d..95e23c61a 100644 --- a/src/vdbemem.c +++ b/src/vdbemem.c @@ -200,15 +200,11 @@ int sqlite3VdbeMemExpandBlob(Mem *pMem){ } #endif - /* -** Make sure the given Mem is \u0000 terminated. +** It is already known that pMem contains an unterminated string. +** Add the zero terminator. */ -int sqlite3VdbeMemNulTerminate(Mem *pMem){ - assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); - if( (pMem->flags & MEM_Term)!=0 || (pMem->flags & MEM_Str)==0 ){ - return SQLITE_OK; /* Nothing to do */ - } +static SQLITE_NOINLINE int vdbeMemAddTerminator(Mem *pMem){ if( sqlite3VdbeMemGrow(pMem, pMem->n+2, 1) ){ return SQLITE_NOMEM; } @@ -219,6 +215,20 @@ int sqlite3VdbeMemNulTerminate(Mem *pMem){ } /* +** Make sure the given Mem is \u0000 terminated. +*/ +int sqlite3VdbeMemNulTerminate(Mem *pMem){ + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); + testcase( (pMem->flags & (MEM_Term|MEM_Str))==(MEM_Term|MEM_Str) ); + testcase( (pMem->flags & (MEM_Term|MEM_Str))==0 ); + if( (pMem->flags & (MEM_Term|MEM_Str))!=MEM_Str ){ + return SQLITE_OK; /* Nothing to do */ + }else{ + return vdbeMemAddTerminator(pMem); + } +} + +/* ** Add MEM_Str to the set of representations for the given Mem. Numbers ** are converted using sqlite3_snprintf(). Converting a BLOB to a string ** is a no-op. 
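[Editorial note] With isCorrupt widened to errCode in the comparison routines above, a zero result from sqlite3VdbeRecordCompare() now means either a genuine tie or an abandoned comparison (corruption or OOM), and callers distinguish the two by examining errCode. A hedged sketch against the internal interface; pRec is assumed to have been prepared elsewhere, for example with sqlite3VdbeAllocUnpackedRecord() and sqlite3VdbeRecordUnpack():

#include "sqliteInt.h"

static int recordCompareChecked(
  int nKey, const void *pKey,    /* serialized left-hand key */
  UnpackedRecord *pRec,          /* unpacked right-hand key */
  int *pRes                      /* OUT: <0, 0 or >0 */
){
  pRec->errCode = 0;
  *pRes = sqlite3VdbeRecordCompare(nKey, pKey, pRec);
  if( *pRes==0 && pRec->errCode ){
    return pRec->errCode;        /* SQLITE_CORRUPT or SQLITE_NOMEM */
  }
  return SQLITE_OK;
}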
@@ -280,17 +290,20 @@ int sqlite3VdbeMemFinalize(Mem *pMem, FuncDef *pFunc){ int rc = SQLITE_OK; if( ALWAYS(pFunc && pFunc->xFinalize) ){ sqlite3_context ctx; + Mem t; assert( (pMem->flags & MEM_Null)!=0 || pFunc==pMem->u.pDef ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); memset(&ctx, 0, sizeof(ctx)); - ctx.s.flags = MEM_Null; - ctx.s.db = pMem->db; + memset(&t, 0, sizeof(t)); + t.flags = MEM_Null; + t.db = pMem->db; + ctx.pOut = &t; ctx.pMem = pMem; ctx.pFunc = pFunc; pFunc->xFinalize(&ctx); /* IMP: R-24505-23230 */ assert( 0==(pMem->flags&MEM_Dyn) && !pMem->xDel ); sqlite3DbFree(pMem->db, pMem->zMalloc); - memcpy(pMem, &ctx.s, sizeof(ctx.s)); + memcpy(pMem, &t, sizeof(t)); rc = ctx.isError; } return rc; @@ -606,13 +619,27 @@ void sqlite3VdbeMemSetZeroBlob(Mem *pMem, int n){ } /* +** The pMem is known to contain content that needs to be destroyed prior +** to a value change. So invoke the destructor, then set the value to +** a 64-bit integer. +*/ +static SQLITE_NOINLINE void vdbeReleaseAndSetInt64(Mem *pMem, i64 val){ + sqlite3VdbeMemReleaseExternal(pMem); + pMem->u.i = val; + pMem->flags = MEM_Int; +} + +/* ** Delete any previous value and set the value stored in *pMem to val, ** manifest type INTEGER. */ void sqlite3VdbeMemSetInt64(Mem *pMem, i64 val){ - sqlite3VdbeMemRelease(pMem); - pMem->u.i = val; - pMem->flags = MEM_Int; + if( VdbeMemDynamic(pMem) ){ + vdbeReleaseAndSetInt64(pMem, val); + }else{ + pMem->u.i = val; + pMem->flags = MEM_Int; + } } #ifndef SQLITE_OMIT_FLOATING_POINT @@ -909,31 +936,25 @@ int sqlite3VdbeMemFromBtree( return rc; } -/* This function is only available internally, it is not part of the -** external API. It works in a similar way to sqlite3_value_text(), -** except the data returned is in the encoding specified by the second -** parameter, which must be one of SQLITE_UTF16BE, SQLITE_UTF16LE or -** SQLITE_UTF8. -** -** (2006-02-16:) The enc value can be or-ed with SQLITE_UTF16_ALIGNED. -** If that is the case, then the result must be aligned on an even byte -** boundary. +/* +** The pVal argument is known to be a value other than NULL. +** Convert it into a string with encoding enc and return a pointer +** to a zero-terminated version of that string. 
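[Editorial note] vdbeMemAddTerminator() and vdbeReleaseAndSetInt64() above both follow the same micro-optimization: the caller keeps only a cheap flag test inline and pushes the rare, heavyweight branch into a SQLITE_NOINLINE helper so the hot path stays small. A hedged generic illustration of the pattern, not SQLite code; NOINLINE is spelled for GCC/Clang here:

#include <stdlib.h>

#define NOINLINE __attribute__((noinline))

typedef struct Cell { void *pDyn; long long i; int flags; } Cell;
#define CELL_DYNAMIC 0x01
#define CELL_INT     0x02

/* Rare case, kept out of line: destroy old dynamic content first. */
static NOINLINE void cellReleaseAndSetInt(Cell *p, long long v){
  free(p->pDyn);
  p->pDyn = 0;
  p->i = v;
  p->flags = CELL_INT;
}

/* Common case: one test, two stores. */
static void cellSetInt(Cell *p, long long v){
  if( p->flags & CELL_DYNAMIC ){
    cellReleaseAndSetInt(p, v);
  }else{
    p->i = v;
    p->flags = CELL_INT;
  }
}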
*/ -const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){ - if( !pVal ) return 0; - +SQLITE_NOINLINE const void *valueToText(sqlite3_value* pVal, u8 enc){ + assert( pVal!=0 ); assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) ); assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) ); assert( (pVal->flags & MEM_RowSet)==0 ); - - if( pVal->flags&MEM_Null ){ - return 0; - } - assert( (MEM_Blob>>3) == MEM_Str ); - pVal->flags |= (pVal->flags & MEM_Blob)>>3; - ExpandBlob(pVal); - if( pVal->flags&MEM_Str ){ - sqlite3VdbeChangeEncoding(pVal, enc & ~SQLITE_UTF16_ALIGNED); + assert( (pVal->flags & (MEM_Null))==0 ); + if( pVal->flags & (MEM_Blob|MEM_Str) ){ + pVal->flags |= MEM_Str; + if( pVal->flags & MEM_Zero ){ + sqlite3VdbeMemExpandBlob(pVal); + } + if( pVal->enc != (enc & ~SQLITE_UTF16_ALIGNED) ){ + sqlite3VdbeChangeEncoding(pVal, enc & ~SQLITE_UTF16_ALIGNED); + } if( (enc & SQLITE_UTF16_ALIGNED)!=0 && 1==(1&SQLITE_PTR_TO_INT(pVal->z)) ){ assert( (pVal->flags & (MEM_Ephem|MEM_Static))!=0 ); if( sqlite3VdbeMemMakeWriteable(pVal)!=SQLITE_OK ){ @@ -942,7 +963,6 @@ const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){ } sqlite3VdbeMemNulTerminate(pVal); /* IMP: R-31275-44060 */ }else{ - assert( (pVal->flags&MEM_Blob)==0 ); sqlite3VdbeMemStringify(pVal, enc, 0); assert( 0==(1&SQLITE_PTR_TO_INT(pVal->z)) ); } @@ -955,6 +975,30 @@ const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){ } } +/* This function is only available internally, it is not part of the +** external API. It works in a similar way to sqlite3_value_text(), +** except the data returned is in the encoding specified by the second +** parameter, which must be one of SQLITE_UTF16BE, SQLITE_UTF16LE or +** SQLITE_UTF8. +** +** (2006-02-16:) The enc value can be or-ed with SQLITE_UTF16_ALIGNED. +** If that is the case, then the result must be aligned on an even byte +** boundary. +*/ +const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){ + if( !pVal ) return 0; + assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) ); + assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) ); + assert( (pVal->flags & MEM_RowSet)==0 ); + if( (pVal->flags&(MEM_Str|MEM_Term))==(MEM_Str|MEM_Term) && pVal->enc==enc ){ + return pVal->z; + } + if( pVal->flags&MEM_Null ){ + return 0; + } + return valueToText(pVal, enc); +} + /* ** Create a new sqlite3_value object. */ diff --git a/src/vdbesort.c b/src/vdbesort.c index 6a5855f2e..7318ea409 100644 --- a/src/vdbesort.c +++ b/src/vdbesort.c @@ -1,5 +1,5 @@ /* -** 2011 July 9 +** 2011-07-09 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: @@ -10,44 +10,198 @@ ** ************************************************************************* ** This file contains code for the VdbeSorter object, used in concert with -** a VdbeCursor to sort large numbers of keys (as may be required, for -** example, by CREATE INDEX statements on tables too large to fit in main -** memory). +** a VdbeCursor to sort large numbers of keys for CREATE INDEX statements +** or by SELECT statements with ORDER BY clauses that cannot be satisfied +** using indexes and without LIMIT clauses. +** +** The VdbeSorter object implements a multi-threaded external merge sort +** algorithm that is efficient even if the number of elements being sorted +** exceeds the available memory. +** +** Here is the (internal, non-API) interface between this module and the +** rest of the SQLite system: +** +** sqlite3VdbeSorterInit() Create a new VdbeSorter object. 
+** +** sqlite3VdbeSorterWrite() Add a single new row to the VdbeSorter +** object. The row is a binary blob in the +** OP_MakeRecord format that contains both +** the ORDER BY key columns and result columns +** in the case of a SELECT w/ ORDER BY, or +** the complete record for an index entry +** in the case of a CREATE INDEX. +** +** sqlite3VdbeSorterRewind() Sort all content previously added. +** Position the read cursor on the +** first sorted element. +** +** sqlite3VdbeSorterNext() Advance the read cursor to the next sorted +** element. +** +** sqlite3VdbeSorterRowkey() Return the complete binary blob for the +** row currently under the read cursor. +** +** sqlite3VdbeSorterCompare() Compare the binary blob for the row +** currently under the read cursor against +** another binary blob X and report if +** X is strictly less than the read cursor. +** Used to enforce uniqueness in a +** CREATE UNIQUE INDEX statement. +** +** sqlite3VdbeSorterClose() Close the VdbeSorter object and reclaim +** all resources. +** +** sqlite3VdbeSorterReset() Refurbish the VdbeSorter for reuse. This +** is like Close() followed by Init() only +** much faster. +** +** The interfaces above must be called in a particular order. Write() can +** only occur in between Init()/Reset() and Rewind(). Next(), Rowkey(), and +** Compare() can only occur in between Rewind() and Close()/Reset(). i.e. +** +** Init() +** for each record: Write() +** Rewind() +** Rowkey()/Compare() +** Next() +** Close() +** +** Algorithm: +** +** Records passed to the sorter via calls to Write() are initially held +** unsorted in main memory. Assuming the amount of memory used never exceeds +** a threshold, when Rewind() is called the set of records is sorted using +** an in-memory merge sort. In this case, no temporary files are required +** and subsequent calls to Rowkey(), Next() and Compare() read records +** directly from main memory. +** +** If the amount of space used to store records in main memory exceeds the +** threshold, then the set of records currently in memory are sorted and +** written to a temporary file in "Packed Memory Array" (PMA) format. +** A PMA created at this point is known as a "level-0 PMA". Higher levels +** of PMAs may be created by merging existing PMAs together - for example +** merging two or more level-0 PMAs together creates a level-1 PMA. +** +** The threshold for the amount of main memory to use before flushing +** records to a PMA is roughly the same as the limit configured for the +** page-cache of the main database. Specifically, the threshold is set to +** the value returned by "PRAGMA main.page_size" multipled by +** that returned by "PRAGMA main.cache_size", in bytes. +** +** If the sorter is running in single-threaded mode, then all PMAs generated +** are appended to a single temporary file. Or, if the sorter is running in +** multi-threaded mode then up to (N+1) temporary files may be opened, where +** N is the configured number of worker threads. In this case, instead of +** sorting the records and writing the PMA to a temporary file itself, the +** calling thread usually launches a worker thread to do so. Except, if +** there are already N worker threads running, the main thread does the work +** itself. 
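[Editor's note] The header comment above fixes both the call order (Init, then Write for each record, then Rewind, then Rowkey/Compare/Next, then Close) and the spill rule: records accumulate in memory and are flushed to a sorted run on disk once they exceed a budget of roughly page_size times cache_size. Below is a self-contained sketch of that accumulate-and-spill loop; MiniSorter, sorterWrite(), the fixed constants and the use of tmpfile() are all illustrative, not SQLite's API. Merging the resulting runs is shown in the merge-tree sketch further down.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins; not SQLite's interfaces or values. */
#define PAGE_SIZE   4096                     /* "PRAGMA page_size"  stand-in */
#define CACHE_PAGES 4                        /* "PRAGMA cache_size" stand-in */
#define SPILL_BYTES (PAGE_SIZE*CACHE_PAGES)  /* in-memory budget */

typedef struct MiniSorter {
  int *a;            /* in-memory, unsorted records (ints for simplicity) */
  size_t n, cap;     /* used / allocated entries */
  FILE *runs[64];    /* one temp file per spilled, sorted run ("PMA") */
  int nRun;
} MiniSorter;

static int cmpInt(const void *x, const void *y){
  int a = *(const int*)x, b = *(const int*)y;
  return (a>b) - (a<b);
}

/* Spill the current in-memory batch to disk as one sorted run. */
static void sorterSpill(MiniSorter *p){
  FILE *f = tmpfile();
  if( f==0 ){ perror("tmpfile"); exit(1); }
  qsort(p->a, p->n, sizeof(int), cmpInt);
  fwrite(p->a, sizeof(int), p->n, f);
  rewind(f);
  p->runs[p->nRun++] = f;
  p->n = 0;
}

/* Write(): buffer one record, spilling when over budget. */
static void sorterWrite(MiniSorter *p, int v){
  if( p->n==p->cap ){
    p->cap = p->cap ? p->cap*2 : 256;
    p->a = realloc(p->a, p->cap*sizeof(int));
  }
  p->a[p->n++] = v;
  if( p->n*sizeof(int) >= SPILL_BYTES ) sorterSpill(p);
}

/* Rewind(): sort whatever is still in memory; real code would now merge
** the on-disk runs with the in-memory remainder. */
static void sorterRewind(MiniSorter *p){
  qsort(p->a, p->n, sizeof(int), cmpInt);
}

int main(void){
  MiniSorter s = {0};
  for(int i=0; i<100000; i++) sorterWrite(&s, rand());
  sorterRewind(&s);
  printf("%d sorted runs on disk, %zu records still in memory\n", s.nRun, s.n);
  for(int i=0; i<s.nRun; i++) fclose(s.runs[i]);
  free(s.a);
  return 0;
}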
+** +** The sorter is running in multi-threaded mode if (a) the library was built +** with pre-processor symbol SQLITE_MAX_WORKER_THREADS set to a value greater +** than zero, and (b) worker threads have been enabled at runtime by calling +** sqlite3_config(SQLITE_CONFIG_WORKER_THREADS, ...). +** +** When Rewind() is called, any data remaining in memory is flushed to a +** final PMA. So at this point the data is stored in some number of sorted +** PMAs within temporary files on disk. +** +** If there are fewer than SORTER_MAX_MERGE_COUNT PMAs in total and the +** sorter is running in single-threaded mode, then these PMAs are merged +** incrementally as keys are retreived from the sorter by the VDBE. The +** MergeEngine object, described in further detail below, performs this +** merge. +** +** Or, if running in multi-threaded mode, then a background thread is +** launched to merge the existing PMAs. Once the background thread has +** merged T bytes of data into a single sorted PMA, the main thread +** begins reading keys from that PMA while the background thread proceeds +** with merging the next T bytes of data. And so on. +** +** Parameter T is set to half the value of the memory threshold used +** by Write() above to determine when to create a new PMA. +** +** If there are more than SORTER_MAX_MERGE_COUNT PMAs in total when +** Rewind() is called, then a hierarchy of incremental-merges is used. +** First, T bytes of data from the first SORTER_MAX_MERGE_COUNT PMAs on +** disk are merged together. Then T bytes of data from the second set, and +** so on, such that no operation ever merges more than SORTER_MAX_MERGE_COUNT +** PMAs at a time. This done is to improve locality. +** +** If running in multi-threaded mode and there are more than +** SORTER_MAX_MERGE_COUNT PMAs on disk when Rewind() is called, then more +** than one background thread may be created. Specifically, there may be +** one background thread for each temporary file on disk, and one background +** thread to merge the output of each of the others to a single PMA for +** the main thread to read from. */ - #include "sqliteInt.h" #include "vdbeInt.h" +/* +** If SQLITE_DEBUG_SORTER_THREADS is defined, this module outputs various +** messages to stderr that may be helpful in understanding the performance +** characteristics of the sorter in multi-threaded mode. +*/ +#if 0 +# define SQLITE_DEBUG_SORTER_THREADS 1 +#endif -typedef struct VdbeSorterIter VdbeSorterIter; -typedef struct SorterRecord SorterRecord; -typedef struct FileWriter FileWriter; +/* +** Private objects used by the sorter +*/ +typedef struct MergeEngine MergeEngine; /* Merge PMAs together */ +typedef struct PmaReader PmaReader; /* Incrementally read one PMA */ +typedef struct PmaWriter PmaWriter; /* Incrementally write one PMA */ +typedef struct SorterRecord SorterRecord; /* A record being sorted */ +typedef struct SortSubtask SortSubtask; /* A sub-task in the sort process */ +typedef struct SorterFile SorterFile; /* Temporary file object wrapper */ +typedef struct SorterList SorterList; /* In-memory list of records */ +typedef struct IncrMerger IncrMerger; /* Read & merge multiple PMAs */ /* -** NOTES ON DATA STRUCTURE USED FOR N-WAY MERGES: +** A container for a temp file handle and the current amount of data +** stored in the file. +*/ +struct SorterFile { + sqlite3_file *pFd; /* File handle */ + i64 iEof; /* Bytes of data stored in pFd */ +}; + +/* +** An in-memory list of objects to be sorted. 
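[Editor's note] The batching rule described above, where no operation ever merges more than SORTER_MAX_MERGE_COUNT PMAs at a time and higher-level PMAs are built until one remains, amounts to repeatedly grouping runs into batches of at most 16. A rough standalone sketch of that planning loop; MAX_MERGE and planMerges() are assumptions for illustration, not SQLite code.

#include <stdio.h>

#define MAX_MERGE 16   /* stand-in for SORTER_MAX_MERGE_COUNT */

/* Given nRun sorted runs, report how many merge levels are needed if each
** merge operation combines at most MAX_MERGE inputs. */
static void planMerges(int nRun){
  int level = 0;
  while( nRun>1 ){
    int nOut = (nRun + MAX_MERGE - 1) / MAX_MERGE;   /* ceil(nRun/16) */
    printf("level %d: %d runs -> %d run(s) via %d merge(s)\n",
           level, nRun, nOut, nOut);
    nRun = nOut;
    level++;
  }
}

int main(void){
  planMerges(300);   /* e.g. 300 level-0 PMAs: 300 -> 19 -> 2 -> 1 */
  return 0;
}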
** -** As keys are added to the sorter, they are written to disk in a series -** of sorted packed-memory-arrays (PMAs). The size of each PMA is roughly -** the same as the cache-size allowed for temporary databases. In order -** to allow the caller to extract keys from the sorter in sorted order, -** all PMAs currently stored on disk must be merged together. This comment -** describes the data structure used to do so. The structure supports -** merging any number of arrays in a single pass with no redundant comparison -** operations. +** If aMemory==0 then each object is allocated separately and the objects +** are connected using SorterRecord.u.pNext. If aMemory!=0 then all objects +** are stored in the aMemory[] bulk memory, one right after the other, and +** are connected using SorterRecord.u.iNext. +*/ +struct SorterList { + SorterRecord *pList; /* Linked list of records */ + u8 *aMemory; /* If non-NULL, bulk memory to hold pList */ + int szPMA; /* Size of pList as PMA in bytes */ +}; + +/* +** The MergeEngine object is used to combine two or more smaller PMAs into +** one big PMA using a merge operation. Separate PMAs all need to be +** combined into one big PMA in order to be able to step through the sorted +** records in order. ** -** The aIter[] array contains an iterator for each of the PMAs being merged. -** An aIter[] iterator either points to a valid key or else is at EOF. For -** the purposes of the paragraphs below, we assume that the array is actually -** N elements in size, where N is the smallest power of 2 greater to or equal -** to the number of iterators being merged. The extra aIter[] elements are -** treated as if they are empty (always at EOF). +** The aReadr[] array contains a PmaReader object for each of the PMAs being +** merged. An aReadr[] object either points to a valid key or else is at EOF. +** ("EOF" means "End Of File". When aReadr[] is at EOF there is no more data.) +** For the purposes of the paragraphs below, we assume that the array is +** actually N elements in size, where N is the smallest power of 2 greater +** to or equal to the number of PMAs being merged. The extra aReadr[] elements +** are treated as if they are empty (always at EOF). ** ** The aTree[] array is also N elements in size. The value of N is stored in -** the VdbeSorter.nTree variable. +** the MergeEngine.nTree variable. ** ** The final (N/2) elements of aTree[] contain the results of comparing -** pairs of iterator keys together. Element i contains the result of -** comparing aIter[2*i-N] and aIter[2*i-N+1]. Whichever key is smaller, the +** pairs of PMA keys together. Element i contains the result of +** comparing aReadr[2*i-N] and aReadr[2*i-N+1]. Whichever key is smaller, the ** aTree element is set to the index of it. ** ** For the purposes of this comparison, EOF is considered greater than any @@ -55,34 +209,34 @@ typedef struct FileWriter FileWriter; ** values), it doesn't matter which index is stored. ** ** The (N/4) elements of aTree[] that precede the final (N/2) described -** above contains the index of the smallest of each block of 4 iterators. -** And so on. So that aTree[1] contains the index of the iterator that +** above contains the index of the smallest of each block of 4 PmaReaders +** And so on. So that aTree[1] contains the index of the PmaReader that ** currently points to the smallest key value. aTree[0] is unused. 
** ** Example: ** -** aIter[0] -> Banana -** aIter[1] -> Feijoa -** aIter[2] -> Elderberry -** aIter[3] -> Currant -** aIter[4] -> Grapefruit -** aIter[5] -> Apple -** aIter[6] -> Durian -** aIter[7] -> EOF +** aReadr[0] -> Banana +** aReadr[1] -> Feijoa +** aReadr[2] -> Elderberry +** aReadr[3] -> Currant +** aReadr[4] -> Grapefruit +** aReadr[5] -> Apple +** aReadr[6] -> Durian +** aReadr[7] -> EOF ** ** aTree[] = { X, 5 0, 5 0, 3, 5, 6 } ** ** The current element is "Apple" (the value of the key indicated by -** iterator 5). When the Next() operation is invoked, iterator 5 will +** PmaReader 5). When the Next() operation is invoked, PmaReader 5 will ** be advanced to the next key in its segment. Say the next key is ** "Eggplant": ** -** aIter[5] -> Eggplant +** aReadr[5] -> Eggplant ** -** The contents of aTree[] are updated first by comparing the new iterator -** 5 key to the current key of iterator 4 (still "Grapefruit"). The iterator +** The contents of aTree[] are updated first by comparing the new PmaReader +** 5 key to the current key of PmaReader 4 (still "Grapefruit"). The PmaReader ** 5 value is still smaller, so aTree[6] is set to 5. And so on up the tree. -** The value of iterator 6 - "Durian" - is now smaller than that of iterator +** The value of PmaReader 6 - "Durian" - is now smaller than that of PmaReader ** 5, so aTree[3] is set to 6. Key 0 is smaller than key 6 (Banana<Durian), ** so the value written into element 1 of the array is 0. As follows: ** @@ -92,97 +246,246 @@ typedef struct FileWriter FileWriter; ** key comparison operations are required, where N is the number of segments ** being merged (rounded up to the next power of 2). */ +struct MergeEngine { + int nTree; /* Used size of aTree/aReadr (power of 2) */ + SortSubtask *pTask; /* Used by this thread only */ + int *aTree; /* Current state of incremental merge */ + PmaReader *aReadr; /* Array of PmaReaders to merge data from */ +}; + +/* +** This object represents a single thread of control in a sort operation. +** Exactly VdbeSorter.nTask instances of this object are allocated +** as part of each VdbeSorter object. Instances are never allocated any +** other way. VdbeSorter.nTask is set to the number of worker threads allowed +** (see SQLITE_CONFIG_WORKER_THREADS) plus one (the main thread). Thus for +** single-threaded operation, there is exactly one instance of this object +** and for multi-threaded operation there are two or more instances. +** +** Essentially, this structure contains all those fields of the VdbeSorter +** structure for which each thread requires a separate instance. For example, +** each thread requries its own UnpackedRecord object to unpack records in +** as part of comparison operations. +** +** Before a background thread is launched, variable bDone is set to 0. Then, +** right before it exits, the thread itself sets bDone to 1. This is used for +** two purposes: +** +** 1. When flushing the contents of memory to a level-0 PMA on disk, to +** attempt to select a SortSubtask for which there is not already an +** active background thread (since doing so causes the main thread +** to block until it finishes). +** +** 2. If SQLITE_DEBUG_SORTER_THREADS is defined, to determine if a call +** to sqlite3ThreadJoin() is likely to block. Cases that are likely to +** block provoke debugging output. +** +** In both cases, the effects of the main thread seeing (bDone==0) even +** after the thread has finished are not dire. So we don't worry about +** memory barriers and such here. 
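[Editor's note] The aTree[]/aReadr[] scheme described and illustrated above is a classic tournament tree: the leaves are the current heads of the inputs, internal nodes cache comparison winners, and advancing the overall minimum only replays comparisons along one leaf-to-root path (about log2(N) comparisons). Here is a self-contained sketch over integer arrays using the same array layout, with EOF modeled as "input exhausted" and treated as larger than any key, and ties going to the lower (older) input. The Input type, winner() and mergeAll() are illustrative names, not SQLite code.

#include <stdio.h>
#include <stdlib.h>

/* One sorted input stream: an array plus a read position. EOF when pos==n. */
typedef struct Input { const int *a; int n, pos; } Input;

static int atEof(const Input *p){ return p->pos>=p->n; }
static int head(const Input *p){ return p->a[p->pos]; }

/* Return the index of the smaller of inputs i1 and i2, treating EOF as
** larger than any key.  Ties go to i1, which is always the lower index. */
static int winner(Input *aIn, int i1, int i2){
  if( atEof(&aIn[i1]) ) return i2;
  if( atEof(&aIn[i2]) ) return i1;
  return (head(&aIn[i2]) < head(&aIn[i1])) ? i2 : i1;
}

/* Merge nIn sorted inputs using an aTree[] of N entries, N being the
** smallest power of two >= nIn.  aTree[N/2 .. N-1] hold the pairwise
** winners of the leaves, aTree[1] holds the overall winner, aTree[0]
** is unused. */
static void mergeAll(Input *aIn, int nIn){
  int N = 2;
  while( N<nIn ) N += N;
  Input *a = calloc(N, sizeof(Input));   /* padding entries are at EOF */
  int *aTree = malloc(N*sizeof(int));
  if( a==0 || aTree==0 ) return;
  for(int i=0; i<nIn; i++) a[i] = aIn[i];

  /* Initial pass: fill the tree bottom-up. */
  for(int i=N-1; i>0; i--){
    if( i>=N/2 ){
      aTree[i] = winner(a, (i-N/2)*2, (i-N/2)*2+1);
    }else{
      aTree[i] = winner(a, aTree[2*i], aTree[2*i+1]);
    }
  }

  /* Repeatedly emit the minimum, advance that input, and replay only the
  ** comparisons on the path from its leaf back to the root. */
  while( !atEof(&a[aTree[1]]) ){
    int iMin = aTree[1];
    printf("%d ", head(&a[iMin]));
    a[iMin].pos++;
    for(int i=(N+iMin)/2; i>0; i=i/2){
      if( i>=N/2 ){
        aTree[i] = winner(a, (i-N/2)*2, (i-N/2)*2+1);
      }else{
        aTree[i] = winner(a, aTree[2*i], aTree[2*i+1]);
      }
    }
  }
  printf("\n");
  free(a); free(aTree);
}

int main(void){
  const int r1[] = {1, 4, 9}, r2[] = {2, 3, 10}, r3[] = {5, 6, 7};
  Input in[3] = { {r1,3,0}, {r2,3,0}, {r3,3,0} };
  mergeAll(in, 3);    /* prints: 1 2 3 4 5 6 7 9 10 */
  return 0;
}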
+*/ +struct SortSubtask { + SQLiteThread *pThread; /* Background thread, if any */ + int bDone; /* Set if thread is finished but not joined */ + VdbeSorter *pSorter; /* Sorter that owns this sub-task */ + UnpackedRecord *pUnpacked; /* Space to unpack a record */ + SorterList list; /* List for thread to write to a PMA */ + int nPMA; /* Number of PMAs currently in file */ + SorterFile file; /* Temp file for level-0 PMAs */ + SorterFile file2; /* Space for other PMAs */ +}; + +/* +** Main sorter structure. A single instance of this is allocated for each +** sorter cursor created by the VDBE. +** +** mxKeysize: +** As records are added to the sorter by calls to sqlite3VdbeSorterWrite(), +** this variable is updated so as to be set to the size on disk of the +** largest record in the sorter. +*/ struct VdbeSorter { - i64 iWriteOff; /* Current write offset within file pTemp1 */ - i64 iReadOff; /* Current read offset within file pTemp1 */ - int nInMemory; /* Current size of pRecord list as PMA */ - int nTree; /* Used size of aTree/aIter (power of 2) */ - int nPMA; /* Number of PMAs stored in pTemp1 */ int mnPmaSize; /* Minimum PMA size, in bytes */ int mxPmaSize; /* Maximum PMA size, in bytes. 0==no limit */ - VdbeSorterIter *aIter; /* Array of iterators to merge */ - int *aTree; /* Current state of incremental merge */ - sqlite3_file *pTemp1; /* PMA file 1 */ - SorterRecord *pRecord; /* Head of in-memory record list */ - UnpackedRecord *pUnpacked; /* Used to unpack keys */ + int mxKeysize; /* Largest serialized key seen so far */ + int pgsz; /* Main database page size */ + PmaReader *pReader; /* Readr data from here after Rewind() */ + MergeEngine *pMerger; /* Or here, if bUseThreads==0 */ + sqlite3 *db; /* Database connection */ + KeyInfo *pKeyInfo; /* How to compare records */ + UnpackedRecord *pUnpacked; /* Used by VdbeSorterCompare() */ + SorterList list; /* List of in-memory records */ + int iMemory; /* Offset of free space in list.aMemory */ + int nMemory; /* Size of list.aMemory allocation in bytes */ + u8 bUsePMA; /* True if one or more PMAs created */ + u8 bUseThreads; /* True to use background threads */ + u8 iPrev; /* Previous thread used to flush PMA */ + u8 nTask; /* Size of aTask[] array */ + SortSubtask aTask[1]; /* One or more subtasks */ }; /* -** The following type is an iterator for a PMA. It caches the current key in -** variables nKey/aKey. If the iterator is at EOF, pFile==0. +** An instance of the following object is used to read records out of a +** PMA, in sorted order. The next key to be read is cached in nKey/aKey. +** aKey might point into aMap or into aBuffer. If neither of those locations +** contain a contiguous representation of the key, then aAlloc is allocated +** and the key is copied into aAlloc and aKey is made to poitn to aAlloc. +** +** pFd==0 at EOF. +*/ +struct PmaReader { + i64 iReadOff; /* Current read offset */ + i64 iEof; /* 1 byte past EOF for this PmaReader */ + int nAlloc; /* Bytes of space at aAlloc */ + int nKey; /* Number of bytes in key */ + sqlite3_file *pFd; /* File handle we are reading from */ + u8 *aAlloc; /* Space for aKey if aBuffer and pMap wont work */ + u8 *aKey; /* Pointer to current key */ + u8 *aBuffer; /* Current read buffer */ + int nBuffer; /* Size of read buffer in bytes */ + u8 *aMap; /* Pointer to mapping of entire file */ + IncrMerger *pIncr; /* Incremental merger */ +}; + +/* +** Normally, a PmaReader object iterates through an existing PMA stored +** within a temp file. 
However, if the PmaReader.pIncr variable points to +** an object of the following type, it may be used to iterate/merge through +** multiple PMAs simultaneously. +** +** There are two types of IncrMerger object - single (bUseThread==0) and +** multi-threaded (bUseThread==1). +** +** A multi-threaded IncrMerger object uses two temporary files - aFile[0] +** and aFile[1]. Neither file is allowed to grow to more than mxSz bytes in +** size. When the IncrMerger is initialized, it reads enough data from +** pMerger to populate aFile[0]. It then sets variables within the +** corresponding PmaReader object to read from that file and kicks off +** a background thread to populate aFile[1] with the next mxSz bytes of +** sorted record data from pMerger. +** +** When the PmaReader reaches the end of aFile[0], it blocks until the +** background thread has finished populating aFile[1]. It then exchanges +** the contents of the aFile[0] and aFile[1] variables within this structure, +** sets the PmaReader fields to read from the new aFile[0] and kicks off +** another background thread to populate the new aFile[1]. And so on, until +** the contents of pMerger are exhausted. +** +** A single-threaded IncrMerger does not open any temporary files of its +** own. Instead, it has exclusive access to mxSz bytes of space beginning +** at offset iStartOff of file pTask->file2. And instead of using a +** background thread to prepare data for the PmaReader, with a single +** threaded IncrMerger the allocate part of pTask->file2 is "refilled" with +** keys from pMerger by the calling thread whenever the PmaReader runs out +** of data. */ -struct VdbeSorterIter { - i64 iReadOff; /* Current read offset */ - i64 iEof; /* 1 byte past EOF for this iterator */ - int nAlloc; /* Bytes of space at aAlloc */ - int nKey; /* Number of bytes in key */ - sqlite3_file *pFile; /* File iterator is reading from */ - u8 *aAlloc; /* Allocated space */ - u8 *aKey; /* Pointer to current key */ - u8 *aBuffer; /* Current read buffer */ - int nBuffer; /* Size of read buffer in bytes */ +struct IncrMerger { + SortSubtask *pTask; /* Task that owns this merger */ + MergeEngine *pMerger; /* Merge engine thread reads data from */ + i64 iStartOff; /* Offset to start writing file at */ + int mxSz; /* Maximum bytes of data to store */ + int bEof; /* Set to true when merge is finished */ + int bUseThread; /* True to use a bg thread for this object */ + SorterFile aFile[2]; /* aFile[0] for reading, [1] for writing */ }; /* -** An instance of this structure is used to organize the stream of records -** being written to files by the merge-sort code into aligned, page-sized -** blocks. Doing all I/O in aligned page-sized blocks helps I/O to go -** faster on many operating systems. +** An instance of this object is used for writing a PMA. +** +** The PMA is written one record at a time. Each record is of an arbitrary +** size. But I/O is more efficient if it occurs in page-sized blocks where +** each block is aligned on a page boundary. This object caches writes to +** the PMA so that aligned, page-size blocks are written. 
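[Editor's note] The aFile[0]/aFile[1] scheme described for a multi-threaded IncrMerger is plain double buffering: the consumer drains one buffer while a background thread fills the other, then the two are exchanged after a join. The pthread sketch below shows that pattern with small in-memory buffers instead of temp files; DoubleBuf, producer() and swapBuffers() are illustrative and not the SQLite implementation. Build with cc -pthread.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BUF_SZ 8

typedef struct DoubleBuf {
  int aBuf[2][BUF_SZ];     /* aBuf[0]: being consumed, aBuf[1]: being filled */
  int nFill[2];            /* valid entries in each buffer */
  int iNext;               /* next value the producer will generate */
  int mx;                  /* stop producing after this value */
  pthread_t thr;           /* background producer, if running */
  int bThread;             /* true if thr is valid and must be joined */
} DoubleBuf;

/* Background job: fill aBuf[1] with the next chunk of output. */
static void *producer(void *pArg){
  DoubleBuf *p = (DoubleBuf*)pArg;
  int n = 0;
  while( n<BUF_SZ && p->iNext<=p->mx ) p->aBuf[1][n++] = p->iNext++;
  p->nFill[1] = n;
  return 0;
}

/* Wait for the producer, move aBuf[1] into aBuf[0], then relaunch the
** producer to refill aBuf[1] while the caller consumes aBuf[0]. */
static void swapBuffers(DoubleBuf *p){
  if( p->bThread ) pthread_join(p->thr, 0);
  for(int i=0; i<p->nFill[1]; i++) p->aBuf[0][i] = p->aBuf[1][i];
  p->nFill[0] = p->nFill[1];
  p->nFill[1] = 0;
  p->bThread = (pthread_create(&p->thr, 0, producer, p)==0);
}

int main(void){
  DoubleBuf d;
  memset(&d, 0, sizeof(d));
  d.iNext = 1;
  d.mx = 20;
  d.bThread = (pthread_create(&d.thr, 0, producer, &d)==0);  /* prime aBuf[1] */
  for(;;){
    swapBuffers(&d);
    if( d.nFill[0]==0 ) break;               /* producer exhausted: EOF */
    for(int i=0; i<d.nFill[0]; i++) printf("%d ", d.aBuf[0][i]);
  }
  if( d.bThread ) pthread_join(d.thr, 0);    /* join the final refill */
  printf("\n");
  return 0;
}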
*/ -struct FileWriter { +struct PmaWriter { int eFWErr; /* Non-zero if in an error state */ u8 *aBuffer; /* Pointer to write buffer */ int nBuffer; /* Size of write buffer in bytes */ int iBufStart; /* First byte of buffer to write */ int iBufEnd; /* Last byte of buffer to write */ i64 iWriteOff; /* Offset of start of buffer in file */ - sqlite3_file *pFile; /* File to write to */ + sqlite3_file *pFd; /* File handle to write to */ }; /* -** A structure to store a single record. All in-memory records are connected -** together into a linked list headed at VdbeSorter.pRecord using the -** SorterRecord.pNext pointer. +** This object is the header on a single record while that record is being +** held in memory and prior to being written out as part of a PMA. +** +** How the linked list is connected depends on how memory is being managed +** by this module. If using a separate allocation for each in-memory record +** (VdbeSorter.list.aMemory==0), then the list is always connected using the +** SorterRecord.u.pNext pointers. +** +** Or, if using the single large allocation method (VdbeSorter.list.aMemory!=0), +** then while records are being accumulated the list is linked using the +** SorterRecord.u.iNext offset. This is because the aMemory[] array may +** be sqlite3Realloc()ed while records are being accumulated. Once the VM +** has finished passing records to the sorter, or when the in-memory buffer +** is full, the list is sorted. As part of the sorting process, it is +** converted to use the SorterRecord.u.pNext pointers. See function +** vdbeSorterSort() for details. */ struct SorterRecord { - void *pVal; - int nVal; - SorterRecord *pNext; + int nVal; /* Size of the record in bytes */ + union { + SorterRecord *pNext; /* Pointer to next record in list */ + int iNext; /* Offset within aMemory of next record */ + } u; + /* The data for the record immediately follows this header */ }; -/* Minimum allowable value for the VdbeSorter.nWorking variable */ +/* Return a pointer to the buffer containing the record data for SorterRecord +** object p. Should be used as if: +** +** void *SRVAL(SorterRecord *p) { return (void*)&p[1]; } +*/ +#define SRVAL(p) ((void*)((SorterRecord*)(p) + 1)) + +/* The minimum PMA size is set to this value multiplied by the database +** page size in bytes. */ #define SORTER_MIN_WORKING 10 -/* Maximum number of segments to merge in a single pass. */ +/* Maximum number of PMAs that a single MergeEngine can merge */ #define SORTER_MAX_MERGE_COUNT 16 +static int vdbeIncrSwap(IncrMerger*); +static void vdbeIncrFree(IncrMerger *); + /* -** Free all memory belonging to the VdbeSorterIter object passed as the second +** Free all memory belonging to the PmaReader object passed as the ** argument. All structure fields are set to zero before returning. */ -static void vdbeSorterIterZero(sqlite3 *db, VdbeSorterIter *pIter){ - sqlite3DbFree(db, pIter->aAlloc); - sqlite3DbFree(db, pIter->aBuffer); - memset(pIter, 0, sizeof(VdbeSorterIter)); +static void vdbePmaReaderClear(PmaReader *pReadr){ + sqlite3_free(pReadr->aAlloc); + sqlite3_free(pReadr->aBuffer); + if( pReadr->aMap ) sqlite3OsUnfetch(pReadr->pFd, 0, pReadr->aMap); + vdbeIncrFree(pReadr->pIncr); + memset(pReadr, 0, sizeof(PmaReader)); } /* -** Read nByte bytes of data from the stream of data iterated by object p. +** Read the next nByte bytes of data from the PMA p. ** If successful, set *ppOut to point to a buffer containing the data ** and return SQLITE_OK. Otherwise, if an error occurs, return an SQLite ** error code. 
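[Editor's note] The SRVAL() macro above relies on the record payload being placed immediately after its SorterRecord header within a single allocation, so one malloc() and one free() cover both. A minimal standalone sketch of that header-plus-payload layout and the pointer arithmetic involved; Rec, RECVAL() and recNew() are illustrative names only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A record header; the payload bytes follow it directly in memory. */
typedef struct Rec {
  int nVal;              /* Size of the payload in bytes */
  struct Rec *pNext;     /* Next record in a linked list */
} Rec;

/* The payload lives right after the header, as in SQLite's SRVAL(). */
#define RECVAL(p) ((void*)&(p)[1])

/* Allocate header and payload with one malloc() call. */
static Rec *recNew(const void *pData, int nData){
  Rec *p = malloc(sizeof(Rec) + nData);
  if( p ){
    p->nVal = nData;
    p->pNext = 0;
    memcpy(RECVAL(p), pData, nData);
  }
  return p;
}

int main(void){
  Rec *p = recNew("hello", 6);
  printf("%d bytes: %s\n", p->nVal, (char*)RECVAL(p));
  free(p);                 /* one free() releases header and payload */
  return 0;
}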
** -** The buffer indicated by *ppOut may only be considered valid until the +** The buffer returned in *ppOut is only valid until the ** next call to this function. */ -static int vdbeSorterIterRead( - sqlite3 *db, /* Database handle (for malloc) */ - VdbeSorterIter *p, /* Iterator */ +static int vdbePmaReadBlob( + PmaReader *p, /* PmaReader from which to take the blob */ int nByte, /* Bytes of data to read */ u8 **ppOut /* OUT: Pointer to buffer containing data */ ){ int iBuf; /* Offset within buffer to read from */ int nAvail; /* Bytes of data available in buffer */ + + if( p->aMap ){ + *ppOut = &p->aMap[p->iReadOff]; + p->iReadOff += nByte; + return SQLITE_OK; + } + assert( p->aBuffer ); /* If there is no more data to be read from the buffer, read the next @@ -201,8 +504,8 @@ static int vdbeSorterIterRead( } assert( nRead>0 ); - /* Read data from the file. Return early if an error occurs. */ - rc = sqlite3OsRead(p->pFile, p->aBuffer, nRead, p->iReadOff); + /* Readr data from the file. Return early if an error occurs. */ + rc = sqlite3OsRead(p->pFd, p->aBuffer, nRead, p->iReadOff); assert( rc!=SQLITE_IOERR_SHORT_READ ); if( rc!=SQLITE_OK ) return rc; } @@ -222,11 +525,13 @@ static int vdbeSorterIterRead( /* Extend the p->aAlloc[] allocation if required. */ if( p->nAlloc<nByte ){ - int nNew = p->nAlloc*2; + u8 *aNew; + int nNew = MAX(128, p->nAlloc*2); while( nByte>nNew ) nNew = nNew*2; - p->aAlloc = sqlite3DbReallocOrFree(db, p->aAlloc, nNew); - if( !p->aAlloc ) return SQLITE_NOMEM; + aNew = sqlite3Realloc(p->aAlloc, nNew); + if( !aNew ) return SQLITE_NOMEM; p->nAlloc = nNew; + p->aAlloc = aNew; } /* Copy as much data as is available in the buffer into the start of @@ -238,13 +543,13 @@ static int vdbeSorterIterRead( /* The following loop copies up to p->nBuffer bytes per iteration into ** the p->aAlloc[] buffer. */ while( nRem>0 ){ - int rc; /* vdbeSorterIterRead() return code */ + int rc; /* vdbePmaReadBlob() return code */ int nCopy; /* Number of bytes to copy */ u8 *aNext; /* Pointer to buffer to copy data from */ nCopy = nRem; if( nRem>p->nBuffer ) nCopy = p->nBuffer; - rc = vdbeSorterIterRead(db, p, nCopy, &aNext); + rc = vdbePmaReadBlob(p, nCopy, &aNext); if( rc!=SQLITE_OK ) return rc; assert( aNext!=p->aAlloc ); memcpy(&p->aAlloc[nByte - nRem], aNext, nCopy); @@ -261,108 +566,171 @@ static int vdbeSorterIterRead( ** Read a varint from the stream of data accessed by p. Set *pnOut to ** the value read. */ -static int vdbeSorterIterVarint(sqlite3 *db, VdbeSorterIter *p, u64 *pnOut){ +static int vdbePmaReadVarint(PmaReader *p, u64 *pnOut){ int iBuf; - iBuf = p->iReadOff % p->nBuffer; - if( iBuf && (p->nBuffer-iBuf)>=9 ){ - p->iReadOff += sqlite3GetVarint(&p->aBuffer[iBuf], pnOut); + if( p->aMap ){ + p->iReadOff += sqlite3GetVarint(&p->aMap[p->iReadOff], pnOut); }else{ - u8 aVarint[16], *a; - int i = 0, rc; - do{ - rc = vdbeSorterIterRead(db, p, 1, &a); - if( rc ) return rc; - aVarint[(i++)&0xf] = a[0]; - }while( (a[0]&0x80)!=0 ); - sqlite3GetVarint(aVarint, pnOut); + iBuf = p->iReadOff % p->nBuffer; + if( iBuf && (p->nBuffer-iBuf)>=9 ){ + p->iReadOff += sqlite3GetVarint(&p->aBuffer[iBuf], pnOut); + }else{ + u8 aVarint[16], *a; + int i = 0, rc; + do{ + rc = vdbePmaReadBlob(p, 1, &a); + if( rc ) return rc; + aVarint[(i++)&0xf] = a[0]; + }while( (a[0]&0x80)!=0 ); + sqlite3GetVarint(aVarint, pnOut); + } } return SQLITE_OK; } +/* +** Attempt to memory map file pFile. If successful, set *pp to point to the +** new mapping and return SQLITE_OK. 
If the mapping is not attempted +** (because the file is too large or the VFS layer is configured not to use +** mmap), return SQLITE_OK and set *pp to NULL. +** +** Or, if an error occurs, return an SQLite error code. The final value of +** *pp is undefined in this case. +*/ +static int vdbeSorterMapFile(SortSubtask *pTask, SorterFile *pFile, u8 **pp){ + int rc = SQLITE_OK; + if( pFile->iEof<=(i64)(pTask->pSorter->db->nMaxSorterMmap) ){ + rc = sqlite3OsFetch(pFile->pFd, 0, (int)pFile->iEof, (void**)pp); + testcase( rc!=SQLITE_OK ); + } + return rc; +} /* -** Advance iterator pIter to the next key in its PMA. Return SQLITE_OK if -** no error occurs, or an SQLite error code if one does. +** Attach PmaReader pReadr to file pFile (if it is not already attached to +** that file) and seek it to offset iOff within the file. Return SQLITE_OK +** if successful, or an SQLite error code if an error occurs. */ -static int vdbeSorterIterNext( - sqlite3 *db, /* Database handle (for sqlite3DbMalloc() ) */ - VdbeSorterIter *pIter /* Iterator to advance */ +static int vdbePmaReaderSeek( + SortSubtask *pTask, /* Task context */ + PmaReader *pReadr, /* Reader whose cursor is to be moved */ + SorterFile *pFile, /* Sorter file to read from */ + i64 iOff /* Offset in pFile */ ){ - int rc; /* Return Code */ + int rc = SQLITE_OK; + + assert( pReadr->pIncr==0 || pReadr->pIncr->bEof==0 ); + + if( sqlite3FaultSim(201) ) return SQLITE_IOERR_READ; + if( pReadr->aMap ){ + sqlite3OsUnfetch(pReadr->pFd, 0, pReadr->aMap); + pReadr->aMap = 0; + } + pReadr->iReadOff = iOff; + pReadr->iEof = pFile->iEof; + pReadr->pFd = pFile->pFd; + + rc = vdbeSorterMapFile(pTask, pFile, &pReadr->aMap); + if( rc==SQLITE_OK && pReadr->aMap==0 ){ + int pgsz = pTask->pSorter->pgsz; + int iBuf = pReadr->iReadOff % pgsz; + if( pReadr->aBuffer==0 ){ + pReadr->aBuffer = (u8*)sqlite3Malloc(pgsz); + if( pReadr->aBuffer==0 ) rc = SQLITE_NOMEM; + pReadr->nBuffer = pgsz; + } + if( rc==SQLITE_OK && iBuf ){ + int nRead = pgsz - iBuf; + if( (pReadr->iReadOff + nRead) > pReadr->iEof ){ + nRead = (int)(pReadr->iEof - pReadr->iReadOff); + } + rc = sqlite3OsRead( + pReadr->pFd, &pReadr->aBuffer[iBuf], nRead, pReadr->iReadOff + ); + testcase( rc!=SQLITE_OK ); + } + } + + return rc; +} + +/* +** Advance PmaReader pReadr to the next key in its PMA. Return SQLITE_OK if +** no error occurs, or an SQLite error code if one does. 
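[Editor's note] vdbePmaReaderSeek() above tries to memory map the whole temp file and only falls back to an allocated, page-sized read buffer when no mapping is available. A rough POSIX illustration of the same "map if possible, otherwise do a positioned read" decision; this calls mmap(2)/pread(2) directly, which is not how SQLite's VFS layer (sqlite3OsFetch/sqlite3OsRead) is structured.

#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/* Read the byte at offset off of fd, preferring a memory mapping. */
static int readByteAt(int fd, off_t off){
  struct stat st;
  unsigned char byte;
  if( fstat(fd, &st)!=0 || off>=st.st_size ) return -1;

  void *pMap = mmap(0, (size_t)st.st_size, PROT_READ, MAP_SHARED, fd, 0);
  if( pMap!=MAP_FAILED ){
    int ret = ((unsigned char*)pMap)[off];   /* fast path: direct access */
    munmap(pMap, (size_t)st.st_size);
    return ret;
  }
  /* Fallback path: an ordinary positioned read into a local buffer. */
  if( pread(fd, &byte, 1, off)!=1 ) return -1;
  return byte;
}

int main(void){
  FILE *f = tmpfile();
  fputs("abc", f);
  fflush(f);
  printf("first byte: %d\n", readByteAt(fileno(f), 0));   /* 97 == 'a' */
  fclose(f);
  return 0;
}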
+*/ +static int vdbePmaReaderNext(PmaReader *pReadr){ + int rc = SQLITE_OK; /* Return Code */ u64 nRec = 0; /* Size of record in bytes */ - if( pIter->iReadOff>=pIter->iEof ){ - /* This is an EOF condition */ - vdbeSorterIterZero(db, pIter); - return SQLITE_OK; + + if( pReadr->iReadOff>=pReadr->iEof ){ + IncrMerger *pIncr = pReadr->pIncr; + int bEof = 1; + if( pIncr ){ + rc = vdbeIncrSwap(pIncr); + if( rc==SQLITE_OK && pIncr->bEof==0 ){ + rc = vdbePmaReaderSeek( + pIncr->pTask, pReadr, &pIncr->aFile[0], pIncr->iStartOff + ); + bEof = 0; + } + } + + if( bEof ){ + /* This is an EOF condition */ + vdbePmaReaderClear(pReadr); + testcase( rc!=SQLITE_OK ); + return rc; + } } - rc = vdbeSorterIterVarint(db, pIter, &nRec); if( rc==SQLITE_OK ){ - pIter->nKey = (int)nRec; - rc = vdbeSorterIterRead(db, pIter, (int)nRec, &pIter->aKey); + rc = vdbePmaReadVarint(pReadr, &nRec); + } + if( rc==SQLITE_OK ){ + pReadr->nKey = (int)nRec; + rc = vdbePmaReadBlob(pReadr, (int)nRec, &pReadr->aKey); + testcase( rc!=SQLITE_OK ); } return rc; } /* -** Initialize iterator pIter to scan through the PMA stored in file pFile +** Initialize PmaReader pReadr to scan through the PMA stored in file pFile ** starting at offset iStart and ending at offset iEof-1. This function -** leaves the iterator pointing to the first key in the PMA (or EOF if the +** leaves the PmaReader pointing to the first key in the PMA (or EOF if the ** PMA is empty). +** +** If the pnByte parameter is NULL, then it is assumed that the file +** contains a single PMA, and that that PMA omits the initial length varint. */ -static int vdbeSorterIterInit( - sqlite3 *db, /* Database handle */ - const VdbeSorter *pSorter, /* Sorter object */ +static int vdbePmaReaderInit( + SortSubtask *pTask, /* Task context */ + SorterFile *pFile, /* Sorter file to read from */ i64 iStart, /* Start offset in pFile */ - VdbeSorterIter *pIter, /* Iterator to populate */ + PmaReader *pReadr, /* PmaReader to populate */ i64 *pnByte /* IN/OUT: Increment this value by PMA size */ ){ - int rc = SQLITE_OK; - int nBuf; - - nBuf = sqlite3BtreeGetPageSize(db->aDb[0].pBt); - - assert( pSorter->iWriteOff>iStart ); - assert( pIter->aAlloc==0 ); - assert( pIter->aBuffer==0 ); - pIter->pFile = pSorter->pTemp1; - pIter->iReadOff = iStart; - pIter->nAlloc = 128; - pIter->aAlloc = (u8 *)sqlite3DbMallocRaw(db, pIter->nAlloc); - pIter->nBuffer = nBuf; - pIter->aBuffer = (u8 *)sqlite3DbMallocRaw(db, nBuf); - - if( !pIter->aBuffer ){ - rc = SQLITE_NOMEM; - }else{ - int iBuf; + int rc; - iBuf = iStart % nBuf; - if( iBuf ){ - int nRead = nBuf - iBuf; - if( (iStart + nRead) > pSorter->iWriteOff ){ - nRead = (int)(pSorter->iWriteOff - iStart); - } - rc = sqlite3OsRead( - pSorter->pTemp1, &pIter->aBuffer[iBuf], nRead, iStart - ); - } + assert( pFile->iEof>iStart ); + assert( pReadr->aAlloc==0 && pReadr->nAlloc==0 ); + assert( pReadr->aBuffer==0 ); + assert( pReadr->aMap==0 ); - if( rc==SQLITE_OK ){ - u64 nByte; /* Size of PMA in bytes */ - pIter->iEof = pSorter->iWriteOff; - rc = vdbeSorterIterVarint(db, pIter, &nByte); - pIter->iEof = pIter->iReadOff + nByte; - *pnByte += nByte; - } + rc = vdbePmaReaderSeek(pTask, pReadr, pFile, iStart); + if( rc==SQLITE_OK ){ + u64 nByte; /* Size of PMA in bytes */ + rc = vdbePmaReadVarint(pReadr, &nByte); + pReadr->iEof = pReadr->iReadOff + nByte; + *pnByte += nByte; } if( rc==SQLITE_OK ){ - rc = vdbeSorterIterNext(db, pIter); + rc = vdbePmaReaderNext(pReadr); } return rc; } @@ -370,135 +738,330 @@ static int vdbeSorterIterInit( /* ** Compare key1 (buffer 
pKey1, size nKey1 bytes) with key2 (buffer pKey2, -** size nKey2 bytes). Argument pKeyInfo supplies the collation functions -** used by the comparison. If an error occurs, return an SQLite error code. -** Otherwise, return SQLITE_OK and set *pRes to a negative, zero or positive -** value, depending on whether key1 is smaller, equal to or larger than key2. -** -** If the bOmitRowid argument is non-zero, assume both keys end in a rowid -** field. For the purposes of the comparison, ignore it. Also, if bOmitRowid -** is true and key1 contains even a single NULL value, it is considered to -** be less than key2. Even if key2 also contains NULL values. -** -** If pKey2 is passed a NULL pointer, then it is assumed that the pCsr->aSpace -** has been allocated and contains an unpacked record that is used as key2. -*/ -static void vdbeSorterCompare( - const VdbeCursor *pCsr, /* Cursor object (for pKeyInfo) */ - int nKeyCol, /* Num of columns. 0 means "all" */ +** size nKey2 bytes). Use (pTask->pKeyInfo) for the collation sequences +** used by the comparison. Return the result of the comparison. +** +** Before returning, object (pTask->pUnpacked) is populated with the +** unpacked version of key2. Or, if pKey2 is passed a NULL pointer, then it +** is assumed that the (pTask->pUnpacked) structure already contains the +** unpacked key to use as key2. +** +** If an OOM error is encountered, (pTask->pUnpacked->error_rc) is set +** to SQLITE_NOMEM. +*/ +static int vdbeSorterCompare( + SortSubtask *pTask, /* Subtask context (for pKeyInfo) */ const void *pKey1, int nKey1, /* Left side of comparison */ - const void *pKey2, int nKey2, /* Right side of comparison */ - int *pRes /* OUT: Result of comparison */ + const void *pKey2, int nKey2 /* Right side of comparison */ ){ - KeyInfo *pKeyInfo = pCsr->pKeyInfo; - VdbeSorter *pSorter = pCsr->pSorter; - UnpackedRecord *r2 = pSorter->pUnpacked; - int i; - + UnpackedRecord *r2 = pTask->pUnpacked; if( pKey2 ){ - sqlite3VdbeRecordUnpack(pKeyInfo, nKey2, pKey2, r2); + sqlite3VdbeRecordUnpack(pTask->pSorter->pKeyInfo, nKey2, pKey2, r2); } + return sqlite3VdbeRecordCompare(nKey1, pKey1, r2, 0); +} - if( nKeyCol ){ - r2->nField = nKeyCol; - for(i=0; i<nKeyCol; i++){ - if( r2->aMem[i].flags & MEM_Null ){ - *pRes = -1; - return; +/* +** Initialize the temporary index cursor just opened as a sorter cursor. +** +** Usually, the sorter module uses the value of (pCsr->pKeyInfo->nField) +** to determine the number of fields that should be compared from the +** records being sorted. However, if the value passed as argument nField +** is non-zero and the sorter is able to guarantee a stable sort, nField +** is used instead. This is used when sorting records for a CREATE INDEX +** statement. In this case, keys are always delivered to the sorter in +** order of the primary key, which happens to be make up the final part +** of the records being sorted. So if the sort is stable, there is never +** any reason to compare PK fields and they can be ignored for a small +** performance boost. +** +** The sorter can guarantee a stable sort when running in single-threaded +** mode, but not in multi-threaded mode. +** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. 
+*/ +int sqlite3VdbeSorterInit( + sqlite3 *db, /* Database connection (for malloc()) */ + int nField, /* Number of key fields in each record */ + VdbeCursor *pCsr /* Cursor that holds the new sorter */ +){ + int pgsz; /* Page size of main database */ + int i; /* Used to iterate through aTask[] */ + int mxCache; /* Cache size */ + VdbeSorter *pSorter; /* The new sorter */ + KeyInfo *pKeyInfo; /* Copy of pCsr->pKeyInfo with db==0 */ + int szKeyInfo; /* Size of pCsr->pKeyInfo in bytes */ + int sz; /* Size of pSorter in bytes */ + int rc = SQLITE_OK; +#if SQLITE_MAX_WORKER_THREADS==0 +# define nWorker 0 +#else + int nWorker; +#endif + + /* Initialize the upper limit on the number of worker threads */ +#if SQLITE_MAX_WORKER_THREADS>0 + if( sqlite3TempInMemory(db) || sqlite3GlobalConfig.bCoreMutex==0 ){ + nWorker = 0; + }else{ + nWorker = db->aLimit[SQLITE_LIMIT_WORKER_THREADS]; + } +#endif + + /* Do not allow the total number of threads (main thread + all workers) + ** to exceed the maximum merge count */ +#if SQLITE_MAX_WORKER_THREADS>=SORTER_MAX_MERGE_COUNT + if( nWorker>=SORTER_MAX_MERGE_COUNT ){ + nWorker = SORTER_MAX_MERGE_COUNT-1; + } +#endif + + assert( pCsr->pKeyInfo && pCsr->pBt==0 ); + szKeyInfo = sizeof(KeyInfo) + (pCsr->pKeyInfo->nField-1)*sizeof(CollSeq*); + sz = sizeof(VdbeSorter) + nWorker * sizeof(SortSubtask); + + pSorter = (VdbeSorter*)sqlite3DbMallocZero(db, sz + szKeyInfo); + pCsr->pSorter = pSorter; + if( pSorter==0 ){ + rc = SQLITE_NOMEM; + }else{ + pSorter->pKeyInfo = pKeyInfo = (KeyInfo*)((u8*)pSorter + sz); + memcpy(pKeyInfo, pCsr->pKeyInfo, szKeyInfo); + pKeyInfo->db = 0; + if( nField && nWorker==0 ) pKeyInfo->nField = nField; + pSorter->pgsz = pgsz = sqlite3BtreeGetPageSize(db->aDb[0].pBt); + pSorter->nTask = nWorker + 1; + pSorter->bUseThreads = (pSorter->nTask>1); + pSorter->db = db; + for(i=0; i<pSorter->nTask; i++){ + SortSubtask *pTask = &pSorter->aTask[i]; + pTask->pSorter = pSorter; + } + + if( !sqlite3TempInMemory(db) ){ + pSorter->mnPmaSize = SORTER_MIN_WORKING * pgsz; + mxCache = db->aDb[0].pSchema->cache_size; + if( mxCache<SORTER_MIN_WORKING ) mxCache = SORTER_MIN_WORKING; + pSorter->mxPmaSize = mxCache * pgsz; + + /* If the application has not configure scratch memory using + ** SQLITE_CONFIG_SCRATCH then we assume it is OK to do large memory + ** allocations. If scratch memory has been configured, then assume + ** large memory allocations should be avoided to prevent heap + ** fragmentation. + */ + if( sqlite3GlobalConfig.pScratch==0 ){ + assert( pSorter->iMemory==0 ); + pSorter->nMemory = pgsz; + pSorter->list.aMemory = (u8*)sqlite3Malloc(pgsz); + if( !pSorter->list.aMemory ) rc = SQLITE_NOMEM; } } - assert( r2->default_rc==0 ); } - *pRes = sqlite3VdbeRecordCompare(nKey1, pKey1, r2, 0); + return rc; } +#undef nWorker /* Defined at the top of this function */ /* -** This function is called to compare two iterator keys when merging -** multiple b-tree segments. Parameter iOut is the index of the aTree[] -** value to recalculate. +** Free the list of sorted records starting at pRecord. 
*/ -static int vdbeSorterDoCompare(const VdbeCursor *pCsr, int iOut){ - VdbeSorter *pSorter = pCsr->pSorter; - int i1; - int i2; - int iRes; - VdbeSorterIter *p1; - VdbeSorterIter *p2; - - assert( iOut<pSorter->nTree && iOut>0 ); +static void vdbeSorterRecordFree(sqlite3 *db, SorterRecord *pRecord){ + SorterRecord *p; + SorterRecord *pNext; + for(p=pRecord; p; p=pNext){ + pNext = p->u.pNext; + sqlite3DbFree(db, p); + } +} - if( iOut>=(pSorter->nTree/2) ){ - i1 = (iOut - pSorter->nTree/2) * 2; - i2 = i1 + 1; - }else{ - i1 = pSorter->aTree[iOut*2]; - i2 = pSorter->aTree[iOut*2+1]; +/* +** Free all resources owned by the object indicated by argument pTask. All +** fields of *pTask are zeroed before returning. +*/ +static void vdbeSortSubtaskCleanup(sqlite3 *db, SortSubtask *pTask){ + sqlite3DbFree(db, pTask->pUnpacked); + pTask->pUnpacked = 0; +#if SQLITE_MAX_WORKER_THREADS>0 + /* pTask->list.aMemory can only be non-zero if it was handed memory + ** from the main thread. That only occurs SQLITE_MAX_WORKER_THREADS>0 */ + if( pTask->list.aMemory ){ + sqlite3_free(pTask->list.aMemory); + pTask->list.aMemory = 0; + }else +#endif + { + assert( pTask->list.aMemory==0 ); + vdbeSorterRecordFree(0, pTask->list.pList); + } + pTask->list.pList = 0; + if( pTask->file.pFd ){ + sqlite3OsCloseFree(pTask->file.pFd); + pTask->file.pFd = 0; + pTask->file.iEof = 0; + } + if( pTask->file2.pFd ){ + sqlite3OsCloseFree(pTask->file2.pFd); + pTask->file2.pFd = 0; + pTask->file2.iEof = 0; } +} - p1 = &pSorter->aIter[i1]; - p2 = &pSorter->aIter[i2]; +#ifdef SQLITE_DEBUG_SORTER_THREADS +static void vdbeSorterWorkDebug(SortSubtask *pTask, const char *zEvent){ + i64 t; + int iTask = (pTask - pTask->pSorter->aTask); + sqlite3OsCurrentTimeInt64(pTask->pSorter->db->pVfs, &t); + fprintf(stderr, "%lld:%d %s\n", t, iTask, zEvent); +} +static void vdbeSorterRewindDebug(const char *zEvent){ + i64 t; + sqlite3OsCurrentTimeInt64(sqlite3_vfs_find(0), &t); + fprintf(stderr, "%lld:X %s\n", t, zEvent); +} +static void vdbeSorterPopulateDebug( + SortSubtask *pTask, + const char *zEvent +){ + i64 t; + int iTask = (pTask - pTask->pSorter->aTask); + sqlite3OsCurrentTimeInt64(pTask->pSorter->db->pVfs, &t); + fprintf(stderr, "%lld:bg%d %s\n", t, iTask, zEvent); +} +static void vdbeSorterBlockDebug( + SortSubtask *pTask, + int bBlocked, + const char *zEvent +){ + if( bBlocked ){ + i64 t; + sqlite3OsCurrentTimeInt64(pTask->pSorter->db->pVfs, &t); + fprintf(stderr, "%lld:main %s\n", t, zEvent); + } +} +#else +# define vdbeSorterWorkDebug(x,y) +# define vdbeSorterRewindDebug(y) +# define vdbeSorterPopulateDebug(x,y) +# define vdbeSorterBlockDebug(x,y,z) +#endif - if( p1->pFile==0 ){ - iRes = i2; - }else if( p2->pFile==0 ){ - iRes = i1; - }else{ - int res; - assert( pCsr->pSorter->pUnpacked!=0 ); /* allocated in vdbeSorterMerge() */ - vdbeSorterCompare( - pCsr, 0, p1->aKey, p1->nKey, p2->aKey, p2->nKey, &res - ); - if( res<=0 ){ - iRes = i1; - }else{ - iRes = i2; - } +#if SQLITE_MAX_WORKER_THREADS>0 +/* +** Join thread pTask->thread. 
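[Editor's note] The thread helpers that follow return an SQLite error code from the worker by smuggling it through the thread's void* exit value (see the SQLITE_INT_TO_PTR()/SQLITE_PTR_TO_INT() calls below). Here is a small pthread sketch of the same round trip; the macro names, RC_OK/RC_ERROR and runAndJoin() are illustrative, not SQLite's threads.c wrapper. Build with cc -pthread.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define RC_OK    0
#define RC_ERROR 1

/* Encode an int status as a void* and back. */
#define INT_TO_PTR(x) ((void*)(intptr_t)(x))
#define PTR_TO_INT(p) ((int)(intptr_t)(p))

/* A worker that "fails" when handed a nonzero argument. */
static void *worker(void *pArg){
  int bFail = PTR_TO_INT(pArg);
  return INT_TO_PTR(bFail ? RC_ERROR : RC_OK);
}

/* Launch the worker, join it, and recover its status code. */
static int runAndJoin(int bFail){
  pthread_t thr;
  void *pRet = INT_TO_PTR(RC_ERROR);   /* assume failure if create fails */
  if( pthread_create(&thr, 0, worker, INT_TO_PTR(bFail))==0 ){
    pthread_join(thr, &pRet);
  }
  return PTR_TO_INT(pRet);
}

int main(void){
  printf("ok run:   %d\n", runAndJoin(0));   /* prints 0 */
  printf("fail run: %d\n", runAndJoin(1));   /* prints 1 */
  return 0;
}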
+*/ +static int vdbeSorterJoinThread(SortSubtask *pTask){ + int rc = SQLITE_OK; + if( pTask->pThread ){ +#ifdef SQLITE_DEBUG_SORTER_THREADS + int bDone = pTask->bDone; +#endif + void *pRet = SQLITE_INT_TO_PTR(SQLITE_ERROR); + vdbeSorterBlockDebug(pTask, !bDone, "enter"); + (void)sqlite3ThreadJoin(pTask->pThread, &pRet); + vdbeSorterBlockDebug(pTask, !bDone, "exit"); + rc = SQLITE_PTR_TO_INT(pRet); + assert( pTask->bDone==1 ); + pTask->bDone = 0; + pTask->pThread = 0; } + return rc; +} - pSorter->aTree[iOut] = iRes; - return SQLITE_OK; +/* +** Launch a background thread to run xTask(pIn). +*/ +static int vdbeSorterCreateThread( + SortSubtask *pTask, /* Thread will use this task object */ + void *(*xTask)(void*), /* Routine to run in a separate thread */ + void *pIn /* Argument passed into xTask() */ +){ + assert( pTask->pThread==0 && pTask->bDone==0 ); + return sqlite3ThreadCreate(&pTask->pThread, xTask, pIn); } /* -** Initialize the temporary index cursor just opened as a sorter cursor. +** Join all outstanding threads launched by SorterWrite() to create +** level-0 PMAs. */ -int sqlite3VdbeSorterInit(sqlite3 *db, VdbeCursor *pCsr){ - int pgsz; /* Page size of main database */ - int mxCache; /* Cache size */ - VdbeSorter *pSorter; /* The new sorter */ - char *d; /* Dummy */ +static int vdbeSorterJoinAll(VdbeSorter *pSorter, int rcin){ + int rc = rcin; + int i; - assert( pCsr->pKeyInfo && pCsr->pBt==0 ); - pCsr->pSorter = pSorter = sqlite3DbMallocZero(db, sizeof(VdbeSorter)); - if( pSorter==0 ){ - return SQLITE_NOMEM; + /* This function is always called by the main user thread. + ** + ** If this function is being called after SorterRewind() has been called, + ** it is possible that thread pSorter->aTask[pSorter->nTask-1].pThread + ** is currently attempt to join one of the other threads. To avoid a race + ** condition where this thread also attempts to join the same object, join + ** thread pSorter->aTask[pSorter->nTask-1].pThread first. */ + for(i=pSorter->nTask-1; i>=0; i--){ + SortSubtask *pTask = &pSorter->aTask[i]; + int rc2 = vdbeSorterJoinThread(pTask); + if( rc==SQLITE_OK ) rc = rc2; } - - pSorter->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pCsr->pKeyInfo, 0, 0, &d); - if( pSorter->pUnpacked==0 ) return SQLITE_NOMEM; - assert( pSorter->pUnpacked==(UnpackedRecord *)d ); + return rc; +} +#else +# define vdbeSorterJoinAll(x,rcin) (rcin) +# define vdbeSorterJoinThread(pTask) SQLITE_OK +#endif - if( !sqlite3TempInMemory(db) ){ - pgsz = sqlite3BtreeGetPageSize(db->aDb[0].pBt); - pSorter->mnPmaSize = SORTER_MIN_WORKING * pgsz; - mxCache = db->aDb[0].pSchema->cache_size; - if( mxCache<SORTER_MIN_WORKING ) mxCache = SORTER_MIN_WORKING; - pSorter->mxPmaSize = mxCache * pgsz; +/* +** Allocate a new MergeEngine object capable of handling up to +** nReader PmaReader inputs. +** +** nReader is automatically rounded up to the next power of two. +** nReader may not exceed SORTER_MAX_MERGE_COUNT even after rounding up. +*/ +static MergeEngine *vdbeMergeEngineNew(int nReader){ + int N = 2; /* Smallest power of two >= nReader */ + int nByte; /* Total bytes of space to allocate */ + MergeEngine *pNew; /* Pointer to allocated object to return */ + + assert( nReader<=SORTER_MAX_MERGE_COUNT ); + + while( N<nReader ) N += N; + nByte = sizeof(MergeEngine) + N * (sizeof(int) + sizeof(PmaReader)); + + pNew = sqlite3FaultSim(100) ? 
0 : (MergeEngine*)sqlite3MallocZero(nByte); + if( pNew ){ + pNew->nTree = N; + pNew->pTask = 0; + pNew->aReadr = (PmaReader*)&pNew[1]; + pNew->aTree = (int*)&pNew->aReadr[N]; } + return pNew; +} - return SQLITE_OK; +/* +** Free the MergeEngine object passed as the only argument. +*/ +static void vdbeMergeEngineFree(MergeEngine *pMerger){ + int i; + if( pMerger ){ + for(i=0; i<pMerger->nTree; i++){ + vdbePmaReaderClear(&pMerger->aReadr[i]); + } + } + sqlite3_free(pMerger); } /* -** Free the list of sorted records starting at pRecord. +** Free all resources associated with the IncrMerger object indicated by +** the first argument. */ -static void vdbeSorterRecordFree(sqlite3 *db, SorterRecord *pRecord){ - SorterRecord *p; - SorterRecord *pNext; - for(p=pRecord; p; p=pNext){ - pNext = p->pNext; - sqlite3DbFree(db, p); +static void vdbeIncrFree(IncrMerger *pIncr){ + if( pIncr ){ +#if SQLITE_MAX_WORKER_THREADS>0 + if( pIncr->bUseThread ){ + vdbeSorterJoinThread(pIncr->pTask); + if( pIncr->aFile[0].pFd ) sqlite3OsCloseFree(pIncr->aFile[0].pFd); + if( pIncr->aFile[1].pFd ) sqlite3OsCloseFree(pIncr->aFile[1].pFd); + } +#endif + vdbeMergeEngineFree(pIncr->pMerger); + sqlite3_free(pIncr); } } @@ -506,29 +1069,34 @@ static void vdbeSorterRecordFree(sqlite3 *db, SorterRecord *pRecord){ ** Reset a sorting cursor back to its original empty state. */ void sqlite3VdbeSorterReset(sqlite3 *db, VdbeSorter *pSorter){ - if( pSorter->aIter ){ - int i; - for(i=0; i<pSorter->nTree; i++){ - vdbeSorterIterZero(db, &pSorter->aIter[i]); - } - sqlite3DbFree(db, pSorter->aIter); - pSorter->aIter = 0; - } - if( pSorter->pTemp1 ){ - sqlite3OsCloseFree(pSorter->pTemp1); - pSorter->pTemp1 = 0; - } - vdbeSorterRecordFree(db, pSorter->pRecord); - pSorter->pRecord = 0; - pSorter->iWriteOff = 0; - pSorter->iReadOff = 0; - pSorter->nInMemory = 0; - pSorter->nTree = 0; - pSorter->nPMA = 0; - pSorter->aTree = 0; + int i; + (void)vdbeSorterJoinAll(pSorter, SQLITE_OK); + assert( pSorter->bUseThreads || pSorter->pReader==0 ); +#if SQLITE_MAX_WORKER_THREADS>0 + if( pSorter->pReader ){ + vdbePmaReaderClear(pSorter->pReader); + sqlite3DbFree(db, pSorter->pReader); + pSorter->pReader = 0; + } +#endif + vdbeMergeEngineFree(pSorter->pMerger); + pSorter->pMerger = 0; + for(i=0; i<pSorter->nTask; i++){ + SortSubtask *pTask = &pSorter->aTask[i]; + vdbeSortSubtaskCleanup(db, pTask); + } + if( pSorter->list.aMemory==0 ){ + vdbeSorterRecordFree(0, pSorter->list.pList); + } + pSorter->list.pList = 0; + pSorter->list.szPMA = 0; + pSorter->bUsePMA = 0; + pSorter->iMemory = 0; + pSorter->mxKeysize = 0; + sqlite3DbFree(db, pSorter->pUnpacked); + pSorter->pUnpacked = 0; } - /* ** Free any cursor components allocated by sqlite3VdbeSorterXXX routines. */ @@ -536,54 +1104,110 @@ void sqlite3VdbeSorterClose(sqlite3 *db, VdbeCursor *pCsr){ VdbeSorter *pSorter = pCsr->pSorter; if( pSorter ){ sqlite3VdbeSorterReset(db, pSorter); - sqlite3DbFree(db, pSorter->pUnpacked); + sqlite3_free(pSorter->list.aMemory); sqlite3DbFree(db, pSorter); pCsr->pSorter = 0; } } +#if SQLITE_MAX_MMAP_SIZE>0 +/* +** The first argument is a file-handle open on a temporary file. The file +** is guaranteed to be nByte bytes or smaller in size. This function +** attempts to extend the file to nByte bytes in size and to ensure that +** the VFS has memory mapped it. +** +** Whether or not the file does end up memory mapped of course depends on +** the specific VFS implementation. 
+*/ +static void vdbeSorterExtendFile(sqlite3 *db, sqlite3_file *pFd, i64 nByte){ + if( nByte<=(i64)(db->nMaxSorterMmap) ){ + int rc = sqlite3OsTruncate(pFd, nByte); + if( rc==SQLITE_OK ){ + void *p = 0; + sqlite3OsFetch(pFd, 0, (int)nByte, &p); + sqlite3OsUnfetch(pFd, 0, p); + } + } +} +#else +# define vdbeSorterExtendFile(x,y,z) +#endif + /* ** Allocate space for a file-handle and open a temporary file. If successful, -** set *ppFile to point to the malloc'd file-handle and return SQLITE_OK. -** Otherwise, set *ppFile to 0 and return an SQLite error code. +** set *ppFd to point to the malloc'd file-handle and return SQLITE_OK. +** Otherwise, set *ppFd to 0 and return an SQLite error code. */ -static int vdbeSorterOpenTempFile(sqlite3 *db, sqlite3_file **ppFile){ - int dummy; - return sqlite3OsOpenMalloc(db->pVfs, 0, ppFile, +static int vdbeSorterOpenTempFile( + sqlite3 *db, /* Database handle doing sort */ + i64 nExtend, /* Attempt to extend file to this size */ + sqlite3_file **ppFd +){ + int rc; + rc = sqlite3OsOpenMalloc(db->pVfs, 0, ppFd, SQLITE_OPEN_TEMP_JOURNAL | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | - SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE, &dummy + SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE, &rc ); + if( rc==SQLITE_OK ){ + i64 max = SQLITE_MAX_MMAP_SIZE; + sqlite3OsFileControlHint(*ppFd, SQLITE_FCNTL_MMAP_SIZE, (void*)&max); + if( nExtend>0 ){ + vdbeSorterExtendFile(db, *ppFd, nExtend); + } + } + return rc; +} + +/* +** If it has not already been allocated, allocate the UnpackedRecord +** structure at pTask->pUnpacked. Return SQLITE_OK if successful (or +** if no allocation was required), or SQLITE_NOMEM otherwise. +*/ +static int vdbeSortAllocUnpacked(SortSubtask *pTask){ + if( pTask->pUnpacked==0 ){ + char *pFree; + pTask->pUnpacked = sqlite3VdbeAllocUnpackedRecord( + pTask->pSorter->pKeyInfo, 0, 0, &pFree + ); + assert( pTask->pUnpacked==(UnpackedRecord*)pFree ); + if( pFree==0 ) return SQLITE_NOMEM; + pTask->pUnpacked->nField = pTask->pSorter->pKeyInfo->nField; + pTask->pUnpacked->errCode = 0; + } + return SQLITE_OK; } + /* ** Merge the two sorted lists p1 and p2 into a single list. ** Set *ppOut to the head of the new list. */ static void vdbeSorterMerge( - const VdbeCursor *pCsr, /* For pKeyInfo */ + SortSubtask *pTask, /* Calling thread context */ SorterRecord *p1, /* First list to merge */ SorterRecord *p2, /* Second list to merge */ SorterRecord **ppOut /* OUT: Head of merged list */ ){ SorterRecord *pFinal = 0; SorterRecord **pp = &pFinal; - void *pVal2 = p2 ? p2->pVal : 0; + void *pVal2 = p2 ? SRVAL(p2) : 0; while( p1 && p2 ){ int res; - vdbeSorterCompare(pCsr, 0, p1->pVal, p1->nVal, pVal2, p2->nVal, &res); + res = vdbeSorterCompare(pTask, SRVAL(p1), p1->nVal, pVal2, p2->nVal); if( res<=0 ){ *pp = p1; - pp = &p1->pNext; - p1 = p1->pNext; + pp = &p1->u.pNext; + p1 = p1->u.pNext; pVal2 = 0; }else{ *pp = p2; - pp = &p2->pNext; - p2 = p2->pNext; + pp = &p2->u.pNext; + p2 = p2->u.pNext; if( p2==0 ) break; - pVal2 = p2->pVal; + pVal2 = SRVAL(p2); } } *pp = p1 ? p1 : p2; @@ -591,27 +1215,41 @@ static void vdbeSorterMerge( } /* -** Sort the linked list of records headed at pCsr->pRecord. Return SQLITE_OK -** if successful, or an SQLite error code (i.e. SQLITE_NOMEM) if an error -** occurs. +** Sort the linked list of records headed at pTask->pList. Return +** SQLITE_OK if successful, or an SQLite error code (i.e. SQLITE_NOMEM) if +** an error occurs. 
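[Editor's note] vdbeSorterMerge() above is the standard "merge two sorted singly linked lists by splicing" routine, with one refinement: it passes a NULL key2 pointer while the right-hand head is unchanged so that vdbeSorterCompare() can reuse the already-unpacked key. Below is a plain version of the splice itself, without the key caching; Node and listMerge() are illustrative types, not SQLite's.

#include <stdio.h>

typedef struct Node { int val; struct Node *pNext; } Node;

/* Merge two ascending lists into one ascending list, reusing the nodes. */
static Node *listMerge(Node *p1, Node *p2){
  Node *pFinal = 0;
  Node **pp = &pFinal;        /* where the next smallest node is linked in */
  while( p1 && p2 ){
    if( p1->val <= p2->val ){
      *pp = p1; pp = &p1->pNext; p1 = p1->pNext;
    }else{
      *pp = p2; pp = &p2->pNext; p2 = p2->pNext;
    }
  }
  *pp = p1 ? p1 : p2;         /* append whichever list is not yet empty */
  return pFinal;
}

int main(void){
  Node a2={5,0}, a1={3,&a2}, a0={1,&a1};      /* 1 -> 3 -> 5 */
  Node b2={6,0}, b1={4,&b2}, b0={2,&b1};      /* 2 -> 4 -> 6 */
  for(Node *p=listMerge(&a0,&b0); p; p=p->pNext) printf("%d ", p->val);
  printf("\n");                               /* prints: 1 2 3 4 5 6 */
  return 0;
}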
*/ -static int vdbeSorterSort(const VdbeCursor *pCsr){ +static int vdbeSorterSort(SortSubtask *pTask, SorterList *pList){ int i; SorterRecord **aSlot; SorterRecord *p; - VdbeSorter *pSorter = pCsr->pSorter; + int rc; + + rc = vdbeSortAllocUnpacked(pTask); + if( rc!=SQLITE_OK ) return rc; aSlot = (SorterRecord **)sqlite3MallocZero(64 * sizeof(SorterRecord *)); if( !aSlot ){ return SQLITE_NOMEM; } - p = pSorter->pRecord; + p = pList->pList; while( p ){ - SorterRecord *pNext = p->pNext; - p->pNext = 0; + SorterRecord *pNext; + if( pList->aMemory ){ + if( (u8*)p==pList->aMemory ){ + pNext = 0; + }else{ + assert( p->u.iNext<sqlite3MallocSize(pList->aMemory) ); + pNext = (SorterRecord*)&pList->aMemory[p->u.iNext]; + } + }else{ + pNext = p->u.pNext; + } + + p->u.pNext = 0; for(i=0; aSlot[i]; i++){ - vdbeSorterMerge(pCsr, p, aSlot[i], &p); + vdbeSorterMerge(pTask, p, aSlot[i], &p); aSlot[i] = 0; } aSlot[i] = p; @@ -620,42 +1258,43 @@ static int vdbeSorterSort(const VdbeCursor *pCsr){ p = 0; for(i=0; i<64; i++){ - vdbeSorterMerge(pCsr, p, aSlot[i], &p); + vdbeSorterMerge(pTask, p, aSlot[i], &p); } - pSorter->pRecord = p; + pList->pList = p; sqlite3_free(aSlot); - return SQLITE_OK; + assert( pTask->pUnpacked->errCode==SQLITE_OK + || pTask->pUnpacked->errCode==SQLITE_NOMEM + ); + return pTask->pUnpacked->errCode; } /* -** Initialize a file-writer object. +** Initialize a PMA-writer object. */ -static void fileWriterInit( - sqlite3 *db, /* Database (for malloc) */ - sqlite3_file *pFile, /* File to write to */ - FileWriter *p, /* Object to populate */ - i64 iStart /* Offset of pFile to begin writing at */ +static void vdbePmaWriterInit( + sqlite3_file *pFd, /* File handle to write to */ + PmaWriter *p, /* Object to populate */ + int nBuf, /* Buffer size */ + i64 iStart /* Offset of pFd to begin writing at */ ){ - int nBuf = sqlite3BtreeGetPageSize(db->aDb[0].pBt); - - memset(p, 0, sizeof(FileWriter)); - p->aBuffer = (u8 *)sqlite3DbMallocRaw(db, nBuf); + memset(p, 0, sizeof(PmaWriter)); + p->aBuffer = (u8*)sqlite3Malloc(nBuf); if( !p->aBuffer ){ p->eFWErr = SQLITE_NOMEM; }else{ p->iBufEnd = p->iBufStart = (iStart % nBuf); p->iWriteOff = iStart - p->iBufStart; p->nBuffer = nBuf; - p->pFile = pFile; + p->pFd = pFd; } } /* -** Write nData bytes of data to the file-write object. Return SQLITE_OK +** Write nData bytes of data to the PMA. Return SQLITE_OK ** if successful, or an SQLite error code if an error occurs. */ -static void fileWriterWrite(FileWriter *p, u8 *pData, int nData){ +static void vdbePmaWriteBlob(PmaWriter *p, u8 *pData, int nData){ int nRem = nData; while( nRem>0 && p->eFWErr==0 ){ int nCopy = nRem; @@ -666,7 +1305,7 @@ static void fileWriterWrite(FileWriter *p, u8 *pData, int nData){ memcpy(&p->aBuffer[p->iBufEnd], &pData[nData-nRem], nCopy); p->iBufEnd += nCopy; if( p->iBufEnd==p->nBuffer ){ - p->eFWErr = sqlite3OsWrite(p->pFile, + p->eFWErr = sqlite3OsWrite(p->pFd, &p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart, p->iWriteOff + p->iBufStart ); @@ -680,43 +1319,44 @@ static void fileWriterWrite(FileWriter *p, u8 *pData, int nData){ } /* -** Flush any buffered data to disk and clean up the file-writer object. -** The results of using the file-writer after this call are undefined. +** Flush any buffered data to disk and clean up the PMA-writer object. +** The results of using the PMA-writer after this call are undefined. ** Return SQLITE_OK if flushing the buffered data succeeds or is not ** required. Otherwise, return an SQLite error code. 
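[Editor's note] vdbeSorterSort() above is a bottom-up merge sort driven by a 64-entry slot array: slot i holds a sorted sublist of roughly 2^i records, and inserting each new one-element list "carries" upward like binary addition, merging as it goes. A standalone sketch of that technique on a simple integer list follows; Node, merge2() and listSort() are illustrative names, not SQLite code.

#include <stdio.h>

typedef struct Node { int val; struct Node *pNext; } Node;

static Node *merge2(Node *p1, Node *p2){
  Node *pOut = 0, **pp = &pOut;
  while( p1 && p2 ){
    if( p1->val <= p2->val ){ *pp = p1; pp = &p1->pNext; p1 = p1->pNext; }
    else                    { *pp = p2; pp = &p2->pNext; p2 = p2->pNext; }
  }
  *pp = p1 ? p1 : p2;
  return pOut;
}

/* Bottom-up merge sort: aSlot[i] holds a sorted run of about 2^i nodes.
** Each incoming node is merged into slot 0; a carry propagates upward
** whenever a slot is already occupied, exactly like binary addition. */
static Node *listSort(Node *pList){
  Node *aSlot[64] = {0};
  Node *p = pList;
  while( p ){
    Node *pNext = p->pNext;
    int i;
    p->pNext = 0;
    for(i=0; aSlot[i]; i++){
      p = merge2(p, aSlot[i]);
      aSlot[i] = 0;
    }
    aSlot[i] = p;
    p = pNext;
  }
  p = 0;
  for(int i=0; i<64; i++) p = merge2(p, aSlot[i]);
  return p;
}

int main(void){
  Node a[8];
  for(int i=0; i<8; i++){ a[i].val = (7*i+3)%10; a[i].pNext = (i<7 ? &a[i+1] : 0); }
  for(Node *p=listSort(a); p; p=p->pNext) printf("%d ", p->val);
  printf("\n");               /* prints: 0 1 2 3 4 5 7 8 */
  return 0;
}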
** ** Before returning, set *piEof to the offset immediately following the ** last byte written to the file. */ -static int fileWriterFinish(sqlite3 *db, FileWriter *p, i64 *piEof){ +static int vdbePmaWriterFinish(PmaWriter *p, i64 *piEof){ int rc; if( p->eFWErr==0 && ALWAYS(p->aBuffer) && p->iBufEnd>p->iBufStart ){ - p->eFWErr = sqlite3OsWrite(p->pFile, + p->eFWErr = sqlite3OsWrite(p->pFd, &p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart, p->iWriteOff + p->iBufStart ); } *piEof = (p->iWriteOff + p->iBufEnd); - sqlite3DbFree(db, p->aBuffer); + sqlite3_free(p->aBuffer); rc = p->eFWErr; - memset(p, 0, sizeof(FileWriter)); + memset(p, 0, sizeof(PmaWriter)); return rc; } /* -** Write value iVal encoded as a varint to the file-write object. Return +** Write value iVal encoded as a varint to the PMA. Return ** SQLITE_OK if successful, or an SQLite error code if an error occurs. */ -static void fileWriterWriteVarint(FileWriter *p, u64 iVal){ +static void vdbePmaWriteVarint(PmaWriter *p, u64 iVal){ int nByte; u8 aByte[10]; nByte = sqlite3PutVarint(aByte, iVal); - fileWriterWrite(p, aByte, nByte); + vdbePmaWriteBlob(p, aByte, nByte); } /* -** Write the current contents of the in-memory linked-list to a PMA. Return -** SQLITE_OK if successful, or an SQLite error code otherwise. +** Write the current contents of in-memory linked-list pList to a level-0 +** PMA in the temp file belonging to sub-task pTask. Return SQLITE_OK if +** successful, or an SQLite error code otherwise. ** ** The format of a PMA is: ** @@ -727,76 +1367,246 @@ static void fileWriterWriteVarint(FileWriter *p, u64 iVal){ ** Each record consists of a varint followed by a blob of data (the ** key). The varint is the number of bytes in the blob of data. */ -static int vdbeSorterListToPMA(sqlite3 *db, const VdbeCursor *pCsr){ +static int vdbeSorterListToPMA(SortSubtask *pTask, SorterList *pList){ + sqlite3 *db = pTask->pSorter->db; int rc = SQLITE_OK; /* Return code */ - VdbeSorter *pSorter = pCsr->pSorter; - FileWriter writer; + PmaWriter writer; /* Object used to write to the file */ - memset(&writer, 0, sizeof(FileWriter)); +#ifdef SQLITE_DEBUG + /* Set iSz to the expected size of file pTask->file after writing the PMA. + ** This is used by an assert() statement at the end of this function. */ + i64 iSz = pList->szPMA + sqlite3VarintLen(pList->szPMA) + pTask->file.iEof; +#endif - if( pSorter->nInMemory==0 ){ - assert( pSorter->pRecord==0 ); - return rc; + vdbeSorterWorkDebug(pTask, "enter"); + memset(&writer, 0, sizeof(PmaWriter)); + assert( pList->szPMA>0 ); + + /* If the first temporary PMA file has not been opened, open it now. */ + if( pTask->file.pFd==0 ){ + rc = vdbeSorterOpenTempFile(db, 0, &pTask->file.pFd); + assert( rc!=SQLITE_OK || pTask->file.pFd ); + assert( pTask->file.iEof==0 ); + assert( pTask->nPMA==0 ); } - rc = vdbeSorterSort(pCsr); + /* Try to get the file to memory map */ + if( rc==SQLITE_OK ){ + vdbeSorterExtendFile(db, pTask->file.pFd, pTask->file.iEof+pList->szPMA+9); + } - /* If the first temporary PMA file has not been opened, open it now. 
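[Editor's note] The PMA layout described above, a leading size varint followed by records each stored as a varint length plus a blob of key data, is ordinary length-prefixed framing. The sketch below demonstrates the write and read sides of that framing in memory; it uses a simplified 7-bits-per-byte varint rather than SQLite's own varint encoding, so it is not byte-compatible with a real PMA, and putVarint()/getVarint() here are illustrative names.

#include <stdio.h>
#include <string.h>

/* Simplified little-endian-group varint (not SQLite's exact encoding). */
static int putVarint(unsigned char *a, unsigned long long v){
  int n = 0;
  do{
    a[n] = (unsigned char)(v & 0x7f);
    v >>= 7;
    if( v ) a[n] |= 0x80;      /* more bytes follow */
    n++;
  }while( v );
  return n;
}
static int getVarint(const unsigned char *a, unsigned long long *pVal){
  unsigned long long v = 0;
  int n = 0;
  do{
    v |= (unsigned long long)(a[n] & 0x7f) << (7*n);
  }while( a[n++] & 0x80 );
  *pVal = v;
  return n;
}

int main(void){
  const char *azKey[] = { "apple", "banana", "cherry" };   /* sorted keys */
  unsigned char aBody[256], aPma[264];
  int nBody = 0, nPma;

  /* Pack each record as <varint key-length><key bytes>, back to back. */
  for(int i=0; i<3; i++){
    size_t nKey = strlen(azKey[i]);
    nBody += putVarint(&aBody[nBody], nKey);
    memcpy(&aBody[nBody], azKey[i], nKey);
    nBody += (int)nKey;
  }

  /* A PMA is a varint holding the content size, then the packed records. */
  nPma = putVarint(aPma, (unsigned long long)nBody);
  memcpy(&aPma[nPma], aBody, nBody);
  nPma += nBody;

  /* Read it back: content size first, then one record at a time. */
  unsigned long long nContent, nKey;
  int iOff = getVarint(aPma, &nContent);
  int iEnd = iOff + (int)nContent;
  while( iOff<iEnd ){
    iOff += getVarint(&aPma[iOff], &nKey);
    printf("%.*s\n", (int)nKey, (const char*)&aPma[iOff]);
    iOff += (int)nKey;
  }
  return 0;
}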
*/ - if( rc==SQLITE_OK && pSorter->pTemp1==0 ){ - rc = vdbeSorterOpenTempFile(db, &pSorter->pTemp1); - assert( rc!=SQLITE_OK || pSorter->pTemp1 ); - assert( pSorter->iWriteOff==0 ); - assert( pSorter->nPMA==0 ); + /* Sort the list */ + if( rc==SQLITE_OK ){ + rc = vdbeSorterSort(pTask, pList); } if( rc==SQLITE_OK ){ SorterRecord *p; SorterRecord *pNext = 0; - fileWriterInit(db, pSorter->pTemp1, &writer, pSorter->iWriteOff); - pSorter->nPMA++; - fileWriterWriteVarint(&writer, pSorter->nInMemory); - for(p=pSorter->pRecord; p; p=pNext){ - pNext = p->pNext; - fileWriterWriteVarint(&writer, p->nVal); - fileWriterWrite(&writer, p->pVal, p->nVal); - sqlite3DbFree(db, p); + vdbePmaWriterInit(pTask->file.pFd, &writer, pTask->pSorter->pgsz, + pTask->file.iEof); + pTask->nPMA++; + vdbePmaWriteVarint(&writer, pList->szPMA); + for(p=pList->pList; p; p=pNext){ + pNext = p->u.pNext; + vdbePmaWriteVarint(&writer, p->nVal); + vdbePmaWriteBlob(&writer, SRVAL(p), p->nVal); + if( pList->aMemory==0 ) sqlite3_free(p); + } + pList->pList = p; + rc = vdbePmaWriterFinish(&writer, &pTask->file.iEof); + } + + vdbeSorterWorkDebug(pTask, "exit"); + assert( rc!=SQLITE_OK || pList->pList==0 ); + assert( rc!=SQLITE_OK || pTask->file.iEof==iSz ); + return rc; +} + +/* +** Advance the MergeEngine to its next entry. +** Set *pbEof to true there is no next entry because +** the MergeEngine has reached the end of all its inputs. +** +** Return SQLITE_OK if successful or an error code if an error occurs. +*/ +static int vdbeMergeEngineStep( + MergeEngine *pMerger, /* The merge engine to advance to the next row */ + int *pbEof /* Set TRUE at EOF. Set false for more content */ +){ + int rc; + int iPrev = pMerger->aTree[1];/* Index of PmaReader to advance */ + SortSubtask *pTask = pMerger->pTask; + + /* Advance the current PmaReader */ + rc = vdbePmaReaderNext(&pMerger->aReadr[iPrev]); + + /* Update contents of aTree[] */ + if( rc==SQLITE_OK ){ + int i; /* Index of aTree[] to recalculate */ + PmaReader *pReadr1; /* First PmaReader to compare */ + PmaReader *pReadr2; /* Second PmaReader to compare */ + u8 *pKey2; /* To pReadr2->aKey, or 0 if record cached */ + + /* Find the first two PmaReaders to compare. The one that was just + ** advanced (iPrev) and the one next to it in the array. */ + pReadr1 = &pMerger->aReadr[(iPrev & 0xFFFE)]; + pReadr2 = &pMerger->aReadr[(iPrev | 0x0001)]; + pKey2 = pReadr2->aKey; + + for(i=(pMerger->nTree+iPrev)/2; i>0; i=i/2){ + /* Compare pReadr1 and pReadr2. Store the result in variable iRes. */ + int iRes; + if( pReadr1->pFd==0 ){ + iRes = +1; + }else if( pReadr2->pFd==0 ){ + iRes = -1; + }else{ + iRes = vdbeSorterCompare(pTask, + pReadr1->aKey, pReadr1->nKey, pKey2, pReadr2->nKey + ); + } + + /* If pReadr1 contained the smaller value, set aTree[i] to its index. + ** Then set pReadr2 to the next PmaReader to compare to pReadr1. In this + ** case there is no cache of pReadr2 in pTask->pUnpacked, so set + ** pKey2 to point to the record belonging to pReadr2. + ** + ** Alternatively, if pReadr2 contains the smaller of the two values, + ** set aTree[i] to its index and update pReadr1. If vdbeSorterCompare() + ** was actually called above, then pTask->pUnpacked now contains + ** a value equivalent to pReadr2. So set pKey2 to NULL to prevent + ** vdbeSorterCompare() from decoding pReadr2 again. + ** + ** If the two values were equal, then the value from the oldest + ** PMA should be considered smaller. 
The VdbeSorter.aReadr[] array + ** is sorted from oldest to newest, so pReadr1 contains older values + ** than pReadr2 iff (pReadr1<pReadr2). */ + if( iRes<0 || (iRes==0 && pReadr1<pReadr2) ){ + pMerger->aTree[i] = (int)(pReadr1 - pMerger->aReadr); + pReadr2 = &pMerger->aReadr[ pMerger->aTree[i ^ 0x0001] ]; + pKey2 = pReadr2->aKey; + }else{ + if( pReadr1->pFd ) pKey2 = 0; + pMerger->aTree[i] = (int)(pReadr2 - pMerger->aReadr); + pReadr1 = &pMerger->aReadr[ pMerger->aTree[i ^ 0x0001] ]; + } + } + *pbEof = (pMerger->aReadr[pMerger->aTree[1]].pFd==0); + } + + return (rc==SQLITE_OK ? pTask->pUnpacked->errCode : rc); +} + +#if SQLITE_MAX_WORKER_THREADS>0 +/* +** The main routine for background threads that write level-0 PMAs. +*/ +static void *vdbeSorterFlushThread(void *pCtx){ + SortSubtask *pTask = (SortSubtask*)pCtx; + int rc; /* Return code */ + assert( pTask->bDone==0 ); + rc = vdbeSorterListToPMA(pTask, &pTask->list); + pTask->bDone = 1; + return SQLITE_INT_TO_PTR(rc); +} +#endif /* SQLITE_MAX_WORKER_THREADS>0 */ + +/* +** Flush the current contents of VdbeSorter.list to a new PMA, possibly +** using a background thread. +*/ +static int vdbeSorterFlushPMA(VdbeSorter *pSorter){ +#if SQLITE_MAX_WORKER_THREADS==0 + pSorter->bUsePMA = 1; + return vdbeSorterListToPMA(&pSorter->aTask[0], &pSorter->list); +#else + int rc = SQLITE_OK; + int i; + SortSubtask *pTask = 0; /* Thread context used to create new PMA */ + int nWorker = (pSorter->nTask-1); + + /* Set the flag to indicate that at least one PMA has been written. + ** Or will be, anyhow. */ + pSorter->bUsePMA = 1; + + /* Select a sub-task to sort and flush the current list of in-memory + ** records to disk. If the sorter is running in multi-threaded mode, + ** round-robin between the first (pSorter->nTask-1) tasks. Except, if + ** the background thread from a sub-tasks previous turn is still running, + ** skip it. If the first (pSorter->nTask-1) sub-tasks are all still busy, + ** fall back to using the final sub-task. The first (pSorter->nTask-1) + ** sub-tasks are prefered as they use background threads - the final + ** sub-task uses the main thread. */ + for(i=0; i<nWorker; i++){ + int iTest = (pSorter->iPrev + i + 1) % nWorker; + pTask = &pSorter->aTask[iTest]; + if( pTask->bDone ){ + rc = vdbeSorterJoinThread(pTask); + } + if( rc!=SQLITE_OK || pTask->pThread==0 ) break; + } + + if( rc==SQLITE_OK ){ + if( i==nWorker ){ + /* Use the foreground thread for this operation */ + rc = vdbeSorterListToPMA(&pSorter->aTask[nWorker], &pSorter->list); + }else{ + /* Launch a background thread for this operation */ + u8 *aMem = pTask->list.aMemory; + void *pCtx = (void*)pTask; + + assert( pTask->pThread==0 && pTask->bDone==0 ); + assert( pTask->list.pList==0 ); + assert( pTask->list.aMemory==0 || pSorter->list.aMemory!=0 ); + + pSorter->iPrev = (u8)(pTask - pSorter->aTask); + pTask->list = pSorter->list; + pSorter->list.pList = 0; + pSorter->list.szPMA = 0; + if( aMem ){ + pSorter->list.aMemory = aMem; + pSorter->nMemory = sqlite3MallocSize(aMem); + }else if( pSorter->list.aMemory ){ + pSorter->list.aMemory = sqlite3Malloc(pSorter->nMemory); + if( !pSorter->list.aMemory ) return SQLITE_NOMEM; + } + + rc = vdbeSorterCreateThread(pTask, vdbeSorterFlushThread, pCtx); } - pSorter->pRecord = p; - rc = fileWriterFinish(db, &writer, &pSorter->iWriteOff); } return rc; +#endif /* SQLITE_MAX_WORKER_THREADS!=0 */ } /* ** Add a record to the sorter. 
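The sub-task selection in vdbeSorterFlushPMA() above boils down to a simple policy: try the background workers in round-robin order starting just after the one used last time, reclaim any worker whose thread has already finished, and if every worker is still busy, do the flush on the main thread. A stripped-down sketch of that policy, with Worker, bBusy and pickWorker() as illustrative stand-ins (thread-join error handling is omitted):

    typedef struct Worker Worker;
    struct Worker {
      int bBusy;             /* True while a background job is running */
      int bDone;             /* Set by the job itself just before it exits */
    };

    /* Choose a worker slot for the next job.  aWorker[0..nWorker-1] are the
    ** background slots; a return value of -1 means "run it on the main
    ** thread".  *piPrev rotates so consecutive jobs prefer different slots. */
    static int pickWorker(Worker *aWorker, int nWorker, int *piPrev){
      int i;
      for(i=0; i<nWorker; i++){
        int iTest = (*piPrev + i + 1) % nWorker;
        Worker *p = &aWorker[iTest];
        if( p->bDone ){        /* Finished but not yet joined: reclaim it */
          p->bBusy = 0;
          p->bDone = 0;
        }
        if( p->bBusy==0 ){
          *piPrev = iTest;
          return iTest;
        }
      }
      return -1;               /* Everyone is busy; caller does the work */
    }

In vdbeSorterFlushPMA() the fallback is the final sub-task, aTask[pSorter->nTask-1], which always runs on the main thread, so a burst of flushes degrades gracefully instead of queuing behind a busy worker.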
*/ int sqlite3VdbeSorterWrite( - sqlite3 *db, /* Database handle */ - const VdbeCursor *pCsr, /* Sorter cursor */ + const VdbeCursor *pCsr, /* Sorter cursor */ Mem *pVal /* Memory cell containing record */ ){ VdbeSorter *pSorter = pCsr->pSorter; int rc = SQLITE_OK; /* Return Code */ SorterRecord *pNew; /* New list element */ - assert( pSorter ); - pSorter->nInMemory += sqlite3VarintLen(pVal->n) + pVal->n; + int bFlush; /* True to flush contents of memory to PMA */ + int nReq; /* Bytes of memory required */ + int nPMA; /* Bytes of PMA space required */ - pNew = (SorterRecord *)sqlite3DbMallocRaw(db, pVal->n + sizeof(SorterRecord)); - if( pNew==0 ){ - rc = SQLITE_NOMEM; - }else{ - pNew->pVal = (void *)&pNew[1]; - memcpy(pNew->pVal, pVal->z, pVal->n); - pNew->nVal = pVal->n; - pNew->pNext = pSorter->pRecord; - pSorter->pRecord = pNew; - } + assert( pSorter ); - /* See if the contents of the sorter should now be written out. They - ** are written out when either of the following are true: + /* Figure out whether or not the current contents of memory should be + ** flushed to a PMA before continuing. If so, do so. + ** + ** If using the single large allocation mode (pSorter->aMemory!=0), then + ** flush the contents of memory to a new PMA if (a) at least one value is + ** already in memory and (b) the new value will not fit in memory. + ** + ** Or, if using separate allocations for each record, flush the contents + ** of memory to a PMA if either of the following are true: ** ** * The total memory allocated for the in-memory list is greater ** than (page-size * cache-size), or @@ -804,161 +1614,778 @@ int sqlite3VdbeSorterWrite( ** * The total memory allocated for the in-memory list is greater ** than (page-size * 10) and sqlite3HeapNearlyFull() returns true. 
*/ - if( rc==SQLITE_OK && pSorter->mxPmaSize>0 && ( - (pSorter->nInMemory>pSorter->mxPmaSize) - || (pSorter->nInMemory>pSorter->mnPmaSize && sqlite3HeapNearlyFull()) - )){ -#ifdef SQLITE_DEBUG - i64 nExpect = pSorter->iWriteOff - + sqlite3VarintLen(pSorter->nInMemory) - + pSorter->nInMemory; + nReq = pVal->n + sizeof(SorterRecord); + nPMA = pVal->n + sqlite3VarintLen(pVal->n); + if( pSorter->mxPmaSize ){ + if( pSorter->list.aMemory ){ + bFlush = pSorter->iMemory && (pSorter->iMemory+nReq) > pSorter->mxPmaSize; + }else{ + bFlush = ( + (pSorter->list.szPMA > pSorter->mxPmaSize) + || (pSorter->list.szPMA > pSorter->mnPmaSize && sqlite3HeapNearlyFull()) + ); + } + if( bFlush ){ + rc = vdbeSorterFlushPMA(pSorter); + pSorter->list.szPMA = 0; + pSorter->iMemory = 0; + assert( rc!=SQLITE_OK || pSorter->list.pList==0 ); + } + } + + pSorter->list.szPMA += nPMA; + if( nPMA>pSorter->mxKeysize ){ + pSorter->mxKeysize = nPMA; + } + + if( pSorter->list.aMemory ){ + int nMin = pSorter->iMemory + nReq; + + if( nMin>pSorter->nMemory ){ + u8 *aNew; + int nNew = pSorter->nMemory * 2; + while( nNew < nMin ) nNew = nNew*2; + if( nNew > pSorter->mxPmaSize ) nNew = pSorter->mxPmaSize; + if( nNew < nMin ) nNew = nMin; + + aNew = sqlite3Realloc(pSorter->list.aMemory, nNew); + if( !aNew ) return SQLITE_NOMEM; + pSorter->list.pList = (SorterRecord*)( + aNew + ((u8*)pSorter->list.pList - pSorter->list.aMemory) + ); + pSorter->list.aMemory = aNew; + pSorter->nMemory = nNew; + } + + pNew = (SorterRecord*)&pSorter->list.aMemory[pSorter->iMemory]; + pSorter->iMemory += ROUND8(nReq); + pNew->u.iNext = (int)((u8*)(pSorter->list.pList) - pSorter->list.aMemory); + }else{ + pNew = (SorterRecord *)sqlite3Malloc(nReq); + if( pNew==0 ){ + return SQLITE_NOMEM; + } + pNew->u.pNext = pSorter->list.pList; + } + + memcpy(SRVAL(pNew), pVal->z, pVal->n); + pNew->nVal = pVal->n; + pSorter->list.pList = pNew; + + return rc; +} + +/* +** Read keys from pIncr->pMerger and populate pIncr->aFile[1]. The format +** of the data stored in aFile[1] is the same as that used by regular PMAs, +** except that the number-of-bytes varint is omitted from the start. +*/ +static int vdbeIncrPopulate(IncrMerger *pIncr){ + int rc = SQLITE_OK; + int rc2; + i64 iStart = pIncr->iStartOff; + SorterFile *pOut = &pIncr->aFile[1]; + SortSubtask *pTask = pIncr->pTask; + MergeEngine *pMerger = pIncr->pMerger; + PmaWriter writer; + assert( pIncr->bEof==0 ); + + vdbeSorterPopulateDebug(pTask, "enter"); + + vdbePmaWriterInit(pOut->pFd, &writer, pTask->pSorter->pgsz, iStart); + while( rc==SQLITE_OK ){ + int dummy; + PmaReader *pReader = &pMerger->aReadr[ pMerger->aTree[1] ]; + int nKey = pReader->nKey; + i64 iEof = writer.iWriteOff + writer.iBufEnd; + + /* Check if the output file is full or if the input has been exhausted. + ** In either case exit the loop. */ + if( pReader->pFd==0 ) break; + if( (iEof + nKey + sqlite3VarintLen(nKey))>(iStart + pIncr->mxSz) ) break; + + /* Write the next key to the output. */ + vdbePmaWriteVarint(&writer, nKey); + vdbePmaWriteBlob(&writer, pReader->aKey, nKey); + assert( pIncr->pMerger->pTask==pTask ); + rc = vdbeMergeEngineStep(pIncr->pMerger, &dummy); + } + + rc2 = vdbePmaWriterFinish(&writer, &pOut->iEof); + if( rc==SQLITE_OK ) rc = rc2; + vdbeSorterPopulateDebug(pTask, "exit"); + return rc; +} + +#if SQLITE_MAX_WORKER_THREADS>0 +/* +** The main routine for background threads that populate aFile[1] of +** multi-threaded IncrMerger objects. 
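vdbeSorterFlushThread() above and vdbeIncrPopulateThread() below share one convention: the thread entry point does its work, sets bDone to 1, and returns its integer error code disguised as a pointer via SQLITE_INT_TO_PTR; the joining side converts the pointer back to an int. A self-contained sketch of that round trip using plain pthreads, assuming a cast through long is wide enough on the target platform (the sorter itself goes through its vdbeSorterCreateThread()/vdbeSorterJoinThread() wrappers; Job, jobMain() and the INT_TO_PTR/PTR_TO_INT macros here are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    #define INT_TO_PTR(X)  ((void*)(long)(X))   /* Same trick as SQLITE_INT_TO_PTR */
    #define PTR_TO_INT(X)  ((int)(long)(X))

    typedef struct Job Job;
    struct Job {
      int bDone;            /* Set to 1 by the thread when it has finished */
      int iInput;           /* Some work parameter */
    };

    /* Thread entry point: do the work, flag completion, and smuggle the
    ** error code back through the void* thread result. */
    static void *jobMain(void *pCtx){
      Job *pJob = (Job*)pCtx;
      int rc = (pJob->iInput<0) ? 1 : 0;        /* 0 means "ok" in this sketch */
      pJob->bDone = 1;
      return INT_TO_PTR(rc);
    }

    int main(void){
      Job job = {0, 42};
      pthread_t thr;
      void *pRet = 0;
      int rc;

      pthread_create(&thr, 0, jobMain, (void*)&job);
      pthread_join(thr, &pRet);                 /* Blocks until jobMain() returns */
      rc = PTR_TO_INT(pRet);                    /* Recover the error code */
      printf("done=%d rc=%d\n", job.bDone, rc);
      return rc;
    }

Setting bDone before returning is what lets vdbeSorterFlushPMA() poll for finished workers without blocking: a set flag means the subsequent join will return immediately.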
+*/ +static void *vdbeIncrPopulateThread(void *pCtx){ + IncrMerger *pIncr = (IncrMerger*)pCtx; + void *pRet = SQLITE_INT_TO_PTR( vdbeIncrPopulate(pIncr) ); + pIncr->pTask->bDone = 1; + return pRet; +} + +/* +** Launch a background thread to populate aFile[1] of pIncr. +*/ +static int vdbeIncrBgPopulate(IncrMerger *pIncr){ + void *p = (void*)pIncr; + assert( pIncr->bUseThread ); + return vdbeSorterCreateThread(pIncr->pTask, vdbeIncrPopulateThread, p); +} #endif - rc = vdbeSorterListToPMA(db, pCsr); - pSorter->nInMemory = 0; - assert( rc!=SQLITE_OK || (nExpect==pSorter->iWriteOff) ); + +/* +** This function is called when the PmaReader corresponding to pIncr has +** finished reading the contents of aFile[0]. Its purpose is to "refill" +** aFile[0] such that the PmaReader should start rereading it from the +** beginning. +** +** For single-threaded objects, this is accomplished by literally reading +** keys from pIncr->pMerger and repopulating aFile[0]. +** +** For multi-threaded objects, all that is required is to wait until the +** background thread is finished (if it is not already) and then swap +** aFile[0] and aFile[1] in place. If the contents of pMerger have not +** been exhausted, this function also launches a new background thread +** to populate the new aFile[1]. +** +** SQLITE_OK is returned on success, or an SQLite error code otherwise. +*/ +static int vdbeIncrSwap(IncrMerger *pIncr){ + int rc = SQLITE_OK; + +#if SQLITE_MAX_WORKER_THREADS>0 + if( pIncr->bUseThread ){ + rc = vdbeSorterJoinThread(pIncr->pTask); + + if( rc==SQLITE_OK ){ + SorterFile f0 = pIncr->aFile[0]; + pIncr->aFile[0] = pIncr->aFile[1]; + pIncr->aFile[1] = f0; + } + + if( rc==SQLITE_OK ){ + if( pIncr->aFile[0].iEof==pIncr->iStartOff ){ + pIncr->bEof = 1; + }else{ + rc = vdbeIncrBgPopulate(pIncr); + } + } + }else +#endif + { + rc = vdbeIncrPopulate(pIncr); + pIncr->aFile[0] = pIncr->aFile[1]; + if( pIncr->aFile[0].iEof==pIncr->iStartOff ){ + pIncr->bEof = 1; + } } return rc; } /* -** Helper function for sqlite3VdbeSorterRewind(). +** Allocate and return a new IncrMerger object to read data from pMerger. +** +** If an OOM condition is encountered, return NULL. In this case free the +** pMerger argument before returning. */ -static int vdbeSorterInitMerge( - sqlite3 *db, /* Database handle */ - const VdbeCursor *pCsr, /* Cursor handle for this sorter */ - i64 *pnByte /* Sum of bytes in all opened PMAs */ +static int vdbeIncrMergerNew( + SortSubtask *pTask, /* The thread that will be using the new IncrMerger */ + MergeEngine *pMerger, /* The MergeEngine that the IncrMerger will control */ + IncrMerger **ppOut /* Write the new IncrMerger here */ +){ + int rc = SQLITE_OK; + IncrMerger *pIncr = *ppOut = (IncrMerger*) + (sqlite3FaultSim(100) ? 0 : sqlite3MallocZero(sizeof(*pIncr))); + if( pIncr ){ + pIncr->pMerger = pMerger; + pIncr->pTask = pTask; + pIncr->mxSz = MAX(pTask->pSorter->mxKeysize+9,pTask->pSorter->mxPmaSize/2); + pTask->file2.iEof += pIncr->mxSz; + }else{ + vdbeMergeEngineFree(pMerger); + rc = SQLITE_NOMEM; + } + return rc; +} + +#if SQLITE_MAX_WORKER_THREADS>0 +/* +** Set the "use-threads" flag on object pIncr. +*/ +static void vdbeIncrMergerSetThreads(IncrMerger *pIncr){ + pIncr->bUseThread = 1; + pIncr->pTask->file2.iEof -= pIncr->mxSz; +} +#endif /* SQLITE_MAX_WORKER_THREADS>0 */ + + + +/* +** Recompute pMerger->aTree[iOut] by comparing the next keys on the +** two PmaReaders that feed that entry. Neither of the PmaReaders +** are advanced. This routine merely does the comparison. 
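The comparison routine described above maintains a binary tournament over the PmaReaders: the leaves are the N inputs, each internal node aTree[i] records which of its two children currently has the smaller key, and aTree[1] therefore always names the overall minimum. The index arithmetic is easiest to see on a tiny example. A compact sketch for a four-input merge of plain integer keys, where a negative key stands for an input at EOF (smaller() and minTree() are illustrative names only):

    /* Return the index of the input with the smaller current key.  An input
    ** at EOF (negative key here, pFd==0 in the sorter) always loses. */
    static int smaller(const int *aKey, int i1, int i2){
      if( aKey[i1]<0 ) return i2;
      if( aKey[i2]<0 ) return i1;
      return (aKey[i1]<=aKey[i2]) ? i1 : i2;    /* Ties go to the older input */
    }

    /* Rebuild the internal nodes of a 4-leaf tournament.  After this call
    ** aTree[1] is the index of the smallest input, just like
    ** pMerger->aTree[1] in the sorter. */
    static int minTree(const int *aKey, int *aTree){
      aTree[2] = smaller(aKey, 0, 1);                 /* Bottom level: leaf pairs */
      aTree[3] = smaller(aKey, 2, 3);
      aTree[1] = smaller(aKey, aTree[2], aTree[3]);   /* Root: the two winners */
      return aTree[1];
    }

vdbeMergeEngineInit() performs exactly this bottom-up pass (its loop runs i from nTree-1 down to 1 calling vdbeMergeEngineCompare()), while vdbeMergeEngineStep() redoes only the comparisons on the path from the advanced leaf back up to aTree[1], so advancing the merge costs log2(N) comparisons rather than N.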
+*/ +static void vdbeMergeEngineCompare( + MergeEngine *pMerger, /* Merge engine containing PmaReaders to compare */ + int iOut /* Store the result in pMerger->aTree[iOut] */ +){ + int i1; + int i2; + int iRes; + PmaReader *p1; + PmaReader *p2; + + assert( iOut<pMerger->nTree && iOut>0 ); + + if( iOut>=(pMerger->nTree/2) ){ + i1 = (iOut - pMerger->nTree/2) * 2; + i2 = i1 + 1; + }else{ + i1 = pMerger->aTree[iOut*2]; + i2 = pMerger->aTree[iOut*2+1]; + } + + p1 = &pMerger->aReadr[i1]; + p2 = &pMerger->aReadr[i2]; + + if( p1->pFd==0 ){ + iRes = i2; + }else if( p2->pFd==0 ){ + iRes = i1; + }else{ + int res; + assert( pMerger->pTask->pUnpacked!=0 ); /* from vdbeSortSubtaskMain() */ + res = vdbeSorterCompare( + pMerger->pTask, p1->aKey, p1->nKey, p2->aKey, p2->nKey + ); + if( res<=0 ){ + iRes = i1; + }else{ + iRes = i2; + } + } + + pMerger->aTree[iOut] = iRes; +} + +/* +** Allowed values for the eMode parameter to vdbeMergeEngineInit() +** and vdbePmaReaderIncrMergeInit(). +** +** Only INCRINIT_NORMAL is valid in single-threaded builds (when +** SQLITE_MAX_WORKER_THREADS==0). The other values are only used +** when there exists one or more separate worker threads. +*/ +#define INCRINIT_NORMAL 0 +#define INCRINIT_TASK 1 +#define INCRINIT_ROOT 2 + +/* Forward reference. +** The vdbeIncrMergeInit() and vdbePmaReaderIncrMergeInit() routines call each +** other (when building a merge tree). +*/ +static int vdbePmaReaderIncrMergeInit(PmaReader *pReadr, int eMode); + +/* +** Initialize the MergeEngine object passed as the second argument. Once this +** function returns, the first key of merged data may be read from the +** MergeEngine object in the usual fashion. +** +** If argument eMode is INCRINIT_ROOT, then it is assumed that any IncrMerge +** objects attached to the PmaReader objects that the merger reads from have +** already been populated, but that they have not yet populated aFile[0] and +** set the PmaReader objects up to read from it. In this case all that is +** required is to call vdbePmaReaderNext() on each PmaReader to point it at +** its first key. +** +** Otherwise, if eMode is any value other than INCRINIT_ROOT, then use +** vdbePmaReaderIncrMergeInit() to initialize each PmaReader that feeds data +** to pMerger. +** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. +*/ +static int vdbeMergeEngineInit( + SortSubtask *pTask, /* Thread that will run pMerger */ + MergeEngine *pMerger, /* MergeEngine to initialize */ + int eMode /* One of the INCRINIT_XXX constants */ ){ - VdbeSorter *pSorter = pCsr->pSorter; int rc = SQLITE_OK; /* Return code */ - int i; /* Used to iterator through aIter[] */ - i64 nByte = 0; /* Total bytes in all opened PMAs */ + int i; /* For looping over PmaReader objects */ + int nTree = pMerger->nTree; + + /* eMode is always INCRINIT_NORMAL in single-threaded mode */ + assert( SQLITE_MAX_WORKER_THREADS>0 || eMode==INCRINIT_NORMAL ); + + /* Verify that the MergeEngine is assigned to a single thread */ + assert( pMerger->pTask==0 ); + pMerger->pTask = pTask; + + for(i=0; i<nTree; i++){ + if( SQLITE_MAX_WORKER_THREADS>0 && eMode==INCRINIT_ROOT ){ + /* PmaReaders should be normally initialized in order, as if they are + ** reading from the same temp file this makes for more linear file IO. + ** However, in the INCRINIT_ROOT case, if PmaReader aReadr[nTask-1] is + ** in use it will block the vdbePmaReaderNext() call while it uses + ** the main thread to fill its buffer. 
So calling PmaReaderNext() + ** on this PmaReader before any of the multi-threaded PmaReaders takes + ** better advantage of multi-processor hardware. */ + rc = vdbePmaReaderNext(&pMerger->aReadr[nTree-i-1]); + }else{ + rc = vdbePmaReaderIncrMergeInit(&pMerger->aReadr[i], INCRINIT_NORMAL); + } + if( rc!=SQLITE_OK ) return rc; + } - /* Initialize the iterators. */ - for(i=0; i<SORTER_MAX_MERGE_COUNT; i++){ - VdbeSorterIter *pIter = &pSorter->aIter[i]; - rc = vdbeSorterIterInit(db, pSorter, pSorter->iReadOff, pIter, &nByte); - pSorter->iReadOff = pIter->iEof; - assert( rc!=SQLITE_OK || pSorter->iReadOff<=pSorter->iWriteOff ); - if( rc!=SQLITE_OK || pSorter->iReadOff>=pSorter->iWriteOff ) break; + for(i=pMerger->nTree-1; i>0; i--){ + vdbeMergeEngineCompare(pMerger, i); } + return pTask->pUnpacked->errCode; +} - /* Initialize the aTree[] array. */ - for(i=pSorter->nTree-1; rc==SQLITE_OK && i>0; i--){ - rc = vdbeSorterDoCompare(pCsr, i); +/* +** Initialize the IncrMerge field of a PmaReader. +** +** If the PmaReader passed as the first argument is not an incremental-reader +** (if pReadr->pIncr==0), then this function is a no-op. Otherwise, it serves +** to open and/or initialize the temp file related fields of the IncrMerge +** object at (pReadr->pIncr). +** +** If argument eMode is set to INCRINIT_NORMAL, then all PmaReaders +** in the sub-tree headed by pReadr are also initialized. Data is then loaded +** into the buffers belonging to pReadr and it is set to +** point to the first key in its range. +** +** If argument eMode is set to INCRINIT_TASK, then pReadr is guaranteed +** to be a multi-threaded PmaReader and this function is being called in a +** background thread. In this case all PmaReaders in the sub-tree are +** initialized as for INCRINIT_NORMAL and the aFile[1] buffer belonging to +** pReadr is populated. However, pReadr itself is not set up to point +** to its first key. A call to vdbePmaReaderNext() is still required to do +** that. +** +** The reason this function does not call vdbePmaReaderNext() immediately +** in the INCRINIT_TASK case is that vdbePmaReaderNext() assumes that it has +** to block on thread (pTask->thread) before accessing aFile[1]. But, since +** this entire function is being run by thread (pTask->thread), that will +** lead to the current background thread attempting to join itself. +** +** Finally, if argument eMode is set to INCRINIT_ROOT, it may be assumed +** that pReadr->pIncr is a multi-threaded IncrMerge objects, and that all +** child-trees have already been initialized using IncrInit(INCRINIT_TASK). +** In this case vdbePmaReaderNext() is called on all child PmaReaders and +** the current PmaReader set to point to the first key in its range. +** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. +*/ +static int vdbePmaReaderIncrMergeInit(PmaReader *pReadr, int eMode){ + int rc = SQLITE_OK; + IncrMerger *pIncr = pReadr->pIncr; + + /* eMode is always INCRINIT_NORMAL in single-threaded mode */ + assert( SQLITE_MAX_WORKER_THREADS>0 || eMode==INCRINIT_NORMAL ); + + if( pIncr ){ + SortSubtask *pTask = pIncr->pTask; + sqlite3 *db = pTask->pSorter->db; + + rc = vdbeMergeEngineInit(pTask, pIncr->pMerger, eMode); + + /* Set up the required files for pIncr. A multi-theaded IncrMerge object + ** requires two temp files to itself, whereas a single-threaded object + ** only requires a region of pTask->file2. 
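The reason a multi-threaded IncrMerger needs two temp files of its own is the double-buffering discipline implemented by vdbeIncrSwap(): the reader drains aFile[0] while a background thread refills aFile[1], and when aFile[0] runs dry the two are swapped and another background fill is started. A minimal, single-threaded simulation of that swap step, assuming 8-byte buffers and a string as the data source (Buf and refill() are illustrative only; in the sorter the producer is vdbeIncrPopulate() running on another thread):

    #include <stdio.h>

    #define BUFSZ 8

    typedef struct Buf Buf;
    struct Buf {
      char a[BUFSZ];       /* Buffered content */
      int n;               /* Valid bytes; 0 means the producer had no more */
    };

    /* Stand-in producer: copies the next chunk of the source into p. */
    static void refill(Buf *p, const char **pzSrc){
      p->n = 0;
      while( p->n<BUFSZ && **pzSrc ){
        p->a[p->n++] = *(*pzSrc)++;
      }
    }

    int main(void){
      const char *zSrc = "all of the data to be streamed through two buffers";
      Buf aBuf[2];
      int bEof = 0;

      refill(&aBuf[0], &zSrc);         /* Prime both halves of the double buffer */
      refill(&aBuf[1], &zSrc);

      while( !bEof ){
        Buf tmp;
        fwrite(aBuf[0].a, 1, aBuf[0].n, stdout);   /* Consume the current half */

        /* The swap modelled on vdbeIncrSwap(): the filled half becomes
        ** current and the drained half goes back to the producer. */
        tmp = aBuf[0];
        aBuf[0] = aBuf[1];
        aBuf[1] = tmp;
        if( aBuf[0].n==0 ){
          bEof = 1;                    /* Producer had nothing left last time */
        }else{
          refill(&aBuf[1], &zSrc);
        }
      }
      putchar('\n');
      return 0;
    }

In the single-threaded case there is nothing to overlap, so vdbeIncrSwap() simply repopulates in place and a shared slice of pTask->file2 is enough; only the threaded variant pays for a second file.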
*/ + if( rc==SQLITE_OK ){ + int mxSz = pIncr->mxSz; +#if SQLITE_MAX_WORKER_THREADS>0 + if( pIncr->bUseThread ){ + rc = vdbeSorterOpenTempFile(db, mxSz, &pIncr->aFile[0].pFd); + if( rc==SQLITE_OK ){ + rc = vdbeSorterOpenTempFile(db, mxSz, &pIncr->aFile[1].pFd); + } + }else +#endif + /*if( !pIncr->bUseThread )*/{ + if( pTask->file2.pFd==0 ){ + assert( pTask->file2.iEof>0 ); + rc = vdbeSorterOpenTempFile(db, pTask->file2.iEof, &pTask->file2.pFd); + pTask->file2.iEof = 0; + } + if( rc==SQLITE_OK ){ + pIncr->aFile[1].pFd = pTask->file2.pFd; + pIncr->iStartOff = pTask->file2.iEof; + pTask->file2.iEof += mxSz; + } + } + } + +#if SQLITE_MAX_WORKER_THREADS>0 + if( rc==SQLITE_OK && pIncr->bUseThread ){ + /* Use the current thread to populate aFile[1], even though this + ** PmaReader is multi-threaded. The reason being that this function + ** is already running in background thread pIncr->pTask->thread. */ + assert( eMode==INCRINIT_ROOT || eMode==INCRINIT_TASK ); + rc = vdbeIncrPopulate(pIncr); + } +#endif + + if( rc==SQLITE_OK + && (SQLITE_MAX_WORKER_THREADS==0 || eMode!=INCRINIT_TASK) + ){ + rc = vdbePmaReaderNext(pReadr); + } + } + return rc; +} + +#if SQLITE_MAX_WORKER_THREADS>0 +/* +** The main routine for vdbePmaReaderIncrMergeInit() operations run in +** background threads. +*/ +static void *vdbePmaReaderBgInit(void *pCtx){ + PmaReader *pReader = (PmaReader*)pCtx; + void *pRet = SQLITE_INT_TO_PTR( + vdbePmaReaderIncrMergeInit(pReader,INCRINIT_TASK) + ); + pReader->pIncr->pTask->bDone = 1; + return pRet; +} + +/* +** Use a background thread to invoke vdbePmaReaderIncrMergeInit(INCRINIT_TASK) +** on the the PmaReader object passed as the first argument. +** +** This call will initialize the various fields of the pReadr->pIncr +** structure and, if it is a multi-threaded IncrMerger, launch a +** background thread to populate aFile[1]. +*/ +static int vdbePmaReaderBgIncrInit(PmaReader *pReadr){ + void *pCtx = (void*)pReadr; + return vdbeSorterCreateThread(pReadr->pIncr->pTask, vdbePmaReaderBgInit, pCtx); +} +#endif + +/* +** Allocate a new MergeEngine object to merge the contents of nPMA level-0 +** PMAs from pTask->file. If no error occurs, set *ppOut to point to +** the new object and return SQLITE_OK. Or, if an error does occur, set *ppOut +** to NULL and return an SQLite error code. +** +** When this function is called, *piOffset is set to the offset of the +** first PMA to read from pTask->file. Assuming no error occurs, it is +** set to the offset immediately following the last byte of the last +** PMA before returning. If an error does occur, then the final value of +** *piOffset is undefined. +*/ +static int vdbeMergeEngineLevel0( + SortSubtask *pTask, /* Sorter task to read from */ + int nPMA, /* Number of PMAs to read */ + i64 *piOffset, /* IN/OUT: Readr offset in pTask->file */ + MergeEngine **ppOut /* OUT: New merge-engine */ +){ + MergeEngine *pNew; /* Merge engine to return */ + i64 iOff = *piOffset; + int i; + int rc = SQLITE_OK; + + *ppOut = pNew = vdbeMergeEngineNew(nPMA); + if( pNew==0 ) rc = SQLITE_NOMEM; + + for(i=0; i<nPMA && rc==SQLITE_OK; i++){ + i64 nDummy; + PmaReader *pReadr = &pNew->aReadr[i]; + rc = vdbePmaReaderInit(pTask, &pTask->file, iOff, pReadr, &nDummy); + iOff = pReadr->iEof; } - *pnByte = nByte; + if( rc!=SQLITE_OK ){ + vdbeMergeEngineFree(pNew); + *ppOut = 0; + } + *piOffset = iOff; return rc; } /* -** Once the sorter has been populated, this function is called to prepare -** for iterating through its contents in sorted order. 
+** Return the depth of a tree comprising nPMA PMAs, assuming a fanout of +** SORTER_MAX_MERGE_COUNT. The returned value does not include leaf nodes. +** +** i.e. +** +** nPMA<=16 -> TreeDepth() == 0 +** nPMA<=256 -> TreeDepth() == 1 +** nPMA<=65536 -> TreeDepth() == 2 */ -int sqlite3VdbeSorterRewind(sqlite3 *db, const VdbeCursor *pCsr, int *pbEof){ - VdbeSorter *pSorter = pCsr->pSorter; - int rc; /* Return code */ - sqlite3_file *pTemp2 = 0; /* Second temp file to use */ - i64 iWrite2 = 0; /* Write offset for pTemp2 */ - int nIter; /* Number of iterators used */ - int nByte; /* Bytes of space required for aIter/aTree */ - int N = 2; /* Power of 2 >= nIter */ +static int vdbeSorterTreeDepth(int nPMA){ + int nDepth = 0; + i64 nDiv = SORTER_MAX_MERGE_COUNT; + while( nDiv < (i64)nPMA ){ + nDiv = nDiv * SORTER_MAX_MERGE_COUNT; + nDepth++; + } + return nDepth; +} - assert( pSorter ); +/* +** pRoot is the root of an incremental merge-tree with depth nDepth (according +** to vdbeSorterTreeDepth()). pLeaf is the iSeq'th leaf to be added to the +** tree, counting from zero. This function adds pLeaf to the tree. +** +** If successful, SQLITE_OK is returned. If an error occurs, an SQLite error +** code is returned and pLeaf is freed. +*/ +static int vdbeSorterAddToTree( + SortSubtask *pTask, /* Task context */ + int nDepth, /* Depth of tree according to TreeDepth() */ + int iSeq, /* Sequence number of leaf within tree */ + MergeEngine *pRoot, /* Root of tree */ + MergeEngine *pLeaf /* Leaf to add to tree */ +){ + int rc = SQLITE_OK; + int nDiv = 1; + int i; + MergeEngine *p = pRoot; + IncrMerger *pIncr; - /* If no data has been written to disk, then do not do so now. Instead, - ** sort the VdbeSorter.pRecord list. The vdbe layer will read data directly - ** from the in-memory list. */ - if( pSorter->nPMA==0 ){ - *pbEof = !pSorter->pRecord; - assert( pSorter->aTree==0 ); - return vdbeSorterSort(pCsr); + rc = vdbeIncrMergerNew(pTask, pLeaf, &pIncr); + + for(i=1; i<nDepth; i++){ + nDiv = nDiv * SORTER_MAX_MERGE_COUNT; } - /* Write the current in-memory list to a PMA. */ - rc = vdbeSorterListToPMA(db, pCsr); - if( rc!=SQLITE_OK ) return rc; + for(i=1; i<nDepth && rc==SQLITE_OK; i++){ + int iIter = (iSeq / nDiv) % SORTER_MAX_MERGE_COUNT; + PmaReader *pReadr = &p->aReadr[iIter]; - /* Allocate space for aIter[] and aTree[]. 
*/ - nIter = pSorter->nPMA; - if( nIter>SORTER_MAX_MERGE_COUNT ) nIter = SORTER_MAX_MERGE_COUNT; - assert( nIter>0 ); - while( N<nIter ) N += N; - nByte = N * (sizeof(int) + sizeof(VdbeSorterIter)); - pSorter->aIter = (VdbeSorterIter *)sqlite3DbMallocZero(db, nByte); - if( !pSorter->aIter ) return SQLITE_NOMEM; - pSorter->aTree = (int *)&pSorter->aIter[N]; - pSorter->nTree = N; - - do { - int iNew; /* Index of new, merged, PMA */ - - for(iNew=0; - rc==SQLITE_OK && iNew*SORTER_MAX_MERGE_COUNT<pSorter->nPMA; - iNew++ - ){ - int rc2; /* Return code from fileWriterFinish() */ - FileWriter writer; /* Object used to write to disk */ - i64 nWrite; /* Number of bytes in new PMA */ + if( pReadr->pIncr==0 ){ + MergeEngine *pNew = vdbeMergeEngineNew(SORTER_MAX_MERGE_COUNT); + if( pNew==0 ){ + rc = SQLITE_NOMEM; + }else{ + rc = vdbeIncrMergerNew(pTask, pNew, &pReadr->pIncr); + } + } + if( rc==SQLITE_OK ){ + p = pReadr->pIncr->pMerger; + nDiv = nDiv / SORTER_MAX_MERGE_COUNT; + } + } + + if( rc==SQLITE_OK ){ + p->aReadr[iSeq % SORTER_MAX_MERGE_COUNT].pIncr = pIncr; + }else{ + vdbeIncrFree(pIncr); + } + return rc; +} - memset(&writer, 0, sizeof(FileWriter)); +/* +** This function is called as part of a SorterRewind() operation on a sorter +** that has already written two or more level-0 PMAs to one or more temp +** files. It builds a tree of MergeEngine/IncrMerger/PmaReader objects that +** can be used to incrementally merge all PMAs on disk. +** +** If successful, SQLITE_OK is returned and *ppOut set to point to the +** MergeEngine object at the root of the tree before returning. Or, if an +** error occurs, an SQLite error code is returned and the final value +** of *ppOut is undefined. +*/ +static int vdbeSorterMergeTreeBuild( + VdbeSorter *pSorter, /* The VDBE cursor that implements the sort */ + MergeEngine **ppOut /* Write the MergeEngine here */ +){ + MergeEngine *pMain = 0; + int rc = SQLITE_OK; + int iTask; + +#if SQLITE_MAX_WORKER_THREADS>0 + /* If the sorter uses more than one task, then create the top-level + ** MergeEngine here. This MergeEngine will read data from exactly + ** one PmaReader per sub-task. */ + assert( pSorter->bUseThreads || pSorter->nTask==1 ); + if( pSorter->nTask>1 ){ + pMain = vdbeMergeEngineNew(pSorter->nTask); + if( pMain==0 ) rc = SQLITE_NOMEM; + } +#endif - /* If there are SORTER_MAX_MERGE_COUNT or less PMAs in file pTemp1, - ** initialize an iterator for each of them and break out of the loop. - ** These iterators will be incrementally merged as the VDBE layer calls - ** sqlite3VdbeSorterNext(). - ** - ** Otherwise, if pTemp1 contains more than SORTER_MAX_MERGE_COUNT PMAs, - ** initialize interators for SORTER_MAX_MERGE_COUNT of them. These PMAs - ** are merged into a single PMA that is written to file pTemp2. 
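The descent loop in vdbeSorterAddToTree() above is really just base-SORTER_MAX_MERGE_COUNT arithmetic: the digits of iSeq, most significant first, name the slot taken at each level of the tree, and the least significant digit names the slot inside the bottom merger. A small worked sketch with the default fanout of 16, using an illustrative leafPath() helper:

    #include <stdio.h>

    #define FANOUT 16          /* Mirrors SORTER_MAX_MERGE_COUNT */

    /* Print the slot chosen at each level for leaf number iSeq of a tree
    ** whose depth (per vdbeSorterTreeDepth()) is nDepth. */
    static void leafPath(int iSeq, int nDepth){
      int nDiv = 1;
      int i;
      for(i=1; i<nDepth; i++) nDiv *= FANOUT;

      printf("leaf %3d:", iSeq);
      for(i=1; i<nDepth; i++){
        printf(" level%d-slot=%d", i, (iSeq / nDiv) % FANOUT);
        nDiv /= FANOUT;
      }
      printf(" bottom-slot=%d\n", iSeq % FANOUT);
    }

    int main(void){
      /* A sub-task that wrote 300 PMAs has vdbeSorterTreeDepth()==2 and
      ** groups them into 19 level-0 mergers (iSeq 0 through 18). */
      leafPath(0, 2);          /* level1-slot=0  bottom-slot=0 */
      leafPath(17, 2);         /* level1-slot=1  bottom-slot=1 */
      leafPath(18, 2);         /* level1-slot=1  bottom-slot=2 */
      return 0;
    }

With a fanout of 16, a depth-2 tree addresses up to 256 leaf mergers, which is 4096 level-0 PMAs feeding a single root, so even very large sorts stay within a few levels of indirection.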
- */ - rc = vdbeSorterInitMerge(db, pCsr, &nWrite); - assert( rc!=SQLITE_OK || pSorter->aIter[ pSorter->aTree[1] ].pFile ); - if( rc!=SQLITE_OK || pSorter->nPMA<=SORTER_MAX_MERGE_COUNT ){ - break; + for(iTask=0; rc==SQLITE_OK && iTask<pSorter->nTask; iTask++){ + SortSubtask *pTask = &pSorter->aTask[iTask]; + assert( pTask->nPMA>0 || SQLITE_MAX_WORKER_THREADS>0 ); + if( SQLITE_MAX_WORKER_THREADS==0 || pTask->nPMA ){ + MergeEngine *pRoot = 0; /* Root node of tree for this task */ + int nDepth = vdbeSorterTreeDepth(pTask->nPMA); + i64 iReadOff = 0; + + if( pTask->nPMA<=SORTER_MAX_MERGE_COUNT ){ + rc = vdbeMergeEngineLevel0(pTask, pTask->nPMA, &iReadOff, &pRoot); + }else{ + int i; + int iSeq = 0; + pRoot = vdbeMergeEngineNew(SORTER_MAX_MERGE_COUNT); + if( pRoot==0 ) rc = SQLITE_NOMEM; + for(i=0; i<pTask->nPMA && rc==SQLITE_OK; i += SORTER_MAX_MERGE_COUNT){ + MergeEngine *pMerger = 0; /* New level-0 PMA merger */ + int nReader; /* Number of level-0 PMAs to merge */ + + nReader = MIN(pTask->nPMA - i, SORTER_MAX_MERGE_COUNT); + rc = vdbeMergeEngineLevel0(pTask, nReader, &iReadOff, &pMerger); + if( rc==SQLITE_OK ){ + rc = vdbeSorterAddToTree(pTask, nDepth, iSeq++, pRoot, pMerger); + } + } } - /* Open the second temp file, if it is not already open. */ - if( pTemp2==0 ){ - assert( iWrite2==0 ); - rc = vdbeSorterOpenTempFile(db, &pTemp2); + if( rc==SQLITE_OK ){ +#if SQLITE_MAX_WORKER_THREADS>0 + if( pMain!=0 ){ + rc = vdbeIncrMergerNew(pTask, pRoot, &pMain->aReadr[iTask].pIncr); + }else +#endif + { + assert( pMain==0 ); + pMain = pRoot; + } + }else{ + vdbeMergeEngineFree(pRoot); } + } + } + + if( rc!=SQLITE_OK ){ + vdbeMergeEngineFree(pMain); + pMain = 0; + } + *ppOut = pMain; + return rc; +} + +/* +** This function is called as part of an sqlite3VdbeSorterRewind() operation +** on a sorter that has written two or more PMAs to temporary files. It sets +** up either VdbeSorter.pMerger (for single threaded sorters) or pReader +** (for multi-threaded sorters) so that it can be used to iterate through +** all records stored in the sorter. +** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. 
+*/ +static int vdbeSorterSetupMerge(VdbeSorter *pSorter){ + int rc; /* Return code */ + SortSubtask *pTask0 = &pSorter->aTask[0]; + MergeEngine *pMain = 0; +#if SQLITE_MAX_WORKER_THREADS + sqlite3 *db = pTask0->pSorter->db; +#endif + rc = vdbeSorterMergeTreeBuild(pSorter, &pMain); + if( rc==SQLITE_OK ){ +#if SQLITE_MAX_WORKER_THREADS + assert( pSorter->bUseThreads==0 || pSorter->nTask>1 ); + if( pSorter->bUseThreads ){ + int iTask; + PmaReader *pReadr; + SortSubtask *pLast = &pSorter->aTask[pSorter->nTask-1]; + rc = vdbeSortAllocUnpacked(pLast); if( rc==SQLITE_OK ){ - int bEof = 0; - fileWriterInit(db, pTemp2, &writer, iWrite2); - fileWriterWriteVarint(&writer, nWrite); - while( rc==SQLITE_OK && bEof==0 ){ - VdbeSorterIter *pIter = &pSorter->aIter[ pSorter->aTree[1] ]; - assert( pIter->pFile ); - - fileWriterWriteVarint(&writer, pIter->nKey); - fileWriterWrite(&writer, pIter->aKey, pIter->nKey); - rc = sqlite3VdbeSorterNext(db, pCsr, &bEof); + pReadr = (PmaReader*)sqlite3DbMallocZero(db, sizeof(PmaReader)); + pSorter->pReader = pReadr; + if( pReadr==0 ) rc = SQLITE_NOMEM; + } + if( rc==SQLITE_OK ){ + rc = vdbeIncrMergerNew(pLast, pMain, &pReadr->pIncr); + if( rc==SQLITE_OK ){ + vdbeIncrMergerSetThreads(pReadr->pIncr); + for(iTask=0; iTask<(pSorter->nTask-1); iTask++){ + IncrMerger *pIncr; + if( (pIncr = pMain->aReadr[iTask].pIncr) ){ + vdbeIncrMergerSetThreads(pIncr); + assert( pIncr->pTask!=pLast ); + } + } + for(iTask=0; rc==SQLITE_OK && iTask<pSorter->nTask; iTask++){ + PmaReader *p = &pMain->aReadr[iTask]; + assert( p->pIncr==0 || p->pIncr->pTask==&pSorter->aTask[iTask] ); + if( p->pIncr ){ + if( iTask==pSorter->nTask-1 ){ + rc = vdbePmaReaderIncrMergeInit(p, INCRINIT_TASK); + }else{ + rc = vdbePmaReaderBgIncrInit(p); + } + } + } } - rc2 = fileWriterFinish(db, &writer, &iWrite2); - if( rc==SQLITE_OK ) rc = rc2; + pMain = 0; + } + if( rc==SQLITE_OK ){ + rc = vdbePmaReaderIncrMergeInit(pReadr, INCRINIT_ROOT); } + }else +#endif + { + rc = vdbeMergeEngineInit(pTask0, pMain, INCRINIT_NORMAL); + pSorter->pMerger = pMain; + pMain = 0; } + } + + if( rc!=SQLITE_OK ){ + vdbeMergeEngineFree(pMain); + } + return rc; +} - if( pSorter->nPMA<=SORTER_MAX_MERGE_COUNT ){ - break; + +/* +** Once the sorter has been populated by calls to sqlite3VdbeSorterWrite, +** this function is called to prepare for iterating through the records +** in sorted order. +*/ +int sqlite3VdbeSorterRewind(const VdbeCursor *pCsr, int *pbEof){ + VdbeSorter *pSorter = pCsr->pSorter; + int rc = SQLITE_OK; /* Return code */ + + assert( pSorter ); + + /* If no data has been written to disk, then do not do so now. Instead, + ** sort the VdbeSorter.pRecord list. The vdbe layer will read data directly + ** from the in-memory list. */ + if( pSorter->bUsePMA==0 ){ + if( pSorter->list.pList ){ + *pbEof = 0; + rc = vdbeSorterSort(&pSorter->aTask[0], &pSorter->list); }else{ - sqlite3_file *pTmp = pSorter->pTemp1; - pSorter->nPMA = iNew; - pSorter->pTemp1 = pTemp2; - pTemp2 = pTmp; - pSorter->iWriteOff = iWrite2; - pSorter->iReadOff = 0; - iWrite2 = 0; + *pbEof = 1; } - }while( rc==SQLITE_OK ); + return rc; + } + + /* Write the current in-memory list to a PMA. When the VdbeSorterWrite() + ** function flushes the contents of memory to disk, it immediately always + ** creates a new list consisting of a single key immediately afterwards. + ** So the list is never empty at this point. 
*/ + assert( pSorter->list.pList ); + rc = vdbeSorterFlushPMA(pSorter); + + /* Join all threads */ + rc = vdbeSorterJoinAll(pSorter, rc); - if( pTemp2 ){ - sqlite3OsCloseFree(pTemp2); + vdbeSorterRewindDebug("rewind"); + + /* Assuming no errors have occurred, set up a merger structure to + ** incrementally read and merge all remaining PMAs. */ + assert( pSorter->pReader==0 ); + if( rc==SQLITE_OK ){ + rc = vdbeSorterSetupMerge(pSorter); + *pbEof = 0; } - *pbEof = (pSorter->aIter[pSorter->aTree[1]].pFile==0); + + vdbeSorterRewindDebug("rewinddone"); return rc; } @@ -969,63 +2396,27 @@ int sqlite3VdbeSorterNext(sqlite3 *db, const VdbeCursor *pCsr, int *pbEof){ VdbeSorter *pSorter = pCsr->pSorter; int rc; /* Return code */ - if( pSorter->aTree ){ - int iPrev = pSorter->aTree[1];/* Index of iterator to advance */ - rc = vdbeSorterIterNext(db, &pSorter->aIter[iPrev]); - if( rc==SQLITE_OK ){ - int i; /* Index of aTree[] to recalculate */ - VdbeSorterIter *pIter1; /* First iterator to compare */ - VdbeSorterIter *pIter2; /* Second iterator to compare */ - u8 *pKey2; /* To pIter2->aKey, or 0 if record cached */ - - /* Find the first two iterators to compare. The one that was just - ** advanced (iPrev) and the one next to it in the array. */ - pIter1 = &pSorter->aIter[(iPrev & 0xFFFE)]; - pIter2 = &pSorter->aIter[(iPrev | 0x0001)]; - pKey2 = pIter2->aKey; - - for(i=(pSorter->nTree+iPrev)/2; i>0; i=i/2){ - /* Compare pIter1 and pIter2. Store the result in variable iRes. */ - int iRes; - if( pIter1->pFile==0 ){ - iRes = +1; - }else if( pIter2->pFile==0 ){ - iRes = -1; - }else{ - vdbeSorterCompare(pCsr, 0, - pIter1->aKey, pIter1->nKey, pKey2, pIter2->nKey, &iRes - ); - } - - /* If pIter1 contained the smaller value, set aTree[i] to its index. - ** Then set pIter2 to the next iterator to compare to pIter1. In this - ** case there is no cache of pIter2 in pSorter->pUnpacked, so set - ** pKey2 to point to the record belonging to pIter2. - ** - ** Alternatively, if pIter2 contains the smaller of the two values, - ** set aTree[i] to its index and update pIter1. If vdbeSorterCompare() - ** was actually called above, then pSorter->pUnpacked now contains - ** a value equivalent to pIter2. So set pKey2 to NULL to prevent - ** vdbeSorterCompare() from decoding pIter2 again. 
*/ - if( iRes<=0 ){ - pSorter->aTree[i] = (int)(pIter1 - pSorter->aIter); - pIter2 = &pSorter->aIter[ pSorter->aTree[i ^ 0x0001] ]; - pKey2 = pIter2->aKey; - }else{ - if( pIter1->pFile ) pKey2 = 0; - pSorter->aTree[i] = (int)(pIter2 - pSorter->aIter); - pIter1 = &pSorter->aIter[ pSorter->aTree[i ^ 0x0001] ]; - } - - } - *pbEof = (pSorter->aIter[pSorter->aTree[1]].pFile==0); + assert( pSorter->bUsePMA || (pSorter->pReader==0 && pSorter->pMerger==0) ); + if( pSorter->bUsePMA ){ + assert( pSorter->pReader==0 || pSorter->pMerger==0 ); + assert( pSorter->bUseThreads==0 || pSorter->pReader ); + assert( pSorter->bUseThreads==1 || pSorter->pMerger ); +#if SQLITE_MAX_WORKER_THREADS>0 + if( pSorter->bUseThreads ){ + rc = vdbePmaReaderNext(pSorter->pReader); + *pbEof = (pSorter->pReader->pFd==0); + }else +#endif + /*if( !pSorter->bUseThreads )*/ { + assert( pSorter->pMerger->pTask==(&pSorter->aTask[0]) ); + rc = vdbeMergeEngineStep(pSorter->pMerger, pbEof); } }else{ - SorterRecord *pFree = pSorter->pRecord; - pSorter->pRecord = pFree->pNext; - pFree->pNext = 0; - vdbeSorterRecordFree(db, pFree); - *pbEof = !pSorter->pRecord; + SorterRecord *pFree = pSorter->list.pList; + pSorter->list.pList = pFree->u.pNext; + pFree->u.pNext = 0; + if( pSorter->list.aMemory==0 ) vdbeSorterRecordFree(db, pFree); + *pbEof = !pSorter->list.pList; rc = SQLITE_OK; } return rc; @@ -1040,14 +2431,21 @@ static void *vdbeSorterRowkey( int *pnKey /* OUT: Size of current key in bytes */ ){ void *pKey; - if( pSorter->aTree ){ - VdbeSorterIter *pIter; - pIter = &pSorter->aIter[ pSorter->aTree[1] ]; - *pnKey = pIter->nKey; - pKey = pIter->aKey; + if( pSorter->bUsePMA ){ + PmaReader *pReader; +#if SQLITE_MAX_WORKER_THREADS>0 + if( pSorter->bUseThreads ){ + pReader = pSorter->pReader; + }else +#endif + /*if( !pSorter->bUseThreads )*/{ + pReader = &pSorter->pMerger->aReadr[pSorter->pMerger->aTree[1]]; + } + *pnKey = pReader->nKey; + pKey = pReader->aKey; }else{ - *pnKey = pSorter->pRecord->nVal; - pKey = pSorter->pRecord->pVal; + *pnKey = pSorter->list.pList->nVal; + pKey = SRVAL(pSorter->list.pList); } return pKey; } @@ -1075,21 +2473,47 @@ int sqlite3VdbeSorterRowkey(const VdbeCursor *pCsr, Mem *pOut){ ** passed as the first argument currently points to. For the purposes of ** the comparison, ignore the rowid field at the end of each record. ** +** If the sorter cursor key contains any NULL values, consider it to be +** less than pVal. Even if pVal also contains NULL values. +** ** If an error occurs, return an SQLite error code (i.e. SQLITE_NOMEM). ** Otherwise, set *pRes to a negative, zero or positive value if the ** key in pVal is smaller than, equal to or larger than the current sorter ** key. +** +** This routine forms the core of the OP_SorterCompare opcode, which in +** turn is used to verify uniqueness when constructing a UNIQUE INDEX. 
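sqlite3VdbeSorterCompare(), defined just below, encodes the SQL rule that NULLs are always distinct for the purposes of a UNIQUE index: if the key already in the sorter contains a NULL in any indexed column, it is reported as smaller than pVal, so OP_SorterCompare sees a non-zero result and no constraint violation is raised. A simplified model of that check, using integer-only fields in place of unpacked record values (Field and uniqueViolation() are illustrative; the real code unpacks the key and compares it with sqlite3VdbeRecordCompare()):

    typedef struct Field Field;
    struct Field {
      int isNull;          /* True if this column is SQL NULL */
      int iVal;            /* Column value when not NULL */
    };

    /* Return true if inserting aNew[] into a UNIQUE index would collide with
    ** the previously sorted key aPrev[].  A key containing any NULL can never
    ** cause a uniqueness conflict, so it compares as "smaller" and the check
    ** passes. */
    static int uniqueViolation(const Field *aPrev, const Field *aNew, int nCol){
      int i;
      for(i=0; i<nCol; i++){
        if( aPrev[i].isNull ) return 0;        /* NULL => keys are distinct */
      }
      for(i=0; i<nCol; i++){
        if( aNew[i].isNull || aNew[i].iVal!=aPrev[i].iVal ) return 0;
      }
      return 1;                                /* Equal, non-NULL keys collide */
    }

This is the behaviour exercised by the new sort.test cases 16.1 and 16.2 further down: case 16.1, where the only near-duplicate keys involve NULLs, builds the UNIQUE index successfully, while case 16.2, which repeats the row (1, 2, 3), fails with a UNIQUE constraint error.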
*/ int sqlite3VdbeSorterCompare( const VdbeCursor *pCsr, /* Sorter cursor */ Mem *pVal, /* Value to compare to current sorter key */ - int nKeyCol, /* Only compare this many fields */ + int nKeyCol, /* Compare this many columns */ int *pRes /* OUT: Result of comparison */ ){ VdbeSorter *pSorter = pCsr->pSorter; + UnpackedRecord *r2 = pSorter->pUnpacked; + KeyInfo *pKeyInfo = pCsr->pKeyInfo; + int i; void *pKey; int nKey; /* Sorter key to compare pVal with */ + if( r2==0 ){ + char *p; + r2 = pSorter->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pKeyInfo,0,0,&p); + assert( pSorter->pUnpacked==(UnpackedRecord*)p ); + if( r2==0 ) return SQLITE_NOMEM; + r2->nField = nKeyCol; + } + assert( r2->nField==nKeyCol ); + pKey = vdbeSorterRowkey(pSorter, &nKey); - vdbeSorterCompare(pCsr, nKeyCol, pVal->z, pVal->n, pKey, nKey, pRes); + sqlite3VdbeRecordUnpack(pKeyInfo, nKey, pKey, r2); + for(i=0; i<nKeyCol; i++){ + if( r2->aMem[i].flags & MEM_Null ){ + *pRes = -1; + return SQLITE_OK; + } + } + + *pRes = sqlite3VdbeRecordCompare(pVal->n, pVal->z, r2, 0); return SQLITE_OK; } diff --git a/src/where.c b/src/where.c index 4ba8de6e4..e1e1e1d52 100644 --- a/src/where.c +++ b/src/where.c @@ -2191,6 +2191,10 @@ static int whereRangeScanEst( tRowcnt iLower; tRowcnt iUpper; + if( pRec ){ + testcase( pRec->nField!=pBuilder->nRecValid ); + pRec->nField = pBuilder->nRecValid; + } if( nEq==p->nKeyCol ){ aff = SQLITE_AFF_INTEGER; }else{ @@ -2250,7 +2254,7 @@ static int whereRangeScanEst( if( nNew<nOut ){ nOut = nNew; } - WHERETRACE(0x10, ("range scan regions: %u..%u est=%d\n", + WHERETRACE(0x10, ("STAT4 range scan: %u..%u est=%d\n", (u32)iLower, (u32)iUpper, nOut)); } }else{ @@ -2278,6 +2282,12 @@ static int whereRangeScanEst( nOut -= (pLower!=0) + (pUpper!=0); if( nNew<10 ) nNew = 10; if( nNew<nOut ) nOut = nNew; +#if defined(WHERETRACE_ENABLED) + if( pLoop->nOut>nOut ){ + WHERETRACE(0x10,("Range scan lowers nOut from %d to %d\n", + pLoop->nOut, nOut)); + } +#endif pLoop->nOut = (LogEst)nOut; return rc; } @@ -2390,7 +2400,7 @@ static int whereInScanEst( if( rc==SQLITE_OK ){ if( nRowEst > nRow0 ) nRowEst = nRow0; *pnRow = nRowEst; - WHERETRACE(0x10,("IN row estimate: est=%g\n", nRowEst)); + WHERETRACE(0x10,("IN row estimate: est=%d\n", nRowEst)); } assert( pBuilder->nRecValid==nRecValid ); return rc; @@ -4713,7 +4723,8 @@ static int whereLoopAddBtree( */ for(; rc==SQLITE_OK && pProbe; pProbe=pProbe->pNext, iSortIdx++){ if( pProbe->pPartIdxWhere!=0 - && !whereUsablePartialIndex(pNew->iTab, pWC, pProbe->pPartIdxWhere) ){ + && !whereUsablePartialIndex(pSrc->iCursor, pWC, pProbe->pPartIdxWhere) ){ + testcase( pNew->iTab!=pSrc->iCursor ); /* See ticket [98d973b8f5] */ continue; /* Partial index inappropriate for this query */ } rSize = pProbe->aiRowLogEst[0]; diff --git a/test/index7.test b/test/index7.test index 1c81f6024..0f341a312 100644 --- a/test/index7.test +++ b/test/index7.test @@ -248,4 +248,34 @@ do_execsql_test index7-5.0 { SELECT stat+0 FROM sqlite_stat1 WHERE idx='t3b'; } {6 6} +# Verify that the problem identified by ticket [98d973b8f5] has been fixed. 
+# +do_execsql_test index7-6.1 { + CREATE TABLE t5(a, b); + CREATE TABLE t4(c, d); + INSERT INTO t5 VALUES(1, 'xyz'); + INSERT INTO t4 VALUES('abc', 'not xyz'); + SELECT * FROM (SELECT * FROM t5 WHERE a=1 AND b='xyz'), t4 WHERE c='abc'; +} { + 1 xyz abc {not xyz} +} +do_execsql_test index7-6.2 { + CREATE INDEX i4 ON t4(c) WHERE d='xyz'; + SELECT * FROM (SELECT * FROM t5 WHERE a=1 AND b='xyz'), t4 WHERE c='abc'; +} { + 1 xyz abc {not xyz} +} +do_execsql_test index7-6.3 { + CREATE VIEW v4 AS SELECT * FROM t4; + INSERT INTO t4 VALUES('def', 'xyz'); + SELECT * FROM v4 WHERE d='xyz' AND c='def' +} { + def xyz +} +do_eqp_test index7-6.4 { + SELECT * FROM v4 WHERE d='xyz' AND c='def' +} { + 0 0 0 {SEARCH TABLE t4 USING INDEX i4 (c=?)} +} + finish_test diff --git a/test/malloc.test b/test/malloc.test index 4276b58bb..86145672a 100644 --- a/test/malloc.test +++ b/test/malloc.test @@ -881,6 +881,48 @@ do_malloc_test 39 -tclprep { db close } +reset_db +add_test_utf16bin_collate db +do_execsql_test 40.1 { + CREATE TABLE t1(a); + INSERT INTO t1 VALUES('fghij'); + INSERT INTO t1 VALUES('pqrst'); + INSERT INTO t1 VALUES('abcde'); + INSERT INTO t1 VALUES('uvwxy'); + INSERT INTO t1 VALUES('klmno'); +} +do_execsql_test 40.2 { + SELECT * FROM t1 ORDER BY 1 COLLATE utf16bin; +} {abcde fghij klmno pqrst uvwxy} +do_faultsim_test 40.3 -faults oom-trans* -body { + execsql { + SELECT * FROM t1 ORDER BY 1 COLLATE utf16bin; + } +} -test { + faultsim_test_result {0 {abcde fghij klmno pqrst uvwxy}} + faultsim_integrity_check +} + +reset_db +add_test_utf16bin_collate db +set big [string repeat x 200] +do_execsql_test 41.1 { + DROP TABLE IF EXISTS t1; + CREATE TABLE t1(a COLLATE utf16bin); + INSERT INTO t1 VALUES('fghij' || $::big); + INSERT INTO t1 VALUES('pqrst' || $::big); + INSERT INTO t1 VALUES('abcde' || $::big); + INSERT INTO t1 VALUES('uvwxy' || $::big); + INSERT INTO t1 VALUES('klmno' || $::big); + CREATE INDEX i1 ON t1(a); +} +do_faultsim_test 41.2 -faults oom* -body { + execsql { SELECT * FROM t1 WHERE a = ('abcde' || $::big)} +} -test { + faultsim_test_result [list 0 "abcde$::big"] + faultsim_integrity_check +} + # Ensure that no file descriptors were leaked. do_test malloc-99.X { catch {db close} diff --git a/test/mallocA.test b/test/mallocA.test index 61e88a61e..d6d6de822 100644 --- a/test/mallocA.test +++ b/test/mallocA.test @@ -25,7 +25,6 @@ if {!$MEMDEBUG} { return } - # Construct a test database # forcedelete test.db.bu @@ -116,6 +115,28 @@ ifcapable stat3 { } } +do_execsql_test 7.0 { + PRAGMA cache_size = 5; +} +do_faultsim_test 7 -faults oom-trans* -prep { + if {$iFail < 500} { set iFail 2000 } + if {$iFail > 1215} { set iFail 2000 } +} -body { + execsql { + WITH r(x,y) AS ( + SELECT 1, randomblob(100) + UNION ALL + SELECT x+1, randomblob(100) FROM r + LIMIT 1000 + ) + SELECT count(x), length(y) FROM r GROUP BY (x%5) + } +} -test { + set res [list 200 100 200 100 200 100 200 100 200 100] + faultsim_test_result [list 0 $res] +} + + # Ensure that no file descriptors were leaked. 
do_test malloc-99.X { catch {db close} diff --git a/test/permutations.test b/test/permutations.test index efa27468e..7bea39eb7 100644 --- a/test/permutations.test +++ b/test/permutations.test @@ -115,7 +115,7 @@ set allquicktests [test_set $alltests -exclude { incrvacuum_ioerr.test autovacuum_crash.test btree8.test shared_err.test vtab_err.test walslow.test walcrash.test walcrash3.test walthread.test rtree3.test indexfault.test securedel2.test - fts4growth.test fts4growth2.test + sort3.test sort4.test fts4growth.test fts4growth2.test }] if {[info exists ::env(QUICKTEST_INCLUDE)]} { set allquicktests [concat $allquicktests $::env(QUICKTEST_INCLUDE)] @@ -358,6 +358,12 @@ test_suite "coverage-analyze" -description { analyze.test analyzeB.test mallocA.test } +test_suite "coverage-sorter" -description { + Coverage tests for file vdbesort.c. +} -files { + sort.test sortfault.test +} + lappend ::testsuitelist xxx #------------------------------------------------------------------------- @@ -489,7 +495,7 @@ test_suite "multithread" -description { } -files { delete.test delete2.test insert.test rollback.test select1.test select2.test trans.test update.test vacuum.test types.test - types2.test types3.test + types2.test types3.test sort4.test } -shutdown { catch {db close} sqlite3_shutdown diff --git a/test/sort.test b/test/sort.test index 08d496b25..1c89552bb 100644 --- a/test/sort.test +++ b/test/sort.test @@ -8,10 +8,10 @@ # May you share freely, never taking more than you give. # #*********************************************************************** +# # This file implements regression tests for SQLite library. The -# focus of this file is testing the CREATE TABLE statement. +# focus of this file is testing the sorter (code in vdbesort.c). # -# $Id: sort.test,v 1.25 2005/11/14 22:29:06 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -464,4 +464,175 @@ do_test sort-12.1 { } } {1 2 xxx 1 3 yyy 1 1 zzz} +#------------------------------------------------------------------------- +# Check that the sorter in vdbesort.c sorts in a stable fashion. +# +do_execsql_test sort-13.0 { + CREATE TABLE t10(a, b); +} +do_test sort-13.1 { + db transaction { + for {set i 0} {$i < 100000} {incr i} { + execsql { INSERT INTO t10 VALUES( $i/10, $i%10 ) } + } + } +} {} +do_execsql_test sort-13.2 { + SELECT a, b FROM t10 ORDER BY a; +} [db eval {SELECT a, b FROM t10 ORDER BY a, b}] +do_execsql_test sort-13.3 { + PRAGMA cache_size = 5; + SELECT a, b FROM t10 ORDER BY a; +} [db eval {SELECT a, b FROM t10 ORDER BY a, b}] + +#------------------------------------------------------------------------- +# Sort some large ( > 4KiB) records. 
+# +proc cksum {x} { + set i1 1 + set i2 2 + binary scan $x c* L + foreach {a b} $L { + set i1 [expr (($i2<<3) + $a) & 0x7FFFFFFF] + set i2 [expr (($i1<<3) + $b) & 0x7FFFFFFF] + } + list $i1 $i2 +} +db func cksum cksum + +do_execsql_test sort-14.0 { + PRAGMA cache_size = 5; + CREATE TABLE t11(a, b); + INSERT INTO t11 VALUES(randomblob(5000), NULL); + INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --2 + INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --3 + INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --4 + INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --5 + INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --6 + INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --7 + INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --8 + INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --9 + UPDATE t11 SET b = cksum(a); +} + +foreach {tn mmap_limit} { + 1 0 + 2 1000000 +} { + do_test sort-14.$tn { + sqlite3_test_control SQLITE_TESTCTRL_SORTER_MMAP db $mmap_limit + set prev "" + db eval { SELECT * FROM t11 ORDER BY b } { + if {$b != [cksum $a]} {error "checksum failed"} + if {[string compare $b $prev] < 0} {error "sort failed"} + set prev $b + } + set {} {} + } {} +} + +#------------------------------------------------------------------------- +# +foreach {tn mmap_limit nWorker tmpstore coremutex fakeheap softheaplimit} { + 1 0 3 file true false 0 + 2 0 3 file true true 0 + 3 0 0 file true false 0 + 4 1000000 3 file true false 0 + 5 0 0 memory false true 0 + 6 0 0 file false true 1000000 + 7 0 0 file false true 10000 +} { + db close + sqlite3_shutdown + if {$coremutex} { + sqlite3_config multithread + } else { + sqlite3_config singlethread + } + sqlite3_initialize + sorter_test_fakeheap $fakeheap + sqlite3_soft_heap_limit $softheaplimit + + reset_db + sqlite3_test_control SQLITE_TESTCTRL_SORTER_MMAP db $mmap_limit + execsql "PRAGMA temp_store = $tmpstore; PRAGMA threads = $nWorker" + + + set ten [string repeat X 10300] + set one [string repeat y 200] + + if {$softheaplimit} { + execsql { PRAGMA cache_size = 20 }; + } else { + execsql { PRAGMA cache_size = 5 }; + } + + do_execsql_test 15.$tn.1 { + WITH rr AS ( + SELECT 4, $ten UNION ALL + SELECT 2, $one UNION ALL + SELECT 1, $ten UNION ALL + SELECT 3, $one + ) + SELECT * FROM rr ORDER BY 1; + } [list 1 $ten 2 $one 3 $one 4 $ten] + + do_execsql_test 15.$tn.2 { + CREATE TABLE t1(a); + INSERT INTO t1 VALUES(4); + INSERT INTO t1 VALUES(5); + INSERT INTO t1 VALUES(3); + INSERT INTO t1 VALUES(2); + INSERT INTO t1 VALUES(6); + INSERT INTO t1 VALUES(1); + CREATE INDEX i1 ON t1(a); + SELECT * FROM t1 ORDER BY a; + } {1 2 3 4 5 6} + + do_execsql_test 15.$tn.3 { + WITH rr AS ( + SELECT 4, $ten UNION ALL + SELECT 2, $one + ) + SELECT * FROM rr ORDER BY 1; + } [list 2 $one 4 $ten] + + sorter_test_fakeheap 0 +} + +db close +sqlite3_shutdown +set t(0) singlethread +set t(1) multithread +set t(2) serialized +sqlite3_config $t($sqlite_options(threadsafe)) +sqlite3_initialize +sqlite3_soft_heap_limit 0 + +reset_db +do_catchsql_test 16.1 { + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(1, NULL, 3); + INSERT INTO t1 VALUES(NULL, 2, 3); + INSERT INTO t1 VALUES(1, 2, NULL); + INSERT INTO t1 VALUES(4, 5, 6); + CREATE UNIQUE INDEX i1 ON t1(b, a, c); +} {0 {}} +reset_db +do_catchsql_test 16.2 { + CREATE TABLE t1(a, b, c); + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(1, NULL, 3); + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t1 VALUES(1, 2, NULL); + INSERT INTO t1 
VALUES(4, 5, 6); + CREATE UNIQUE INDEX i1 ON t1(b, a, c); +} {1 {UNIQUE constraint failed: t1.b, t1.a, t1.c}} + +reset_db +do_execsql_test 17.1 { + SELECT * FROM sqlite_master ORDER BY sql; +} {} + finish_test diff --git a/test/sort2.test b/test/sort2.test new file mode 100644 index 000000000..29001f009 --- /dev/null +++ b/test/sort2.test @@ -0,0 +1,80 @@ +# 2014 March 25. +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# Specifically, the tests in this file attempt to verify that +# multi-threaded sorting works. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +set testprefix sort2 + +foreach {tn script} { + 1 { } + 2 { + catch { db close } + reset_db + catch { db eval {PRAGMA threads=7} } + } +} { + + eval $script + + do_execsql_test $tn.1 { + PRAGMA cache_size = 5; + WITH r(x,y) AS ( + SELECT 1, randomblob(100) + UNION ALL + SELECT x+1, randomblob(100) FROM r + LIMIT 100000 + ) + SELECT count(x), length(y) FROM r GROUP BY (x%5) + } { + 20000 100 20000 100 20000 100 20000 100 20000 100 + } + + do_execsql_test $tn.2.1 { + CREATE TABLE t1(a, b); + WITH r(x,y) AS ( + SELECT 1, randomblob(100) + UNION ALL + SELECT x+1, randomblob(100) FROM r + LIMIT 10000 + ) INSERT INTO t1 SELECT * FROM r; + } + + do_execsql_test $tn.2.2 { + CREATE UNIQUE INDEX i1 ON t1(b, a); + } + + do_execsql_test $tn.2.3 { + CREATE UNIQUE INDEX i2 ON t1(a); + } + + do_execsql_test $tn.2.4 { PRAGMA integrity_check } {ok} + + breakpoint + do_execsql_test $tn.3 { + PRAGMA cache_size = 5; + WITH r(x,y) AS ( + SELECT 1, randomblob(100) + UNION ALL + SELECT x+1, randomblob(100) FROM r + LIMIT 1000000 + ) + SELECT count(x), length(y) FROM r GROUP BY (x%5) + } { + 200000 100 200000 100 200000 100 200000 100 200000 100 + } +} + +finish_test diff --git a/test/sort3.test b/test/sort3.test new file mode 100644 index 000000000..80d8bbca3 --- /dev/null +++ b/test/sort3.test @@ -0,0 +1,67 @@ +# 2014 March 25. +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# The tests in this file verify that sorting works when the library is +# configured to use mmap(), but the temporary files generated by the +# sorter are too large to be completely mapped. +# + +set testdir [file dirname $argv0] +source $testdir/tester.tcl +set testprefix sort3 + +# Sort roughly 20MB of data. Once with a mmap limit of 5MB and once without. +# +foreach {itest limit} { + 1 5000000 + 2 0x7FFFFFFF +} { + sqlite3_test_control SQLITE_TESTCTRL_SORTER_MMAP db $limit + do_execsql_test 1.$itest { + WITH r(x,y) AS ( + SELECT 1, randomblob(1000) + UNION ALL + SELECT x+1, randomblob(1000) FROM r + LIMIT 20000 + ) + SELECT count(*), sum(length(y)) FROM r GROUP BY (x%5); + } { + 4000 4000000 + 4000 4000000 + 4000 4000000 + 4000 4000000 + 4000 4000000 + } +} + +# Sort more than 2GB of data. At one point this was causing a problem. 
+# This test might take one minute or more to run.
+#
+do_execsql_test 2 {
+  PRAGMA cache_size = 20000;
+  WITH r(x,y) AS (
+    SELECT 1, randomblob(1000)
+    UNION ALL
+    SELECT x+1, randomblob(1000) FROM r
+    LIMIT 2200000
+  )
+  SELECT count(*), sum(length(y)) FROM r GROUP BY (x%5);
+} {
+  440000 440000000
+  440000 440000000
+  440000 440000000
+  440000 440000000
+  440000 440000000
+}
+
+finish_test
+
diff --git a/test/sort4.test b/test/sort4.test
new file mode 100644
index 000000000..01fcbfee9
--- /dev/null
+++ b/test/sort4.test
@@ -0,0 +1,189 @@
+# 2014 May 6.
+#
+# The author disclaims copyright to this source code.  In place of
+# a legal notice, here is a blessing:
+#
+#    May you do good and not evil.
+#    May you find forgiveness for yourself and forgive others.
+#    May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# The tests in this file are brute force tests of the multi-threaded
+# sorter.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set testprefix sort4
+
+# Configure the sorter to use 3 background threads.
+db eval {PRAGMA threads=3}
+
+# Minimum number of seconds to run for. If the value is 0, each test
+# is run exactly once. Otherwise, tests are repeated until the timeout
+# expires.
+set SORT4TIMEOUT 0
+if {[permutation] == "multithread"} { set SORT4TIMEOUT 300 }
+
+#--------------------------------------------------------------------
+# Set up a table "t1" containing $nRow rows. Each row contains also
+# contains blob fields that collectively contain at least $nPayload
+# bytes of content. The table schema is as follows:
+#
+#   CREATE TABLE t1(a INTEGER, <extra-columns>, b INTEGER);
+#
+# For each row, the values of columns "a" and "b" are set to the same
+# pseudo-randomly selected integer. The "extra-columns", of which there
+# are at most eight, are named c0, c1, c2 etc. Column c0 contains a 4
+# byte string. Column c1 an 8 byte string. Field c2 16 bytes, and so on.
+#
+# This table is intended to be used for testing queries of the form:
+#
+#   SELECT a, <cols>, b FROM t1 ORDER BY a;
+#
+# The test code checks that rows are returned in order, and that the
+# values of "a" and "b" are the same for each row (the idea being that
+# if field "b" at the end of the sorter record has not been corrupted,
+# the rest of the record is probably Ok as well).
+#
+proc populate_table {nRow nPayload} {
+  set nCol 0
+
+  set n 0
+  for {set nCol 0} {$n < $nPayload} {incr nCol} {
+    incr n [expr (4 << $nCol)]
+  }
+
+  set cols [lrange [list xxx c0 c1 c2 c3 c4 c5 c6 c7] 1 $nCol]
+  set data [lrange [list xxx \
+    randomblob(4) randomblob(8) randomblob(16) randomblob(32) \
+    randomblob(64) randomblob(128) randomblob(256) randomblob(512) \
+  ] 1 $nCol]
+
+  execsql { DROP TABLE IF EXISTS t1 }
+
+  db transaction {
+    execsql "CREATE TABLE t1(a, [join $cols ,], b);"
+    set insert "INSERT INTO t1 VALUES(:k, [join $data ,], :k)"
+    for {set i 0} {$i < $nRow} {incr i} {
+      set k [expr int(rand()*1000000000)]
+      execsql $insert
+    }
+  }
+}
+
+# Helper for [do_sorter_test]
+#
+proc sorter_test {nRow nRead nPayload} {
+  set res [list]
+
+  set nLoad [expr ($nRow > $nRead) ? $nRead : $nRow]
+
+  set nPayload [expr (($nPayload+3)/4) * 4]
+  set cols [list]
+  foreach {mask col} {
+    0x04 c0   0x08 c1   0x10 c2   0x20 c3
+    0x40 c4   0x80 c5   0x100 c6  0x200 c7
+  } {
+    if {$nPayload & $mask} { lappend cols $col }
+  }
+
+  # Create two SELECT statements. Statement $sql1 uses the sorter to sort
+  # $nRow records of a bit over $nPayload bytes each read from the "t1"
+  # table created by [populate_table] proc above. Rows are sorted in order
+  # of the integer field in each "t1" record.
+  #
+  # The second SQL statement sorts the same set of rows as the first, but
+  # uses a LIMIT clause, causing SQLite to use a temp table instead of the
+  # sorter for sorting.
+  #
+  set sql1 "SELECT a, [join $cols ,], b FROM t1 WHERE rowid<=$nRow ORDER BY a"
+  set sql2 "SELECT a FROM t1 WHERE rowid<=$nRow ORDER BY a LIMIT $nRead"
+
+  # Pass the two SQL statements to a helper command written in C. This
+  # command steps statement $sql1 $nRead times and compares the integer
+  # values in the rows returned with the results of executing $sql2. If
+  # the comparison fails (indicating some bug in the sorter), a Tcl
+  # exception is thrown.
+  #
+  sorter_test_sort4_helper db $sql1 $nRead $sql2
+  set {} {}
+}
+
+# Usage:
+#
+#   do_sorter_test <testname> <args>...
+#
+# where <args> are any of the following switches:
+#
+#   -rows N        (number of rows to have sorter sort)
+#   -read N        (number of rows to read out of sorter)
+#   -payload N     (bytes of payload to read with each row)
+#   -cachesize N   (Value for "PRAGMA cache_size = ?")
+#   -repeats N     (number of times to repeat test)
+#   -fakeheap BOOL (true to use separate allocations for in-memory records)
+#
+proc do_sorter_test {tn args} {
+  set a(-rows)      1000
+  set a(-repeats)   1
+  set a(-read)      100
+  set a(-payload)   100
+  set a(-cachesize) 100
+  set a(-fakeheap)  0
+
+  foreach {s val} $args {
+    if {[info exists a($s)]==0} {
+      unset a(-cachesize)
+      set optlist "[join [array names a] ,] or -cachesize"
+      error "Unknown option $s, expected $optlist"
+    }
+    set a($s) $val
+  }
+  if {[permutation] == "memsys3" || [permutation] == "memsys5"} {
+    set a(-fakeheap) 0
+  }
+  if {$a(-fakeheap)} { sorter_test_fakeheap 1 }
+
+
+  db eval "PRAGMA cache_size = $a(-cachesize)"
+  do_test $tn [subst -nocommands {
+    for {set i 0} {[set i] < $a(-repeats)} {incr i} {
+      sorter_test $a(-rows) $a(-read) $a(-payload)
+    }
+  }] {}
+
+  if {$a(-fakeheap)} { sorter_test_fakeheap 0 }
+}
+
+proc clock_seconds {} {
+  db one {SELECT strftime('%s')}
+}
+
+#-------------------------------------------------------------------------
+# Begin tests here.
+
+# Create a test database.
+do_test 1 {
+  execsql "PRAGMA page_size = 4096"
+  populate_table 100000 500
+} {}
+
+set iTimeLimit [expr [clock_seconds] + $SORT4TIMEOUT]
+
+for {set t 2} {1} {incr tn} {
+  do_sorter_test $t.2 -repeats 10 -rows 1000   -read 100
+  do_sorter_test $t.3 -repeats 10 -rows 100000 -read 1000
+  do_sorter_test $t.4 -repeats 10 -rows 100000 -read 1000 -payload 500
+  do_sorter_test $t.5 -repeats 10 -rows 100000 -read 100000 -payload 8
+  do_sorter_test $t.6 -repeats 10 -rows 100000 -read 10 -payload 8
+  do_sorter_test $t.7 -repeats 10 -rows 10000  -read 10000 -payload 8 -fakeheap 1
+  do_sorter_test $t.8 -repeats 10 -rows 100000 -read 10000 -cachesize 250
+
+  set iNow [clock_seconds]
+  if {$iNow>=$iTimeLimit} break
+  do_test "$testprefix-([expr $iTimeLimit-$iNow] seconds remain)" {} {}
+}
+
+finish_test
diff --git a/test/sortfault.test b/test/sortfault.test
new file mode 100644
index 000000000..a1983ac1c
--- /dev/null
+++ b/test/sortfault.test
@@ -0,0 +1,165 @@
+# 2014 March 25.
+#
+# The author disclaims copyright to this source code.  In place of
+# a legal notice, here is a blessing:
+#
+#    May you do good and not evil.
+#    May you find forgiveness for yourself and forgive others.
+#    May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# Specifically, it tests the effects of fault injection on the sorter
+# module (code in vdbesort.c).
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set testprefix sortfault
+
+do_execsql_test 1.0 {
+  PRAGMA cache_size = 5;
+}
+
+foreach {tn mmap_limit nWorker tmpstore threadsmode fakeheap lookaside} {
+  1 0       0 file multithread  false false
+  2 100000  0 file multithread  false false
+  3 100000  1 file multithread  false false
+  4 2000000 0 file singlethread false true
+} {
+  if {$sqlite_options(threadsafe)} { set threadsmode singlethread }
+
+  db eval "PRAGMA threads=$nWorker"
+  sqlite3_config $threadsmode
+  if { $lookaside } {
+    sqlite3_config_lookaside 100 500
+  } else {
+    sqlite3_config_lookaside 0 0
+  }
+  sqlite3_initialize
+  sorter_test_fakeheap $fakeheap
+
+  set str [string repeat a 1000]
+  puts $threadsmode
+
+  do_faultsim_test 1.$tn -prep {
+    sqlite3 db test.db
+    sqlite3_test_control SQLITE_TESTCTRL_SORTER_MMAP db $::mmap_limit
+    execsql { PRAGMA cache_size = 5 }
+  } -body {
+    execsql {
+      WITH r(x,y) AS (
+        SELECT 1, $::str
+        UNION ALL
+        SELECT x+1, $::str FROM r
+        LIMIT 200
+      )
+      SELECT count(x), length(y) FROM r GROUP BY (x%5)
+    }
+  } -test {
+    faultsim_test_result {0 {40 1000 40 1000 40 1000 40 1000 40 1000}}
+  }
+
+  do_faultsim_test 2.$tn -faults oom* -prep {
+    sqlite3 db test.db
+    sqlite3_test_control SQLITE_TESTCTRL_SORTER_MMAP db $::mmap_limit
+    add_test_utf16bin_collate db
+    execsql { PRAGMA cache_size = 5 }
+  } -body {
+    execsql {
+      WITH r(x,y) AS (
+        SELECT 100, $::str
+        UNION ALL
+        SELECT x-1, $::str FROM r
+        LIMIT 100
+      )
+      SELECT count(x), length(y) FROM r GROUP BY y COLLATE utf16bin, (x%5)
+    }
+  } -test {
+    faultsim_test_result {0 {20 1000 20 1000 20 1000 20 1000 20 1000}}
+  }
+
+  if {$mmap_limit > 1000000} {
+    set str2 [string repeat $str 10]
+
+    sqlite3_memdebug_vfs_oom_test 0
+    sqlite3 db test.db
+    sqlite3_test_control SQLITE_TESTCTRL_SORTER_MMAP db $::mmap_limit
+    execsql { PRAGMA cache_size = 5 }
+
+    do_faultsim_test 3.$tn -faults oom-trans* -body {
+      execsql {
+        WITH r(x,y) AS (
+          SELECT 300, $::str2
+          UNION ALL
+          SELECT x-1, $::str2 FROM r
+          LIMIT 300
+        )
+        SELECT count(x), length(y) FROM r GROUP BY y, (x%5)
+      }
+    } -test {
+      faultsim_test_result {0 {60 10000 60 10000 60 10000 60 10000 60 10000}}
+    }
+
+    sqlite3_memdebug_vfs_oom_test 1
+  }
+}
+
+catch { db close }
+sqlite3_shutdown
+set t(0) singlethread
+set t(1) multithread
+set t(2) serialized
+sqlite3_config $t($sqlite_options(threadsafe))
+sqlite3_config_lookaside 100 500
+sqlite3_initialize
+
+#-------------------------------------------------------------------------
+#
+reset_db
+do_execsql_test 4.0 {
+  CREATE TABLE t1(a, b, c);
+  INSERT INTO t1 VALUES(1, 2, 3);
+}
+do_test 4.1 {
+  for {set i 0} {$i < 256} {incr i} {
+    execsql {
+      INSERT INTO t1 SELECT
+        ((a<<3) + b) & 2147483647,
+        ((b<<3) + c) & 2147483647,
+        ((c<<3) + a) & 2147483647
+      FROM t1 ORDER BY rowid DESC LIMIT 1;
+    }
+  }
+} {}
+
+faultsim_save_and_close
+
+do_faultsim_test 4.2 -faults oom* -prep {
+  faultsim_restore_and_reopen
+} -body {
+  execsql { CREATE UNIQUE INDEX i1 ON t1(a,b,c) }
+} -test {
+  faultsim_test_result {0 {}}
+}
+
+#-------------------------------------------------------------------------
+#
+reset_db
+set a [string repeat a 500]
+set b [string repeat b 500]
+set c [string repeat c 500]
+do_execsql_test 5.0 {
+  CREATE TABLE t1(a, b, c);
+  INSERT INTO t1 VALUES($a, $b, $c);
+  INSERT INTO t1 VALUES($c, $b, $a);
+}
+
+do_faultsim_test 5.1 -faults oom* -body {
+  execsql { SELECT * FROM t1 ORDER BY a }
+} -test {
+  faultsim_test_result [list 0 [list $::a $::b $::c $::c $::b $::a]]
+}
+
+finish_test
diff --git a/test/speedtest1.c b/test/speedtest1.c
index 383f5809a..8589b1633 100644
--- a/test/speedtest1.c
+++ b/test/speedtest1.c
@@ -27,6 +27,7 @@ static const char zHelp[] =
   "  --stats             Show statistics at the end\n"
   "  --testset T         Run test-set T\n"
   "  --trace             Turn on SQL tracing\n"
+  "  --threads N         Use up to N threads for sorting\n"
   "  --utf16be           Set text encoding to UTF-16BE\n"
   "  --utf16le           Set text encoding to UTF-16LE\n"
   "  --verify            Run additional verification steps.\n"
@@ -1141,6 +1142,7 @@ int main(int argc, char **argv){
   int nPCache = 0, szPCache = 0;/* --pcache configuration */
   int nScratch = 0, szScratch=0;/* --scratch configuration */
   int showStats = 0;            /* True for --stats */
+  int nThread = 0;              /* --threads value */
   const char *zTSet = "main";   /* Which --testset to run */
   int doTrace = 0;              /* True for --trace */
   const char *zEncoding = 0;    /* --utf16be or --utf16le */
@@ -1225,6 +1227,9 @@ int main(int argc, char **argv){
       zTSet = argv[++i];
     }else if( strcmp(z,"trace")==0 ){
       doTrace = 1;
+    }else if( strcmp(z,"threads")==0 ){
+      if( i>=argc-1 ) fatal_error("missing argument on %s\n", argv[i]);
+      nThread = integerValue(argv[++i]);
     }else if( strcmp(z,"utf16le")==0 ){
       zEncoding = "utf16le";
     }else if( strcmp(z,"utf16be")==0 ){
@@ -1290,6 +1295,7 @@ int main(int argc, char **argv){
   /* Set database connection options */
   sqlite3_create_function(g.db, "random", 0, SQLITE_UTF8, 0, randomFunc, 0, 0);
   if( doTrace ) sqlite3_trace(g.db, traceCallback, 0);
+  speedtest1_exec("PRAGMA threads=%d", nThread);
   if( zKey ){
     speedtest1_exec("PRAGMA key('%s')", zKey);
   }
diff --git a/test/tester.tcl b/test/tester.tcl
index d19658d38..05c2aaeb0 100644
--- a/test/tester.tcl
+++ b/test/tester.tcl
@@ -1083,6 +1083,7 @@ proc explain_i {sql {db db}} {
   foreach opcode {
       Seek SeekGe SeekGt SeekLe SeekLt NotFound Last Rewind
       NoConflict Next Prev VNext VPrev VFilter
+      SorterSort SorterNext
   } {
     set color($opcode) $B
   }
@@ -1105,6 +1106,7 @@ proc explain_i {sql {db db}} {
     if {$opcode=="Next"  || $opcode=="Prev"
      || $opcode=="VNext" || $opcode=="VPrev"
+     || $opcode=="SorterNext"
     } {
       for {set i $p2} {$i<$addr} {incr i} {
         incr x($i) 2
diff --git a/test/whereJ.test b/test/whereJ.test
index 5209f1619..7c37321cb 100644
--- a/test/whereJ.test
+++ b/test/whereJ.test
@@ -371,5 +371,52 @@ do_execsql_test whereJ-2.2 {
   ORDER BY t4.x;
 } {~/SCAN/}
 
+############################################################################
+
+ifcapable stat4 {
+  # Create and populate table.
+  do_execsql_test 3.1 { CREATE TABLE t1(a, b, c) }
+  for {set i 0} {$i < 32} {incr i 2} {
+    for {set x 0} {$x < 100} {incr x} {
+      execsql { INSERT INTO t1 VALUES($i, $x, $c) }
+      incr c
+    }
+    execsql { INSERT INTO t1 VALUES($i+1, 5, $c) }
+    incr c
+  }
+
+  do_execsql_test 3.2 {
+    SELECT a, count(*) FROM t1 GROUP BY a HAVING a < 8;
+  } {
+    0 100 1 1 2 100 3 1 4 100 5 1 6 100 7 1
+  }
+
+  do_execsql_test 3.3 {
+    CREATE INDEX idx_ab ON t1(a, b);
+    CREATE INDEX idx_c ON t1(c);
+    ANALYZE;
+  } {}
+
+  # This one should use index "idx_c".
+  do_eqp_test 3.4 {
+    SELECT * FROM t1 WHERE
+      a = 4 AND b BETWEEN 20 AND 80      -- Matches 80 rows
+      AND
+      c BETWEEN 150 AND 160              -- Matches 10 rows
+  } {
+    0 0 0 {SEARCH TABLE t1 USING INDEX idx_c (c>? AND c<?)}
+  }
+
+  # This one should use index "idx_ab".
+  do_eqp_test 3.5 {
+    SELECT * FROM t1 WHERE
+      a = 5 AND b BETWEEN 20 AND 80      -- Matches 1 row
+      AND
+      c BETWEEN 150 AND 160              -- Matches 10 rows
+  } {
+    0 0 0 {SEARCH TABLE t1 USING INDEX idx_ab (a=? AND b>? AND b<?)}
+  }
+}
+
 finish_test
diff --git a/tool/mkpragmatab.tcl b/tool/mkpragmatab.tcl
index 28a1e468b..aa7c8078c 100644
--- a/tool/mkpragmatab.tcl
+++ b/tool/mkpragmatab.tcl
@@ -294,6 +294,8 @@ set pragma_def {
   IF:   defined(SQLITE_HAS_CODEC) || defined(SQLITE_ENABLE_CEROD)
 
   NAME: soft_heap_limit
+
+  NAME: threads
 }
 fconfigure stdout -translation lf
 set name {}
diff --git a/tool/mksqlite3c-noext.tcl b/tool/mksqlite3c-noext.tcl
index ecb9cb043..f54b347be 100644
--- a/tool/mksqlite3c-noext.tcl
+++ b/tool/mksqlite3c-noext.tcl
@@ -239,6 +239,7 @@ foreach file {
   malloc.c
   printf.c
   random.c
+  threads.c
   utf.c
   util.c
   hash.c
diff --git a/tool/mksqlite3c.tcl b/tool/mksqlite3c.tcl
index 11e0f05f7..7f25be992 100644
--- a/tool/mksqlite3c.tcl
+++ b/tool/mksqlite3c.tcl
@@ -255,6 +255,7 @@ foreach file {
   malloc.c
   printf.c
   random.c
+  threads.c
  utf.c
   util.c
   hash.c
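
For context, a minimal C sketch of how an application might exercise the feature this changeset introduces: the new threads.c module and the "PRAGMA threads" setting (wired up in mkpragmatab.tcl above and used by speedtest1's --threads option). This is an illustrative example only, not part of the diff; the database file "test.db" and table "t1" are placeholders, and only documented public API calls (sqlite3_open, sqlite3_exec, sqlite3_prepare_v2, sqlite3_step) are used.

/* Illustrative sketch -- not part of the changeset above.
** Ask one connection for up to four sorter worker threads, then run a
** query whose large ORDER BY can be handled by the multi-threaded
** external-merge sorter.  "test.db" and table "t1" are placeholders. */
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db = 0;
  sqlite3_stmt *pStmt = 0;

  if( sqlite3_open("test.db", &db)!=SQLITE_OK ) return 1;

  /* Allow SQLite to launch up to 4 helper threads for big sorts. */
  sqlite3_exec(db, "PRAGMA threads=4", 0, 0, 0);

  /* Rows still come back fully sorted; only the internal sort work
  ** is distributed across the worker threads. */
  if( sqlite3_prepare_v2(db, "SELECT a FROM t1 ORDER BY a", -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
    }
    sqlite3_finalize(pStmt);
  }
  sqlite3_close(db);
  return 0;
}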