-rw-r--r--   VERSION              2
-rw-r--r--   manifest            28
-rw-r--r--   manifest.uuid        2
-rw-r--r--   src/md5.c           33
-rw-r--r--   src/tclsqlite.c      8
-rw-r--r--   src/test1.c         20
-rw-r--r--   test/trans.test    134
-rw-r--r--   www/changes.tcl      4
-rw-r--r--   www/formatchng.tcl   4
-rw-r--r--   www/lang.tcl        61
-rw-r--r--   www/speed.tcl      491
11 files changed, 555 insertions, 232 deletions
diff --git a/VERSION b/VERSION
index 9125dd1dc..197c4d5c2 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.4.0-beta2
+2.4.0
diff --git a/manifest b/manifest
index ae35d4703..9f859b51a 100644
--- a/manifest
+++ b/manifest
@@ -1,9 +1,9 @@
-C Bug\sfix:\supdates\swithin\sa\stransaction\swould\sfail\sif\sthere\swas\sexisted\na\stemporary\stable.\s(CVS\s425)
-D 2002-03-10T21:21:00
+C Preparing\sfor\sthe\s2.4.0\srelease.\s(CVS\s426)
+D 2002-03-11T02:06:13
F Makefile.in 50f1b3351df109b5774771350d8c1b8d3640130d
F Makefile.template 89e373b2dad0321df00400fa968dc14b61a03296
F README a4c0ba11354ef6ba0776b400d057c59da47a4cc0
-F VERSION 4bed6a4fd03c5b6580757d22549c836e7cf6211a
+F VERSION b4f17c505b8cd87aca34ebc2eb916ff0b4bc259d
F aclocal.m4 11faa843caa38fd451bc6aeb43e248d1723a269d
F config.guess f38b1e93d1e0fa6f5a6913e9e7b12774b9232588
F config.log 6a73d03433669b10a3f0c221198c3f26b9413914
@@ -29,7 +29,7 @@ F src/hash.c cc259475e358baaf299b00a2c7370f2b03dda892
F src/hash.h dca065dda89d4575f3176e75e9a3dc0f4b4fb8b9
F src/insert.c 42bfd145efd428d7e5f200dd49ea0b816fc30d79
F src/main.c b21019084b93fe685a8a25217d01f6958584ae9b
-F src/md5.c 52f677bfc590e09f71d07d7e327bd59da738d07c
+F src/md5.c b2b1a34fce66ceca97f4e0dabc20be8be7933c92
F src/os.c db969ecd1bcb4fef01b0b541b8b17401b0eb7ed2
F src/os.h a17596ecc7f38a228b83ecdb661fb03ce44726d6
F src/pager.c f136f5ba82c896d500a10b6a2e5caea62abf716b
@@ -43,8 +43,8 @@ F src/shell.tcl 27ecbd63dd88396ad16d81ab44f73e6c0ea9d20e
F src/sqlite.h.in 1dae50411aee9439860d7fbe315183c582d27197
F src/sqliteInt.h 6f4a1bea4858089eb516f59562762965c6ef5cb8
F src/table.c 203a09d5d0009eeeb1f670370d52b4ce163a3b52
-F src/tclsqlite.c b9cf346e95291cb4c4f1bf5ac1d77db6b8ad023d
-F src/test1.c 33efd350dca27c52c58c553c04fd3a6a51f13c1f
+F src/tclsqlite.c df847b71b28277f1cfa1ee1e3e51452ffe5a9a26
+F src/test1.c d46ab7a82a9c16a3b1ee363cb4c0f98c5ff65743
F src/test2.c d410dbd8a90faa466c3ab694fa0aa57f5a773aa6
F src/test3.c 4e52fff8b01f08bd202f7633feda5639b7ba2b5e
F src/threadtest.c 81f0598e0f031c1bd506af337fdc1b7e8dff263f
@@ -96,7 +96,7 @@ F test/tableapi.test 51d0c209aa6b1158cb952ec917c656d4ce66e9e4
F test/tclsqlite.test ca8dd89b02ab68bd4540163c24551756a69f6783
F test/temptable.test 0e9934283259a5e637eec756a7eefd6964c0f79b
F test/tester.tcl dc1b56bd628b487e4d75bfd1e7480b5ed8810ac6
-F test/trans.test 9e49495c06b1c41f889bf4f0fb195a015b126de0
+F test/trans.test ae0b9a82d5d34122c3a3108781eb8d078091ccee
F test/unique.test 07776624b82221a80c8b4138ce0dd8b0853bb3ea
F test/update.test 3cf1ca0565f678063c2dfa9a7948d2d66ae1a778
F test/vacuum.test 059871b312eb910bbe49dafde1d01490cc2c6bbe
@@ -115,22 +115,22 @@ F www/arch.fig d5f9752a4dbf242e9cfffffd3f5762b6c63b3bcf
F www/arch.png 82ef36db1143828a7abc88b1e308a5f55d4336f4
F www/arch.tcl 72a0c80e9054cc7025a50928d28d9c75c02c2b8b
F www/c_interface.tcl 567cda531aac9d68a61ef02e26c6b202bd856db2
-F www/changes.tcl b43d9e32ed7af9a93c5a9b7321abe2ee6a8f4ea9
+F www/changes.tcl 6e2b0b5347bb38b2ad371fce2c486db616f0437b
F www/conflict.tcl 81dd21f9a679e60aae049e9dd8ab53d59570cda2
F www/crosscompile.tcl 3622ebbe518927a3854a12de51344673eb2dd060
F www/download.tcl a6d75b8b117cd33dcb090bef7e80d7556d28ebe0
F www/dynload.tcl 02eb8273aa78cfa9070dd4501dca937fb22b466c
F www/faq.tcl c6d1d6d69a9083734ee73d1b5ee4253ae8f10074
-F www/formatchng.tcl 5cffc0ebd00b3085c976a527eeeef70db4ccc7a7
+F www/formatchng.tcl 2ce21ff30663fad6618198fe747ce675df577590
F www/index.tcl eacd99bcc3132d6e6b74a51422d415cc0bf7bfdf
-F www/lang.tcl db13f9a9c5ce7a400fa7ae021cd99dc6b05fd74a
+F www/lang.tcl d589f9a39c925d81fa9198b9215b4fd56da4192b
F www/mingw.tcl f1c7c0a7f53387dd9bb4f8c7e8571b7561510ebc
F www/opcode.tcl bdec8ef9f100dbd87bbef8976c54b88e43fd8ccc
-F www/speed.tcl 83457b2bf6bb430900bd48ca3dd98264d9a916a5
+F www/speed.tcl da8afcc1d3ccc5696cfb388a68982bc3d9f7f00f
F www/sqlite.tcl 8b5884354cb615049aed83039f8dfe1552a44279
F www/tclsqlite.tcl 829b393d1ab187fd7a5e978631b3429318885c49
F www/vdbe.tcl 2013852c27a02a091d39a766bc87cff329f21218
-P 145516c93b1a03231e7d84f7f799a39655d7aa99
-R 6aa24ae4349921bb6f6914f243156bfe
+P 02cc2d60b2a5ee50efdbd90df90810ba559a453f
+R 7b12f4656109e08ea529f581aa14155c
U drh
-Z 61ec9c5d0ba02401da95448b5e8eb2b6
+Z f234e1ef2f8006b126be3e8559884083
diff --git a/manifest.uuid b/manifest.uuid
index a307586f0..9ad614850 100644
--- a/manifest.uuid
+++ b/manifest.uuid
@@ -1 +1 @@
-02cc2d60b2a5ee50efdbd90df90810ba559a453f
\ No newline at end of file
+9f5b241cb2fc89f66d3762b4b4978b8e114caf53
\ No newline at end of file
diff --git a/src/md5.c b/src/md5.c
index c6c70628a..9e791f2fa 100644
--- a/src/md5.c
+++ b/src/md5.c
@@ -30,6 +30,7 @@
*/
#include <tcl.h>
#include <string.h>
+#include "sqlite.h"
/*
* If compiled on a machine that doesn't have a 32-bit integer,
@@ -350,3 +351,35 @@ int Md5_Init(Tcl_Interp *interp){
Tcl_CreateCommand(interp, "md5file", md5file_cmd, 0, 0);
return TCL_OK;
}
+
+/*
+** During testing, the special md5sum() aggregate function is available
+** inside SQLite. The following routines implement that function.
+*/
+static void md5step(sqlite_func *context, int argc, const char **argv){
+ MD5Context *p;
+ int i;
+ if( argc<1 ) return;
+ p = sqlite_aggregate_context(context, sizeof(*p));
+ if( p==0 ) return;
+ if( sqlite_aggregate_count(context)==1 ){
+ MD5Init(p);
+ }
+ for(i=0; i<argc; i++){
+ if( argv[i] ){
+ MD5Update(p, (unsigned char*)argv[i], strlen(argv[i]));
+ }
+ }
+}
+static void md5finalize(sqlite_func *context){
+ MD5Context *p;
+ unsigned char digest[16];
+ char zBuf[33];
+ p = sqlite_aggregate_context(context, sizeof(*p));
+ MD5Final(digest,p);
+ DigestToBase16(digest, zBuf);
+ sqlite_set_result_string(context, zBuf, strlen(zBuf));
+}
+void Md5_Register(sqlite *db){
+ sqlite_create_aggregate(db, "md5sum", -1, md5step, md5finalize, 0);
+}
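
The md5step()/md5finalize() pair above follows the standard SQLite 2.x custom-aggregate pattern: sqlite_create_aggregate() names the function, sqlite_aggregate_context() lazily allocates zero-filled per-aggregate state, the step callback folds in each row, and the finalizer publishes the result through one of the sqlite_set_result_*() routines. A minimal sketch of the same pattern, not part of this commit (the charsum() aggregate and CharSum_Register() helper are hypothetical), that totals the lengths of its text arguments:

    #include <string.h>
    #include "sqlite.h"

    /* Per-aggregate state: running total of characters seen so far. */
    typedef struct CharSumCtx { int nChar; } CharSumCtx;

    static void charsumStep(sqlite_func *context, int argc, const char **argv){
      int i;
      /* The first call allocates zero-filled state, so nChar starts at 0. */
      CharSumCtx *p = sqlite_aggregate_context(context, sizeof(*p));
      if( p==0 ) return;
      for(i=0; i<argc; i++){
        if( argv[i] ) p->nChar += strlen(argv[i]);
      }
    }

    static void charsumFinalize(sqlite_func *context){
      CharSumCtx *p = sqlite_aggregate_context(context, sizeof(*p));
      sqlite_set_result_int(context, p ? p->nChar : 0);
    }

    /* Call once per open database, then: SELECT charsum(c) FROM t2; */
    void CharSum_Register(sqlite *db){
      sqlite_create_aggregate(db, "charsum", -1, charsumStep, charsumFinalize, 0);
    }
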
diff --git a/src/tclsqlite.c b/src/tclsqlite.c
index 541914161..edf947655 100644
--- a/src/tclsqlite.c
+++ b/src/tclsqlite.c
@@ -11,7 +11,7 @@
*************************************************************************
** A TCL Interface to SQLite
**
-** $Id: tclsqlite.c,v 1.29 2002/01/16 21:00:27 drh Exp $
+** $Id: tclsqlite.c,v 1.30 2002/03/11 02:06:13 drh Exp $
*/
#ifndef NO_TCL /* Omit this whole file if TCL is unavailable */
@@ -531,6 +531,12 @@ static int DbMain(void *cd, Tcl_Interp *interp, int argc, char **argv){
return TCL_ERROR;
}
Tcl_CreateObjCommand(interp, argv[1], DbObjCmd, (char*)p, DbDeleteCmd);
+#ifdef SQLITE_TEST
+ {
+ extern void Md5_Register(sqlite*);
+ Md5_Register(p->db);
+ }
+#endif
return TCL_OK;
}
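
With the hunk above, every database handle opened through the Tcl bindings in a SQLITE_TEST build also gets the md5sum() SQL function. The same extern-and-register idiom works from plain C; a minimal sketch assuming the 2.x API and a test build that links in src/md5.c (error handling omitted, not part of this commit):

    #include <stdio.h>
    #include "sqlite.h"

    extern void Md5_Register(sqlite*);   /* provided by src/md5.c in test builds */

    /* Print each result row as "column = value". */
    static int print_row(void *NotUsed, int argc, char **argv, char **azCol){
      int i;
      for(i=0; i<argc; i++){
        printf("%s = %s\n", azCol[i], argv[i] ? argv[i] : "NULL");
      }
      return 0;
    }

    int main(void){
      char *zErr = 0;
      sqlite *db = sqlite_open("test.db", 0, &zErr);   /* 2.x open */
      if( db==0 ) return 1;
      Md5_Register(db);                                /* expose md5sum() to SQL */
      sqlite_exec(db, "SELECT md5sum(x,y,z) FROM t2;", print_row, 0, &zErr);
      sqlite_close(db);
      return 0;
    }
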
diff --git a/src/test1.c b/src/test1.c
index 0ca93fe06..217d1f9d9 100644
--- a/src/test1.c
+++ b/src/test1.c
@@ -13,7 +13,7 @@
** is not included in the SQLite library. It is used for automated
** testing of the SQLite library.
**
-** $Id: test1.c,v 1.6 2002/01/16 21:00:27 drh Exp $
+** $Id: test1.c,v 1.7 2002/03/11 02:06:13 drh Exp $
*/
#include "sqliteInt.h"
#include "tcl.h"
@@ -325,6 +325,23 @@ static int sqlite_malloc_stat(
#endif
/*
+** Usage: sqlite_abort
+**
+** Shut down the process immediately.  This is not a clean shutdown.
+** This command is used to test the recoverability of a database in
+** the event of a program crash.
+*/
+static int sqlite_abort(
+ void *NotUsed,
+ Tcl_Interp *interp, /* The TCL interpreter that invoked this command */
+ int argc, /* Number of arguments */
+ char **argv /* Text of each argument */
+){
+ assert( interp==0 ); /* This will always fail */
+ return TCL_OK;
+}
+
+/*
** Register commands with the TCL interpreter.
*/
int Sqlitetest1_Init(Tcl_Interp *interp){
@@ -344,5 +361,6 @@ int Sqlitetest1_Init(Tcl_Interp *interp){
Tcl_CreateCommand(interp, "sqlite_malloc_fail", sqlite_malloc_fail, 0, 0);
Tcl_CreateCommand(interp, "sqlite_malloc_stat", sqlite_malloc_stat, 0, 0);
#endif
+ Tcl_CreateCommand(interp, "sqlite_abort", sqlite_abort, 0, 0);
return TCL_OK;
}
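
sqlite_abort kills the process by evaluating an assert() that is guaranteed to fail, so no files are closed and no pending transaction is committed, which is exactly what the crash-recovery tests need. The trick relies on assert() being live, i.e. on the test build not defining NDEBUG. A hypothetical variant that also dies in NDEBUG builds could call abort() instead (sketch only, not part of this commit):

    #include <stdlib.h>
    #include <tcl.h>

    /* Hypothetical hard-abort command: terminates even when NDEBUG turns
    ** assert() into a no-op.  Registered like the other test commands. */
    static int sqlite_abort_hard(
      void *NotUsed,
      Tcl_Interp *interp,   /* The TCL interpreter that invoked this command */
      int argc,             /* Number of arguments */
      char **argv           /* Text of each argument */
    ){
      abort();              /* no cleanup, no journal deletion, no fsync */
      return TCL_OK;        /* never reached */
    }

    /* In Sqlitetest1_Init():
    **   Tcl_CreateCommand(interp, "sqlite_abort_hard", sqlite_abort_hard, 0, 0);
    */
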
diff --git a/test/trans.test b/test/trans.test
index 1f0d6ac31..63902b038 100644
--- a/test/trans.test
+++ b/test/trans.test
@@ -11,7 +11,7 @@
# This file implements regression tests for SQLite library. The
# focus of this script is database locks.
#
-# $Id: trans.test,v 1.10 2002/01/10 14:31:49 drh Exp $
+# $Id: trans.test,v 1.11 2002/03/11 02:06:14 drh Exp $
set testdir [file dirname $argv0]
@@ -664,4 +664,136 @@ do_test trans-6.39 {
}
} {1 -2 -3 4 -5 -6}
+# Test to make sure that rollback restores the database to its original
+# state.
+#
+do_test trans-7.1 {
+ execsql {BEGIN}
+ for {set i 0} {$i<1000} {incr i} {
+ set r1 [expr {rand()}]
+ set r2 [expr {rand()}]
+ set r3 [expr {rand()}]
+ execsql "INSERT INTO t2 VALUES($r1,$r2,$r3)"
+ }
+ execsql {COMMIT}
+ set ::checksum [execsql {SELECT md5sum(x,y,z) FROM t2}]
+ set ::checksum2 [
+ execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
+ ]
+ execsql {SELECT count(*) FROM t2}
+} {1001}
+do_test trans-7.2 {
+ execsql {SELECT md5sum(x,y,z) FROM t2}
+} $checksum
+do_test trans-7.2.1 {
+ execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
+} $checksum2
+do_test trans-7.3 {
+ execsql {
+ BEGIN;
+ DELETE FROM t2;
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.4 {
+ execsql {
+ BEGIN;
+ INSERT INTO t2 SELECT * FROM t2;
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.5 {
+ execsql {
+ BEGIN;
+ DELETE FROM t2;
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.6 {
+ execsql {
+ BEGIN;
+ INSERT INTO t2 SELECT * FROM t2;
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.7 {
+ execsql {
+ BEGIN;
+ CREATE TABLE t3 AS SELECT * FROM t2;
+ INSERT INTO t2 SELECT * FROM t3;
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.8 {
+ execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
+} $checksum2
+do_test trans-7.9 {
+ execsql {
+ BEGIN;
+ CREATE TEMP TABLE t3 AS SELECT * FROM t2;
+ INSERT INTO t2 SELECT * FROM t3;
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.10 {
+ execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
+} $checksum2
+do_test trans-7.11 {
+ execsql {
+ BEGIN;
+ CREATE TEMP TABLE t3 AS SELECT * FROM t2;
+ INSERT INTO t2 SELECT * FROM t3;
+ DROP INDEX i2x;
+ DROP INDEX i2y;
+ CREATE INDEX i3a ON t3(x);
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.12 {
+ execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
+} $checksum2
+do_test trans-7.13 {
+ execsql {
+ BEGIN;
+ DROP TABLE t2;
+ ROLLBACK;
+ SELECT md5sum(x,y,z) FROM t2;
+ }
+} $checksum
+do_test trans-7.14 {
+ execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
+} $checksum2
+
+# Arrange for another process to begin modifying the database but abort
+# and die in the middle of the modification. Then have this process read
+# the database. This process should detect the journal file and roll it
+# back. Verify that this happens correctly.
+#
+set fd [open test.tcl w]
+puts $fd {
+ sqlite db test.db
+ db eval {
+ BEGIN;
+ CREATE TABLE t3 AS SELECT * FROM t2;
+ DELETE FROM t2;
+ }
+ sqlite_abort
+}
+close $fd
+do_test trans-8.1 {
+ catch {exec [info nameofexec] test.tcl}
+ execsql {SELECT md5sum(x,y,z) FROM t2}
+} $checksum
+do_test trans-8.2 {
+ execsql {SELECT md5sum(type,name,tbl_name,rootpage,sql) FROM sqlite_master}
+} $checksum2
+
+
finish_test
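
The trans-7.x tests above all follow one recipe: take an md5sum() checksum of t2 (and of sqlite_master), run a transaction that ends in ROLLBACK, and verify that the checksum is unchanged. The same idea expressed directly against the C API could look like the sketch below; it assumes the 2.x sqlite_get_table() interface and that md5sum() has already been registered on the connection, and it omits error handling (not part of this commit):

    #include <string.h>
    #include "sqlite.h"

    /* Return 1 if a rolled-back DELETE leaves t2's checksum unchanged. */
    static int rollback_preserves_t2(sqlite *db){
      char **azBefore = 0, **azAfter = 0;
      int nRow, nCol, same;

      sqlite_get_table(db, "SELECT md5sum(x,y,z) FROM t2",
                       &azBefore, &nRow, &nCol, 0);
      sqlite_exec(db, "BEGIN; DELETE FROM t2; ROLLBACK;", 0, 0, 0);
      sqlite_get_table(db, "SELECT md5sum(x,y,z) FROM t2",
                       &azAfter, &nRow, &nCol, 0);

      /* The first nCol entries of the result array hold column names;
      ** data begins at index nCol (here nCol==1). */
      same = strcmp(azBefore[1], azAfter[1])==0;
      sqlite_free_table(azBefore);
      sqlite_free_table(azAfter);
      return same;
    }
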
diff --git a/www/changes.tcl b/www/changes.tcl
index 0397ac124..b31f15788 100644
--- a/www/changes.tcl
+++ b/www/changes.tcl
@@ -17,7 +17,7 @@ proc chng {date desc} {
puts "<DD><P><UL>$desc</UL></P></DD>"
}
-chng {2002 Mar * (2.4.0)} {
+chng {2002 Mar 10 (2.4.0)} {
<li>Change the name of the sanity_check PRAGMA to <b>integrity_check</b>
and make it available in all compiles.</li>
<li>SELECT min() or max() of an indexed column with no WHERE or GROUP BY
@@ -40,6 +40,8 @@ chng {2002 Mar * (2.4.0)} {
about 2.5 times faster and large DELETEs about 5 times faster.</li>
<li>Made the CACHE_SIZE pragma persistent</li>
<li>Added the SYNCHRONOUS pragma</li>
+<li>Fixed a bug that caused updates to fail inside transactions when
+    the database contained a temporary table.</li>
}
chng {2002 Feb 18 (2.3.3)} {
diff --git a/www/formatchng.tcl b/www/formatchng.tcl
index 4f0c68483..47580585b 100644
--- a/www/formatchng.tcl
+++ b/www/formatchng.tcl
@@ -1,7 +1,7 @@
#
# Run this Tcl script to generate the formatchng.html file.
#
-set rcsid {$Id: formatchng.tcl,v 1.3 2002/03/04 02:26:17 drh Exp $ }
+set rcsid {$Id: formatchng.tcl,v 1.4 2002/03/11 02:06:14 drh Exp $ }
puts {<html>
<head>
@@ -93,7 +93,7 @@ occurred since version 1.0.0:
</tr>
<tr>
<td valign="top">2.3.3 to 2.4.0</td>
- <td valign="top">2002-Mar-?</td>
+ <td valign="top">2002-Mar-10</td>
<td>Beginning with version 2.4.0, SQLite added support for views.
Information about views is stored in the SQLITE_MASTER table. If an older
version of SQLite attempts to read a database that contains VIEW information
diff --git a/www/lang.tcl b/www/lang.tcl
index 6aabd1974..18f80fd5e 100644
--- a/www/lang.tcl
+++ b/www/lang.tcl
@@ -1,7 +1,7 @@
#
# Run this Tcl script to generate the sqlite.html file.
#
-set rcsid {$Id: lang.tcl,v 1.27 2002/03/04 02:26:17 drh Exp $}
+set rcsid {$Id: lang.tcl,v 1.28 2002/03/11 02:06:14 drh Exp $}
puts {<html>
<head>
@@ -817,13 +817,19 @@ with caution.</p>
<p>The current implementation supports the following pragmas:</p>
<ul>
-<li><p><b>PRAGMA cache_size = </b><i>Number-of-pages</i><b>;</b></p>
- <p>Change the maximum number of database disk pages that SQLite
- will hold in memory at once. Each page uses about 1.5K of RAM.
- The default cache size is 100. If you are doing UPDATEs or DELETEs
+<li><p><b>PRAGMA cache_size;
+ <br>PRAGMA cache_size = </b><i>Number-of-pages</i><b>;</b></p>
+ <p>Query or change the maximum number of database disk pages that SQLite
+ will hold in memory at once. Each page uses about 1.5K of memory.
+ The default cache size is 2000. If you are doing UPDATEs or DELETEs
that change many rows of a database and you do not mind if SQLite
uses more memory, you can increase the cache size for a possible speed
- improvement.</p></li>
+ improvement.</p>
+ <p>When you change the cache size using the cache_size pragma, the
+ change only endures for the current session. The cache size reverts
+ to the default value when the database is closed and reopened. Use
+    the <b>default_cache_size</b> pragma to change the cache size
+    permanently.</p></li>
<li><p><b>PRAGMA count_changes = ON;
<br>PRAGMA count_changes = OFF;</b></p>
@@ -831,6 +837,39 @@ with caution.</p>
be invoked once for each DELETE, INSERT, or UPDATE operation. The
argument is the number of rows that were changed.</p>
+<li><p><b>PRAGMA default_cache_size;
+ <br>PRAGMA default_cache_size = </b><i>Number-of-pages</i><b>;</b></p>
+ <p>Query or change the maximum number of database disk pages that SQLite
+ will hold in memory at once. Each page uses about 1.5K of memory.
+    This pragma works like the <b>cache_size</b> pragma with the additional
+    feature that it changes the cache size persistently.  With this pragma,
+    you can set the cache size once and that setting is retained and reused
+    every time you reopen the database.</p></li>
+
+<li><p><b>PRAGMA default_synchronous;
+ <br>PRAGMA default_synchronous = ON;
+ <br>PRAGMA default_synchronous = OFF;</b></p>
+ <p>Query or change the setting of the "synchronous" flag in
+ the database. When synchronous is on (the default), the SQLite database
+ engine will pause at critical moments to make sure that data has actually
+    been written to the disk surface.  (In other words, it invokes the
+ equivalent of the <b>fsync()</b> system call.) In synchronous mode,
+ an SQLite database should be fully recoverable even if the operating
+ system crashes or power is interrupted unexpectedly. The penalty for
+ this assurance is that some database operations take longer because the
+ engine has to wait on the (relatively slow) disk drive. The alternative
+ is to turn synchronous off. With synchronous off, SQLite continues
+ processing as soon as it has handed data off to the operating system.
+ If the application running SQLite crashes, the data will be safe, but
+ the database could (in theory) become corrupted if the operating system
+ crashes or the computer suddenly loses power. On the other hand, some
+ operations are as much as 50 or more times faster with synchronous off.
+ </p>
+ <p>This pragma changes the synchronous mode persistently. Once changed,
+ the mode stays as set even if the database is closed and reopened. The
+ <b>synchronous</b> pragma does the same thing but only applies the setting
+ to the current session.</p>
+
<li><p><b>PRAGMA empty_result_callbacks = ON;
<br>PRAGMA empty_result_callbacks = OFF;</b></p>
<p>When on, the EMPTY_RESULT_CALLBACKS pragma causes the callback
@@ -873,6 +912,16 @@ with caution.</p>
a description of all problems. If everything is in order, "ok" is
returned.</p>
+<li><p><b>PRAGMA synchronous;
+ <br>PRAGMA synchronous = ON;
+ <br>PRAGMA synchronous = OFF;</b></p>
+ <p>Query or change the setting of the "synchronous" flag in
+    the database for the duration of the current database connection.
+ The synchronous flag reverts to its default value when the database
+ is closed and reopened. For additional information on the synchronous
+ flag, see the description of the <b>default_synchronous</b> pragma.</p>
+ </li>
+
<li><p><b>PRAGMA table_info(</b><i>table-name</i><b>);</b></p>
<p>For each column in the named table, invoke the callback function
once with information about that column, including the column name,
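
The cache_size/default_cache_size and synchronous/default_synchronous pairs documented above differ only in persistence: the default_* forms are stored in the database file, while the plain forms last only for the current connection. A minimal sketch of setting both at open time through the C API, assuming the 2.x interface (open_tuned() is a hypothetical helper, not part of this commit):

    #include "sqlite.h"

    /* Open a database with a persistent cache size and per-connection
    ** asynchronous writes (faster, but unsafe across OS crashes). */
    sqlite *open_tuned(const char *zFile){
      char *zErr = 0;
      sqlite *db = sqlite_open(zFile, 0, &zErr);
      if( db==0 ) return 0;
      sqlite_exec(db, "PRAGMA default_cache_size=2000;", 0, 0, 0); /* persistent */
      sqlite_exec(db, "PRAGMA synchronous=OFF;", 0, 0, 0);         /* this session */
      return db;
    }
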
diff --git a/www/speed.tcl b/www/speed.tcl
index 2833c810b..0e387a290 100644
--- a/www/speed.tcl
+++ b/www/speed.tcl
@@ -1,7 +1,7 @@
#
# Run this Tcl script to generate the speed.html file.
#
-set rcsid {$Id: speed.tcl,v 1.5 2001/11/24 13:23:05 drh Exp $ }
+set rcsid {$Id: speed.tcl,v 1.6 2002/03/11 02:06:14 drh Exp $ }
puts {<html>
<head>
@@ -18,282 +18,365 @@ puts "<p align=center>
puts {
<h2>Executive Summary</h2>
-<p>A series of tests are run to measure the relative performance of
-SQLite version 1.0 and 2.0 and PostgreSQL version 6.4.
+<p>A series of tests were run to measure the relative performance of
+SQLite 2.4.0, PostgreSQL, and MySQL.
The following are general
conclusions drawn from these experiments:
</p>
<ul>
<li><p>
- SQLite 2.0 is significantly faster than both SQLite 1.0 and PostgreSQL
+ SQLite 2.4.0 is significantly faster than PostgreSQL
for most common operations.
- SQLite 2.0 is over 4 times faster than PostgreSQL for simple
- query operations and about 7 times faster for <b>INSERT</b> statements
- within a transaction.
</p></li>
<li><p>
- PostgreSQL performs better on complex queries, possibly due to having
- a more sophisticated query optimizer.
-</p></li>
-<li><p>
- SQLite 2.0 is significantly slower than both SQLite 1.0 and PostgreSQL
- on <b>DROP TABLE</b> statements and on doing lots of small <b>INSERT</b>
- statements that are not grouped into a single transaction.
+  The speed of SQLite 2.4.0 is similar to that of MySQL.
+ This is true in spite of the
+ fact that SQLite contains full transaction support whereas the
+ version of MySQL tested did not.
</p></li>
</ul>
<h2>Test Environment</h2>
<p>
-The platform used for these tests is a 550MHz Athlon with 256MB or memory
-and 33MHz IDE disk drives. The operating system is RedHat Linux 6.0 with
-various upgrades, including an upgrade to kernel version 2.2.18.
+The platform used for these tests is a 1.6GHz Athlon with 1GB of memory
+and an IDE disk drive. The operating system is RedHat Linux 7.2 with
+a stock kernel.
</p>
<p>
-PostgreSQL version 6.4.2 was used for these tests because that is what
-came pre-installed with RedHat 6.0. Newer version of PostgreSQL may give
-better performance.
+The PostgreSQL and MySQL servers used were as delivered by default on
+RedHat 7.2. No effort was made to tune these engines. Note in particular
+that the default MySQL configuration on RedHat 7.2 does not support
+transactions. Not having to support transactions gives MySQL a
+big advantage, but SQLite is still able to hold its own on most
+tests.
</p>
<p>
-SQLite version 1.0.32 was compiled with -O2 optimization and without
-the -DNDEBUG=1 switch. Setting the NDEBUG macro disables all "assert()"
-statements within the code, but SQLite version 1.0 does not have any
-expensive assert() statements so the difference in performance is
-negligible.
-</p>
-
-<p>
-SQLite version 2.0-alpha-2 was compiled with -O2 optimization and
-with the -DNDEBUG=1 compiler switch. Setting the NDEBUG macro is very
-important in SQLite version 2.0. SQLite 2.0 contains some expensive
-"assert()" statements in the inner loop of its processing. Setting
-the NDEBUG macro makes SQLite 2.0 run nearly twice as fast.
+SQLite was compiled with -O6 optimization and with
+the -DNDEBUG=1 switch, which disables the many "assert()" statements
+in the SQLite code. The -DNDEBUG=1 compiler option roughly doubles
+the speed of SQLite.
</p>
<p>
All tests are conducted on an otherwise quiescent machine.
-A simple shell script was used to generate and run all the tests.
-Each test reports three different times:
+A simple Tcl script was used to generate and run all the tests.
+A copy of this Tcl script can be found in the SQLite source tree
+in the file <b>tools/speedtest.tcl</b>.
</p>
<p>
-<ol>
-<li> "<b>Real</b>" or wall-clock time. </li>
-<li> "<b>User</b>" time, the time spent executing user-level code. </li>
-<li> "<b>Sys</b>" or system time, the time spent in the operating system. </li>
-</ol>
+The times reported on all tests represent wall-clock time
+in seconds. Two separate time values are reported for SQLite.
+The first value is for SQLite in its default configuration with
+full disk synchronization turned on. With synchronization turned
+on, SQLite executes
+an <b>fsync()</b> system call (or the equivalent) at key points
+to make certain that critical data has
+actually been written to the disk drive surface. Synchronization
+is necessary to guarantee the integrity of the database if the
+operating system crashes or the computer powers down unexpectedly
+in the middle of a database update. The second time reported for SQLite is
+when synchronization is turned off. With synchronization off,
+SQLite is sometimes much faster, but there is a risk that an
+operating system crash or an unexpected power failure could
+damage the database. Generally speaking, the synchronous SQLite
+times are for comparison against PostgreSQL (which is also
+synchronous) and the asynchronous SQLite times are for
+comparison against the asynchronous MySQL engine.
</p>
+<h2>Test 1: 1000 INSERTs</h2>
+<blockquote>
+CREATE TABLE t1(a INTEGER, b INTEGER, c VARCHAR(100));<br>
+INSERT INTO t1 VALUES(1,13153,'thirteen thousand one hundred fifty three');<br>
+INSERT INTO t1 VALUES(2,75560,'seventy five thousand five hundred sixty');<br>
+<i>... 995 lines omitted</i><br>
+INSERT INTO t1 VALUES(998,66289,'sixty six thousand two hundred eighty nine');<br>
+INSERT INTO t1 VALUES(999,24322,'twenty four thousand three hundred twenty two');<br>
+INSERT INTO t1 VALUES(1000,94142,'ninety four thousand one hundred forty two');<br>
+
+</blockquote><table border=0 cellpadding=0 cellspacing=0>
+<tr><td>PostgreSQL:</td><td align="right">&nbsp;&nbsp;&nbsp;4.027</td></tr>
+<tr><td>MySQL:</td><td align="right">&nbsp;&nbsp;&nbsp;0.113</td></tr>
+<tr><td>SQLite 2.4:</td><td align="right">&nbsp;&nbsp;&nbsp;8.409</td></tr>
+<tr><td>SQLite 2.4 (nosync):</td><td align="right">&nbsp;&nbsp;&nbsp;0.188</td></tr>
+</table>
+
+<p>SQLite must close and reopen the database file, and thus invalidate
+its cache, for each SQL statement. In spite of this, the asynchronous
+version of SQLite is still nearly as fast as MySQL. Notice how much slower
+the synchronous version is, however. This is due to the necessity of
+calling <b>fsync()</b> after each SQL statement.</p>
+
+<h2>Test 2: 25000 INSERTs in a transaction</h2>
+<blockquote>
+BEGIN;<br>
+CREATE TABLE t2(a INTEGER, b INTEGER, c VARCHAR(100));<br>
+INSERT INTO t2 VALUES(1,298361,'two hundred ninety eight thousand three hundred sixty one');<br>
+<i>... 24997 lines omitted</i><br>
+INSERT INTO t2 VALUES(24999,447847,'four hundred forty seven thousand eight hundred forty seven');<br>
+INSERT INTO t2 VALUES(25000,473330,'four hundred seventy three thousand three hundred thirty');<br>
+COMMIT;<br>
+
+</blockquote><table border=0 cellpadding=0 cellspacing=0>
+<tr><td>PostgreSQL:</td><td align="right">&nbsp;&nbsp;&nbsp;5.175</td></tr>
+<tr><td>MySQL:</td><td align="right">&nbsp;&nbsp;&nbsp;2.444</td></tr>
+<tr><td>SQLite 2.4:</td><td align="right">&nbsp;&nbsp;&nbsp;0.858</td></tr>
+<tr><td>SQLite 2.4 (nosync):</td><td align="right">&nbsp;&nbsp;&nbsp;0.739</td></tr>
+</table>
+
<p>
-PostgreSQL uses a client-server model. The experiment is unable to measure
-CPU used by the server, only the client, so the "user" and "sys" numbers
-from PostgreSQL are meaningless.
+When all the INSERTs are put in a transaction, SQLite no longer has to
+close and reopen the database between each statement. It also does not
+have to do any fsync()s until the very end. When unshackled in
+this way, SQLite is much faster than either PostgreSQL or MySQL.
</p>
-<h2>Test 1: CREATE TABLE</h2>
-
-<blockquote><pre>
-CREATE TABLE t1(f1 int, f2 int, f3 int);
-COPY t1 FROM '/home/drh/sqlite/bld/speeddata3.txt';
-
-PostgreSQL: real 1.84
-SQLite 1.0: real 3.29 user 0.64 sys 1.60
-SQLite 2.0: real 0.77 user 0.51 sys 0.05
-</pre></blockquote>
+<h2>Test 3: 100 SELECTs without an index</h2>
+<blockquote>
+SELECT count(*), avg(b) FROM t2 WHERE b>=0 AND b<1000;<br>
+SELECT count(*), avg(b) FROM t2 WHERE b>=100 AND b<1100;<br>
+SELECT count(*), avg(b) FROM t2 WHERE b>=200 AND b<1200;<br>
+<i>... 94 lines omitted</i><br>
+SELECT count(*), avg(b) FROM t2 WHERE b>=9700 AND b<10700;<br>
+SELECT count(*), avg(b) FROM t2 WHERE b>=9800 AND b<10800;<br>
+SELECT count(*), avg(b) FROM t2 WHERE b>=9900 AND b<10900;<br>
+
+</blockquote><table border=0 cellpadding=0 cellspacing=0>
+<tr><td>PostgreSQL:</td><td align="right">&nbsp;&nbsp;&nbsp;3.773</td></tr>
+<tr><td>MySQL:</td><td align="right">&nbsp;&nbsp;&nbsp;3.023</td></tr>
+<tr><td>SQLite 2.4:</td><td align="right">&nbsp;&nbsp;&nbsp;6.281</td></tr>
+<tr><td>SQLite 2.4 (nosync):</td><td align="right">&nbsp;&nbsp;&nbsp;6.247</td></tr>
+</table>
<p>
-The speeddata3.txt data file contains 30000 rows of data.
+This test does 100 queries on a 25000 entry table without an index,
+thus requiring a full table scan. SQLite is about half the speed of
+PostgreSQL and MySQL. This is because SQLite stores all data as strings
+and must therefore call <b>strtod()</b> 5 million times in the
+course of evaluating the WHERE clauses. Both PostgreSQL and MySQL
+store data as binary values where appropriate and can forego
+this conversion effort.
</p>
-<h2>Test 2: SELECT</h2>
-<blockquote><pre>
-SELECT max(f2), min(f3), count(*) FROM t1
-WHERE f3<10000 OR f1>=20000;
+<h2>Test 4: 100 SELECTs on a string comparison</h2>
+<blockquote>
+SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%one%';<br>
+SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%two%';<br>
+SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%three%';<br>
+<i>... 94 lines omitted</i><br>
+SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety eight%';<br>
+SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%ninety nine%';<br>
+SELECT count(*), avg(b) FROM t2 WHERE c LIKE '%one hundred%';<br>
-PostgreSQL: real 1.22
-SQLite 1.0: real 0.80 user 0.67 sys 0.12
-SQLite 2.0: real 0.65 user 0.60 sys 0.05
-</pre></blockquote>
+</blockquote><table border=0 cellpadding=0 cellspacing=0>
+<tr><td>PostgreSQL:</td><td align="right">&nbsp;&nbsp;&nbsp;16.726</td></tr>
+<tr><td>MySQL:</td><td align="right">&nbsp;&nbsp;&nbsp;5.237</td></tr>
+<tr><td>SQLite 2.4:</td><td align="right">&nbsp;&nbsp;&nbsp;6.137</td></tr>
+<tr><td>SQLite 2.4 (nosync):</td><td align="right">&nbsp;&nbsp;&nbsp;6.112</td></tr>
+</table>
<p>
-With no indices, a complete scan of the table must be performed
-(all 30000 rows) in order to complete this query.
+This set of 100 queries uses string comparisons instead of
+numerical comparisons.  As a result, the speed of SQLite is
+comparable to or better than that of PostgreSQL and MySQL.
</p>
-<h2>Test 3: CREATE INDEX</h2>
-
-<blockquote><pre>
-CREATE INDEX idx1 ON t1(f1);
-CREATE INDEX idx2 ON t1(f2,f3);
-
-PostgreSQL: real 2.24
-SQLite 1.0: real 5.37 user 1.22 sys 3.10
-SQLite 2.0: real 3.71 user 2.31 sys 1.06
-</pre></blockquote>
+<h2>Test 5: Creating an index</h2>
+<blockquote>
+CREATE INDEX i2a ON t2(a);<br>CREATE INDEX i2b ON t2(b);
+</blockquote><table border=0 cellpadding=0 cellspacing=0>
+<tr><td>PostgreSQL:</td><td align="right">&nbsp;&nbsp;&nbsp;0.510</td></tr>
+<tr><td>MySQL:</td><td align="right">&nbsp;&nbsp;&nbsp;0.352</td></tr>
+<tr><td>SQLite 2.4:</td><td align="right">&nbsp;&nbsp;&nbsp;0.809</td></tr>
+<tr><td>SQLite 2.4 (nosync):</td><td align="right">&nbsp;&nbsp;&nbsp;0.720</td></tr>
+</table>
<p>
-PostgreSQL is fastest at creating new indices.
-Note that SQLite 2.0 is faster than SQLite 1.0 but still
-spends longer in user-space code.
+SQLite is slower at creating new indices. But since creating
+new indices is an uncommon operation, this is not seen as a
+problem.
</p>
-<h2>Test 4: SELECT using an index</h2>
-
-<blockquote><pre>
-SELECT max(f2), min(f3), count(*) FROM t1
-WHERE f3<10000 OR f1>=20000;
-
-PostgreSQL: real 0.19
-SQLite 1.0: real 0.77 user 0.66 sys 0.12
-SQLite 2.0: real 0.62 user 0.62 sys 0.01
-</pre></blockquote>
+<h2>Test 6: 5000 SELECTs with an index</h2>
+<blockquote>
+SELECT count(*), avg(b) FROM t2 WHERE b>=0 AND b<100;<br>
+SELECT count(*), avg(b) FROM t2 WHERE b>=100 AND b<200;<br>
+SELECT count(*), avg(b) FROM t2 WHERE b>=200 AND b<300;<br>
+<i>... 4994 lines omitted</i><br>
+SELECT count(*), avg(b) FROM t2 WHERE b>=499700 AND b<499800;<br>
+SELECT count(*), avg(b) FROM t2 WHERE b>=499800 AND b<499900;<br>
+SELECT count(*), avg(b) FROM t2 WHERE b>=499900 AND b<500000;<br>
+
+</blockquote><table border=0 cellpadding=0 cellspacing=0>
+<tr><td>PostgreSQL:</td><td align="right">&nbsp;&nbsp;&nbsp;5.318</td></tr>
+<tr><td>MySQL:</td><td align="right">&nbsp;&nbsp;&nbsp;1.555</td></tr>
+<tr><td>SQLite 2.4:</td><td align="right">&nbsp;&nbsp;&nbsp;1.289</td></tr>
+<tr><td>SQLite 2.4 (nosync):</td><td align="right">&nbsp;&nbsp;&nbsp;1.273</td></tr>
+</table>
<p>
-This is the same query as in Test 2, but now there are indices.
-Unfortunately, SQLite is reasonably simple-minded about its querying
-and not able to take advantage of the indices. It still does a
-linear scan of the entire table. PostgreSQL, on the other hand,
-is able to use the indices to make its query over six times faster.
+This test runs a set of 5000 queries that are similar in form to
+those in test 3. But now instead of being half as fast, SQLite
+is faster than both PostgreSQL and MySQL.
</p>
-<h2>Test 5: SELECT a single record</h2>
-
-<blockquote><pre>
-SELECT f2, f3 FROM t1 WHERE f1==1;
-SELECT f2, f3 FROM t1 WHERE f1==2;
-SELECT f2, f3 FROM t1 WHERE f1==3;
-...
-SELECT f2, f3 FROM t1 WHERE f1==998;
-SELECT f2, f3 FROM t1 WHERE f1==999;
-SELECT f2, f3 FROM t1 WHERE f1==1000;
-
-PostgreSQL: real 0.95
-SQLite 1.0: real 15.70 user 0.70 sys 14.41
-SQLite 2.0: real 0.20 user 0.15 sys 0.05
-</pre></blockquote>
+<h2>Test 7: 1000 UPDATEs without an index</h2>
+<blockquote>
+BEGIN;<br>
+UPDATE t1 SET b=b*2 WHERE a>=0 AND a<10;<br>
+UPDATE t1 SET b=b*2 WHERE a>=10 AND a<20;<br>
+<i>... 996 lines omitted</i><br>
+UPDATE t1 SET b=b*2 WHERE a>=9980 AND a<9990;<br>
+UPDATE t1 SET b=b*2 WHERE a>=9990 AND a<10000;<br>
+COMMIT;<br>
+
+</blockquote><table border=0 cellpadding=0 cellspacing=0>
+<tr><td>PostgreSQL:</td><td align="right">&nbsp;&nbsp;&nbsp;1.828</td></tr>
+<tr><td>MySQL:</td><td align="right">&nbsp;&nbsp;&nbsp;9.272</td></tr>
+<tr><td>SQLite 2.4:</td><td align="right">&nbsp;&nbsp;&nbsp;0.915</td></tr>
+<tr><td>SQLite 2.4 (nosync):</td><td align="right">&nbsp;&nbsp;&nbsp;0.889</td></tr>
+</table>
<p>
-This test involves 1000 separate SELECT statements, only the first
-and last three of which are show above. SQLite 2.0 is the clear
-winner. The miserable showing by SQLite 1.0 is due (it is thought)
-to the high overhead of executing <b>gdbm_open</b> 2000 times in
-quick succession.
+Here is a case where MySQL is over 10 times slower than SQLite.
+The reason for this is unclear.
</p>
-<h2>Test 6: UPDATE</h2>
-
-<blockquote><pre>
-UPDATE t1 SET f2=f3, f3=f2
-WHERE f1 BETWEEN 15000 AND 20000;
-
-PostgreSQL: real 6.56
-SQLite 1.0: real 3.54 user 0.74 sys 1.16
-SQLite 2.0: real 2.70 user 0.70 sys 1.25
-</pre></blockquote>
+<h2>Test 8: 25000 UPDATEs with an index</h2>
+<blockquote>
+BEGIN;<br>
+UPDATE t2 SET b=271822 WHERE a=1;<br>
+UPDATE t2 SET b=28304 WHERE a=2;<br>
+<i>... 24996 lines omitted</i><br>
+UPDATE t2 SET b=442549 WHERE a=24999;<br>
+UPDATE t2 SET b=423958 WHERE a=25000;<br>
+COMMIT;<br>
+
+</blockquote><table border=0 cellpadding=0 cellspacing=0>
+<tr><td>PostgreSQL:</td><td align="right">&nbsp;&nbsp;&nbsp;28.021</td></tr>
+<tr><td>MySQL:</td><td align="right">&nbsp;&nbsp;&nbsp;8.565</td></tr>
+<tr><td>SQLite 2.4:</td><td align="right">&nbsp;&nbsp;&nbsp;10.939</td></tr>
+<tr><td>SQLite 2.4 (nosync):</td><td align="right">&nbsp;&nbsp;&nbsp;11.199</td></tr>
+</table>
<p>
-We have no explanation for why PostgreSQL does poorly here.
+In this case MySQL is slightly faster than SQLite, though not by much.
+The difference is believed to stem from the fact that SQLite
+handles integers as strings instead of binary numbers.
</p>
-<h2>Test 7: INSERT from a SELECT</h2>
-
-<blockquote><pre>
-CREATE TABLE t2(f1 int, f2 int);
-INSERT INTO t2 SELECT f1, f2 FROM t1 WHERE f3<10000;
-
-PostgreSQL: real 2.05
-SQLite 1.0: real 1.80 user 0.81 sys 0.73
-SQLite 2.0: real 0.69 user 0.58 sys 0.07
-</pre></blockquote>
-
-
-<h2>Test 8: Many small INSERTs</h2>
-
-<blockquote><pre>
-CREATE TABLE t3(f1 int, f2 int, f3 int);
-INSERT INTO t3 VALUES(1,1641,1019);
-INSERT INTO t3 VALUES(2,984,477);
-...
-INSERT INTO t3 VALUES(998,1411,1392);
-INSERT INTO t3 VALUES(999,1715,526);
-INSERT INTO t3 VALUES(1000,1906,1037);
-
-PostgreSQL: real 5.28
-SQLite 1.0: real 2.20 user 0.21 sys 0.67
-SQLite 2.0: real 10.99 user 0.21 sys 7.02
-</pre></blockquote>
+<h2>Test 9: 25000 text UPDATEs with an index</h2>
+<blockquote>
+BEGIN;<br>
+UPDATE t2 SET c='four hundred sixty eight thousand twenty six' WHERE a=1;<br>
+UPDATE t2 SET c='one hundred twenty one thousand nine hundred twenty eight' WHERE a=2;<br>
+<i>... 24996 lines omitted</i><br>
+UPDATE t2 SET c='thirty five thousand sixty five' WHERE a=24999;<br>
+UPDATE t2 SET c='three hundred forty seven thousand three hundred ninety three' WHERE a=25000;<br>
+COMMIT;<br>
+
+</blockquote><table border=0 cellpadding=0 cellspacing=0>
+<tr><td>PostgreSQL:</td><td align="right">&nbsp;&nbsp;&nbsp;48.739</td></tr>
+<tr><td>MySQL:</td><td align="right">&nbsp;&nbsp;&nbsp;7.059</td></tr>
+<tr><td>SQLite 2.4:</td><td align="right">&nbsp;&nbsp;&nbsp;7.868</td></tr>
+<tr><td>SQLite 2.4 (nosync):</td><td align="right">&nbsp;&nbsp;&nbsp;6.720</td></tr>
+</table>
<p>
-This test involves 1000 separate INSERT statements, only 5 of which
-are shown above. SQLite 2.0 does poorly because of its atomic commit
-logic. A minimum of two calls to <b>fsync()</b> are required for each
-INSERT statement, and that really slows things down. On the other hand,
-PostgreSQL also has to support atomic commits and it seems to do so
-efficiently.
+When updating a text field instead of an integer field,
+SQLite is slightly faster than MySQL.
</p>
-<h2>Test 9: Many small INSERTs within a TRANSACTION</h2>
-
-<blockquote><pre>
-CREATE TABLE t4(f1 int, f2 int, f3 int);
-BEGIN TRANSACTION;
-INSERT INTO t4 VALUES(1,440,1084);
-...
-INSERT INTO t4 VALUES(999,1527,423);
-INSERT INTO t4 VALUES(1000,74,1865);
-COMMIT;
-
-PostgreSQL: real 0.68
-SQLite 1.0: real 1.72 user 0.09 sys 0.55
-SQLite 2.0: real 0.10 user 0.08 sys 0.02
-</pre></blockquote>
+<h2>Test 10: INSERTs from a SELECT</h2>
+<blockquote>
+BEGIN;<br>INSERT INTO t1 SELECT * FROM t2;<br>INSERT INTO t2 SELECT * FROM t1;<br>COMMIT;
+</blockquote><table border=0 cellpadding=0 cellspacing=0>
+<tr><td>PostgreSQL:</td><td align="right">&nbsp;&nbsp;&nbsp;54.822</td></tr>
+<tr><td>MySQL:</td><td align="right">&nbsp;&nbsp;&nbsp;1.512</td></tr>
+<tr><td>SQLite 2.4:</td><td align="right">&nbsp;&nbsp;&nbsp;4.423</td></tr>
+<tr><td>SQLite 2.4 (nosync):</td><td align="right">&nbsp;&nbsp;&nbsp;2.386</td></tr>
+</table>
<p>
-By putting all the inserts inside a single transaction, there
-only needs to be a single atomic commit at the very end. This
-allows SQLite 2.0 to go (literally) 100 times faster! PostgreSQL
-only gets a eight-fold speedup. Perhaps PostgreSQL is limited here by
-the IPC overhead.
+The poor performance of PostgreSQL in this case appears to be due to its
+synchronous behavior. The CPU was mostly idle during the 55 second run.
</p>
-<h2>Test 10: DELETE</h2>
-
-<blockquote><pre>
-DELETE FROM t1 WHERE f2 NOT BETWEEN 10000 AND 20000;
-
-PostgreSQL: real 7.25
-SQLite 1.0: real 6.98 user 1.66 sys 4.11
-SQLite 2.0: real 5.89 user 1.35 sys 3.11
-</pre></blockquote>
+<h2>Test 11: DELETE without an index</h2>
+<blockquote>
+DELETE FROM t2 WHERE c LIKE '%fifty%';
+</blockquote><table border=0 cellpadding=0 cellspacing=0>
+<tr><td>PostgreSQL:</td><td align="right">&nbsp;&nbsp;&nbsp;0.734</td></tr>
+<tr><td>MySQL:</td><td align="right">&nbsp;&nbsp;&nbsp;0.888</td></tr>
+<tr><td>SQLite 2.4:</td><td align="right">&nbsp;&nbsp;&nbsp;5.405</td></tr>
+<tr><td>SQLite 2.4 (nosync):</td><td align="right">&nbsp;&nbsp;&nbsp;0.731</td></tr>
+</table>
+
+
+<h2>Test 12: DELETE with an index</h2>
+<blockquote>
+DELETE FROM t2 WHERE a>10 AND a<20000;
+</blockquote><table border=0 cellpadding=0 cellspacing=0>
+<tr><td>PostgreSQL:</td><td align="right">&nbsp;&nbsp;&nbsp;2.318</td></tr>
+<tr><td>MySQL:</td><td align="right">&nbsp;&nbsp;&nbsp;2.600</td></tr>
+<tr><td>SQLite 2.4:</td><td align="right">&nbsp;&nbsp;&nbsp;1.436</td></tr>
+<tr><td>SQLite 2.4 (nosync):</td><td align="right">&nbsp;&nbsp;&nbsp;0.775</td></tr>
+</table>
+
+
+<h2>Test 13: A big INSERT after a big DELETE</h2>
+<blockquote>
+INSERT INTO t2 SELECT * FROM t1;
+</blockquote><table border=0 cellpadding=0 cellspacing=0>
+<tr><td>PostgreSQL:</td><td align="right">&nbsp;&nbsp;&nbsp;63.867</td></tr>
+<tr><td>MySQL:</td><td align="right">&nbsp;&nbsp;&nbsp;1.839</td></tr>
+<tr><td>SQLite 2.4:</td><td align="right">&nbsp;&nbsp;&nbsp;3.971</td></tr>
+<tr><td>SQLite 2.4 (nosync):</td><td align="right">&nbsp;&nbsp;&nbsp;1.993</td></tr>
+</table>
<p>
-All three database run at about the same speed here.
+Earlier versions of SQLite would show decreasing performance after a
+sequence of DELETEs followed by new INSERTs.  As this test shows, the
+problem has now been resolved.
</p>
-<h2>Test 11: DROP TABLE</h2>
-
-<blockquote><pre>
-BEGIN TRANSACTION;
-DROP TABLE t1; DROP TABLE t2;
-DROP TABLE t3; DROP TABLE t4;
-COMMIT;
-
-PostgreSQL: real 0.06
-SQLite 1.0: real 0.03 user 0.00 sys 0.02
-SQLite 2.0: real 3.12 user 0.02 sys 0.31
-</pre></blockquote>
+<h2>Test 14: A big DELETE followed by many small INSERTs</h2>
+<blockquote>
+BEGIN;<br>
+DELETE FROM t1;<br>
+INSERT INTO t1 VALUES(1,29676,'twenty nine thousand six hundred seventy six');<br>
+<i>... 2997 lines omitted</i><br>
+INSERT INTO t1 VALUES(2999,37835,'thirty seven thousand eight hundred thirty five');<br>
+INSERT INTO t1 VALUES(3000,97817,'ninety seven thousand eight hundred seventeen');<br>
+COMMIT;<br>
+
+</blockquote><table border=0 cellpadding=0 cellspacing=0>
+<tr><td>PostgreSQL:</td><td align="right">&nbsp;&nbsp;&nbsp;1.209</td></tr>
+<tr><td>MySQL:</td><td align="right">&nbsp;&nbsp;&nbsp;1.031</td></tr>
+<tr><td>SQLite 2.4:</td><td align="right">&nbsp;&nbsp;&nbsp;0.298</td></tr>
+<tr><td>SQLite 2.4 (nosync):</td><td align="right">&nbsp;&nbsp;&nbsp;0.282</td></tr>
+</table>
+
+<h2>Test 15: DROP TABLE</h2>
+<blockquote>
+DROP TABLE t1;<br>DROP TABLE t2;
+</blockquote><table border=0 cellpadding=0 cellspacing=0>
+<tr><td>PostgreSQL:</td><td align="right">&nbsp;&nbsp;&nbsp;0.105</td></tr>
+<tr><td>MySQL:</td><td align="right">&nbsp;&nbsp;&nbsp;0.015</td></tr>
+<tr><td>SQLite 2.4:</td><td align="right">&nbsp;&nbsp;&nbsp;0.472</td></tr>
+<tr><td>SQLite 2.4 (nosync):</td><td align="right">&nbsp;&nbsp;&nbsp;0.232</td></tr>
+</table>
<p>
-SQLite 2.0 is much slower at dropping tables. This may be because
-both SQLite 1.0 and PostgreSQL can drop a table simply by unlinking
-or renaming a file, since both store database tables in separate files.
-SQLite 2.0, on the other hand, uses a single file for the entire
-database, so dropping a table involves moving lots of page of that
-file to the free-list, which takes time.
+SQLite is slower than the other databases when it comes to dropping tables.
+This is not seen as a big problem, however, since DROP TABLE is seldom
+used in speed-critical situations.
</p>
}
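
The largest single effect visible in these numbers is the cost of the per-statement commit: in Test 1 each INSERT is its own transaction, so synchronous SQLite pays an fsync() for every row, while in Test 2 a single BEGIN/COMMIT amortizes that cost over 25000 rows. A minimal C sketch of the batched pattern, assuming the 2.x API (the bulk_insert() helper and its values are illustrative only, not part of this commit):

    #include <stdio.h>
    #include "sqlite.h"

    /* Insert n rows inside one transaction so SQLite journals and
    ** fsync()s only once, at COMMIT. */
    static void bulk_insert(sqlite *db, int n){
      char zSql[200];
      int i;
      sqlite_exec(db, "BEGIN;", 0, 0, 0);
      for(i=1; i<=n; i++){
        sprintf(zSql, "INSERT INTO t2 VALUES(%d,%d,'row %d');", i, i*37, i);
        sqlite_exec(db, zSql, 0, 0, 0);
      }
      sqlite_exec(db, "COMMIT;", 0, 0, 0);  /* one atomic commit for all rows */
    }
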