author    Hans-Christoph Steiner <hans@eds.org>    2013-01-17 14:18:26 -0500
committer Hans-Christoph Steiner <hans@eds.org>    2013-01-17 14:18:26 -0500
commit    1b5ba8e022836fa8ab93bc90df1b34a29ea6e134 (patch)
tree      e2a832468ccbf52965f18c37b3c4e692fe97ed06 /test
parent    487e15dc239ccdb3344d1c99ce120e872bab4a74 (diff)
Imported Upstream version 2.1.1
Diffstat (limited to 'test')
-rw-r--r--  test/aggnested.test     |  71
-rw-r--r--  test/atof1.test         |  60
-rw-r--r--  test/backup.test        |   2
-rw-r--r--  test/bigfile.test       |   2
-rw-r--r--  test/bigfile2.test      |   2
-rw-r--r--  test/capi3.test         |  11
-rw-r--r--  test/capi3c.test        |   2
-rw-r--r--  test/crash.test         |   2
-rw-r--r--  test/crypto.test        | 306
-rw-r--r--  test/date.test          |   2
-rw-r--r--  test/e_uri.test         |  31
-rw-r--r--  test/fts3atoken.test    |  10
-rw-r--r--  test/fts3auto.test      |  14
-rw-r--r--  test/fts3defer.test     |   2
-rw-r--r--  test/fts3defer2.test    |   5
-rw-r--r--  test/fts3fault2.test    |  24
-rw-r--r--  test/fts3matchinfo.test |  13
-rw-r--r--  test/fts4aa.test        |  18
-rw-r--r--  test/fts4unicode.test   | 387
-rw-r--r--  test/func.test          |   2
-rw-r--r--  test/index5.test        |  75
-rw-r--r--  test/journal1.test      |   2
-rw-r--r--  test/loadext.test       |   2
-rw-r--r--  test/misc1.test         |   6
-rw-r--r--  test/misc7.test         |   2
-rw-r--r--  test/pager1.test        |  35
-rw-r--r--  test/permutations.test  |  21
-rw-r--r--  test/pragma.test        | 109
-rw-r--r--  test/quota.test         |   7
-rw-r--r--  test/quota2.test        |  33
-rw-r--r--  test/releasetest.tcl    |  12
-rw-r--r--  test/rowid.test         |   2
-rw-r--r--  test/securedel2.test    |  95
-rw-r--r--  test/select6.test       |  44
-rw-r--r--  test/shared.test        | 130
-rw-r--r--  test/shared8.test       | 113
-rw-r--r--  test/shell1.test        |   2
-rw-r--r--  test/spellfix.test      | 151
-rw-r--r--  test/tclsqlite.test     |  11
-rw-r--r--  test/tester.tcl         |  30
-rw-r--r--  test/uri.test           |  17
-rw-r--r--  test/vtab1.test         |  18
-rw-r--r--  test/wal.test           |   6
-rw-r--r--  test/wal2.test          |   8
-rw-r--r--  test/wal3.test          |   2
-rw-r--r--  test/walro.test         | 127
-rw-r--r--  test/walthread.test     |   4
-rw-r--r--  test/whereD.test        | 189
48 files changed, 2146 insertions, 73 deletions
diff --git a/test/aggnested.test b/test/aggnested.test
new file mode 100644
index 0000000..22f0fb6
--- /dev/null
+++ b/test/aggnested.test
@@ -0,0 +1,71 @@
+# 2012 August 23
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# This file implements tests for processing aggregate queries with
+# subqueries in which the subqueries hold the aggregate functions
+# or in which the subqueries are themselves aggregate queries
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+do_test aggnested-1.1 {
+ db eval {
+ CREATE TABLE t1(a1 INTEGER);
+ INSERT INTO t1 VALUES(1), (2), (3);
+ CREATE TABLE t2(b1 INTEGER);
+ INSERT INTO t2 VALUES(4), (5);
+ SELECT (SELECT group_concat(a1,'x') FROM t2) FROM t1;
+ }
+} {1x2x3}
+do_test aggnested-1.2 {
+ db eval {
+ SELECT
+ (SELECT group_concat(a1,'x') || '-' || group_concat(b1,'y') FROM t2)
+ FROM t1;
+ }
+} {1x2x3-4y5}
+do_test aggnested-1.3 {
+ db eval {
+ SELECT (SELECT group_concat(b1,a1) FROM t2) FROM t1;
+ }
+} {415 425 435}
+do_test aggnested-1.4 {
+ db eval {
+ SELECT (SELECT group_concat(a1,b1) FROM t2) FROM t1;
+ }
+} {151 252 353}
+
+
+# This test case is a copy of the one in
+# http://www.mail-archive.com/sqlite-users@sqlite.org/msg70787.html
+#
+do_test aggnested-2.0 {
+ sqlite3 db2 :memory:
+ db2 eval {
+ CREATE TABLE t1 (A1 INTEGER NOT NULL,A2 INTEGER NOT NULL,A3 INTEGER NOT
+ NULL,A4 INTEGER NOT NULL,PRIMARY KEY(A1));
+ REPLACE INTO t1 VALUES(1,11,111,1111);
+ REPLACE INTO t1 VALUES(2,22,222,2222);
+ REPLACE INTO t1 VALUES(3,33,333,3333);
+ CREATE TABLE t2 (B1 INTEGER NOT NULL,B2 INTEGER NOT NULL,B3 INTEGER NOT
+ NULL,B4 INTEGER NOT NULL,PRIMARY KEY(B1));
+ REPLACE INTO t2 VALUES(1,88,888,8888);
+ REPLACE INTO t2 VALUES(2,99,999,9999);
+ SELECT (SELECT GROUP_CONCAT(CASE WHEN a1=1 THEN'A' ELSE 'B' END) FROM t2),
+ t1.*
+ FROM t1;
+ }
+} {A,B,B 3 33 333 3333}
+db2 close
+
+finish_test
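
The point of aggnested-1.1 through 1.4 above, restated as a sketch (illustrative only, reusing the t1/t2 schema the test creates): when a column of the outer query is the argument of an aggregate inside a subquery, the aggregate runs over the outer table, so the subquery collapses all of t1 into one value; when the aggregated column belongs to the subquery's own table, one row is produced per outer row.

  db eval {
    -- a1 is from the outer query, so the aggregate runs over t1: a single row "1x2x3"
    SELECT (SELECT group_concat(a1,'x') FROM t2) FROM t1;
    -- b1 is t2's own column, so the aggregate runs over t2 once per t1 row,
    -- with the outer a1 used as the separator: 415, 425, 435
    SELECT (SELECT group_concat(b1,a1) FROM t2) FROM t1;
  }
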
diff --git a/test/atof1.test b/test/atof1.test
new file mode 100644
index 0000000..76eb427
--- /dev/null
+++ b/test/atof1.test
@@ -0,0 +1,60 @@
+# 2012 June 18
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Tests of the sqlite3AtoF() function.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+if {![info exists __GNUC__]} {
+ finish_test
+ return
+}
+
+expr srand(1)
+for {set i 1} {$i<20000} {incr i} {
+ set pow [expr {int((rand()-0.5)*100)}]
+ set x [expr {pow((rand()-0.5)*2*rand(),$pow)}]
+ set xf [format %.32e $x]
+
+ # Verify that text->real conversions get exactly same ieee754 floating-
+ # point value in SQLite as they do in TCL.
+ #
+ do_test atof1-1.$i.1 {
+ set y [db eval "SELECT $xf=\$x"]
+ if {!$y} {
+ puts -nonewline \173[db eval "SELECT real2hex($xf), real2hex(\$x)"]\175
+ db eval "SELECT $xf+0.0 AS a, \$x AS b" {
+ puts [format "\n%.60e\n%.60e\n%.60e" $x $a $b]
+ }
+ }
+ set y
+ } {1}
+
+ # Verify that round-trip real->text->real conversions using the quote()
+ # function preserve the bits of the numeric value exactly.
+ #
+ do_test atof1-1.$i.2 {
+ set y [db eval {SELECT $x=CAST(quote($x) AS real)}]
+ if {!$y} {
+ db eval {SELECT real2hex($x) a, real2hex(CAST(quote($x) AS real)) b} {}
+ puts "\nIN: $a $xf"
+ puts [format {QUOTE: %16s %s} {} [db eval {SELECT quote($x)}]]
+ db eval {SELECT CAST(quote($x) AS real) c} {}
+ puts "OUT: $b [format %.32e $c]"
+ }
+ set y
+ } {1}
+}
+
+
+finish_test
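
The two properties checked above, in a nutshell: sqlite3AtoF()'s text-to-real conversion must agree bit-for-bit with TCL's, and quote() must print a REAL with enough precision that converting the text back yields the identical value. A minimal sketch of the second check, using a fixed value instead of the random ones generated above (illustrative; assumes the usual db handle):

  set x 0.1
  db eval { SELECT quote($x) AS q, $x = CAST(quote($x) AS real) AS same } {
    puts "$q $same"      ;# expected: "0.1 1" - the printed text converts back to the same bits
  }
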
diff --git a/test/backup.test b/test/backup.test
index 4d7213c..444619c 100644
--- a/test/backup.test
+++ b/test/backup.test
@@ -423,7 +423,7 @@ do_test backup-4.3.2 {
} {SQLITE_BUSY}
do_test backup-4.3.3 {
sqlite3_errmsg db2
-} {unable to close due to unfinished backup operation}
+} {unable to close due to unfinalized statements or unfinished backups}
do_test backup-4.3.4 {
B step 50
} {SQLITE_DONE}
diff --git a/test/bigfile.test b/test/bigfile.test
index d9470ac..59e9f18 100644
--- a/test/bigfile.test
+++ b/test/bigfile.test
@@ -15,6 +15,8 @@
# $Id: bigfile.test,v 1.12 2009/03/05 04:27:08 shane Exp $
#
+if {[file exists skip-big-file]} return
+
set testdir [file dirname $argv0]
source $testdir/tester.tcl
diff --git a/test/bigfile2.test b/test/bigfile2.test
index b13b756..1f0ea85 100644
--- a/test/bigfile2.test
+++ b/test/bigfile2.test
@@ -13,6 +13,8 @@
# files larger than 4GB.
#
+if {[file exists skip-big-file]} return
+
set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix bigfile2
diff --git a/test/capi3.test b/test/capi3.test
index d910626..9d7434d 100644
--- a/test/capi3.test
+++ b/test/capi3.test
@@ -649,13 +649,18 @@ do_test capi3-6.1 {
db cache flush
sqlite3_close $DB
} {SQLITE_BUSY}
+
+# 6.2 and 6.3 used to return SQLITE_ERROR and SQLITE_SCHEMA, respectively.
+# But since attempting to close a connection no longer resets the internal
+# schema and expires all statements, this is no longer the case.
do_test capi3-6.2 {
sqlite3_step $STMT
-} {SQLITE_ERROR}
+} {SQLITE_ROW}
#check_data $STMT capi3-6.3 {INTEGER} {1} {1.0} {1}
do_test capi3-6.3 {
sqlite3_finalize $STMT
-} {SQLITE_SCHEMA}
+} {SQLITE_OK}
+
do_test capi3-6.4-misuse {
db cache flush
sqlite3_close $DB
@@ -778,6 +783,7 @@ foreach {code english} $code2english {
}
# Test the error message when a "real" out of memory occurs.
+if { [permutation] != "nofaultsim" } {
ifcapable memdebug {
do_test capi3-10-1 {
sqlite3 db test.db
@@ -816,6 +822,7 @@ ifcapable memdebug {
db close
sqlite3_memdebug_fail -1
}
+}
# The following tests - capi3-11.* - test that a COMMIT or ROLLBACK
# statement issued while there are still outstanding VMs that are part of
diff --git a/test/capi3c.test b/test/capi3c.test
index 4092091..14545c0 100644
--- a/test/capi3c.test
+++ b/test/capi3c.test
@@ -751,6 +751,7 @@ foreach {code english} $code2english {
}
# Test the error message when a "real" out of memory occurs.
+if { [permutation] != "nofaultsim" } {
ifcapable memdebug {
do_test capi3c-10-1 {
sqlite3 db test.db
@@ -771,6 +772,7 @@ ifcapable memdebug {
db close
sqlite3_memdebug_fail -1
}
+}
# The following tests - capi3c-11.* - test that a COMMIT or ROLLBACK
# statement issued while there are still outstanding VMs that are part of
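
The capi3-6.* edits earlier in this patch capture the newer close semantics: sqlite3_close() on a handle with unfinalized statements fails with SQLITE_BUSY, but it no longer resets the schema or expires those statements. A minimal sketch of that sequence, assuming the usual C-API wrappers provided by the test fixture:

  set DB   [sqlite3_open test.db]
  set STMT [sqlite3_prepare $DB {SELECT 1} -1 TAIL]
  sqlite3_close $DB        ;# SQLITE_BUSY  - $STMT is still outstanding
  sqlite3_step $STMT       ;# SQLITE_ROW   - the statement was not expired by the failed close
  sqlite3_finalize $STMT   ;# SQLITE_OK
  sqlite3_close $DB        ;# SQLITE_OK    - nothing outstanding any more
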
diff --git a/test/crash.test b/test/crash.test
index f644dca..c1901da 100644
--- a/test/crash.test
+++ b/test/crash.test
@@ -119,7 +119,7 @@ do_test crash-1.11 {
} {0 {}}
#--------------------------------------------------------------------------
-# The following tests test recovery when both the database file and the the
+# The following tests test recovery when both the database file and the
# journal file contain corrupt data. This can happen after pages are
# written to the database file before a transaction is committed due to
# cache-pressure.
diff --git a/test/crypto.test b/test/crypto.test
index 5fb11f2..e8bbe85 100644
--- a/test/crypto.test
+++ b/test/crypto.test
@@ -1007,8 +1007,6 @@ do_test export-database {
}
} {1000 1000 1 1000 1001 1001 1000000}
db close
-file copy -force test.db test-debug.db
-file copy -force test2.db test2-debug.db
file delete -force test.db
file delete -force test2.db
@@ -1394,7 +1392,7 @@ do_test verify-pragma-cipher-version {
execsql {
PRAGMA cipher_version;
}
-} {2.0.6}
+} {2.1.1}
db close
file delete -force test.db
@@ -1556,4 +1554,306 @@ do_test multipage-schema-autovacuum-shortread-wal {
db close
file delete -force test.db
+# open a 2.0 database with little endian hmac page numbers (default)
+# verify it can be opened
+do_test open-2.0-le-database {
+ sqlite_orig db sqlcipher-2.0-le-testkey.db
+ execsql {
+ PRAGMA key = 'testkey';
+ SELECT count(*) FROM t1;
+ SELECT * FROM t1;
+ }
+} {4 1 1 one one 1 2 one two}
+db close
+
+# open a 2.0 database with big-endian hmac page numbers
+# verify it can be opened
+do_test open-2.0-be-database {
+ sqlite_orig db sqlcipher-2.0-be-testkey.db
+ execsql {
+ PRAGMA key = 'testkey';
+ PRAGMA cipher_hmac_pgno = be;
+ SELECT count(*) FROM t1;
+ SELECT * FROM t1;
+ }
+} {4 1 1 one one 1 2 one two}
+db close
+
+# open a 2.0 database with big-endian hmac page numbers
+# attach a new database with little endian page numbers (default)
+# copy schema between the two, and verify the latter
+# can be opened
+do_test be-to-le-migration {
+ sqlite_orig db sqlcipher-2.0-be-testkey.db
+
+ execsql {
+ PRAGMA key = 'testkey';
+ PRAGMA cipher_hmac_pgno = be;
+ ATTACH DATABASE 'test.db' AS db2 KEY 'testkey';
+ CREATE TABLE db2.t1(a,b);
+ INSERT INTO db2.t1 SELECT * FROM main.t1;
+ DETACH DATABASE db2;
+ }
+ db close
+
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'testkey';
+ SELECT count(*) FROM t1;
+ SELECT * FROM t1;
+ }
+} {4 1 1 one one 1 2 one two}
+db close
+file delete -force test.db
+
+# verify the pragma cipher_use_hmac
+# is set to true by default
+do_test verify-pragma-cipher-use-hmac-default {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test';
+ PRAGMA cipher_use_hmac;
+ }
+} {1}
+db close
+file delete -force test.db
+
+# verify the pragma cipher_use_hmac
+# reports the flag turned off
+do_test verify-pragma-cipher-use-hmac-off {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test';
+ PRAGMA cipher_use_hmac = off;
+ PRAGMA cipher_use_hmac;
+ }
+} {0}
+db close
+file delete -force test.db
+
+# verify the pragma cipher_default_use_hmac
+# is set to true by default
+do_test verify-pragma-cipher-default-use-hmac-default {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA cipher_default_use_hmac;
+ }
+} {1}
+db close
+file delete -force test.db
+
+# verify the pragma cipher_default_use_hmac
+# reports the flag turned off
+do_test verify-pragma-cipher-default-use-hmac-off {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA cipher_default_use_hmac = off;
+ PRAGMA cipher_default_use_hmac;
+ -- Be sure to turn cipher_default_use_hmac
+ -- back on or it will break later tests
+ -- (it's a global flag)
+ PRAGMA cipher_default_use_hmac = ON;
+ }
+} {0}
+db close
+file delete -force test.db
+
+# verify the pragma kdf_iter
+# reports the default value
+do_test verify-pragma-kdf-iter-reports-default {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test';
+ PRAGMA kdf_iter;
+ }
+} {4000}
+db close
+file delete -force test.db
+
+# verify the pragma kdf_iter
+# reports value changed
+do_test verify-pragma-kdf-iter-reports-value-changed {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test';
+ PRAGMA kdf_iter = 8000;
+ PRAGMA kdf_iter;
+ }
+} {8000}
+db close
+file delete -force test.db
+
+# verify the pragma fast_kdf_iter
+# reports the default value
+do_test verify-pragma-fast-kdf-iter-reports-default {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test';
+ PRAGMA fast_kdf_iter;
+ }
+} {2}
+db close
+file delete -force test.db
+
+# verify the pragma fast_kdf_iter
+# reports value changed
+do_test verify-pragma-fast-kdf-iter-reports-value-changed {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test';
+ PRAGMA fast_kdf_iter = 4000;
+ PRAGMA fast_kdf_iter;
+ }
+} {4000}
+db close
+file delete -force test.db
+
+# verify the pragma cipher_page_size
+# reports default value
+do_test verify-pragma-cipher-page-size-default {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test';
+ PRAGMA cipher_page_size;
+ }
+} {1024}
+db close
+file delete -force test.db
+
+# verify the pragma cipher_page_size
+# reports change in value
+do_test verify-pragma-cipher-page-size-changed {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test';
+ PRAGMA cipher_page_size = 4096;
+ PRAGMA cipher_page_size;
+ }
+} {4096}
+db close
+file delete -force test.db
+
+# verify the pragma cipher
+# reports the default value
+do_test verify-pragma-cipher-default {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test';
+ PRAGMA cipher;
+ }
+} {AES-256-CBC}
+db close
+file delete -force test.db
+
+# verify the pragma cipher
+# reports a change in value
+do_test verify-pragma-cipher-changed {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test';
+ PRAGMA cipher = 'AES-256-ECB';
+ PRAGMA cipher;
+ }
+} {AES-256-ECB}
+db close
+file delete -force test.db
+
+# verify the pragma cipher_hmac_salt_mask reports default
+do_test verify-pragma-hmac-salt-mask-reports-default {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test';
+ PRAGMA cipher_hmac_salt_mask;
+ }
+} {3a}
+db close
+file delete -force test.db
+
+# verify the pragma cipher_hmac_salt_mask
+# reports value changed
+do_test verify-pragma-hmac-salt-mask-reports-value-changed {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test';
+ PRAGMA cipher_hmac_salt_mask = "x'11'";
+ PRAGMA cipher_hmac_salt_mask;
+ }
+} {11}
+db close
+file delete -force test.db
+
+# verify the pragma cipher_hmac_pgno reports default
+do_test verify-pragma-hmac-pgno-reports-default {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test';
+ PRAGMA cipher_hmac_pgno;
+ }
+} {le}
+db close
+file delete -force test.db
+
+# verify the pragma cipher_hmac_pgno
+# reports value changed
+do_test verify-pragma-hmac-pgno-reports-value-changed {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test';
+ PRAGMA cipher_hmac_pgno = be;
+ PRAGMA cipher_hmac_pgno;
+ PRAGMA cipher_hmac_pgno = native;
+ PRAGMA cipher_hmac_pgno;
+ PRAGMA cipher_hmac_pgno = le;
+ PRAGMA cipher_hmac_pgno;
+ }
+} {be native le}
+db close
+file delete -force test.db
+
+# open a 2.0 beta database with 4000 round hmac kdf and 0x00
+# hmac salt mask
+# verify it can be opened
+do_test open-2.0-beta-database {
+ sqlite_orig db sqlcipher-2.0-beta-testkey.db
+ execsql {
+ PRAGMA key = 'testkey';
+ PRAGMA fast_kdf_iter = 4000;
+ PRAGMA cipher_hmac_salt_mask = "x'00'";
+ SELECT count(*) FROM t1;
+ SELECT * FROM t1;
+ }
+} {2 test-0-0 test-0-1 test-1-0 test-1-1}
+db close
+
+# open a 2.0 beta database
+# attach a new standard database
+# copy schema between the two, and verify the latter
+# can be opened
+do_test 2.0-beta-to-2.0-migration {
+ sqlite_orig db sqlcipher-2.0-beta-testkey.db
+
+ execsql {
+ PRAGMA key = 'testkey';
+ PRAGMA cipher_hmac_salt_mask = "x'00'";
+ PRAGMA fast_kdf_iter = 4000;
+ SELECT count(*) FROM sqlite_master;
+
+ PRAGMA cipher_hmac_salt_mask = "x'3a'";
+ ATTACH DATABASE 'test.db' AS db2 KEY 'testkey';
+
+ CREATE TABLE db2.t1(a,b);
+ INSERT INTO db2.t1 SELECT * FROM main.t1;
+ DETACH DATABASE db2;
+ }
+ db close
+
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'testkey';
+ SELECT * FROM t1;
+ }
+} {test-0-0 test-0-1 test-1-0 test-1-1}
+db close
+file delete -force test.db
+
finish_test
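
Taken together, the new pragma tests above pin down the SQLCipher 2.1.1 defaults (kdf_iter 4000, fast_kdf_iter 2, cipher_page_size 1024, cipher AES-256-CBC, cipher_use_hmac on, cipher_hmac_salt_mask 3a, cipher_hmac_pgno le). The migration tests also rely on a subtle ordering: the legacy settings must be applied and used for at least one read of the old database before any global default is switched back. A condensed sketch (illustrative only; old.db and new.db are hypothetical file names):

  sqlite_orig db old.db
  execsql {
    PRAGMA key = 'testkey';
    PRAGMA fast_kdf_iter = 4000;               -- 2.0 beta KDF schedule
    PRAGMA cipher_hmac_salt_mask = "x'00'";    -- 2.0 beta salt mask
    SELECT count(*) FROM sqlite_master;        -- force main to be keyed with the legacy settings
    PRAGMA cipher_hmac_salt_mask = "x'3a'";    -- restore the default before attaching
    ATTACH DATABASE 'new.db' AS db2 KEY 'testkey';
    CREATE TABLE db2.t1(a,b);
    INSERT INTO db2.t1 SELECT * FROM main.t1;
    DETACH DATABASE db2;
  }
  db close
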
diff --git a/test/date.test b/test/date.test
index 9bfec12..a30402c 100644
--- a/test/date.test
+++ b/test/date.test
@@ -151,7 +151,7 @@ datetest 3.2.1 {strftime('pre%fpost','2003-10-31 12:34:56.432')} pre56.432post
datetest 3.2.2 {strftime('%f','2003-10-31 12:34:59.9999999')} 59.999
datetest 3.3 {strftime('%H','2003-10-31 12:34:56.432')} 12
datetest 3.4 {strftime('%j','2003-10-31 12:34:56.432')} 304
-datetest 3.5 {strftime('%J','2003-10-31 12:34:56.432')} 2452944.02426426
+datetest 3.5 {strftime('%J','2003-10-31 12:34:56.432')} 2452944.024264259
datetest 3.6 {strftime('%m','2003-10-31 12:34:56.432')} 10
datetest 3.7 {strftime('%M','2003-10-31 12:34:56.432')} 34
datetest 3.8.1 {strftime('%s','2003-10-31 12:34:56.432')} 1067603696
diff --git a/test/e_uri.test b/test/e_uri.test
index 8110d70..8c9949e 100644
--- a/test/e_uri.test
+++ b/test/e_uri.test
@@ -131,10 +131,10 @@ sqlite3_config_uri 1
if {$tcl_platform(platform) == "unix"} {
set flags [list SQLITE_OPEN_READWRITE SQLITE_OPEN_CREATE SQLITE_OPEN_URI]
foreach {tn uri error} "
- 1 {file://localhost[get_pwd]/test.db} {not an error}
- 2 {file://[get_pwd]/test.db} {not an error}
- 3 {file://x[get_pwd]/test.db} {invalid uri authority: x}
- 4 {file://invalid[get_pwd]/test.db} {invalid uri authority: invalid}
+ 1 {file://localhost[test_pwd /]test.db} {not an error}
+ 2 {file://[test_pwd /]test.db} {not an error}
+ 3 {file://x[test_pwd /]test.db} {invalid uri authority: x}
+ 4 {file://invalid[test_pwd /]test.db} {invalid uri authority: invalid}
" {
do_test 2.$tn {
set DB [sqlite3_open_v2 $uri $flags ""]
@@ -153,9 +153,9 @@ if {$tcl_platform(platform) == "unix"} {
# parameters passed through to the VFS xOpen() methods.
#
foreach {tn uri parse} "
- 1 {file:test.db#abc} {[get_pwd]/test.db {}}
- 2 {file:test.db?a=b#abc} {[get_pwd]/test.db {a b}}
- 3 {file:test.db?a=b#?c=d} {[get_pwd]/test.db {a b}}
+ 1 {file:test.db#abc} {[test_pwd / {}]test.db {}}
+ 2 {file:test.db?a=b#abc} {[test_pwd / {}]test.db {a b}}
+ 3 {file:test.db?a=b#?c=d} {[test_pwd / {}]test.db {a b}}
" {
do_filepath_test 3.$tn { parse_uri $uri } $parse
}
@@ -171,7 +171,7 @@ foreach {tn uri parse} "
# path is interpreted as a relative path.
#
foreach {tn uri parse} "
- 1 {file:test.db} {[get_pwd]/test.db {}}
+ 1 {file:test.db} {[test_pwd / {}]test.db {}}
2 {file:/test.db} {/test.db {}}
3 {file:///test.db} {/test.db {}}
4 {file://localhost/test.db} {/test.db {}}
@@ -241,9 +241,9 @@ do_test 6.1 {
} {no such vfs: nosuchvfs}
-# EVIDENCE-OF: R-60479-64270 The mode parameter may be set to either
-# "ro", "rw" or "rwc". Attempting to set it to any other value is an
-# error
+# EVIDENCE-OF: R-44013-13102 The mode parameter may be set to either
+# "ro", "rw", "rwc", or "memory". Attempting to set it to any other
+# value is an error
#
sqlite3 db test.db
db close
@@ -254,6 +254,8 @@ foreach {tn uri error} "
4 {file:test.db?mode=Ro} {no such access mode: Ro}
5 {file:test.db?mode=Rw} {no such access mode: Rw}
6 {file:test.db?mode=Rwc} {no such access mode: Rwc}
+ 7 {file:test.db?mode=memory} {not an error}
+ 8 {file:test.db?mode=MEMORY} {no such access mode: MEMORY}
" {
do_test 7.$tn { open_uri_error $uri } $error
}
@@ -306,10 +308,9 @@ foreach {tn uri read write create} {
catch {db close}
}
-# EVIDENCE-OF: R-56032-32287 If sqlite3_open_v2() is used, it is an
-# error to specify a value for the mode parameter that is less
-# restrictive than that specified by the flags passed as the third
-# parameter.
+# EVIDENCE-OF: R-20590-08726 It is an error to specify a value for the
+# mode parameter that is less restrictive than that specified by the
+# flags passed in the third parameter to sqlite3_open_v2().
#
forcedelete test.db
sqlite3 db test.db
diff --git a/test/fts3atoken.test b/test/fts3atoken.test
index 554259d..9277bfb 100644
--- a/test/fts3atoken.test
+++ b/test/fts3atoken.test
@@ -174,6 +174,16 @@ ifcapable icu {
insert into x1 (name) values (NULL);
delete from x1;
}
+
+ proc cp_to_str {codepoint_list} {
+ set fmt [string repeat %c [llength $codepoint_list]]
+ eval [list format $fmt] $codepoint_list
+ }
+
+ do_test 5.2 {
+ set str [cp_to_str {19968 26085 32822 32645 27874 23433 20986}]
+ execsql { INSERT INTO x1 VALUES($str) }
+ } {}
}
diff --git a/test/fts3auto.test b/test/fts3auto.test
index d5ab4ef..20b2812 100644
--- a/test/fts3auto.test
+++ b/test/fts3auto.test
@@ -67,7 +67,7 @@ proc do_fts3query_test {tn args} {
foreach {k v} [lrange $args 0 [expr $nArg-3]] {
switch -- $k {
-deferred {
- set deferred $v
+ ifcapable fts4_deferred { set deferred $v }
}
default {
error "bad option \"$k\": must be -deferred"
@@ -509,9 +509,9 @@ foreach {tn create} {
do_fts3query_test 3.$tn.2.1 t1 {a OR c}
- do_test 3.$tn.3 {
- fts3_zero_long_segments t1 $limit
- } {1}
+ ifcapable fts4_deferred {
+ do_test 3.$tn.3 { fts3_zero_long_segments t1 $limit } {1}
+ }
foreach {tn2 expr def} {
1 {a NEAR c} {}
@@ -550,7 +550,11 @@ foreach {tn create} {
do_test 4.$tn.2 {
set limit [fts3_make_deferrable t1 five]
execsql { INSERT INTO t1(t1) VALUES('optimize') }
- expr {[fts3_zero_long_segments t1 $limit]>0}
+ ifcapable fts4_deferred {
+ expr {[fts3_zero_long_segments t1 $limit]>0}
+ } else {
+ expr 1
+ }
} {1}
do_fts3query_test 4.$tn.3.1 -deferred five t1 {one AND five}
diff --git a/test/fts3defer.test b/test/fts3defer.test
index 4c8213d..532d4df 100644
--- a/test/fts3defer.test
+++ b/test/fts3defer.test
@@ -13,7 +13,7 @@ set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/malloc_common.tcl
-ifcapable !fts3 {
+ifcapable !fts3||!fts4_deferred {
finish_test
return
}
diff --git a/test/fts3defer2.test b/test/fts3defer2.test
index 92a4491..337359a 100644
--- a/test/fts3defer2.test
+++ b/test/fts3defer2.test
@@ -13,7 +13,10 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/malloc_common.tcl
-ifcapable !fts3 { finish_test ; return }
+ifcapable !fts3||!fts4_deferred {
+ finish_test
+ return
+}
set testprefix fts3defer2
diff --git a/test/fts3fault2.test b/test/fts3fault2.test
index 0178ed2..f2d10bc 100644
--- a/test/fts3fault2.test
+++ b/test/fts3fault2.test
@@ -131,4 +131,28 @@ do_faultsim_test 4.1 -prep {
faultsim_test_result {0 {}}
}
+ifcapable fts3_unicode {
+ do_test 5.0 {
+ faultsim_delete_and_reopen
+ execsql {
+ CREATE VIRTUAL TABLE ft USING fts4(a, tokenize=unicode61);
+ }
+ faultsim_save_and_close
+ } {}
+
+ do_faultsim_test 5.1 -faults oom* -prep {
+ faultsim_restore_and_reopen
+ db eval {SELECT * FROM sqlite_master}
+ } -body {
+ execsql { INSERT INTO ft VALUES('the quick brown fox'); }
+ execsql { INSERT INTO ft VALUES(
+ 'theunusuallylongtokenthatjustdragsonandonandonandthendragsonsomemoreeof'
+ );
+ }
+ execsql { SELECT docid FROM ft WHERE ft MATCH 'th*' }
+ } -test {
+ faultsim_test_result {0 {1 2}}
+ }
+}
+
finish_test
diff --git a/test/fts3matchinfo.test b/test/fts3matchinfo.test
index 0e88858..924db9c 100644
--- a/test/fts3matchinfo.test
+++ b/test/fts3matchinfo.test
@@ -275,11 +275,14 @@ do_matchinfo_test 4.3.4 t5 {t5 MATCH 'a a a'} { s {3 1} }
do_matchinfo_test 4.3.5 t5 {t5 MATCH '"a b" "a b"'} { s {2} }
do_matchinfo_test 4.3.6 t5 {t5 MATCH 'a OR b'} { s {1 2 1 1} }
-do_execsql_test 4.4.0 {
- INSERT INTO t5(t5) VALUES('optimize');
- UPDATE t5_segments
- SET block = zeroblob(length(block))
- WHERE length(block)>10000;
+do_execsql_test 4.4.0.1 { INSERT INTO t5(t5) VALUES('optimize') }
+
+ifcapable fts4_deferred {
+ do_execsql_test 4.4.0.2 {
+ UPDATE t5_segments
+ SET block = zeroblob(length(block))
+ WHERE length(block)>10000;
+ }
}
do_matchinfo_test 4.4.2 t5 {t5 MATCH 'a b'} { s {2} }
diff --git a/test/fts4aa.test b/test/fts4aa.test
index c569331..2e6baf8 100644
--- a/test/fts4aa.test
+++ b/test/fts4aa.test
@@ -1655,14 +1655,16 @@ do_test fts4aa-1.8 {
SELECT docid FROM t1_docsize EXCEPT SELECT docid FROM t1
}
} {}
-do_test fts4aa-1.9 {
- # Note: Token 'in' is being deferred in the following query.
- db eval {
- SELECT docid, mit(matchinfo(t1, 'pcxnal')) FROM t1
- WHERE t1 MATCH 'joseph died in egypt'
- ORDER BY docid;
- }
-} {1050026 {4 1 1 1 1 1 1 1 2 1 1 1 1 1 1 23 23}}
+ifcapable fts4_deferred {
+ do_test fts4aa-1.9 {
+ # Note: Token 'in' is being deferred in the following query.
+ db eval {
+ SELECT docid, mit(matchinfo(t1, 'pcxnal')) FROM t1
+ WHERE t1 MATCH 'joseph died in egypt'
+ ORDER BY docid;
+ }
+ } {1050026 {4 1 1 1 1 1 1 1 2 1 1 1 1 1 1 23 23}}
+}
# Should get the same search results from FTS3
#
diff --git a/test/fts4unicode.test b/test/fts4unicode.test
new file mode 100644
index 0000000..0ac60a6
--- /dev/null
+++ b/test/fts4unicode.test
@@ -0,0 +1,387 @@
+# 2012 May 25
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#*************************************************************************
+#
+# The tests in this file focus on testing the "unicode" FTS tokenizer.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+ifcapable !fts3_unicode { finish_test ; return }
+set ::testprefix fts4unicode
+
+proc do_unicode_token_test {tn input res} {
+ set input [string map {' ''} $input]
+ uplevel [list do_execsql_test $tn "
+ SELECT fts3_tokenizer_test('unicode61', 'remove_diacritics=0', '$input');
+ " [list [list {*}$res]]]
+}
+
+proc do_unicode_token_test2 {tn input res} {
+ set input [string map {' ''} $input]
+ uplevel [list do_execsql_test $tn "
+ SELECT fts3_tokenizer_test('unicode61', '$input');
+ " [list [list {*}$res]]]
+}
+
+proc do_unicode_token_test3 {tn args} {
+ set res [lindex $args end]
+ set sql "SELECT fts3_tokenizer_test('unicode61'"
+ foreach a [lrange $args 0 end-1] {
+ append sql ", '"
+ append sql [string map {' ''} $a]
+ append sql "'"
+ }
+ append sql ")"
+ uplevel [list do_execsql_test $tn $sql [list [list {*}$res]]]
+}
+
+do_unicode_token_test 1.0 {a B c D} {0 a a 1 b B 2 c c 3 d D}
+do_unicode_token_test 1.1 {Ä Ö Ü} {0 ä Ä 1 ö Ö 2 ü Ü}
+do_unicode_token_test 1.2 {xÄx xÖx xÜx} {0 xäx xÄx 1 xöx xÖx 2 xüx xÜx}
+
+# 0x00DF is a small "sharp s". 0x1E9E is a capital sharp s.
+do_unicode_token_test 1.3 "\uDF" "0 \uDF \uDF"
+do_unicode_token_test 1.4 "\u1E9E" "0 ß \u1E9E"
+do_unicode_token_test 1.5 "\u1E9E" "0 \uDF \u1E9E"
+
+do_unicode_token_test 1.6 "The quick brown fox" {
+ 0 the The 1 quick quick 2 brown brown 3 fox fox
+}
+do_unicode_token_test 1.7 "The\u00bfquick\u224ebrown\u2263fox" {
+ 0 the The 1 quick quick 2 brown brown 3 fox fox
+}
+
+do_unicode_token_test2 1.8 {a B c D} {0 a a 1 b B 2 c c 3 d D}
+do_unicode_token_test2 1.9 {Ä Ö Ü} {0 a Ä 1 o Ö 2 u Ü}
+do_unicode_token_test2 1.10 {xÄx xÖx xÜx} {0 xax xÄx 1 xox xÖx 2 xux xÜx}
+
+# Check that diacritics are removed if remove_diacritics=1 is specified.
+# And that they do not break tokens.
+do_unicode_token_test2 1.10 "xx\u0301xx" "0 xxxx xx\u301xx"
+
+#-------------------------------------------------------------------------
+#
+set docs [list {
+ Enhance the INSERT syntax to allow multiple rows to be inserted via the
+ VALUES clause.
+} {
+ Enhance the CREATE VIRTUAL TABLE command to support the IF NOT EXISTS clause.
+} {
+ Added the sqlite3_stricmp() interface as a counterpart to sqlite3_strnicmp().
+} {
+ Added the sqlite3_db_readonly() interface.
+} {
+ Added the SQLITE_FCNTL_PRAGMA file control, giving VFS implementations the
+ ability to add new PRAGMA statements or to override built-in PRAGMAs.
+} {
+ Queries of the form: "SELECT max(x), y FROM table" returns the value of y on
+ the same row that contains the maximum x value.
+} {
+ Added support for the FTS4 languageid option.
+} {
+ Documented support for the FTS4 content option. This feature has actually
+ been in the code since version 3.7.9 but is only now considered to be
+ officially supported.
+} {
+ Pending statements no longer block ROLLBACK. Instead, the pending statement
+ will return SQLITE_ABORT upon next access after the ROLLBACK.
+} {
+ Improvements to the handling of CSV inputs in the command-line shell
+} {
+ Fix a bug introduced in version 3.7.10 that might cause a LEFT JOIN to be
+ incorrectly converted into an INNER JOIN if the WHERE clause indexable terms
+ connected by OR.
+}]
+
+set map(a) [list "\u00C4" "\u00E4"] ; # LATIN LETTER A WITH DIAERESIS
+set map(e) [list "\u00CB" "\u00EB"] ; # LATIN LETTER E WITH DIAERESIS
+set map(i) [list "\u00CF" "\u00EF"] ; # LATIN LETTER I WITH DIAERESIS
+set map(o) [list "\u00D6" "\u00F6"] ; # LATIN LETTER O WITH DIAERESIS
+set map(u) [list "\u00DC" "\u00FC"] ; # LATIN LETTER U WITH DIAERESIS
+set map(y) [list "\u0178" "\u00FF"] ; # LATIN LETTER Y WITH DIAERESIS
+set map(h) [list "\u1E26" "\u1E27"] ; # LATIN LETTER H WITH DIAERESIS
+set map(w) [list "\u1E84" "\u1E85"] ; # LATIN LETTER W WITH DIAERESIS
+set map(x) [list "\u1E8C" "\u1E8D"] ; # LATIN LETTER X WITH DIAERESIS
+foreach k [array names map] {
+ lappend mappings [string toupper $k] [lindex $map($k) 0]
+ lappend mappings $k [lindex $map($k) 1]
+}
+proc mapdoc {doc} {
+ set doc [regsub -all {[[:space:]]+} $doc " "]
+ string map $::mappings [string trim $doc]
+}
+
+do_test 2.0 {
+ execsql { CREATE VIRTUAL TABLE t2 USING fts4(tokenize=unicode61, x); }
+ foreach doc $docs {
+ set d [mapdoc $doc]
+ execsql { INSERT INTO t2 VALUES($d) }
+ }
+} {}
+
+do_test 2.1 {
+ set q [mapdoc "row"]
+ execsql { SELECT * FROM t2 WHERE t2 MATCH $q }
+} [list [mapdoc {
+ Queries of the form: "SELECT max(x), y FROM table" returns the value of y on
+ the same row that contains the maximum x value.
+}]]
+
+foreach {tn query snippet} {
+ 2 "row" {
+ ...returns the value of y on the same [row] that contains
+ the maximum x value.
+ }
+ 3 "ROW" {
+ ...returns the value of y on the same [row] that contains
+ the maximum x value.
+ }
+ 4 "rollback" {
+ ...[ROLLBACK]. Instead, the pending statement
+ will return SQLITE_ABORT upon next access after the [ROLLBACK].
+ }
+ 5 "rOllback" {
+ ...[ROLLBACK]. Instead, the pending statement
+ will return SQLITE_ABORT upon next access after the [ROLLBACK].
+ }
+ 6 "lang*" {
+ Added support for the FTS4 [languageid] option.
+ }
+} {
+ do_test 2.$tn {
+ set q [mapdoc $query]
+ execsql { SELECT snippet(t2, '[', ']', '...') FROM t2 WHERE t2 MATCH $q }
+ } [list [mapdoc $snippet]]
+}
+
+#-------------------------------------------------------------------------
+# Make sure the unicode61 tokenizer does not crash if it is passed a
+# NULL pointer.
+reset_db
+do_execsql_test 3.1 {
+ CREATE VIRTUAL TABLE t1 USING fts4(tokenize=unicode61, x, y);
+ INSERT INTO t1 VALUES(NULL, 'a b c');
+}
+
+do_execsql_test 3.2 {
+ SELECT snippet(t1, '[', ']') FROM t1 WHERE t1 MATCH 'b'
+} {{a [b] c}}
+
+do_execsql_test 3.3 {
+ BEGIN;
+ DELETE FROM t1;
+ INSERT INTO t1 VALUES('b b b b b b b b b b b', 'b b b b b b b b b b b b b');
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 VALUES('a b c', NULL);
+ INSERT INTO t1 VALUES('a x c', NULL);
+ COMMIT;
+}
+
+do_execsql_test 3.4 {
+ SELECT * FROM t1 WHERE t1 MATCH 'a b';
+} {{a b c} {}}
+
+#-------------------------------------------------------------------------
+#
+reset_db
+
+do_test 4.1 {
+ set a "abc\uFFFEdef"
+ set b "abc\uD800def"
+ set c "\uFFFEdef"
+ set d "\uD800def"
+ execsql {
+ CREATE VIRTUAL TABLE t1 USING fts4(tokenize=unicode61, x);
+ INSERT INTO t1 VALUES($a);
+ INSERT INTO t1 VALUES($b);
+ INSERT INTO t1 VALUES($c);
+ INSERT INTO t1 VALUES($d);
+ }
+} {}
+
+do_test 4.2 {
+ set a [binary format c* {0x61 0xF7 0xBF 0xBF 0xBF 0x62}]
+ set b [binary format c* {0x61 0xF7 0xBF 0xBF 0xBF 0xBF 0x62}]
+ set c [binary format c* {0x61 0xF7 0xBF 0xBF 0xBF 0xBF 0xBF 0x62}]
+ set d [binary format c* {0x61 0xF7 0xBF 0xBF 0xBF 0xBF 0xBF 0xBF 0x62}]
+ execsql {
+ INSERT INTO t1 VALUES($a);
+ INSERT INTO t1 VALUES($b);
+ INSERT INTO t1 VALUES($c);
+ INSERT INTO t1 VALUES($d);
+ }
+} {}
+
+do_test 4.3 {
+ set a [binary format c* {0xF7 0xBF 0xBF 0xBF}]
+ set b [binary format c* {0xF7 0xBF 0xBF 0xBF 0xBF}]
+ set c [binary format c* {0xF7 0xBF 0xBF 0xBF 0xBF 0xBF}]
+ set d [binary format c* {0xF7 0xBF 0xBF 0xBF 0xBF 0xBF 0xBF}]
+ execsql {
+ INSERT INTO t1 VALUES($a);
+ INSERT INTO t1 VALUES($b);
+ INSERT INTO t1 VALUES($c);
+ INSERT INTO t1 VALUES($d);
+ }
+} {}
+
+#-------------------------------------------------------------------------
+
+do_unicode_token_test3 5.1 {tokenchars=} {
+ sqlite3_reset sqlite3_column_int
+} {
+ 0 sqlite3 sqlite3
+ 1 reset reset
+ 2 sqlite3 sqlite3
+ 3 column column
+ 4 int int
+}
+
+do_unicode_token_test3 5.2 {tokenchars=_} {
+ sqlite3_reset sqlite3_column_int
+} {
+ 0 sqlite3_reset sqlite3_reset
+ 1 sqlite3_column_int sqlite3_column_int
+}
+
+do_unicode_token_test3 5.3 {separators=xyz} {
+ Laotianxhorseyrunszfast
+} {
+ 0 laotian Laotian
+ 1 horse horse
+ 2 runs runs
+ 3 fast fast
+}
+
+do_unicode_token_test3 5.4 {tokenchars=xyz} {
+ Laotianxhorseyrunszfast
+} {
+ 0 laotianxhorseyrunszfast Laotianxhorseyrunszfast
+}
+
+do_unicode_token_test3 5.5 {tokenchars=_} {separators=zyx} {
+ sqlite3_resetxsqlite3_column_intyhonda_phantom
+} {
+ 0 sqlite3_reset sqlite3_reset
+ 1 sqlite3_column_int sqlite3_column_int
+ 2 honda_phantom honda_phantom
+}
+
+do_unicode_token_test3 5.6 "separators=\u05D1" "abc\u05D1def" {
+ 0 abc abc 1 def def
+}
+
+do_unicode_token_test3 5.7 \
+ "tokenchars=\u2444\u2445" \
+ "separators=\u05D0\u05D1\u05D2" \
+ "\u2444fre\u2445sh\u05D0water\u05D2fish.\u2445timer" \
+ [list \
+ 0 \u2444fre\u2445sh \u2444fre\u2445sh \
+ 1 water water \
+ 2 fish fish \
+ 3 \u2445timer \u2445timer \
+ ]
+
+# Check that it is not possible to add a standalone diacritic codepoint
+# to either separators or tokenchars.
+do_unicode_token_test3 5.8 "separators=\u0301" \
+ "hello\u0301world \u0301helloworld" \
+ "0 helloworld hello\u0301world 1 helloworld helloworld"
+
+do_unicode_token_test3 5.9 "tokenchars=\u0301" \
+ "hello\u0301world \u0301helloworld" \
+ "0 helloworld hello\u0301world 1 helloworld helloworld"
+
+do_unicode_token_test3 5.10 "separators=\u0301" \
+ "remove_diacritics=0" \
+ "hello\u0301world \u0301helloworld" \
+ "0 hello\u0301world hello\u0301world 1 helloworld helloworld"
+
+do_unicode_token_test3 5.11 "tokenchars=\u0301" \
+ "remove_diacritics=0" \
+ "hello\u0301world \u0301helloworld" \
+ "0 hello\u0301world hello\u0301world 1 helloworld helloworld"
+
+
+#-------------------------------------------------------------------------
+
+proc do_tokenize {tokenizer txt} {
+ set res [list]
+ foreach {a b c} [db one {SELECT fts3_tokenizer_test($tokenizer, $txt)}] {
+ lappend res $b
+ }
+ set res
+}
+
+# Argument $lCodepoint must be a list of codepoints (integers) that
+# correspond to whitespace characters. This command creates a string
+# $W from the codepoints, then tokenizes "${W}hello{$W}world${W}"
+# using tokenizer $tokenizer. The test passes if the tokenizer successfully
+# extracts the two 5 character tokens.
+#
+proc do_isspace_test {tn tokenizer lCp} {
+ set whitespace [format [string repeat %c [llength $lCp]] {*}$lCp]
+ set txt "${whitespace}hello${whitespace}world${whitespace}"
+ uplevel [list do_test $tn [list do_tokenize $tokenizer $txt] {hello world}]
+}
+
+set tokenizers [list unicode61]
+ifcapable icu { lappend tokenizers icu }
+
+# Some tests to check that the tokenizers can both identify white-space
+# codepoints. All codepoints tested below are of type "Zs" in the
+# UnicodeData.txt file.
+foreach T $tokenizers {
+ do_isspace_test 6.$T.1 $T 32
+ do_isspace_test 6.$T.2 $T 160
+ do_isspace_test 6.$T.3 $T 5760
+ do_isspace_test 6.$T.4 $T 6158
+ do_isspace_test 6.$T.5 $T 8192
+ do_isspace_test 6.$T.6 $T 8193
+ do_isspace_test 6.$T.7 $T 8194
+ do_isspace_test 6.$T.8 $T 8195
+ do_isspace_test 6.$T.9 $T 8196
+ do_isspace_test 6.$T.10 $T 8197
+ do_isspace_test 6.$T.11 $T 8198
+ do_isspace_test 6.$T.12 $T 8199
+ do_isspace_test 6.$T.13 $T 8200
+ do_isspace_test 6.$T.14 $T 8201
+ do_isspace_test 6.$T.15 $T 8202
+ do_isspace_test 6.$T.16 $T 8239
+ do_isspace_test 6.$T.17 $T 8287
+ do_isspace_test 6.$T.18 $T 12288
+
+ do_isspace_test 6.$T.19 $T {32 160 5760 6158}
+ do_isspace_test 6.$T.19 $T {8192 8193 8194 8195}
+ do_isspace_test 6.$T.19 $T {8196 8197 8198 8199}
+ do_isspace_test 6.$T.19 $T {8200 8201 8202 8239}
+ do_isspace_test 6.$T.19 $T {8287 12288}
+}
+
+
+finish_test
+
+
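The tokenchars=, separators= and remove_diacritics= options exercised through fts3_tokenizer_test() above are the same ones accepted when declaring an FTS4 table with the unicode61 tokenizer. A minimal sketch (illustrative; the table and column names are made up):

  execsql {
    CREATE VIRTUAL TABLE ft USING fts4(
      body,
      tokenize=unicode61 "remove_diacritics=0" "tokenchars=_" "separators=."
    );
    INSERT INTO ft VALUES('sqlite3_reset.sqlite3_column_int');
    -- '_' is a token character and '.' a separator, so the row holds the two
    -- tokens sqlite3_reset and sqlite3_column_int and the query below matches it.
    SELECT * FROM ft WHERE ft MATCH 'sqlite3_reset';
  }
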
diff --git a/test/func.test b/test/func.test
index ba1ea02..e44c44b 100644
--- a/test/func.test
+++ b/test/func.test
@@ -312,7 +312,7 @@ ifcapable floatingpoint {
execsql {SELECT round(9999999999999.55,1);}
} {9999999999999.6}
do_test func-4.38 {
- execsql {SELECT round(9999999999999.555,2);}
+ execsql {SELECT round(9999999999999.556,2);}
} {9999999999999.56}
}
diff --git a/test/index5.test b/test/index5.test
new file mode 100644
index 0000000..c8e94b3
--- /dev/null
+++ b/test/index5.test
@@ -0,0 +1,75 @@
+# 2012 August 6
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set ::testprefix index5
+
+do_test 1.1 {
+ execsql {
+ PRAGMA page_size = 1024;
+ CREATE TABLE t1(x);
+ BEGIN;
+ }
+ for {set i 0} {$i < 100000} {incr i} {
+ execsql { INSERT INTO t1 VALUES(randstr(100,100)) }
+ }
+ execsql COMMIT
+ execsql {
+ CREATE INDEX i1 ON t1(x);
+ DROP INDEX I1;
+ PRAGMA main.page_size;
+ }
+} {1024}
+
+db close
+testvfs tvfs
+tvfs filter xWrite
+tvfs script write_cb
+proc write_cb {xCall file handle iOfst} {
+ if {[file tail $file]=="test.db"} {
+ lappend ::write_list [expr $iOfst/1024]
+ }
+  # puts "$xCall $file $iOfst"
+}
+
+do_test 1.2 {
+ sqlite3 db test.db -vfs tvfs
+ set ::write_list [list]
+ execsql { CREATE INDEX i1 ON t1(x) }
+} {}
+
+do_test 1.3 {
+ set nForward 0
+ set nBackward 0
+ set nNoncont 0
+ set iPrev [lindex $::write_list 0]
+ for {set i 1} {$i < [llength $::write_list]} {incr i} {
+ set iNext [lindex $::write_list $i]
+ if {$iNext==($iPrev+1)} {
+ incr nForward
+ } elseif {$iNext==($iPrev-1)} {
+ incr nBackward
+ } else {
+ incr nNoncont
+ }
+ set iPrev $iNext
+ }
+
+ expr {$nForward > $nBackward}
+} {1}
+db close
+tvfs delete
+
+finish_test
+
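What index5-1.3 above computes, shown on a small hypothetical write list: each recorded xWrite offset (in pages) is compared with the previous one and classified as forward (+1), backward (-1) or non-contiguous, and the test passes when forward writes outnumber backward ones.

  # Worked example with made-up page offsets:
  set demo {3 4 5 9 10 11 12 8}
  set fwd 0; set bwd 0; set other 0
  set prev [lindex $demo 0]
  foreach p [lrange $demo 1 end] {
    if {$p == $prev+1} {incr fwd} elseif {$p == $prev-1} {incr bwd} else {incr other}
    set prev $p
  }
  puts "$fwd forward, $bwd backward, $other non-contiguous"   ;# 5 forward, 0 backward, 2 non-contiguous
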
diff --git a/test/journal1.test b/test/journal1.test
index 2fdadfd..c89dd2b 100644
--- a/test/journal1.test
+++ b/test/journal1.test
@@ -41,7 +41,7 @@ do_test journal1-1.1 {
} 8
# Make changes to the database and save the journal file.
-# Then delete the database. Replace the the journal file
+# Then delete the database. Replace the journal file
# and try to create a new database with the same name. The
# old journal should not attempt to rollback into the new
# database.
diff --git a/test/loadext.test b/test/loadext.test
index 8f8339e..72eff12 100644
--- a/test/loadext.test
+++ b/test/loadext.test
@@ -23,7 +23,7 @@ ifcapable !load_ext {
# The name of the test extension varies by operating system.
#
-if {$::tcl_platform(platform) eq "windows" || $::tcl_platform(platform) eq "os2"} {
+if {$::tcl_platform(platform) eq "windows"} {
set testextension ./testloadext.dll
} else {
set testextension ./libtestloadext.so
diff --git a/test/misc1.test b/test/misc1.test
index e3f1b95..188a283 100644
--- a/test/misc1.test
+++ b/test/misc1.test
@@ -472,6 +472,11 @@ ifcapable subquery {
} {1 2 3 4 5 6 7 8 9 10 11}
}
+#
+# The following tests can only work if the current SQLite VFS has the concept
+# of a current directory.
+#
+ifcapable curdir {
# Make sure a database connection still works after changing the
# working directory.
#
@@ -495,6 +500,7 @@ do_test misc1-14.3 {
execsql {COMMIT}
file exists ./test.db-journal
} {0}
+}
# A failed create table should not leave the table in the internal
# data structures. Ticket #238.
diff --git a/test/misc7.test b/test/misc7.test
index 146dca0..4868c12 100644
--- a/test/misc7.test
+++ b/test/misc7.test
@@ -377,7 +377,7 @@ do_test misc7-16.X {
# These tests do not work on windows due to restrictions in the
# windows file system.
#
-if {$tcl_platform(platform)!="windows" && $tcl_platform(platform)!="os2"} {
+if {$tcl_platform(platform)!="windows"} {
# Some network filesystems (ex: AFP) do not support setting read-only
# permissions. Only run these tests if full unix permission setting
diff --git a/test/pager1.test b/test/pager1.test
index 9c62e87..61a0c0c 100644
--- a/test/pager1.test
+++ b/test/pager1.test
@@ -524,18 +524,27 @@ db close
# file-system is saved just before the xDelete() call to remove the
# master journal file from the file-system.
#
+set pwd [get_pwd]
testvfs tv -default 1
tv script copy_on_mj_delete
set ::mj_filename_length 0
proc copy_on_mj_delete {method filename args} {
if {[string match *mj* [file tail $filename]]} {
- set ::mj_filename_length [string length $filename]
+ #
+ # NOTE: Is the file name relative? If so, add the length of the current
+ # directory.
+ #
+ if {[is_relative_file $filename]} {
+ set ::mj_filename_length \
+ [expr {[string length $filename] + [string length $::pwd]}]
+ } else {
+ set ::mj_filename_length [string length $filename]
+ }
faultsim_save
}
return SQLITE_OK
}
-set pwd [get_pwd]
foreach {tn1 tcl} {
1 { set prefix "test.db" }
2 {
@@ -1019,8 +1028,17 @@ do_test pager1-5.4.1 {
# the master-journal name encoded as utf-8 with no nul term.
#
set mj_pointer [expr {
- 20 + [string length [get_pwd]] + [string length "/test.db-mjXXXXXX9XX"]
+ 20 + [string length "test.db-mjXXXXXX9XX"]
}]
+ #
+ # NOTE: For item 3 above, if the current SQLite VFS lacks the concept of a
+ # current directory, the length of the current directory name plus 1
+ # character for the directory separator character are NOT counted as
+ # part of the total size; otherwise, they are.
+ #
+ ifcapable curdir {
+ set mj_pointer [expr {$mj_pointer + [string length [get_pwd]] + 1}]
+ }
expr {$::max_journal==(512+2*(1024+8)+$mj_pointer)}
} 1
do_test pager1-5.4.2 {
@@ -1038,8 +1056,17 @@ do_test pager1-5.4.2 {
# written starting at the next (in this case 512 byte) sector boundary.
#
set mj_pointer [expr {
- 20 + [string length [get_pwd]] + [string length "/test.db-mjXXXXXX9XX"]
+ 20 + [string length "test.db-mjXXXXXX9XX"]
}]
+ #
+ # NOTE: If the current SQLite VFS lacks the concept of a current directory,
+ # the length of the current directory name plus 1 character for the
+ # directory separator character are NOT counted as part of the total
+ # size; otherwise, they are.
+ #
+ ifcapable curdir {
+ set mj_pointer [expr {$mj_pointer + [string length [get_pwd]] + 1}]
+ }
expr {$::max_journal==(((512+2*(1024+8)+511)/512)*512 + $mj_pointer)}
} 1
db close
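
The mj_pointer arithmetic in the two hunks above accounts for the master-journal record appended to each journal file: the master-journal name in UTF-8 plus 20 bytes of fixed overhead (4-byte page number, 4-byte name length, 4-byte checksum, 8-byte journal magic). A worked example, assuming a hypothetical 18-character working directory on a VFS that has the curdir concept:

  set pwd_len 18                               ;# e.g. [string length "/home/user/sqlite3"]
  set mj_pointer [expr {20 + [string length "test.db-mjXXXXXX9XX"]}]   ;# 20 + 19 = 39
  set mj_pointer [expr {$mj_pointer + $pwd_len + 1}]                   ;# + pwd + '/' separator = 58
  # pager1-5.4.1 then expects: 512-byte journal header + two page records of
  # (1024 + 8) bytes each + the 58-byte master-journal pointer.
  puts [expr {512 + 2*(1024+8) + $mj_pointer}]                         ;# 2634
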
diff --git a/test/permutations.test b/test/permutations.test
index 3165ea3..2ff77f9 100644
--- a/test/permutations.test
+++ b/test/permutations.test
@@ -111,7 +111,7 @@ set allquicktests [test_set $alltests -exclude {
thread003.test thread004.test thread005.test trans2.test vacuum3.test
incrvacuum_ioerr.test autovacuum_crash.test btree8.test shared_err.test
vtab_err.test walslow.test walcrash.test walcrash3.test
- walthread.test rtree3.test indexfault.test
+ walthread.test rtree3.test indexfault.test securedel2.test
}]
if {[info exists ::env(QUICKTEST_INCLUDE)]} {
set allquicktests [concat $allquicktests $::env(QUICKTEST_INCLUDE)]
@@ -142,7 +142,7 @@ test_suite "valgrind" -prefix "" -description {
Run the "veryquick" test suite with a couple of multi-process tests (that
fail under valgrind) omitted.
} -files [
- test_set $allquicktests -exclude *malloc* *ioerr* *fault* wal.test
+ test_set $allquicktests -exclude *malloc* *ioerr* *fault* wal.test atof1.test
] -initialize {
set ::G(valgrind) 1
} -shutdown {
@@ -185,9 +185,24 @@ test_suite "fts3" -prefix "" -description {
fts4aa.test fts4content.test
fts3conf.test fts3prefix.test fts3fault2.test fts3corrupt.test
fts3corrupt2.test fts3first.test fts4langid.test fts4merge.test
- fts4check.test
+ fts4check.test fts4unicode.test
}
+test_suite "nofaultsim" -prefix "" -description {
+ "Very" quick test suite. Runs in less than 5 minutes on a workstation.
+ This test suite is the same as the "quick" tests, except that some files
+ that test malloc and IO errors are omitted.
+} -files [
+ test_set $allquicktests -exclude *malloc* *ioerr* *fault*
+] -initialize {
+ catch {db close}
+ sqlite3_shutdown
+ install_malloc_faultsim 0
+ sqlite3_initialize
+ autoinstall_test_functions
+} -shutdown {
+ unset -nocomplain ::G(valgrind)
+}
lappend ::testsuitelist xxx
#-------------------------------------------------------------------------
diff --git a/test/pragma.test b/test/pragma.test
index bb10327..3c8d23a 100644
--- a/test/pragma.test
+++ b/test/pragma.test
@@ -16,6 +16,7 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+set testprefix pragma
# Do not use a codec for tests in this file, as the database file is
# manipulated directly using tcl scripts (using the [hexio_write] command).
@@ -40,6 +41,9 @@ do_not_use_codec
# pragma-15.*: Test that the value set using the cache_size pragma is not
# reset when the schema is reloaded.
# pragma-16.*: Test proxy locking
+# pragma-20.*: Test data_store_directory.
+# pragma-22.*: Test that "PRAGMA [db].integrity_check" respects the "db"
+# directive - if it is present.
#
ifcapable !pragma {
@@ -1510,5 +1514,110 @@ do_test pragma-19.5 {
file tail [lindex [execsql {PRAGMA filename}] 0]
} {test.db}
+if {$tcl_platform(platform)=="windows"} {
+# Test data_store_directory pragma
+#
+db close
+sqlite3 db test.db
+file mkdir data_dir
+do_test pragma-20.1 {
+ catchsql {PRAGMA data_store_directory}
+} {0 {}}
+do_test pragma-20.2 {
+ set pwd [string map {' ''} [file nativename [get_pwd]]]
+ catchsql "PRAGMA data_store_directory='$pwd';"
+} {0 {}}
+do_test pragma-20.3 {
+ catchsql {PRAGMA data_store_directory}
+} [list 0 [list [file nativename [get_pwd]]]]
+do_test pragma-20.4 {
+ set pwd [string map {' ''} [file nativename \
+ [file join [get_pwd] data_dir]]]
+ catchsql "PRAGMA data_store_directory='$pwd';"
+} {0 {}}
+do_test pragma-20.5 {
+ sqlite3 db2 test2.db
+ catchsql "PRAGMA database_list;" db2
+} [list 0 [list 0 main [file nativename \
+ [file join [get_pwd] data_dir test2.db]]]]
+catch {db2 close}
+do_test pragma-20.6 {
+ sqlite3 db2 [file join [get_pwd] test2.db]
+ catchsql "PRAGMA database_list;" db2
+} [list 0 [list 0 main [file nativename \
+ [file join [get_pwd] test2.db]]]]
+catch {db2 close}
+do_test pragma-20.7 {
+ catchsql "PRAGMA data_store_directory='';"
+} {0 {}}
+do_test pragma-20.8 {
+ catchsql {PRAGMA data_store_directory}
+} {0 {}}
+
+forcedelete data_dir
+} ;# endif windows
+
+do_test 21.1 {
+ # Create a corrupt database in testerr.db. And a non-corrupt at test.db.
+ #
+ db close
+ forcedelete test.db
+ sqlite3 db test.db
+ execsql {
+ PRAGMA page_size = 1024;
+ PRAGMA auto_vacuum = 0;
+ CREATE TABLE t1(a PRIMARY KEY, b);
+ INSERT INTO t1 VALUES(1, 1);
+ }
+ for {set i 0} {$i < 10} {incr i} {
+ execsql { INSERT INTO t1 SELECT a + (1 << $i), b + (1 << $i) FROM t1 }
+ }
+ db close
+ forcecopy test.db testerr.db
+ hexio_write testerr.db 15000 [string repeat 55 100]
+} {100}
+
+set mainerr {*** in database main ***
+Multiple uses for byte 672 of page 15}
+set auxerr {*** in database aux ***
+Multiple uses for byte 672 of page 15}
+
+do_test 22.2 {
+ catch { db close }
+ sqlite3 db testerr.db
+ execsql { PRAGMA integrity_check }
+} [list $mainerr]
+
+do_test 22.3.1 {
+ catch { db close }
+ sqlite3 db test.db
+ execsql {
+ ATTACH 'testerr.db' AS 'aux';
+ PRAGMA integrity_check;
+ }
+} [list $auxerr]
+do_test 22.3.2 {
+ execsql { PRAGMA main.integrity_check; }
+} {ok}
+do_test 22.3.3 {
+ execsql { PRAGMA aux.integrity_check; }
+} [list $auxerr]
+
+do_test 22.4.1 {
+ catch { db close }
+ sqlite3 db testerr.db
+ execsql {
+ ATTACH 'test.db' AS 'aux';
+ PRAGMA integrity_check;
+ }
+} [list $mainerr]
+do_test 22.4.2 {
+ execsql { PRAGMA main.integrity_check; }
+} [list $mainerr]
+do_test 22.4.3 {
+ execsql { PRAGMA aux.integrity_check; }
+} {ok}
finish_test
+
+
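For context on the Windows-only pragma-20.* tests above: once data_store_directory is set, relative database names passed to later opens resolve against that directory rather than the process working directory, and setting it back to '' restores the default. A minimal sketch mirroring pragma-20.4/20.5 (illustrative; db2 and rel.db are made-up names):

  file mkdir data_dir
  set dir [string map {' ''} [file nativename [file join [get_pwd] data_dir]]]
  db eval "PRAGMA data_store_directory = '$dir'"
  sqlite3 db2 rel.db                 ;# actually opens <cwd>/data_dir/rel.db
  db2 eval { PRAGMA database_list }  ;# reports the data_dir-qualified path
  db2 close
  db eval { PRAGMA data_store_directory = '' }
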
diff --git a/test/quota.test b/test/quota.test
index ec89086..816dec8 100644
--- a/test/quota.test
+++ b/test/quota.test
@@ -12,6 +12,13 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+
+# If SQLITE_CURDIR is not defined, omit this file.
+ifcapable !curdir {
+ finish_test
+ return
+}
+
source $testdir/malloc_common.tcl
unset -nocomplain defaultVfs
diff --git a/test/quota2.test b/test/quota2.test
index 5bb50d7..1482db6 100644
--- a/test/quota2.test
+++ b/test/quota2.test
@@ -12,6 +12,13 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+
+# If SQLITE_CURDIR is not defined, omit this file.
+ifcapable !curdir {
+ finish_test
+ return
+}
+
source $testdir/malloc_common.tcl
db close
@@ -164,11 +171,17 @@ do_test quota2-2.1 {
do_test quota2-2.2 {
set ::quota
} {}
-do_test quota2-2.3 {
+do_test quota2-2.3.1 {
sqlite3_quota_rewind $::h1
+ sqlite3_quota_file_available $::h1
+} {7000}
+do_test quota2-2.3.2 {
set ::x [sqlite3_quota_fread $::h1 1001 7]
string length $::x
} {6006}
+do_test quota2-2.3.3 {
+ sqlite3_quota_file_available $::h1
+} {0}
do_test quota2-2.4 {
string match $::x [string range $::bigtext 0 6005]
} {1}
@@ -180,22 +193,40 @@ do_test quota2-2.6 {
sqlite3_quota_fseek $::h1 -100 SEEK_END
sqlite3_quota_ftell $::h1
} {6900}
+do_test quota2-2.6.1 {
+ sqlite3_quota_file_available $::h1
+} {100}
do_test quota2-2.7 {
sqlite3_quota_fseek $::h1 -100 SEEK_CUR
sqlite3_quota_ftell $::h1
} {6800}
+do_test quota2-2.7.1 {
+ sqlite3_quota_file_available $::h1
+} {200}
do_test quota2-2.8 {
sqlite3_quota_fseek $::h1 50 SEEK_CUR
sqlite3_quota_ftell $::h1
} {6850}
+do_test quota2-2.8.1 {
+ sqlite3_quota_file_available $::h1
+} {150}
do_test quota2-2.9 {
sqlite3_quota_fseek $::h1 50 SEEK_SET
sqlite3_quota_ftell $::h1
} {50}
+do_test quota2-2.9.1 {
+ sqlite3_quota_file_available $::h1
+} {6950}
do_test quota2-2.10 {
sqlite3_quota_rewind $::h1
sqlite3_quota_ftell $::h1
} {0}
+do_test quota2-2.10.1 {
+ sqlite3_quota_file_available $::h1
+} {7000}
+do_test quota2-2.10.2 {
+ sqlite3_quota_ferror $::h1
+} {0}
do_test quota2-2.11 {
standard_path [sqlite3_quota_dump]
} {{*/quota2b/* 5000 0} {*/quota2a/* 4000 0}}
diff --git a/test/releasetest.tcl b/test/releasetest.tcl
index 7725630..3b4662c 100644
--- a/test/releasetest.tcl
+++ b/test/releasetest.tcl
@@ -151,6 +151,15 @@ array set ::Configs {
-DSQLITE_ENABLE_OVERSIZE_CELL_CHECK=1
-DSQLITE_MAX_ATTACHED=62
}
+ "Devkit" {
+ -DSQLITE_DEFAULT_FILE_FORMAT=4
+ -DSQLITE_MAX_ATTACHED=30
+ -DSQLITE_ENABLE_COLUMN_METADATA
+ -DSQLITE_ENABLE_FTS4
+ -DSQLITE_ENABLE_FTS4_PARENTHESIS
+ -DSQLITE_DISABLE_FTS4_DEFERRED
+ -DSQLITE_ENABLE_RTREE
+ }
}
array set ::Platforms {
@@ -166,6 +175,7 @@ array set ::Platforms {
"Device-One" fulltest
}
Linux-i686 {
+ "Devkit" test
"Unlock-Notify" "QUICKTEST_INCLUDE=notify2.test test"
"Device-One" test
"Device-Two" test
@@ -218,8 +228,6 @@ proc run_test_suite {name testtarget config} {
if {$::tcl_platform(platform)=="windows"} {
append opts " -DSQLITE_OS_WIN=1"
- } elseif {$::tcl_platform(platform)=="os2"} {
- append opts " -DSQLITE_OS_OS2=1"
} else {
append opts " -DSQLITE_OS_UNIX=1"
}
diff --git a/test/rowid.test b/test/rowid.test
index 4a9404d..5daf581 100644
--- a/test/rowid.test
+++ b/test/rowid.test
@@ -657,7 +657,7 @@ do_test rowid-11.4 {
# Test the automatic generation of rowids when the table already contains
# a rowid with the maximum value.
#
-# Once the the maximum rowid is taken, rowids are normally chosen at
+# Once the maximum rowid is taken, rowids are normally chosen at
# random. By by reseting the random number generator, we can cause
# the rowid guessing loop to collide with prior rowids, and test the
# loop out to its limit of 100 iterations. After 100 collisions, the
diff --git a/test/securedel2.test b/test/securedel2.test
new file mode 100644
index 0000000..b20f4f9
--- /dev/null
+++ b/test/securedel2.test
@@ -0,0 +1,95 @@
+# 2012 August 7
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#*************************************************************************
+#
+# Tests for the secure_delete pragma.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set ::testprefix securedel2
+
+# Generate 1000 pseudo-random 64-bit blobs.
+#
+for {set i 1} {$i <= 1000} {incr i} {
+ set aBlob($i) [string range [db one {SELECT quote(randomblob(8))}] 2 end-1]
+}
+
+proc detect_blob_prepare {zFile} {
+ set nByte [file size $zFile]
+ set ::detect_blob_data [hexio_read $zFile 0 $nByte]
+}
+
+proc detect_blob {zFile iBlob} {
+ if {$zFile != ""} { detect_blob_prepare $zFile }
+ string match "*$::aBlob($iBlob)*" $::detect_blob_data
+}
+
+do_test 1.1 {
+ execsql { PRAGMA secure_delete = 1 }
+ execsql { PRAGMA auto_vacuum = 0 }
+ execsql { CREATE TABLE t1(x, y) }
+ for {set i 1} {$i <= 1000} {incr i} {
+ set x "X'[string repeat $aBlob($i) 1]'"
+ set y "X'[string repeat $aBlob($i) 500]'"
+ execsql "INSERT INTO t1 VALUES($x, $y)"
+ }
+} {}
+
+do_test 1.2 { detect_blob test.db 1 } {1}
+
+forcecopy test.db test.db.bak
+do_execsql_test 1.3.1 { PRAGMA secure_delete = 0 } {0}
+do_execsql_test 1.3.2 { DELETE FROM t1 WHERE rowid = 1 }
+do_test 1.3.3 { detect_blob test.db 1 } {1}
+
+db close
+forcecopy test.db.bak test.db
+sqlite3 db test.db
+do_execsql_test 1.4.1 { PRAGMA secure_delete = 1 } {1}
+do_execsql_test 1.4.2 { DELETE FROM t1 WHERE rowid = 1 }
+do_test 1.4.3 { detect_blob test.db 1 } {0}
+
+do_execsql_test 1.5.1 { DELETE FROM t1 WHERE rowid>850 } {}
+do_test 1.5.2 {
+ set n 0
+ detect_blob_prepare test.db
+ for {set i 851} {$i <= 1000} {incr i 5} {
+ incr n [detect_blob {} $i]
+ }
+ set n
+} {0}
+
+db close
+sqlite3 db test.db
+do_test 1.6.1 {
+ execsql {
+ PRAGMA cache_size = 200;
+ PRAGMA secure_delete = 1;
+ CREATE TABLE t2(x);
+ SELECT * FROM t1;
+ }
+ for {set i 100} {$i < 5000} {incr i} {
+ execsql { INSERT INTO t2 VALUES(randomblob($i)) }
+ }
+ execsql { DELETE FROM t1 }
+} {}
+
+do_test 1.6.2 {
+ set n 0
+ detect_blob_prepare test.db
+ for {set i 2} {$i <= 850} {incr i 5} {
+ incr n [detect_blob {} $i]
+ }
+ set n
+} {0}
+
+finish_test
+
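The detect_blob machinery above searches the raw database image for blob content that was inserted earlier. The property being asserted, condensed into a sketch (illustrative; the connection and file names are made up):

  sqlite3 dbs scratch.db
  dbs eval {
    PRAGMA secure_delete = ON;                  -- overwrite freed content with zeros
    CREATE TABLE t(x);
    INSERT INTO t VALUES(x'deadbeefdeadbeefdeadbeefdeadbeef');
    DELETE FROM t;                              -- the cell's bytes are scrubbed from the page
  }
  dbs close
  # [string match -nocase *deadbeef* [hexio_read scratch.db 0 [file size scratch.db]]]
  # should now be 0, whereas with secure_delete=OFF the pattern would normally
  # still be found in the free space of the page (compare tests 1.3.* and 1.4.*).
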
diff --git a/test/select6.test b/test/select6.test
index e0ff165..64a8519 100644
--- a/test/select6.test
+++ b/test/select6.test
@@ -22,6 +22,7 @@ ifcapable !subquery {
finish_test
return
}
+set ::testprefix select6
do_test select6-1.0 {
execsql {
@@ -513,5 +514,48 @@ do_test select6-9.11 {
} {2 12 3 13 4 14}
+#-------------------------------------------------------------------------
+# Test that if a UNION ALL sub-query that would otherwise be eligible for
+# flattening consists of two or more SELECT statements that do not all
+# return the same number of result columns, the error is detected.
+#
+do_execsql_test 10.1 {
+ CREATE TABLE t(i,j,k);
+ CREATE TABLE j(l,m);
+ CREATE TABLE k(o);
+}
+
+set err [list 1 {SELECTs to the left and right of UNION ALL do not have the same number of result columns}]
+
+do_execsql_test 10.2 {
+ SELECT * FROM (SELECT * FROM t), j;
+}
+do_catchsql_test 10.3 {
+ SELECT * FROM t UNION ALL SELECT * FROM j
+} $err
+do_catchsql_test 10.4 {
+ SELECT * FROM (SELECT i FROM t UNION ALL SELECT l, m FROM j)
+} $err
+do_catchsql_test 10.5 {
+ SELECT * FROM (SELECT j FROM t UNION ALL SELECT * FROM j)
+} $err
+do_catchsql_test 10.6 {
+ SELECT * FROM (SELECT * FROM t UNION ALL SELECT * FROM j)
+} $err
+do_catchsql_test 10.7 {
+ SELECT * FROM (
+ SELECT * FROM t UNION ALL
+ SELECT l,m,l FROM j UNION ALL
+ SELECT * FROM k
+ )
+} $err
+do_catchsql_test 10.8 {
+ SELECT * FROM (
+ SELECT * FROM k UNION ALL
+ SELECT * FROM t UNION ALL
+ SELECT l,m,l FROM j
+ )
+} $err
+
finish_test
diff --git a/test/shared.test b/test/shared.test
index 37564e6..4eab476 100644
--- a/test/shared.test
+++ b/test/shared.test
@@ -904,9 +904,11 @@ do_test shared-$av.11.8 {
set res
} {1 4 {} 7}
if {[llength [info command sqlite3_shared_cache_report]]==1} {
- do_test shared-$av.11.9 {
- string tolower [sqlite3_shared_cache_report]
- } [string tolower [list [file nativename [file normalize test.db]] 2]]
+ ifcapable curdir {
+ do_test shared-$av.11.9 {
+ string tolower [sqlite3_shared_cache_report]
+ } [string tolower [list [file nativename [file normalize test.db]] 2]]
+ }
}
do_test shared-$av.11.11 {
@@ -1056,7 +1058,127 @@ do_test shared-$av-15.2 {
db close
db2 close
-}
+# Shared cache on a :memory: database. This only works for URI filenames.
+#
+do_test shared-$av-16.1 {
+ sqlite3 db1 file::memory: -uri 1
+ sqlite3 db2 file::memory: -uri 1
+ db1 eval {
+ CREATE TABLE t1(x); INSERT INTO t1 VALUES(1),(2),(3);
+ }
+ db2 eval {
+ SELECT x FROM t1 ORDER BY x;
+ }
+} {1 2 3}
+do_test shared-$av-16.2 {
+ db2 eval {
+ INSERT INTO t1 VALUES(99);
+ DELETE FROM t1 WHERE x=2;
+ }
+ db1 eval {
+ SELECT x FROM t1 ORDER BY x;
+ }
+} {1 3 99}
+
+# Verify that there is no cache sharing when ordinary (non-URI) filenames
+# are used.
+#
+do_test shared-$av-16.3 {
+ db1 close
+ db2 close
+ sqlite3 db1 :memory:
+ sqlite3 db2 :memory:
+ db1 eval {
+ CREATE TABLE t1(x); INSERT INTO t1 VALUES(4),(5),(6);
+ }
+ catchsql {
+ SELECT * FROM t1;
+ } db2
+} {1 {no such table: t1}}
+
+# Shared cache on named memory databases.
+#
+do_test shared-$av-16.4 {
+ db1 close
+ db2 close
+ forcedelete test.db test.db-wal test.db-journal
+ sqlite3 db1 file:test.db?mode=memory -uri 1
+ sqlite3 db2 file:test.db?mode=memory -uri 1
+ db1 eval {
+ CREATE TABLE t1(x); INSERT INTO t1 VALUES(1),(2),(3);
+ }
+ db2 eval {
+ SELECT x FROM t1 ORDER BY x;
+ }
+} {1 2 3}
+do_test shared-$av-16.5 {
+ db2 eval {
+ INSERT INTO t1 VALUES(99);
+ DELETE FROM t1 WHERE x=2;
+ }
+ db1 eval {
+ SELECT x FROM t1 ORDER BY x;
+ }
+} {1 3 99}
+do_test shared-$av-16.6 {
+ file exists test.db
+} {0} ;# Verify that the database is in-memory
+
+# Shared cache on named memory databases with different names.
+#
+do_test shared-$av-16.7 {
+ db1 close
+ db2 close
+ forcedelete test1.db test2.db
+ sqlite3 db1 file:test1.db?mode=memory -uri 1
+ sqlite3 db2 file:test2.db?mode=memory -uri 1
+ db1 eval {
+ CREATE TABLE t1(x); INSERT INTO t1 VALUES(1),(2),(3);
+ }
+ catchsql {
+ SELECT x FROM t1 ORDER BY x;
+ } db2
+} {1 {no such table: t1}}
+do_test shared-$av-16.8 {
+ file exists test1.db
+} {0} ;# Verify that the database is in-memory
+
+# Shared cache on named memory databases attached to readonly connections.
+#
+do_test shared-$av-16.8.1 {
+ db1 close
+ db2 close
+
+ sqlite3 db test1.db
+ db eval {
+ CREATE TABLE yy(a, b);
+ INSERT INTO yy VALUES(77, 88);
+ }
+ db close
+
+ sqlite3 db1 test1.db -uri 1 -readonly 1
+ sqlite3 db2 test2.db -uri 1
+
+ db1 eval {
+ ATTACH 'file:mem?mode=memory&cache=shared' AS shared;
+ CREATE TABLE shared.xx(a, b);
+ INSERT INTO xx VALUES(55, 66);
+ }
+ db2 eval {
+ ATTACH 'file:mem?mode=memory&cache=shared' AS shared;
+ SELECT * FROM xx;
+ }
+} {55 66}
+
+do_test shared-$av-16.8.2 { db1 eval { SELECT * FROM yy } } {77 88}
+do_test shared-$av-16.8.3 {
+ list [catch {db1 eval { INSERT INTO yy VALUES(1, 2) }} msg] $msg
+} {1 {attempt to write a readonly database}}
+
+db1 close
+db2 close
+
+} ;# end of autovacuum on/off loop
sqlite3_enable_shared_cache $::enable_shared_cache
finish_test
diff --git a/test/shared8.test b/test/shared8.test
new file mode 100644
index 0000000..600e02b
--- /dev/null
+++ b/test/shared8.test
@@ -0,0 +1,113 @@
+# 2012 May 15
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# The tests in this file are intended to show that closing one database
+# connection to a shared cache while other connections to the same cache
+# remain open (a) does not cause the schema to be reloaded and (b) does
+# not cause any other problems.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+ifcapable !shared_cache { finish_test ; return }
+set testprefix shared8
+
+db close
+set ::enable_shared_cache [sqlite3_enable_shared_cache 1]
+do_test 0.0 { sqlite3_enable_shared_cache } {1}
+
+proc roman {n} {
+ array set R {1 i 2 ii 3 iii 4 iv 5 v 6 vi 7 vii 8 viii 9 ix 10 x}
+ set R($n)
+}
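+
+# For example (illustrative only): [roman 4] returns "iv" and
+# [roman 9] returns "ix".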
+
+#-------------------------------------------------------------------------
+# The following tests work as follows:
+#
+# 1.0: Open connection [db1] and populate the database.
+#
+# 1.1: Using "PRAGMA writable_schema", destroy the database schema on
+# disk. The schema is still in memory, so it is possible to keep
+# using it, but any attempt to reload it from disk will fail.
+#
+# 1.3-4: Open connection db2. Check that it can see the db schema. Then
+# close db1 and check that db2 still works. This shows that closing
+# db1 did not reset the in-memory schema.
+#
+# 1.5-7: Similar to 1.3-4.
+#
+# 1.8: Close all database connections (deleting the in-memory schema).
+# Then open a new connection and check that it cannot read the db.
+#
+do_test 1.0 {
+ sqlite3 db1 test.db
+ db1 func roman roman
+ execsql {
+ CREATE TABLE t1(a, b);
+ INSERT INTO t1 VALUES(1, 1);
+ INSERT INTO t1 VALUES(2, 2);
+ INSERT INTO t1 VALUES(3, 3);
+ INSERT INTO t1 VALUES(4, 4);
+ CREATE VIEW v1 AS SELECT a, roman(b) FROM t1;
+ SELECT * FROM v1;
+ } db1
+} {1 i 2 ii 3 iii 4 iv}
+
+do_test 1.1 {
+ execsql {
+ PRAGMA writable_schema = 1;
+ DELETE FROM sqlite_master WHERE 1;
+ PRAGMA writable_schema = 0;
+ SELECT * FROM sqlite_master;
+ } db1
+} {}
+
+do_test 1.2 {
+ execsql { SELECT * FROM v1 } db1
+} {1 i 2 ii 3 iii 4 iv}
+
+do_test 1.3 {
+ sqlite3 db2 test.db
+ db2 func roman roman
+ execsql { SELECT * FROM v1 } db2
+} {1 i 2 ii 3 iii 4 iv}
+
+do_test 1.4 {
+ db1 close
+ execsql { SELECT * FROM v1 } db2
+} {1 i 2 ii 3 iii 4 iv}
+
+do_test 1.5 {
+ sqlite3 db3 test.db
+ db3 func roman roman
+ execsql { SELECT * FROM v1 } db3
+} {1 i 2 ii 3 iii 4 iv}
+
+do_test 1.6 {
+ execsql { SELECT * FROM v1 } db2
+} {1 i 2 ii 3 iii 4 iv}
+
+do_test 1.7 {
+ db2 close
+ execsql { SELECT * FROM v1 } db3
+} {1 i 2 ii 3 iii 4 iv}
+
+do_test 1.8 {
+ db3 close
+ sqlite3 db4 test.db
+ catchsql { SELECT * FROM v1 } db4
+} {1 {no such table: v1}}
+
+
+foreach db {db1 db2 db3 db4} { catch { $db close } }
+sqlite3_enable_shared_cache $::enable_shared_cache
+finish_test
+
diff --git a/test/shell1.test b/test/shell1.test
index 0cafc35..47f9e41 100644
--- a/test/shell1.test
+++ b/test/shell1.test
@@ -283,7 +283,7 @@ do_test shell1-3.2.4 {
# .databases List names and files of attached databases
do_test shell1-3.3.1 {
catchcmd "-csv test.db" ".databases"
-} "/0 +.*main +[string map {/ .} [string range [pwd] 0 10]].*/"
+} "/0 +.*main +[string map {/ .} [string range [get_pwd] 0 10]].*/"
do_test shell1-3.3.2 {
# too many arguments
catchcmd "test.db" ".databases BAD"
diff --git a/test/spellfix.test b/test/spellfix.test
new file mode 100644
index 0000000..afef981
--- /dev/null
+++ b/test/spellfix.test
@@ -0,0 +1,151 @@
+# 2012 July 12
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Tests for the spellfix1 virtual table module.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set testprefix spellfix
+
+ifcapable !vtab { finish_test ; return }
+
+register_spellfix_module db
+
+set vocab {
+rabbi rabbit rabbits rabble rabid rabies raccoon raccoons race raced racer
+racers races racetrack racial racially racing rack racked racket racketeer
+racketeering racketeers rackets racking racks radar radars radial radially
+radian radiance radiant radiantly radiate radiated radiates radiating radiation
+radiations radiator radiators radical radically radicals radices radii radio
+radioactive radioastronomy radioed radiography radioing radiology radios radish
+radishes radium radius radix radon raft rafter rafters rafts rag rage raged
+rages ragged raggedly raggedness raging rags ragweed raid raided raider raiders
+raiding raids rail railed railer railers railing railroad railroaded railroader
+railroaders railroading railroads rails railway railways raiment rain rainbow
+raincoat raincoats raindrop raindrops rained rainfall rainier rainiest raining
+rains rainstorm rainy raise raised raiser raisers raises raisin raising rake
+raked rakes raking rallied rallies rally rallying ram ramble rambler rambles
+rambling ramblings ramification ramifications ramp rampage rampant rampart
+ramps ramrod rams ran ranch ranched rancher ranchers ranches ranching rancid
+random randomization randomize randomized randomizes randomly randomness randy
+rang range ranged rangeland ranger rangers ranges ranging rangy rank ranked
+ranker rankers rankest ranking rankings rankle rankly rankness ranks ransack
+ransacked ransacking ransacks ransom ransomer ransoming ransoms rant ranted
+ranter ranters ranting rants rap rapacious rape raped raper rapes rapid
+rapidity rapidly rapids rapier raping rapport rapprochement raps rapt raptly
+rapture raptures rapturous rare rarely rareness rarer rarest rarity rascal
+rascally rascals rash rasher rashly rashness rasp raspberry rasped rasping
+rasps raster rat rate rated rater raters rates rather ratification ratified
+ratifies ratify ratifying rating ratings ratio ration rational rationale
+rationales rationalities rationality rationalization rationalizations
+rationalize rationalized rationalizes rationalizing rationally rationals
+rationing rations ratios rats rattle rattled rattler rattlers rattles
+rattlesnake rattlesnakes rattling raucous ravage ravaged ravager ravagers
+ravages ravaging rave raved raven ravening ravenous ravenously ravens raves
+ravine ravines raving ravings raw rawer rawest rawly rawness ray rays raze
+razor razors re reabbreviate reabbreviated reabbreviates reabbreviating reach
+reachability reachable reachably reached reacher reaches reaching reacquired
+react reacted reacting reaction reactionaries reactionary reactions reactivate
+reactivated reactivates reactivating reactivation reactive reactively
+reactivity reactor reactors reacts read readability readable reader readers
+readied readier readies readiest readily readiness reading readings readjusted
+readout readouts reads ready readying real realest realign realigned realigning
+realigns realism realist realistic realistically realists realities reality
+}
+
+do_test 1.1 {
+ execsql { CREATE VIRTUAL TABLE t1 USING spellfix1 }
+ foreach word $vocab {
+ execsql { INSERT INTO t1(word) VALUES($word) }
+ }
+} {}
+
+foreach {tn word res} {
+ 1 raxpi* {rasping 5 rasped 5 ragweed 5 raspberry 6 rasp 4}
+ 2 ril* {rail 4 railed 4 railer 4 railers 4 railing 4}
+ 3 rilis* {realism 6 realist 6 realistic 6 realistically 6 realists 6}
+ 4 reail* {real 3 realest 3 realign 3 realigned 3 realigning 3}
+ 5 ras* {rascal 3 rascally 3 rascals 3 rash 3 rasher 3}
+ 6 realistss* {realists 8 realigns 8 realistic 9 realistically 9 realest 7}
+ 7 realistss {realists 8 realist 7 realigns 8 realistic 9 realest 7}
+ 8 rllation* {realities 9 reality 7 rallied 7 railed 4}
+ 9 renstom* {rainstorm 8 ransom 6 ransomer 6 ransoming 6 ransoms 6}
+} {
+ do_execsql_test 1.2.$tn {
+ SELECT word, matchlen FROM t1 WHERE word MATCH $word
+ ORDER BY score, word LIMIT 5
+ } $res
+}
+
+
+do_execsql_test 2.1 {
+ CREATE VIRTUAL TABLE t2 USING spellfix1;
+ INSERT INTO t2 (word, soundslike) VALUES('school', 'skuul');
+ INSERT INTO t2 (word, soundslike) VALUES('psalm', 'sarm');
+ SELECT word, matchlen FROM t2 WHERE word MATCH 'sar*' LIMIT 5;
+} {psalm 4}
+
+do_execsql_test 2.2 {
+ SELECT word, matchlen FROM t2 WHERE word MATCH 'skol*' LIMIT 5;
+} {school 6}
+
+set vocab {
+kangaroo kanji kappa karate keel keeled keeling keels keen keener keenest
+keenly keenness keep keeper keepers keeping keeps ken kennel kennels kept
+kerchief kerchiefs kern kernel kernels kerosene ketchup kettle
+kettles key keyboard keyboards keyed keyhole keying keynote keypad keypads keys
+keystroke keystrokes keyword keywords kick kicked kicker kickers kicking
+kickoff kicks kid kidded kiddie kidding kidnap kidnapper kidnappers kidnapping
+kidnappings kidnaps kidney kidneys kids kill killed killer killers killing
+killingly killings killjoy kills kilobit kilobits kiloblock kilobyte kilobytes
+kilogram kilograms kilohertz kilohm kilojoule kilometer kilometers kiloton
+kilovolt kilowatt kiloword kimono kin kind kinder kindergarten kindest
+kindhearted kindle kindled kindles kindling kindly kindness kindred kinds
+kinetic king kingdom kingdoms kingly kingpin kings kink kinky kinship kinsman
+kiosk kiss kissed kisser kissers kisses kissing kit kitchen kitchenette
+kitchens kite kited kites kiting kits kitten kittenish kittens kitty klaxon
+kludge kludges klystron knack knapsack knapsacks knave knaves knead kneads knee
+kneecap kneed kneeing kneel kneeled kneeling kneels knees knell knells knelt
+knew knife knifed knifes knifing knight knighted knighthood knighting knightly
+knights knit knits knives knob knobs knock knockdown knocked knocker knockers
+knocking knockout knocks knoll knolls knot knots knotted knotting know knowable
+knower knowhow knowing knowingly knowledge knowledgeable known knows knuckle
+knuckled knuckles koala kosher kudo
+}
+
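+# The costs table below is used as a spellfix1 "edit_cost_table". Each
+# row is assumed to give the cost of substituting cFrom with cTo when
+# computing edit distances for language iLang; here the vowel
+# substitutions a->e, e->i, i->o, o->u and u->a are made cheap (cost 1),
+# so that, for example, the misspelling "kellj" matches "killjoy" more
+# readily than it otherwise would.
+#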
+do_execsql_test 3.1 {
+ CREATE TABLE costs(iLang, cFrom, cTo, iCost);
+ INSERT INTO costs VALUES(0, 'a', 'e', 1);
+ INSERT INTO costs VALUES(0, 'e', 'i', 1);
+ INSERT INTO costs VALUES(0, 'i', 'o', 1);
+ INSERT INTO costs VALUES(0, 'o', 'u', 1);
+ INSERT INTO costs VALUES(0, 'u', 'a', 1);
+ CREATE VIRTUAL TABLE t3 USING spellfix1(edit_cost_table=costs);
+}
+
+do_test 3.2 {
+ foreach w $vocab {
+ execsql { INSERT INTO t3(word) VALUES($w) }
+ }
+} {}
+
+breakpoint
+foreach {tn word res} {
+ 1 kos* {kosher 3 kiosk 4 kudo 2 kiss 3 kissed 3}
+ 2 kellj* {killjoy 5 kill 4 killed 4 killer 4 killers 4}
+ 3 kellj {kill 4 kills 5 killjoy 7 keel 4 killed 6}
+} {
+  do_execsql_test 3.3.$tn {
+ SELECT word, matchlen FROM t3 WHERE word MATCH $word
+ ORDER BY score, word LIMIT 5
+ } $res
+}
+
+finish_test
diff --git a/test/tclsqlite.test b/test/tclsqlite.test
index c8b0303..c954c71 100644
--- a/test/tclsqlite.test
+++ b/test/tclsqlite.test
@@ -319,14 +319,23 @@ do_test tcl-8.1 {
execsql {INSERT INTO t1 VALUES(30,NULL)}
db eval {SELECT * FROM t1 WHERE b IS NULL}
} {30 NaN}
+proc concatFunc args {return [join $args {}]}
do_test tcl-8.2 {
+ db function concat concatFunc
+ db eval {SELECT concat('a', b, 'z') FROM t1 WHERE b is NULL}
+} {aNaNz}
+do_test tcl-8.3 {
db nullvalue NULL
db nullvalue
} {NULL}
-do_test tcl-8.3 {
+do_test tcl-8.4 {
db nullvalue {}
db eval {SELECT * FROM t1 WHERE b IS NULL}
} {30 {}}
+do_test tcl-8.5 {
+ db function concat concatFunc
+ db eval {SELECT concat('a', b, 'z') FROM t1 WHERE b is NULL}
+} {az}
# Test the return type of user-defined functions
#
diff --git a/test/tester.tcl b/test/tester.tcl
index 07eebcb..68b2c8d 100644
--- a/test/tester.tcl
+++ b/test/tester.tcl
@@ -19,6 +19,8 @@
#
# Commands to manipulate the db and the file-system at a high level:
#
+# is_relative_file
+# test_pwd
# get_pwd
# copy_file FROM TO
# delete_file FILENAME
@@ -212,6 +214,34 @@ proc do_copy_file {force from to} {
}
}
+# Check if a file name is relative
+#
+proc is_relative_file { file } {
+ return [expr {[file pathtype $file] != "absolute"}]
+}
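+
+# For example (illustrative only, assuming Unix-style path syntax):
+#
+#   is_relative_file test.db       ;# => 1
+#   is_relative_file /tmp/test.db  ;# => 0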
+
+# If the VFS supports using the current directory, return [get_pwd]
+# followed by the first suffix argument; otherwise return only the
+# second suffix. Both suffixes default to an empty string, and the
+# second suffix defaults to the first.
+#
+proc test_pwd { args } {
+ if {[llength $args] > 0} {
+ set suffix1 [lindex $args 0]
+ if {[llength $args] > 1} {
+ set suffix2 [lindex $args 1]
+ } else {
+ set suffix2 $suffix1
+ }
+ } else {
+ set suffix1 ""; set suffix2 ""
+ }
+ ifcapable curdir {
+ return "[get_pwd]$suffix1"
+ } else {
+ return $suffix2
+ }
+}
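+
+# For example, uri.test builds "file:" URIs using a construct of the
+# form:
+#
+#   set uri [string map [list PWD/ [test_pwd /]] $uri]
+#
+# which expands "PWD/" to "[get_pwd]/" when the VFS supports the current
+# directory, and to just "/" otherwise.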
+
# Delete a file or directory
#
proc delete_file {args} {
diff --git a/test/uri.test b/test/uri.test
index 93a32b7..af1ad67 100644
--- a/test/uri.test
+++ b/test/uri.test
@@ -52,11 +52,24 @@ foreach {tn uri file} {
16 file://localhostPWD/test.db%3Fhello test.db?hello
} {
+
+ ifcapable !curdir { if {$tn==3} break }
+
if {$tcl_platform(platform)=="windows"} {
+ #
+ # NOTE: Due to limits on legal characters for file names imposed by
+ # Windows, we must skip the final two tests here (i.e. the
+ # question mark is illegal in a file name on Windows).
+ #
if {$tn>14} break
- set uri [string map [list PWD /[get_pwd]] $uri]
+
+ #
+ # NOTE: On Windows, we need to account for the fact that the current
+ # directory does not start with a forward slash.
+ #
+ set uri [string map [list PWD/ /[test_pwd /]] $uri]
} else {
- set uri [string map [list PWD [get_pwd]] $uri]
+ set uri [string map [list PWD/ [test_pwd /]] $uri]
}
if {[file isdir $file]} {error "$file is a directory"}
diff --git a/test/vtab1.test b/test/vtab1.test
index 38aec09..3409943 100644
--- a/test/vtab1.test
+++ b/test/vtab1.test
@@ -1222,6 +1222,10 @@ do_test vtab1-17.1 {
}
} {}
+do_test vtab1-17.2 {
+ execsql { DELETE FROM sqlite_master WHERE sql LIKE 'insert%' }
+} {}
+
#-------------------------------------------------------------------------
# The following tests - vtab1-18.* - test that the optimization of LIKE
# constraints in where.c plays well with virtual tables.
@@ -1275,4 +1279,18 @@ foreach {tn sql res filter} {
}
do_execsql_test 18.2.x { PRAGMA case_sensitive_like = OFF }
+#-------------------------------------------------------------------------
+# Test that an existing module may not be overridden.
+#
+do_test 19.1 {
+ sqlite3 db2 test.db
+ register_echo_module [sqlite3_connection_pointer db2]
+} SQLITE_OK
+do_test 19.2 {
+ register_echo_module [sqlite3_connection_pointer db2]
+} SQLITE_MISUSE
+do_test 19.3 {
+ db2 close
+} {}
+
finish_test
diff --git a/test/wal.test b/test/wal.test
index 32b2608..24ce5f8 100644
--- a/test/wal.test
+++ b/test/wal.test
@@ -1478,7 +1478,11 @@ foreach pgsz {512 1024 2048 4096 8192 16384 32768 65536} {
# Test that when 1 or more pages are recovered from a WAL file,
# sqlite3_log() is invoked to report this to the user.
#
-set walfile [file nativename [file join [get_pwd] test.db-wal]]
+ifcapable curdir {
+ set walfile [file nativename [file join [get_pwd] test.db-wal]]
+} else {
+ set walfile test.db-wal
+}
catch {db close}
forcedelete test.db
do_test wal-23.1 {
diff --git a/test/wal2.test b/test/wal2.test
index f30c011..4371e98 100644
--- a/test/wal2.test
+++ b/test/wal2.test
@@ -86,7 +86,7 @@ proc incr_tvfs_hdr {file idx incrval} {
#
# 2. Attempt to read the database using the reader. Before the reader
# has a chance to snapshot the wal-index header, increment one
-# of the the integer fields (so that the reader ends up with a corrupted
+# of the integer fields (so that the reader ends up with a corrupted
# header).
#
# 3. Check that the reader recovers the wal-index and reads the correct
@@ -126,9 +126,11 @@ set RECOVER [list \
{1 7 unlock exclusive} {0 1 unlock exclusive} \
]
set READ [list \
- {4 1 lock exclusive} {4 1 unlock exclusive} \
{4 1 lock shared} {4 1 unlock shared} \
]
+set INITSLOT [list \
+ {4 1 lock exclusive} {4 1 unlock exclusive} \
+]
foreach {tn iInsert res wal_index_hdr_mod wal_locks} "
2 5 {5 15} 0 {$RECOVER $READ}
@@ -141,7 +143,7 @@ foreach {tn iInsert res wal_index_hdr_mod wal_locks} "
9 12 {12 78} 7 {$RECOVER $READ}
10 13 {13 91} 8 {$RECOVER $READ}
11 14 {14 105} 9 {$RECOVER $READ}
- 12 15 {15 120} -1 {$READ}
+ 12 15 {15 120} -1 {$INITSLOT $READ}
" {
do_test wal2-1.$tn.1 {
diff --git a/test/wal3.test b/test/wal3.test
index ccab93e..18e6075 100644
--- a/test/wal3.test
+++ b/test/wal3.test
@@ -655,7 +655,7 @@ T filter xShmLock
T script lock_callback
proc lock_callback {method file handle spec} {
- if {$spec == "4 1 unlock exclusive"} {
+ if {$spec == "1 7 unlock exclusive"} {
T filter {}
set ::r [catchsql { SELECT * FROM b } db2]
}
diff --git a/test/walro.test b/test/walro.test
index 3ae7d53..465ce83 100644
--- a/test/walro.test
+++ b/test/walro.test
@@ -56,6 +56,7 @@ do_multiclient_test tn {
do_test 1.1.1 {
code2 { sqlite3 db2 test.db }
sql2 {
+ PRAGMA auto_vacuum = 0;
PRAGMA journal_mode = WAL;
CREATE TABLE t1(x, y);
INSERT INTO t1 VALUES('a', 'b');
@@ -163,6 +164,132 @@ do_multiclient_test tn {
do_test 1.3.2.4 {
code1 { sqlite3_extended_errcode db }
} {SQLITE_READONLY_RECOVERY}
+
+ #-----------------------------------------------------------------------
+ # Test cases 1.4.* check that checkpoints and log wraps don't prevent
+ # read-only connections from reading the database.
+ do_test 1.4.1 {
+ code1 { db close }
+ forcedelete test.db-shm
+ file exists test.db-shm
+ } {0}
+
+  # Open one read-only and one read-write connection. Write some data
+  # and then run a checkpoint using the read-write connection. Then check
+  # that the read-only connection can still read the database.
+ do_test 1.4.2 {
+ code1 { sqlite3 db file:test.db?readonly_shm=1 }
+ code2 { sqlite3 db2 test.db }
+ csql2 {
+ INSERT INTO t1 VALUES(1, 2);
+ INSERT INTO t1 VALUES(3, 4);
+ INSERT INTO t1 VALUES(5, 6);
+ PRAGMA wal_checkpoint;
+ }
+ } {0 {0 3 3}}
+ do_test 1.4.3 {
+ csql1 { SELECT * FROM t1 }
+ } {0 {a b c d e f g h i j k l 1 2 3 4 5 6}}
+
+ # Using the read-write connection, open a transaction and write lots
+ # of data - causing a cache spill and a log wrap. Then check that the
+ # read-only connection can still read the database.
+ do_test 1.4.4.1 {
+ csql2 {
+ PRAGMA cache_size = 10;
+ BEGIN;
+ CREATE TABLE t2(x, y);
+ INSERT INTO t2 VALUES('abc', 'xyz');
+ INSERT INTO t2 SELECT x||y, y||x FROM t2;
+ INSERT INTO t2 SELECT x||y, y||x FROM t2;
+ INSERT INTO t2 SELECT x||y, y||x FROM t2;
+ INSERT INTO t2 SELECT x||y, y||x FROM t2;
+ INSERT INTO t2 SELECT x||y, y||x FROM t2;
+ INSERT INTO t2 SELECT x||y, y||x FROM t2;
+ INSERT INTO t2 SELECT x||y, y||x FROM t2;
+ INSERT INTO t2 SELECT x||y, y||x FROM t2;
+ INSERT INTO t2 SELECT x||y, y||x FROM t2;
+ }
+ file size test.db-wal
+ } {147800}
+ do_test 1.4.4.2 {
+ csql1 { SELECT * FROM t1 }
+ } {0 {a b c d e f g h i j k l 1 2 3 4 5 6}}
+ do_test 1.4.4.3 {
+ csql2 COMMIT
+ csql1 { SELECT count(*) FROM t2 }
+ } {0 512}
+ do_test 1.4.5 {
+ code2 { db2 close }
+ code1 { db close }
+ } {}
+}
+
+forcedelete test.db
+
+#-----------------------------------------------------------------------
+# Test cases 2.* check that a read-only connection may read the
+# database file while a checkpoint operation is ongoing.
+#
+do_multiclient_test tn {
+ # Do not run tests with the connections in the same process.
+ #
+ if {$tn==2} continue
+
+ # Close all connections and delete the database.
+ #
+ code1 { db close }
+ code2 { db2 close }
+ code3 { db3 close }
+ forcedelete test.db
+ forcedelete walro
+
+ foreach c {code1 code2 code3} {
+ $c {
+ sqlite3_shutdown
+ sqlite3_config_uri 1
+ }
+ }
+
+ proc tv_hook {x file args} {
+ if {[file tail $file]=="test.db-wal"} {
+ do_test 2.1.2 {
+ code2 { sqlite3 db2 file:test.db?readonly_shm=1 }
+ csql2 { SELECT count(*) FROM t2 }
+ } {0 4}
+ do_test 2.1.3 {
+ code2 { db2 close }
+ } {}
+ }
+ }
+
+ do_test 2.1.1 {
+ testvfs tv -default 1 -fullshm 1
+ tv script tv_hook
+ tv filter {}
+ code1 { sqlite3 db test.db }
+ csql1 {
+ PRAGMA auto_vacuum = 0;
+ PRAGMA journal_mode = WAL;
+ BEGIN;
+ CREATE TABLE t2(x, y);
+ INSERT INTO t2 VALUES('abc', 'xyz');
+ INSERT INTO t2 SELECT x||y, y||x FROM t2;
+ INSERT INTO t2 SELECT x||y, y||x FROM t2;
+ COMMIT;
+ }
+ } {0 wal}
+
+ tv filter xSync
+ set res [csql1 { PRAGMA wal_checkpoint }]
+ do_test 2.1.4 { set res } {0 {0 2 2}}
+
+ do_test 2.1.5 {
+ code1 { db close }
+ code1 { tv delete }
+ } {}
}
finish_test
+
+
diff --git a/test/walthread.test b/test/walthread.test
index cbd6371..6249ce1 100644
--- a/test/walthread.test
+++ b/test/walthread.test
@@ -277,8 +277,8 @@ do_thread_test2 walthread-1 -seconds $seconds(walthread-1) -init {
proc write_transaction {} {
db eval {
BEGIN;
- INSERT INTO t1 VALUES(randomblob(100));
- INSERT INTO t1 VALUES(randomblob(100));
+ INSERT INTO t1 VALUES(randomblob(101 + $::E(pid)));
+ INSERT INTO t1 VALUES(randomblob(101 + $::E(pid)));
INSERT INTO t1 SELECT md5sum(x) FROM t1;
COMMIT;
}
diff --git a/test/whereD.test b/test/whereD.test
new file mode 100644
index 0000000..58fe934
--- /dev/null
+++ b/test/whereD.test
@@ -0,0 +1,189 @@
+# 2012 August 24
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing that an index may be used as a covering
+# index when there are OR expressions in the WHERE clause.
+#
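+# For example (an illustrative restatement of the tests below), a query
+# such as
+#
+#   SELECT k FROM t WHERE (i=1 AND j=1) OR (i=2 AND j=2);
+#
+# should be answered entirely from the index ijk created below, without
+# visiting table t itself. The do_searchcount_test cases later in this
+# file check this by counting b-tree search operations.
+#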
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set ::testprefix whereD
+
+do_execsql_test 1.1 {
+ CREATE TABLE t(i,j,k,m,n);
+ CREATE INDEX ijk ON t(i,j,k);
+ CREATE INDEX jmn ON t(j,m,n);
+
+ INSERT INTO t VALUES(3, 3, 'three', 3, 'tres');
+ INSERT INTO t VALUES(2, 2, 'two', 2, 'dos');
+ INSERT INTO t VALUES(1, 1, 'one', 1, 'uno');
+ INSERT INTO t VALUES(4, 4, 'four', 4, 'cuatro');
+}
+
+do_execsql_test 1.2 {
+ SELECT k FROM t WHERE (i=1 AND j=1) OR (i=2 AND j=2);
+} {one two}
+do_execsql_test 1.3 {
+ SELECT k FROM t WHERE (i=1 AND j=1) OR (+i=2 AND j=2);
+} {one two}
+do_execsql_test 1.4 {
+ SELECT n FROM t WHERE (i=1 AND j=1) OR (i=2 AND j=2);
+} {uno dos}
+do_execsql_test 1.5 {
+ SELECT k, n FROM t WHERE (i=1 AND j=1) OR (i=2 AND j=2);
+} {one uno two dos}
+do_execsql_test 1.6 {
+ SELECT k FROM t WHERE (i=1 AND j=1) OR (i=2 AND j=2) OR (i=3 AND j=3);
+} {one two three}
+do_execsql_test 1.7 {
+ SELECT n FROM t WHERE (i=1 AND j=1) OR (i=2 AND j=2) OR (i=3 AND j=3);
+} {uno dos tres}
+do_execsql_test 1.8 {
+ SELECT k FROM t WHERE (i=1 AND j=1) OR (j=2 AND m=2);
+} {one two}
+do_execsql_test 1.9 {
+ SELECT k FROM t WHERE (i=1 AND j=1) OR (i=2 AND j=2) OR (j=3 AND m=3);
+} {one two three}
+do_execsql_test 1.10 {
+ SELECT n FROM t WHERE (i=1 AND j=1) OR (i=2 AND j=2) OR (j=3 AND m=3);
+} {uno dos tres}
+do_execsql_test 1.11 {
+ SELECT k FROM t WHERE (i=1 AND j=1) OR (j=2 AND m=2) OR (i=3 AND j=3);
+} {one two three}
+do_execsql_test 1.12 {
+ SELECT n FROM t WHERE (i=1 AND j=1) OR (j=2 AND m=2) OR (i=3 AND j=3);
+} {uno dos tres}
+do_execsql_test 1.13 {
+ SELECT k FROM t WHERE (j=1 AND m=1) OR (i=2 AND j=2) OR (i=3 AND j=3);
+} {one two three}
+do_execsql_test 1.14 {
+ SELECT k FROM t WHERE (i=1 AND j=1) OR (j=2 AND i=2) OR (i=3 AND j=3);
+} {one two three}
+do_execsql_test 1.15 {
+ SELECT k FROM t WHERE (i=1 AND j=2) OR (i=2 AND j=1) OR (i=3 AND j=4);
+} {}
+do_execsql_test 1.16 {
+ SELECT k FROM t WHERE (i=1 AND (j=1 or j=2)) OR (i=3 AND j=3);
+} {one three}
+
+do_execsql_test 2.0 {
+ CREATE TABLE t1(a,b,c,d);
+ CREATE INDEX t1b ON t1(b);
+ CREATE INDEX t1c ON t1(c);
+ CREATE INDEX t1d ON t1(d);
+ CREATE TABLE t2(x,y);
+ CREATE INDEX t2y ON t2(y);
+
+ INSERT INTO t1 VALUES(1,2,3,4);
+ INSERT INTO t1 VALUES(5,6,7,8);
+ INSERT INTO t2 VALUES(1,2);
+ INSERT INTO t2 VALUES(2,7);
+ INSERT INTO t2 VALUES(3,4);
+} {}
+do_execsql_test 2.1 {
+ SELECT a, x FROM t1 JOIN t2 ON +y=d OR x=7 ORDER BY a, x;
+} {1 3}
+do_execsql_test 2.2 {
+ SELECT a, x FROM t1 JOIN t2 ON y=d OR x=7 ORDER BY a, x;
+} {1 3}
+
+
+# Similar to [do_execsql_test], except that two elements are appended
+# to the result - the string "search" and the number of times the test
+# variable sqlite_search_count is incremented by running the supplied
+# SQL. e.g.
+#
+# do_searchcount_test 1.0 { SELECT * FROM t1 } {x y search 2}
+#
+proc do_searchcount_test {tn sql res} {
+ uplevel [subst -nocommands {
+ do_test $tn {
+ set ::sqlite_search_count 0
+ concat [db eval {$sql}] search [set ::sqlite_search_count]
+ } [list $res]
+ }]
+}
+
+do_execsql_test 3.0 {
+ CREATE TABLE t3(a, b, c);
+ CREATE UNIQUE INDEX i3 ON t3(a, b);
+ INSERT INTO t3 VALUES(1, 'one', 'i');
+ INSERT INTO t3 VALUES(3, 'three', 'iii');
+ INSERT INTO t3 VALUES(6, 'six', 'vi');
+ INSERT INTO t3 VALUES(2, 'two', 'ii');
+ INSERT INTO t3 VALUES(4, 'four', 'iv');
+ INSERT INTO t3 VALUES(5, 'five', 'v');
+
+ CREATE TABLE t4(x PRIMARY KEY, y);
+ INSERT INTO t4 VALUES('a', 'one');
+ INSERT INTO t4 VALUES('b', 'two');
+}
+
+do_searchcount_test 3.1 {
+ SELECT a, b FROM t3 WHERE (a=1 AND b='one') OR (a=2 AND b='two')
+} {1 one 2 two search 2}
+
+do_searchcount_test 3.2 {
+ SELECT a, c FROM t3 WHERE (a=1 AND b='one') OR (a=2 AND b='two')
+} {1 i 2 ii search 4}
+
+do_searchcount_test 3.4.1 {
+ SELECT y FROM t4 WHERE x='a'
+} {one search 2}
+do_searchcount_test 3.4.2 {
+ SELECT a, b FROM t3 WHERE
+ (a=1 AND b=(SELECT y FROM t4 WHERE x='a'))
+ OR (a=2 AND b='two')
+} {1 one 2 two search 4}
+do_searchcount_test 3.4.3 {
+ SELECT a, b FROM t3 WHERE
+ (a=2 AND b='two')
+ OR (a=1 AND b=(SELECT y FROM t4 WHERE x='a'))
+} {2 two 1 one search 4}
+do_searchcount_test 3.4.4 {
+ SELECT a, b FROM t3 WHERE
+ (a=2 AND b=(SELECT y FROM t4 WHERE x='b'))
+ OR (a=1 AND b=(SELECT y FROM t4 WHERE x='a'))
+} {2 two 1 one search 6}
+
+do_searchcount_test 3.5.1 {
+ SELECT a, b FROM t3 WHERE (a=1 AND b='one') OR rowid=4
+} {1 one 2 two search 2}
+do_searchcount_test 3.5.2 {
+ SELECT a, c FROM t3 WHERE (a=1 AND b='one') OR rowid=4
+} {1 i 2 ii search 2}
+
+# Ticket [d02e1406a58ea02d] (2012-10-04)
+# LEFT JOIN with an OR in the ON clause causes segfault
+#
+do_test 4.1 {
+ db eval {
+ CREATE TABLE t41(a,b,c);
+ INSERT INTO t41 VALUES(1,2,3), (4,5,6);
+ CREATE TABLE t42(d,e,f);
+ INSERT INTO t42 VALUES(3,6,9), (4,8,12);
+ SELECT * FROM t41 AS x LEFT JOIN t42 AS y ON (y.d=x.c) OR (y.e=x.b);
+ }
+} {1 2 3 3 6 9 4 5 6 {} {} {}}
+do_test 4.2 {
+ db eval {
+ CREATE INDEX t42d ON t42(d);
+ CREATE INDEX t42e ON t42(e);
+ SELECT * FROM t41 AS x LEFT JOIN t42 AS y ON (y.d=x.c) OR (y.e=x.b);
+ }
+} {1 2 3 3 6 9 4 5 6 {} {} {}}
+do_test 4.3 {
+ db eval {
+ SELECT * FROM t41 AS x LEFT JOIN t42 AS y ON (y.d=x.c) OR (y.d=x.b);
+ }
+} {1 2 3 3 6 9 4 5 6 {} {} {}}
+
+finish_test