summaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
authorHans-Christoph Steiner <hans@eds.org>2012-09-20 18:34:38 -0400
committerHans-Christoph Steiner <hans@eds.org>2012-09-20 18:34:38 -0400
commit487e15dc239ccdb3344d1c99ce120e872bab4a74 (patch)
treec986d492f6092ca7b4401d91515f74daed17fae2 /test
parent7bb481fda9ecb134804b49c2ce77ca28f7eea583 (diff)
Imported Upstream version 2.0.6
Diffstat (limited to 'test')
-rw-r--r--test/alter.test3
-rw-r--r--test/attach.test19
-rw-r--r--test/backcompat.test338
-rw-r--r--test/backup.test2
-rw-r--r--test/backup2.test25
-rw-r--r--test/bc_common.tcl72
-rw-r--r--test/bigfile.test6
-rw-r--r--test/bigfile2.test59
-rw-r--r--test/cache.test2
-rw-r--r--test/capi3.test19
-rw-r--r--test/capi3c.test19
-rw-r--r--test/capi3d.test26
-rw-r--r--test/check.test52
-rw-r--r--test/corruptF.test150
-rw-r--r--test/crash5.test4
-rw-r--r--test/crypto.test417
-rw-r--r--test/dbstatus.test17
-rw-r--r--test/dbstatus2.test25
-rw-r--r--test/distinct.test42
-rw-r--r--test/e_createtable.test17
-rw-r--r--test/e_delete.test11
-rw-r--r--test/e_droptrigger.test2
-rw-r--r--test/e_dropview.test2
-rw-r--r--test/e_expr.test12
-rw-r--r--test/e_fkey.test4
-rw-r--r--test/e_insert.test21
-rw-r--r--test/e_reindex.test6
-rw-r--r--test/e_select.test23
-rw-r--r--test/e_update.test12
-rw-r--r--test/e_uri.test16
-rw-r--r--test/e_vacuum.test4
-rw-r--r--test/eqp.test5
-rw-r--r--test/filectrl.test2
-rw-r--r--test/fts3_common.tcl134
-rw-r--r--test/fts3auto.test22
-rw-r--r--test/fts3defer.test34
-rw-r--r--test/fts3prefix2.test62
-rw-r--r--test/fts4check.test155
-rw-r--r--test/fts4content.test24
-rw-r--r--test/fts4langid.test485
-rw-r--r--test/fts4merge.test341
-rw-r--r--test/fts4merge2.test38
-rw-r--r--test/fts4merge3.test105
-rw-r--r--test/func.test44
-rw-r--r--test/fuzz-oss1.test2001
-rw-r--r--test/fuzzer1.test650
-rw-r--r--test/fuzzerfault.test92
-rw-r--r--test/in.test16
-rw-r--r--test/incrblob.test10
-rw-r--r--test/incrblob4.test90
-rw-r--r--test/incrvacuum2.test4
-rw-r--r--test/insert.test17
-rw-r--r--test/insert4.test175
-rw-r--r--test/io.test6
-rw-r--r--test/ioerr2.test2
-rw-r--r--test/join6.test42
-rw-r--r--test/journal2.test3
-rw-r--r--test/journal3.test7
-rw-r--r--test/malloc5.test4
-rw-r--r--test/memsubsys1.test10
-rw-r--r--test/minmax4.test150
-rw-r--r--test/misc7.test8
-rw-r--r--test/multiplex.test24
-rw-r--r--test/multiplex2.test70
-rw-r--r--test/multiplex3.test166
-rw-r--r--test/pager1.test154
-rw-r--r--test/permutations.test12
-rw-r--r--test/pragma.test30
-rw-r--r--test/quota-glob.test87
-rw-r--r--test/quota.test8
-rw-r--r--test/quota2.test271
-rw-r--r--test/randexpr1.test5
-rw-r--r--test/savepoint.test20
-rw-r--r--test/savepoint7.test96
-rw-r--r--test/schema5.test69
-rw-r--r--test/select1.test6
-rw-r--r--test/select4.test19
-rw-r--r--test/select9.test35
-rw-r--r--test/selectB.test71
-rw-r--r--test/selectC.test2
-rw-r--r--test/shared2.test42
-rw-r--r--test/shell1.test733
-rw-r--r--test/shell2.test197
-rw-r--r--test/shell3.test97
-rw-r--r--test/shell4.test116
-rw-r--r--test/shell5.test229
-rw-r--r--test/shrink.test43
-rw-r--r--test/stat.test2
-rw-r--r--test/subquery.test85
-rw-r--r--test/superlock.test5
-rw-r--r--test/syscall.test3
-rw-r--r--test/tclsqlite.test2
-rw-r--r--test/tester.tcl86
-rw-r--r--test/tkt-02a8e81d44.test5
-rw-r--r--test/tkt-2a5629202f.test71
-rw-r--r--test/tkt-385a5b56b9.test54
-rw-r--r--test/tkt-38cb5df375.test5
-rw-r--r--test/tkt-3a77c9714e.test73
-rw-r--r--test/tkt-7bbfb7d442.test156
-rw-r--r--test/tkt-80ba201079.test24
-rw-r--r--test/tkt-94c04eaadb.test2
-rw-r--r--test/tkt-b72787b1.test5
-rw-r--r--test/tkt-bdc6bbbb38.test90
-rw-r--r--test/tkt-d82e3f3721.test5
-rw-r--r--test/tkt-f777251dc7a.test7
-rw-r--r--test/tkt3527.test5
-rw-r--r--test/tkt3773.test5
-rw-r--r--test/tkt3838.test17
-rw-r--r--test/trace2.test9
-rw-r--r--test/trans3.test7
-rw-r--r--test/trigger1.test16
-rw-r--r--test/unixexcl.test45
-rw-r--r--test/uri.test8
-rw-r--r--test/vtab1.test91
-rw-r--r--test/vtabD.test10
-rw-r--r--test/vtab_shared.test36
-rw-r--r--test/wal.test48
-rw-r--r--test/wal2.test79
-rw-r--r--test/wal3.test7
-rw-r--r--test/wal5.test44
-rw-r--r--test/wal8.test90
-rw-r--r--test/walbig.test2
-rw-r--r--test/walcrash.test14
-rw-r--r--test/walcrash3.test129
-rw-r--r--test/walfault.test8
-rw-r--r--test/walpersist.test55
-rw-r--r--test/where.test6
-rw-r--r--test/where7.test2
-rw-r--r--test/where9.test30
-rw-r--r--test/whereC.test70
-rw-r--r--test/zerodamage.test119
131 files changed, 9648 insertions, 751 deletions
diff --git a/test/alter.test b/test/alter.test
index 1481bc2..aca71c4 100644
--- a/test/alter.test
+++ b/test/alter.test
@@ -349,7 +349,8 @@ db func trigfunc trigfunc
do_test alter-3.1.0 {
execsql {
CREATE TABLE t6(a, b, c);
- CREATE TRIGGER trig1 AFTER INSERT ON t6 BEGIN
+ -- Different case for the table name in the trigger.
+ CREATE TRIGGER trig1 AFTER INSERT ON T6 BEGIN
SELECT trigfunc('trig1', new.a, new.b, new.c);
END;
}
diff --git a/test/attach.test b/test/attach.test
index d57f5bf..be5f988 100644
--- a/test/attach.test
+++ b/test/attach.test
@@ -51,6 +51,25 @@ do_test attach-1.3 {
SELECT * FROM two.t2;
}
} {1 x 2 y}
+
+# Tests for the sqlite3_db_filename interface
+#
+do_test attach-1.3.1 {
+ file tail [sqlite3_db_filename db main]
+} {test.db}
+do_test attach-1.3.2 {
+ file tail [sqlite3_db_filename db MAIN]
+} {test.db}
+do_test attach-1.3.3 {
+ file tail [sqlite3_db_filename db temp]
+} {}
+do_test attach-1.3.4 {
+ file tail [sqlite3_db_filename db two]
+} {test2.db}
+do_test attach-1.3.5 {
+ file tail [sqlite3_db_filename db three]
+} {}
+
do_test attach-1.4 {
execsql {
SELECT * FROM t2;
diff --git a/test/backcompat.test b/test/backcompat.test
index e8e2f61..509dfe5 100644
--- a/test/backcompat.test
+++ b/test/backcompat.test
@@ -27,37 +27,13 @@ set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/lock_common.tcl
source $testdir/malloc_common.tcl
+source $testdir/bc_common.tcl
db close
-# Search for binaries to test against. Any executable files that match
-# our naming convention are assumed to be testfixture binaries to test
-# against.
-#
-set binaries [list]
-set pattern "[file tail [info nameofexec]]?*"
-if {$tcl_platform(platform)=="windows"} {
- set pattern [string map {\.exe {}} $pattern]
-}
-foreach file [glob -nocomplain $pattern] {
- if {[file executable $file] && [file isfile $file]} {lappend binaries $file}
-}
-if {[llength $binaries]==0} {
- puts "WARNING: No historical binaries to test against."
- puts "WARNING: No backwards-compatibility tests have been run."
+if {"" == [bc_find_binaries backcompat.test]} {
finish_test
return
}
-proc get_version {binary} {
- set chan [launch_testfixture $binary]
- set v [testfixture $chan { sqlite3 -version }]
- close $chan
- set v
-}
-foreach bin $binaries {
- puts -nonewline "Testing against $bin - "
- flush stdout
- puts "version [get_version $bin]"
-}
proc do_backcompat_test {rv bin1 bin2 script} {
@@ -93,7 +69,7 @@ proc do_backcompat_test {rv bin1 bin2 script} {
array set ::incompatible [list]
proc do_allbackcompat_test {script} {
- foreach bin $::binaries {
+ foreach bin $::BC(binaries) {
set nErr [set_test_counter errors]
foreach dir {0 1} {
@@ -275,96 +251,98 @@ do_allbackcompat_test {
# Test that FTS3 tables may be read/written by different versions of
# SQLite.
#
-set contents {
- CREATE VIRTUAL TABLE t1 USING fts3(a, b);
-}
-foreach {num doc} {
- one "jk zm jk eczkjblu urvysbnykk sk gnl jk ttvgf hmjf"
- two "jk bnhc jjrxpjkb mjpavjuhw fibokdry igju jk zm zm xh"
- three "wxe ogttbykvt uhzq xr iaf zf urvysbnykk aayxpmve oacaxgjoo mjpavjuhw"
- four "gazrt jk ephknonq myjp uenvbm wuvajhwqz jk zm xnxhf nvfasfh"
- five "zm aayxpmve csjqxhgj xnxhf xr jk aayxpmve xnxhf zm zm"
- six "sokcyf zm ogyavjvv jk zm fibokdry zm jk igju igju"
- seven "vgsld bvgimjik xuprtlyle jk akmikrqyt jk aayxpmve hkfoudzftq ddjj"
- eight "zm uhzq ovkyevlgv zk uenvbm csjqxhgj jk vgsld pgybs jk"
- nine "zm agmckuiu zexh fibokdry jk uhzq bu tugflixoex xnxhf sk"
-} {
- append contents "INSERT INTO t1 VALUES('$num', '$doc');"
-}
-do_allbackcompat_test {
- if {[code1 {set ::sqlite_options(fts3)}]
- && [code2 {set ::sqlite_options(fts3)}]
+ifcapable fts3 {
+ set contents {
+ CREATE VIRTUAL TABLE t1 USING fts3(a, b);
+ }
+ foreach {num doc} {
+ one "jk zm jk eczkjblu urvysbnykk sk gnl jk ttvgf hmjf"
+ two "jk bnhc jjrxpjkb mjpavjuhw fibokdry igju jk zm zm xh"
+ three "wxe ogttbykvt uhzq xr iaf zf urvysbnykk aayxpmve oacaxgjoo mjpavjuhw"
+ four "gazrt jk ephknonq myjp uenvbm wuvajhwqz jk zm xnxhf nvfasfh"
+ five "zm aayxpmve csjqxhgj xnxhf xr jk aayxpmve xnxhf zm zm"
+ six "sokcyf zm ogyavjvv jk zm fibokdry zm jk igju igju"
+ seven "vgsld bvgimjik xuprtlyle jk akmikrqyt jk aayxpmve hkfoudzftq ddjj"
+ eight "zm uhzq ovkyevlgv zk uenvbm csjqxhgj jk vgsld pgybs jk"
+ nine "zm agmckuiu zexh fibokdry jk uhzq bu tugflixoex xnxhf sk"
} {
-
- do_test backcompat-3.1 { sql1 $contents } {}
-
- foreach {n q} {
- 1 "SELECT * FROM t1 ORDER BY a, b"
- 2 "SELECT rowid FROM t1 WHERE a MATCH 'five'"
- 3 "SELECT * FROM t1 WHERE a MATCH 'five'"
- 4 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'jk'"
- 5 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'tug* OR eight'"
- } {
- do_test backcompat-3.2 [list sql1 $q] [sql2 $q]
- }
-
- do_test backcompat-3.3 { sql1 {
- INSERT INTO t1 SELECT * FROM t1;
- INSERT INTO t1 SELECT * FROM t1;
- INSERT INTO t1 SELECT * FROM t1;
- INSERT INTO t1 SELECT * FROM t1;
- INSERT INTO t1 SELECT * FROM t1;
- INSERT INTO t1 SELECT * FROM t1;
- INSERT INTO t1 SELECT * FROM t1;
- INSERT INTO t1 SELECT * FROM t1;
- } } {}
-
- foreach {n q} {
- 1 "SELECT * FROM t1 ORDER BY a, b"
- 2 "SELECT rowid FROM t1 WHERE a MATCH 'five'"
- 3 "SELECT * FROM t1 WHERE a MATCH 'five'"
- 4 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'jk'"
- 5 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'tug* OR eight'"
- } {
- do_test backcompat-3.4 [list sql1 $q] [sql2 $q]
- }
-
- set alphabet "a b c d e f g h i j k l m n o p q r s t u v w x y z 1 2 3 4"
- for {set i 0} {$i < 900} {incr i} {
- set term "[lindex $alphabet [expr $i/30]][lindex $alphabet [expr $i%30]] "
- sql1 "INSERT INTO t1 VALUES($i, '[string repeat $term 14]')"
- }
-
- foreach {n q} {
- 1 "SELECT * FROM t1 ORDER BY a, b"
- 2 "SELECT rowid FROM t1 WHERE a MATCH 'five'"
- 3 "SELECT * FROM t1 WHERE a MATCH 'five'"
- 4 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'jk'"
- 5 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'tug* OR eight'"
-
- 6 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'aa'"
- 7 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH '44'"
- 8 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'a*'"
- } {
- do_test backcompat-3.5 [list sql1 $q] [sql2 $q]
- }
-
- do_test backcompat-3.6 {
- sql1 "SELECT optimize(t1) FROM t1 LIMIT 1"
- } {{Index optimized}}
-
- foreach {n q} {
- 1 "SELECT * FROM t1 ORDER BY a, b"
- 2 "SELECT rowid FROM t1 WHERE a MATCH 'five'"
- 3 "SELECT * FROM t1 WHERE a MATCH 'five'"
- 4 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'jk'"
- 5 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'tug* OR eight'"
-
- 6 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'aa'"
- 7 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH '44'"
- 8 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'a*'"
+ append contents "INSERT INTO t1 VALUES('$num', '$doc');"
+ }
+ do_allbackcompat_test {
+ if {[code1 {set ::sqlite_options(fts3)}]
+ && [code2 {set ::sqlite_options(fts3)}]
} {
- do_test backcompat-3.7 [list sql1 $q] [sql2 $q]
+
+ do_test backcompat-3.1 { sql1 $contents } {}
+
+ foreach {n q} {
+ 1 "SELECT * FROM t1 ORDER BY a, b"
+ 2 "SELECT rowid FROM t1 WHERE a MATCH 'five'"
+ 3 "SELECT * FROM t1 WHERE a MATCH 'five'"
+ 4 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'jk'"
+ 5 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'tug* OR eight'"
+ } {
+ do_test backcompat-3.2 [list sql1 $q] [sql2 $q]
+ }
+
+ do_test backcompat-3.3 { sql1 {
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ INSERT INTO t1 SELECT * FROM t1;
+ } } {}
+
+ foreach {n q} {
+ 1 "SELECT * FROM t1 ORDER BY a, b"
+ 2 "SELECT rowid FROM t1 WHERE a MATCH 'five'"
+ 3 "SELECT * FROM t1 WHERE a MATCH 'five'"
+ 4 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'jk'"
+ 5 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'tug* OR eight'"
+ } {
+ do_test backcompat-3.4 [list sql1 $q] [sql2 $q]
+ }
+
+ set alphabet "a b c d e f g h i j k l m n o p q r s t u v w x y z 1 2 3 4"
+ for {set i 0} {$i < 900} {incr i} {
+ set term "[lindex $alphabet [expr $i/30]][lindex $alphabet [expr $i%30]] "
+ sql1 "INSERT INTO t1 VALUES($i, '[string repeat $term 14]')"
+ }
+
+ foreach {n q} {
+ 1 "SELECT * FROM t1 ORDER BY a, b"
+ 2 "SELECT rowid FROM t1 WHERE a MATCH 'five'"
+ 3 "SELECT * FROM t1 WHERE a MATCH 'five'"
+ 4 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'jk'"
+ 5 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'tug* OR eight'"
+
+ 6 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'aa'"
+ 7 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH '44'"
+ 8 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'a*'"
+ } {
+ do_test backcompat-3.5 [list sql1 $q] [sql2 $q]
+ }
+
+ do_test backcompat-3.6 {
+ sql1 "SELECT optimize(t1) FROM t1 LIMIT 1"
+ } {{Index optimized}}
+
+ foreach {n q} {
+ 1 "SELECT * FROM t1 ORDER BY a, b"
+ 2 "SELECT rowid FROM t1 WHERE a MATCH 'five'"
+ 3 "SELECT * FROM t1 WHERE a MATCH 'five'"
+ 4 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'jk'"
+ 5 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'tug* OR eight'"
+
+ 6 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'aa'"
+ 7 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH '44'"
+ 8 "SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'a*'"
+ } {
+ do_test backcompat-3.7 [list sql1 $q] [sql2 $q]
+ }
}
}
}
@@ -373,72 +351,74 @@ do_allbackcompat_test {
# Test that Rtree tables may be read/written by different versions of
# SQLite.
#
-set contents {
- CREATE VIRTUAL TABLE t1 USING rtree(id, x1, x2, y1, y2);
-}
-foreach {id x1 x2 y1 y2} {
- 1 -47.64 43.87 33.86 34.42 2 -21.51 17.32 2.05 31.04
- 3 -43.67 -38.33 -19.79 3.43 4 32.41 35.16 9.12 19.82
- 5 33.28 34.87 14.78 28.26 6 49.31 116.59 -9.87 75.09
- 7 -14.93 34.51 -17.64 64.09 8 -43.05 23.43 -1.19 69.44
- 9 44.79 133.56 28.09 80.30 10 -2.66 81.47 -41.38 -10.46
- 11 -42.89 -3.54 15.76 71.63 12 -3.50 84.96 -11.64 64.95
- 13 -45.69 26.25 11.14 55.06 14 -44.09 11.23 17.52 44.45
- 15 36.23 133.49 -19.38 53.67 16 -17.89 81.54 14.64 50.61
- 17 -41.97 -24.04 -39.43 28.95 18 -5.85 7.76 -6.38 47.02
- 19 18.82 27.10 42.82 100.09 20 39.17 113.45 26.14 73.47
- 21 22.31 103.17 49.92 106.05 22 -43.06 40.38 -1.75 76.08
- 23 2.43 57.27 -14.19 -3.83 24 -47.57 -4.35 8.93 100.06
- 25 -37.47 49.14 -29.11 8.81 26 -7.86 75.72 49.34 107.42
- 27 1.53 45.49 20.36 49.74 28 -48.48 32.54 28.81 54.45
- 29 2.67 39.77 -4.05 13.67 30 4.11 62.88 -47.44 -5.72
- 31 -21.47 51.75 37.25 116.09 32 45.59 111.37 -6.43 43.64
- 33 35.23 48.29 23.54 113.33 34 16.61 68.35 -14.69 65.97
- 35 13.98 16.60 48.66 102.87 36 19.74 23.84 31.15 77.27
- 37 -27.61 24.43 7.96 94.91 38 -34.77 12.05 -22.60 -6.29
- 39 -25.83 8.71 -13.48 -12.53 40 -17.11 -1.01 18.06 67.89
- 41 14.13 71.72 -3.78 39.25 42 23.75 76.00 -16.30 8.23
- 43 -39.15 28.63 38.12 125.88 44 48.62 86.09 36.49 102.95
- 45 -31.39 -21.98 2.52 89.78 46 5.65 56.04 15.94 89.10
- 47 18.28 95.81 46.46 143.08 48 30.93 102.82 -20.08 37.36
- 49 -20.78 -3.48 -5.58 35.46 50 49.85 90.58 -24.48 46.29
-} {
-if {$x1 >= $x2 || $y1 >= $y2} { error "$x1 $x2 $y1 $y2" }
- append contents "INSERT INTO t1 VALUES($id, $x1, $x2, $y1, $y2);"
-}
-set queries {
- 1 "SELECT id FROM t1 WHERE x1>10 AND x2<44"
- 2 "SELECT id FROM t1 WHERE y1<100"
- 3 "SELECT id FROM t1 WHERE y1<100 AND x1>0"
- 4 "SELECT id FROM t1 WHERE y1>10 AND x1>0 AND x2<50 AND y2<550"
-}
-do_allbackcompat_test {
- if {[code1 {set ::sqlite_options(fts3)}]
- && [code2 {set ::sqlite_options(fts3)}]
+ifcapable rtree {
+ set contents {
+ CREATE VIRTUAL TABLE t1 USING rtree(id, x1, x2, y1, y2);
+ }
+ foreach {id x1 x2 y1 y2} {
+ 1 -47.64 43.87 33.86 34.42 2 -21.51 17.32 2.05 31.04
+ 3 -43.67 -38.33 -19.79 3.43 4 32.41 35.16 9.12 19.82
+ 5 33.28 34.87 14.78 28.26 6 49.31 116.59 -9.87 75.09
+ 7 -14.93 34.51 -17.64 64.09 8 -43.05 23.43 -1.19 69.44
+ 9 44.79 133.56 28.09 80.30 10 -2.66 81.47 -41.38 -10.46
+ 11 -42.89 -3.54 15.76 71.63 12 -3.50 84.96 -11.64 64.95
+ 13 -45.69 26.25 11.14 55.06 14 -44.09 11.23 17.52 44.45
+ 15 36.23 133.49 -19.38 53.67 16 -17.89 81.54 14.64 50.61
+ 17 -41.97 -24.04 -39.43 28.95 18 -5.85 7.76 -6.38 47.02
+ 19 18.82 27.10 42.82 100.09 20 39.17 113.45 26.14 73.47
+ 21 22.31 103.17 49.92 106.05 22 -43.06 40.38 -1.75 76.08
+ 23 2.43 57.27 -14.19 -3.83 24 -47.57 -4.35 8.93 100.06
+ 25 -37.47 49.14 -29.11 8.81 26 -7.86 75.72 49.34 107.42
+ 27 1.53 45.49 20.36 49.74 28 -48.48 32.54 28.81 54.45
+ 29 2.67 39.77 -4.05 13.67 30 4.11 62.88 -47.44 -5.72
+ 31 -21.47 51.75 37.25 116.09 32 45.59 111.37 -6.43 43.64
+ 33 35.23 48.29 23.54 113.33 34 16.61 68.35 -14.69 65.97
+ 35 13.98 16.60 48.66 102.87 36 19.74 23.84 31.15 77.27
+ 37 -27.61 24.43 7.96 94.91 38 -34.77 12.05 -22.60 -6.29
+ 39 -25.83 8.71 -13.48 -12.53 40 -17.11 -1.01 18.06 67.89
+ 41 14.13 71.72 -3.78 39.25 42 23.75 76.00 -16.30 8.23
+ 43 -39.15 28.63 38.12 125.88 44 48.62 86.09 36.49 102.95
+ 45 -31.39 -21.98 2.52 89.78 46 5.65 56.04 15.94 89.10
+ 47 18.28 95.81 46.46 143.08 48 30.93 102.82 -20.08 37.36
+ 49 -20.78 -3.48 -5.58 35.46 50 49.85 90.58 -24.48 46.29
} {
-
- do_test backcompat-4.1 { sql1 $contents } {}
-
- foreach {n q} $::queries {
- do_test backcompat-4.2.$n [list sql1 $q] [sql2 $q]
- }
-
- do_test backcompat-4.3 { sql1 {
- INSERT INTO t1 SELECT id+100, x1+10.0, x2+10.0, y1-10.0, y2-10.0 FROM t1;
- } } {}
-
- foreach {n q} $::queries {
- do_test backcompat-4.4.$n [list sql1 $q] [sql2 $q]
- }
-
- do_test backcompat-4.5 { sql2 {
- INSERT INTO t1 SELECT id+200, x1+20.0, x2+20.0, y1-20.0, y2-20.0 FROM t1;
- } } {}
-
- foreach {n q} $::queries {
- do_test backcompat-4.6.$n [list sql1 $q] [sql2 $q]
+ if {$x1 >= $x2 || $y1 >= $y2} { error "$x1 $x2 $y1 $y2" }
+ append contents "INSERT INTO t1 VALUES($id, $x1, $x2, $y1, $y2);"
+ }
+ set queries {
+ 1 "SELECT id FROM t1 WHERE x1>10 AND x2<44"
+ 2 "SELECT id FROM t1 WHERE y1<100"
+ 3 "SELECT id FROM t1 WHERE y1<100 AND x1>0"
+ 4 "SELECT id FROM t1 WHERE y1>10 AND x1>0 AND x2<50 AND y2<550"
+ }
+ do_allbackcompat_test {
+ if {[code1 {set ::sqlite_options(fts3)}]
+ && [code2 {set ::sqlite_options(fts3)}]
+ } {
+
+ do_test backcompat-4.1 { sql1 $contents } {}
+
+ foreach {n q} $::queries {
+ do_test backcompat-4.2.$n [list sql1 $q] [sql2 $q]
+ }
+
+ do_test backcompat-4.3 { sql1 {
+ INSERT INTO t1 SELECT id+100, x1+10.0, x2+10.0, y1-10.0, y2-10.0 FROM t1;
+ } } {}
+
+ foreach {n q} $::queries {
+ do_test backcompat-4.4.$n [list sql1 $q] [sql2 $q]
+ }
+
+ do_test backcompat-4.5 { sql2 {
+ INSERT INTO t1 SELECT id+200, x1+20.0, x2+20.0, y1-20.0, y2-20.0 FROM t1;
+ } } {}
+
+ foreach {n q} $::queries {
+ do_test backcompat-4.6.$n [list sql1 $q] [sql2 $q]
+ }
+
}
-
}
}
diff --git a/test/backup.test b/test/backup.test
index 6269885..4d7213c 100644
--- a/test/backup.test
+++ b/test/backup.test
@@ -463,7 +463,7 @@ do_test backup-4.5.3 {
db close
db2 close
#
-# End of backup-5.* tests.
+# End of backup-4.* tests.
#---------------------------------------------------------------------
#---------------------------------------------------------------------
diff --git a/test/backup2.test b/test/backup2.test
index 34924b0..9893199 100644
--- a/test/backup2.test
+++ b/test/backup2.test
@@ -142,21 +142,18 @@ do_test backup2-9 {
# Try to restore from an unreadable file.
#
if {$tcl_platform(platform)=="windows"} {
- do_test backup2-10 {
- forcedelete bu3.db
- file mkdir bu3.db
- set rc [catch {db restore temp bu3.db} res]
- lappend rc $res
- } {1 {cannot open source database: unable to open database file}}
-}
-if {$tcl_platform(platform)!="windows"} {
- do_test backup2-10 {
- forcedelete bu3.db
- file mkdir bu3.db
- set rc [catch {db restore temp bu3.db} res]
- lappend rc $res
- } {1 {cannot open source database: disk I/O error}}
+ set msg {cannot open source database: unable to open database file}
+} elseif {$tcl_platform(os)=="OpenBSD"} {
+ set msg {restore failed: file is encrypted or is not a database}
+} else {
+ set msg {cannot open source database: disk I/O error}
}
+do_test backup2-10 {
+ forcedelete bu3.db
+ file mkdir bu3.db
+ set rc [catch {db restore temp bu3.db} res]
+ lappend rc $res
+} [list 1 $msg]
# Try to restore from something that is not a database file.
#
diff --git a/test/bc_common.tcl b/test/bc_common.tcl
new file mode 100644
index 0000000..eb9b6db
--- /dev/null
+++ b/test/bc_common.tcl
@@ -0,0 +1,72 @@
+
+
+
+proc bc_find_binaries {zCaption} {
+ # Search for binaries to test against. Any executable files that match
+ # our naming convention are assumed to be testfixture binaries to test
+ # against.
+ #
+ set binaries [list]
+ set pattern "[file tail [info nameofexec]]?*"
+ if {$::tcl_platform(platform)=="windows"} {
+ set pattern [string map {\.exe {}} $pattern]
+ }
+ foreach file [glob -nocomplain $pattern] {
+ if {[file executable $file] && [file isfile $file]} {lappend binaries $file}
+ }
+
+ if {[llength $binaries]==0} {
+ puts "WARNING: No historical binaries to test against."
+ puts "WARNING: Omitting backwards-compatibility tests"
+ }
+
+ foreach bin $binaries {
+ puts -nonewline "Testing against $bin - "
+ flush stdout
+ puts "version [get_version $bin]"
+ }
+
+ set ::BC(binaries) $binaries
+ return $binaries
+}
+
+proc get_version {binary} {
+ set chan [launch_testfixture $binary]
+ set v [testfixture $chan { sqlite3 -version }]
+ close $chan
+ set v
+}
+
+proc do_bc_test {bin script} {
+
+ forcedelete test.db
+ set ::bc_chan [launch_testfixture $bin]
+
+ proc code1 {tcl} { uplevel #0 $tcl }
+ proc code2 {tcl} { testfixture $::bc_chan $tcl }
+ proc sql1 sql { code1 [list db eval $sql] }
+ proc sql2 sql { code2 [list db eval $sql] }
+
+ code1 { sqlite3 db test.db }
+ code2 { sqlite3 db test.db }
+
+ set bintag [string map {testfixture {}} $bin]
+ set bintag [string map {\.exe {}} $bintag]
+ if {$bintag == ""} {set bintag self}
+ set saved_prefix $::testprefix
+ append ::testprefix ".$bintag"
+
+ uplevel $script
+
+ set ::testprefix $saved_prefix
+
+ catch { code1 { db close } }
+ catch { code2 { db close } }
+ catch { close $::bc_chan }
+}
+
+proc do_all_bc_test {script} {
+ foreach bin $::BC(binaries) {
+ uplevel [list do_bc_test $bin $script]
+ }
+}
diff --git a/test/bigfile.test b/test/bigfile.test
index 52d74ed..d9470ac 100644
--- a/test/bigfile.test
+++ b/test/bigfile.test
@@ -69,7 +69,7 @@ do_test bigfile-1.1 {
# large files. So skip all of the remaining tests in this file.
#
db close
-if {[catch {fake_big_file 4096 [pwd]/test.db} msg]} {
+if {[catch {fake_big_file 4096 [get_pwd]/test.db} msg]} {
puts "**** Unable to create a file larger than 4096 MB. *****"
finish_test
return
@@ -109,7 +109,7 @@ do_test bigfile-1.4 {
} $::MAGIC_SUM
db close
-if {[catch {fake_big_file 8192 [pwd]/test.db}]} {
+if {[catch {fake_big_file 8192 [get_pwd]/test.db}]} {
puts "**** Unable to create a file larger than 8192 MB. *****"
finish_test
return
@@ -148,7 +148,7 @@ do_test bigfile-1.9 {
} $::MAGIC_SUM
db close
-if {[catch {fake_big_file 16384 [pwd]/test.db}]} {
+if {[catch {fake_big_file 16384 [get_pwd]/test.db}]} {
puts "**** Unable to create a file larger than 16384 MB. *****"
finish_test
return
diff --git a/test/bigfile2.test b/test/bigfile2.test
new file mode 100644
index 0000000..b13b756
--- /dev/null
+++ b/test/bigfile2.test
@@ -0,0 +1,59 @@
+# 2011 December 20
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script testing the ability of SQLite to handle database
+# files larger than 4GB.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set testprefix bigfile2
+
+# Create a small database.
+#
+do_execsql_test 1.1 {
+ CREATE TABLE t1(a, b);
+ INSERT INTO t1 VALUES(1, 2);
+}
+
+# Pad the file out to 4GB in size. Then clear the file-size field in the
+# db header. This will cause SQLite to assume that the first 4GB of pages
+# are actually in use and new pages will be appended to the file.
+#
+db close
+if {[catch {fake_big_file 4096 [get_pwd]/test.db} msg]} {
+ puts "**** Unable to create a file larger than 4096 MB. *****"
+ finish_test
+ return
+}
+hexio_write test.db 28 00000000
+
+do_test 1.2 {
+ file size test.db
+} [expr 14 + 4096 * (1<<20)]
+
+# Now insert a large row. The overflow pages will be located past the 4GB
+# boundary. Then, after opening and closing the database, test that the row
+# can be read back in.
+#
+set str [string repeat k 30000]
+do_test 1.3 {
+ sqlite3 db test.db
+ execsql { INSERT INTO t1 VALUES(3, $str) }
+ db close
+ sqlite3 db test.db
+ db one { SELECT b FROM t1 WHERE a = 3 }
+} $str
+
+db close
+file delete test.db
+
+finish_test
diff --git a/test/cache.test b/test/cache.test
index 8d801f0..f81948b 100644
--- a/test/cache.test
+++ b/test/cache.test
@@ -14,7 +14,7 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
-ifcapable {!pager_pragmas} {
+ifcapable !pager_pragmas||!compound {
finish_test
return
}
diff --git a/test/capi3.test b/test/capi3.test
index cbba04c..d910626 100644
--- a/test/capi3.test
+++ b/test/capi3.test
@@ -894,18 +894,25 @@ do_test capi3-11.9.2 {
catchsql {
ROLLBACK;
}
-} {1 {cannot rollback transaction - SQL statements in progress}}
+} {0 {}}
do_test capi3-11.9.3 {
sqlite3_get_autocommit $DB
-} 0
+} 1
do_test capi3-11.10 {
sqlite3_step $STMT
-} {SQLITE_ROW}
+} {SQLITE_ERROR}
+ifcapable !autoreset {
+ # If SQLITE_OMIT_AUTORESET is defined, then the statement must be
+ # reset() before it can be passed to step() again.
+ do_test capi3-11.11a { sqlite3_step $STMT } {SQLITE_MISUSE}
+ do_test capi3-11.11b { sqlite3_reset $STMT } {SQLITE_ABORT}
+}
do_test capi3-11.11 {
sqlite3_step $STMT
} {SQLITE_ROW}
do_test capi3-11.12 {
sqlite3_step $STMT
+ sqlite3_step $STMT
} {SQLITE_DONE}
do_test capi3-11.13 {
sqlite3_finalize $STMT
@@ -914,15 +921,15 @@ do_test capi3-11.14 {
execsql {
SELECT a FROM t2;
}
-} {1 2 3}
+} {1 2}
do_test capi3-11.14.1 {
sqlite3_get_autocommit $DB
-} 0
+} 1
do_test capi3-11.15 {
catchsql {
ROLLBACK;
}
-} {0 {}}
+} {1 {cannot rollback - no transaction is active}}
do_test capi3-11.15.1 {
sqlite3_get_autocommit $DB
} 1
diff --git a/test/capi3c.test b/test/capi3c.test
index c1d5510..4092091 100644
--- a/test/capi3c.test
+++ b/test/capi3c.test
@@ -849,18 +849,25 @@ do_test capi3c-11.9.2 {
catchsql {
ROLLBACK;
}
-} {1 {cannot rollback transaction - SQL statements in progress}}
+} {0 {}}
do_test capi3c-11.9.3 {
sqlite3_get_autocommit $DB
-} 0
+} 1
do_test capi3c-11.10 {
sqlite3_step $STMT
-} {SQLITE_ROW}
+} {SQLITE_ABORT}
+ifcapable !autoreset {
+ # If SQLITE_OMIT_AUTORESET is defined, then the statement must be
+ # reset() before it can be passed to step() again.
+ do_test capi3-11.11a { sqlite3_step $STMT } {SQLITE_MISUSE}
+ do_test capi3-11.11b { sqlite3_reset $STMT } {SQLITE_ABORT}
+}
do_test capi3c-11.11 {
sqlite3_step $STMT
} {SQLITE_ROW}
do_test capi3c-11.12 {
sqlite3_step $STMT
+ sqlite3_step $STMT
} {SQLITE_DONE}
do_test capi3c-11.13 {
sqlite3_finalize $STMT
@@ -869,15 +876,15 @@ do_test capi3c-11.14 {
execsql {
SELECT a FROM t2;
}
-} {1 2 3}
+} {1 2}
do_test capi3c-11.14.1 {
sqlite3_get_autocommit $DB
-} 0
+} 1
do_test capi3c-11.15 {
catchsql {
ROLLBACK;
}
-} {0 {}}
+} {1 {cannot rollback - no transaction is active}}
do_test capi3c-11.15.1 {
sqlite3_get_autocommit $DB
} 1
diff --git a/test/capi3d.test b/test/capi3d.test
index 49e6447..746ec20 100644
--- a/test/capi3d.test
+++ b/test/capi3d.test
@@ -11,7 +11,7 @@
# This file implements regression tests for SQLite library.
#
# This file is devoted to testing the sqlite3_next_stmt and
-# sqlite3_stmt_readonly interfaces.
+# sqlite3_stmt_readonly and sqlite3_stmt_busy interfaces.
#
# $Id: capi3d.test,v 1.2 2008/07/14 15:11:20 drh Exp $
#
@@ -112,5 +112,29 @@ do_test capi3-2.99 {
sqlite3_stmt_readonly 0
} 1
+# Tests for sqlite3_stmt_busy
+#
+do_test capi3d-3.1 {
+ db eval {INSERT INTO t1 VALUES(6); INSERT INTO t1 VALUES(7);}
+ set STMT [sqlite3_prepare db {SELECT * FROM t1} -1 TAIL]
+ sqlite3_stmt_busy $STMT
+} {0}
+do_test capi3d-3.2 {
+ sqlite3_step $STMT
+ sqlite3_stmt_busy $STMT
+} {1}
+do_test capi3d-3.3 {
+ sqlite3_step $STMT
+ sqlite3_stmt_busy $STMT
+} {1}
+do_test capi3d-3.4 {
+ sqlite3_reset $STMT
+ sqlite3_stmt_busy $STMT
+} {0}
+
+do_test capi3d-3.99 {
+ sqlite3_finalize $STMT
+ sqlite3_stmt_busy 0
+} {0}
finish_test
diff --git a/test/check.test b/test/check.test
index d2867a0..bf0b770 100644
--- a/test/check.test
+++ b/test/check.test
@@ -117,9 +117,9 @@ do_test check-1.17 {
do_test check-2.1 {
execsql {
CREATE TABLE t2(
- x INTEGER CHECK( typeof(coalesce(x,0))=="integer" ),
- y REAL CHECK( typeof(coalesce(y,0.1))=='real' ),
- z TEXT CHECK( typeof(coalesce(z,''))=='text' )
+ x INTEGER CONSTRAINT one CHECK( typeof(coalesce(x,0))=="integer" ),
+ y REAL CONSTRAINT two CHECK( typeof(coalesce(y,0.1))=='real' ),
+ z TEXT CONSTRAINT three CHECK( typeof(coalesce(z,''))=='text' )
);
}
} {}
@@ -141,17 +141,59 @@ do_test check-2.4 {
catchsql {
INSERT INTO t2 VALUES(1.1, NULL, NULL);
}
-} {1 {constraint failed}}
+} {1 {constraint one failed}}
do_test check-2.5 {
catchsql {
INSERT INTO t2 VALUES(NULL, 5, NULL);
}
-} {1 {constraint failed}}
+} {1 {constraint two failed}}
do_test check-2.6 {
catchsql {
INSERT INTO t2 VALUES(NULL, NULL, 3.14159);
}
+} {1 {constraint three failed}}
+
+# Undocumented behavior: The CONSTRAINT name clause can follow a constraint.
+# Such a clause is ignored. But the parser must accept it for backwards
+# compatibility.
+#
+do_test check-2.10 {
+ execsql {
+ CREATE TABLE t2b(
+ x INTEGER CHECK( typeof(coalesce(x,0))=='integer' ) CONSTRAINT one,
+ y TEXT PRIMARY KEY constraint two,
+ z INTEGER,
+ UNIQUE(x,z) constraint three
+ );
+ }
+} {}
+do_test check-2.11 {
+ catchsql {
+ INSERT INTO t2b VALUES('xyzzy','hi',5);
+ }
} {1 {constraint failed}}
+do_test check-2.12 {
+ execsql {
+ CREATE TABLE t2c(
+ x INTEGER CONSTRAINT x_one CONSTRAINT x_two
+ CHECK( typeof(coalesce(x,0))=='integer' )
+ CONSTRAINT x_two CONSTRAINT x_three,
+ y INTEGER, z INTEGER,
+ CONSTRAINT u_one UNIQUE(x,y,z) CONSTRAINT u_two
+ );
+ }
+} {}
+do_test check-2.13 {
+ catchsql {
+ INSERT INTO t2c VALUES('xyzzy',7,8);
+ }
+} {1 {constraint x_two failed}}
+do_test check-2.cleanup {
+ execsql {
+ DROP TABLE IF EXISTS t2b;
+ DROP TABLE IF EXISTS t2c;
+ }
+} {}
ifcapable subquery {
do_test check-3.1 {
diff --git a/test/corruptF.test b/test/corruptF.test
new file mode 100644
index 0000000..33eef39
--- /dev/null
+++ b/test/corruptF.test
@@ -0,0 +1,150 @@
+# 2012 January 12
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set testprefix corruptF
+
+# Do not use a codec for tests in this file, as the database file is
+# manipulated directly using tcl scripts (using the [hexio_write] command).
+#
+do_not_use_codec
+
+proc str {i} { format %08d $i }
+
+# Create a 6 page database containing a single table - t1. Table t1
+# consists of page 2 (the root page) and pages 5 and 6 (leaf pages).
+# Database pages 3 and 4 are on the free list.
+#
+proc create_test_db {} {
+ catch { db close }
+ forcedelete test.db
+ sqlite3 db test.db
+ db func str str
+ execsql {
+ PRAGMA auto_vacuum = 0;
+ PRAGMA page_size = 1024;
+ CREATE TABLE t1(x); /* root page = 2 */
+ CREATE TABLE t2(x); /* root page = 3 */
+ CREATE TABLE t3(x); /* root page = 4 */
+
+ INSERT INTO t1 VALUES(str(1));
+ INSERT INTO t1 SELECT str(rowid+1) FROM t1;
+ INSERT INTO t1 SELECT str(rowid+2) FROM t1;
+ INSERT INTO t1 SELECT str(rowid+4) FROM t1;
+ INSERT INTO t1 SELECT str(rowid+8) FROM t1;
+ INSERT INTO t1 SELECT str(rowid+16) FROM t1;
+ INSERT INTO t1 SELECT str(rowid+32) FROM t1;
+ INSERT INTO t1 SELECT str(rowid+64) FROM t1;
+ DROP TABLE t2;
+ DROP TABLE t3;
+ }
+ db close
+}
+
+do_test 1.1 { create_test_db } {}
+
+# Check the db is as we expect. 6 pages in total, with 3 and 4 on the free
+# list. Page 3 is the free list trunk and page 4 is a leaf.
+#
+do_test 1.2 { file size test.db } [expr 6*1024]
+do_test 1.3 { hexio_read test.db 32 4 } 00000003
+do_test 1.4 { hexio_read test.db [expr 2*1024] 12 } 000000000000000100000004
+
+# Change the free-list entry to page 6 and reopen the db file.
+do_test 1.5 {
+ hexio_write test.db [expr 2*1024 + 8] 00000006
+ sqlite3 db test.db
+} {}
+
+# Now create a new table in the database file. The root of the new table
+# is page 6, which is also the right-most leaf page in table t1.
+#
+do_execsql_test 1.6 {
+ CREATE TABLE t4(x);
+ SELECT * FROM sqlite_master;
+} {
+ table t1 t1 2 {CREATE TABLE t1(x)}
+ table t4 t4 6 {CREATE TABLE t4(x)}
+}
+
+# At one point this was causing an assert to fail.
+#
+# This statement opens a cursor on table t1 and does a full table scan. As
+# each row is visited, it is copied into table t4. There is no temporary
+# table.
+#
+# When the t1 cursor reaches page 6 (which is both the right-most leaf of
+# t1 and the root of t4), it continues to iterate through the keys within
+# it (which at this point are keys that have been inserted into t4). And
+# for each row visited, another row is inserted into page 6 - it being the
+# root page of t4. Eventually, page 6 becomes full and the height of the
+# b-tree for table t4 increased. From the point of view of the t1 cursor,
+# this unexpectedly reduces the number of keys on page 6 in the middle of
+# its iteration, which causes an assert() to fail.
+#
+db_save_and_close
+if 1 {
+for {set i 0} {$i < 128} {incr i} {
+ db_restore_and_reopen
+ do_test 1.7.$i {
+ set res [
+ catchsql { INSERT INTO t4 SELECT x FROM t1 WHERE rowid>$i }
+ ]
+ if {$res == "0 {}" || $res == "1 {database disk image is malformed}"} {
+ set res ""
+ }
+ set res
+ } {}
+}
+}
+
+do_test 2.1 { create_test_db } {}
+do_test 2.2 { file size test.db } [expr 6*1024]
+do_test 2.3 { hexio_read test.db 32 4 } 00000003
+do_test 2.4 { hexio_read test.db [expr 2*1024] 12 } 000000000000000100000004
+
+# Change the free-list entry to page 5 and reopen the db file.
+do_test 2.5 {
+ hexio_write test.db [expr 2*1024 + 8] 00000005
+ sqlite3 db test.db
+} {}
+
+# Now create a new table in the database file. The root of the new table
+# is page 5, which is also the right-most leaf page in table t1.
+#
+do_execsql_test 2.6 {
+ CREATE TABLE t4(x);
+ SELECT * FROM sqlite_master;
+} {
+ table t1 t1 2 {CREATE TABLE t1(x)}
+ table t4 t4 5 {CREATE TABLE t4(x)}
+}
+
+db_save_and_close
+for {set i 127} {$i >= 0} {incr i -1} {
+ db_restore_and_reopen
+ do_test 2.7.$i {
+ set res [
+ catchsql {
+ INSERT INTO t4 SELECT x FROM t1 WHERE rowid<$i ORDER BY rowid DESC
+ }
+ ]
+ if {$res == "0 {}" || $res == "1 {database disk image is malformed}"} {
+ set res ""
+ }
+ set res
+ } {}
+}
+
+finish_test
+
diff --git a/test/crash5.test b/test/crash5.test
index 42248d7..a786712 100644
--- a/test/crash5.test
+++ b/test/crash5.test
@@ -47,7 +47,7 @@ for {set ii 0} {$ii < 10} {incr ii} {
do_test crash5-$ii.$jj.1 {
crashsql -delay 1 -file test.db-journal -seed $ii -tclbody [join [list \
[list set iFail $jj] {
- sqlite3_crashparams 0 [file join [pwd] test.db-journal]
+ sqlite3_crashparams 0 [file join [get_pwd] test.db-journal]
# Begin a transaction and evaluate a "CREATE INDEX" statement
# with the iFail'th malloc() set to fail. This operation will
@@ -89,7 +89,7 @@ for {set ii 0} {$ii < 10} {incr ii} {
# by writing page 4 out to the db file. If it crashes later on,
# before syncing the journal... Corruption!
#
- sqlite3_crashparams 1 [file join [pwd] test.db-journal]
+ sqlite3_crashparams 1 [file join [get_pwd] test.db-journal]
sqlite3_release_memory 8092
}]] {}
expr 1
diff --git a/test/crypto.test b/test/crypto.test
index aabb481..5fb11f2 100644
--- a/test/crypto.test
+++ b/test/crypto.test
@@ -201,6 +201,186 @@ do_test rekey-as-first-operation {
db close
file delete -force test.db
+# create a new database, insert some data
+# then rekey it with the same password
+do_test rekey-same-passkey {
+ sqlite_orig db test.db
+
+ execsql {
+ PRAGMA key = 'test123';
+ CREATE TABLE t1(a,b);
+ BEGIN;
+ }
+
+ for {set i 1} {$i<=1000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ execsql "INSERT INTO t1 VALUES($i,'value $r');"
+ }
+
+ execsql {
+ COMMIT;
+ SELECT count(*) FROM t1;
+ PRAGMA rekey = 'test123';
+ SELECT count(*) FROM t1;
+ }
+} {1000 1000}
+db close
+file delete -force test.db
+
+# create a new database, insert some data
+# then rekey it. Make sure it is immediately
+# readable. Then close it and make sure it can be
+# read back
+do_test rekey-and-query-1 {
+ sqlite_orig db test.db
+
+ execsql {
+ PRAGMA key = 'test123';
+ CREATE TABLE t1(a,b);
+ BEGIN;
+ }
+
+ for {set i 1} {$i<=1000} {incr i} {
+ set r [expr {int(rand()*500000)}]
+ execsql "INSERT INTO t1 VALUES($i,'value $r');"
+ }
+
+ execsql {
+ COMMIT;
+ SELECT count(*) FROM t1;
+ PRAGMA rekey = 'test321';
+ SELECT count(*) FROM t1;
+ }
+} {1000 1000}
+
+db close
+
+do_test rekey-and-query-2 {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test321';
+ SELECT count(*) FROM t1;
+ }
+} {1000}
+db close
+file delete -force test.db
+
+# create a new database, insert some data
+# delete about 50% of the data
+# write some new data
+# delete another 50%
+# then rekey it. Make sure it is immediately
+# readable. Then close it and make sure it can be
+# read back. This test will ensure that Secure Delete
+# is enabled and all pages are being written and are not
+# being optimized out by sqlite3PagerDontWrite
+do_test rekey-delete-and-query-1 {
+ sqlite_orig db test.db
+
+ execsql {
+ PRAGMA key = 'test123';
+ CREATE TABLE t1(a,b);
+ CREATE INDEX ta_a ON t1(a);
+ BEGIN;
+ }
+
+ for {set i 1} {$i<1000} {incr i} {
+ set r [expr {int(rand()*32767)}]
+ set r1 [expr {int(rand()*32767)}]
+ execsql "INSERT INTO t1 VALUES($r,$r1);"
+ }
+ set r [expr {int(rand()*32767)}]
+ set r1 [expr {int(rand()*32767)}]
+ execsql "UPDATE t1 SET b = $r WHERE a < $r1;"
+
+ set r [expr {int(rand()*32767)}]
+
+ execsql "DELETE FROM t1 WHERE a < $r;"
+
+ execsql {
+ COMMIT;
+ SELECT (count(*) > 0) FROM t1;
+ }
+} {1}
+db close
+
+do_test rekey-delete-and-query-2 {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test123';
+ PRAGMA rekey = 'test321';
+ SELECT count(*) > 1 FROM t1;
+ PRAGMA integrity_check;
+ }
+} {1 ok}
+db close
+
+do_test rekey-delete-and-query-3 {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test321';
+ SELECT count(*) > 1 FROM t1;
+ }
+} {1}
+db close
+file delete -force test.db
+
+
+# same as previous test, but use WAL
+do_test rekey-delete-and-query-wal-1 {
+ sqlite_orig db test.db
+
+ execsql {
+ PRAGMA key = 'test123';
+ PRAGMA journal_mode = WAL;
+ CREATE TABLE t1(a,b);
+ CREATE INDEX ta_a ON t1(a);
+ BEGIN;
+ }
+
+ for {set i 1} {$i<1000} {incr i} {
+ set r [expr {int(rand()*32767)}]
+ set r1 [expr {int(rand()*32767)}]
+ execsql "INSERT INTO t1 VALUES($r,$r1);"
+ }
+ set r [expr {int(rand()*32767)}]
+ set r1 [expr {int(rand()*32767)}]
+ execsql "UPDATE t1 SET b = $r WHERE a < $r1;"
+
+ set r [expr {int(rand()*32767)}]
+
+ execsql "DELETE FROM t1 WHERE a < $r;"
+
+ execsql {
+ COMMIT;
+ SELECT (count(*) > 0) FROM t1;
+ }
+} {1}
+db close
+
+do_test rekey-delete-and-query-wal-2 {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test123';
+ PRAGMA journal_mode = WAL;
+ PRAGMA rekey = 'test321';
+ SELECT count(*) > 1 FROM t1;
+ PRAGMA integrity_check;
+ }
+} {wal 1 ok}
+db close
+
+do_test rekey-delete-and-query-wal-3 {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'test321';
+ PRAGMA journal_mode = WAL;
+ SELECT count(*) > 1 FROM t1;
+ }
+} {wal 1}
+db close
+file delete -force test.db
+
# attach an encrypted database
# where both database have the same
# key
@@ -611,8 +791,9 @@ do_test hmac-tamper-resistence {
db close
- # write some junk into the middle of the page
- hexio_write test.db 2560 00
+ # write some junk into the hmac segment, leaving
+ # the page data valid but with an invalid signature
+ hexio_write test.db 1000 0000
sqlite_orig db test.db
@@ -621,7 +802,7 @@ do_test hmac-tamper-resistence {
SELECT count(*) FROM t1;
}
-} {1 {database disk image is malformed}}
+} {1 {file is encrypted or is not a database}}
db close
file delete -force test.db
@@ -1145,4 +1326,234 @@ do_test cipher-options-before-keys {
db close
file delete -force test.db
+# open a 1.1.8 database (no HMAC), then
+# try to open another 1.1.8 database. The
+# attached database should have the same hmac
+# setting as the original
+do_test default-use-hmac-attach {
+ file copy -force sqlcipher-1.1.8-testkey.db test.db
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA cipher_default_use_hmac = OFF;
+ PRAGMA key = 'testkey';
+ SELECT count(*) FROM t1;
+ ATTACH 'sqlcipher-1.1.8-testkey.db' AS db2;
+ SELECT count(*) from db2.t1;
+ PRAGMA cipher_default_use_hmac = ON;
+ }
+} {4 4}
+db close
+file delete -force test.db
+
+# open a 2.0 database (with HMAC), then
+# try to a 1.1.8 database. this should
+# fail because the hmac setting for the
+# attached database is not compatible
+do_test attach-1.1.8-database-from-2.0-fails {
+ sqlite_orig db test.db
+ catchsql {
+ PRAGMA key = 'testkey';
+ CREATE table t1(a,b);
+ ATTACH 'sqlcipher-1.1.8-testkey.db' AS db2;
+ }
+} {1 {file is encrypted or is not a database}}
+db close
+file delete -force test.db
+
+# open a 2.0 database (with HMAC), then
+# set the default hmac setting to OFF.
+# try to a 1.1.8 database. this should
+# succeed now that hmac is off by default
+# before the attach
+do_test change-default-use-hmac-attach {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'testkey';
+ CREATE table t1(a,b);
+ INSERT INTO t1(a,b) VALUES (1,2);
+ }
+ db close
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'testkey';
+ SELECT count(*) FROM t1;
+ PRAGMA cipher_default_use_hmac = OFF;
+ ATTACH 'sqlcipher-1.1.8-testkey.db' AS db2;
+ SELECT count(*) from db2.t1;
+ PRAGMA cipher_default_use_hmac = ON;
+ }
+} {1 4}
+db close
+file delete -force test.db
+
+# verify the pragma cipher_version
+# returns the currently configured
+# sqlcipher version
+do_test verify-pragma-cipher-version {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA cipher_version;
+ }
+} {2.0.6}
+db close
+file delete -force test.db
+
+# create a new database, insert some data
+# and delete some data with
+# auto_vacuum on
+do_test auto-vacuum-full {
+ sqlite_orig db test.db
+
+ execsql {
+ PRAGMA key = 'test123';
+ PRAGMA auto_vacuum = FULL;
+ CREATE TABLE t1(a,b);
+ BEGIN;
+ }
+
+ for {set i 1} {$i<10000} {incr i} {
+ set r [expr {int(rand()*32767)}]
+ set r1 [expr {int(rand()*32767)}]
+ execsql "INSERT INTO t1 VALUES($r,$r1);"
+ }
+ set r [expr {int(rand()*32767)}]
+ execsql "DELETE FROM t1 WHERE a < $r;"
+
+ execsql {
+ COMMIT;
+ PRAGMA integrity_check;
+ PRAGMA freelist_count;
+ SELECT (count(*) > 0) FROM t1;
+ }
+} {ok 0 1}
+db close
+file delete -force test.db
+
+# create a new database, insert some data
+# and delete some data with
+# auto_vacuum incremental
+do_test auto-vacuum-incremental {
+ sqlite_orig db test.db
+
+ execsql {
+ PRAGMA key = 'test123';
+ PRAGMA auto_vacuum = INCREMENTAL;
+ CREATE TABLE t1(a,b);
+ BEGIN;
+ }
+
+ for {set i 1} {$i<10000} {incr i} {
+ set r [expr {int(rand()*32767)}]
+ set r1 [expr {int(rand()*32767)}]
+ execsql "INSERT INTO t1 VALUES($r,$r1);"
+ }
+ set r [expr {int(rand()*32767)}]
+ execsql "DELETE FROM t1 WHERE a < $r;"
+
+ execsql {
+ COMMIT;
+ PRAGMA incremental_vacuum;
+ PRAGMA freelist_count;
+ PRAGMA integrity_check;
+ SELECT (count(*) > 0) FROM t1;
+ }
+} {0 ok 1}
+db close
+file delete -force test.db
+
+
+# create a database with many hundred tables such that the schema
+# will overflow the first several pages of the database. verify the schema
+# is intact on open.
+do_test multipage-schema {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'testkey';
+ BEGIN EXCLUSIVE;
+ } db
+
+ for {set i 1} {$i<=300} {incr i} {
+ execsql "CREATE TABLE tab$i (a TEXT, b TEXT, c TEXT, d TEXT, e TEXT, f TEXT, g TEXT, h TEXT, i TEXT, j TEXT, k, TEXT, l, m TEXT, n TEXT, o TEXT, p TEXT);" db
+ }
+
+ execsql {
+ COMMIT;
+ } db
+
+ db close
+ sqlite_orig db test.db
+
+ execsql {
+ PRAGMA key = 'testkey';
+ SELECT count(*) FROM sqlite_master where type = 'table';
+ } db
+
+} {300}
+db close
+file delete -force test.db
+
+# create a database with many hundred tables such that the schema
+# will overflow the first several pages of the database. this time, enable
+# autovacuum on the database, which will cause sqlite to do some "short reads"
+# after the end of the main database file. verify that there are no HMAC errors
+# resulting from the short reads, and that the schema is intact when
+# the database is reopened
+do_test multipage-schema-autovacuum-shortread {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'testkey';
+ PRAGMA auto_vacuum = FULL;
+ BEGIN EXCLUSIVE;
+ } db
+
+ for {set i 1} {$i<=300} {incr i} {
+ execsql "CREATE TABLE tab$i (a TEXT, b TEXT, c TEXT, d TEXT, e TEXT, f TEXT, g TEXT, h TEXT, i TEXT, j TEXT, k, TEXT, l, m TEXT, n TEXT, o TEXT, p TEXT);" db
+ }
+
+ execsql {
+ COMMIT;
+ } db
+
+ db close
+ sqlite_orig db test.db
+
+ execsql {
+ PRAGMA key = 'testkey';
+ SELECT count(*) FROM sqlite_master where type = 'table';
+ } db
+
+} {300}
+db close
+file delete -force test.db
+
+# same as multi-page-schema-autovacuum-shortread, except
+# using write ahead log mode
+do_test multipage-schema-autovacuum-shortread-wal {
+ sqlite_orig db test.db
+ execsql {
+ PRAGMA key = 'testkey';
+ PRAGMA auto_vacuum = FULL;
+ PRAGMA journal_mode = WAL;
+ BEGIN EXCLUSIVE;
+ } db
+
+ for {set i 1} {$i<=300} {incr i} {
+ execsql "CREATE TABLE tab$i (a TEXT, b TEXT, c TEXT, d TEXT, e TEXT, f TEXT, g TEXT, h TEXT, i TEXT, j TEXT, k, TEXT, l, m TEXT, n TEXT, o TEXT, p TEXT);" db
+ }
+
+ execsql {
+ COMMIT;
+ } db
+
+ db close
+ sqlite_orig db test.db
+
+ execsql {
+ PRAGMA key = 'testkey';
+ SELECT count(*) FROM sqlite_master where type = 'table';
+ } db
+} {300}
+db close
+file delete -force test.db
+
finish_test
diff --git a/test/dbstatus.test b/test/dbstatus.test
index e1c8f3e..9793df3 100644
--- a/test/dbstatus.test
+++ b/test/dbstatus.test
@@ -15,6 +15,11 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+ifcapable !compound {
+ finish_test
+ return
+}
+
# Memory statistics must be enabled for this test.
db close
sqlite3_shutdown
@@ -62,6 +67,11 @@ ifcapable stat3 {
set STAT3 0
}
+ifcapable malloc_usable_size {
+ finish_test
+ return
+}
+
#---------------------------------------------------------------------------
# Run the dbstatus-2 and dbstatus-3 tests with several of different
# lookaside buffer sizes.
@@ -207,8 +217,13 @@ foreach ::lookaside_buffer_size {0 64 120} {
# Some of the memory used for sqlite_stat3 is unaccounted for by
# dbstatus.
#
+ # Finally, on osx the estimate of memory used by the schema may be
+ # slightly low.
+ #
if {[string match *x $tn] || $AUTOVACUUM
- || ([string match *y $tn] && $STAT3)} {
+ || ([string match *y $tn] && $STAT3)
+ || ($::tcl_platform(os) == "Darwin")
+ } {
do_test dbstatus-2.$tn.ax { expr {($nSchema1-$nSchema2)<=$nFree} } 1
} else {
do_test dbstatus-2.$tn.a { expr {$nSchema1-$nSchema2} } $nFree
diff --git a/test/dbstatus2.test b/test/dbstatus2.test
index 4dfa9b8..b2ec156 100644
--- a/test/dbstatus2.test
+++ b/test/dbstatus2.test
@@ -9,7 +9,7 @@
#
#***********************************************************************
#
-# Tests for the sqlite3_stmt_status() function
+# Tests for the sqlite3_db_status() function
#
set testdir [file dirname $argv0]
@@ -33,6 +33,10 @@ proc db_hit_miss {db {reset 0}} {
list $nHit $nMiss
}
+proc db_write {db {reset 0}} {
+ sqlite3_db_status $db CACHE_WRITE $reset
+}
+
do_test 1.1 {
db close
sqlite3 db test.db
@@ -72,5 +76,24 @@ do_test 1.7 {
do_test 1.8 { sqlite3_db_status db CACHE_HIT 0 } {0 2 0}
do_test 1.9 { sqlite3_db_status db CACHE_MISS 0 } {0 1 0}
+do_test 2.1 { db_write db } {0 0 0}
+do_test 2.2 {
+ execsql { INSERT INTO t1 VALUES(4, randomblob(600)) }
+ db_write db
+} {0 4 0}
+do_test 2.3 { db_write db 1 } {0 4 0}
+do_test 2.4 { db_write db 0 } {0 0 0}
+do_test 2.5 { db_write db 1 } {0 0 0}
+
+do_test 2.6 {
+ execsql { PRAGMA journal_mode = WAL }
+ db_write db 1
+} {0 1 0}
+do_test 2.7 {
+ execsql { INSERT INTO t1 VALUES(5, randomblob(600)) }
+ db_write db
+} {0 4 0}
+do_test 2.8 { db_write db 1 } {0 4 0}
+do_test 2.9 { db_write db 0 } {0 0 0}
finish_test
diff --git a/test/distinct.test b/test/distinct.test
index e0a9136..3a33544 100644
--- a/test/distinct.test
+++ b/test/distinct.test
@@ -15,6 +15,11 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+ifcapable !compound {
+ finish_test
+ return
+}
+
set testprefix distinct
@@ -46,8 +51,8 @@ proc do_temptables_test {tn sql temptables} {
set ret ""
db eval "EXPLAIN [set sql]" {
if {$opcode == "OpenEphemeral" || $opcode == "SorterOpen"} {
- if {$p5 != "10" && $p5!="00"} { error "p5 = $p5" }
- if {$p5 == "10"} {
+ if {$p5 != "08" && $p5!="00"} { error "p5 = $p5" }
+ if {$p5 == "08"} {
lappend ret hash
} else {
lappend ret btree
@@ -72,20 +77,27 @@ do_execsql_test 1.0 {
CREATE TABLE t2(x INTEGER PRIMARY KEY, y);
- CREATE TABLE t3(c1 PRIMARY KEY, c2);
+ CREATE TABLE t3(c1 PRIMARY KEY NOT NULL, c2 NOT NULL);
CREATE INDEX i3 ON t3(c2);
+
+ CREATE TABLE t4(a, b NOT NULL, c NOT NULL, d NOT NULL);
+ CREATE UNIQUE INDEX t4i1 ON t4(b, c);
+ CREATE UNIQUE INDEX t4i2 ON t4(d COLLATE nocase);
}
foreach {tn noop sql} {
- 1 1 "SELECT DISTINCT b, c FROM t1"
- 2 1 "SELECT DISTINCT c FROM t1 WHERE b = ?"
+ 1.1 0 "SELECT DISTINCT b, c FROM t1"
+ 1.2 1 "SELECT DISTINCT b, c FROM t4"
+ 2.1 0 "SELECT DISTINCT c FROM t1 WHERE b = ?"
+ 2.2 1 "SELECT DISTINCT c FROM t4 WHERE b = ?"
3 1 "SELECT DISTINCT rowid FROM t1"
4 1 "SELECT DISTINCT rowid, a FROM t1"
5 1 "SELECT DISTINCT x FROM t2"
6 1 "SELECT DISTINCT * FROM t2"
7 1 "SELECT DISTINCT * FROM (SELECT * FROM t2)"
- 8 1 "SELECT DISTINCT * FROM t1"
+ 8.1 0 "SELECT DISTINCT * FROM t1"
+ 8.2 1 "SELECT DISTINCT * FROM t4"
8 0 "SELECT DISTINCT a, b FROM t1"
@@ -93,11 +105,16 @@ foreach {tn noop sql} {
10 0 "SELECT DISTINCT c FROM t1"
11 0 "SELECT DISTINCT b FROM t1"
- 12 0 "SELECT DISTINCT a, d FROM t1"
- 13 0 "SELECT DISTINCT a, b, c COLLATE nocase FROM t1"
- 14 1 "SELECT DISTINCT a, d COLLATE nocase FROM t1"
- 15 0 "SELECT DISTINCT a, d COLLATE binary FROM t1"
- 16 1 "SELECT DISTINCT a, b, c COLLATE binary FROM t1"
+ 12.1 0 "SELECT DISTINCT a, d FROM t1"
+ 12.2 0 "SELECT DISTINCT a, d FROM t4"
+ 13.1 0 "SELECT DISTINCT a, b, c COLLATE nocase FROM t1"
+ 13.2 0 "SELECT DISTINCT a, b, c COLLATE nocase FROM t4"
+ 14.1 0 "SELECT DISTINCT a, d COLLATE nocase FROM t1"
+ 14.2 1 "SELECT DISTINCT a, d COLLATE nocase FROM t4"
+
+ 15 0 "SELECT DISTINCT a, d COLLATE binary FROM t1"
+ 16.1 0 "SELECT DISTINCT a, b, c COLLATE binary FROM t1"
+ 16.2 1 "SELECT DISTINCT a, b, c COLLATE binary FROM t4"
16 0 "SELECT DISTINCT t1.rowid FROM t1, t2"
17 0 { /* Technically, it would be possible to detect that DISTINCT
@@ -115,7 +132,8 @@ foreach {tn noop sql} {
24 0 "SELECT DISTINCT rowid/2 FROM t1"
25 1 "SELECT DISTINCT rowid/2, rowid FROM t1"
- 26 1 "SELECT DISTINCT rowid/2, b FROM t1 WHERE c = ?"
+ 26.1 0 "SELECT DISTINCT rowid/2, b FROM t1 WHERE c = ?"
+ 26.2 1 "SELECT DISTINCT rowid/2, b FROM t4 WHERE c = ?"
} {
if {$noop} {
do_distinct_noop_test 1.$tn $sql
diff --git a/test/e_createtable.test b/test/e_createtable.test
index f61db1c..8221828 100644
--- a/test/e_createtable.test
+++ b/test/e_createtable.test
@@ -58,7 +58,7 @@ proc table_list {} {
}
-# EVIDENCE-OF: R-25262-01881 -- syntax diagram type-name
+# EVIDENCE-OF: R-47266-09114 -- syntax diagram type-name
#
do_createtable_tests 0.1.1 -repair {
drop_all_tables
@@ -79,12 +79,7 @@ do_createtable_tests 0.1.2 -error {
}
-# EVIDENCE-OF: R-18762-12428 -- syntax diagram column-constraint
-#
-# Note: Not shown in the syntax diagram is the "NULL" constraint. This
-# is the opposite of "NOT NULL" - it implies that the column may
-# take a NULL value. This is the default anyway, so this type of
-# constraint is rarely used.
+# EVIDENCE-OF: R-60689-48779 -- syntax diagram column-constraint
#
do_createtable_tests 0.2.1 -repair {
drop_all_tables
@@ -131,7 +126,7 @@ do_createtable_tests 0.2.1 -repair {
} {}
}
-# EVIDENCE-OF: R-17905-31923 -- syntax diagram table-constraint
+# EVIDENCE-OF: R-58169-51804 -- syntax diagram table-constraint
#
do_createtable_tests 0.3.1 -repair {
drop_all_tables
@@ -150,7 +145,7 @@ do_createtable_tests 0.3.1 -repair {
4.1 "CREATE TABLE t1(c1, c2, FOREIGN KEY(c1) REFERENCES t2)" {}
}
-# EVIDENCE-OF: R-18765-31171 -- syntax diagram column-def
+# EVIDENCE-OF: R-44826-22243 -- syntax diagram column-def
#
do_createtable_tests 0.4.1 -repair {
drop_all_tables
@@ -165,7 +160,7 @@ do_createtable_tests 0.4.1 -repair {
} {}
}
-# EVIDENCE-OF: R-59573-11075 -- syntax diagram create-table-stmt
+# EVIDENCE-OF: R-45698-45677 -- syntax diagram create-table-stmt
#
do_createtable_tests 0.5.1 -repair {
drop_all_tables
@@ -190,7 +185,7 @@ do_createtable_tests 0.5.1 -repair {
15 "CREATE TABLE t1 AS SELECT count(*), max(b), min(a) FROM t2" {}
}
-# EVIDENCE-OF: R-32138-02228 -- syntax diagram foreign-key-clause
+# EVIDENCE-OF: R-24369-11919 -- syntax diagram foreign-key-clause
#
# 1: Explicit parent-key columns.
# 2: Implicit child-key columns.
diff --git a/test/e_delete.test b/test/e_delete.test
index c77d444..31bb324 100644
--- a/test/e_delete.test
+++ b/test/e_delete.test
@@ -15,6 +15,11 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+ifcapable !compound {
+ finish_test
+ return
+}
+
proc do_delete_tests {args} {
uplevel do_select_tests $args
}
@@ -24,9 +29,9 @@ do_execsql_test e_delete-0.0 {
CREATE INDEX i1 ON t1(a);
} {}
-# EVIDENCE-OF: R-24177-52883 -- syntax diagram delete-stmt
+# EVIDENCE-OF: R-62077-19799 -- syntax diagram delete-stmt
#
-# EVIDENCE-OF: R-12802-60464 -- syntax diagram qualified-table-name
+# EVIDENCE-OF: R-60796-31013 -- syntax diagram qualified-table-name
#
do_delete_tests e_delete-0.1 {
1 "DELETE FROM t1" {}
@@ -287,7 +292,7 @@ do_delete_tests e_delete-2.5 -error { near "%s": syntax error } {
# of the DELETE statement is extended by the addition of optional ORDER
# BY and LIMIT clauses:
#
-# EVIDENCE-OF: R-45897-01670 -- syntax diagram delete-stmt-limited
+# EVIDENCE-OF: R-52694-53361 -- syntax diagram delete-stmt-limited
#
do_delete_tests e_delete-3.1 {
1 "DELETE FROM t1 LIMIT 5" {}
diff --git a/test/e_droptrigger.test b/test/e_droptrigger.test
index 0c14831..fe96104 100644
--- a/test/e_droptrigger.test
+++ b/test/e_droptrigger.test
@@ -69,7 +69,7 @@ proc droptrigger_reopen_db {{event INSERT}} {
}
-# EVIDENCE-OF: R-52650-16855 -- syntax diagram drop-trigger-stmt
+# EVIDENCE-OF: R-27975-10951 -- syntax diagram drop-trigger-stmt
#
do_droptrigger_tests 1.1 -repair {
droptrigger_reopen_db
diff --git a/test/e_dropview.test b/test/e_dropview.test
index 447e5c3..4a4b9c3 100644
--- a/test/e_dropview.test
+++ b/test/e_dropview.test
@@ -70,7 +70,7 @@ proc do_dropview_tests {nm args} {
uplevel do_select_tests $nm $args
}
-# EVIDENCE-OF: R-21739-51207 -- syntax diagram drop-view-stmt
+# EVIDENCE-OF: R-53136-36436 -- syntax diagram drop-view-stmt
#
# All paths in the syntax diagram for DROP VIEW are tested by tests 1.*.
#
diff --git a/test/e_expr.test b/test/e_expr.test
index 3c9678c..74d0c40 100644
--- a/test/e_expr.test
+++ b/test/e_expr.test
@@ -17,6 +17,10 @@ set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/malloc_common.tcl
+ifcapable !compound {
+ finish_test
+ return
+}
proc do_expr_test {tn expr type value} {
uplevel do_execsql_test $tn [list "SELECT typeof($expr), $expr"] [
@@ -627,7 +631,7 @@ do_test e_expr-11.7.1 { sqlite3_finalize $stmt } SQLITE_OK
#-------------------------------------------------------------------------
# "Test" the syntax diagrams in lang_expr.html.
#
-# EVIDENCE-OF: R-62067-43884 -- syntax diagram signed-number
+# EVIDENCE-OF: R-02989-21050 -- syntax diagram signed-number
#
do_execsql_test e_expr-12.1.1 { SELECT 0, +0, -0 } {0 0 0}
do_execsql_test e_expr-12.1.2 { SELECT 1, +1, -1 } {1 1 -1}
@@ -642,7 +646,7 @@ do_execsql_test e_expr-12.1.6 {
SELECT 0.0001, +0.0001, -0.0001
} {0.0001 0.0001 -0.0001}
-# EVIDENCE-OF: R-21258-25489 -- syntax diagram literal-value
+# EVIDENCE-OF: R-43188-60852 -- syntax diagram literal-value
#
set sqlite_current_time 1
do_execsql_test e_expr-12.2.1 {SELECT 123} {123}
@@ -655,7 +659,7 @@ do_execsql_test e_expr-12.2.7 {SELECT CURRENT_DATE} {1970-01-01}
do_execsql_test e_expr-12.2.8 {SELECT CURRENT_TIMESTAMP} {{1970-01-01 00:00:01}}
set sqlite_current_time 0
-# EVIDENCE-OF: R-57598-59332 -- syntax diagram expr
+# EVIDENCE-OF: R-50544-32159 -- syntax diagram expr
#
forcedelete test.db2
execsql {
@@ -812,7 +816,7 @@ foreach {tn expr} {
}
}
-# EVIDENCE-OF: R-49462-56079 -- syntax diagram raise-function
+# EVIDENCE-OF: R-39820-63916 -- syntax diagram raise-function
#
foreach {tn raiseexpr} {
1 "RAISE(IGNORE)"
diff --git a/test/e_fkey.test b/test/e_fkey.test
index ae789d5..5b27e03 100644
--- a/test/e_fkey.test
+++ b/test/e_fkey.test
@@ -2325,7 +2325,7 @@ do_test e_fkey-51.1 {
do_test e_fkey-51.2 {
execsql {
UPDATE parent SET x = 22;
- SELECT * FROM parent UNION ALL SELECT 'xxx' UNION ALL SELECT a FROM child;
+ SELECT * FROM parent ; SELECT 'xxx' ; SELECT a FROM child;
}
} {22 21 23 xxx 22}
do_test e_fkey-51.3 {
@@ -2335,7 +2335,7 @@ do_test e_fkey-51.3 {
INSERT INTO parent VALUES(-1);
INSERT INTO child VALUES(-1);
UPDATE parent SET x = 22;
- SELECT * FROM parent UNION ALL SELECT 'xxx' UNION ALL SELECT a FROM child;
+ SELECT * FROM parent ; SELECT 'xxx' ; SELECT a FROM child;
}
} {22 23 21 xxx 23}
diff --git a/test/e_insert.test b/test/e_insert.test
index fe8bfcf..ac4361f 100644
--- a/test/e_insert.test
+++ b/test/e_insert.test
@@ -18,6 +18,11 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+ifcapable !compound {
+ finish_test
+ return
+}
+
# Organization of tests:
#
# e_insert-0.*: Test the syntax diagram.
@@ -45,7 +50,7 @@ proc do_insert_tests {args} {
uplevel do_select_tests $args
}
-# EVIDENCE-OF: R-41448-54465 -- syntax diagram insert-stmt
+# EVIDENCE-OF: R-21350-31508 -- syntax diagram insert-stmt
#
do_insert_tests e_insert-0 {
1 "INSERT INTO a1 DEFAULT VALUES" {}
@@ -118,6 +123,20 @@ do_insert_tests e_insert-0 {
68 "INSERT OR IGNORE INTO a1 (b, a) SELECT c, b FROM a2" {}
69 "REPLACE INTO a1 (b, a) SELECT c, b FROM a2" {}
70 "REPLACE INTO main.a1 (b, a) SELECT c, b FROM a2" {}
+ 71 "INSERT INTO a1 (b, a) VALUES(1, 2),(3,4)" {}
+ 72 "INSERT INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {}
+ 73 "INSERT OR ROLLBACK INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {}
+ 74 "INSERT OR ROLLBACK INTO a1 (b, a) VALUES(1, 2),(3,4)" {}
+ 75 "INSERT OR ABORT INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {}
+ 76 "INSERT OR ABORT INTO a1 (b, a) VALUES(1, 2),(3,4)" {}
+ 77 "INSERT OR REPLACE INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {}
+ 78 "INSERT OR REPLACE INTO a1 (b, a) VALUES(1, 2),(3,4)" {}
+ 79 "INSERT OR FAIL INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {}
+ 80 "INSERT OR FAIL INTO a1 (b, a) VALUES(1, 2),(3,4)" {}
+ 81 "INSERT OR FAIL INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {}
+ 82 "INSERT OR IGNORE INTO a1 (b, a) VALUES(1, 2),(3,4)" {}
+ 83 "REPLACE INTO a1 (b, a) VALUES(1, 2),(3,4)" {}
+ 84 "REPLACE INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {}
}
delete_all_data
diff --git a/test/e_reindex.test b/test/e_reindex.test
index e9419df..b39f37e 100644
--- a/test/e_reindex.test
+++ b/test/e_reindex.test
@@ -26,7 +26,7 @@ do_execsql_test e_reindex-0.0 {
CREATE INDEX i2 ON t1(b, a);
} {}
-# EVIDENCE-OF: R-57021-15304 -- syntax diagram reindex-stmt
+# EVIDENCE-OF: R-51477-38549 -- syntax diagram reindex-stmt
#
do_reindex_tests e_reindex-0.1 {
1 "REINDEX" {}
@@ -34,8 +34,8 @@ do_reindex_tests e_reindex-0.1 {
3 "REINDEX binary" {}
4 "REINDEX t1" {}
5 "REINDEX main.t1" {}
- 4 "REINDEX i1" {}
- 5 "REINDEX main.i1" {}
+ 6 "REINDEX i1" {}
+ 7 "REINDEX main.i1" {}
}
# EVIDENCE-OF: R-52173-44778 The REINDEX command is used to delete and
diff --git a/test/e_select.test b/test/e_select.test
index e0f5f0f..e5949af 100644
--- a/test/e_select.test
+++ b/test/e_select.test
@@ -16,6 +16,11 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+ifcapable !compound {
+ finish_test
+ return
+}
+
do_execsql_test e_select-1.0 {
CREATE TABLE t1(a, b);
INSERT INTO t1 VALUES('a', 'one');
@@ -78,7 +83,7 @@ proc do_join_test {tn select res} {
# The following tests check that all paths on the syntax diagrams on
# the lang_select.html page may be taken.
#
-# EVIDENCE-OF: R-18428-22111 -- syntax diagram join-constraint
+# EVIDENCE-OF: R-11353-33501 -- syntax diagram join-constraint
#
do_join_test e_select-0.1.1 {
SELECT count(*) FROM t1 %JOIN% t2 ON (t1.a=t2.a)
@@ -96,7 +101,7 @@ do_catchsql_test e_select-0.1.5 {
SELECT count(*) FROM t1, t2 USING (a) ON (t1.a=t2.a)
} {1 {near "ON": syntax error}}
-# EVIDENCE-OF: R-44854-11739 -- syntax diagram select-core
+# EVIDENCE-OF: R-40919-40941 -- syntax diagram select-core
#
# 0: SELECT ...
# 1: SELECT DISTINCT ...
@@ -221,7 +226,7 @@ do_select_tests e_select-0.2 {
}
-# EVIDENCE-OF: R-23316-20169 -- syntax diagram result-column
+# EVIDENCE-OF: R-41378-26734 -- syntax diagram result-column
#
do_select_tests e_select-0.3 {
1 "SELECT * FROM t1" {a one b two c three}
@@ -231,9 +236,9 @@ do_select_tests e_select-0.3 {
5 "SELECT 'x'||a||'x' AS alias FROM t1" {xax xbx xcx}
}
-# EVIDENCE-OF: R-41233-21397 -- syntax diagram join-source
+# EVIDENCE-OF: R-43129-35648 -- syntax diagram join-source
#
-# EVIDENCE-OF: R-45040-11121 -- syntax diagram join-op
+# EVIDENCE-OF: R-36683-37460 -- syntax diagram join-op
#
do_select_tests e_select-0.4 {
1 "SELECT t1.rowid FROM t1" {1 2 3}
@@ -258,7 +263,7 @@ do_select_tests e_select-0.4 {
16 "SELECT t1.rowid FROM t1 CROSS JOIN t3" {1 1 2 2 3 3}
}
-# EVIDENCE-OF: R-56911-63533 -- syntax diagram compound-operator
+# EVIDENCE-OF: R-28308-37813 -- syntax diagram compound-operator
#
do_select_tests e_select-0.5 {
1 "SELECT rowid FROM t1 UNION ALL SELECT rowid+2 FROM t4" {1 2 3 3 4}
@@ -267,7 +272,7 @@ do_select_tests e_select-0.5 {
4 "SELECT rowid FROM t1 EXCEPT SELECT rowid+2 FROM t4" {1 2}
}
-# EVIDENCE-OF: R-60388-27458 -- syntax diagram ordering-term
+# EVIDENCE-OF: R-06480-34950 -- syntax diagram ordering-term
#
do_select_tests e_select-0.6 {
1 "SELECT b||a FROM t1 ORDER BY b||a" {onea threec twob}
@@ -276,7 +281,7 @@ do_select_tests e_select-0.6 {
4 "SELECT b||a FROM t1 ORDER BY (b||a) DESC" {twob threec onea}
}
-# EVIDENCE-OF: R-36494-33519 -- syntax diagram select-stmt
+# EVIDENCE-OF: R-23926-36668 -- syntax diagram select-stmt
#
do_select_tests e_select-0.7 {
1 "SELECT * FROM t1" {a one b two c three}
@@ -800,7 +805,7 @@ do_select_tests e_select-4.1 {
6 "SELECT count(*), * FROM z1" {6 63 born -26}
7 "SELECT max(a), * FROM z1" {63 63 born -26}
- 8 "SELECT *, min(a) FROM z1" {63 born -26 -5}
+ 8 "SELECT *, min(a) FROM z1" {-5 {} 75 -5}
9 "SELECT *,* FROM z1,z2 LIMIT 1" {
51.65 -59.58 belfries {} 21 51.65 -59.58 belfries {} 21
diff --git a/test/e_update.test b/test/e_update.test
index c14b845..230c97f 100644
--- a/test/e_update.test
+++ b/test/e_update.test
@@ -49,7 +49,7 @@ proc do_update_tests {args} {
uplevel do_select_tests $args
}
-# EVIDENCE-OF: R-05685-44205 -- syntax diagram update-stmt
+# EVIDENCE-OF: R-62337-45828 -- syntax diagram update-stmt
#
do_update_tests e_update-0 {
1 "UPDATE t1 SET a=10" {}
@@ -381,11 +381,9 @@ do_execsql_test e_update-2.2.X {
# attached).
#
do_execsql_test e_update-2.3.0 {
- SELECT 'main', tbl_name FROM main.sqlite_master WHERE type = 'table'
- UNION ALL
- SELECT 'temp', tbl_name FROM sqlite_temp_master WHERE type = 'table'
- UNION ALL
- SELECT 'aux', tbl_name FROM aux.sqlite_master WHERE type = 'table'
+ SELECT 'main', tbl_name FROM main.sqlite_master WHERE type = 'table';
+ SELECT 'temp', tbl_name FROM sqlite_temp_master WHERE type = 'table';
+ SELECT 'aux', tbl_name FROM aux.sqlite_master WHERE type = 'table';
} [list {*}{
main t1
main t2
@@ -495,7 +493,7 @@ do_update_tests e_update-2.5 -error {
# of the UPDATE statement is extended with optional ORDER BY and LIMIT
# clauses
#
-# EVIDENCE-OF: R-08948-01887 -- syntax diagram update-stmt-limited
+# EVIDENCE-OF: R-45169-39597 -- syntax diagram update-stmt-limited
#
do_update_tests e_update-3.0 {
1 "UPDATE t1 SET a=b LIMIT 5" {}
diff --git a/test/e_uri.test b/test/e_uri.test
index 5275ec1..8110d70 100644
--- a/test/e_uri.test
+++ b/test/e_uri.test
@@ -131,10 +131,10 @@ sqlite3_config_uri 1
if {$tcl_platform(platform) == "unix"} {
set flags [list SQLITE_OPEN_READWRITE SQLITE_OPEN_CREATE SQLITE_OPEN_URI]
foreach {tn uri error} "
- 1 {file://localhost[pwd]/test.db} {not an error}
- 2 {file://[pwd]/test.db} {not an error}
- 3 {file://x[pwd]/test.db} {invalid uri authority: x}
- 4 {file://invalid[pwd]/test.db} {invalid uri authority: invalid}
+ 1 {file://localhost[get_pwd]/test.db} {not an error}
+ 2 {file://[get_pwd]/test.db} {not an error}
+ 3 {file://x[get_pwd]/test.db} {invalid uri authority: x}
+ 4 {file://invalid[get_pwd]/test.db} {invalid uri authority: invalid}
" {
do_test 2.$tn {
set DB [sqlite3_open_v2 $uri $flags ""]
@@ -153,9 +153,9 @@ if {$tcl_platform(platform) == "unix"} {
# parameters passed through to the VFS xOpen() methods.
#
foreach {tn uri parse} "
- 1 {file:test.db#abc} {[pwd]/test.db {}}
- 2 {file:test.db?a=b#abc} {[pwd]/test.db {a b}}
- 3 {file:test.db?a=b#?c=d} {[pwd]/test.db {a b}}
+ 1 {file:test.db#abc} {[get_pwd]/test.db {}}
+ 2 {file:test.db?a=b#abc} {[get_pwd]/test.db {a b}}
+ 3 {file:test.db?a=b#?c=d} {[get_pwd]/test.db {a b}}
" {
do_filepath_test 3.$tn { parse_uri $uri } $parse
}
@@ -171,7 +171,7 @@ foreach {tn uri parse} "
# path is interpreted as a relative path.
#
foreach {tn uri parse} "
- 1 {file:test.db} {[pwd]/test.db {}}
+ 1 {file:test.db} {[get_pwd]/test.db {}}
2 {file:/test.db} {/test.db {}}
3 {file:///test.db} {/test.db {}}
4 {file://localhost/test.db} {/test.db {}}
diff --git a/test/e_vacuum.test b/test/e_vacuum.test
index 414c854..bad12d3 100644
--- a/test/e_vacuum.test
+++ b/test/e_vacuum.test
@@ -65,7 +65,7 @@ proc fragment_count {name} {
}
-# EVIDENCE-OF: R-63707-33375 -- syntax diagram vacuum-stmt
+# EVIDENCE-OF: R-45173-45977 -- syntax diagram vacuum-stmt
#
do_execsql_test e_vacuum-0.1 { VACUUM } {}
@@ -122,7 +122,7 @@ foreach {tn avmode sz} {
# e_vacuum-1.2.4 - Verify that t1 and its indexes are now much
# less fragmented.
#
-ifcapable vtab {
+ifcapable vtab&&compound {
create_db
register_dbstat_vtab db
do_execsql_test e_vacuum-1.2.1 {
diff --git a/test/eqp.test b/test/eqp.test
index 91a18d0..0e663f0 100644
--- a/test/eqp.test
+++ b/test/eqp.test
@@ -13,6 +13,11 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+ifcapable !compound {
+ finish_test
+ return
+}
+
set testprefix eqp
#-------------------------------------------------------------------------
diff --git a/test/filectrl.test b/test/filectrl.test
index 9f077d5..1e4ec59 100644
--- a/test/filectrl.test
+++ b/test/filectrl.test
@@ -34,7 +34,7 @@ do_test filectrl-1.4 {
do_test filectrl-1.5 {
db close
sqlite3 db test_control_lockproxy.db
- file_control_lockproxy_test db [pwd]
+ file_control_lockproxy_test db [get_pwd]
} {}
db close
forcedelete .test_control_lockproxy.db-conch test.proxy
diff --git a/test/fts3_common.tcl b/test/fts3_common.tcl
index 4d4ae38..2ed1f70 100644
--- a/test/fts3_common.tcl
+++ b/test/fts3_common.tcl
@@ -15,6 +15,129 @@
#
#-------------------------------------------------------------------------
+# INSTRUCTIONS
+#
+# The following commands are available:
+#
+# fts3_build_db_1 N
+# Using database handle [db] create an FTS4 table named t1 and populate
+# it with N rows of data. N must be less than 10,000. Refer to the
+# header comments above the proc implementation below for details.
+#
+# fts3_build_db_2 N
+# Using database handle [db] create an FTS4 table named t2 and populate
+# it with N rows of data. N must be less than 100,000. Refer to the
+# header comments above the proc implementation below for details.
+#
+# fts3_integrity_check TBL
+# TBL must be an FTS table in the database currently opened by handle
+# [db]. This proc loads and tokenizes all documents within the table,
+# then checks that the current contents of the FTS index matches the
+# results.
+#
+# fts3_terms TBL WHERE
+# Todo.
+#
+# fts3_doclist TBL TERM WHERE
+# Todo.
+#
+#
+#
+
+#-------------------------------------------------------------------------
+# USAGE: fts3_build_db_1 SWITCHES N
+#
+# Build a sample FTS table in the database opened by database connection
+# [db]. The name of the new table is "t1".
+#
+proc fts3_build_db_1 {args} {
+
+ set default(-module) fts4
+
+ set nArg [llength $args]
+ if {($nArg%2)==0} {
+ error "wrong # args: should be \"fts3_build_db_1 ?switches? n\""
+ }
+
+ set n [lindex $args [expr $nArg-1]]
+ array set opts [array get default]
+ array set opts [lrange $args 0 [expr $nArg-2]]
+ foreach k [array names opts] {
+ if {0==[info exists default($k)]} { error "unknown option: $k" }
+ }
+
+ if {$n > 10000} {error "n must be <= 10000"}
+ db eval "CREATE VIRTUAL TABLE t1 USING $opts(-module) (x, y)"
+
+ set xwords [list zero one two three four five six seven eight nine ten]
+ set ywords [list alpha beta gamma delta epsilon zeta eta theta iota kappa]
+
+ for {set i 0} {$i < $n} {incr i} {
+ set x ""
+ set y ""
+
+ set x [list]
+ lappend x [lindex $xwords [expr ($i / 1000) % 10]]
+ lappend x [lindex $xwords [expr ($i / 100) % 10]]
+ lappend x [lindex $xwords [expr ($i / 10) % 10]]
+ lappend x [lindex $xwords [expr ($i / 1) % 10]]
+
+ set y [list]
+ lappend y [lindex $ywords [expr ($i / 1000) % 10]]
+ lappend y [lindex $ywords [expr ($i / 100) % 10]]
+ lappend y [lindex $ywords [expr ($i / 10) % 10]]
+ lappend y [lindex $ywords [expr ($i / 1) % 10]]
+
+ db eval { INSERT INTO t1(docid, x, y) VALUES($i, $x, $y) }
+ }
+}
+
+#-------------------------------------------------------------------------
+# USAGE: fts3_build_db_2 SWITCHES N
+#
+# Build a sample FTS table in the database opened by database connection
+# [db]. The name of the new table is "t2".
+#
+proc fts3_build_db_2 {args} {
+
+ set default(-module) fts4
+ set default(-extra) ""
+
+ set nArg [llength $args]
+ if {($nArg%2)==0} {
+ error "wrong # args: should be \"fts3_build_db_1 ?switches? n\""
+ }
+
+ set n [lindex $args [expr $nArg-1]]
+ array set opts [array get default]
+ array set opts [lrange $args 0 [expr $nArg-2]]
+ foreach k [array names opts] {
+ if {0==[info exists default($k)]} { error "unknown option: $k" }
+ }
+
+ if {$n > 100000} {error "n must be <= 100000"}
+
+ set sql "CREATE VIRTUAL TABLE t2 USING $opts(-module) (content"
+ if {$opts(-extra) != ""} {
+ append sql ", " $opts(-extra)
+ }
+ append sql ")"
+ db eval $sql
+
+ set chars [list a b c d e f g h i j k l m n o p q r s t u v w x y z ""]
+
+ for {set i 0} {$i < $n} {incr i} {
+ set word ""
+ set nChar [llength $chars]
+ append word [lindex $chars [expr {($i / 1) % $nChar}]]
+ append word [lindex $chars [expr {($i / $nChar) % $nChar}]]
+ append word [lindex $chars [expr {($i / ($nChar*$nChar)) % $nChar}]]
+
+ db eval { INSERT INTO t2(docid, content) VALUES($i, $word) }
+ }
+}
+
+#-------------------------------------------------------------------------
# USAGE: fts3_integrity_check TBL
#
# This proc is used to verify that the full-text index is consistent with
@@ -46,6 +169,7 @@ proc fts3_integrity_check {tbl} {
fts3_read2 $tbl 1 A
foreach zTerm [array names A] {
+ #puts $zTerm
foreach doclist $A($zTerm) {
set docid 0
while {[string length $doclist]>0} {
@@ -97,7 +221,7 @@ proc fts3_integrity_check {tbl} {
set es "Error at docid=$iDoc col=$iCol pos=$pos. Index is missing"
lappend errors $es
} else {
- if {$C($iDoc,$iCol,$pos) != "$term"} {
+ if {[string compare $C($iDoc,$iCol,$pos) $term]} {
set es "Error at docid=$iDoc col=$iCol pos=$pos. Index "
append es "has \"$C($iDoc,$iCol,$pos)\", document has \"$term\""
lappend errors $es
@@ -233,7 +357,8 @@ proc fts3_readleaf {blob} {
set zTerm [string range $zPrev 0 [expr $nPrefix-1]]
append zTerm [gobble_string blob $nSuffix]
- set doclist [gobble_string blob [gobble_varint blob]]
+ set nDoclist [gobble_varint blob]
+ set doclist [gobble_string blob $nDoclist]
lappend terms $zTerm $doclist
set zPrev $zTerm
@@ -249,7 +374,9 @@ proc fts3_read2 {tbl where varname} {
FROM ${tbl}_segdir WHERE $where
ORDER BY level ASC, idx DESC
" {
- if {$start_block == 0} {
+ set c 0
+ binary scan $root c c
+ if {$c==0} {
foreach {t d} [fts3_readleaf $root] { lappend a($t) $d }
} else {
db eval " SELECT block
@@ -258,7 +385,6 @@ proc fts3_read2 {tbl where varname} {
ORDER BY blockid
" {
foreach {t d} [fts3_readleaf $block] { lappend a($t) $d }
-
}
}
}
diff --git a/test/fts3auto.test b/test/fts3auto.test
index 1c58a17..d5ab4ef 100644
--- a/test/fts3auto.test
+++ b/test/fts3auto.test
@@ -75,26 +75,27 @@ proc do_fts3query_test {tn args} {
}
}
- get_near_results $tbl $match $deferred aMatchinfo
+ get_near_results $tbl $match $deferred aHit
+ get_near_results $tbl [string map {AND OR} $match] $deferred aMatchinfo
set matchinfo_asc [list]
- foreach docid [lsort -integer -incr [array names aMatchinfo]] {
+ foreach docid [lsort -integer -incr [array names aHit]] {
lappend matchinfo_asc $docid $aMatchinfo($docid)
}
set matchinfo_desc [list]
- foreach docid [lsort -integer -decr [array names aMatchinfo]] {
+ foreach docid [lsort -integer -decr [array names aHit]] {
lappend matchinfo_desc $docid $aMatchinfo($docid)
}
- set title "(\"$match\" -> [llength [array names aMatchinfo]] rows)"
+ set title "(\"$match\" -> [llength [array names aHit]] rows)"
do_execsql_test $tn$title.1 "
SELECT docid FROM $tbl WHERE $tbl MATCH '$match' ORDER BY docid ASC
- " [lsort -integer -incr [array names aMatchinfo]]
+ " [lsort -integer -incr [array names aHit]]
do_execsql_test $tn$title.2 "
SELECT docid FROM $tbl WHERE $tbl MATCH '$match' ORDER BY docid DESC
- " [lsort -integer -decr [array names aMatchinfo]]
+ " [lsort -integer -decr [array names aHit]]
do_execsql_test $tn$title.3 "
SELECT docid, mit(matchinfo($tbl, 'x')) FROM $tbl
@@ -573,10 +574,10 @@ set chunkconfig [fts3_configure_incr_load 1 1]
foreach {tn create pending} {
1 "fts4(a, b)" 1
2 "fts4(a, b, order=ASC, prefix=1)" 1
- 3 "fts4(a, b, order=ASC, prefix=1,3)" 0
- 4 "fts4(a, b, order=DESC, prefix=2,4)" 0
- 5 "fts4(a, b, order=DESC, prefix=1)" 0
- 6 "fts4(a, b, order=ASC, prefix=1,3)" 0
+ 3 "fts4(a, b, order=ASC, prefix=\"1,3\")" 0
+ 4 "fts4(a, b, order=DESC, prefix=\"2,4\")" 0
+ 5 "fts4(a, b, order=DESC, prefix=\"1\")" 0
+ 6 "fts4(a, b, order=ASC, prefix=\"1,3\")" 0
} {
execsql [subst {
@@ -650,6 +651,7 @@ foreach {tn pending create} {
do_fts3query_test 6.$tn.2 t1 {b:G AND c:I}
do_fts3query_test 6.$tn.3 t1 {b:G NEAR c:I}
do_fts3query_test 6.$tn.4 t1 {a:C OR b:G OR c:K OR d:C}
+
do_fts3query_test 6.$tn.5 t1 {a:G OR b:G}
catchsql { COMMIT }
diff --git a/test/fts3defer.test b/test/fts3defer.test
index bc50874..4c8213d 100644
--- a/test/fts3defer.test
+++ b/test/fts3defer.test
@@ -489,5 +489,39 @@ do_execsql_test 4.2 {
SELECT * FROM x2 WHERE x2 MATCH 'a b c d e f g h i j k l m n o p q r s';
} {{a b c d e f g h i j k l m n o p q r s t u v w x y m}}
+set tokenizers {1 simple}
+ifcapable icu { lappend tokenizers 2 {icu en_US} }
+foreach {tn tokenizer} $tokenizers {
+ do_execsql_test 5.$tn.1 "
+ CREATE VIRTUAL TABLE x3 USING FTS4(a, b, TOKENIZE $tokenizer)
+ "
+ do_execsql_test 5.$tn.2 {
+ BEGIN;
+ INSERT INTO x3 VALUES('b b b b b b b b b b b', 'b b b b b b b b b b b b b');
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 SELECT * FROM x3;
+ INSERT INTO x3 VALUES('a b c', NULL);
+ INSERT INTO x3 VALUES('a x c', NULL);
+ COMMIT;
+
+ SELECT * FROM x3 WHERE x3 MATCH 'a b';
+ } {{a b c} {}}
+
+ do_execsql_test 5.$tn.3 { DROP TABLE x3 }
+}
finish_test
diff --git a/test/fts3prefix2.test b/test/fts3prefix2.test
new file mode 100644
index 0000000..e3da3b7
--- /dev/null
+++ b/test/fts3prefix2.test
@@ -0,0 +1,62 @@
+# 2012 January 25
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#*************************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script is testing the FTS3 module.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set testprefix fts3prefix2
+
+ifcapable !fts3 {
+ finish_test
+ return
+}
+
+do_execsql_test 1.0 { PRAGMA page_size = 512 }
+do_execsql_test 1.1 {
+ CREATE VIRTUAL TABLE t1 USING fts4(x, prefix="2,3");
+
+ BEGIN;
+ INSERT INTO t1 VALUES('T TX T TX T TX T TX T TX');
+ INSERT INTO t1 SELECT * FROM t1; -- 2
+ INSERT INTO t1 SELECT * FROM t1; -- 4
+ INSERT INTO t1 SELECT * FROM t1; -- 8
+ INSERT INTO t1 SELECT * FROM t1; -- 16
+ INSERT INTO t1 SELECT * FROM t1; -- 32
+ INSERT INTO t1 SELECT * FROM t1; -- 64
+ INSERT INTO t1 SELECT * FROM t1; -- 128
+ INSERT INTO t1 SELECT * FROM t1; -- 256
+ INSERT INTO t1 SELECT * FROM t1; -- 512
+ INSERT INTO t1 SELECT * FROM t1; -- 1024
+ INSERT INTO t1 SELECT * FROM t1; -- 2048
+ COMMIT;
+}
+
+do_execsql_test 1.2 {
+ INSERT INTO t1 SELECT * FROM t1 LIMIT 10;
+ INSERT INTO t1 SELECT * FROM t1 LIMIT 10;
+ INSERT INTO t1 SELECT * FROM t1 LIMIT 10;
+ DELETE FROM t1 WHERE docid > 5;
+}
+
+do_execsql_test 1.3 {
+ SELECT * FROM t1 WHERE t1 MATCH 'T*';
+} {
+ {T TX T TX T TX T TX T TX}
+ {T TX T TX T TX T TX T TX}
+ {T TX T TX T TX T TX T TX}
+ {T TX T TX T TX T TX T TX}
+ {T TX T TX T TX T TX T TX}
+}
+
+finish_test
+
diff --git a/test/fts4check.test b/test/fts4check.test
new file mode 100644
index 0000000..cc1d018
--- /dev/null
+++ b/test/fts4check.test
@@ -0,0 +1,155 @@
+# 2012 March 26
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#*************************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script is testing the FTS 'integrity-check' function,
+# used to check if the current FTS index accurately reflects the content
+# of the table.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+source $testdir/fts3_common.tcl
+set ::testprefix fts4check
+
+# If SQLITE_ENABLE_FTS3 is not defined, omit this file.
+ifcapable !fts3 {
+ finish_test
+ return
+}
+
+# Run the integrity-check on FTS table $tbl using database handle $db. If
+# the integrity-check passes, return "ok". Otherwise, throw an exception.
+#
+proc fts_integrity {db tbl} {
+ $db eval "INSERT INTO $tbl ($tbl) VALUES('integrity-check')"
+ return "ok"
+}
+
+#-------------------------------------------------------------------------
+# Test cases 1.*
+#
+# 1.0: Build a reasonably sized FTS table (5000 rows).
+#
+# 1.1: Run the integrity check code to check it passes.
+#
+# 1.2: Make a series of minor changes to the underlying FTS data structures
+# (e.g. delete or insert a row from the %_content table). Check that
+# this causes the integrity-check code to fail.
+#
+
+# Build an FTS table and check the integrity-check passes.
+#
+do_test 1.0 { fts3_build_db_1 5000 } {}
+do_test 1.1 { fts_integrity db t1 } {ok}
+
+# Mess around with the underlying tables. Check that this causes the
+# integrity-check test to fail.
+#
+foreach {tn disruption} {
+ 1 {
+ INSERT INTO t1_content(docid, c0x, c1y) VALUES(NULL, 'a', 'b');
+ }
+ 2 {
+ DELETE FROM t1_content WHERE docid = (SELECT max(docid) FROM t1_content);
+ }
+ 3 {
+ DELETE FROM t1_segdir WHERE level=0 AND idx=(
+ SELECT max(idx) FROM t1_segdir WHERE level=0
+ );
+ }
+} {
+ do_execsql_test 1.2.1.$tn "BEGIN; $disruption"
+ do_catchsql_test 1.2.2.$tn {
+ INSERT INTO t1 (t1) VALUES('integrity-check')
+ } {1 {database disk image is malformed}}
+ do_execsql_test 1.2.3.$tn "ROLLBACK"
+}
+
+do_test 1.3 { fts_integrity db t1 } {ok}
+
+#-------------------------------------------------------------------------
+# Test cases 2.*
+#
+# 2.0: Build a reasonably sized FTS table (20000 rows) that includes
+# prefix indexes.
+#
+# 2.1: Run the integrity check code to check it passes.
+#
+# 2.2: Make a series of minor changes to the underlying FTS data structures
+# (e.g. delete or insert a row from the %_content table). Check that
+# this causes the integrity-check code to fail.
+#
+
+do_test 2.0 { fts3_build_db_2 -extra {prefix="3,1"} 20000 } {}
+do_test 2.1 { fts_integrity db t2 } {ok}
+foreach {tn disruption} {
+ 1 {
+ INSERT INTO t2_content VALUES(NULL, 'xyz')
+ }
+ 3 {
+ DELETE FROM t2_segdir WHERE level=0 AND idx=(
+ SELECT max(idx) FROM t2_segdir WHERE level=1024
+ );
+ }
+} {
+ do_execsql_test 2.2.1.$tn "BEGIN; $disruption"
+ do_catchsql_test 2.2.2.$tn {
+ INSERT INTO t2 (t2) VALUES('integrity-check')
+ } {1 {database disk image is malformed}}
+ do_execsql_test 2.2.3.$tn "ROLLBACK"
+}
+
+
+#-------------------------------------------------------------------------
+# Test cases 3.*
+#
+# 3.0: Build a reasonably sized FTS table (5000 rows) that includes
+# prefix indexes and uses the languageid= feature.
+#
+# 3.1: Run the integrity check code to check it passes.
+#
+# 3.2: Make a series of minor changes to the underlying FTS data structures
+# (e.g. delete or insert a row from the %_content table). Check that
+# this causes the integrity-check code to fail.
+#
+do_test 3.0 {
+ reset_db
+ fts3_build_db_1 5000
+ execsql {
+ CREATE VIRTUAL TABLE t3 USING fts4(x, y, prefix="2,3", languageid=langid);
+ }
+ foreach docid [execsql {SELECT docid FROM t1 ORDER BY 1 ASC}] {
+ execsql {
+ INSERT INTO t3(x, y, langid)
+ SELECT x, y, (docid%9)*4 FROM t1 WHERE docid=$docid;
+ }
+ }
+} {}
+do_test 3.1 { fts_integrity db t3 } {ok}
+
+foreach {tn disruption} {
+ 1 {
+ INSERT INTO t3_content(c0x, c1y, langid) VALUES(NULL, 'a', 0);
+ }
+ 2 {
+ UPDATE t3_content SET langid=langid+1 WHERE rowid = (
+ SELECT max(rowid) FROM t3_content
+ )
+ }
+} {
+ do_execsql_test 3.2.1.$tn "BEGIN; $disruption"
+ do_catchsql_test 3.2.2.$tn {
+ INSERT INTO t3 (t3) VALUES('integrity-check')
+ } {1 {database disk image is malformed}}
+ do_execsql_test 3.2.3.$tn "ROLLBACK"
+}
+
+finish_test
diff --git a/test/fts4content.test b/test/fts4content.test
index 025b600..59c4199 100644
--- a/test/fts4content.test
+++ b/test/fts4content.test
@@ -43,6 +43,9 @@ ifcapable !fts3 {
# exist, the FTS table can still be used for INSERT and some
# SELECT statements.
#
+# 8.* - Test that if the content=xxx and prefix options are used together,
+# the 'rebuild' command still works.
+#
do_execsql_test 1.1.1 {
CREATE TABLE t1(a, b, c);
@@ -498,4 +501,25 @@ do_catchsql_test 7.2.4 {
SELECT * FROM ft9 WHERE ft9 MATCH 'N';
} {1 {SQL logic error or missing database}}
+#-------------------------------------------------------------------------
+# Test cases 8.*
+#
+do_execsql_test 8.1 {
+ CREATE TABLE t10(a, b);
+ INSERT INTO t10 VALUES(
+ 'abasia abasic abask', 'Abassin abastardize abatable');
+ INSERT INTO t10 VALUES(
+ 'abate abatement abater', 'abatis abatised abaton');
+ INSERT INTO t10 VALUES(
+ 'abator abattoir Abatua', 'abature abave abaxial');
+
+ CREATE VIRTUAL TABLE ft10 USING fts4(content=t10, prefix="2,4", a, b);
+}
+
+do_execsql_test 8.2 { SELECT * FROM ft10 WHERE a MATCH 'ab*'; }
+do_execsql_test 8.3 { INSERT INTO ft10(ft10) VALUES('rebuild'); }
+do_execsql_test 8.4 { SELECT rowid FROM ft10 WHERE a MATCH 'ab*'; } {1 2 3}
+do_execsql_test 8.5 { SELECT rowid FROM ft10 WHERE b MATCH 'abav*'; } {3}
+do_execsql_test 8.6 { SELECT rowid FROM ft10 WHERE ft10 MATCH 'abas*'; } {1}
+
finish_test
diff --git a/test/fts4langid.test b/test/fts4langid.test
new file mode 100644
index 0000000..843e11f
--- /dev/null
+++ b/test/fts4langid.test
@@ -0,0 +1,485 @@
+# 2012 March 01
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#*************************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script is testing the languageid=xxx FTS4 option.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set ::testprefix fts4content
+
+# If SQLITE_ENABLE_FTS3 is not defined, omit this file.
+ifcapable !fts3 {
+ finish_test
+ return
+}
+
+set ::testprefix fts4langid
+
+#---------------------------------------------------------------------------
+# Test plan:
+#
+# 1.* - Warm-body tests created for specific purposes during development.
+# Passing these doesn't really prove much.
+#
+# 2.1.* - Test that FTS queries only ever return rows associated with
+# the requested language.
+#
+# 2.2.* - Same as 2.1.*, after an 'optimize' command.
+#
+# 2.3.* - Same as 2.1.*, after a 'rebuild' command.
+#
+# 3.* - Tests with content= tables. Both where there is a real
+# underlying content table and where there is not.
+#
+# 4.* - Test that if one is provided, the tokenizer xLanguage method
+# is called to configure the tokenizer before tokenizing query
+# or document text.
+#
+# 5.* - Test the fts4aux table when the associated FTS4 table contains
+# multiple languages.
+#
+
+do_execsql_test 1.1 {
+ CREATE VIRTUAL TABLE t1 USING fts4(a, b, languageid=lang_id);
+}
+
+do_execsql_test 1.2 {
+ SELECT sql FROM sqlite_master WHERE name = 't1_content';
+} {{CREATE TABLE 't1_content'(docid INTEGER PRIMARY KEY, 'c0a', 'c1b', langid)}}
+
+do_execsql_test 1.3 {SELECT docid FROM t1} {}
+do_execsql_test 1.4 {SELECT lang_id FROM t1} {}
+
+do_execsql_test 1.5 {INSERT INTO t1(a, b) VALUES('aaa', 'bbb')}
+do_execsql_test 1.6 {SELECT lang_id FROM t1 } {0}
+
+do_execsql_test 1.7 {INSERT INTO t1(a, b, lang_id) VALUES('aaa', 'bbb', 4)}
+do_execsql_test 1.8 {SELECT lang_id FROM t1 } {0 4}
+
+do_execsql_test 1.9 {INSERT INTO t1(a, b, lang_id) VALUES('aaa', 'bbb', 'xyz')}
+do_execsql_test 1.10 {SELECT lang_id FROM t1} {0 4 0}
+
+do_execsql_test 1.11 {
+ CREATE VIRTUAL TABLE t2 USING fts4;
+ INSERT INTO t2 VALUES('abc');
+}
+do_execsql_test 1.12 { SELECT rowid FROM t2 WHERE content MATCH 'abc' } 1
+
+do_execsql_test 1.13 {
+ DROP TABLE t1;
+ CREATE VIRTUAL TABLE t1 USING fts4(languageid=lang_id);
+ INSERT INTO t1(content) VALUES('a b c');
+ INSERT INTO t1(content, lang_id) VALUES('a b c', 1);
+}
+
+do_execsql_test 1.14 {
+ SELECT rowid FROM t1 WHERE t1 MATCH 'b';
+} {1}
+do_execsql_test 1.15 {
+ SELECT rowid FROM t1 WHERE t1 MATCH 'b' AND lang_id = 0;
+} {1}
+
+do_execsql_test 1.16 {
+ SELECT rowid FROM t1 WHERE t1 MATCH 'b' AND lang_id = 1;
+} {2}
+
+do_catchsql_test 1.17 {
+ INSERT INTO t1(content, lang_id) VALUES('123', -1);
+} {1 {constraint failed}}
+
+do_execsql_test 1.18 {
+ DROP TABLE t1;
+ CREATE VIRTUAL TABLE t1 USING fts4(languageid=lang_id);
+ INSERT INTO t1(content, lang_id) VALUES('A', 13);
+ INSERT INTO t1(content, lang_id) VALUES('B', 13);
+ INSERT INTO t1(content, lang_id) VALUES('C', 13);
+ INSERT INTO t1(content, lang_id) VALUES('D', 13);
+ INSERT INTO t1(content, lang_id) VALUES('E', 13);
+ INSERT INTO t1(content, lang_id) VALUES('F', 13);
+ INSERT INTO t1(content, lang_id) VALUES('G', 13);
+ INSERT INTO t1(content, lang_id) VALUES('H', 13);
+ INSERT INTO t1(content, lang_id) VALUES('I', 13);
+ INSERT INTO t1(content, lang_id) VALUES('J', 13);
+ INSERT INTO t1(content, lang_id) VALUES('K', 13);
+ INSERT INTO t1(content, lang_id) VALUES('L', 13);
+ INSERT INTO t1(content, lang_id) VALUES('M', 13);
+ INSERT INTO t1(content, lang_id) VALUES('N', 13);
+ INSERT INTO t1(content, lang_id) VALUES('O', 13);
+ INSERT INTO t1(content, lang_id) VALUES('P', 13);
+ INSERT INTO t1(content, lang_id) VALUES('Q', 13);
+ INSERT INTO t1(content, lang_id) VALUES('R', 13);
+ INSERT INTO t1(content, lang_id) VALUES('S', 13);
+ SELECT rowid FROM t1 WHERE t1 MATCH 'A';
+} {}
+
+
+#-------------------------------------------------------------------------
+# Test cases 2.*
+#
+proc build_multilingual_db_1 {db} {
+ $db eval { CREATE VIRTUAL TABLE t2 USING fts4(x, y, languageid=l) }
+
+ set xwords [list zero one two three four five six seven eight nine ten]
+ set ywords [list alpha beta gamma delta epsilon zeta eta theta iota kappa]
+
+ for {set i 0} {$i < 1000} {incr i} {
+ set iLangid [expr $i%9]
+ set x ""
+ set y ""
+
+ set x [list]
+ lappend x [lindex $xwords [expr ($i / 1000) % 10]]
+ lappend x [lindex $xwords [expr ($i / 100) % 10]]
+ lappend x [lindex $xwords [expr ($i / 10) % 10]]
+ lappend x [lindex $xwords [expr ($i / 1) % 10]]
+
+ set y [list]
+ lappend y [lindex $ywords [expr ($i / 1000) % 10]]
+ lappend y [lindex $ywords [expr ($i / 100) % 10]]
+ lappend y [lindex $ywords [expr ($i / 10) % 10]]
+ lappend y [lindex $ywords [expr ($i / 1) % 10]]
+
+ $db eval { INSERT INTO t2(docid, x, y, l) VALUES($i, $x, $y, $iLangid) }
+ }
+
+ $db eval {
+ CREATE TABLE data(x, y, l);
+ INSERT INTO data(rowid, x, y, l) SELECT docid, x, y, l FROM t2;
+ }
+}
+
+proc rowid_list_set_langid {langid} {
+ set ::rowid_list_langid $langid
+}
+proc rowid_list {pattern} {
+ set langid $::rowid_list_langid
+ set res [list]
+ db eval {SELECT rowid, x, y FROM data WHERE l = $langid ORDER BY rowid ASC} {
+ if {[string match "*$pattern*" $x] || [string match "*$pattern*" $y]} {
+ lappend res $rowid
+ }
+ }
+ return $res
+}
+
+proc or_merge_list {list1 list2} {
+ set res [list]
+
+ set i1 0
+ set i2 0
+
+ set n1 [llength $list1]
+ set n2 [llength $list2]
+
+ while {$i1 < $n1 && $i2 < $n2} {
+ set e1 [lindex $list1 $i1]
+ set e2 [lindex $list2 $i2]
+
+ if {$e1==$e2} {
+ lappend res $e1
+ incr i1
+ incr i2
+ } elseif {$e1 < $e2} {
+ lappend res $e1
+ incr i1
+ } else {
+ lappend res $e2
+ incr i2
+ }
+ }
+
+ concat $res [lrange $list1 $i1 end] [lrange $list2 $i2 end]
+}
+
+proc or_merge_lists {args} {
+ set res [lindex $args 0]
+ for {set i 1} {$i < [llength $args]} {incr i} {
+ set res [or_merge_list $res [lindex $args $i]]
+ }
+ set res
+}
+
+proc and_merge_list {list1 list2} {
+ foreach i $list2 { set a($i) 1 }
+ set res [list]
+ foreach i $list1 {
+ if {[info exists a($i)]} {lappend res $i}
+ }
+ set res
+}
+
+
+proc and_merge_lists {args} {
+ set res [lindex $args 0]
+ for {set i 1} {$i < [llength $args]} {incr i} {
+ set res [and_merge_list $res [lindex $args $i]]
+ }
+ set res
+}
+
+proc filter_list {list langid} {
+ set res [list]
+ foreach i $list {
+ if {($i % 9) == $langid} {lappend res $i}
+ }
+ set res
+}
+
+do_test 2.0 {
+ reset_db
+ build_multilingual_db_1 db
+} {}
+
+proc do_test_query1 {tn query res_script} {
+ for {set langid 0} {$langid < 10} {incr langid} {
+ rowid_list_set_langid $langid
+ set res [eval $res_script]
+
+ set actual [
+ execsql {SELECT docid FROM t2 WHERE t2 MATCH $query AND l = $langid}
+ ]
+ do_test $tn.$langid [list set {} $actual] $res
+ }
+}
+
+# Run some queries.
+do_test_query1 2.1.1 {delta} { rowid_list delta }
+do_test_query1 2.1.2 {"zero one two"} { rowid_list "zero one two" }
+do_test_query1 2.1.3 {zero one two} {
+ and_merge_lists [rowid_list zero] [rowid_list one] [rowid_list two]
+}
+do_test_query1 2.1.4 {"zero one" OR "one two"} {
+ or_merge_lists [rowid_list "zero one"] [rowid_list "one two"]
+}
+
+# Now try the same tests as above, but after running the 'optimize'
+# command on the FTS table.
+#
+do_execsql_test 2.2 {
+ INSERT INTO t2(t2) VALUES('optimize');
+ SELECT count(*) FROM t2_segdir;
+} {9}
+do_test_query1 2.2.1 {delta} { rowid_list delta }
+do_test_query1 2.2.2 {"zero one two"} { rowid_list "zero one two" }
+do_test_query1 2.2.3 {zero one two} {
+ and_merge_lists [rowid_list zero] [rowid_list one] [rowid_list two]
+}
+do_test_query1 2.2.4 {"zero one" OR "one two"} {
+ or_merge_lists [rowid_list "zero one"] [rowid_list "one two"]
+}
+
+# And rebuild.
+#
+do_test 2.3 {
+ reset_db
+ build_multilingual_db_1 db
+ execsql { INSERT INTO t2(t2) VALUES('rebuild') }
+} {}
+do_test_query1 2.3.1 {delta} { rowid_list delta }
+do_test_query1 2.3.2 {"zero one two"} { rowid_list "zero one two" }
+do_test_query1 2.3.3 {zero one two} {
+ and_merge_lists [rowid_list zero] [rowid_list one] [rowid_list two]
+}
+do_test_query1 2.3.4 {"zero one" OR "one two"} {
+ or_merge_lists [rowid_list "zero one"] [rowid_list "one two"]
+}
+
+#-------------------------------------------------------------------------
+# Test cases 3.*
+#
+do_test 3.0 {
+ reset_db
+ build_multilingual_db_1 db
+ execsql {
+ CREATE TABLE t3_data(l, x, y);
+ INSERT INTO t3_data(rowid, l, x, y) SELECT docid, l, x, y FROM t2;
+ DROP TABLE t2;
+ }
+} {}
+do_execsql_test 3.1 {
+ CREATE VIRTUAL TABLE t2 USING fts4(content=t3_data, languageid=l);
+ INSERT INTO t2(t2) VALUES('rebuild');
+}
+
+do_test_query1 3.1.1 {delta} { rowid_list delta }
+do_test_query1 3.1.2 {"zero one two"} { rowid_list "zero one two" }
+do_test_query1 3.1.3 {zero one two} {
+ and_merge_lists [rowid_list zero] [rowid_list one] [rowid_list two]
+}
+do_test_query1 3.1.4 {"zero one" OR "one two"} {
+ or_merge_lists [rowid_list "zero one"] [rowid_list "one two"]
+}
+
+do_execsql_test 3.2.1 {
+ DROP TABLE t2;
+ CREATE VIRTUAL TABLE t2 USING fts4(x, y, languageid=l, content=nosuchtable);
+}
+
+do_execsql_test 3.2.2 {
+ INSERT INTO t2(docid, x, y, l) SELECT rowid, x, y, l FROM t3_data;
+}
+
+do_execsql_test 3.2.3 {
+ DROP TABLE t3_data;
+}
+
+do_test_query1 3.3.1 {delta} { rowid_list delta }
+do_test_query1 3.3.2 {"zero one two"} { rowid_list "zero one two" }
+do_test_query1 3.3.3 {zero one two} {
+ and_merge_lists [rowid_list zero] [rowid_list one] [rowid_list two]
+}
+do_test_query1 3.3.4 {"zero one" OR "one two"} {
+ or_merge_lists [rowid_list "zero one"] [rowid_list "one two"]
+}
+
+#-------------------------------------------------------------------------
+# Test cases 4.*
+#
+proc build_multilingual_db_2 {db} {
+ $db eval {
+ CREATE VIRTUAL TABLE t4 USING fts4(
+ tokenize=testtokenizer,
+ languageid=lid
+ );
+ }
+ for {set i 0} {$i < 50} {incr i} {
+ execsql {
+ INSERT INTO t4(docid, content, lid) VALUES($i, 'The Quick Brown Fox', $i)
+ }
+ }
+}
+
+do_test 4.1.0 {
+ reset_db
+ set ptr [fts3_test_tokenizer]
+ execsql { SELECT fts3_tokenizer('testtokenizer', $ptr) }
+ build_multilingual_db_2 db
+} {}
+do_execsql_test 4.1.1 {
+ SELECT docid FROM t4 WHERE t4 MATCH 'quick';
+} {0}
+do_execsql_test 4.1.2 {
+ SELECT docid FROM t4 WHERE t4 MATCH 'quick' AND lid=1;
+} {}
+do_execsql_test 4.1.3 {
+ SELECT docid FROM t4 WHERE t4 MATCH 'Quick' AND lid=1;
+} {1}
+for {set i 0} {$i < 50} {incr i} {
+ do_execsql_test 4.1.4.$i {
+ SELECT count(*) FROM t4 WHERE t4 MATCH 'fox' AND lid=$i;
+ } [expr 0==($i%2)]
+}
+do_catchsql_test 4.1.5 {
+ INSERT INTO t4(content, lid) VALUES('hello world', 101)
+} {1 {SQL logic error or missing database}}
+
+#-------------------------------------------------------------------------
+# Test cases 5.*
+#
+# The following test cases are designed to detect a 32-bit overflow bug
+# that existed at one point.
+#
+proc build_multilingual_db_3 {db} {
+ $db eval {
+ CREATE VIRTUAL TABLE t5 USING fts4(languageid=lid);
+ }
+ set languages [list 0 1 2 [expr 1<<30]]
+
+ foreach lid $languages {
+ execsql {
+ INSERT INTO t5(docid, content, lid) VALUES(
+ $lid, 'My language is ' || $lid, $lid
+ )
+ }
+ }
+}
+
+do_test 5.1.0 {
+ reset_db
+ build_multilingual_db_3 db
+} {}
+
+do_execsql_test 5.1.1 {
+ SELECT level FROM t5_segdir;
+} [list 0 1024 2048 [expr 1<<40]]
+
+do_execsql_test 5.1.2 {SELECT docid FROM t5 WHERE t5 MATCH 'language'} 0
+foreach langid [list 0 1 2 [expr 1<<30]] {
+ do_execsql_test 5.2.$langid {
+ SELECT docid FROM t5 WHERE t5 MATCH 'language' AND lid = $langid
+ } $langid
+}
+
+set lid [expr 1<<30]
+do_execsql_test 5.3.1 {
+ CREATE VIRTUAL TABLE t6 USING fts4(languageid=lid);
+ INSERT INTO t6 VALUES('I belong to language 0!');
+}
+do_test 5.3.2 {
+ for {set i 0} {$i < 20} {incr i} {
+ execsql {
+ INSERT INTO t6(content, lid) VALUES(
+ 'I (row '||$i||') belong to langauge N!', $lid
+ );
+ }
+ }
+ execsql { SELECT docid FROM t6 WHERE t6 MATCH 'belong' }
+} {1}
+
+do_test 5.3.3 {
+ execsql { SELECT docid FROM t6 WHERE t6 MATCH 'belong' AND lid=$lid}
+} {2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21}
+
+do_execsql_test 5.3.4 { INSERT INTO t6(t6) VALUES('optimize') } {}
+do_execsql_test 5.3.5 { SELECT docid FROM t6 WHERE t6 MATCH 'belong' } {1}
+do_execsql_test 5.3.6 {
+ SELECT docid FROM t6 WHERE t6 MATCH 'belong' AND lid=$lid
+} {2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21}
+
+
+set lid [expr 1<<30]
+foreach lid [list 4 [expr 1<<30]] {
+ do_execsql_test 5.4.$lid.1 {
+ DELETE FROM t6;
+ SELECT count(*) FROM t6_segdir;
+ SELECT count(*) FROM t6_segments;
+ } {0 0}
+ do_execsql_test 5.4.$lid.2 {
+ INSERT INTO t6(content, lid) VALUES('zero zero zero', $lid);
+ INSERT INTO t6(content, lid) VALUES('zero zero one', $lid);
+ INSERT INTO t6(content, lid) VALUES('zero one zero', $lid);
+ INSERT INTO t6(content, lid) VALUES('zero one one', $lid);
+ INSERT INTO t6(content, lid) VALUES('one zero zero', $lid);
+ INSERT INTO t6(content, lid) VALUES('one zero one', $lid);
+ INSERT INTO t6(content, lid) VALUES('one one zero', $lid);
+ INSERT INTO t6(content, lid) VALUES('one one one', $lid);
+
+ SELECT docid FROM t6 WHERE t6 MATCH '"zero zero"' AND lid=$lid;
+ } {1 2 5}
+
+ do_execsql_test 5.4.$lid.3 {
+ SELECT count(*) FROM t6_segdir;
+ SELECT count(*) FROM t6_segments;
+ } {8 0}
+
+ do_execsql_test 5.4.$lid.4 {
+ INSERT INTO t6(t6) VALUES('merge=100,3');
+ INSERT INTO t6(t6) VALUES('merge=100,3');
+ SELECT docid FROM t6 WHERE t6 MATCH '"zero zero"' AND lid=$lid;
+ } {1 2 5}
+
+ do_execsql_test 5.4.$lid.5 {
+ SELECT count(*) FROM t6_segdir;
+ SELECT count(*) FROM t6_segments;
+ } {4 4}
+}
+finish_test
diff --git a/test/fts4merge.test b/test/fts4merge.test
new file mode 100644
index 0000000..fabb651
--- /dev/null
+++ b/test/fts4merge.test
@@ -0,0 +1,341 @@
+# 2012 March 06
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#*************************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script is testing the incremental merge function.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+source $testdir/fts3_common.tcl
+
+# If SQLITE_ENABLE_FTS3 is not defined, omit this file.
+ifcapable !fts3 {
+ finish_test
+ return
+}
+
+proc fts3_integrity_check {tbl} {
+ db eval "INSERT INTO $tbl ($tbl) VALUES('integrity-check')"
+ return "ok"
+}
+
+foreach mod {fts3 fts4} {
+ set ::testprefix fts4merge-$mod
+ reset_db
+
+ #-------------------------------------------------------------------------
+ # Test cases 1.*
+ #
+ do_test 1.0 { fts3_build_db_1 -module $mod 1004 } {}
+ do_test 1.1 { fts3_integrity_check t1 } {ok}
+ do_execsql_test 1.1 {
+ SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level
+ } {
+ 0 {0 1 2 3 4 5 6 7 8 9 10 11}
+ 1 {0 1 2 3 4 5 6 7 8 9 10 11 12 13}
+ 2 {0 1 2}
+ }
+
+ for {set i 0} {$i<20} {incr i} {
+ do_execsql_test 1.2.$i.1 { INSERT INTO t1(t1) VALUES('merge=1') }
+ do_test 1.2.$i.2 { fts3_integrity_check t1 } ok
+ do_execsql_test 1.2.$i.3 {
+ SELECT docid FROM t1 WHERE t1 MATCH 'zero one two three'
+ } {123 132 213 231 312 321}
+ }
+
+ do_execsql_test 1.3 {
+ SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level
+ } {
+ 0 {0 1 2 3}
+ 1 {0 1 2 3 4 5 6}
+ 2 {0 1 2 3}
+ }
+
+ for {set i 0} {$i<100} {incr i} {
+ do_execsql_test 1.4.$i { INSERT INTO t1(t1) VALUES('merge=1,4') }
+ do_test 1.4.$i.2 { fts3_integrity_check t1 } ok
+ do_execsql_test 1.4.$i.3 {
+ SELECT docid FROM t1 WHERE t1 MATCH 'zero one two three'
+ } {123 132 213 231 312 321}
+ }
+
+ do_execsql_test 1.5 {
+ SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level
+ } {
+ 2 {0 1}
+ 3 0
+ }
+
+ #-------------------------------------------------------------------------
+ # Test cases 2.* test that errors in the xxx part of the 'merge=xxx' are
+ # handled correctly.
+ #
+ do_execsql_test 2.0 "CREATE VIRTUAL TABLE t2 USING $mod"
+
+ foreach {tn arg} {
+ 1 {merge=abc}
+ 2 {merge=%%%}
+ 3 {merge=,}
+ 4 {merge=5,}
+ 5 {merge=6,%}
+ 6 {merge=6,six}
+ 7 {merge=6,1}
+ 8 {merge=6,0}
+ } {
+ do_catchsql_test 2.$tn {
+ INSERT INTO t2(t2) VALUES($arg);
+ } {1 {SQL logic error or missing database}}
+ }
+
+ #-------------------------------------------------------------------------
+ # Test cases 3.*
+ #
+ do_test 3.0 {
+ reset_db
+ execsql { PRAGMA page_size = 512 }
+ fts3_build_db_2 -module $mod 30040
+ } {}
+ do_test 3.1 { fts3_integrity_check t2 } {ok}
+
+ do_execsql_test 3.2 {
+ SELECT level, group_concat(idx, ' ') FROM t2_segdir GROUP BY level
+ } {
+ 0 {0 1 2 3 4 5 6}
+ 1 {0 1 2 3 4}
+ 2 {0 1 2 3 4}
+ 3 {0 1 2 3 4 5 6}
+ }
+
+ do_execsql_test 3.3 {
+ INSERT INTO t2(t2) VALUES('merge=1000000,2');
+ SELECT level, group_concat(idx, ' ') FROM t2_segdir GROUP BY level
+ } {
+ 0 0
+ 2 0
+ 3 0
+ 4 0
+ 6 0
+ }
+
+ #-------------------------------------------------------------------------
+ # Test cases 4.*
+ #
+ reset_db
+ do_execsql_test 4.1 "
+ PRAGMA page_size = 512;
+ CREATE VIRTUAL TABLE t4 USING $mod;
+ PRAGMA main.page_size;
+ " {512}
+
+ do_test 4.2 {
+ foreach x {a c b d e f g h i j k l m n o p} {
+ execsql "INSERT INTO t4 VALUES('[string repeat $x 600]')"
+ }
+ execsql {SELECT level, group_concat(idx, ' ') FROM t4_segdir GROUP BY level}
+ } {0 {0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15}}
+
+ foreach {tn expect} {
+ 1 "0 {0 1 2 3 4 5 6 7 8 9 10 11 12 13} 1 0"
+ 2 "0 {0 1 2 3 4 5 6 7 8 9 10 11 12} 1 0"
+ 3 "0 {0 1 2 3 4 5 6 7 8 9 10 11} 1 0"
+ 4 "0 {0 1 2 3 4 5 6 7 8 9 10} 1 0"
+ 5 "0 {0 1 2 3 4 5 6 7 8 9} 1 0"
+ 6 "0 {0 1 2 3 4 5 6 7 8} 1 0"
+ 7 "0 {0 1 2 3 4 5 6 7} 1 0"
+ 8 "0 {0 1 2 3 4 5 6} 1 0"
+ 9 "0 {0 1 2 3 4 5} 1 0"
+ } {
+ do_execsql_test 4.3.$tn {
+ INSERT INTO t4(t4) VALUES('merge=1,16');
+ SELECT level, group_concat(idx, ' ') FROM t4_segdir GROUP BY level;
+ } $expect
+ }
+
+ do_execsql_test 4.4.1 {
+ SELECT quote(value) FROM t4_stat WHERE rowid=1
+ } {X'0006'}
+
+ do_execsql_test 4.4.2 {
+ DELETE FROM t4_stat WHERE rowid=1;
+ INSERT INTO t4(t4) VALUES('merge=1,12');
+ SELECT level, group_concat(idx, ' ') FROM t4_segdir GROUP BY level;
+ } "0 {0 1 2 3 4 5} 1 0"
+
+
+ #-------------------------------------------------------------------------
+ # Test cases 5.*
+ #
+ # Test that if a crisis-merge occurs that disrupts an ongoing incremental
+ # merge, the next call to "merge=A,B" identifies this and starts a new
+ # incremental merge. There are two scenarios:
+ #
+ # * There are fewer segments on the input level than the disrupted
+ # incremental merge operated on, or
+ #
+ # * Sufficient segments exist on the input level but the segments
+ # contain keys smaller than the largest key in the potential output
+ # segment.
+ #
+ do_test 5.1 {
+ reset_db
+ fts3_build_db_1 -module $mod 1000
+ } {}
+
+ do_execsql_test 5.2 {
+ SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level;
+ } {
+ 0 {0 1 2 3 4 5 6 7}
+ 1 {0 1 2 3 4 5 6 7 8 9 10 11 12 13}
+ 2 {0 1 2}
+ }
+
+ do_execsql_test 5.3 {
+ INSERT INTO t1(t1) VALUES('merge=1,5');
+ INSERT INTO t1(t1) VALUES('merge=1,5');
+ SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level;
+ } {
+ 0 {0 1 2}
+ 1 {0 1 2 3 4 5 6 7 8 9 10 11 12 13 14}
+ 2 {0 1 2 3}
+ }
+
+ do_execsql_test 5.4 {SELECT quote(value) from t1_stat WHERE rowid=1} {X'0105'}
+ do_test 5.5 {
+ foreach docid [execsql {SELECT docid FROM t1}] {
+ execsql {INSERT INTO t1 SELECT * FROM t1 WHERE docid=$docid}
+ }
+ } {}
+
+ do_execsql_test 5.6 {SELECT quote(value) from t1_stat WHERE rowid=1} {X'0105'}
+
+ do_execsql_test 5.7 {
+ SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level;
+ SELECT quote(value) from t1_stat WHERE rowid=1;
+ } {
+ 0 {0 1 2 3 4 5 6 7 8 9 10}
+ 1 {0 1 2 3 4 5 6 7 8 9 10 11 12}
+ 2 {0 1 2 3 4 5 6 7}
+ X'0105'
+ }
+
+ do_execsql_test 5.8 {
+ INSERT INTO t1(t1) VALUES('merge=1,6');
+ INSERT INTO t1(t1) VALUES('merge=1,6');
+ SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level;
+ SELECT quote(value) from t1_stat WHERE rowid=1;
+ } {
+ 0 {0 1 2 3 4}
+ 1 {0 1 2 3 4 5 6 7 8 9 10 11 12 13}
+ 2 {0 1 2 3 4 5 6 7 8} X'0106'
+ }
+
+ do_test 5.8.1 { fts3_integrity_check t1 } ok
+
+ do_test 5.9 {
+ set L [expr 16*16*7 + 16*3 + 12]
+ foreach docid [execsql {
+ SELECT docid FROM t1 UNION ALL SELECT docid FROM t1 LIMIT $L
+ }] {
+ execsql {INSERT INTO t1 SELECT * FROM t1 WHERE docid=$docid}
+ }
+ } {}
+
+ do_execsql_test 5.10 {
+ SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level;
+ SELECT quote(value) from t1_stat WHERE rowid=1;
+ } {
+ 0 0 1 {0 1} 2 0 3 0 X'0106'
+ }
+
+ do_execsql_test 5.11 {
+ INSERT INTO t1(t1) VALUES('merge=1,6');
+ SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level;
+ SELECT quote(value) from t1_stat WHERE rowid=1;
+ } {
+ 0 0 1 {0 1} 2 0 3 0 X''
+ }
+
+ #-------------------------------------------------------------------------
+ # Test cases 6.*
+ #
+ # At one point the following test caused an assert() to fail (because the
+ # second 'merge=1,2' operation below actually "merges" a single input
+ # segment, which was unexpected).
+ #
+ do_test 6.1 {
+ reset_db
+ set a [string repeat a 900]
+ set b [string repeat b 900]
+ set c [string repeat c 900]
+ set d [string repeat d 900]
+
+ execsql "CREATE VIRTUAL TABLE t1 USING $mod"
+ execsql {
+ BEGIN;
+ INSERT INTO t1 VALUES($a);
+ INSERT INTO t1 VALUES($b);
+ COMMIT;
+ BEGIN;
+ INSERT INTO t1 VALUES($c);
+ INSERT INTO t1 VALUES($d);
+ COMMIT;
+ }
+
+ execsql {
+ INSERT INTO t1(t1) VALUES('merge=1,2');
+ INSERT INTO t1(t1) VALUES('merge=1,2');
+ }
+ } {}
+
+ #-------------------------------------------------------------------------
+ # Test cases 7.*
+ #
+ # Test that the value returned by sqlite3_total_changes() increases by
+ # 1 following a no-op "merge=A,B", or by more than 1 if actual work is
+ # performed.
+ #
+ do_test 7.0 {
+ reset_db
+ fts3_build_db_1 -module $mod 1000
+ } {}
+
+ do_execsql_test 7.1 {
+ SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level
+ } {
+ 0 {0 1 2 3 4 5 6 7}
+ 1 {0 1 2 3 4 5 6 7 8 9 10 11 12 13}
+ 2 {0 1 2}
+ }
+ do_test 7.2 {
+ set x [db total_changes]
+ execsql { INSERT INTO t1(t1) VALUES('merge=2,10') }
+ expr { ([db total_changes] - $x)>1 }
+ } {1}
+ do_test 7.3 {
+ set x [db total_changes]
+ execsql { INSERT INTO t1(t1) VALUES('merge=200,10') }
+ expr { ([db total_changes] - $x)>1 }
+ } {1}
+ do_test 7.4 {
+ set x [db total_changes]
+ execsql { INSERT INTO t1(t1) VALUES('merge=200,10') }
+ expr { ([db total_changes] - $x)>1 }
+ } {0}
+ do_test 7.5 {
+ set x [db total_changes]
+ execsql { INSERT INTO t1(t1) VALUES('merge=200,10') }
+ expr { ([db total_changes] - $x)>1 }
+ } {0}
+
+}
+
+finish_test
diff --git a/test/fts4merge2.test b/test/fts4merge2.test
new file mode 100644
index 0000000..308b692
--- /dev/null
+++ b/test/fts4merge2.test
@@ -0,0 +1,38 @@
+
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+source $testdir/fts3_common.tcl
+source $testdir/malloc_common.tcl
+set ::testprefix fts4merge2
+
+# If SQLITE_ENABLE_FTS3 is not defined, omit this file.
+ifcapable !fts3 {
+ finish_test
+ return
+}
+
+do_test 1.0 {
+ fts3_build_db_1 1000
+ faultsim_save_and_close
+} {}
+
+do_faultsim_test 1.1 -faults oom-* -prep {
+ faultsim_restore_and_reopen
+} -body {
+ execsql { INSERT INTO t1(t1) VALUES('merge=32,4') }
+} -test {
+ faultsim_test_result {0 {}}
+}
+
+do_faultsim_test 1.2 -faults oom-t* -prep {
+ if {$iFail<100} {set iFail 803}
+ faultsim_restore_and_reopen
+} -body {
+ execsql { INSERT INTO t1(t1) VALUES('merge=1,2') }
+ execsql { INSERT INTO t1(t1) VALUES('merge=1,2') }
+} -test {
+ faultsim_test_result {0 {}}
+}
+
+finish_test
diff --git a/test/fts4merge3.test b/test/fts4merge3.test
new file mode 100644
index 0000000..329b4d2
--- /dev/null
+++ b/test/fts4merge3.test
@@ -0,0 +1,105 @@
+# 2012 March 06
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#*************************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this script is testing the incremental merge function.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+source $testdir/fts3_common.tcl
+source $testdir/lock_common.tcl
+source $testdir/bc_common.tcl
+
+set ::testprefix fts4merge3
+
+ifcapable !fts3 {
+ finish_test
+ return
+}
+
+if {"" == [bc_find_binaries backcompat.test]} {
+ finish_test
+ return
+}
+
+db close
+do_all_bc_test {
+
+ sql2 { PRAGMA page_size = 512 }
+ if { 0==[catch { sql2 { CREATE VIRTUAL TABLE x USING fts4 } } ] } {
+
+ # Build a large database.
+ set msg "this takes around 12 seconds"
+ do_test "1.1 ($msg)" { fts3_build_db_2 20000 } {}
+
+ # Run some queries on it, using the old and new versions.
+ do_test 1.2 { sql1 "SELECT docid FROM t2 WHERE t2 MATCH 'abc'" } {1485}
+ do_test 1.3 { sql2 "SELECT docid FROM t2 WHERE t2 MATCH 'abc'" } {1485}
+
+ do_test 1.4 {
+ set x [sql2 "PRAGMA page_count"]
+ expr {$x>=1284 && $x<=1286}
+ } {1}
+ do_test 1.5 { sql2 {
+ SELECT level, count(*) FROM t2_segdir GROUP BY level ORDER BY 1
+ } } [list 0 15 1 1 2 14 3 4]
+
+ # Run some incr-merge operations on the db.
+ for {set i 0} {$i<10} {incr i} {
+ do_test 1.6.$i.1 { sql1 { INSERT INTO t2(t2) VALUES('merge=2,2') } } {}
+ do_test 1.6.$i.2 {
+ sql2 "SELECT docid FROM t2 WHERE t2 MATCH 'abc'"
+ } {1485}
+ }
+
+ do_test 1.7 { sql2 {
+ SELECT level, count(*) FROM t2_segdir GROUP BY level ORDER BY 1
+ } } [list 0 1 2 18 3 5]
+
+ # Using the old connection, insert many rows.
+ do_test 1.8 {
+ for {set i 0} {$i < 1500} {incr i} {
+ sql2 "INSERT INTO t2 SELECT content FROM t2 WHERE docid = $i"
+ }
+ } {}
+
+ do_test 1.9 { sql2 {
+ SELECT level, count(*) FROM t2_segdir GROUP BY level ORDER BY 1
+ } } [list 0 13 1 13 2 5 3 6]
+
+ # Run a big incr-merge operation on the db.
+ do_test 1.10 { sql1 { INSERT INTO t2(t2) VALUES('merge=2000,2') } } {}
+ do_test 1.11 {
+ sql2 "SELECT docid FROM t2 WHERE t2 MATCH 'abc'"
+ } {1485 21485}
+
+ do_test 1.12 {
+ for {set i 0} {$i < 1500} {incr i} {
+ sql2 "INSERT INTO t2 SELECT content FROM t2 WHERE docid = $i"
+ }
+ } {}
+ do_test 1.13 {
+ sql2 "SELECT docid FROM t2 WHERE t2 MATCH 'abc'"
+ } {1485 21485 22985}
+
+ do_test 1.14 {
+ sql2 "INSERT INTO t2(t2) VALUES('optimize')"
+ sql2 "SELECT docid FROM t2 WHERE t2 MATCH 'abc'"
+ } {1485 21485 22985}
+
+ do_test 1.15 { sql2 {
+ SELECT level, count(*) FROM t2_segdir GROUP BY level ORDER BY 1
+ } } {6 1}
+ }
+}
+
+
+finish_test
diff --git a/test/func.test b/test/func.test
index eef0543..ba1ea02 100644
--- a/test/func.test
+++ b/test/func.test
@@ -1247,4 +1247,48 @@ do_test func-28.1 {
}
} {1 {unknown function: nosuchfunc()}}
+# Verify that the length() and typeof() functions do not actually load
+# the content of their argument.
+#
+do_test func-29.1 {
+ db eval {
+ CREATE TABLE t29(id INTEGER PRIMARY KEY, x, y);
+ INSERT INTO t29 VALUES(1, 2, 3), (2, NULL, 4), (3, 4.5, 5);
+ INSERT INTO t29 VALUES(4, randomblob(1000000), 6);
+ INSERT INTO t29 VALUES(5, "hello", 7);
+ }
+ db close
+ sqlite3 db test.db
+ sqlite3_db_status db CACHE_MISS 1
+ db eval {SELECT typeof(x), length(x), typeof(y) FROM t29 ORDER BY id}
+} {integer 1 integer null {} integer real 3 integer blob 1000000 integer text 5 integer}
+do_test func-29.2 {
+ set x [lindex [sqlite3_db_status db CACHE_MISS 1] 1]
+ if {$x<5} {set x 1}
+ set x
+} {1}
+do_test func-29.3 {
+ db close
+ sqlite3 db test.db
+ sqlite3_db_status db CACHE_MISS 1
+ db eval {SELECT typeof(+x) FROM t29 ORDER BY id}
+} {integer null real blob text}
+do_test func-29.4 {
+ set x [lindex [sqlite3_db_status db CACHE_MISS 1] 1]
+ if {$x>100} {set x many}
+ set x
+} {many}
+do_test func-29.5 {
+ db close
+ sqlite3 db test.db
+ sqlite3_db_status db CACHE_MISS 1
+ db eval {SELECT sum(length(x)) FROM t29}
+} {1000009}
+do_test func-29.6 {
+ set x [lindex [sqlite3_db_status db CACHE_MISS 1] 1]
+ if {$x<5} {set x 1}
+ set x
+} {1}
+
+
finish_test
diff --git a/test/fuzz-oss1.test b/test/fuzz-oss1.test
new file mode 100644
index 0000000..08bc670
--- /dev/null
+++ b/test/fuzz-oss1.test
@@ -0,0 +1,2001 @@
+# 2012 May 21
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+# NB: Portions of this file are extracted from open-source projects
+# covered by permissive licenses. Use of this file for testing is clearly
+# allowed. However, do not incorporate the text of this one file into
+# end-products without checking the licenses on the open-source projects
+# from which this code was extracted. This warning applies to this one
+# file only - not the bulk of the SQLite source code and tests.
+#
+#***********************************************************************
+#
+# This file contains large and complex schemas obtained from open-source
+# software projects. The schemas are parsed just to make sure that nothing
+# breaks in the parser logic.
+#
+# These tests merely verify that the parse occurs without error.
+# No attempt is made to verify correct operation of the resulting schema
+# and statements.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Schema and query extracted from Skrooge.org.
+#
+do_test fuzz-oss1-skrooge {
+ db eval {
+CREATE TABLE parameters (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,t_uuid_parent TEXT NOT NULL DEFAULT '',t_name TEXT NOT NULL,t_value TEXT NOT NULL DEFAULT '',b_blob BLOB,d_lastmodifdate DATE NOT NULL DEFAULT CURRENT_TIMESTAMP,i_tmp INTEGER NOT NULL DEFAULT 0);
+CREATE TABLE doctransaction (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,t_name TEXT NOT NULL,t_mode VARCHAR(1) DEFAULT 'U' CHECK (t_mode IN ('U', 'R')),d_date DATE NOT NULL,t_savestep VARCHAR(1) DEFAULT 'N' CHECK (t_savestep IN ('Y', 'N')),i_parent INTEGER, t_refreshviews VARCHAR(1) DEFAULT 'Y' CHECK (t_refreshviews IN ('Y', 'N')));
+CREATE TABLE doctransactionitem (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, rd_doctransaction_id INTEGER NOT NULL,i_object_id INTEGER NOT NULL,t_object_table TEXT NOT NULL,t_action VARCHAR(1) DEFAULT 'I' CHECK (t_action IN ('I', 'U', 'D')),t_sqlorder TEXT NOT NULL DEFAULT '');
+CREATE TABLE doctransactionmsg (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, rd_doctransaction_id INTEGER NOT NULL,t_message TEXT NOT NULL DEFAULT '',t_popup VARCHAR(1) DEFAULT 'Y' CHECK (t_popup IN ('Y', 'N')));
+CREATE TABLE unit(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,t_name TEXT NOT NULL,t_symbol TEXT NOT NULL DEFAULT '',t_country TEXT NOT NULL DEFAULT '',t_type VARCHAR(1) NOT NULL DEFAULT 'C' CHECK (t_type IN ('1', '2', 'C', 'S', 'I', 'O')),t_internet_code TEXT NOT NULL DEFAULT '',i_nbdecimal INT NOT NULL DEFAULT 2,rd_unit_id INTEGER NOT NULL DEFAULT 0, t_source TEXT NOT NULL DEFAULT '');
+CREATE TABLE unitvalue(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,rd_unit_id INTEGER NOT NULL,d_date DATE NOT NULL,f_quantity FLOAT NOT NULL CHECK (f_quantity>=0));
+CREATE TABLE bank (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,t_name TEXT NOT NULL DEFAULT '',t_bank_number TEXT NOT NULL DEFAULT '',t_icon TEXT NOT NULL DEFAULT '');
+CREATE TABLE interest(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,rd_account_id INTEGER NOT NULL,d_date DATE NOT NULL,f_rate FLOAT NOT NULL CHECK (f_rate>=0),t_income_value_date_mode VARCHAR(1) NOT NULL DEFAULT 'F' CHECK (t_income_value_date_mode IN ('F', '0', '1', '2', '3', '4', '5')),t_expenditure_value_date_mode VARCHAR(1) NOT NULL DEFAULT 'F' CHECK (t_expenditure_value_date_mode IN ('F', '0', '1', '2', '3', '4', '5')),t_base VARCHAR(3) NOT NULL DEFAULT '24' CHECK (t_base IN ('24', '360', '365')));
+CREATE TABLE operation(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,i_group_id INTEGER NOT NULL DEFAULT 0,i_number INTEGER DEFAULT 0 CHECK (i_number>=0),d_date DATE NOT NULL DEFAULT '0000-00-00',rd_account_id INTEGER NOT NULL,t_mode TEXT NOT NULL DEFAULT '',r_payee_id INTEGER NOT NULL DEFAULT 0,t_comment TEXT NOT NULL DEFAULT '',rc_unit_id INTEGER NOT NULL,t_status VARCHAR(1) NOT NULL DEFAULT 'N' CHECK (t_status IN ('N', 'P', 'Y')),t_bookmarked VARCHAR(1) NOT NULL DEFAULT 'N' CHECK (t_bookmarked IN ('Y', 'N')),t_imported VARCHAR(1) NOT NULL DEFAULT 'N' CHECK (t_imported IN ('Y', 'N', 'P', 'T')),t_template VARCHAR(1) NOT NULL DEFAULT 'N' CHECK (t_template IN ('Y', 'N')),t_import_id TEXT NOT NULL DEFAULT '',i_tmp INTEGER NOT NULL DEFAULT 0,r_recurrentoperation_id INTEGER NOT NULL DEFAULT 0);
+CREATE TABLE operationbalance(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,f_balance FLOAT NOT NULL DEFAULT 0,r_operation_id INTEGER NOT NULL);
+CREATE TABLE refund (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,t_name TEXT NOT NULL DEFAULT '',t_comment TEXT NOT NULL DEFAULT '',t_close VARCHAR(1) DEFAULT 'N' CHECK (t_close IN ('Y', 'N')));
+CREATE TABLE payee (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,t_name TEXT NOT NULL DEFAULT '',t_address TEXT NOT NULL DEFAULT '', t_bookmarked VARCHAR(1) NOT NULL DEFAULT 'N' CHECK (t_bookmarked IN ('Y', 'N')));
+CREATE TABLE suboperation(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,t_comment TEXT NOT NULL DEFAULT '',rd_operation_id INTEGER NOT NULL,r_category_id INTEGER NOT NULL DEFAULT 0,f_value FLOAT NOT NULL DEFAULT 0.0,i_tmp INTEGER NOT NULL DEFAULT 0,r_refund_id INTEGER NOT NULL DEFAULT 0, t_formula TEXT NOT NULL DEFAULT '');
+CREATE TABLE rule (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,t_description TEXT NOT NULL DEFAULT '',t_definition TEXT NOT NULL DEFAULT '',t_action_description TEXT NOT NULL DEFAULT '',t_action_definition TEXT NOT NULL DEFAULT '',t_action_type VARCHAR(1) DEFAULT 'S' CHECK (t_action_type IN ('S', 'U', 'A')),t_bookmarked VARCHAR(1) NOT NULL DEFAULT 'N' CHECK (t_bookmarked IN ('Y', 'N')),f_sortorder FLOAT);
+CREATE TABLE budget (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,rc_category_id INTEGER NOT NULL DEFAULT 0,t_including_subcategories TEXT NOT NULL DEFAULT 'N' CHECK (t_including_subcategories IN ('Y', 'N')),f_budgeted FLOAT NOT NULL DEFAULT 0.0,f_budgeted_modified FLOAT NOT NULL DEFAULT 0.0,f_transferred FLOAT NOT NULL DEFAULT 0.0,i_year INTEGER NOT NULL DEFAULT 2010,i_month INTEGER NOT NULL DEFAULT 0 CHECK (i_month>=0 AND i_month<=12));
+CREATE TABLE budgetcategory(id INTEGER NOT NULL DEFAULT 0,id_category INTEGER NOT NULL DEFAULT 0);
+CREATE TABLE budgetrule (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,rc_category_id INTEGER NOT NULL DEFAULT 0,t_category_condition TEXT NOT NULL DEFAULT 'Y' CHECK (t_category_condition IN ('Y', 'N')),t_year_condition TEXT NOT NULL DEFAULT 'Y' CHECK (t_year_condition IN ('Y', 'N')),i_year INTEGER NOT NULL DEFAULT 2010,i_month INTEGER NOT NULL DEFAULT 0 CHECK (i_month>=0 AND i_month<=12),t_month_condition TEXT NOT NULL DEFAULT 'Y' CHECK (t_month_condition IN ('Y', 'N')),i_condition INTEGER NOT NULL DEFAULT 0 CHECK (i_condition IN (-1,0,1)),f_quantity FLOAT NOT NULL DEFAULT 0.0,t_absolute TEXT NOT NULL DEFAULT 'Y' CHECK (t_absolute IN ('Y', 'N')),rc_category_id_target INTEGER NOT NULL DEFAULT 0,t_category_target TEXT NOT NULL DEFAULT 'Y' CHECK (t_category_target IN ('Y', 'N')),t_rule TEXT NOT NULL DEFAULT 'N' CHECK (t_rule IN ('N', 'C', 'Y')));
+CREATE TABLE "recurrentoperation" (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,d_date DATE NOT NULL DEFAULT '0000-00-00',rd_operation_id INTEGER NOT NULL,i_period_increment INTEGER NOT NULL DEFAULT 1 CHECK (i_period_increment>=0),t_period_unit TEXT NOT NULL DEFAULT 'M' CHECK (t_period_unit IN ('D', 'W', 'M', 'Y')),t_auto_write VARCHAR(1) DEFAULT 'Y' CHECK (t_auto_write IN ('Y', 'N')),i_auto_write_days INTEGER NOT NULL DEFAULT 5 CHECK (i_auto_write_days>=0),t_warn VARCHAR(1) DEFAULT 'Y' CHECK (t_warn IN ('Y', 'N')),i_warn_days INTEGER NOT NULL DEFAULT 5 CHECK (i_warn_days>=0),t_times VARCHAR(1) DEFAULT 'N' CHECK (t_times IN ('Y', 'N')),i_nb_times INTEGER NOT NULL DEFAULT 1 CHECK (i_nb_times>=0));
+CREATE TABLE "category" (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,t_name TEXT NOT NULL DEFAULT '' CHECK (t_name NOT LIKE '% > %'),t_fullname TEXT,rd_category_id INT,t_bookmarked VARCHAR(1) NOT NULL DEFAULT 'N' CHECK (t_bookmarked IN ('Y', 'N')));
+CREATE TABLE "account"(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,t_name TEXT NOT NULL,t_number TEXT NOT NULL DEFAULT '',t_agency_number TEXT NOT NULL DEFAULT '',t_agency_address TEXT NOT NULL DEFAULT '',t_comment TEXT NOT NULL DEFAULT '',t_close VARCHAR(1) DEFAULT 'N' CHECK (t_close IN ('Y', 'N')),t_type VARCHAR(1) NOT NULL DEFAULT 'C' CHECK (t_type IN ('C', 'D', 'A', 'I', 'L', 'W', 'O')),t_bookmarked VARCHAR(1) NOT NULL DEFAULT 'N' CHECK (t_bookmarked IN ('Y', 'N')),rd_bank_id INTEGER NOT NULL);
+CREATE TABLE "node" (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,t_name TEXT NOT NULL DEFAULT '' CHECK (t_name NOT LIKE '% > %'),t_fullname TEXT,t_icon TEXT DEFAULT '',f_sortorder FLOAT,t_autostart VARCHAR(1) DEFAULT 'N' CHECK (t_autostart IN ('Y', 'N')),t_data TEXT,rd_node_id INT CONSTRAINT fk_id REFERENCES node(id) ON DELETE CASCADE);
+CREATE TABLE vm_category_display_tmp(
+ id INT,
+ t_name TEXT,
+ t_fullname TEXT,
+ rd_category_id INT,
+ t_bookmarked TEXT,
+ i_NBOPERATIONS,
+ f_REALCURRENTAMOUNT
+);
+CREATE TABLE vm_budget_tmp(
+ id INT,
+ rc_category_id INT,
+ t_including_subcategories TEXT,
+ f_budgeted REAL,
+ f_budgeted_modified REAL,
+ f_transferred REAL,
+ i_year INT,
+ i_month INT,
+ t_CATEGORY,
+ t_PERIOD,
+ f_CURRENTAMOUNT,
+ t_RULES
+);
+CREATE INDEX idx_doctransaction_parent ON doctransaction (i_parent);
+CREATE INDEX idx_doctransactionitem_i_object_id ON doctransactionitem (i_object_id);
+CREATE INDEX idx_doctransactionitem_t_object_table ON doctransactionitem (t_object_table);
+CREATE INDEX idx_doctransactionitem_t_action ON doctransactionitem (t_action);
+CREATE INDEX idx_doctransactionitem_rd_doctransaction_id ON doctransactionitem (rd_doctransaction_id);
+CREATE INDEX idx_doctransactionitem_optimization ON doctransactionitem (rd_doctransaction_id, i_object_id, t_object_table, t_action, id);
+CREATE INDEX idx_unit_unit_id ON unitvalue(rd_unit_id);
+CREATE INDEX idx_account_bank_id ON account(rd_bank_id);
+CREATE INDEX idx_account_type ON account(t_type);
+CREATE INDEX idx_category_category_id ON category(rd_category_id);
+CREATE INDEX idx_category_t_fullname ON category(t_fullname);
+CREATE INDEX idx_operation_account_id ON operation (rd_account_id);
+CREATE INDEX idx_operation_tmp1_found_transfert ON operation (rc_unit_id, d_date);
+CREATE INDEX idx_operation_grouped_operation_id ON operation (i_group_id);
+CREATE INDEX idx_operation_i_number ON operation (i_number);
+CREATE INDEX idx_operation_i_tmp ON operation (i_tmp);
+CREATE INDEX idx_operation_rd_account_id ON operation (rd_account_id);
+CREATE INDEX idx_operation_rc_unit_id ON operation (rc_unit_id);
+CREATE INDEX idx_operation_t_status ON operation (t_status);
+CREATE INDEX idx_operation_t_import_id ON operation (t_import_id);
+CREATE INDEX idx_operation_t_template ON operation (t_template);
+CREATE INDEX idx_operation_d_date ON operation (d_date);
+CREATE INDEX idx_operationbalance_operation_id ON operationbalance (r_operation_id);
+CREATE INDEX idx_suboperation_operation_id ON suboperation (rd_operation_id);
+CREATE INDEX idx_suboperation_i_tmp ON suboperation (i_tmp);
+CREATE INDEX idx_suboperation_category_id ON suboperation (r_category_id);
+CREATE INDEX idx_suboperation_refund_id_id ON suboperation (r_refund_id);
+CREATE INDEX idx_recurrentoperation_rd_operation_id ON recurrentoperation (rd_operation_id);
+CREATE INDEX idx_refund_close ON refund(t_close);
+CREATE INDEX idx_interest_account_id ON interest (rd_account_id);
+CREATE INDEX idx_rule_action_type ON rule(t_action_type);
+CREATE INDEX idx_budget_category_id ON budget(rc_category_id);
+CREATE INDEX idx_budgetcategory_id ON budgetcategory (id);
+CREATE INDEX idx_budgetcategory_id_category ON budgetcategory (id_category);
+CREATE UNIQUE INDEX uidx_parameters_uuid_parent_name ON parameters (t_uuid_parent, t_name);
+CREATE UNIQUE INDEX uidx_node_parent_id_name ON node(t_name,rd_node_id);
+CREATE UNIQUE INDEX uidx_node_fullname ON node(t_fullname);
+CREATE UNIQUE INDEX uidx_unit_name ON unit(t_name);
+CREATE UNIQUE INDEX uidx_unit_symbol ON unit(t_symbol);
+CREATE UNIQUE INDEX uidx_unitvalue ON unitvalue(d_date,rd_unit_id);
+CREATE UNIQUE INDEX uidx_bank_name ON bank(t_name);
+CREATE UNIQUE INDEX uidx_account_name ON account(t_name);
+CREATE UNIQUE INDEX uidx_category_parent_id_name ON category(t_name,rd_category_id);
+CREATE UNIQUE INDEX uidx_category_fullname ON category(t_fullname);
+CREATE UNIQUE INDEX uidx_refund_name ON refund(t_name);
+CREATE UNIQUE INDEX uidx_payee_name ON payee(t_name);
+CREATE UNIQUE INDEX uidx_interest ON interest(d_date,rd_account_id);
+CREATE UNIQUE INDEX uidx_budget ON budget(i_year,i_month, rc_category_id);
+CREATE VIEW v_node AS SELECT * from node;
+CREATE VIEW v_node_displayname AS SELECT *, t_fullname AS t_displayname from node;
+CREATE VIEW v_parameters_displayname AS SELECT *, t_name AS t_displayname from parameters;
+CREATE TRIGGER fkdc_parameters_parameters_uuid BEFORE DELETE ON parameters FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'parameters'; END;
+CREATE TRIGGER fkdc_node_parameters_uuid BEFORE DELETE ON node FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'node'; END;
+CREATE TRIGGER cpt_node_fullname1 AFTER INSERT ON node BEGIN UPDATE node SET t_fullname=CASE WHEN new.rd_node_id IS NULL OR new.rd_node_id='' OR new.rd_node_id=0 THEN new.t_name ELSE (SELECT c.t_fullname from node c where c.id=new.rd_node_id)||' > '||new.t_name END WHERE id=new.id;END;
+CREATE TRIGGER cpt_node_fullname2 AFTER UPDATE OF t_name, rd_node_id ON node BEGIN UPDATE node SET t_fullname=CASE WHEN new.rd_node_id IS NULL OR new.rd_node_id='' OR new.rd_node_id=0 THEN new.t_name ELSE (SELECT c.t_fullname from node c where c.id=new.rd_node_id)||' > '||new.t_name END WHERE id=new.id;UPDATE node SET t_name=t_name WHERE rd_node_id=new.id;END;
+CREATE TRIGGER fki_account_bank_rd_bank_id_id BEFORE INSERT ON account FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (bank est utilisé par account)
+Nom de la contrainte : fki_account_bank_rd_bank_id_id') WHERE NEW.rd_bank_id!=0 AND NEW.rd_bank_id!='' AND (SELECT id FROM bank WHERE id = NEW.rd_bank_id) IS NULL; END;
+CREATE TRIGGER fku_account_bank_rd_bank_id_id BEFORE UPDATE ON account FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (bank est utilisé par account)
+Nom de la contrainte : fku_account_bank_rd_bank_id_id') WHERE NEW.rd_bank_id!=0 AND NEW.rd_bank_id!='' AND (SELECT id FROM bank WHERE id = NEW.rd_bank_id) IS NULL; END;
+CREATE TRIGGER fkdc_bank_account_id_rd_bank_id BEFORE DELETE ON bank FOR EACH ROW BEGIN DELETE FROM account WHERE account.rd_bank_id = OLD.id; END;
+CREATE TRIGGER fki_budget_category_rc_category_id_id BEFORE INSERT ON budget FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (category est utilisé par budget)
+Nom de la contrainte : fki_budget_category_rc_category_id_id') WHERE NEW.rc_category_id!=0 AND NEW.rc_category_id!='' AND (SELECT id FROM category WHERE id = NEW.rc_category_id) IS NULL; END;
+CREATE TRIGGER fku_budget_category_rc_category_id_id BEFORE UPDATE ON budget FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (category est utilisé par budget)
+Nom de la contrainte : fku_budget_category_rc_category_id_id') WHERE NEW.rc_category_id!=0 AND NEW.rc_category_id!='' AND (SELECT id FROM category WHERE id = NEW.rc_category_id) IS NULL; END;
+CREATE TRIGGER fkd_budget_category_rc_category_id_id BEFORE DELETE ON category FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de détruire un objet (category est utilisé par budget)
+Nom de la contrainte : fkd_budget_category_rc_category_id_id') WHERE (SELECT rc_category_id FROM budget WHERE rc_category_id = OLD.id) IS NOT NULL; END;
+CREATE TRIGGER fki_budgetrule_category_rc_category_id_id BEFORE INSERT ON budgetrule FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (category est utilisé par budgetrule)
+Nom de la contrainte : fki_budgetrule_category_rc_category_id_id') WHERE NEW.rc_category_id!=0 AND NEW.rc_category_id!='' AND (SELECT id FROM category WHERE id = NEW.rc_category_id) IS NULL; END;
+CREATE TRIGGER fku_budgetrule_category_rc_category_id_id BEFORE UPDATE ON budgetrule FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (category est utilisé par budgetrule)
+Nom de la contrainte : fku_budgetrule_category_rc_category_id_id') WHERE NEW.rc_category_id!=0 AND NEW.rc_category_id!='' AND (SELECT id FROM category WHERE id = NEW.rc_category_id) IS NULL; END;
+CREATE TRIGGER fkd_budgetrule_category_rc_category_id_id BEFORE DELETE ON category FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de détruire un objet (category est utilisé par budgetrule)
+Nom de la contrainte : fkd_budgetrule_category_rc_category_id_id') WHERE (SELECT rc_category_id FROM budgetrule WHERE rc_category_id = OLD.id) IS NOT NULL; END;
+CREATE TRIGGER fki_budgetrule_category_rc_category_id_target_id BEFORE INSERT ON budgetrule FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (category est utilisé par budgetrule)
+Nom de la contrainte : fki_budgetrule_category_rc_category_id_target_id') WHERE NEW.rc_category_id_target!=0 AND NEW.rc_category_id_target!='' AND (SELECT id FROM category WHERE id = NEW.rc_category_id_target) IS NULL; END;
+CREATE TRIGGER fku_budgetrule_category_rc_category_id_target_id BEFORE UPDATE ON budgetrule FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (category est utilisé par budgetrule)
+Nom de la contrainte : fku_budgetrule_category_rc_category_id_target_id') WHERE NEW.rc_category_id_target!=0 AND NEW.rc_category_id_target!='' AND (SELECT id FROM category WHERE id = NEW.rc_category_id_target) IS NULL; END;
+CREATE TRIGGER fkd_budgetrule_category_rc_category_id_target_id BEFORE DELETE ON category FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de détruire un objet (category est utilisé par budgetrule)
+Nom de la contrainte : fkd_budgetrule_category_rc_category_id_target_id') WHERE (SELECT rc_category_id_target FROM budgetrule WHERE rc_category_id_target = OLD.id) IS NOT NULL; END;
+CREATE TRIGGER fki_category_category_rd_category_id_id BEFORE INSERT ON category FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (category est utilisé par category)
+Nom de la contrainte : fki_category_category_rd_category_id_id') WHERE NEW.rd_category_id!=0 AND NEW.rd_category_id!='' AND (SELECT id FROM category WHERE id = NEW.rd_category_id) IS NULL; END;
+CREATE TRIGGER fku_category_category_rd_category_id_id BEFORE UPDATE ON category FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (category est utilisé par category)
+Nom de la contrainte : fku_category_category_rd_category_id_id') WHERE NEW.rd_category_id!=0 AND NEW.rd_category_id!='' AND (SELECT id FROM category WHERE id = NEW.rd_category_id) IS NULL; END;
+CREATE TRIGGER fkdc_category_category_id_rd_category_id BEFORE DELETE ON category FOR EACH ROW BEGIN DELETE FROM category WHERE category.rd_category_id = OLD.id; END;
+CREATE TRIGGER fki_doctransactionitem_doctransaction_rd_doctransaction_id_id BEFORE INSERT ON doctransactionitem FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (doctransaction est utilisé par doctransactionitem)
+Nom de la contrainte : fki_doctransactionitem_doctransaction_rd_doctransaction_id_id') WHERE NEW.rd_doctransaction_id!=0 AND NEW.rd_doctransaction_id!='' AND (SELECT id FROM doctransaction WHERE id = NEW.rd_doctransaction_id) IS NULL; END;
+CREATE TRIGGER fku_doctransactionitem_doctransaction_rd_doctransaction_id_id BEFORE UPDATE ON doctransactionitem FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (doctransaction est utilisé par doctransactionitem)
+Nom de la contrainte : fku_doctransactionitem_doctransaction_rd_doctransaction_id_id') WHERE NEW.rd_doctransaction_id!=0 AND NEW.rd_doctransaction_id!='' AND (SELECT id FROM doctransaction WHERE id = NEW.rd_doctransaction_id) IS NULL; END;
+CREATE TRIGGER fkdc_doctransaction_doctransactionitem_id_rd_doctransaction_id BEFORE DELETE ON doctransaction FOR EACH ROW BEGIN DELETE FROM doctransactionitem WHERE doctransactionitem.rd_doctransaction_id = OLD.id; END;
+CREATE TRIGGER fki_doctransactionmsg_doctransaction_rd_doctransaction_id_id BEFORE INSERT ON doctransactionmsg FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (doctransaction est utilisé par doctransactionmsg)
+Nom de la contrainte : fki_doctransactionmsg_doctransaction_rd_doctransaction_id_id') WHERE NEW.rd_doctransaction_id!=0 AND NEW.rd_doctransaction_id!='' AND (SELECT id FROM doctransaction WHERE id = NEW.rd_doctransaction_id) IS NULL; END;
+CREATE TRIGGER fku_doctransactionmsg_doctransaction_rd_doctransaction_id_id BEFORE UPDATE ON doctransactionmsg FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (doctransaction est utilisé par doctransactionmsg)
+Nom de la contrainte : fku_doctransactionmsg_doctransaction_rd_doctransaction_id_id') WHERE NEW.rd_doctransaction_id!=0 AND NEW.rd_doctransaction_id!='' AND (SELECT id FROM doctransaction WHERE id = NEW.rd_doctransaction_id) IS NULL; END;
+CREATE TRIGGER fkdc_doctransaction_doctransactionmsg_id_rd_doctransaction_id BEFORE DELETE ON doctransaction FOR EACH ROW BEGIN DELETE FROM doctransactionmsg WHERE doctransactionmsg.rd_doctransaction_id = OLD.id; END;
+CREATE TRIGGER fki_interest_account_rd_account_id_id BEFORE INSERT ON interest FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (account est utilisé par interest)
+Nom de la contrainte : fki_interest_account_rd_account_id_id') WHERE NEW.rd_account_id!=0 AND NEW.rd_account_id!='' AND (SELECT id FROM account WHERE id = NEW.rd_account_id) IS NULL; END;
+CREATE TRIGGER fku_interest_account_rd_account_id_id BEFORE UPDATE ON interest FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (account est utilisé par interest)
+Nom de la contrainte : fku_interest_account_rd_account_id_id') WHERE NEW.rd_account_id!=0 AND NEW.rd_account_id!='' AND (SELECT id FROM account WHERE id = NEW.rd_account_id) IS NULL; END;
+CREATE TRIGGER fkdc_account_interest_id_rd_account_id BEFORE DELETE ON account FOR EACH ROW BEGIN DELETE FROM interest WHERE interest.rd_account_id = OLD.id; END;
+CREATE TRIGGER fki_node_node_rd_node_id_id BEFORE INSERT ON node FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (node est utilisé par node)
+Nom de la contrainte : fki_node_node_rd_node_id_id') WHERE NEW.rd_node_id!=0 AND NEW.rd_node_id!='' AND (SELECT id FROM node WHERE id = NEW.rd_node_id) IS NULL; END;
+CREATE TRIGGER fku_node_node_rd_node_id_id BEFORE UPDATE ON node FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (node est utilisé par node)
+Nom de la contrainte : fku_node_node_rd_node_id_id') WHERE NEW.rd_node_id!=0 AND NEW.rd_node_id!='' AND (SELECT id FROM node WHERE id = NEW.rd_node_id) IS NULL; END;
+CREATE TRIGGER fkdc_node_node_id_rd_node_id BEFORE DELETE ON node FOR EACH ROW BEGIN DELETE FROM node WHERE node.rd_node_id = OLD.id; END;
+CREATE TRIGGER fki_operation_account_rd_account_id_id BEFORE INSERT ON operation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (account est utilisé par operation)
+Nom de la contrainte : fki_operation_account_rd_account_id_id') WHERE NEW.rd_account_id!=0 AND NEW.rd_account_id!='' AND (SELECT id FROM account WHERE id = NEW.rd_account_id) IS NULL; END;
+CREATE TRIGGER fku_operation_account_rd_account_id_id BEFORE UPDATE ON operation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (account est utilisé par operation)
+Nom de la contrainte : fku_operation_account_rd_account_id_id') WHERE NEW.rd_account_id!=0 AND NEW.rd_account_id!='' AND (SELECT id FROM account WHERE id = NEW.rd_account_id) IS NULL; END;
+CREATE TRIGGER fkdc_account_operation_id_rd_account_id BEFORE DELETE ON account FOR EACH ROW BEGIN DELETE FROM operation WHERE operation.rd_account_id = OLD.id; END;
+CREATE TRIGGER fki_operation_payee_r_payee_id_id BEFORE INSERT ON operation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (payee est utilisé par operation)
+Nom de la contrainte : fki_operation_payee_r_payee_id_id') WHERE NEW.r_payee_id!=0 AND NEW.r_payee_id!='' AND (SELECT id FROM payee WHERE id = NEW.r_payee_id) IS NULL; END;
+CREATE TRIGGER fku_operation_payee_r_payee_id_id BEFORE UPDATE ON operation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (payee est utilisé par operation)
+Nom de la contrainte : fku_operation_payee_r_payee_id_id') WHERE NEW.r_payee_id!=0 AND NEW.r_payee_id!='' AND (SELECT id FROM payee WHERE id = NEW.r_payee_id) IS NULL; END;
+CREATE TRIGGER fkd_operation_payee_r_payee_id_id BEFORE DELETE ON payee FOR EACH ROW BEGIN UPDATE operation SET r_payee_id=0 WHERE r_payee_id=OLD.id; END;
+CREATE TRIGGER fki_operation_unit_rc_unit_id_id BEFORE INSERT ON operation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (unit est utilisé par operation)
+Nom de la contrainte : fki_operation_unit_rc_unit_id_id') WHERE NEW.rc_unit_id!=0 AND NEW.rc_unit_id!='' AND (SELECT id FROM unit WHERE id = NEW.rc_unit_id) IS NULL; END;
+CREATE TRIGGER fku_operation_unit_rc_unit_id_id BEFORE UPDATE ON operation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (unit est utilisé par operation)
+Nom de la contrainte : fku_operation_unit_rc_unit_id_id') WHERE NEW.rc_unit_id!=0 AND NEW.rc_unit_id!='' AND (SELECT id FROM unit WHERE id = NEW.rc_unit_id) IS NULL; END;
+CREATE TRIGGER fkd_operation_unit_rc_unit_id_id BEFORE DELETE ON unit FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de détruire un objet (unit est utilisé par operation)
+Nom de la contrainte : fkd_operation_unit_rc_unit_id_id') WHERE (SELECT rc_unit_id FROM operation WHERE rc_unit_id = OLD.id) IS NOT NULL; END;
+CREATE TRIGGER fki_operation_recurrentoperation_r_recurrentoperation_id_id BEFORE INSERT ON operation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (recurrentoperation est utilisé par operation)
+Nom de la contrainte : fki_operation_recurrentoperation_r_recurrentoperation_id_id') WHERE NEW.r_recurrentoperation_id!=0 AND NEW.r_recurrentoperation_id!='' AND (SELECT id FROM recurrentoperation WHERE id = NEW.r_recurrentoperation_id) IS NULL; END;
+CREATE TRIGGER fku_operation_recurrentoperation_r_recurrentoperation_id_id BEFORE UPDATE ON operation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (recurrentoperation est utilisé par operation)
+Nom de la contrainte : fku_operation_recurrentoperation_r_recurrentoperation_id_id') WHERE NEW.r_recurrentoperation_id!=0 AND NEW.r_recurrentoperation_id!='' AND (SELECT id FROM recurrentoperation WHERE id = NEW.r_recurrentoperation_id) IS NULL; END;
+CREATE TRIGGER fkd_operation_recurrentoperation_r_recurrentoperation_id_id BEFORE DELETE ON recurrentoperation FOR EACH ROW BEGIN UPDATE operation SET r_recurrentoperation_id=0 WHERE r_recurrentoperation_id=OLD.id; END;
+CREATE TRIGGER fki_operationbalance_operation_r_operation_id_id BEFORE INSERT ON operationbalance FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (operation est utilisé par operationbalance)
+Nom de la contrainte : fki_operationbalance_operation_r_operation_id_id') WHERE NEW.r_operation_id!=0 AND NEW.r_operation_id!='' AND (SELECT id FROM operation WHERE id = NEW.r_operation_id) IS NULL; END;
+CREATE TRIGGER fku_operationbalance_operation_r_operation_id_id BEFORE UPDATE ON operationbalance FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (operation est utilisé par operationbalance)
+Nom de la contrainte : fku_operationbalance_operation_r_operation_id_id') WHERE NEW.r_operation_id!=0 AND NEW.r_operation_id!='' AND (SELECT id FROM operation WHERE id = NEW.r_operation_id) IS NULL; END;
+CREATE TRIGGER fkd_operationbalance_operation_r_operation_id_id BEFORE DELETE ON operation FOR EACH ROW BEGIN UPDATE operationbalance SET r_operation_id=0 WHERE r_operation_id=OLD.id; END;
+CREATE TRIGGER fki_recurrentoperation_operation_rd_operation_id_id BEFORE INSERT ON recurrentoperation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (operation est utilisé par recurrentoperation)
+Nom de la contrainte : fki_recurrentoperation_operation_rd_operation_id_id') WHERE NEW.rd_operation_id!=0 AND NEW.rd_operation_id!='' AND (SELECT id FROM operation WHERE id = NEW.rd_operation_id) IS NULL; END;
+CREATE TRIGGER fku_recurrentoperation_operation_rd_operation_id_id BEFORE UPDATE ON recurrentoperation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (operation est utilisé par recurrentoperation)
+Nom de la contrainte : fku_recurrentoperation_operation_rd_operation_id_id') WHERE NEW.rd_operation_id!=0 AND NEW.rd_operation_id!='' AND (SELECT id FROM operation WHERE id = NEW.rd_operation_id) IS NULL; END;
+CREATE TRIGGER fkdc_operation_recurrentoperation_id_rd_operation_id BEFORE DELETE ON operation FOR EACH ROW BEGIN DELETE FROM recurrentoperation WHERE recurrentoperation.rd_operation_id = OLD.id; END;
+CREATE TRIGGER fki_suboperation_operation_rd_operation_id_id BEFORE INSERT ON suboperation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (operation est utilisé par suboperation)
+Nom de la contrainte : fki_suboperation_operation_rd_operation_id_id') WHERE NEW.rd_operation_id!=0 AND NEW.rd_operation_id!='' AND (SELECT id FROM operation WHERE id = NEW.rd_operation_id) IS NULL; END;
+CREATE TRIGGER fku_suboperation_operation_rd_operation_id_id BEFORE UPDATE ON suboperation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (operation est utilisé par suboperation)
+Nom de la contrainte : fku_suboperation_operation_rd_operation_id_id') WHERE NEW.rd_operation_id!=0 AND NEW.rd_operation_id!='' AND (SELECT id FROM operation WHERE id = NEW.rd_operation_id) IS NULL; END;
+CREATE TRIGGER fkdc_operation_suboperation_id_rd_operation_id BEFORE DELETE ON operation FOR EACH ROW BEGIN DELETE FROM suboperation WHERE suboperation.rd_operation_id = OLD.id; END;
+CREATE TRIGGER fki_suboperation_category_r_category_id_id BEFORE INSERT ON suboperation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (category est utilisé par suboperation)
+Nom de la contrainte : fki_suboperation_category_r_category_id_id') WHERE NEW.r_category_id!=0 AND NEW.r_category_id!='' AND (SELECT id FROM category WHERE id = NEW.r_category_id) IS NULL; END;
+CREATE TRIGGER fku_suboperation_category_r_category_id_id BEFORE UPDATE ON suboperation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (category est utilisé par suboperation)
+Nom de la contrainte : fku_suboperation_category_r_category_id_id') WHERE NEW.r_category_id!=0 AND NEW.r_category_id!='' AND (SELECT id FROM category WHERE id = NEW.r_category_id) IS NULL; END;
+CREATE TRIGGER fkd_suboperation_category_r_category_id_id BEFORE DELETE ON category FOR EACH ROW BEGIN UPDATE suboperation SET r_category_id=0 WHERE r_category_id=OLD.id; END;
+CREATE TRIGGER fki_suboperation_refund_r_refund_id_id BEFORE INSERT ON suboperation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (refund est utilisé par suboperation)
+Nom de la contrainte : fki_suboperation_refund_r_refund_id_id') WHERE NEW.r_refund_id!=0 AND NEW.r_refund_id!='' AND (SELECT id FROM refund WHERE id = NEW.r_refund_id) IS NULL; END;
+CREATE TRIGGER fku_suboperation_refund_r_refund_id_id BEFORE UPDATE ON suboperation FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (refund est utilisé par suboperation)
+Nom de la contrainte : fku_suboperation_refund_r_refund_id_id') WHERE NEW.r_refund_id!=0 AND NEW.r_refund_id!='' AND (SELECT id FROM refund WHERE id = NEW.r_refund_id) IS NULL; END;
+CREATE TRIGGER fkd_suboperation_refund_r_refund_id_id BEFORE DELETE ON refund FOR EACH ROW BEGIN UPDATE suboperation SET r_refund_id=0 WHERE r_refund_id=OLD.id; END;
+CREATE TRIGGER fki_unit_unit_rd_unit_id_id BEFORE INSERT ON unit FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (unit est utilisé par unit)
+Nom de la contrainte : fki_unit_unit_rd_unit_id_id') WHERE NEW.rd_unit_id!=0 AND NEW.rd_unit_id!='' AND (SELECT id FROM unit WHERE id = NEW.rd_unit_id) IS NULL; END;
+CREATE TRIGGER fku_unit_unit_rd_unit_id_id BEFORE UPDATE ON unit FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (unit est utilisé par unit)
+Nom de la contrainte : fku_unit_unit_rd_unit_id_id') WHERE NEW.rd_unit_id!=0 AND NEW.rd_unit_id!='' AND (SELECT id FROM unit WHERE id = NEW.rd_unit_id) IS NULL; END;
+CREATE TRIGGER fkdc_unit_unit_id_rd_unit_id BEFORE DELETE ON unit FOR EACH ROW BEGIN DELETE FROM unit WHERE unit.rd_unit_id = OLD.id; END;
+CREATE TRIGGER fki_unitvalue_unit_rd_unit_id_id BEFORE INSERT ON unitvalue FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible d''ajouter un objet (unit est utilisé par unitvalue)
+Nom de la contrainte : fki_unitvalue_unit_rd_unit_id_id') WHERE NEW.rd_unit_id!=0 AND NEW.rd_unit_id!='' AND (SELECT id FROM unit WHERE id = NEW.rd_unit_id) IS NULL; END;
+CREATE TRIGGER fku_unitvalue_unit_rd_unit_id_id BEFORE UPDATE ON unitvalue FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de modifier un objet (unit est utilisé par unitvalue)
+Nom de la contrainte : fku_unitvalue_unit_rd_unit_id_id') WHERE NEW.rd_unit_id!=0 AND NEW.rd_unit_id!='' AND (SELECT id FROM unit WHERE id = NEW.rd_unit_id) IS NULL; END;
+CREATE TRIGGER fkdc_unit_unitvalue_id_rd_unit_id BEFORE DELETE ON unit FOR EACH ROW BEGIN DELETE FROM unitvalue WHERE unitvalue.rd_unit_id = OLD.id; END;
+CREATE TRIGGER fkd_vm_budget_tmp_category_rc_category_id_id BEFORE DELETE ON category FOR EACH ROW BEGIN SELECT RAISE(ABORT, 'Impossible de détruire un objet (category est utilisé par vm_budget_tmp)
+Nom de la contrainte : fkd_vm_budget_tmp_category_rc_category_id_id') WHERE (SELECT rc_category_id FROM vm_budget_tmp WHERE rc_category_id = OLD.id) IS NOT NULL; END;
+CREATE TRIGGER fkdc_category_vm_category_display_tmp_id_rd_category_id BEFORE DELETE ON category FOR EACH ROW BEGIN DELETE FROM vm_category_display_tmp WHERE vm_category_display_tmp.rd_category_id = OLD.id; END;
+CREATE VIEW v_unit_displayname AS SELECT *, t_name||' ('||t_symbol||')' AS t_displayname FROM unit;
+CREATE VIEW v_unit_tmp1 AS SELECT *,(SELECT count(*) FROM unitvalue s WHERE s.rd_unit_id=unit.id) AS i_NBVALUES, (CASE WHEN unit.rd_unit_id=0 THEN '' ELSE (SELECT (CASE WHEN s.t_symbol!='' THEN s.t_symbol ELSE s.t_name END) FROM unit s WHERE s.id=unit.rd_unit_id) END) AS t_UNIT,(CASE unit.t_type WHEN '1' THEN 'Monnaie principale' WHEN '2' THEN 'Monnaie secondaire' WHEN 'C' THEN 'Monnaie' WHEN 'S' THEN 'Action' WHEN 'I' THEN 'Indice' ELSE 'Objet' END) AS t_TYPENLS, (SELECT MIN(s.d_date) FROM unitvalue s WHERE s.rd_unit_id=unit.id) AS d_MINDATE, (SELECT MAX(s.d_date) FROM unitvalue s WHERE s.rd_unit_id=unit.id) AS d_MAXDATE from unit;
+CREATE VIEW v_unit_tmp2 AS SELECT *,CASE WHEN v_unit_tmp1.t_type='1' THEN 1 ELSE IFNULL((SELECT s.f_quantity FROM unitvalue s WHERE s.rd_unit_id=v_unit_tmp1.id AND s.d_date=v_unit_tmp1.d_MAXDATE),1) END AS f_LASTVALUE from v_unit_tmp1;
+CREATE VIEW v_unit AS SELECT *,v_unit_tmp2.f_LASTVALUE*IFNULL((SELECT s2.f_LASTVALUE FROM v_unit_tmp2 s2 WHERE s2.id=v_unit_tmp2.rd_unit_id) , 1) AS f_CURRENTAMOUNT from v_unit_tmp2;
+CREATE VIEW v_unitvalue_displayname AS SELECT *, (SELECT t_displayname FROM v_unit_displayname WHERE unitvalue.rd_unit_id=v_unit_displayname.id)||' '||STRFTIME('%d/%m/%Y',d_date) AS t_displayname FROM unitvalue;
+CREATE VIEW v_unitvalue AS SELECT * FROM unitvalue;
+CREATE VIEW v_suboperation AS SELECT * FROM suboperation;
+CREATE VIEW v_operation_numbers AS SELECT DISTINCT i_number, rd_account_id FROM operation;
+CREATE VIEW v_operation_next_numbers AS SELECT T1.i_number+1 AS i_number FROM v_operation_numbers AS T1 LEFT OUTER JOIN v_operation_numbers T2 ON T2.rd_account_id=T1.rd_account_id AND T2.i_number=T1.i_number+1 WHERE T1.i_number!=0 AND (T2.i_number IS NULL) ORDER BY T1.i_number;
+CREATE VIEW v_operation_tmp1 AS SELECT *,(SELECT t_name FROM payee s WHERE s.id=operation.r_payee_id) AS t_PAYEE,(SELECT TOTAL(s.f_value) FROM suboperation s WHERE s.rd_operation_id=operation.ID) AS f_QUANTITY,(SELECT count(*) FROM suboperation s WHERE s.rd_operation_id=operation.ID) AS i_NBSUBCATEGORY FROM operation;
+CREATE VIEW v_operation AS SELECT *,(SELECT s.id FROM suboperation s WHERE s.rd_operation_id=v_operation_tmp1.id AND ABS(s.f_value)=(SELECT MAX(ABS(s2.f_value)) FROM suboperation s2 WHERE s2.rd_operation_id=v_operation_tmp1.id)) AS i_MOSTIMPSUBOP,((SELECT s.f_CURRENTAMOUNT FROM v_unit s WHERE s.id=v_operation_tmp1.rc_unit_id)*v_operation_tmp1.f_QUANTITY) AS f_CURRENTAMOUNT, (CASE WHEN v_operation_tmp1.i_group_id<>0 AND EXISTS (SELECT 1 FROM account a WHERE v_operation_tmp1.rd_account_id=a.id AND a.t_type<>'L') AND EXISTS (SELECT 1 FROM v_operation_tmp1 op2, account a WHERE op2.i_group_id=v_operation_tmp1.i_group_id AND op2.rd_account_id=a.id AND a.t_type<>'L' AND op2.rc_unit_id=v_operation_tmp1.rc_unit_id AND op2.f_QUANTITY=-v_operation_tmp1.f_QUANTITY) THEN 'Y' ELSE 'N' END) AS t_TRANSFER FROM v_operation_tmp1;
+CREATE VIEW v_operation_displayname AS SELECT *, STRFTIME('%d/%m/%Y',d_date)||' '||IFNULL(t_PAYEE,'')||' '||v_operation.f_CURRENTAMOUNT||' '||(SELECT (CASE WHEN s.t_symbol!='' THEN s.t_symbol ELSE s.t_name END) FROM unit s WHERE s.id=v_operation.rc_unit_id) AS t_displayname FROM v_operation;
+CREATE VIEW v_operation_delete AS SELECT *, (CASE WHEN t_status='Y' THEN 'Vous n''êtes pas autorisé à détruire cette opération car en état « rapproché »' END) t_delete_message FROM operation;
+CREATE VIEW v_account AS SELECT *,(SELECT MAX(s.d_date) FROM interest s WHERE s.rd_account_id=account.id) AS d_MAXDATE, (SELECT TOTAL(s.f_CURRENTAMOUNT) FROM v_operation s WHERE s.rd_account_id=account.id AND s.t_template='N') AS f_CURRENTAMOUNT FROM account;
+CREATE VIEW v_account_delete AS SELECT *, (CASE WHEN EXISTS(SELECT 1 FROM operation WHERE rd_account_id=account.id AND d_date<>'0000-00-00' AND t_template='N' AND t_status='Y') THEN 'Vous n''êtes pas autorisé à détruire ce compte car il contient des opérations rapprochées' END) t_delete_message FROM account;
+CREATE VIEW v_bank_displayname AS SELECT *, t_name AS t_displayname FROM bank;
+CREATE VIEW v_account_displayname AS SELECT *, (SELECT t_displayname FROM v_bank_displayname WHERE account.rd_bank_id=v_bank_displayname.id)||'-'||t_name AS t_displayname FROM account;
+CREATE VIEW v_bank AS SELECT *,(SELECT TOTAL(s.f_CURRENTAMOUNT) FROM v_account s WHERE s.rd_bank_id=bank.id) AS f_CURRENTAMOUNT FROM bank;
+CREATE VIEW v_category_displayname AS SELECT *, t_fullname AS t_displayname FROM category;
+CREATE VIEW v_category AS SELECT * FROM category;
+CREATE VIEW v_recurrentoperation AS SELECT *,i_period_increment||' '||(CASE t_period_unit WHEN 'Y' THEN 'année(s)' WHEN 'M' THEN 'mois' WHEN 'W' THEN 'semaine(s)' ELSE 'jour(s)' END) AS t_PERIODNLS FROM recurrentoperation;
+CREATE VIEW v_recurrentoperation_displayname AS SELECT *, STRFTIME('%d/%m/%Y',d_date)||' '||SUBSTR((SELECT t_displayname FROM v_operation_displayname WHERE v_operation_displayname.id=v_recurrentoperation.rd_operation_id), 11) AS t_displayname FROM v_recurrentoperation;
+CREATE VIEW v_unitvalue_display AS SELECT *,IFNULL((SELECT (CASE WHEN s.t_symbol!='' THEN s.t_symbol ELSE s.t_name END) FROM unit s WHERE s.id=(SELECT s2.rd_unit_id FROM unit s2 WHERE s2.id=unitvalue.rd_unit_id)),'') AS t_UNIT,STRFTIME('%Y-%m',unitvalue.d_date) AS d_DATEMONTH,STRFTIME('%Y',unitvalue.d_date) AS d_DATEYEAR FROM unitvalue;
+CREATE VIEW v_suboperation_display AS SELECT *,IFNULL((SELECT s.t_fullname FROM category s WHERE s.id=v_suboperation.r_category_id),'') AS t_CATEGORY, IFNULL((SELECT s.t_name FROM refund s WHERE s.id=v_suboperation.r_refund_id),'') AS t_REFUND, (CASE WHEN v_suboperation.f_value>=0 THEN v_suboperation.f_value ELSE 0 END) AS f_VALUE_INCOME, (CASE WHEN v_suboperation.f_value<=0 THEN v_suboperation.f_value ELSE 0 END) AS f_VALUE_EXPENSE FROM v_suboperation;
+CREATE VIEW v_suboperation_displayname AS SELECT *, t_CATEGORY||' : '||f_value AS t_displayname FROM v_suboperation_display;
+CREATE VIEW v_operation_display_all AS SELECT *,(SELECT s.t_name FROM account s WHERE s.id=v_operation.rd_account_id) AS t_ACCOUNT,(SELECT (CASE WHEN s.t_symbol!='' THEN s.t_symbol ELSE s.t_name END) FROM unit s WHERE s.id=v_operation.rc_unit_id) AS t_UNIT,(SELECT s.t_CATEGORY FROM v_suboperation_display s WHERE s.id=v_operation.i_MOSTIMPSUBOP) AS t_CATEGORY,(SELECT s.t_REFUND FROM v_suboperation_display s WHERE s.id=v_operation.i_MOSTIMPSUBOP) AS t_REFUND,(CASE WHEN v_operation.f_QUANTITY<0 THEN '-' WHEN v_operation.f_QUANTITY=0 THEN '' ELSE '+' END) AS t_TYPEEXPENSE, (CASE WHEN v_operation.f_QUANTITY<=0 THEN 'Dépense' ELSE 'Revenu' END) AS t_TYPEEXPENSENLS, STRFTIME('%Y-W%W',v_operation.d_date) AS d_DATEWEEK,STRFTIME('%Y-%m',v_operation.d_date) AS d_DATEMONTH,STRFTIME('%Y',v_operation.d_date)||'-Q'||(CASE WHEN STRFTIME('%m',v_operation.d_date)<='03' THEN '1' WHEN STRFTIME('%m',v_operation.d_date)<='06' THEN '2' WHEN STRFTIME('%m',v_operation.d_date)<='09' THEN '3' ELSE '4' END) AS d_DATEQUARTER, STRFTIME('%Y',v_operation.d_date)||'-S'||(CASE WHEN STRFTIME('%m',v_operation.d_date)<='06' THEN '1' ELSE '2' END) AS d_DATESEMESTER, STRFTIME('%Y',v_operation.d_date) AS d_DATEYEAR, (SELECT count(*) FROM v_recurrentoperation s WHERE s.rd_operation_id=v_operation.id) AS i_NBRECURRENT, (CASE WHEN v_operation.f_QUANTITY>=0 THEN v_operation.f_QUANTITY ELSE 0 END) AS f_QUANTITY_INCOME, (CASE WHEN v_operation.f_QUANTITY<=0 THEN v_operation.f_QUANTITY ELSE 0 END) AS f_QUANTITY_EXPENSE, (SELECT o2.f_balance FROM operationbalance o2 WHERE o2.r_operation_id=v_operation.id ) AS f_BALANCE, (CASE WHEN v_operation.f_QUANTITY>=0 THEN v_operation.f_CURRENTAMOUNT ELSE 0 END) AS f_CURRENTAMOUNT_INCOME, (CASE WHEN v_operation.f_QUANTITY<=0 THEN v_operation.f_CURRENTAMOUNT ELSE 0 END) AS f_CURRENTAMOUNT_EXPENSE FROM v_operation;
+CREATE VIEW v_operation_template_display AS SELECT * FROM v_operation_display_all WHERE t_template='Y';
+CREATE VIEW v_operation_display AS SELECT * FROM v_operation_display_all WHERE d_date!='0000-00-00' AND t_template='N';
+CREATE VIEW v_unit_display AS SELECT *,(SELECT TOTAL(o.f_QUANTITY) FROM v_operation_display o WHERE o.rc_unit_id=v_unit.id) AS f_QUANTITYOWNED FROM v_unit;
+CREATE VIEW v_account_display AS SELECT (CASE t_type WHEN 'C' THEN 'Courant' WHEN 'D' THEN 'Carte de crédit' WHEN 'A' THEN 'Actif' WHEN 'I' THEN 'Investissement' WHEN 'W' THEN 'Portefeuille' WHEN 'L' THEN 'Prêt' WHEN 'O' THEN 'Autre' END) AS t_TYPENLS,bank.t_name AS t_BANK,bank.t_bank_number AS t_BANK_NUMBER,bank.t_icon AS t_ICON,v_account.*,(v_account.f_CURRENTAMOUNT/(SELECT u.f_CURRENTAMOUNT FROM v_unit u, operation s WHERE u.id=s.rc_unit_id AND s.rd_account_id=v_account.id AND s.d_date='0000-00-00')) AS f_QUANTITY, (SELECT (CASE WHEN u.t_symbol!='' THEN u.t_symbol ELSE u.t_name END) FROM unit u, operation s WHERE u.id=s.rc_unit_id AND s.rd_account_id=v_account.id AND s.d_date='0000-00-00') AS t_UNIT, (SELECT TOTAL(s.f_CURRENTAMOUNT) FROM v_operation s WHERE s.rd_account_id=v_account.id AND s.t_status!='N' AND s.t_template='N') AS f_CHECKED, (SELECT TOTAL(s.f_CURRENTAMOUNT) FROM v_operation s WHERE s.rd_account_id=v_account.id AND s.t_status='N' AND s.t_template='N') AS f_COMING_SOON, (SELECT TOTAL(s.f_CURRENTAMOUNT) FROM v_operation s WHERE s.rd_account_id=v_account.id AND s.d_date<=date('now') AND s.t_template='N') AS f_TODAYAMOUNT, (SELECT count(*) FROM v_operation_display s WHERE s.rd_account_id=v_account.id) AS i_NBOPERATIONS, IFNULL((SELECT s.f_rate FROM interest s WHERE s.rd_account_id=v_account.id AND s.d_date=v_account.d_MAXDATE),0) AS f_RATE FROM v_account, bank WHERE bank.id=v_account.rd_bank_id;
+CREATE VIEW v_operation_consolidated AS SELECT (SELECT s.t_TYPENLS FROM v_account_display s WHERE s.id=op.rd_account_id) AS t_ACCOUNTTYPE,(SELECT u.t_TYPENLS FROM v_unit u WHERE u.id=op.rc_unit_id) AS t_UNITTYPE,sop.id AS i_SUBOPID, sop.r_refund_id AS r_refund_id, (CASE WHEN sop.t_comment='' THEN op.t_comment ELSE sop.t_comment END) AS t_REALCOMMENT, sop.t_CATEGORY AS t_REALCATEGORY, sop.t_REFUND AS t_REALREFUND, sop.r_category_id AS i_IDCATEGORY, (CASE WHEN sop.f_value<0 THEN '-' WHEN sop.f_value=0 THEN '' ELSE '+' END) AS t_TYPEEXPENSE, (CASE WHEN sop.f_value<0 THEN 'Dépense' WHEN sop.f_value=0 THEN '' ELSE 'Revenu' END) AS t_TYPEEXPENSENLS, sop.f_value AS f_REALQUANTITY, sop.f_VALUE_INCOME AS f_REALQUANTITY_INCOME, sop.f_VALUE_EXPENSE AS f_REALQUANTITY_EXPENSE, ((SELECT u.f_CURRENTAMOUNT FROM v_unit u WHERE u.id=op.rc_unit_id)*sop.f_value) AS f_REALCURRENTAMOUNT, ((SELECT u.f_CURRENTAMOUNT FROM v_unit u WHERE u.id=op.rc_unit_id)*sop.f_VALUE_INCOME) AS f_REALCURRENTAMOUNT_INCOME, ((SELECT u.f_CURRENTAMOUNT FROM v_unit u WHERE u.id=op.rc_unit_id)*sop.f_VALUE_EXPENSE) AS f_REALCURRENTAMOUNT_EXPENSE, op.* FROM v_operation_display_all AS op, v_suboperation_display AS sop WHERE op.t_template='N' AND sop.rd_operation_id=op.ID;
+CREATE VIEW v_operation_prop AS SELECT p.id AS i_PROPPID, p.t_name AS i_PROPPNAME, p.t_value AS i_PROPVALUE, op.* FROM v_operation_consolidated AS op LEFT OUTER JOIN parameters AS p ON p.t_uuid_parent=op.id||'-operation';
+CREATE VIEW v_refund_delete AS SELECT *, (CASE WHEN EXISTS(SELECT 1 FROM v_operation_consolidated WHERE r_refund_id=refund.id AND t_status='Y') THEN 'Vous n''êtes pas autorisé à détruire ce suiveur car utilisé par des opérations rapprochées' END) t_delete_message FROM refund;
+CREATE VIEW v_refund AS SELECT *, (SELECT TOTAL(o.f_REALCURRENTAMOUNT) FROM v_operation_consolidated o WHERE o.r_refund_id=refund.id) AS f_CURRENTAMOUNT FROM refund;
+CREATE VIEW v_refund_display AS SELECT *,(SELECT MIN(o.d_date) FROM v_operation_consolidated o WHERE o.r_refund_id=v_refund.id) AS d_FIRSTDATE, (SELECT MAX(o.d_date) FROM v_operation_consolidated o WHERE o.r_refund_id=v_refund.id) AS d_LASTDATE FROM v_refund;
+CREATE VIEW v_refund_displayname AS SELECT *, t_name AS t_displayname FROM refund;
+CREATE VIEW v_payee_delete AS SELECT *, (CASE WHEN EXISTS(SELECT 1 FROM operation WHERE r_payee_id=payee.id AND t_status='Y') THEN 'Vous n''êtes pas autorisé à détruire ce tiers car utilisé par des opérations rapprochées' END) t_delete_message FROM payee;
+CREATE VIEW v_payee AS SELECT *, (SELECT TOTAL(o.f_CURRENTAMOUNT) FROM v_operation o WHERE o.r_payee_id=payee.id AND o.t_template='N') AS f_CURRENTAMOUNT FROM payee;
+CREATE VIEW v_payee_display AS SELECT * FROM v_payee;
+CREATE VIEW v_payee_displayname AS SELECT *, t_name AS t_displayname FROM payee;
+CREATE VIEW v_category_delete AS SELECT *, (CASE WHEN EXISTS(SELECT 1 FROM v_operation_consolidated WHERE (t_REALCATEGORY=category.t_fullname OR t_REALCATEGORY like category.t_fullname||'%') AND t_status='Y') THEN 'Vous n''êtes pas autorisé à détruire cette catégorie car utilisée par des opérations rapprochées' END) t_delete_message FROM category;
+CREATE VIEW v_category_display_tmp AS SELECT *,(SELECT count(distinct(so.rd_operation_id)) FROM operation o, suboperation so WHERE so.rd_operation_id=o.id AND so.r_category_id=v_category.ID AND o.t_template='N') AS i_NBOPERATIONS, (SELECT TOTAL(o.f_REALCURRENTAMOUNT) FROM v_operation_consolidated o WHERE o.i_IDCATEGORY=v_category.ID) AS f_REALCURRENTAMOUNT FROM v_category;
+CREATE VIEW v_category_display AS SELECT *,f_REALCURRENTAMOUNT+(SELECT TOTAL(c.f_REALCURRENTAMOUNT) FROM vm_category_display_tmp c WHERE c.t_fullname LIKE vm_category_display_tmp.t_fullname||' > %') AS f_SUMCURRENTAMOUNT, i_NBOPERATIONS+(SELECT CAST(TOTAL(c.i_NBOPERATIONS) AS INTEGER) FROM vm_category_display_tmp c WHERE c.t_fullname like vm_category_display_tmp.t_fullname||' > %') AS i_SUMNBOPERATIONS, (CASE WHEN t_bookmarked='Y' THEN 'Y' WHEN EXISTS(SELECT 1 FROM category c WHERE c.t_bookmarked='Y' AND c.t_fullname like vm_category_display_tmp.t_fullname||' > %') THEN 'C' ELSE 'N' END) AS t_HASBOOKMARKEDCHILD, (CASE WHEN vm_category_display_tmp.f_REALCURRENTAMOUNT<0 THEN '-' WHEN vm_category_display_tmp.f_REALCURRENTAMOUNT=0 THEN '' ELSE '+' END) AS t_TYPEEXPENSE,(CASE WHEN vm_category_display_tmp.f_REALCURRENTAMOUNT<0 THEN 'Dépense' WHEN vm_category_display_tmp.f_REALCURRENTAMOUNT=0 THEN '' ELSE 'Revenu' END) AS t_TYPEEXPENSENLS FROM vm_category_display_tmp;
+CREATE VIEW v_recurrentoperation_display AS SELECT rop.*, op.t_ACCOUNT, op.i_number, op.t_mode, op.i_group_id, op.t_TRANSFER, op.t_PAYEE, op.t_comment, op.t_CATEGORY, op.t_status, op.f_CURRENTAMOUNT FROM v_recurrentoperation rop, v_operation_display_all AS op WHERE rop.rd_operation_id=op.ID;
+CREATE VIEW v_rule AS SELECT *,(SELECT COUNT(1) FROM rule r WHERE r.f_sortorder<=rule.f_sortorder) AS i_ORDER FROM rule;
+CREATE VIEW v_rule_displayname AS SELECT *, t_definition AS t_displayname FROM rule;
+CREATE VIEW v_interest AS SELECT *,(SELECT s.t_name FROM account s WHERE s.id=interest.rd_account_id) AS t_ACCOUNT FROM interest;
+CREATE VIEW v_interest_displayname AS SELECT *, STRFTIME('%d/%m/%Y',d_date)||' '||f_rate||'%' AS t_displayname FROM interest;
+CREATE VIEW v_budgetrule AS SELECT *, IFNULL((SELECT s.t_fullname FROM category s WHERE s.id=budgetrule.rc_category_id),'') AS t_CATEGORYCONDITION, IFNULL((SELECT s.t_fullname FROM category s WHERE s.id=budgetrule.rc_category_id_target),'') AS t_CATEGORY, (CASE WHEN budgetrule.i_condition=-1 THEN 'Négatif' WHEN budgetrule.i_condition=1 THEN 'Positif' WHEN budgetrule.i_condition=0 THEN 'Tous' END) AS t_WHENNLS, f_quantity||(CASE WHEN budgetrule.t_absolute='N' THEN '%' ELSE (SELECT t_symbol FROM unit WHERE t_type='1') END) AS t_WHATNLS,(CASE WHEN budgetrule.t_rule='N' THEN 'Suivant' WHEN budgetrule.t_rule='C' THEN 'Courant' WHEN budgetrule.t_rule='Y' THEN 'Année' END) AS t_RULENLS FROM budgetrule;
+CREATE VIEW v_budgetrule_display AS SELECT * FROM v_budgetrule;
+CREATE VIEW v_budgetrule_displayname AS SELECT *, t_WHENNLS||' '||t_WHATNLS||' '||t_RULENLS||' '||t_CATEGORY AS t_displayname FROM v_budgetrule;
+CREATE VIEW v_budget_tmp AS SELECT *, IFNULL((SELECT s.t_fullname FROM category s WHERE s.id=budget.rc_category_id),'') AS t_CATEGORY, (i_year||(CASE WHEN i_month=0 THEN '' WHEN i_month<10 THEN '-0'||i_month ELSE '-'||i_month END)) AS t_PERIOD, (SELECT TOTAL(o.f_REALCURRENTAMOUNT) FROM v_operation_consolidated o WHERE STRFTIME('%Y', o.d_date)=i_year AND (i_month=0 OR STRFTIME('%m', o.d_date)=i_month) AND o.i_IDCATEGORY IN (SELECT b2.id_category FROM budgetcategory b2 WHERE b2.id=budget.id)) AS f_CURRENTAMOUNT, (SELECT GROUP_CONCAT(v_budgetrule_displayname.t_displayname,',') FROM v_budgetrule_displayname WHERE (v_budgetrule_displayname.t_year_condition='N' OR budget.i_year=v_budgetrule_displayname.i_year) AND (v_budgetrule_displayname.t_month_condition='N' OR budget.i_month=v_budgetrule_displayname.i_month) AND (v_budgetrule_displayname.t_category_condition='N' OR budget.rc_category_id=v_budgetrule_displayname.rc_category_id) ORDER BY v_budgetrule_displayname.t_absolute DESC, v_budgetrule_displayname.id) AS t_RULES FROM budget;
+CREATE VIEW v_budget AS SELECT *, (f_CURRENTAMOUNT-f_budgeted_modified) AS f_DELTABEFORETRANSFER, (f_CURRENTAMOUNT-f_budgeted_modified-f_transferred) AS f_DELTA FROM v_budget_tmp;
+CREATE VIEW v_budget_display AS SELECT *, (f_CURRENTAMOUNT-f_budgeted_modified) AS f_DELTABEFORETRANSFER, (f_CURRENTAMOUNT-f_budgeted_modified-f_transferred) AS f_DELTA FROM vm_budget_tmp;
+CREATE VIEW v_budget_displayname AS SELECT *, t_CATEGORY||' '||t_PERIOD||' '||f_budgeted_modified AS t_displayname FROM v_budget;
+CREATE TRIGGER fkdc_bank_parameters_uuid BEFORE DELETE ON bank FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'bank'; END;
+CREATE TRIGGER fkdc_account_parameters_uuid BEFORE DELETE ON account FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'account'; END;
+CREATE TRIGGER fkdc_unit_parameters_uuid BEFORE DELETE ON unit FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'unit'; END;
+CREATE TRIGGER fkdc_unitvalue_parameters_uuid BEFORE DELETE ON unitvalue FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'unitvalue'; END;
+CREATE TRIGGER fkdc_category_parameters_uuid BEFORE DELETE ON category FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'category'; END;
+CREATE TRIGGER fkdc_operation_parameters_uuid BEFORE DELETE ON operation FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'operation'; END;
+CREATE TRIGGER fkdc_interest_parameters_uuid BEFORE DELETE ON interest FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'interest'; END;
+CREATE TRIGGER fkdc_suboperation_parameters_uuid BEFORE DELETE ON suboperation FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'suboperation'; END;
+CREATE TRIGGER fkdc_refund_parameters_uuid BEFORE DELETE ON refund FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'refund'; END;
+CREATE TRIGGER fkdc_payee_parameters_uuid BEFORE DELETE ON payee FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'payee'; END;
+CREATE TRIGGER fkdc_recurrentoperation_parameters_uuid BEFORE DELETE ON recurrentoperation FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'recurrentoperation'; END;
+CREATE TRIGGER fkdc_rule_parameters_uuid BEFORE DELETE ON rule FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'rule'; END;
+CREATE TRIGGER fkdc_budget_parameters_uuid BEFORE DELETE ON budget FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'budget'; END;
+CREATE TRIGGER fkdc_budgetrule_parameters_uuid BEFORE DELETE ON budgetrule FOR EACH ROW BEGIN DELETE FROM parameters WHERE parameters.t_uuid_parent=OLD.id||'-'||'budgetrule'; END;
+CREATE TRIGGER cpt_category_fullname1 AFTER INSERT ON category BEGIN UPDATE category SET t_fullname=CASE WHEN rd_category_id IS NULL OR rd_category_id='' OR rd_category_id=0 THEN new.t_name ELSE (SELECT c.t_fullname FROM category c WHERE c.id=new.rd_category_id)||' > '||new.t_name END WHERE id=new.id;END;
+CREATE TRIGGER cpt_category_fullname2 AFTER UPDATE OF t_name, rd_category_id ON category BEGIN UPDATE category SET t_fullname=CASE WHEN rd_category_id IS NULL OR rd_category_id='' OR rd_category_id=0 THEN new.t_name ELSE (SELECT c.t_fullname FROM category c WHERE c.id=new.rd_category_id)||' > '||new.t_name END WHERE id=new.id;UPDATE category SET t_name=t_name WHERE rd_category_id=new.id;END;
+CREATE TRIGGER fkdc_category_delete BEFORE DELETE ON category FOR EACH ROW BEGIN UPDATE suboperation SET r_category_id=OLD.rd_category_id WHERE r_category_id=OLD.id; END;
+explain
+ SELECT TOTAL(f_CURRENTAMOUNT), d_DATEMONTH
+ from v_operation_display
+ WHERE d_DATEMONTH IN ('2012-05', '2012-04')
+ group by d_DATEMONTH, t_TYPEEXPENSE;
+ }
+} {/.* Goto .*/}
+
+# The next test requires FTS4.  FTS4 is provided by the FTS3 extension
+# module, so the capability to probe is "fts3"; on builds compiled
+# without it, skip the remainder of this file.
+ifcapable !fts3 {
+  finish_test
+  return
+}
+
+# Taken from the gnome-shell project
+# (the "nao:"/"ncal:"/"mlo:"/"mto:" table prefixes below look like a
+# Tracker ontology store schema -- NOTE(review): origin as stated by
+# the upstream comment; not verified here).
+# Reset the fixture: close the current connection, remove the database
+# file, then reopen so the next test starts from an empty test.db.
+# The order matters -- the file must be closed before it is deleted.
+db close
+forcedelete test.db
+sqlite3 db test.db
+do_test fuzz-oss1-gnomeshell {
+ db eval {
+CREATE TABLE Resource (ID INTEGER NOT NULL PRIMARY KEY, Uri TEXT NOT
+NULL, UNIQUE (Uri));
+CREATE VIRTUAL TABLE fts USING fts4;
+CREATE TABLE "mfo:Action" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "mfo:Enclosure" (ID INTEGER NOT NULL PRIMARY KEY,
+"mfo:remoteLink" INTEGER, "mfo:remoteLink:graph" INTEGER,
+"mfo:groupDefault" INTEGER, "mfo:groupDefault:graph" INTEGER,
+"mfo:localLink" INTEGER, "mfo:localLink:graph" INTEGER, "mfo:optional"
+INTEGER, "mfo:optional:graph" INTEGER);
+CREATE TABLE "mfo:FeedChannel" (ID INTEGER NOT NULL PRIMARY KEY,
+"mfo:updatedTime" INTEGER, "mfo:updatedTime:graph" INTEGER,
+"mfo:updatedTime:localDate" INTEGER, "mfo:updatedTime:localTime"
+INTEGER, "mfo:unreadCount" INTEGER, "mfo:unreadCount:graph" INTEGER,
+"mfo:totalCount" INTEGER, "mfo:totalCount:graph" INTEGER, "mfo:action"
+INTEGER, "mfo:action:graph" INTEGER, "mfo:type" INTEGER,
+"mfo:type:graph" INTEGER);
+CREATE TABLE "mfo:FeedElement" (ID INTEGER NOT NULL PRIMARY KEY,
+"mfo:image" TEXT COLLATE NOCASE, "mfo:image:graph" INTEGER,
+"mfo:feedSettings" INTEGER, "mfo:feedSettings:graph" INTEGER);
+CREATE TABLE "mfo:FeedMessage" (ID INTEGER NOT NULL PRIMARY KEY,
+"mfo:downloadedTime" INTEGER, "mfo:downloadedTime:graph" INTEGER,
+"mfo:downloadedTime:localDate" INTEGER, "mfo:downloadedTime:localTime"
+INTEGER);
+CREATE TABLE "mfo:FeedMessage_mfo:enclosureList" (ID INTEGER NOT NULL,
+"mfo:enclosureList" INTEGER NOT NULL, "mfo:enclosureList:graph"
+INTEGER);
+CREATE TABLE "mfo:FeedSettings" (ID INTEGER NOT NULL PRIMARY KEY,
+"mfo:updateInterval" INTEGER, "mfo:updateInterval:graph" INTEGER,
+"mfo:expiryInterval" INTEGER, "mfo:expiryInterval:graph" INTEGER,
+"mfo:downloadPath" TEXT COLLATE NOCASE, "mfo:downloadPath:graph"
+INTEGER, "mfo:downloadFlag" INTEGER, "mfo:downloadFlag:graph" INTEGER,
+"mfo:maxSize" INTEGER, "mfo:maxSize:graph" INTEGER);
+CREATE TABLE "mfo:FeedType" (ID INTEGER NOT NULL PRIMARY KEY,
+"mfo:name" TEXT COLLATE NOCASE, "mfo:name:graph" INTEGER);
+CREATE TABLE "mlo:GeoBoundingBox" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "mlo:GeoBoundingBox_mlo:bbNorthWest" (ID INTEGER NOT
+NULL, "mlo:bbNorthWest" INTEGER NOT NULL, "mlo:bbNorthWest:graph"
+INTEGER);
+CREATE TABLE "mlo:GeoBoundingBox_mlo:bbSouthEast" (ID INTEGER NOT
+NULL, "mlo:bbSouthEast" INTEGER NOT NULL, "mlo:bbSouthEast:graph"
+INTEGER);
+CREATE TABLE "mlo:GeoLocation" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "mlo:GeoLocation_mlo:asBoundingBox" (ID INTEGER NOT NULL,
+"mlo:asBoundingBox" INTEGER NOT NULL, "mlo:asBoundingBox:graph"
+INTEGER);
+CREATE TABLE "mlo:GeoLocation_mlo:asGeoPoint" (ID INTEGER NOT NULL,
+"mlo:asGeoPoint" INTEGER NOT NULL, "mlo:asGeoPoint:graph" INTEGER);
+CREATE TABLE "mlo:GeoLocation_mlo:asPostalAddress" (ID INTEGER NOT
+NULL, "mlo:asPostalAddress" INTEGER NOT NULL,
+"mlo:asPostalAddress:graph" INTEGER);
+CREATE TABLE "mlo:GeoPoint" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "mlo:GeoPoint_mlo:address" (ID INTEGER NOT NULL,
+"mlo:address" TEXT NOT NULL, "mlo:address:graph" INTEGER);
+CREATE TABLE "mlo:GeoPoint_mlo:altitude" (ID INTEGER NOT NULL,
+"mlo:altitude" REAL NOT NULL, "mlo:altitude:graph" INTEGER);
+CREATE TABLE "mlo:GeoPoint_mlo:city" (ID INTEGER NOT NULL, "mlo:city"
+TEXT NOT NULL, "mlo:city:graph" INTEGER);
+CREATE TABLE "mlo:GeoPoint_mlo:country" (ID INTEGER NOT NULL,
+"mlo:country" TEXT NOT NULL, "mlo:country:graph" INTEGER);
+CREATE TABLE "mlo:GeoPoint_mlo:latitude" (ID INTEGER NOT NULL,
+"mlo:latitude" REAL NOT NULL, "mlo:latitude:graph" INTEGER);
+CREATE TABLE "mlo:GeoPoint_mlo:longitude" (ID INTEGER NOT NULL,
+"mlo:longitude" REAL NOT NULL, "mlo:longitude:graph" INTEGER);
+CREATE TABLE "mlo:GeoPoint_mlo:state" (ID INTEGER NOT NULL,
+"mlo:state" TEXT NOT NULL, "mlo:state:graph" INTEGER);
+CREATE TABLE "mlo:GeoPoint_mlo:timestamp" (ID INTEGER NOT NULL,
+"mlo:timestamp" INTEGER NOT NULL, "mlo:timestamp:graph" INTEGER,
+"mlo:timestamp:localDate" INTEGER NOT NULL, "mlo:timestamp:localTime"
+INTEGER NOT NULL);
+CREATE TABLE "mlo:GeoSphere" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "mlo:GeoSphere_mlo:radius" (ID INTEGER NOT NULL,
+"mlo:radius" REAL NOT NULL, "mlo:radius:graph" INTEGER);
+CREATE TABLE "mlo:Landmark" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "mlo:LandmarkCategory" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "mlo:LandmarkCategory_mlo:isRemovable" (ID INTEGER NOT
+NULL, "mlo:isRemovable" INTEGER NOT NULL, "mlo:isRemovable:graph"
+INTEGER);
+CREATE TABLE "mlo:Landmark_mlo:belongsToCategory" (ID INTEGER NOT
+NULL, "mlo:belongsToCategory" INTEGER NOT NULL,
+"mlo:belongsToCategory:graph" INTEGER);
+CREATE TABLE "mlo:Landmark_mlo:poiLocation" (ID INTEGER NOT NULL,
+"mlo:poiLocation" INTEGER NOT NULL, "mlo:poiLocation:graph" INTEGER);
+CREATE TABLE "mlo:LocationBoundingBox" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "mlo:LocationBoundingBox_mlo:boxEastLimit" (ID INTEGER
+NOT NULL, "mlo:boxEastLimit" INTEGER NOT NULL,
+"mlo:boxEastLimit:graph" INTEGER);
+CREATE TABLE "mlo:LocationBoundingBox_mlo:boxNorthLimit" (ID INTEGER
+NOT NULL, "mlo:boxNorthLimit" INTEGER NOT NULL,
+"mlo:boxNorthLimit:graph" INTEGER);
+CREATE TABLE "mlo:LocationBoundingBox_mlo:boxSouthWestCorner" (ID
+INTEGER NOT NULL, "mlo:boxSouthWestCorner" INTEGER NOT NULL,
+"mlo:boxSouthWestCorner:graph" INTEGER);
+CREATE TABLE "mlo:LocationBoundingBox_mlo:boxVerticalLimit" (ID
+INTEGER NOT NULL, "mlo:boxVerticalLimit" INTEGER NOT NULL,
+"mlo:boxVerticalLimit:graph" INTEGER);
+CREATE TABLE "mlo:PointOfInterest" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "mlo:Route" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "mlo:Route_mlo:endTime" (ID INTEGER NOT NULL,
+"mlo:endTime" INTEGER NOT NULL, "mlo:endTime:graph" INTEGER,
+"mlo:endTime:localDate" INTEGER NOT NULL, "mlo:endTime:localTime"
+INTEGER NOT NULL);
+CREATE TABLE "mlo:Route_mlo:routeDetails" (ID INTEGER NOT NULL,
+"mlo:routeDetails" TEXT NOT NULL, "mlo:routeDetails:graph" INTEGER);
+CREATE TABLE "mlo:Route_mlo:startTime" (ID INTEGER NOT NULL,
+"mlo:startTime" INTEGER NOT NULL, "mlo:startTime:graph" INTEGER,
+"mlo:startTime:localDate" INTEGER NOT NULL, "mlo:startTime:localTime"
+INTEGER NOT NULL);
+CREATE TABLE "mto:DownloadTransfer" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "mto:State" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "mto:SyncTransfer" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "mto:Transfer" (ID INTEGER NOT NULL PRIMARY KEY,
+"mto:transferState" INTEGER, "mto:transferState:graph" INTEGER,
+"mto:method" INTEGER, "mto:method:graph" INTEGER, "mto:created"
+INTEGER, "mto:created:graph" INTEGER, "mto:created:localDate" INTEGER,
+"mto:created:localTime" INTEGER, "mto:account" TEXT COLLATE NOCASE,
+"mto:account:graph" INTEGER, "mto:starter" INTEGER,
+"mto:starter:graph" INTEGER, "mto:agent" INTEGER, "mto:agent:graph"
+INTEGER);
+CREATE TABLE "mto:TransferElement" (ID INTEGER NOT NULL PRIMARY KEY,
+"mto:source" INTEGER, "mto:source:graph" INTEGER, "mto:destination"
+INTEGER, "mto:destination:graph" INTEGER, "mto:startedTime" INTEGER,
+"mto:startedTime:graph" INTEGER, "mto:startedTime:localDate" INTEGER,
+"mto:startedTime:localTime" INTEGER, "mto:completedTime" INTEGER,
+"mto:completedTime:graph" INTEGER, "mto:completedTime:localDate"
+INTEGER, "mto:completedTime:localTime" INTEGER, "mto:state" INTEGER,
+"mto:state:graph" INTEGER);
+CREATE TABLE "mto:TransferMethod" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "mto:Transfer_mto:transferList" (ID INTEGER NOT NULL,
+"mto:transferList" INTEGER NOT NULL, "mto:transferList:graph"
+INTEGER);
+CREATE TABLE "mto:Transfer_mto:transferPrivacyLevel" (ID INTEGER NOT
+NULL, "mto:transferPrivacyLevel" TEXT NOT NULL,
+"mto:transferPrivacyLevel:graph" INTEGER);
+CREATE TABLE "mto:UploadTransfer" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "mto:UploadTransfer_mto:transferCategory" (ID INTEGER NOT
+NULL, "mto:transferCategory" TEXT NOT NULL,
+"mto:transferCategory:graph" INTEGER);
+CREATE TABLE "mtp:ScanType" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nao:Property" (ID INTEGER NOT NULL PRIMARY KEY,
+"nao:propertyName" TEXT COLLATE NOCASE, "nao:propertyName:graph"
+INTEGER, "nao:propertyValue" TEXT COLLATE NOCASE,
+"nao:propertyValue:graph" INTEGER);
+CREATE TABLE "nao:Tag" (ID INTEGER NOT NULL PRIMARY KEY,
+"nao:prefLabel" TEXT COLLATE NOCASE, "nao:prefLabel:graph" INTEGER,
+"nao:description" TEXT COLLATE NOCASE, "nao:description:graph"
+INTEGER);
+CREATE TABLE "nao:Tag_tracker:isDefaultTag" (ID INTEGER NOT NULL,
+"tracker:isDefaultTag" INTEGER NOT NULL, "tracker:isDefaultTag:graph"
+INTEGER);
+CREATE TABLE "nao:Tag_tracker:tagRelatedTo" (ID INTEGER NOT NULL,
+"tracker:tagRelatedTo" INTEGER NOT NULL, "tracker:tagRelatedTo:graph"
+INTEGER);
+CREATE TABLE "ncal:AccessClassification" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:Alarm" (ID INTEGER NOT NULL PRIMARY KEY,
+"ncal:repeat" INTEGER, "ncal:repeat:graph" INTEGER);
+CREATE TABLE "ncal:AlarmAction" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:Alarm_ncal:action" (ID INTEGER NOT NULL,
+"ncal:action" INTEGER NOT NULL, "ncal:action:graph" INTEGER);
+CREATE TABLE "ncal:Attachment" (ID INTEGER NOT NULL PRIMARY KEY,
+"ncal:attachmentUri" INTEGER, "ncal:attachmentUri:graph" INTEGER,
+"ncal:fmttype" TEXT COLLATE NOCASE, "ncal:fmttype:graph" INTEGER,
+"ncal:encoding" INTEGER, "ncal:encoding:graph" INTEGER,
+"ncal:attachmentContent" TEXT COLLATE NOCASE,
+"ncal:attachmentContent:graph" INTEGER);
+CREATE TABLE "ncal:AttachmentEncoding" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:Attendee" (ID INTEGER NOT NULL PRIMARY KEY,
+"ncal:delegatedTo" INTEGER, "ncal:delegatedTo:graph" INTEGER,
+"ncal:delegatedFrom" INTEGER, "ncal:delegatedFrom:graph" INTEGER,
+"ncal:cutype" INTEGER, "ncal:cutype:graph" INTEGER, "ncal:member"
+INTEGER, "ncal:member:graph" INTEGER, "ncal:role" INTEGER,
+"ncal:role:graph" INTEGER, "ncal:rsvp" INTEGER, "ncal:rsvp:graph"
+INTEGER, "ncal:partstat" INTEGER, "ncal:partstat:graph" INTEGER);
+CREATE TABLE "ncal:AttendeeOrOrganizer" (ID INTEGER NOT NULL PRIMARY
+KEY, "ncal:dir" INTEGER, "ncal:dir:graph" INTEGER,
+"ncal:involvedContact" INTEGER, "ncal:involvedContact:graph" INTEGER,
+"ncal:sentBy" INTEGER, "ncal:sentBy:graph" INTEGER);
+CREATE TABLE "ncal:AttendeeRole" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:BydayRulePart" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:BydayRulePart_ncal:bydayModifier" (ID INTEGER NOT
+NULL, "ncal:bydayModifier" INTEGER NOT NULL,
+"ncal:bydayModifier:graph" INTEGER);
+CREATE TABLE "ncal:BydayRulePart_ncal:bydayWeekday" (ID INTEGER NOT
+NULL, "ncal:bydayWeekday" INTEGER NOT NULL, "ncal:bydayWeekday:graph"
+INTEGER);
+CREATE TABLE "ncal:Calendar" (ID INTEGER NOT NULL PRIMARY KEY,
+"ncal:method" TEXT COLLATE NOCASE, "ncal:method:graph" INTEGER,
+"ncal:calscale" INTEGER, "ncal:calscale:graph" INTEGER, "ncal:prodid"
+TEXT COLLATE NOCASE, "ncal:prodid:graph" INTEGER, "ncal:version" TEXT
+COLLATE NOCASE, "ncal:version:graph" INTEGER);
+CREATE TABLE "ncal:CalendarDataObject" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:CalendarScale" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:CalendarUserType" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:Calendar_ncal:component" (ID INTEGER NOT NULL,
+"ncal:component" INTEGER NOT NULL, "ncal:component:graph" INTEGER);
+CREATE TABLE "ncal:Event" (ID INTEGER NOT NULL PRIMARY KEY,
+"ncal:eventStatus" INTEGER, "ncal:eventStatus:graph" INTEGER,
+"ncal:transp" INTEGER, "ncal:transp:graph" INTEGER);
+CREATE TABLE "ncal:EventStatus" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:Freebusy" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:FreebusyPeriod" (ID INTEGER NOT NULL PRIMARY KEY,
+"ncal:fbtype" INTEGER, "ncal:fbtype:graph" INTEGER);
+CREATE TABLE "ncal:FreebusyType" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:Freebusy_ncal:freebusy" (ID INTEGER NOT NULL,
+"ncal:freebusy" INTEGER NOT NULL, "ncal:freebusy:graph" INTEGER);
+CREATE TABLE "ncal:Journal" (ID INTEGER NOT NULL PRIMARY KEY,
+"ncal:journalStatus" INTEGER, "ncal:journalStatus:graph" INTEGER);
+CREATE TABLE "ncal:JournalStatus" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:NcalDateTime" (ID INTEGER NOT NULL PRIMARY KEY,
+"ncal:ncalTimezone" INTEGER, "ncal:ncalTimezone:graph" INTEGER,
+"ncal:date" INTEGER, "ncal:date:graph" INTEGER, "ncal:date:localDate"
+INTEGER, "ncal:date:localTime" INTEGER, "ncal:dateTime" INTEGER,
+"ncal:dateTime:graph" INTEGER, "ncal:dateTime:localDate" INTEGER,
+"ncal:dateTime:localTime" INTEGER);
+CREATE TABLE "ncal:NcalPeriod" (ID INTEGER NOT NULL PRIMARY KEY,
+"ncal:periodBegin" INTEGER, "ncal:periodBegin:graph" INTEGER,
+"ncal:periodBegin:localDate" INTEGER, "ncal:periodBegin:localTime"
+INTEGER, "ncal:periodDuration" INTEGER, "ncal:periodDuration:graph"
+INTEGER, "ncal:periodEnd" INTEGER, "ncal:periodEnd:graph" INTEGER,
+"ncal:periodEnd:localDate" INTEGER, "ncal:periodEnd:localTime"
+INTEGER);
+CREATE TABLE "ncal:NcalTimeEntity" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:Organizer" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:ParticipationStatus" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:RecurrenceFrequency" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:RecurrenceIdentifier" (ID INTEGER NOT NULL PRIMARY
+KEY, "ncal:range" INTEGER, "ncal:range:graph" INTEGER,
+"ncal:recurrenceIdDateTime" INTEGER, "ncal:recurrenceIdDateTime:graph"
+INTEGER);
+CREATE TABLE "ncal:RecurrenceIdentifierRange" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:RecurrenceRule" (ID INTEGER NOT NULL PRIMARY KEY,
+"ncal:until" INTEGER, "ncal:until:graph" INTEGER,
+"ncal:until:localDate" INTEGER, "ncal:until:localTime" INTEGER,
+"ncal:wkst" INTEGER, "ncal:wkst:graph" INTEGER, "ncal:interval"
+INTEGER, "ncal:interval:graph" INTEGER, "ncal:count" INTEGER,
+"ncal:count:graph" INTEGER, "ncal:freq" INTEGER, "ncal:freq:graph"
+INTEGER);
+CREATE TABLE "ncal:RecurrenceRule_ncal:byday" (ID INTEGER NOT NULL,
+"ncal:byday" INTEGER NOT NULL, "ncal:byday:graph" INTEGER);
+CREATE TABLE "ncal:RecurrenceRule_ncal:byhour" (ID INTEGER NOT NULL,
+"ncal:byhour" INTEGER NOT NULL, "ncal:byhour:graph" INTEGER);
+CREATE TABLE "ncal:RecurrenceRule_ncal:byminute" (ID INTEGER NOT NULL,
+"ncal:byminute" INTEGER NOT NULL, "ncal:byminute:graph" INTEGER);
+CREATE TABLE "ncal:RecurrenceRule_ncal:bymonth" (ID INTEGER NOT NULL,
+"ncal:bymonth" INTEGER NOT NULL, "ncal:bymonth:graph" INTEGER);
+CREATE TABLE "ncal:RecurrenceRule_ncal:bymonthday" (ID INTEGER NOT
+NULL, "ncal:bymonthday" INTEGER NOT NULL, "ncal:bymonthday:graph"
+INTEGER);
+CREATE TABLE "ncal:RecurrenceRule_ncal:bysecond" (ID INTEGER NOT NULL,
+"ncal:bysecond" INTEGER NOT NULL, "ncal:bysecond:graph" INTEGER);
+CREATE TABLE "ncal:RecurrenceRule_ncal:bysetpos" (ID INTEGER NOT NULL,
+"ncal:bysetpos" INTEGER NOT NULL, "ncal:bysetpos:graph" INTEGER);
+CREATE TABLE "ncal:RecurrenceRule_ncal:byweekno" (ID INTEGER NOT NULL,
+"ncal:byweekno" INTEGER NOT NULL, "ncal:byweekno:graph" INTEGER);
+CREATE TABLE "ncal:RecurrenceRule_ncal:byyearday" (ID INTEGER NOT
+NULL, "ncal:byyearday" INTEGER NOT NULL, "ncal:byyearday:graph"
+INTEGER);
+CREATE TABLE "ncal:RequestStatus" (ID INTEGER NOT NULL PRIMARY KEY,
+"ncal:statusDescription" TEXT COLLATE NOCASE,
+"ncal:statusDescription:graph" INTEGER, "ncal:returnStatus" TEXT
+COLLATE NOCASE, "ncal:returnStatus:graph" INTEGER,
+"ncal:requestStatusData" TEXT COLLATE NOCASE,
+"ncal:requestStatusData:graph" INTEGER);
+CREATE TABLE "ncal:TimeTransparency" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:Timezone" (ID INTEGER NOT NULL PRIMARY KEY,
+"ncal:tzurl" INTEGER, "ncal:tzurl:graph" INTEGER, "ncal:standard"
+INTEGER, "ncal:standard:graph" INTEGER, "ncal:daylight" INTEGER,
+"ncal:daylight:graph" INTEGER, "ncal:tzid" TEXT COLLATE NOCASE,
+"ncal:tzid:graph" INTEGER);
+CREATE TABLE "ncal:TimezoneObservance" (ID INTEGER NOT NULL PRIMARY
+KEY, "ncal:tzoffsetfrom" TEXT COLLATE NOCASE,
+"ncal:tzoffsetfrom:graph" INTEGER, "ncal:tzoffsetto" TEXT COLLATE
+NOCASE, "ncal:tzoffsetto:graph" INTEGER, "ncal:tzname" TEXT COLLATE
+NOCASE, "ncal:tzname:graph" INTEGER);
+CREATE TABLE "ncal:Todo" (ID INTEGER NOT NULL PRIMARY KEY,
+"ncal:percentComplete" INTEGER, "ncal:percentComplete:graph" INTEGER,
+"ncal:completed" INTEGER, "ncal:completed:graph" INTEGER,
+"ncal:completed:localDate" INTEGER, "ncal:completed:localTime"
+INTEGER, "ncal:todoStatus" INTEGER, "ncal:todoStatus:graph" INTEGER,
+"ncal:due" INTEGER, "ncal:due:graph" INTEGER);
+CREATE TABLE "ncal:TodoStatus" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:Trigger" (ID INTEGER NOT NULL PRIMARY KEY,
+"ncal:related" INTEGER, "ncal:related:graph" INTEGER,
+"ncal:triggerDateTime" INTEGER, "ncal:triggerDateTime:graph" INTEGER,
+"ncal:triggerDateTime:localDate" INTEGER,
+"ncal:triggerDateTime:localTime" INTEGER, "ncal:triggerDuration"
+INTEGER, "ncal:triggerDuration:graph" INTEGER);
+CREATE TABLE "ncal:TriggerRelation" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "ncal:UnionParentClass" (ID INTEGER NOT NULL PRIMARY KEY,
+"ncal:lastModified" INTEGER, "ncal:lastModified:graph" INTEGER,
+"ncal:lastModified:localDate" INTEGER, "ncal:lastModified:localTime"
+INTEGER, "ncal:trigger" INTEGER, "ncal:trigger:graph" INTEGER,
+"ncal:created" INTEGER, "ncal:created:graph" INTEGER,
+"ncal:created:localDate" INTEGER, "ncal:created:localTime" INTEGER,
+"ncal:url" INTEGER, "ncal:url:graph" INTEGER, "ncal:comment" TEXT
+COLLATE NOCASE, "ncal:comment:graph" INTEGER, "ncal:summaryAltRep"
+INTEGER, "ncal:summaryAltRep:graph" INTEGER, "ncal:priority" INTEGER,
+"ncal:priority:graph" INTEGER, "ncal:location" TEXT COLLATE NOCASE,
+"ncal:location:graph" INTEGER, "ncal:uid" TEXT COLLATE NOCASE,
+"ncal:uid:graph" INTEGER, "ncal:requestStatus" INTEGER,
+"ncal:requestStatus:graph" INTEGER, "ncal:recurrenceId" INTEGER,
+"ncal:recurrenceId:graph" INTEGER, "ncal:dtstamp" INTEGER,
+"ncal:dtstamp:graph" INTEGER, "ncal:dtstamp:localDate" INTEGER,
+"ncal:dtstamp:localTime" INTEGER, "ncal:class" INTEGER,
+"ncal:class:graph" INTEGER, "ncal:organizer" INTEGER,
+"ncal:organizer:graph" INTEGER, "ncal:dtend" INTEGER,
+"ncal:dtend:graph" INTEGER, "ncal:summary" TEXT COLLATE NOCASE,
+"ncal:summary:graph" INTEGER, "ncal:descriptionAltRep" INTEGER,
+"ncal:descriptionAltRep:graph" INTEGER, "ncal:commentAltRep" INTEGER,
+"ncal:commentAltRep:graph" INTEGER, "ncal:sequence" INTEGER,
+"ncal:sequence:graph" INTEGER, "ncal:contact" TEXT COLLATE NOCASE,
+"ncal:contact:graph" INTEGER, "ncal:contactAltRep" INTEGER,
+"ncal:contactAltRep:graph" INTEGER, "ncal:locationAltRep" INTEGER,
+"ncal:locationAltRep:graph" INTEGER, "ncal:geo" INTEGER,
+"ncal:geo:graph" INTEGER, "ncal:resourcesAltRep" INTEGER,
+"ncal:resourcesAltRep:graph" INTEGER, "ncal:dtstart" INTEGER,
+"ncal:dtstart:graph" INTEGER, "ncal:description" TEXT COLLATE NOCASE,
+"ncal:description:graph" INTEGER, "ncal:relatedToSibling" TEXT COLLATE
+NOCASE, "ncal:relatedToSibling:graph" INTEGER, "ncal:duration"
+INTEGER, "ncal:duration:graph" INTEGER);
+CREATE TABLE "ncal:UnionParentClass_ncal:attach" (ID INTEGER NOT NULL,
+"ncal:attach" INTEGER NOT NULL, "ncal:attach:graph" INTEGER);
+CREATE TABLE "ncal:UnionParentClass_ncal:attendee" (ID INTEGER NOT
+NULL, "ncal:attendee" INTEGER NOT NULL, "ncal:attendee:graph"
+INTEGER);
+CREATE TABLE "ncal:UnionParentClass_ncal:categories" (ID INTEGER NOT
+NULL, "ncal:categories" TEXT NOT NULL, "ncal:categories:graph"
+INTEGER);
+CREATE TABLE "ncal:UnionParentClass_ncal:exdate" (ID INTEGER NOT NULL,
+"ncal:exdate" INTEGER NOT NULL, "ncal:exdate:graph" INTEGER);
+CREATE TABLE "ncal:UnionParentClass_ncal:exrule" (ID INTEGER NOT NULL,
+"ncal:exrule" INTEGER NOT NULL, "ncal:exrule:graph" INTEGER);
+CREATE TABLE "ncal:UnionParentClass_ncal:hasAlarm" (ID INTEGER NOT
+NULL, "ncal:hasAlarm" INTEGER NOT NULL, "ncal:hasAlarm:graph"
+INTEGER);
+CREATE TABLE "ncal:UnionParentClass_ncal:ncalRelation" (ID INTEGER NOT
+NULL, "ncal:ncalRelation" TEXT NOT NULL, "ncal:ncalRelation:graph"
+INTEGER);
+CREATE TABLE "ncal:UnionParentClass_ncal:rdate" (ID INTEGER NOT NULL,
+"ncal:rdate" INTEGER NOT NULL, "ncal:rdate:graph" INTEGER);
+CREATE TABLE "ncal:UnionParentClass_ncal:relatedToChild" (ID INTEGER
+NOT NULL, "ncal:relatedToChild" TEXT NOT NULL,
+"ncal:relatedToChild:graph" INTEGER);
+CREATE TABLE "ncal:UnionParentClass_ncal:relatedToParent" (ID INTEGER
+NOT NULL, "ncal:relatedToParent" TEXT NOT NULL,
+"ncal:relatedToParent:graph" INTEGER);
+CREATE TABLE "ncal:UnionParentClass_ncal:resources" (ID INTEGER NOT
+NULL, "ncal:resources" TEXT NOT NULL, "ncal:resources:graph" INTEGER);
+CREATE TABLE "ncal:UnionParentClass_ncal:rrule" (ID INTEGER NOT NULL,
+"ncal:rrule" INTEGER NOT NULL, "ncal:rrule:graph" INTEGER);
+CREATE TABLE "ncal:Weekday" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:Affiliation" (ID INTEGER NOT NULL PRIMARY KEY,
+"nco:department" TEXT COLLATE NOCASE, "nco:department:graph" INTEGER,
+"nco:org" INTEGER, "nco:org:graph" INTEGER, "nco:role" TEXT COLLATE
+NOCASE, "nco:role:graph" INTEGER);
+CREATE TABLE "nco:Affiliation_nco:title" (ID INTEGER NOT NULL,
+"nco:title" TEXT NOT NULL, "nco:title:graph" INTEGER);
+CREATE TABLE "nco:AuthorizationStatus" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:BbsNumber" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:CarPhoneNumber" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:CellPhoneNumber" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:Contact" (ID INTEGER NOT NULL PRIMARY KEY,
+"nco:fullname" TEXT COLLATE NOCASE, "nco:fullname:graph" INTEGER,
+"nco:key" INTEGER, "nco:key:graph" INTEGER, "nco:contactUID" TEXT
+COLLATE NOCASE, "nco:contactUID:graph" INTEGER, "nco:contactLocalUID"
+TEXT COLLATE NOCASE, "nco:contactLocalUID:graph" INTEGER,
+"nco:hasLocation" INTEGER, "nco:hasLocation:graph" INTEGER,
+"nco:nickname" TEXT COLLATE NOCASE, "nco:nickname:graph" INTEGER,
+"nco:representative" INTEGER, "nco:representative:graph" INTEGER,
+"nco:photo" INTEGER, "nco:photo:graph" INTEGER, "nco:birthDate"
+INTEGER, "nco:birthDate:graph" INTEGER, "nco:birthDate:localDate"
+INTEGER, "nco:birthDate:localTime" INTEGER, "nco:sound" INTEGER,
+"nco:sound:graph" INTEGER);
+CREATE TABLE "nco:ContactGroup" (ID INTEGER NOT NULL PRIMARY KEY,
+"nco:contactGroupName" TEXT COLLATE NOCASE,
+"nco:contactGroupName:graph" INTEGER);
+CREATE TABLE "nco:ContactList" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:ContactListDataObject" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:ContactList_nco:containsContact" (ID INTEGER NOT
+NULL, "nco:containsContact" INTEGER NOT NULL,
+"nco:containsContact:graph" INTEGER);
+CREATE TABLE "nco:ContactMedium" (ID INTEGER NOT NULL PRIMARY KEY,
+"nco:contactMediumComment" TEXT COLLATE NOCASE,
+"nco:contactMediumComment:graph" INTEGER);
+CREATE TABLE "nco:Contact_ncal:anniversary" (ID INTEGER NOT NULL,
+"ncal:anniversary" INTEGER NOT NULL, "ncal:anniversary:graph"
+INTEGER);
+CREATE TABLE "nco:Contact_ncal:birthday" (ID INTEGER NOT NULL,
+"ncal:birthday" INTEGER NOT NULL, "ncal:birthday:graph" INTEGER);
+CREATE TABLE "nco:Contact_nco:belongsToGroup" (ID INTEGER NOT NULL,
+"nco:belongsToGroup" INTEGER NOT NULL, "nco:belongsToGroup:graph"
+INTEGER);
+CREATE TABLE "nco:Contact_nco:note" (ID INTEGER NOT NULL, "nco:note"
+TEXT NOT NULL, "nco:note:graph" INTEGER);
+CREATE TABLE "nco:Contact_scal:anniversary" (ID INTEGER NOT NULL,
+"scal:anniversary" INTEGER NOT NULL, "scal:anniversary:graph"
+INTEGER);
+CREATE TABLE "nco:Contact_scal:birthday" (ID INTEGER NOT NULL,
+"scal:birthday" INTEGER NOT NULL, "scal:birthday:graph" INTEGER);
+CREATE TABLE "nco:DomesticDeliveryAddress" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:EmailAddress" (ID INTEGER NOT NULL PRIMARY KEY,
+"nco:emailAddress" TEXT COLLATE NOCASE UNIQUE,
+"nco:emailAddress:graph" INTEGER);
+CREATE TABLE "nco:FaxNumber" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:Gender" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:IMAccount" (ID INTEGER NOT NULL PRIMARY KEY,
+"nco:imAccountAddress" INTEGER UNIQUE, "nco:imAccountAddress:graph"
+INTEGER, "nco:imAccountType" TEXT COLLATE NOCASE,
+"nco:imAccountType:graph" INTEGER, "nco:imDisplayName" TEXT COLLATE
+NOCASE, "nco:imDisplayName:graph" INTEGER, "nco:imEnabled" INTEGER,
+"nco:imEnabled:graph" INTEGER);
+CREATE TABLE "nco:IMAccount_nco:hasIMContact" (ID INTEGER NOT NULL,
+"nco:hasIMContact" INTEGER NOT NULL, "nco:hasIMContact:graph"
+INTEGER);
+CREATE TABLE "nco:IMAddress" (ID INTEGER NOT NULL PRIMARY KEY,
+"nco:imID" TEXT COLLATE NOCASE, "nco:imID:graph" INTEGER,
+"nco:imNickname" TEXT COLLATE NOCASE, "nco:imNickname:graph" INTEGER,
+"nco:imAvatar" INTEGER, "nco:imAvatar:graph" INTEGER, "nco:imProtocol"
+TEXT COLLATE NOCASE, "nco:imProtocol:graph" INTEGER,
+"nco:imStatusMessage" TEXT COLLATE NOCASE,
+"nco:imStatusMessage:graph" INTEGER, "nco:imPresence" INTEGER,
+"nco:imPresence:graph" INTEGER, "nco:presenceLastModified" INTEGER,
+"nco:presenceLastModified:graph" INTEGER,
+"nco:presenceLastModified:localDate" INTEGER,
+"nco:presenceLastModified:localTime" INTEGER,
+"nco:imAddressAuthStatusFrom" INTEGER,
+"nco:imAddressAuthStatusFrom:graph" INTEGER,
+"nco:imAddressAuthStatusTo" INTEGER, "nco:imAddressAuthStatusTo:graph"
+INTEGER);
+CREATE TABLE "nco:IMAddress_nco:imCapability" (ID INTEGER NOT NULL,
+"nco:imCapability" INTEGER NOT NULL, "nco:imCapability:graph"
+INTEGER);
+CREATE TABLE "nco:IMCapability" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:InternationalDeliveryAddress" (ID INTEGER NOT NULL
+PRIMARY KEY);
+CREATE TABLE "nco:IsdnNumber" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:MessagingNumber" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:ModemNumber" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:OrganizationContact" (ID INTEGER NOT NULL PRIMARY
+KEY, "nco:logo" INTEGER, "nco:logo:graph" INTEGER);
+CREATE TABLE "nco:PagerNumber" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:ParcelDeliveryAddress" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:PcsNumber" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:PersonContact" (ID INTEGER NOT NULL PRIMARY KEY,
+"nco:nameFamily" TEXT COLLATE NOCASE, "nco:nameFamily:graph" INTEGER,
+"nco:nameGiven" TEXT COLLATE NOCASE, "nco:nameGiven:graph" INTEGER,
+"nco:nameAdditional" TEXT COLLATE NOCASE, "nco:nameAdditional:graph"
+INTEGER, "nco:nameHonorificSuffix" TEXT COLLATE NOCASE,
+"nco:nameHonorificSuffix:graph" INTEGER, "nco:nameHonorificPrefix"
+TEXT COLLATE NOCASE, "nco:nameHonorificPrefix:graph" INTEGER,
+"nco:hobby" TEXT COLLATE NOCASE, "nco:hobby:graph" INTEGER,
+"nco:gender" INTEGER, "nco:gender:graph" INTEGER);
+CREATE TABLE "nco:PersonContact_nco:hasAffiliation" (ID INTEGER NOT
+NULL, "nco:hasAffiliation" INTEGER NOT NULL,
+"nco:hasAffiliation:graph" INTEGER);
+CREATE TABLE "nco:PhoneNumber" (ID INTEGER NOT NULL PRIMARY KEY,
+"nco:phoneNumber" TEXT COLLATE NOCASE, "nco:phoneNumber:graph"
+INTEGER);
+CREATE TABLE "nco:PostalAddress" (ID INTEGER NOT NULL PRIMARY KEY,
+"nco:region" TEXT COLLATE NOCASE, "nco:region:graph" INTEGER,
+"nco:country" TEXT COLLATE NOCASE, "nco:country:graph" INTEGER,
+"nco:extendedAddress" TEXT COLLATE NOCASE,
+"nco:extendedAddress:graph" INTEGER, "nco:addressLocation" INTEGER,
+"nco:addressLocation:graph" INTEGER, "nco:streetAddress" TEXT COLLATE
+NOCASE, "nco:streetAddress:graph" INTEGER, "nco:postalcode" TEXT
+COLLATE NOCASE, "nco:postalcode:graph" INTEGER, "nco:locality" TEXT
+COLLATE NOCASE, "nco:locality:graph" INTEGER, "nco:county" TEXT
+COLLATE NOCASE, "nco:county:graph" INTEGER, "nco:district" TEXT
+COLLATE NOCASE, "nco:district:graph" INTEGER, "nco:pobox" TEXT
+COLLATE NOCASE, "nco:pobox:graph" INTEGER);
+CREATE TABLE "nco:PresenceStatus" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:Role" (ID INTEGER NOT NULL PRIMARY KEY, "nco:video"
+INTEGER, "nco:video:graph" INTEGER);
+CREATE TABLE "nco:Role_nco:blogUrl" (ID INTEGER NOT NULL,
+"nco:blogUrl" INTEGER NOT NULL, "nco:blogUrl:graph" INTEGER);
+CREATE TABLE "nco:Role_nco:foafUrl" (ID INTEGER NOT NULL,
+"nco:foafUrl" INTEGER NOT NULL, "nco:foafUrl:graph" INTEGER);
+CREATE TABLE "nco:Role_nco:hasContactMedium" (ID INTEGER NOT NULL,
+"nco:hasContactMedium" INTEGER NOT NULL, "nco:hasContactMedium:graph"
+INTEGER);
+CREATE TABLE "nco:Role_nco:hasEmailAddress" (ID INTEGER NOT NULL,
+"nco:hasEmailAddress" INTEGER NOT NULL, "nco:hasEmailAddress:graph"
+INTEGER);
+CREATE TABLE "nco:Role_nco:hasIMAddress" (ID INTEGER NOT NULL,
+"nco:hasIMAddress" INTEGER NOT NULL, "nco:hasIMAddress:graph"
+INTEGER);
+CREATE TABLE "nco:Role_nco:hasPhoneNumber" (ID INTEGER NOT NULL,
+"nco:hasPhoneNumber" INTEGER NOT NULL, "nco:hasPhoneNumber:graph"
+INTEGER);
+CREATE TABLE "nco:Role_nco:hasPostalAddress" (ID INTEGER NOT NULL,
+"nco:hasPostalAddress" INTEGER NOT NULL, "nco:hasPostalAddress:graph"
+INTEGER);
+CREATE TABLE "nco:Role_nco:url" (ID INTEGER NOT NULL, "nco:url"
+INTEGER NOT NULL, "nco:url:graph" INTEGER);
+CREATE TABLE "nco:Role_nco:websiteUrl" (ID INTEGER NOT NULL,
+"nco:websiteUrl" INTEGER NOT NULL, "nco:websiteUrl:graph" INTEGER);
+CREATE TABLE "nco:VideoTelephoneNumber" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nco:VoicePhoneNumber" (ID INTEGER NOT NULL PRIMARY KEY,
+"nco:voiceMail" INTEGER, "nco:voiceMail:graph" INTEGER);
+CREATE TABLE "nfo:Application" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:Archive" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:uncompressedSize" INTEGER, "nfo:uncompressedSize:graph" INTEGER);
+CREATE TABLE "nfo:ArchiveItem" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:isPasswordProtected" INTEGER, "nfo:isPasswordProtected:graph"
+INTEGER);
+CREATE TABLE "nfo:Attachment" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:Audio" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:channels" INTEGER, "nfo:channels:graph" INTEGER,
+"nfo:sideChannels" INTEGER, "nfo:sideChannels:graph" INTEGER,
+"nfo:lfeChannels" INTEGER, "nfo:lfeChannels:graph" INTEGER,
+"nfo:sampleCount" INTEGER, "nfo:sampleCount:graph" INTEGER,
+"nfo:bitsPerSample" INTEGER, "nfo:bitsPerSample:graph" INTEGER,
+"nfo:frontChannels" INTEGER, "nfo:frontChannels:graph" INTEGER,
+"nfo:sampleRate" REAL, "nfo:sampleRate:graph" INTEGER,
+"nfo:averageAudioBitrate" REAL, "nfo:averageAudioBitrate:graph"
+INTEGER, "nfo:rearChannels" INTEGER, "nfo:rearChannels:graph" INTEGER,
+"nfo:gain" INTEGER, "nfo:gain:graph" INTEGER, "nfo:peakGain" INTEGER,
+"nfo:peakGain:graph" INTEGER, "nfo:audioOffset" REAL,
+"nfo:audioOffset:graph" INTEGER);
+CREATE TABLE "nfo:Bookmark" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:bookmarks" INTEGER, "nfo:bookmarks:graph" INTEGER,
+"nfo:characterPosition" INTEGER, "nfo:characterPosition:graph"
+INTEGER, "nfo:pageNumber" INTEGER, "nfo:pageNumber:graph" INTEGER,
+"nfo:streamPosition" INTEGER, "nfo:streamPosition:graph" INTEGER,
+"nfo:streamDuration" INTEGER, "nfo:streamDuration:graph" INTEGER);
+CREATE TABLE "nfo:BookmarkFolder" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:BookmarkFolder_nfo:containsBookmark" (ID INTEGER NOT
+NULL, "nfo:containsBookmark" INTEGER NOT NULL,
+"nfo:containsBookmark:graph" INTEGER);
+CREATE TABLE "nfo:BookmarkFolder_nfo:containsBookmarkFolder" (ID
+INTEGER NOT NULL, "nfo:containsBookmarkFolder" INTEGER NOT NULL,
+"nfo:containsBookmarkFolder:graph" INTEGER);
+CREATE TABLE "nfo:CompressionType" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:Cursor" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:DataContainer" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:DeletedResource" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:originalLocation" TEXT COLLATE NOCASE,
+"nfo:originalLocation:graph" INTEGER, "nfo:deletionDate" INTEGER,
+"nfo:deletionDate:graph" INTEGER, "nfo:deletionDate:localDate"
+INTEGER, "nfo:deletionDate:localTime" INTEGER);
+CREATE TABLE "nfo:Document" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:tableOfContents" TEXT COLLATE NOCASE,
+"nfo:tableOfContents:graph" INTEGER);
+CREATE TABLE "nfo:EmbeddedFileDataObject" (ID INTEGER NOT NULL PRIMARY
+KEY, "nfo:encoding" TEXT COLLATE NOCASE, "nfo:encoding:graph"
+INTEGER);
+CREATE TABLE "nfo:Equipment" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:manufacturer" TEXT COLLATE NOCASE, "nfo:manufacturer:graph"
+INTEGER, "nfo:model" TEXT COLLATE NOCASE, "nfo:model:graph" INTEGER,
+"nfo:equipmentSoftware" TEXT COLLATE NOCASE,
+"nfo:equipmentSoftware:graph" INTEGER);
+CREATE TABLE "nfo:Executable" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:FileDataObject" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:fileLastAccessed" INTEGER, "nfo:fileLastAccessed:graph" INTEGER,
+"nfo:fileLastAccessed:localDate" INTEGER,
+"nfo:fileLastAccessed:localTime" INTEGER, "nfo:fileCreated" INTEGER,
+"nfo:fileCreated:graph" INTEGER, "nfo:fileCreated:localDate" INTEGER,
+"nfo:fileCreated:localTime" INTEGER, "nfo:fileSize" INTEGER,
+"nfo:fileSize:graph" INTEGER, "nfo:permissions" TEXT COLLATE NOCASE,
+"nfo:permissions:graph" INTEGER, "nfo:fileName" TEXT COLLATE NOCASE,
+"nfo:fileName:graph" INTEGER, "nfo:hasHash" INTEGER,
+"nfo:hasHash:graph" INTEGER, "nfo:fileOwner" INTEGER,
+"nfo:fileOwner:graph" INTEGER, "nfo:fileLastModified" INTEGER,
+"nfo:fileLastModified:graph" INTEGER, "nfo:fileLastModified:localDate"
+INTEGER, "nfo:fileLastModified:localTime" INTEGER);
+CREATE TABLE "nfo:FileHash" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:hashValue" TEXT COLLATE NOCASE, "nfo:hashValue:graph" INTEGER,
+"nfo:hashAlgorithm" TEXT COLLATE NOCASE, "nfo:hashAlgorithm:graph"
+INTEGER);
+CREATE TABLE "nfo:Filesystem" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:FilesystemImage" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:Folder" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:Font" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:fontFamily" TEXT COLLATE NOCASE, "nfo:fontFamily:graph" INTEGER,
+"nfo:foundry" INTEGER, "nfo:foundry:graph" INTEGER);
+CREATE TABLE "nfo:HardDiskPartition" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:HelpDocument" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:HtmlDocument" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:Icon" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:Image" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:verticalResolution" INTEGER, "nfo:verticalResolution:graph"
+INTEGER, "nfo:horizontalResolution" INTEGER,
+"nfo:horizontalResolution:graph" INTEGER, "nfo:orientation" INTEGER,
+"nfo:orientation:graph" INTEGER);
+CREATE TABLE "nfo:Image_nfo:depicts" (ID INTEGER NOT NULL,
+"nfo:depicts" INTEGER NOT NULL, "nfo:depicts:graph" INTEGER);
+CREATE TABLE "nfo:Image_nfo:hasRegionOfInterest" (ID INTEGER NOT NULL,
+"nfo:hasRegionOfInterest" INTEGER NOT NULL,
+"nfo:hasRegionOfInterest:graph" INTEGER);
+CREATE TABLE "nfo:Media" (ID INTEGER NOT NULL PRIMARY KEY, "nfo:count"
+INTEGER, "nfo:count:graph" INTEGER, "nfo:duration" INTEGER,
+"nfo:duration:graph" INTEGER, "nfo:compressionType" INTEGER,
+"nfo:compressionType:graph" INTEGER, "nfo:hasMediaStream" INTEGER,
+"nfo:hasMediaStream:graph" INTEGER, "nfo:bitDepth" INTEGER,
+"nfo:bitDepth:graph" INTEGER, "nfo:codec" TEXT COLLATE NOCASE,
+"nfo:codec:graph" INTEGER, "nfo:encodedBy" TEXT COLLATE NOCASE,
+"nfo:encodedBy:graph" INTEGER, "nfo:bitrateType" TEXT COLLATE NOCASE,
+"nfo:bitrateType:graph" INTEGER, "nfo:averageBitrate" REAL,
+"nfo:averageBitrate:graph" INTEGER, "nfo:genre" TEXT COLLATE NOCASE,
+"nfo:genre:graph" INTEGER, "nfo:equipment" INTEGER,
+"nfo:equipment:graph" INTEGER, "nfo:lastPlayedPosition" INTEGER,
+"nfo:lastPlayedPosition:graph" INTEGER, "nmm:genre" TEXT COLLATE
+NOCASE, "nmm:genre:graph" INTEGER, "nmm:skipCounter" INTEGER,
+"nmm:skipCounter:graph" INTEGER, "nmm:dlnaProfile" TEXT COLLATE
+NOCASE, "nmm:dlnaProfile:graph" INTEGER, "nmm:dlnaMime" TEXT COLLATE
+NOCASE, "nmm:dlnaMime:graph" INTEGER, "nmm:uPnPShared" INTEGER,
+"nmm:uPnPShared:graph" INTEGER, "mtp:credits" TEXT COLLATE NOCASE,
+"mtp:credits:graph" INTEGER, "mtp:creator" TEXT COLLATE NOCASE,
+"mtp:creator:graph" INTEGER);
+CREATE TABLE "nfo:MediaFileListEntry" (ID INTEGER NOT NULL PRIMARY
+KEY, "nfo:listPosition" REAL, "nfo:listPosition:graph" INTEGER,
+"nfo:entryUrl" TEXT COLLATE NOCASE, "nfo:entryUrl:graph" INTEGER);
+CREATE TABLE "nfo:MediaList" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:entryCounter" INTEGER, "nfo:entryCounter:graph" INTEGER,
+"nfo:listDuration" INTEGER, "nfo:listDuration:graph" INTEGER);
+CREATE TABLE "nfo:MediaList_nfo:hasMediaFileListEntry" (ID INTEGER NOT
+NULL, "nfo:hasMediaFileListEntry" INTEGER NOT NULL,
+"nfo:hasMediaFileListEntry:graph" INTEGER);
+CREATE TABLE "nfo:MediaList_nfo:mediaListEntry" (ID INTEGER NOT NULL,
+"nfo:mediaListEntry" INTEGER NOT NULL, "nfo:mediaListEntry:graph"
+INTEGER);
+CREATE TABLE "nfo:MediaStream" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:Media_mtp:hidden" (ID INTEGER NOT NULL, "mtp:hidden"
+INTEGER NOT NULL, "mtp:hidden:graph" INTEGER);
+CREATE TABLE "nfo:Media_nmm:alternativeMedia" (ID INTEGER NOT NULL,
+"nmm:alternativeMedia" INTEGER NOT NULL, "nmm:alternativeMedia:graph"
+INTEGER);
+CREATE TABLE "nfo:MindMap" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:Note" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:OperatingSystem" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:Orientation" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:PaginatedTextDocument" (ID INTEGER NOT NULL PRIMARY
+KEY, "nfo:pageCount" INTEGER, "nfo:pageCount:graph" INTEGER);
+CREATE TABLE "nfo:PlainTextDocument" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:Presentation" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:RasterImage" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:RegionOfInterest" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:regionOfInterestX" REAL, "nfo:regionOfInterestX:graph" INTEGER,
+"nfo:regionOfInterestY" REAL, "nfo:regionOfInterestY:graph" INTEGER,
+"nfo:regionOfInterestWidth" REAL, "nfo:regionOfInterestWidth:graph"
+INTEGER, "nfo:regionOfInterestHeight" REAL,
+"nfo:regionOfInterestHeight:graph" INTEGER, "nfo:regionOfInterestType"
+INTEGER, "nfo:regionOfInterestType:graph" INTEGER, "nfo:roiRefersTo"
+INTEGER, "nfo:roiRefersTo:graph" INTEGER);
+CREATE TABLE "nfo:RegionOfInterestContent" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:RemoteDataObject" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:RemotePortAddress" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:Software" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:conflicts" INTEGER, "nfo:conflicts:graph" INTEGER,
+"nfo:supercedes" INTEGER, "nfo:supercedes:graph" INTEGER,
+"nfo:softwareIcon" INTEGER, "nfo:softwareIcon:graph" INTEGER,
+"nfo:softwareCmdLine" TEXT COLLATE NOCASE,
+"nfo:softwareCmdLine:graph" INTEGER);
+CREATE TABLE "nfo:SoftwareApplication" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:SoftwareCategory" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:softwareCategoryIcon" INTEGER, "nfo:softwareCategoryIcon:graph"
+INTEGER);
+CREATE TABLE "nfo:SoftwareItem" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:SoftwareService" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:SourceCode" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:commentCharacterCount" INTEGER, "nfo:commentCharacterCount:graph"
+INTEGER, "nfo:programmingLanguage" TEXT COLLATE NOCASE,
+"nfo:programmingLanguage:graph" INTEGER, "nfo:definesClass" TEXT
+COLLATE NOCASE, "nfo:definesClass:graph" INTEGER,
+"nfo:definesFunction" TEXT COLLATE NOCASE,
+"nfo:definesFunction:graph" INTEGER, "nfo:definesGlobalVariable" TEXT
+COLLATE NOCASE, "nfo:definesGlobalVariable:graph" INTEGER);
+CREATE TABLE "nfo:Spreadsheet" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:TextDocument" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:wordCount" INTEGER, "nfo:wordCount:graph" INTEGER,
+"nfo:lineCount" INTEGER, "nfo:lineCount:graph" INTEGER,
+"nfo:characterCount" INTEGER, "nfo:characterCount:graph" INTEGER);
+CREATE TABLE "nfo:Trash" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:VectorImage" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nfo:Video" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:frameRate" REAL, "nfo:frameRate:graph" INTEGER, "nfo:frameCount"
+INTEGER, "nfo:frameCount:graph" INTEGER, "nfo:averageVideoBitrate"
+REAL, "nfo:averageVideoBitrate:graph" INTEGER);
+CREATE TABLE "nfo:Visual" (ID INTEGER NOT NULL PRIMARY KEY,
+"nie:contentCreated" INTEGER, "nie:contentCreated:graph" INTEGER,
+"nie:contentCreated:localDate" INTEGER, "nie:contentCreated:localTime"
+INTEGER, "nfo:aspectRatio" REAL, "nfo:aspectRatio:graph" INTEGER,
+"nfo:heading" REAL, "nfo:heading:graph" INTEGER, "nfo:tilt" REAL,
+"nfo:tilt:graph" INTEGER, "nfo:interlaceMode" INTEGER,
+"nfo:interlaceMode:graph" INTEGER, "nfo:height" INTEGER,
+"nfo:height:graph" INTEGER, "nfo:width" INTEGER, "nfo:width:graph"
+INTEGER, "nfo:colorDepth" INTEGER, "nfo:colorDepth:graph" INTEGER);
+CREATE TABLE "nfo:WebHistory" (ID INTEGER NOT NULL PRIMARY KEY,
+"nfo:domain" TEXT COLLATE NOCASE, "nfo:domain:graph" INTEGER,
+"nfo:uri" TEXT COLLATE NOCASE, "nfo:uri:graph" INTEGER);
+CREATE TABLE "nfo:Website" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nid3:ID3Audio" (ID INTEGER NOT NULL PRIMARY KEY,
+"nid3:title" TEXT COLLATE NOCASE, "nid3:title:graph" INTEGER,
+"nid3:albumTitle" TEXT COLLATE NOCASE, "nid3:albumTitle:graph"
+INTEGER, "nid3:contentType" TEXT COLLATE NOCASE,
+"nid3:contentType:graph" INTEGER, "nid3:length" INTEGER,
+"nid3:length:graph" INTEGER, "nid3:recordingYear" INTEGER,
+"nid3:recordingYear:graph" INTEGER, "nid3:trackNumber" TEXT COLLATE
+NOCASE, "nid3:trackNumber:graph" INTEGER, "nid3:partOfSet" TEXT
+COLLATE NOCASE, "nid3:partOfSet:graph" INTEGER, "nid3:comments" TEXT
+COLLATE NOCASE, "nid3:comments:graph" INTEGER);
+CREATE TABLE "nid3:ID3Audio_nid3:leadArtist" (ID INTEGER NOT NULL,
+"nid3:leadArtist" INTEGER NOT NULL, "nid3:leadArtist:graph" INTEGER);
+CREATE TABLE "nie:DataObject" (ID INTEGER NOT NULL PRIMARY KEY,
+"nie:url" TEXT COLLATE NOCASE UNIQUE, "nie:url:graph" INTEGER,
+"nie:byteSize" INTEGER, "nie:byteSize:graph" INTEGER,
+"nie:interpretedAs" INTEGER, "nie:interpretedAs:graph" INTEGER,
+"nie:lastRefreshed" INTEGER, "nie:lastRefreshed:graph" INTEGER,
+"nie:lastRefreshed:localDate" INTEGER, "nie:lastRefreshed:localTime"
+INTEGER, "nie:created" INTEGER, "nie:created:graph" INTEGER,
+"nie:created:localDate" INTEGER, "nie:created:localTime" INTEGER,
+"nfo:belongsToContainer" INTEGER, "nfo:belongsToContainer:graph"
+INTEGER, "tracker:available" INTEGER, "tracker:available:graph"
+INTEGER);
+CREATE TABLE "nie:DataObject_nie:dataSource" (ID INTEGER NOT NULL,
+"nie:dataSource" INTEGER NOT NULL, "nie:dataSource:graph" INTEGER);
+CREATE TABLE "nie:DataObject_nie:isPartOf" (ID INTEGER NOT NULL,
+"nie:isPartOf" INTEGER NOT NULL, "nie:isPartOf:graph" INTEGER);
+CREATE TABLE "nie:DataSource" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nie:InformationElement" (ID INTEGER NOT NULL PRIMARY
+KEY, "nie:title" TEXT COLLATE NOCASE, "nie:title:graph" INTEGER,
+"nie:contentLastModified" INTEGER, "nie:contentLastModified:graph"
+INTEGER, "nie:contentLastModified:localDate" INTEGER,
+"nie:contentLastModified:localTime" INTEGER, "nie:subject" TEXT
+COLLATE NOCASE, "nie:subject:graph" INTEGER, "nie:mimeType" TEXT
+COLLATE NOCASE, "nie:mimeType:graph" INTEGER, "nie:language" TEXT
+COLLATE NOCASE, "nie:language:graph" INTEGER, "nie:plainTextContent"
+TEXT COLLATE NOCASE, "nie:plainTextContent:graph" INTEGER,
+"nie:legal" TEXT COLLATE NOCASE, "nie:legal:graph" INTEGER,
+"nie:generator" TEXT COLLATE NOCASE, "nie:generator:graph" INTEGER,
+"nie:description" TEXT COLLATE NOCASE, "nie:description:graph"
+INTEGER, "nie:disclaimer" TEXT COLLATE NOCASE, "nie:disclaimer:graph"
+INTEGER, "nie:depends" INTEGER, "nie:depends:graph" INTEGER,
+"nie:links" INTEGER, "nie:links:graph" INTEGER, "nie:copyright" TEXT
+COLLATE NOCASE, "nie:copyright:graph" INTEGER, "nie:comment" TEXT
+COLLATE NOCASE, "nie:comment:graph" INTEGER, "nie:isStoredAs"
+INTEGER, "nie:isStoredAs:graph" INTEGER, "nie:version" TEXT COLLATE
+NOCASE, "nie:version:graph" INTEGER, "nie:contentCreated" INTEGER,
+"nie:contentCreated:graph" INTEGER, "nie:contentCreated:localDate"
+INTEGER, "nie:contentCreated:localTime" INTEGER, "nie:contentAccessed"
+INTEGER, "nie:contentAccessed:graph" INTEGER,
+"nie:contentAccessed:localDate" INTEGER,
+"nie:contentAccessed:localTime" INTEGER, "nie:license" TEXT COLLATE
+NOCASE, "nie:license:graph" INTEGER, "nie:identifier" TEXT COLLATE
+NOCASE, "nie:identifier:graph" INTEGER, "nie:licenseType" TEXT
+COLLATE NOCASE, "nie:licenseType:graph" INTEGER, "nie:characterSet"
+TEXT COLLATE NOCASE, "nie:characterSet:graph" INTEGER,
+"nie:contentSize" INTEGER, "nie:contentSize:graph" INTEGER,
+"nie:rootElementOf" INTEGER, "nie:rootElementOf:graph" INTEGER,
+"nie:usageCounter" INTEGER, "nie:usageCounter:graph" INTEGER,
+"nco:publisher" INTEGER, "nco:publisher:graph" INTEGER,
+"nfo:isContentEncrypted" INTEGER, "nfo:isContentEncrypted:graph"
+INTEGER, "slo:location" INTEGER, "slo:location:graph" INTEGER,
+"nfo:isBootable" INTEGER, "nfo:isBootable:graph" INTEGER, "osinfo:id"
+TEXT COLLATE NOCASE, "osinfo:id:graph" INTEGER, "osinfo:mediaId" TEXT
+COLLATE NOCASE, "osinfo:mediaId:graph" INTEGER);
+CREATE TABLE "nie:InformationElement_mlo:location" (ID INTEGER NOT
+NULL, "mlo:location" INTEGER NOT NULL, "mlo:location:graph" INTEGER);
+CREATE TABLE "nie:InformationElement_nao:hasProperty" (ID INTEGER NOT
+NULL, "nao:hasProperty" INTEGER NOT NULL, "nao:hasProperty:graph"
+INTEGER);
+CREATE TABLE "nie:InformationElement_nco:contributor" (ID INTEGER NOT
+NULL, "nco:contributor" INTEGER NOT NULL, "nco:contributor:graph"
+INTEGER);
+CREATE TABLE "nie:InformationElement_nco:creator" (ID INTEGER NOT
+NULL, "nco:creator" INTEGER NOT NULL, "nco:creator:graph" INTEGER);
+CREATE TABLE "nie:InformationElement_nie:hasLogicalPart" (ID INTEGER
+NOT NULL, "nie:hasLogicalPart" INTEGER NOT NULL,
+"nie:hasLogicalPart:graph" INTEGER);
+CREATE TABLE "nie:InformationElement_nie:hasPart" (ID INTEGER NOT
+NULL, "nie:hasPart" INTEGER NOT NULL, "nie:hasPart:graph" INTEGER);
+CREATE TABLE "nie:InformationElement_nie:informationElementDate" (ID
+INTEGER NOT NULL, "nie:informationElementDate" INTEGER NOT NULL,
+"nie:informationElementDate:graph" INTEGER,
+"nie:informationElementDate:localDate" INTEGER NOT NULL,
+"nie:informationElementDate:localTime" INTEGER NOT NULL);
+CREATE TABLE "nie:InformationElement_nie:isLogicalPartOf" (ID INTEGER
+NOT NULL, "nie:isLogicalPartOf" INTEGER NOT NULL,
+"nie:isLogicalPartOf:graph" INTEGER);
+CREATE TABLE "nie:InformationElement_nie:keyword" (ID INTEGER NOT
+NULL, "nie:keyword" TEXT NOT NULL, "nie:keyword:graph" INTEGER);
+CREATE TABLE "nie:InformationElement_nie:relatedTo" (ID INTEGER NOT
+NULL, "nie:relatedTo" INTEGER NOT NULL, "nie:relatedTo:graph"
+INTEGER);
+CREATE TABLE "nmm:AnalogRadio" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmm:modulation" INTEGER, "nmm:modulation:graph" INTEGER,
+"nmm:frequency" INTEGER, "nmm:frequency:graph" INTEGER);
+CREATE TABLE "nmm:Artist" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmm:artistName" TEXT COLLATE NOCASE, "nmm:artistName:graph"
+INTEGER);
+CREATE TABLE "nmm:DigitalRadio" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmm:streamingBitrate" INTEGER, "nmm:streamingBitrate:graph" INTEGER,
+"nmm:encoding" TEXT COLLATE NOCASE, "nmm:encoding:graph" INTEGER,
+"nmm:protocol" TEXT COLLATE NOCASE, "nmm:protocol:graph" INTEGER);
+CREATE TABLE "nmm:Flash" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmm:ImageList" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmm:MeteringMode" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmm:MusicAlbum" (ID INTEGER NOT NULL PRIMARY KEY,
+"nie:title" TEXT COLLATE NOCASE, "nie:title:graph" INTEGER,
+"nmm:albumTrackCount" INTEGER, "nmm:albumTrackCount:graph" INTEGER,
+"nmm:albumTitle" TEXT COLLATE NOCASE, "nmm:albumTitle:graph" INTEGER,
+"nmm:albumDuration" INTEGER, "nmm:albumDuration:graph" INTEGER,
+"nmm:albumGain" INTEGER, "nmm:albumGain:graph" INTEGER,
+"nmm:albumPeakGain" INTEGER, "nmm:albumPeakGain:graph" INTEGER);
+CREATE TABLE "nmm:MusicAlbumDisc" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmm:albumDiscAlbum" INTEGER, "nmm:albumDiscAlbum:graph" INTEGER,
+"nmm:musicCDIdentifier" TEXT COLLATE NOCASE,
+"nmm:musicCDIdentifier:graph" INTEGER, "nmm:setNumber" INTEGER,
+"nmm:setNumber:graph" INTEGER);
+CREATE TABLE "nmm:MusicAlbum_nmm:albumArtist" (ID INTEGER NOT NULL,
+"nmm:albumArtist" INTEGER NOT NULL, "nmm:albumArtist:graph" INTEGER);
+CREATE TABLE "nmm:MusicPiece" (ID INTEGER NOT NULL PRIMARY KEY,
+"nie:title" TEXT COLLATE NOCASE, "nie:title:graph" INTEGER,
+"nmm:musicAlbum" INTEGER, "nmm:musicAlbum:graph" INTEGER,
+"nmm:musicAlbumDisc" INTEGER, "nmm:musicAlbumDisc:graph" INTEGER,
+"nmm:beatsPerMinute" INTEGER, "nmm:beatsPerMinute:graph" INTEGER,
+"nmm:performer" INTEGER, "nmm:performer:graph" INTEGER, "nmm:composer"
+INTEGER, "nmm:composer:graph" INTEGER, "nmm:lyricist" INTEGER,
+"nmm:lyricist:graph" INTEGER, "nmm:trackNumber" INTEGER,
+"nmm:trackNumber:graph" INTEGER,
+"nmm:internationalStandardRecordingCode" TEXT COLLATE NOCASE,
+"nmm:internationalStandardRecordingCode:graph" INTEGER);
+CREATE TABLE "nmm:MusicPiece_nmm:lyrics" (ID INTEGER NOT NULL,
+"nmm:lyrics" INTEGER NOT NULL, "nmm:lyrics:graph" INTEGER);
+CREATE TABLE "nmm:Photo" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmm:exposureTime" REAL, "nmm:exposureTime:graph" INTEGER, "nmm:flash"
+INTEGER, "nmm:flash:graph" INTEGER, "nmm:fnumber" REAL,
+"nmm:fnumber:graph" INTEGER, "nmm:focalLength" REAL,
+"nmm:focalLength:graph" INTEGER, "nmm:isoSpeed" REAL,
+"nmm:isoSpeed:graph" INTEGER, "nmm:meteringMode" INTEGER,
+"nmm:meteringMode:graph" INTEGER, "nmm:whiteBalance" INTEGER,
+"nmm:whiteBalance:graph" INTEGER, "nmm:isCropped" INTEGER,
+"nmm:isCropped:graph" INTEGER, "nmm:isColorCorrected" INTEGER,
+"nmm:isColorCorrected:graph" INTEGER);
+CREATE TABLE "nmm:Playlist" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmm:RadioModulation" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmm:RadioStation" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmm:radioIcon" INTEGER, "nmm:radioIcon:graph" INTEGER, "nmm:radioPTY"
+INTEGER, "nmm:radioPTY:graph" INTEGER);
+CREATE TABLE "nmm:RadioStation_nmm:carrier" (ID INTEGER NOT NULL,
+"nmm:carrier" INTEGER NOT NULL, "nmm:carrier:graph" INTEGER);
+CREATE TABLE "nmm:SynchronizedText" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmm:isForHearingImpaired" INTEGER, "nmm:isForHearingImpaired:graph"
+INTEGER);
+CREATE TABLE "nmm:Video" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmm:videoAlbum" INTEGER, "nmm:videoAlbum:graph" INTEGER,
+"nmm:isSeries" INTEGER, "nmm:isSeries:graph" INTEGER, "nmm:season"
+INTEGER, "nmm:season:graph" INTEGER, "nmm:episodeNumber" INTEGER,
+"nmm:episodeNumber:graph" INTEGER, "nmm:runTime" INTEGER,
+"nmm:runTime:graph" INTEGER, "nmm:synopsis" TEXT COLLATE NOCASE,
+"nmm:synopsis:graph" INTEGER, "nmm:MPAARating" TEXT COLLATE NOCASE,
+"nmm:MPAARating:graph" INTEGER, "nmm:category" TEXT COLLATE NOCASE,
+"nmm:category:graph" INTEGER, "nmm:producedBy" INTEGER,
+"nmm:producedBy:graph" INTEGER, "nmm:hasSubtitle" INTEGER,
+"nmm:hasSubtitle:graph" INTEGER, "nmm:isContentEncrypted" INTEGER,
+"nmm:isContentEncrypted:graph" INTEGER, "mtp:fourCC" TEXT COLLATE
+NOCASE, "mtp:fourCC:graph" INTEGER, "mtp:waveformat" TEXT COLLATE
+NOCASE, "mtp:waveformat:graph" INTEGER);
+CREATE TABLE "nmm:Video_mtp:scantype" (ID INTEGER NOT NULL,
+"mtp:scantype" INTEGER NOT NULL, "mtp:scantype:graph" INTEGER);
+CREATE TABLE "nmm:Video_nmm:director" (ID INTEGER NOT NULL,
+"nmm:director" INTEGER NOT NULL, "nmm:director:graph" INTEGER);
+CREATE TABLE "nmm:Video_nmm:leadActor" (ID INTEGER NOT NULL,
+"nmm:leadActor" INTEGER NOT NULL, "nmm:leadActor:graph" INTEGER);
+CREATE TABLE "nmm:Video_nmm:subtitle" (ID INTEGER NOT NULL,
+"nmm:subtitle" INTEGER NOT NULL, "nmm:subtitle:graph" INTEGER);
+CREATE TABLE "nmm:WhiteBalance" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmo:Attachment" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmo:Call" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmo:sentDate" INTEGER, "nmo:sentDate:graph" INTEGER,
+"nmo:sentDate:localDate" INTEGER, "nmo:sentDate:localTime" INTEGER,
+"nmo:duration" INTEGER, "nmo:duration:graph" INTEGER);
+CREATE TABLE "nmo:CommunicationChannel" (ID INTEGER NOT NULL PRIMARY
+KEY, "nmo:lastMessageDate" INTEGER, "nmo:lastMessageDate:graph"
+INTEGER, "nmo:lastMessageDate:localDate" INTEGER,
+"nmo:lastMessageDate:localTime" INTEGER,
+"nmo:lastSuccessfulMessageDate" INTEGER,
+"nmo:lastSuccessfulMessageDate:graph" INTEGER,
+"nmo:lastSuccessfulMessageDate:localDate" INTEGER,
+"nmo:lastSuccessfulMessageDate:localTime" INTEGER);
+CREATE TABLE "nmo:CommunicationChannel_nmo:hasParticipant" (ID INTEGER
+NOT NULL, "nmo:hasParticipant" INTEGER NOT NULL,
+"nmo:hasParticipant:graph" INTEGER);
+CREATE TABLE "nmo:Conversation" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmo:DeliveryStatus" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmo:Email" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmo:hasContent" INTEGER, "nmo:hasContent:graph" INTEGER,
+"nmo:isFlagged" INTEGER, "nmo:isFlagged:graph" INTEGER, "nmo:isRecent"
+INTEGER, "nmo:isRecent:graph" INTEGER, "nmo:status" TEXT COLLATE
+NOCASE, "nmo:status:graph" INTEGER, "nmo:responseType" TEXT COLLATE
+NOCASE, "nmo:responseType:graph" INTEGER);
+CREATE TABLE "nmo:Email_nmo:contentMimeType" (ID INTEGER NOT NULL,
+"nmo:contentMimeType" TEXT NOT NULL, "nmo:contentMimeType:graph"
+INTEGER);
+CREATE TABLE "nmo:IMMessage" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmo:MMSMessage" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmo:mmsHasContent" INTEGER, "nmo:mmsHasContent:graph" INTEGER);
+CREATE TABLE "nmo:MailAccount" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmo:accountName" TEXT COLLATE NOCASE, "nmo:accountName:graph"
+INTEGER, "nmo:accountDisplayName" TEXT COLLATE NOCASE,
+"nmo:accountDisplayName:graph" INTEGER, "nmo:fromAddress" INTEGER,
+"nmo:fromAddress:graph" INTEGER, "nmo:signature" TEXT COLLATE NOCASE,
+"nmo:signature:graph" INTEGER);
+CREATE TABLE "nmo:MailFolder" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmo:folderName" TEXT COLLATE NOCASE, "nmo:folderName:graph" INTEGER,
+"nmo:serverCount" INTEGER, "nmo:serverCount:graph" INTEGER,
+"nmo:serverUnreadCount" INTEGER, "nmo:serverUnreadCount:graph"
+INTEGER);
+CREATE TABLE "nmo:MailboxDataObject" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmo:Message" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmo:sentDate" INTEGER, "nmo:sentDate:graph" INTEGER,
+"nmo:sentDate:localDate" INTEGER, "nmo:sentDate:localTime" INTEGER,
+"nmo:from" INTEGER, "nmo:from:graph" INTEGER, "nmo:isAnswered"
+INTEGER, "nmo:isAnswered:graph" INTEGER, "nmo:isDeleted" INTEGER,
+"nmo:isDeleted:graph" INTEGER, "nmo:isDraft" INTEGER,
+"nmo:isDraft:graph" INTEGER, "nmo:isRead" INTEGER, "nmo:isRead:graph"
+INTEGER, "nmo:isSent" INTEGER, "nmo:isSent:graph" INTEGER,
+"nmo:isEmergency" INTEGER, "nmo:isEmergency:graph" INTEGER,
+"nmo:htmlMessageContent" TEXT COLLATE NOCASE,
+"nmo:htmlMessageContent:graph" INTEGER, "nmo:messageId" TEXT COLLATE
+NOCASE, "nmo:messageId:graph" INTEGER, "nmo:messageSubject" TEXT
+COLLATE NOCASE, "nmo:messageSubject:graph" INTEGER,
+"nmo:receivedDate" INTEGER, "nmo:receivedDate:graph" INTEGER,
+"nmo:receivedDate:localDate" INTEGER, "nmo:receivedDate:localTime"
+INTEGER, "nmo:replyTo" INTEGER, "nmo:replyTo:graph" INTEGER,
+"nmo:sender" INTEGER, "nmo:sender:graph" INTEGER, "nmo:conversation"
+INTEGER, "nmo:conversation:graph" INTEGER, "nmo:communicationChannel"
+INTEGER, "nmo:communicationChannel:graph" INTEGER,
+"nmo:deliveryStatus" INTEGER, "nmo:deliveryStatus:graph" INTEGER,
+"nmo:reportDelivery" INTEGER, "nmo:reportDelivery:graph" INTEGER,
+"nmo:sentWithReportRead" INTEGER, "nmo:sentWithReportRead:graph"
+INTEGER, "nmo:reportReadStatus" INTEGER, "nmo:reportReadStatus:graph"
+INTEGER, "nmo:mustAnswerReportRead" INTEGER,
+"nmo:mustAnswerReportRead:graph" INTEGER, "nmo:mmsId" TEXT COLLATE
+NOCASE, "nmo:mmsId:graph" INTEGER);
+CREATE TABLE "nmo:MessageHeader" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmo:headerName" TEXT COLLATE NOCASE, "nmo:headerName:graph" INTEGER,
+"nmo:headerValue" TEXT COLLATE NOCASE, "nmo:headerValue:graph"
+INTEGER);
+CREATE TABLE "nmo:Message_nmo:bcc" (ID INTEGER NOT NULL, "nmo:bcc"
+INTEGER NOT NULL, "nmo:bcc:graph" INTEGER);
+CREATE TABLE "nmo:Message_nmo:cc" (ID INTEGER NOT NULL, "nmo:cc"
+INTEGER NOT NULL, "nmo:cc:graph" INTEGER);
+CREATE TABLE "nmo:Message_nmo:hasAttachment" (ID INTEGER NOT NULL,
+"nmo:hasAttachment" INTEGER NOT NULL, "nmo:hasAttachment:graph"
+INTEGER);
+CREATE TABLE "nmo:Message_nmo:inReplyTo" (ID INTEGER NOT NULL,
+"nmo:inReplyTo" INTEGER NOT NULL, "nmo:inReplyTo:graph" INTEGER);
+CREATE TABLE "nmo:Message_nmo:messageHeader" (ID INTEGER NOT NULL,
+"nmo:messageHeader" INTEGER NOT NULL, "nmo:messageHeader:graph"
+INTEGER);
+CREATE TABLE "nmo:Message_nmo:recipient" (ID INTEGER NOT NULL,
+"nmo:recipient" INTEGER NOT NULL, "nmo:recipient:graph" INTEGER);
+CREATE TABLE "nmo:Message_nmo:references" (ID INTEGER NOT NULL,
+"nmo:references" INTEGER NOT NULL, "nmo:references:graph" INTEGER);
+CREATE TABLE "nmo:Message_nmo:to" (ID INTEGER NOT NULL, "nmo:to"
+INTEGER NOT NULL, "nmo:to:graph" INTEGER);
+CREATE TABLE "nmo:MimePart" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmo:charSet" TEXT COLLATE NOCASE, "nmo:charSet:graph" INTEGER,
+"nmo:contentId" TEXT COLLATE NOCASE, "nmo:contentId:graph" INTEGER,
+"nmo:contentTransferEncoding" TEXT COLLATE NOCASE,
+"nmo:contentTransferEncoding:graph" INTEGER, "nmo:contentDescription"
+TEXT COLLATE NOCASE, "nmo:contentDescription:graph" INTEGER,
+"nmo:contentDisposition" TEXT COLLATE NOCASE,
+"nmo:contentDisposition:graph" INTEGER);
+CREATE TABLE "nmo:MimePart_nmo:mimeHeader" (ID INTEGER NOT NULL,
+"nmo:mimeHeader" INTEGER NOT NULL, "nmo:mimeHeader:graph" INTEGER);
+CREATE TABLE "nmo:Multipart" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmo:Multipart_nmo:partBoundary" (ID INTEGER NOT NULL,
+"nmo:partBoundary" TEXT NOT NULL, "nmo:partBoundary:graph" INTEGER);
+CREATE TABLE "nmo:PermanentChannel" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmo:PhoneMessage" (ID INTEGER NOT NULL PRIMARY KEY,
+"nmo:fromVCard" INTEGER, "nmo:fromVCard:graph" INTEGER, "nmo:encoding"
+TEXT COLLATE NOCASE, "nmo:encoding:graph" INTEGER,
+"nmo:phoneMessageId" INTEGER, "nmo:phoneMessageId:graph" INTEGER,
+"nmo:validityPeriod" INTEGER, "nmo:validityPeriod:graph" INTEGER);
+CREATE TABLE "nmo:PhoneMessageFolder" (ID INTEGER NOT NULL PRIMARY
+KEY, "nmo:phoneMessageFolderId" TEXT COLLATE NOCASE,
+"nmo:phoneMessageFolderId:graph" INTEGER);
+CREATE TABLE "nmo:PhoneMessageFolder_nmo:containsPhoneMessage" (ID
+INTEGER NOT NULL, "nmo:containsPhoneMessage" INTEGER NOT NULL,
+"nmo:containsPhoneMessage:graph" INTEGER);
+CREATE TABLE "nmo:PhoneMessageFolder_nmo:containsPhoneMessageFolder"
+(ID INTEGER NOT NULL, "nmo:containsPhoneMessageFolder" INTEGER NOT
+NULL, "nmo:containsPhoneMessageFolder:graph" INTEGER);
+CREATE TABLE "nmo:PhoneMessage_nmo:toVCard" (ID INTEGER NOT NULL,
+"nmo:toVCard" INTEGER NOT NULL, "nmo:toVCard:graph" INTEGER);
+CREATE TABLE "nmo:ReportReadStatus" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmo:SMSMessage" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmo:TransientChannel" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nmo:VOIPCall" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "nrl:InverseFunctionalProperty" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "osinfo:Installer" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "poi:ObjectOfInterest" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "rdf:Property" (ID INTEGER NOT NULL PRIMARY KEY,
+"rdfs:domain" INTEGER, "rdfs:domain:graph" INTEGER, "rdfs:range"
+INTEGER, "rdfs:range:graph" INTEGER, "tracker:indexed" INTEGER,
+"tracker:indexed:graph" INTEGER, "tracker:secondaryIndex" INTEGER,
+"tracker:secondaryIndex:graph" INTEGER, "tracker:fulltextIndexed"
+INTEGER, "tracker:fulltextIndexed:graph" INTEGER,
+"tracker:fulltextNoLimit" INTEGER, "tracker:fulltextNoLimit:graph"
+INTEGER, "tracker:transient" INTEGER, "tracker:transient:graph"
+INTEGER, "tracker:weight" INTEGER, "tracker:weight:graph" INTEGER,
+"tracker:defaultValue" TEXT COLLATE NOCASE,
+"tracker:defaultValue:graph" INTEGER, "nrl:maxCardinality" INTEGER,
+"nrl:maxCardinality:graph" INTEGER, "tracker:writeback" INTEGER,
+"tracker:writeback:graph" INTEGER, "tracker:forceJournal" INTEGER,
+"tracker:forceJournal:graph" INTEGER);
+CREATE TABLE "rdf:Property_rdfs:subPropertyOf" (ID INTEGER NOT NULL,
+"rdfs:subPropertyOf" INTEGER NOT NULL, "rdfs:subPropertyOf:graph"
+INTEGER);
+CREATE TABLE "rdfs:Class" (ID INTEGER NOT NULL PRIMARY KEY,
+"tracker:notify" INTEGER, "tracker:notify:graph" INTEGER);
+CREATE TABLE "rdfs:Class_rdfs:subClassOf" (ID INTEGER NOT NULL,
+"rdfs:subClassOf" INTEGER NOT NULL, "rdfs:subClassOf:graph" INTEGER);
+CREATE TABLE "rdfs:Class_tracker:domainIndex" (ID INTEGER NOT NULL,
+"tracker:domainIndex" INTEGER NOT NULL, "tracker:domainIndex:graph"
+INTEGER);
+CREATE TABLE "rdfs:Literal" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "rdfs:Resource" (ID INTEGER NOT NULL PRIMARY KEY,
+Available INTEGER NOT NULL, "rdfs:comment" TEXT COLLATE NOCASE,
+"rdfs:comment:graph" INTEGER, "rdfs:label" TEXT COLLATE NOCASE,
+"rdfs:label:graph" INTEGER, "tracker:added" INTEGER,
+"tracker:added:graph" INTEGER, "tracker:added:localDate" INTEGER,
+"tracker:added:localTime" INTEGER, "tracker:modified" INTEGER,
+"tracker:modified:graph" INTEGER, "tracker:damaged" INTEGER,
+"tracker:damaged:graph" INTEGER, "dc:title" TEXT COLLATE NOCASE,
+"dc:title:graph" INTEGER, "dc:creator" TEXT COLLATE NOCASE,
+"dc:creator:graph" INTEGER, "dc:subject" TEXT COLLATE NOCASE,
+"dc:subject:graph" INTEGER, "dc:description" TEXT COLLATE NOCASE,
+"dc:description:graph" INTEGER, "dc:publisher" TEXT COLLATE NOCASE,
+"dc:publisher:graph" INTEGER, "dc:type" TEXT COLLATE NOCASE,
+"dc:type:graph" INTEGER, "dc:format" TEXT COLLATE NOCASE,
+"dc:format:graph" INTEGER, "dc:identifier" TEXT COLLATE NOCASE,
+"dc:identifier:graph" INTEGER, "dc:language" TEXT COLLATE NOCASE,
+"dc:language:graph" INTEGER, "dc:coverage" TEXT COLLATE NOCASE,
+"dc:coverage:graph" INTEGER, "dc:rights" TEXT COLLATE NOCASE,
+"dc:rights:graph" INTEGER, "nao:identifier" TEXT COLLATE NOCASE,
+"nao:identifier:graph" INTEGER, "nao:numericRating" REAL,
+"nao:numericRating:graph" INTEGER, "nao:lastModified" INTEGER,
+"nao:lastModified:graph" INTEGER, "nao:lastModified:localDate"
+INTEGER, "nao:lastModified:localTime" INTEGER);
+CREATE TABLE "rdfs:Resource_dc:contributor" (ID INTEGER NOT NULL,
+"dc:contributor" TEXT NOT NULL, "dc:contributor:graph" INTEGER);
+CREATE TABLE "rdfs:Resource_dc:date" (ID INTEGER NOT NULL, "dc:date"
+INTEGER NOT NULL, "dc:date:graph" INTEGER, "dc:date:localDate" INTEGER
+NOT NULL, "dc:date:localTime" INTEGER NOT NULL);
+CREATE TABLE "rdfs:Resource_dc:relation" (ID INTEGER NOT NULL,
+"dc:relation" TEXT NOT NULL, "dc:relation:graph" INTEGER);
+CREATE TABLE "rdfs:Resource_dc:source" (ID INTEGER NOT NULL,
+"dc:source" INTEGER NOT NULL, "dc:source:graph" INTEGER);
+CREATE TABLE "rdfs:Resource_nao:deprecated" (ID INTEGER NOT NULL,
+"nao:deprecated" INTEGER NOT NULL, "nao:deprecated:graph" INTEGER);
+CREATE TABLE "rdfs:Resource_nao:hasTag" (ID INTEGER NOT NULL,
+"nao:hasTag" INTEGER NOT NULL, "nao:hasTag:graph" INTEGER);
+CREATE TABLE "rdfs:Resource_nao:isRelated" (ID INTEGER NOT NULL,
+"nao:isRelated" INTEGER NOT NULL, "nao:isRelated:graph" INTEGER);
+CREATE TABLE "rdfs:Resource_rdf:type" (ID INTEGER NOT NULL, "rdf:type"
+INTEGER NOT NULL, "rdf:type:graph" INTEGER);
+CREATE TABLE "scal:AccessLevel" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "scal:AttendanceStatus" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "scal:Attendee" (ID INTEGER NOT NULL PRIMARY KEY,
+"scal:attendanceStatus" INTEGER, "scal:attendanceStatus:graph"
+INTEGER, "scal:attendeeRole" INTEGER, "scal:attendeeRole:graph"
+INTEGER, "scal:attendeeContact" INTEGER, "scal:attendeeContact:graph"
+INTEGER, "scal:rsvp" INTEGER, "scal:rsvp:graph" INTEGER,
+"scal:calendarUserType" INTEGER, "scal:calendarUserType:graph"
+INTEGER);
+CREATE TABLE "scal:AttendeeRole" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "scal:Attendee_scal:delegated-from" (ID INTEGER NOT NULL,
+"scal:delegated-from" INTEGER NOT NULL, "scal:delegated-from:graph"
+INTEGER);
+CREATE TABLE "scal:Attendee_scal:delegated-to" (ID INTEGER NOT NULL,
+"scal:delegated-to" INTEGER NOT NULL, "scal:delegated-to:graph"
+INTEGER);
+CREATE TABLE "scal:Attendee_scal:member" (ID INTEGER NOT NULL,
+"scal:member" INTEGER NOT NULL, "scal:member:graph" INTEGER);
+CREATE TABLE "scal:Attendee_scal:sent-by" (ID INTEGER NOT NULL,
+"scal:sent-by" INTEGER NOT NULL, "scal:sent-by:graph" INTEGER);
+CREATE TABLE "scal:Calendar" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "scal:CalendarAlarm" (ID INTEGER NOT NULL PRIMARY KEY,
+"scal:alarmOffset" INTEGER, "scal:alarmOffset:graph" INTEGER);
+CREATE TABLE "scal:CalendarAlarm_scal:alarmAttendee" (ID INTEGER NOT
+NULL, "scal:alarmAttendee" INTEGER NOT NULL,
+"scal:alarmAttendee:graph" INTEGER);
+CREATE TABLE "scal:CalendarItem" (ID INTEGER NOT NULL PRIMARY KEY,
+"scal:textLocation" INTEGER, "scal:textLocation:graph" INTEGER,
+"scal:resources" TEXT COLLATE NOCASE, "scal:resources:graph" INTEGER,
+"scal:transparency" INTEGER, "scal:transparency:graph" INTEGER,
+"scal:calendarItemAlarm" INTEGER, "scal:calendarItemAlarm:graph"
+INTEGER, "scal:start" INTEGER, "scal:start:graph" INTEGER, "scal:end"
+INTEGER, "scal:end:graph" INTEGER, "scal:isAllDay" INTEGER,
+"scal:isAllDay:graph" INTEGER, "scal:priority" INTEGER,
+"scal:priority:graph" INTEGER, "scal:rdate" INTEGER,
+"scal:rdate:graph" INTEGER, "scal:exceptionRDate" INTEGER,
+"scal:exceptionRDate:graph" INTEGER);
+CREATE TABLE "scal:CalendarItem_scal:access" (ID INTEGER NOT NULL,
+"scal:access" INTEGER NOT NULL, "scal:access:graph" INTEGER);
+CREATE TABLE "scal:CalendarItem_scal:attachment" (ID INTEGER NOT NULL,
+"scal:attachment" INTEGER NOT NULL, "scal:attachment:graph" INTEGER);
+CREATE TABLE "scal:CalendarItem_scal:attendee" (ID INTEGER NOT NULL,
+"scal:attendee" INTEGER NOT NULL, "scal:attendee:graph" INTEGER);
+CREATE TABLE "scal:CalendarItem_scal:belongsToCalendar" (ID INTEGER
+NOT NULL, "scal:belongsToCalendar" INTEGER NOT NULL,
+"scal:belongsToCalendar:graph" INTEGER);
+CREATE TABLE "scal:CalendarItem_scal:contact" (ID INTEGER NOT NULL,
+"scal:contact" INTEGER NOT NULL, "scal:contact:graph" INTEGER);
+CREATE TABLE "scal:CalendarItem_scal:rrule" (ID INTEGER NOT NULL,
+"scal:rrule" INTEGER NOT NULL, "scal:rrule:graph" INTEGER);
+CREATE TABLE "scal:CalendarUserType" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "scal:Event" (ID INTEGER NOT NULL PRIMARY KEY,
+"scal:eventStatus" INTEGER, "scal:eventStatus:graph" INTEGER);
+CREATE TABLE "scal:EventStatus" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "scal:Journal" (ID INTEGER NOT NULL PRIMARY KEY,
+"scal:journalStatus" INTEGER, "scal:journalStatus:graph" INTEGER);
+CREATE TABLE "scal:JournalStatus" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "scal:RSVPValues" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "scal:RecurrenceRule" (ID INTEGER NOT NULL PRIMARY KEY,
+"scal:recurrencePattern" TEXT COLLATE NOCASE,
+"scal:recurrencePattern:graph" INTEGER, "scal:recurrenceStartDate"
+INTEGER, "scal:recurrenceStartDate:graph" INTEGER, "scal:exception"
+INTEGER, "scal:exception:graph" INTEGER);
+CREATE TABLE "scal:TimePoint" (ID INTEGER NOT NULL PRIMARY KEY,
+"scal:dateTime" INTEGER, "scal:dateTime:graph" INTEGER,
+"scal:dateTime:localDate" INTEGER, "scal:dateTime:localTime" INTEGER,
+"scal:TimeZone" TEXT COLLATE NOCASE, "scal:TimeZone:graph" INTEGER);
+CREATE TABLE "scal:Todo" (ID INTEGER NOT NULL PRIMARY KEY,
+"scal:todoStatus" INTEGER, "scal:todoStatus:graph" INTEGER, "scal:due"
+INTEGER, "scal:due:graph" INTEGER, "scal:completed" INTEGER,
+"scal:completed:graph" INTEGER, "scal:percentComplete" INTEGER,
+"scal:percentComplete:graph" INTEGER);
+CREATE TABLE "scal:TodoStatus" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "scal:TransparencyValues" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "slo:GeoLocation" (ID INTEGER NOT NULL PRIMARY KEY,
+"slo:latitude" REAL, "slo:latitude:graph" INTEGER, "slo:longitude"
+REAL, "slo:longitude:graph" INTEGER, "slo:verticalAccuracy" REAL,
+"slo:verticalAccuracy:graph" INTEGER, "slo:horizontalAccuracy" REAL,
+"slo:horizontalAccuracy:graph" INTEGER, "slo:altitude" REAL,
+"slo:altitude:graph" INTEGER, "slo:boundingLatitudeMin" REAL,
+"slo:boundingLatitudeMin:graph" INTEGER, "slo:boundingLatitudeMax"
+REAL, "slo:boundingLatitudeMax:graph" INTEGER,
+"slo:boundingLongitudeMin" REAL, "slo:boundingLongitudeMin:graph"
+INTEGER, "slo:boundingLongitudeMax" REAL,
+"slo:boundingLongitudeMax:graph" INTEGER, "slo:radius" REAL,
+"slo:radius:graph" INTEGER, "slo:timestamp" INTEGER,
+"slo:timestamp:graph" INTEGER, "slo:timestamp:localDate" INTEGER,
+"slo:timestamp:localTime" INTEGER, "slo:postalAddress" INTEGER,
+"slo:postalAddress:graph" INTEGER);
+CREATE TABLE "slo:Landmark" (ID INTEGER NOT NULL PRIMARY KEY,
+"slo:iconUrl" INTEGER, "slo:iconUrl:graph" INTEGER);
+CREATE TABLE "slo:LandmarkCategory" (ID INTEGER NOT NULL PRIMARY KEY,
+"slo:isRemovable" INTEGER, "slo:isRemovable:graph" INTEGER,
+"slo:categoryIconUrl" INTEGER, "slo:categoryIconUrl:graph" INTEGER);
+CREATE TABLE "slo:Landmark_slo:belongsToCategory" (ID INTEGER NOT
+NULL, "slo:belongsToCategory" INTEGER NOT NULL,
+"slo:belongsToCategory:graph" INTEGER);
+CREATE TABLE "slo:Landmark_slo:hasContact" (ID INTEGER NOT NULL,
+"slo:hasContact" INTEGER NOT NULL, "slo:hasContact:graph" INTEGER);
+CREATE TABLE "slo:Route" (ID INTEGER NOT NULL PRIMARY KEY,
+"slo:startTime" INTEGER, "slo:startTime:graph" INTEGER,
+"slo:startTime:localDate" INTEGER, "slo:startTime:localTime" INTEGER,
+"slo:endTime" INTEGER, "slo:endTime:graph" INTEGER,
+"slo:endTime:localDate" INTEGER, "slo:endTime:localTime" INTEGER);
+CREATE TABLE "slo:Route_slo:routeDetails" (ID INTEGER NOT NULL,
+"slo:routeDetails" TEXT NOT NULL, "slo:routeDetails:graph" INTEGER);
+CREATE TABLE "tracker:Namespace" (ID INTEGER NOT NULL PRIMARY KEY,
+"tracker:prefix" TEXT COLLATE NOCASE, "tracker:prefix:graph"
+INTEGER);
+CREATE TABLE "tracker:Ontology" (ID INTEGER NOT NULL PRIMARY KEY);
+CREATE TABLE "tracker:Volume" (ID INTEGER NOT NULL PRIMARY KEY,
+"tracker:isMounted" INTEGER, "tracker:isMounted:graph" INTEGER,
+"tracker:unmountDate" INTEGER, "tracker:unmountDate:graph" INTEGER,
+"tracker:unmountDate:localDate" INTEGER,
+"tracker:unmountDate:localTime" INTEGER, "tracker:mountPoint" INTEGER,
+"tracker:mountPoint:graph" INTEGER, "tracker:isRemovable" INTEGER,
+"tracker:isRemovable:graph" INTEGER, "tracker:isOptical" INTEGER,
+"tracker:isOptical:graph" INTEGER);
+CREATE UNIQUE INDEX "mfo:FeedMessage_mfo:enclosureList_ID_ID" ON
+"mfo:FeedMessage_mfo:enclosureList" (ID, "mfo:enclosureList");
+CREATE UNIQUE INDEX "mlo:GeoBoundingBox_mlo:bbNorthWest_ID_ID" ON
+"mlo:GeoBoundingBox_mlo:bbNorthWest" (ID, "mlo:bbNorthWest");
+CREATE UNIQUE INDEX "mlo:GeoBoundingBox_mlo:bbSouthEast_ID_ID" ON
+"mlo:GeoBoundingBox_mlo:bbSouthEast" (ID, "mlo:bbSouthEast");
+CREATE INDEX "mlo:GeoLocation_mlo:asBoundingBox_ID" ON
+"mlo:GeoLocation_mlo:asBoundingBox" (ID);
+CREATE UNIQUE INDEX "mlo:GeoLocation_mlo:asBoundingBox_ID_ID" ON
+"mlo:GeoLocation_mlo:asBoundingBox" ("mlo:asBoundingBox", ID);
+CREATE INDEX "mlo:GeoLocation_mlo:asGeoPoint_ID" ON
+"mlo:GeoLocation_mlo:asGeoPoint" (ID);
+CREATE UNIQUE INDEX "mlo:GeoLocation_mlo:asGeoPoint_ID_ID" ON
+"mlo:GeoLocation_mlo:asGeoPoint" ("mlo:asGeoPoint", ID);
+CREATE INDEX "mlo:GeoLocation_mlo:asPostalAddress_ID" ON
+"mlo:GeoLocation_mlo:asPostalAddress" (ID);
+CREATE UNIQUE INDEX "mlo:GeoLocation_mlo:asPostalAddress_ID_ID" ON
+"mlo:GeoLocation_mlo:asPostalAddress" ("mlo:asPostalAddress", ID);
+CREATE UNIQUE INDEX "mlo:GeoPoint_mlo:address_ID_ID" ON
+"mlo:GeoPoint_mlo:address" (ID, "mlo:address");
+CREATE UNIQUE INDEX "mlo:GeoPoint_mlo:altitude_ID_ID" ON
+"mlo:GeoPoint_mlo:altitude" (ID, "mlo:altitude");
+CREATE UNIQUE INDEX "mlo:GeoPoint_mlo:city_ID_ID" ON
+"mlo:GeoPoint_mlo:city" (ID, "mlo:city");
+CREATE UNIQUE INDEX "mlo:GeoPoint_mlo:country_ID_ID" ON
+"mlo:GeoPoint_mlo:country" (ID, "mlo:country");
+CREATE UNIQUE INDEX "mlo:GeoPoint_mlo:latitude_ID_ID" ON
+"mlo:GeoPoint_mlo:latitude" (ID, "mlo:latitude");
+CREATE UNIQUE INDEX "mlo:GeoPoint_mlo:longitude_ID_ID" ON
+"mlo:GeoPoint_mlo:longitude" (ID, "mlo:longitude");
+CREATE UNIQUE INDEX "mlo:GeoPoint_mlo:state_ID_ID" ON
+"mlo:GeoPoint_mlo:state" (ID, "mlo:state");
+CREATE UNIQUE INDEX "mlo:GeoPoint_mlo:timestamp_ID_ID" ON
+"mlo:GeoPoint_mlo:timestamp" (ID, "mlo:timestamp");
+CREATE UNIQUE INDEX "mlo:GeoSphere_mlo:radius_ID_ID" ON
+"mlo:GeoSphere_mlo:radius" (ID, "mlo:radius");
+CREATE UNIQUE INDEX "mlo:LandmarkCategory_mlo:isRemovable_ID_ID" ON
+"mlo:LandmarkCategory_mlo:isRemovable" (ID, "mlo:isRemovable");
+CREATE UNIQUE INDEX "mlo:Landmark_mlo:belongsToCategory_ID_ID" ON
+"mlo:Landmark_mlo:belongsToCategory" (ID, "mlo:belongsToCategory");
+CREATE UNIQUE INDEX "mlo:Landmark_mlo:poiLocation_ID_ID" ON
+"mlo:Landmark_mlo:poiLocation" (ID, "mlo:poiLocation");
+CREATE UNIQUE INDEX "mlo:LocationBoundingBox_mlo:boxEastLimit_ID_ID"
+ON "mlo:LocationBoundingBox_mlo:boxEastLimit" (ID,
+"mlo:boxEastLimit");
+CREATE UNIQUE INDEX "mlo:LocationBoundingBox_mlo:boxNorthLimit_ID_ID"
+ON "mlo:LocationBoundingBox_mlo:boxNorthLimit" (ID,
+"mlo:boxNorthLimit");
+CREATE UNIQUE INDEX
+"mlo:LocationBoundingBox_mlo:boxSouthWestCorner_ID_ID" ON
+"mlo:LocationBoundingBox_mlo:boxSouthWestCorner" (ID,
+"mlo:boxSouthWestCorner");
+CREATE UNIQUE INDEX
+"mlo:LocationBoundingBox_mlo:boxVerticalLimit_ID_ID" ON
+"mlo:LocationBoundingBox_mlo:boxVerticalLimit" (ID,
+"mlo:boxVerticalLimit");
+CREATE UNIQUE INDEX "mlo:Route_mlo:endTime_ID_ID" ON
+"mlo:Route_mlo:endTime" (ID, "mlo:endTime");
+CREATE UNIQUE INDEX "mlo:Route_mlo:routeDetails_ID_ID" ON
+"mlo:Route_mlo:routeDetails" (ID, "mlo:routeDetails");
+CREATE UNIQUE INDEX "mlo:Route_mlo:startTime_ID_ID" ON
+"mlo:Route_mlo:startTime" (ID, "mlo:startTime");
+CREATE UNIQUE INDEX "mto:Transfer_mto:transferList_ID_ID" ON
+"mto:Transfer_mto:transferList" (ID, "mto:transferList");
+CREATE UNIQUE INDEX "mto:Transfer_mto:transferPrivacyLevel_ID_ID" ON
+"mto:Transfer_mto:transferPrivacyLevel" (ID,
+"mto:transferPrivacyLevel");
+CREATE UNIQUE INDEX "mto:UploadTransfer_mto:transferCategory_ID_ID" ON
+"mto:UploadTransfer_mto:transferCategory" (ID,
+"mto:transferCategory");
+CREATE UNIQUE INDEX "nao:Tag_tracker:isDefaultTag_ID_ID" ON
+"nao:Tag_tracker:isDefaultTag" (ID, "tracker:isDefaultTag");
+CREATE UNIQUE INDEX "nao:Tag_tracker:tagRelatedTo_ID_ID" ON
+"nao:Tag_tracker:tagRelatedTo" (ID, "tracker:tagRelatedTo");
+CREATE UNIQUE INDEX "ncal:Alarm_ncal:action_ID_ID" ON
+"ncal:Alarm_ncal:action" (ID, "ncal:action");
+CREATE UNIQUE INDEX "ncal:BydayRulePart_ncal:bydayModifier_ID_ID" ON
+"ncal:BydayRulePart_ncal:bydayModifier" (ID, "ncal:bydayModifier");
+CREATE UNIQUE INDEX "ncal:BydayRulePart_ncal:bydayWeekday_ID_ID" ON
+"ncal:BydayRulePart_ncal:bydayWeekday" (ID, "ncal:bydayWeekday");
+CREATE UNIQUE INDEX "ncal:Calendar_ncal:component_ID_ID" ON
+"ncal:Calendar_ncal:component" (ID, "ncal:component");
+CREATE UNIQUE INDEX "ncal:Freebusy_ncal:freebusy_ID_ID" ON
+"ncal:Freebusy_ncal:freebusy" (ID, "ncal:freebusy");
+CREATE UNIQUE INDEX "ncal:RecurrenceRule_ncal:byday_ID_ID" ON
+"ncal:RecurrenceRule_ncal:byday" (ID, "ncal:byday");
+CREATE UNIQUE INDEX "ncal:RecurrenceRule_ncal:byhour_ID_ID" ON
+"ncal:RecurrenceRule_ncal:byhour" (ID, "ncal:byhour");
+CREATE UNIQUE INDEX "ncal:RecurrenceRule_ncal:byminute_ID_ID" ON
+"ncal:RecurrenceRule_ncal:byminute" (ID, "ncal:byminute");
+CREATE UNIQUE INDEX "ncal:RecurrenceRule_ncal:bymonth_ID_ID" ON
+"ncal:RecurrenceRule_ncal:bymonth" (ID, "ncal:bymonth");
+CREATE UNIQUE INDEX "ncal:RecurrenceRule_ncal:bymonthday_ID_ID" ON
+"ncal:RecurrenceRule_ncal:bymonthday" (ID, "ncal:bymonthday");
+CREATE UNIQUE INDEX "ncal:RecurrenceRule_ncal:bysecond_ID_ID" ON
+"ncal:RecurrenceRule_ncal:bysecond" (ID, "ncal:bysecond");
+CREATE UNIQUE INDEX "ncal:RecurrenceRule_ncal:bysetpos_ID_ID" ON
+"ncal:RecurrenceRule_ncal:bysetpos" (ID, "ncal:bysetpos");
+CREATE UNIQUE INDEX "ncal:RecurrenceRule_ncal:byweekno_ID_ID" ON
+"ncal:RecurrenceRule_ncal:byweekno" (ID, "ncal:byweekno");
+CREATE UNIQUE INDEX "ncal:RecurrenceRule_ncal:byyearday_ID_ID" ON
+"ncal:RecurrenceRule_ncal:byyearday" (ID, "ncal:byyearday");
+CREATE UNIQUE INDEX "ncal:UnionParentClass_ncal:attach_ID_ID" ON
+"ncal:UnionParentClass_ncal:attach" (ID, "ncal:attach");
+CREATE UNIQUE INDEX "ncal:UnionParentClass_ncal:attendee_ID_ID" ON
+"ncal:UnionParentClass_ncal:attendee" (ID, "ncal:attendee");
+CREATE UNIQUE INDEX "ncal:UnionParentClass_ncal:categories_ID_ID" ON
+"ncal:UnionParentClass_ncal:categories" (ID, "ncal:categories");
+CREATE UNIQUE INDEX "ncal:UnionParentClass_ncal:exdate_ID_ID" ON
+"ncal:UnionParentClass_ncal:exdate" (ID, "ncal:exdate");
+CREATE UNIQUE INDEX "ncal:UnionParentClass_ncal:exrule_ID_ID" ON
+"ncal:UnionParentClass_ncal:exrule" (ID, "ncal:exrule");
+CREATE UNIQUE INDEX "ncal:UnionParentClass_ncal:hasAlarm_ID_ID" ON
+"ncal:UnionParentClass_ncal:hasAlarm" (ID, "ncal:hasAlarm");
+CREATE UNIQUE INDEX "ncal:UnionParentClass_ncal:ncalRelation_ID_ID" ON
+"ncal:UnionParentClass_ncal:ncalRelation" (ID, "ncal:ncalRelation");
+CREATE UNIQUE INDEX "ncal:UnionParentClass_ncal:rdate_ID_ID" ON
+"ncal:UnionParentClass_ncal:rdate" (ID, "ncal:rdate");
+CREATE UNIQUE INDEX "ncal:UnionParentClass_ncal:relatedToChild_ID_ID"
+ON "ncal:UnionParentClass_ncal:relatedToChild" (ID,
+"ncal:relatedToChild");
+CREATE UNIQUE INDEX "ncal:UnionParentClass_ncal:relatedToParent_ID_ID"
+ON "ncal:UnionParentClass_ncal:relatedToParent" (ID,
+"ncal:relatedToParent");
+CREATE UNIQUE INDEX "ncal:UnionParentClass_ncal:resources_ID_ID" ON
+"ncal:UnionParentClass_ncal:resources" (ID, "ncal:resources");
+CREATE UNIQUE INDEX "ncal:UnionParentClass_ncal:rrule_ID_ID" ON
+"ncal:UnionParentClass_ncal:rrule" (ID, "ncal:rrule");
+CREATE UNIQUE INDEX "nco:Affiliation_nco:title_ID_ID" ON
+"nco:Affiliation_nco:title" (ID, "nco:title");
+CREATE UNIQUE INDEX "nco:ContactList_nco:containsContact_ID_ID" ON
+"nco:ContactList_nco:containsContact" (ID, "nco:containsContact");
+CREATE UNIQUE INDEX "nco:Contact_ncal:anniversary_ID_ID" ON
+"nco:Contact_ncal:anniversary" (ID, "ncal:anniversary");
+CREATE UNIQUE INDEX "nco:Contact_ncal:birthday_ID_ID" ON
+"nco:Contact_ncal:birthday" (ID, "ncal:birthday");
+CREATE UNIQUE INDEX "nco:Contact_nco:belongsToGroup_ID_ID" ON
+"nco:Contact_nco:belongsToGroup" (ID, "nco:belongsToGroup");
+CREATE UNIQUE INDEX "nco:Contact_nco:note_ID_ID" ON
+"nco:Contact_nco:note" (ID, "nco:note");
+CREATE UNIQUE INDEX "nco:Contact_scal:anniversary_ID_ID" ON
+"nco:Contact_scal:anniversary" (ID, "scal:anniversary");
+CREATE UNIQUE INDEX "nco:Contact_scal:birthday_ID_ID" ON
+"nco:Contact_scal:birthday" (ID, "scal:birthday");
+CREATE UNIQUE INDEX "nco:IMAccount_nco:hasIMContact_ID_ID" ON
+"nco:IMAccount_nco:hasIMContact" (ID, "nco:hasIMContact");
+CREATE UNIQUE INDEX "nco:IMAddress_nco:imCapability_ID_ID" ON
+"nco:IMAddress_nco:imCapability" (ID, "nco:imCapability");
+CREATE UNIQUE INDEX "nco:PersonContact_nco:hasAffiliation_ID_ID" ON
+"nco:PersonContact_nco:hasAffiliation" (ID, "nco:hasAffiliation");
+CREATE INDEX "nco:PersonContact_nco:nameFamily" ON "nco:PersonContact"
+("nco:nameFamily");
+CREATE INDEX "nco:PhoneNumber_nco:phoneNumber" ON "nco:PhoneNumber"
+("nco:phoneNumber");
+CREATE UNIQUE INDEX "nco:Role_nco:blogUrl_ID_ID" ON
+"nco:Role_nco:blogUrl" (ID, "nco:blogUrl");
+CREATE UNIQUE INDEX "nco:Role_nco:foafUrl_ID_ID" ON
+"nco:Role_nco:foafUrl" (ID, "nco:foafUrl");
+CREATE UNIQUE INDEX "nco:Role_nco:hasContactMedium_ID_ID" ON
+"nco:Role_nco:hasContactMedium" (ID, "nco:hasContactMedium");
+CREATE INDEX "nco:Role_nco:hasEmailAddress_ID" ON
+"nco:Role_nco:hasEmailAddress" (ID);
+CREATE UNIQUE INDEX "nco:Role_nco:hasEmailAddress_ID_ID" ON
+"nco:Role_nco:hasEmailAddress" ("nco:hasEmailAddress", ID);
+CREATE UNIQUE INDEX "nco:Role_nco:hasIMAddress_ID_ID" ON
+"nco:Role_nco:hasIMAddress" (ID, "nco:hasIMAddress");
+CREATE UNIQUE INDEX "nco:Role_nco:hasPhoneNumber_ID_ID" ON
+"nco:Role_nco:hasPhoneNumber" (ID, "nco:hasPhoneNumber");
+CREATE INDEX "nco:Role_nco:hasPostalAddress_ID" ON
+"nco:Role_nco:hasPostalAddress" (ID);
+CREATE UNIQUE INDEX "nco:Role_nco:hasPostalAddress_ID_ID" ON
+"nco:Role_nco:hasPostalAddress" ("nco:hasPostalAddress", ID);
+CREATE UNIQUE INDEX "nco:Role_nco:url_ID_ID" ON "nco:Role_nco:url"
+(ID, "nco:url");
+CREATE UNIQUE INDEX "nco:Role_nco:websiteUrl_ID_ID" ON
+"nco:Role_nco:websiteUrl" (ID, "nco:websiteUrl");
+CREATE UNIQUE INDEX
+"nfo:BookmarkFolder_nfo:containsBookmarkFolder_ID_ID" ON
+"nfo:BookmarkFolder_nfo:containsBookmarkFolder" (ID,
+"nfo:containsBookmarkFolder");
+CREATE UNIQUE INDEX "nfo:BookmarkFolder_nfo:containsBookmark_ID_ID" ON
+"nfo:BookmarkFolder_nfo:containsBookmark" (ID,
+"nfo:containsBookmark");
+CREATE INDEX "nfo:FileDataObject_nfo:fileLastModified" ON
+"nfo:FileDataObject" ("nfo:fileLastModified");
+CREATE UNIQUE INDEX "nfo:Image_nfo:depicts_ID_ID" ON
+"nfo:Image_nfo:depicts" (ID, "nfo:depicts");
+CREATE UNIQUE INDEX "nfo:Image_nfo:hasRegionOfInterest_ID_ID" ON
+"nfo:Image_nfo:hasRegionOfInterest" (ID, "nfo:hasRegionOfInterest");
+CREATE UNIQUE INDEX "nfo:MediaList_nfo:hasMediaFileListEntry_ID_ID" ON
+"nfo:MediaList_nfo:hasMediaFileListEntry" (ID,
+"nfo:hasMediaFileListEntry");
+CREATE UNIQUE INDEX "nfo:MediaList_nfo:mediaListEntry_ID_ID" ON
+"nfo:MediaList_nfo:mediaListEntry" (ID, "nfo:mediaListEntry");
+CREATE UNIQUE INDEX "nfo:Media_mtp:hidden_ID_ID" ON
+"nfo:Media_mtp:hidden" (ID, "mtp:hidden");
+CREATE UNIQUE INDEX "nfo:Media_nmm:alternativeMedia_ID_ID" ON
+"nfo:Media_nmm:alternativeMedia" (ID, "nmm:alternativeMedia");
+CREATE INDEX "nfo:Visual_nie:contentCreated" ON "nfo:Visual"
+("nie:contentCreated");
+CREATE UNIQUE INDEX "nid3:ID3Audio_nid3:leadArtist_ID_ID" ON
+"nid3:ID3Audio_nid3:leadArtist" (ID, "nid3:leadArtist");
+CREATE UNIQUE INDEX "nie:DataObject_nie:dataSource_ID_ID" ON
+"nie:DataObject_nie:dataSource" (ID, "nie:dataSource");
+CREATE UNIQUE INDEX "nie:DataObject_nie:isPartOf_ID_ID" ON
+"nie:DataObject_nie:isPartOf" (ID, "nie:isPartOf");
+CREATE INDEX "nie:DataObject_nie:url" ON "nie:DataObject" ("nie:url");
+CREATE INDEX "nie:InformationElement_mlo:location_ID" ON
+"nie:InformationElement_mlo:location" (ID);
+CREATE UNIQUE INDEX "nie:InformationElement_mlo:location_ID_ID" ON
+"nie:InformationElement_mlo:location" ("mlo:location", ID);
+CREATE UNIQUE INDEX "nie:InformationElement_nao:hasProperty_ID_ID" ON
+"nie:InformationElement_nao:hasProperty" (ID, "nao:hasProperty");
+CREATE UNIQUE INDEX "nie:InformationElement_nco:contributor_ID_ID" ON
+"nie:InformationElement_nco:contributor" (ID, "nco:contributor");
+CREATE UNIQUE INDEX "nie:InformationElement_nco:creator_ID_ID" ON
+"nie:InformationElement_nco:creator" (ID, "nco:creator");
+CREATE UNIQUE INDEX "nie:InformationElement_nie:hasLogicalPart_ID_ID"
+ON "nie:InformationElement_nie:hasLogicalPart" (ID,
+"nie:hasLogicalPart");
+CREATE UNIQUE INDEX "nie:InformationElement_nie:hasPart_ID_ID" ON
+"nie:InformationElement_nie:hasPart" (ID, "nie:hasPart");
+CREATE UNIQUE INDEX
+"nie:InformationElement_nie:informationElementDate_ID_ID" ON
+"nie:InformationElement_nie:informationElementDate" (ID,
+"nie:informationElementDate");
+CREATE UNIQUE INDEX "nie:InformationElement_nie:isLogicalPartOf_ID_ID"
+ON "nie:InformationElement_nie:isLogicalPartOf" (ID,
+"nie:isLogicalPartOf");
+CREATE UNIQUE INDEX "nie:InformationElement_nie:keyword_ID_ID" ON
+"nie:InformationElement_nie:keyword" (ID, "nie:keyword");
+CREATE UNIQUE INDEX "nie:InformationElement_nie:relatedTo_ID_ID" ON
+"nie:InformationElement_nie:relatedTo" (ID, "nie:relatedTo");
+CREATE INDEX "nie:InformationElement_slo:location" ON
+"nie:InformationElement" ("slo:location");
+CREATE INDEX "nmm:Artist_nmm:artistName" ON "nmm:Artist" ("nmm:artistName");
+CREATE INDEX "nmm:MusicAlbum_nie:title" ON "nmm:MusicAlbum" ("nie:title");
+CREATE UNIQUE INDEX "nmm:MusicAlbum_nmm:albumArtist_ID_ID" ON
+"nmm:MusicAlbum_nmm:albumArtist" (ID, "nmm:albumArtist");
+CREATE INDEX "nmm:MusicPiece_nie:title" ON "nmm:MusicPiece" ("nie:title");
+CREATE UNIQUE INDEX "nmm:MusicPiece_nmm:lyrics_ID_ID" ON
+"nmm:MusicPiece_nmm:lyrics" (ID, "nmm:lyrics");
+CREATE INDEX "nmm:MusicPiece_nmm:musicAlbum" ON "nmm:MusicPiece"
+("nmm:musicAlbum");
+CREATE INDEX "nmm:MusicPiece_nmm:performer" ON "nmm:MusicPiece"
+("nmm:performer");
+CREATE UNIQUE INDEX "nmm:RadioStation_nmm:carrier_ID_ID" ON
+"nmm:RadioStation_nmm:carrier" (ID, "nmm:carrier");
+CREATE UNIQUE INDEX "nmm:Video_mtp:scantype_ID_ID" ON
+"nmm:Video_mtp:scantype" (ID, "mtp:scantype");
+CREATE UNIQUE INDEX "nmm:Video_nmm:director_ID_ID" ON
+"nmm:Video_nmm:director" (ID, "nmm:director");
+CREATE UNIQUE INDEX "nmm:Video_nmm:leadActor_ID_ID" ON
+"nmm:Video_nmm:leadActor" (ID, "nmm:leadActor");
+CREATE UNIQUE INDEX "nmm:Video_nmm:subtitle_ID_ID" ON
+"nmm:Video_nmm:subtitle" (ID, "nmm:subtitle");
+CREATE INDEX "nmo:Call_nmo:sentDate" ON "nmo:Call" ("nmo:sentDate");
+CREATE INDEX "nmo:CommunicationChannel_nmo:hasParticipant_ID" ON
+"nmo:CommunicationChannel_nmo:hasParticipant" (ID);
+CREATE UNIQUE INDEX
+"nmo:CommunicationChannel_nmo:hasParticipant_ID_ID" ON
+"nmo:CommunicationChannel_nmo:hasParticipant" ("nmo:hasParticipant",
+ID);
+CREATE INDEX "nmo:CommunicationChannel_nmo:lastMessageDate" ON
+"nmo:CommunicationChannel" ("nmo:lastMessageDate");
+CREATE UNIQUE INDEX "nmo:Email_nmo:contentMimeType_ID_ID" ON
+"nmo:Email_nmo:contentMimeType" (ID, "nmo:contentMimeType");
+CREATE UNIQUE INDEX "nmo:Message_nmo:bcc_ID_ID" ON
+"nmo:Message_nmo:bcc" (ID, "nmo:bcc");
+CREATE UNIQUE INDEX "nmo:Message_nmo:cc_ID_ID" ON "nmo:Message_nmo:cc"
+(ID, "nmo:cc");
+CREATE INDEX "nmo:Message_nmo:communicationChannel" ON "nmo:Message"
+("nmo:communicationChannel", "nmo:receivedDate");
+CREATE INDEX "nmo:Message_nmo:conversation" ON "nmo:Message"
+("nmo:conversation");
+CREATE INDEX "nmo:Message_nmo:from" ON "nmo:Message" ("nmo:from");
+CREATE UNIQUE INDEX "nmo:Message_nmo:hasAttachment_ID_ID" ON
+"nmo:Message_nmo:hasAttachment" (ID, "nmo:hasAttachment");
+CREATE UNIQUE INDEX "nmo:Message_nmo:inReplyTo_ID_ID" ON
+"nmo:Message_nmo:inReplyTo" (ID, "nmo:inReplyTo");
+CREATE UNIQUE INDEX "nmo:Message_nmo:messageHeader_ID_ID" ON
+"nmo:Message_nmo:messageHeader" (ID, "nmo:messageHeader");
+CREATE UNIQUE INDEX "nmo:Message_nmo:recipient_ID_ID" ON
+"nmo:Message_nmo:recipient" (ID, "nmo:recipient");
+CREATE UNIQUE INDEX "nmo:Message_nmo:references_ID_ID" ON
+"nmo:Message_nmo:references" (ID, "nmo:references");
+CREATE INDEX "nmo:Message_nmo:sender" ON "nmo:Message" ("nmo:sender");
+CREATE INDEX "nmo:Message_nmo:sentDate" ON "nmo:Message" ("nmo:sentDate");
+CREATE INDEX "nmo:Message_nmo:to_ID" ON "nmo:Message_nmo:to" (ID);
+CREATE UNIQUE INDEX "nmo:Message_nmo:to_ID_ID" ON "nmo:Message_nmo:to"
+("nmo:to", ID);
+CREATE UNIQUE INDEX "nmo:MimePart_nmo:mimeHeader_ID_ID" ON
+"nmo:MimePart_nmo:mimeHeader" (ID, "nmo:mimeHeader");
+CREATE UNIQUE INDEX "nmo:Multipart_nmo:partBoundary_ID_ID" ON
+"nmo:Multipart_nmo:partBoundary" (ID, "nmo:partBoundary");
+CREATE UNIQUE INDEX
+"nmo:PhoneMessageFolder_nmo:containsPhoneMessageFolder_ID_ID" ON
+"nmo:PhoneMessageFolder_nmo:containsPhoneMessageFolder" (ID,
+"nmo:containsPhoneMessageFolder");
+CREATE UNIQUE INDEX
+"nmo:PhoneMessageFolder_nmo:containsPhoneMessage_ID_ID" ON
+"nmo:PhoneMessageFolder_nmo:containsPhoneMessage" (ID,
+"nmo:containsPhoneMessage");
+CREATE UNIQUE INDEX "nmo:PhoneMessage_nmo:toVCard_ID_ID" ON
+"nmo:PhoneMessage_nmo:toVCard" (ID, "nmo:toVCard");
+CREATE UNIQUE INDEX "rdf:Property_rdfs:subPropertyOf_ID_ID" ON
+"rdf:Property_rdfs:subPropertyOf" (ID, "rdfs:subPropertyOf");
+CREATE UNIQUE INDEX "rdfs:Class_rdfs:subClassOf_ID_ID" ON
+"rdfs:Class_rdfs:subClassOf" (ID, "rdfs:subClassOf");
+CREATE UNIQUE INDEX "rdfs:Class_tracker:domainIndex_ID_ID" ON
+"rdfs:Class_tracker:domainIndex" (ID, "tracker:domainIndex");
+CREATE UNIQUE INDEX "rdfs:Resource_dc:contributor_ID_ID" ON
+"rdfs:Resource_dc:contributor" (ID, "dc:contributor");
+CREATE UNIQUE INDEX "rdfs:Resource_dc:date_ID_ID" ON
+"rdfs:Resource_dc:date" (ID, "dc:date");
+CREATE UNIQUE INDEX "rdfs:Resource_dc:relation_ID_ID" ON
+"rdfs:Resource_dc:relation" (ID, "dc:relation");
+CREATE UNIQUE INDEX "rdfs:Resource_dc:source_ID_ID" ON
+"rdfs:Resource_dc:source" (ID, "dc:source");
+CREATE UNIQUE INDEX "rdfs:Resource_nao:deprecated_ID_ID" ON
+"rdfs:Resource_nao:deprecated" (ID, "nao:deprecated");
+CREATE INDEX "rdfs:Resource_nao:hasTag_ID" ON "rdfs:Resource_nao:hasTag" (ID);
+CREATE UNIQUE INDEX "rdfs:Resource_nao:hasTag_ID_ID" ON
+"rdfs:Resource_nao:hasTag" ("nao:hasTag", ID);
+CREATE UNIQUE INDEX "rdfs:Resource_nao:isRelated_ID_ID" ON
+"rdfs:Resource_nao:isRelated" (ID, "nao:isRelated");
+CREATE UNIQUE INDEX "rdfs:Resource_rdf:type_ID_ID" ON
+"rdfs:Resource_rdf:type" (ID, "rdf:type");
+CREATE INDEX "rdfs:Resource_tracker:added" ON "rdfs:Resource" ("tracker:added");
+CREATE UNIQUE INDEX "scal:Attendee_scal:delegated-from_ID_ID" ON
+"scal:Attendee_scal:delegated-from" (ID, "scal:delegated-from");
+CREATE UNIQUE INDEX "scal:Attendee_scal:delegated-to_ID_ID" ON
+"scal:Attendee_scal:delegated-to" (ID, "scal:delegated-to");
+CREATE UNIQUE INDEX "scal:Attendee_scal:member_ID_ID" ON
+"scal:Attendee_scal:member" (ID, "scal:member");
+CREATE UNIQUE INDEX "scal:Attendee_scal:sent-by_ID_ID" ON
+"scal:Attendee_scal:sent-by" (ID, "scal:sent-by");
+CREATE UNIQUE INDEX "scal:CalendarAlarm_scal:alarmAttendee_ID_ID" ON
+"scal:CalendarAlarm_scal:alarmAttendee" (ID, "scal:alarmAttendee");
+CREATE UNIQUE INDEX "scal:CalendarItem_scal:access_ID_ID" ON
+"scal:CalendarItem_scal:access" (ID, "scal:access");
+CREATE UNIQUE INDEX "scal:CalendarItem_scal:attachment_ID_ID" ON
+"scal:CalendarItem_scal:attachment" (ID, "scal:attachment");
+CREATE UNIQUE INDEX "scal:CalendarItem_scal:attendee_ID_ID" ON
+"scal:CalendarItem_scal:attendee" (ID, "scal:attendee");
+CREATE UNIQUE INDEX "scal:CalendarItem_scal:belongsToCalendar_ID_ID"
+ON "scal:CalendarItem_scal:belongsToCalendar" (ID,
+"scal:belongsToCalendar");
+CREATE UNIQUE INDEX "scal:CalendarItem_scal:contact_ID_ID" ON
+"scal:CalendarItem_scal:contact" (ID, "scal:contact");
+CREATE UNIQUE INDEX "scal:CalendarItem_scal:rrule_ID_ID" ON
+"scal:CalendarItem_scal:rrule" (ID, "scal:rrule");
+CREATE INDEX "slo:GeoLocation_slo:postalAddress" ON "slo:GeoLocation"
+("slo:postalAddress");
+CREATE UNIQUE INDEX "slo:Landmark_slo:belongsToCategory_ID_ID" ON
+"slo:Landmark_slo:belongsToCategory" (ID, "slo:belongsToCategory");
+CREATE UNIQUE INDEX "slo:Landmark_slo:hasContact_ID_ID" ON
+"slo:Landmark_slo:hasContact" (ID, "slo:hasContact");
+CREATE UNIQUE INDEX "slo:Route_slo:routeDetails_ID_ID" ON
+"slo:Route_slo:routeDetails" (ID, "slo:routeDetails");
+
+EXPLAIN SELECT "1_u", (SELECT "nco:fullname" FROM "nco:Contact" WHERE
+ID = "1_u") COLLATE NOCASE, (SELECT "nco:nameFamily" FROM
+"nco:PersonContact" WHERE ID = "1_u") COLLATE NOCASE, (SELECT
+"nco:nameGiven" FROM "nco:PersonContact" WHERE ID = "1_u")
+COLLATE NOCASE, (SELECT "nco:nameAdditional" FROM
+"nco:PersonContact" WHERE ID = "1_u") COLLATE NOCASE, (SELECT
+"nco:nameHonorificPrefix" FROM "nco:PersonContact" WHERE ID =
+"1_u") COLLATE NOCASE, (SELECT "nco:nameHonorificSuffix" FROM
+"nco:PersonContact" WHERE ID = "1_u") COLLATE NOCASE, (SELECT
+"nco:nickname" FROM "nco:Contact" WHERE ID = "1_u") COLLATE
+NOCASE, strftime("%s",(SELECT "nco:birthDate" FROM
+"nco:Contact" WHERE ID = "1_u")), (SELECT "nie:url" FROM
+"nie:DataObject" WHERE ID = (SELECT "nco:photo" FROM
+"nco:Contact" WHERE ID = "1_u")) COLLATE NOCASE, (SELECT
+GROUP_CONCAT("2_u"||? COLLATE NOCASE||COALESCE((SELECT
+"nco:imProtocol" FROM "nco:IMAddress" WHERE ID = "3_u") COLLATE
+NOCASE, ? COLLATE NOCASE)||? COLLATE NOCASE||COALESCE((SELECT
+"nco:imID" FROM "nco:IMAddress" WHERE ID = "3_u") COLLATE
+NOCASE, ? COLLATE NOCASE)||? COLLATE NOCASE||COALESCE((SELECT
+"nco:imNickname" FROM "nco:IMAddress" WHERE ID = "3_u") COLLATE
+NOCASE, ? COLLATE NOCASE), '\n') FROM (SELECT
+"nco:PersonContact_nco:hasAffiliation2"."nco:hasAffiliation" AS
+"2_u", "nco:Role_nco:hasIMAddress3"."nco:hasIMAddress" AS
+"3_u" FROM "nco:PersonContact_nco:hasAffiliation" AS
+"nco:PersonContact_nco:hasAffiliation2",
+"nco:Role_nco:hasIMAddress" AS "nco:Role_nco:hasIMAddress3" WHERE
+"1_u" = "nco:PersonContact_nco:hasAffiliation2"."ID" AND
+"nco:PersonContact_nco:hasAffiliation2"."nco:hasAffiliation" =
+"nco:Role_nco:hasIMAddress3"."ID")), (SELECT
+GROUP_CONCAT("2_u"||? COLLATE NOCASE||(SELECT "nco:phoneNumber"
+FROM "nco:PhoneNumber" WHERE ID = "4_u") COLLATE NOCASE, '\n')
+FROM (SELECT "nco:PersonContact_nco:hasAffiliation4"."nco:hasAffiliation"
+AS "2_u", "nco:Role_nco:hasPhoneNumber5"."nco:hasPhoneNumber" AS
+"4_u" FROM "nco:PersonContact_nco:hasAffiliation" AS
+"nco:PersonContact_nco:hasAffiliation4",
+"nco:Role_nco:hasPhoneNumber" AS "nco:Role_nco:hasPhoneNumber5"
+WHERE "1_u" = "nco:PersonContact_nco:hasAffiliation4"."ID" AND
+"nco:PersonContact_nco:hasAffiliation4"."nco:hasAffiliation" =
+"nco:Role_nco:hasPhoneNumber5"."ID")), (SELECT
+GROUP_CONCAT("2_u"||? COLLATE NOCASE||(SELECT "nco:emailAddress"
+FROM "nco:EmailAddress" WHERE ID = "5_u") COLLATE NOCASE, ',')
+FROM (SELECT "nco:PersonContact_nco:hasAffiliation6"."nco:hasAffiliation"
+AS "2_u", "nco:Role_nco:hasEmailAddress7"."nco:hasEmailAddress"
+AS "5_u" FROM "nco:PersonContact_nco:hasAffiliation" AS
+"nco:PersonContact_nco:hasAffiliation6",
+"nco:Role_nco:hasEmailAddress" AS "nco:Role_nco:hasEmailAddress7"
+WHERE "1_u" = "nco:PersonContact_nco:hasAffiliation6"."ID" AND
+"nco:PersonContact_nco:hasAffiliation6"."nco:hasAffiliation" =
+"nco:Role_nco:hasEmailAddress7"."ID")), (SELECT
+GROUP_CONCAT("2_u"||? COLLATE NOCASE||COALESCE((SELECT
+GROUP_CONCAT((SELECT Uri FROM Resource WHERE ID =
+"nco:blogUrl"),',') FROM "nco:Role_nco:blogUrl" WHERE ID =
+"2_u"), ? COLLATE NOCASE)||? COLLATE NOCASE||COALESCE((SELECT
+GROUP_CONCAT((SELECT Uri FROM Resource WHERE ID =
+"nco:websiteUrl"),',') FROM "nco:Role_nco:websiteUrl" WHERE ID =
+"2_u"), ? COLLATE NOCASE)||? COLLATE NOCASE||COALESCE((SELECT
+GROUP_CONCAT((SELECT Uri FROM Resource WHERE ID = "nco:url"),',')
+FROM "nco:Role_nco:url" WHERE ID = "2_u"), ? COLLATE NOCASE),
+'\n') FROM (SELECT
+"nco:PersonContact_nco:hasAffiliation8"."nco:hasAffiliation" AS
+"2_u" FROM "nco:PersonContact_nco:hasAffiliation" AS
+"nco:PersonContact_nco:hasAffiliation8" WHERE "1_u" =
+"nco:PersonContact_nco:hasAffiliation8"."ID")), (SELECT
+GROUP_CONCAT("6_u", ',') FROM (SELECT
+"rdfs:Resource_nao:hasTag9"."nao:hasTag" AS "6_u" FROM
+"rdfs:Resource_nao:hasTag" AS "rdfs:Resource_nao:hasTag9" WHERE
+"1_u" = "rdfs:Resource_nao:hasTag9"."ID")), (SELECT Uri FROM
+Resource WHERE ID = "1_u"), (SELECT GROUP_CONCAT("2_u"||? COLLATE
+NOCASE||COALESCE((SELECT "nco:role" FROM "nco:Affiliation" WHERE
+ID = "2_u") COLLATE NOCASE, ? COLLATE NOCASE)||? COLLATE
+NOCASE||COALESCE((SELECT "nco:department" FROM "nco:Affiliation"
+WHERE ID = "2_u") COLLATE NOCASE, ? COLLATE NOCASE)||? COLLATE
+NOCASE||COALESCE((SELECT GROUP_CONCAT("nco:title",',') FROM
+"nco:Affiliation_nco:title" WHERE ID = "2_u"), ? COLLATE NOCASE),
+'\n') FROM (SELECT
+"nco:PersonContact_nco:hasAffiliation10"."nco:hasAffiliation" AS
+"2_u" FROM "nco:PersonContact_nco:hasAffiliation" AS
+"nco:PersonContact_nco:hasAffiliation10" WHERE "1_u" =
+"nco:PersonContact_nco:hasAffiliation10"."ID")), (SELECT
+GROUP_CONCAT("nco:note",',') FROM "nco:Contact_nco:note" WHERE ID
+= "1_u"), (SELECT "nco:gender" FROM "nco:PersonContact" WHERE ID
+= "1_u"), (SELECT GROUP_CONCAT("2_u"||? COLLATE
+NOCASE||COALESCE((SELECT "nco:pobox" FROM "nco:PostalAddress"
+WHERE ID = "7_u") COLLATE NOCASE, ? COLLATE NOCASE)||? COLLATE
+NOCASE||COALESCE((SELECT "nco:district" FROM "nco:PostalAddress"
+WHERE ID = "7_u") COLLATE NOCASE, ? COLLATE NOCASE)||? COLLATE
+NOCASE||COALESCE((SELECT "nco:county" FROM "nco:PostalAddress"
+WHERE ID = "7_u") COLLATE NOCASE, ? COLLATE NOCASE)||? COLLATE
+NOCASE||COALESCE((SELECT "nco:locality" FROM "nco:PostalAddress"
+WHERE ID = "7_u") COLLATE NOCASE, ? COLLATE NOCASE)||? COLLATE
+NOCASE||COALESCE((SELECT "nco:postalcode" FROM
+"nco:PostalAddress" WHERE ID = "7_u") COLLATE NOCASE, ? COLLATE
+NOCASE)||? COLLATE NOCASE||COALESCE((SELECT "nco:streetAddress"
+FROM "nco:PostalAddress" WHERE ID = "7_u") COLLATE NOCASE, ?
+COLLATE NOCASE)||? COLLATE NOCASE||COALESCE((SELECT Uri FROM
+Resource WHERE ID = (SELECT "nco:addressLocation" FROM
+"nco:PostalAddress" WHERE ID = "7_u")), ? COLLATE NOCASE)||?
+COLLATE NOCASE||COALESCE((SELECT "nco:extendedAddress" FROM
+"nco:PostalAddress" WHERE ID = "7_u") COLLATE NOCASE, ? COLLATE
+NOCASE)||? COLLATE NOCASE||COALESCE((SELECT "nco:country" FROM
+"nco:PostalAddress" WHERE ID = "7_u") COLLATE NOCASE, ? COLLATE
+NOCASE)||? COLLATE NOCASE||COALESCE((SELECT "nco:region" FROM
+"nco:PostalAddress" WHERE ID = "7_u") COLLATE NOCASE, ? COLLATE
+NOCASE), '\n') FROM (SELECT
+"nco:PersonContact_nco:hasAffiliation11"."nco:hasAffiliation" AS
+"2_u", "nco:Role_nco:hasPostalAddress12"."nco:hasPostalAddress"
+AS "7_u" FROM "nco:PersonContact_nco:hasAffiliation" AS
+"nco:PersonContact_nco:hasAffiliation11",
+"nco:Role_nco:hasPostalAddress" AS
+"nco:Role_nco:hasPostalAddress12" WHERE "1_u" =
+"nco:PersonContact_nco:hasAffiliation11"."ID" AND
+"nco:PersonContact_nco:hasAffiliation11"."nco:hasAffiliation" =
+"nco:Role_nco:hasPostalAddress12"."ID")), (SELECT
+GROUP_CONCAT("10_u" COLLATE NOCASE, ',') FROM (SELECT
+"nie:InformationElement_nao:hasProperty13"."nao:hasProperty" AS
+"8_u", "nao:Property14"."nao:propertyName" AS "9_u",
+"nao:Property14"."nao:propertyValue" AS "10_u" FROM
+"nie:InformationElement_nao:hasProperty" AS
+"nie:InformationElement_nao:hasProperty13", "nao:Property" AS
+"nao:Property14" WHERE "1_u" =
+"nie:InformationElement_nao:hasProperty13"."ID" AND
+"nie:InformationElement_nao:hasProperty13"."nao:hasProperty" =
+"nao:Property14"."ID" AND "9_u" IS NOT NULL AND "10_u" IS NOT
+NULL AND ("9_u" COLLATE NOCASE = ? COLLATE NOCASE))) FROM (SELECT
+"nco:PersonContact1"."ID" AS "1_u" FROM "nco:PersonContact" AS
+"nco:PersonContact1") ORDER BY "1_u";
+ }
+} {/.* Goto .*/}
+
+
+finish_test
diff --git a/test/fuzzer1.test b/test/fuzzer1.test
index 6c23211..dc8b445 100644
--- a/test/fuzzer1.test
+++ b/test/fuzzer1.test
@@ -22,100 +22,233 @@ ifcapable !vtab {
return
}
+set ::testprefix fuzzer1
+
+# Test of test code. Only here to make the coverage metric better.
+do_test 0.1 {
+ list [catch { register_fuzzer_module a b c } msg] $msg
+} {1 {wrong # args: should be "register_fuzzer_module DB"}}
+
register_fuzzer_module db
-do_test fuzzer1-1.0 {
- catchsql {CREATE VIRTUAL TABLE fault1 USING fuzzer;}
-} {1 {fuzzer virtual tables must be TEMP}}
-do_test fuzzer1-1.1 {
- db eval {CREATE VIRTUAL TABLE temp.f1 USING fuzzer;}
+
+# Check configuration errors.
+#
+do_catchsql_test fuzzer1-1.1 {
+ CREATE VIRTUAL TABLE f USING fuzzer;
+} {1 {fuzzer: wrong number of CREATE VIRTUAL TABLE arguments}}
+
+do_catchsql_test fuzzer1-1.2 {
+ CREATE VIRTUAL TABLE f USING fuzzer(one, two);
+} {1 {fuzzer: wrong number of CREATE VIRTUAL TABLE arguments}}
+
+do_catchsql_test fuzzer1-1.3 {
+ CREATE VIRTUAL TABLE f USING fuzzer(nosuchtable);
+} {1 {fuzzer: no such table: main.nosuchtable}}
+
+do_catchsql_test fuzzer1-1.4 {
+ CREATE TEMP TABLE nosuchtable(a, b, c, d);
+ CREATE VIRTUAL TABLE f USING fuzzer(nosuchtable);
+} {1 {fuzzer: no such table: main.nosuchtable}}
+
+do_catchsql_test fuzzer1-1.5 {
+ DROP TABLE temp.nosuchtable;
+ CREATE TABLE nosuchtable(a, b, c, d);
+ CREATE VIRTUAL TABLE temp.f USING fuzzer(nosuchtable);
+} {1 {fuzzer: no such table: temp.nosuchtable}}
+
+do_catchsql_test fuzzer1-1.6 {
+ DROP TABLE IF EXISTS f_rules;
+ CREATE TABLE f_rules(a, b, c);
+ CREATE VIRTUAL TABLE f USING fuzzer(f_rules);
+} {1 {fuzzer: f_rules has 3 columns, expected 4}}
+
+do_catchsql_test fuzzer1-1.7 {
+ DROP TABLE IF EXISTS f_rules;
+ CREATE TABLE f_rules(a, b, c, d, e);
+ CREATE VIRTUAL TABLE f USING fuzzer(f_rules);
+} {1 {fuzzer: f_rules has 5 columns, expected 4}}
+
+
+do_execsql_test fuzzer1-2.1 {
+ CREATE TABLE f1_rules(ruleset DEFAULT 0, cfrom, cto, cost);
+ INSERT INTO f1_rules(cfrom, cto, cost) VALUES('e','a',1);
+ INSERT INTO f1_rules(cfrom, cto, cost) VALUES('a','e',10);
+ INSERT INTO f1_rules(cfrom, cto, cost) VALUES('e','o',100);
+
+ CREATE VIRTUAL TABLE f1 USING fuzzer(f1_rules);
} {}
-do_test fuzzer1-1.2 {
- db eval {
- INSERT INTO f1(cfrom, cto, cost) VALUES('e','a',1);
- INSERT INTO f1(cfrom, cto, cost) VALUES('a','e',10);
- INSERT INTO f1(cfrom, cto, cost) VALUES('e','o',100);
- }
+
+do_execsql_test fuzzer1-2.1 {
+ SELECT word, distance FROM f1 WHERE word MATCH 'abcde'
+} {
+ abcde 0 abcda 1 ebcde 10
+ ebcda 11 abcdo 100 ebcdo 110
+ obcde 110 obcda 111 obcdo 210
+}
+
+do_execsql_test fuzzer1-2.4 {
+ INSERT INTO f1_rules(ruleset, cfrom, cto, cost) VALUES(1,'b','x',1);
+ INSERT INTO f1_rules(ruleset, cfrom, cto, cost) VALUES(1,'d','y',10);
+ INSERT INTO f1_rules(ruleset, cfrom, cto, cost) VALUES(1,'y','z',100);
+
+ DROP TABLE f1;
+ CREATE VIRTUAL TABLE f1 USING fuzzer(f1_rules);
} {}
-do_test fuzzer1-1.3 {
+do_execsql_test fuzzer1-2.5 {
+ SELECT word, distance FROM f1 WHERE word MATCH 'abcde'
+} {
+ abcde 0 abcda 1 ebcde 10
+ ebcda 11 abcdo 100 ebcdo 110
+ obcde 110 obcda 111 obcdo 210
+}
+
+do_execsql_test fuzzer1-2.6 {
+ SELECT word, distance FROM f1 WHERE word MATCH 'abcde' AND ruleset=0
+} {
+ abcde 0 abcda 1 ebcde 10
+ ebcda 11 abcdo 100 ebcdo 110
+ obcde 110 obcda 111 obcdo 210
+}
+
+do_execsql_test fuzzer1-2.7 {
+ SELECT word, distance FROM f1 WHERE word MATCH 'abcde' AND ruleset=1
+} {
+ abcde 0 axcde 1 abcye 10
+ axcye 11 abcze 110 axcze 111
+}
+
+do_test fuzzer1-1.8 {
db eval {
- SELECT word, distance FROM f1 WHERE word MATCH 'abcde'
+ SELECT word, distance FROM f1 WHERE word MATCH 'abcde' AND distance<100
+ }
+} {abcde 0 abcda 1 ebcde 10 ebcda 11}
+do_test fuzzer1-1.9 {
+ db eval {
+ SELECT word, distance FROM f1 WHERE word MATCH 'abcde' AND distance<=100
+ }
+} {abcde 0 abcda 1 ebcde 10 ebcda 11 abcdo 100}
+do_test fuzzer1-1.10 {
+ db eval {
+ SELECT word, distance FROM f1
+ WHERE word MATCH 'abcde' AND distance<100 AND ruleset=0
+ }
+} {abcde 0 abcda 1 ebcde 10 ebcda 11}
+do_test fuzzer1-1.11 {
+ db eval {
+ SELECT word, distance FROM f1
+ WHERE word MATCH 'abcde' AND distance<=100 AND ruleset=0
+ }
+} {abcde 0 abcda 1 ebcde 10 ebcda 11 abcdo 100}
+do_test fuzzer1-1.12 {
+ db eval {
+ SELECT word, distance FROM f1
+ WHERE word MATCH 'abcde' AND distance<11 AND ruleset=1
}
-} {abcde 0 abcda 1 ebcde 10 ebcda 11 abcdo 100 ebcdo 110 obcde 110 obcda 111 obcdo 210}
+} {abcde 0 axcde 1 abcye 10}
+do_test fuzzer1-1.13 {
+ db eval {
+ SELECT word, distance FROM f1
+ WHERE word MATCH 'abcde' AND distance<=11 AND ruleset=1
+ }
+} {abcde 0 axcde 1 abcye 10 axcye 11}
+do_test fuzzer1-1.14 {
+ catchsql {INSERT INTO f1 VALUES(1)}
+} {1 {table f1 may not be modified}}
+do_test fuzzer1-1.15 {
+ catchsql {DELETE FROM f1}
+} {1 {table f1 may not be modified}}
+do_test fuzzer1-1.16 {
+ catchsql {UPDATE f1 SET rowid=rowid+10000}
+} {1 {table f1 may not be modified}}
+
do_test fuzzer1-2.0 {
execsql {
- CREATE VIRTUAL TABLE temp.f2 USING fuzzer;
-- costs based on English letter frequencies
- INSERT INTO f2(cFrom,cTo,cost) VALUES('a','e',24);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('a','o',47);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('a','u',50);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('e','a',23);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('e','i',33);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('e','o',37);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('i','e',33);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('i','y',33);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('o','a',41);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('o','e',46);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('o','u',57);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('u','o',58);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('y','i',33);
-
- INSERT INTO f2(cFrom,cTo,cost) VALUES('t','th',70);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('th','t',66);
+ CREATE TEMP TABLE f2_rules(ruleset DEFAULT 0, cFrom, cTo, cost);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('a','e',24);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('a','o',47);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('a','u',50);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('e','a',23);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('e','i',33);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('e','o',37);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('i','e',33);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('i','y',33);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('o','a',41);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('o','e',46);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('o','u',57);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('u','o',58);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('y','i',33);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('t','th',70);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('th','t',66);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('a','',84);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','b',106);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('b','',106);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','c',94);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('c','',94);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','d',89);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('d','',89);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','e',83);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('e','',83);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','f',97);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('f','',97);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','g',99);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('g','',99);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','h',86);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('h','',86);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','i',85);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('i','',85);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','j',120);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('j','',120);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','k',120);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('k','',120);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','l',89);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('l','',89);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','m',96);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('m','',96);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','n',85);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('n','',85);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','o',85);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('o','',85);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','p',100);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('p','',100);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','q',120);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('q','',120);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','r',86);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('r','',86);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','s',86);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('s','',86);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','t',84);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('t','',84);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','u',94);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('u','',94);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','v',120);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('v','',120);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','w',96);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('w','',96);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','x',120);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('x','',120);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','y',100);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('y','',100);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('','z',120);
- INSERT INTO f2(cFrom,cTo,cost) VALUES('z','',120);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('a','',84);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','b',106);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('b','',106);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','c',94);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('c','',94);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','d',89);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('d','',89);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','e',83);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('e','',83);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','f',97);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('f','',97);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','g',99);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('g','',99);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','h',86);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('h','',86);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','i',85);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('i','',85);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','j',120);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('j','',120);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','k',120);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('k','',120);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','l',89);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('l','',89);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','m',96);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('m','',96);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','n',85);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('n','',85);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','o',85);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('o','',85);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','p',100);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('p','',100);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','q',120);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('q','',120);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','r',86);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('r','',86);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','s',86);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('s','',86);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','t',84);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('t','',84);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','u',94);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('u','',94);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','v',120);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('v','',120);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','w',96);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('w','',96);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','x',120);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('x','',120);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','y',100);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('y','',100);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('','z',120);
+ INSERT INTO f2_rules(cFrom,cTo,cost) VALUES('z','',120);
+ INSERT INTO f2_rules(ruleset,cFrom,cTo,cost)
+ SELECT 1, cFrom, cTo, 100 FROM f2_rules WHERE ruleset=0;
+ INSERT INTO f2_rules(ruleset,cFrom,cTo,cost)
+ SELECT 2, cFrom, cTo, 200-cost FROM f2_rules WHERE ruleset=0;
+ INSERT INTO f2_rules(ruleset,cFrom,cTo,cost)
+ SELECT 3, cFrom, cTo, cost FROM f2_rules WHERE ruleset=0;
+ INSERT INTO f2_rules(ruleset,cFrom,cTo,cost)
+ VALUES(3, 'mallard','duck',50),
+ (3, 'duck', 'mallard', 50),
+ (3, 'rock', 'stone', 50),
+ (3, 'stone', 'rock', 50);
+
+
+ CREATE VIRTUAL TABLE temp.f2 USING fuzzer(f2_rules);
-- Street names for the 28269 ZIPCODE.
--
@@ -1377,6 +1510,359 @@ do_test fuzzer1-2.3 {
AND streetname.n>=f2.word AND streetname.n<=(f2.word || x'F7BFBFBF')
}
} {{tyler finley} trailer taymouth steelewood tallia tallu talwyn thelema}
+do_test fuzzer1-2.4 {
+ execsql {
+ SELECT DISTINCT streetname.n
+ FROM f2 JOIN streetname
+ ON (streetname.n>=f2.word AND streetname.n<=(f2.word || 'zzzzzz'))
+ WHERE f2.word MATCH 'duck'
+ AND f2.distance<150
+ AND f2.ruleset=3
+ ORDER BY 1
+ }
+} {mallard {mallard cove} {mallard forest} {mallard grove} {mallard hill} {mallard park} {mallard ridge} {mallard view}}
+do_test fuzzer1-2.5 {
+ execsql {
+ SELECT DISTINCT streetname.n
+ FROM f2 JOIN streetname
+ ON (streetname.n>=f2.word AND streetname.n<=(f2.word || 'zzzzzz'))
+ WHERE f2.word MATCH 'duck'
+ AND f2.distance<150
+ AND f2.ruleset=2
+ ORDER BY 1
+ }
+} {}
+
+forcedelete test.db2
+do_execsql_test fuzzer1-4.1 {
+ ATTACH 'test.db2' AS aux;
+ CREATE TABLE aux.f3_rules(ruleset, cfrom, cto, cost);
+ INSERT INTO f3_rules(ruleset, cfrom, cto, cost) VALUES(0, 'x','y', 10);
+ INSERT INTO f3_rules(ruleset, cfrom, cto, cost) VALUES(1, 'a','b', 10);
+ CREATE VIRTUAL TABLE aux.f3 USING fuzzer(f3_rules);
+ SELECT word FROM f3 WHERE word MATCH 'ax'
+} {ax ay}
+
+#-------------------------------------------------------------------------
+#
+# 1.5.1 - Check things work with a fuzzer data table name that requires
+# quoting. Also that NULL entries in the "from" column of the
+# data table are treated as zero length strings ('').
+#
+# 1.5.2 - Check that no-op rules (i.e. C->C) are ignored. Test NULL in
+# the "to" column of a fuzzer data table.
+#
+# 1.5.3 - Test out-of-range values for the cost field of the data table.
+#
+# 1.5.4 - Test out-of-range values for the string fields of the data table.
+#
+# 1.5.5 - Test out-of-range values for the ruleset field of the data table.
+#
+do_execsql_test 5.1 {
+ CREATE TABLE "fuzzer [x] rules table"(a, b, c, d);
+ INSERT INTO "fuzzer [x] rules table" VALUES(0, NULL, 'abc', 10);
+ CREATE VIRTUAL TABLE x USING fuzzer('fuzzer [x] rules table');
+ SELECT word, distance FROM x WHERE word MATCH '123' LIMIT 4;
+} {123 0 abc123 10 1abc23 10 12abc3 10}
+
+do_execsql_test 5.2 {
+ DELETE FROM "fuzzer [x] rules table";
+ INSERT INTO "fuzzer [x] rules table" VALUES(0, 'x', NULL, 20);
+ INSERT INTO "fuzzer [x] rules table" VALUES(0, NULL, NULL, 10);
+ INSERT INTO "fuzzer [x] rules table" VALUES(0, 'x', 'x', 10);
+ DROP TABLE x;
+ CREATE VIRTUAL TABLE x USING fuzzer('fuzzer [x] rules table');
+
+ SELECT word, distance FROM x WHERE word MATCH 'xx';
+} {xx 0 x 20 {} 40}
+
+do_execsql_test 5.3.1 {
+ DROP TABLE IF EXISTS x;
+ INSERT INTO "fuzzer [x] rules table" VALUES(0, 'c', 'd', 1001);
+}
+do_catchsql_test 5.3.2 {
+ CREATE VIRTUAL TABLE x USING fuzzer('fuzzer [x] rules table');
+} {1 {fuzzer: cost must be between 1 and 1000}}
+
+do_execsql_test 5.3.3 {
+ DROP TABLE IF EXISTS x;
+ DELETE FROM "fuzzer [x] rules table";
+ INSERT INTO "fuzzer [x] rules table" VALUES(0, 'd', 'c', 0);
+}
+do_catchsql_test 5.3.4 {
+ CREATE VIRTUAL TABLE x USING fuzzer('fuzzer [x] rules table');
+} {1 {fuzzer: cost must be between 1 and 1000}}
+
+do_execsql_test 5.3.5 {
+ DROP TABLE IF EXISTS x;
+ DELETE FROM "fuzzer [x] rules table";
+ INSERT INTO "fuzzer [x] rules table" VALUES(0, 'd', 'c', -20);
+}
+do_catchsql_test 5.3.6 {
+ CREATE VIRTUAL TABLE x USING fuzzer('fuzzer [x] rules table');
+} {1 {fuzzer: cost must be between 1 and 1000}}
+
+do_execsql_test 5.4.1 {
+ DROP TABLE IF EXISTS x;
+ DELETE FROM "fuzzer [x] rules table";
+ INSERT INTO "fuzzer [x] rules table" VALUES(
+ 0, 'x', '12345678901234567890123456789012345678901234567890', 2
+ );
+ CREATE VIRTUAL TABLE x USING fuzzer('fuzzer [x] rules table');
+ SELECT word FROM x WHERE word MATCH 'x';
+} {x 12345678901234567890123456789012345678901234567890}
+
+do_execsql_test 5.4.2 {
+ DROP TABLE IF EXISTS x;
+ DELETE FROM "fuzzer [x] rules table";
+ INSERT INTO "fuzzer [x] rules table" VALUES(
+ 0, 'x', '123456789012345678901234567890123456789012345678901', 2
+ );
+}
+do_catchsql_test 5.4.3 {
+ CREATE VIRTUAL TABLE x USING fuzzer('fuzzer [x] rules table');
+} {1 {fuzzer: maximum string length is 50}}
+
+do_execsql_test 5.4.4 {
+ DROP TABLE IF EXISTS x;
+ DELETE FROM "fuzzer [x] rules table";
+ INSERT INTO "fuzzer [x] rules table" VALUES(
+ 0, '123456789012345678901234567890123456789012345678901', 'x', 2
+ );
+}
+do_catchsql_test 5.4.5 {
+ CREATE VIRTUAL TABLE x USING fuzzer('fuzzer [x] rules table');
+} {1 {fuzzer: maximum string length is 50}}
+
+do_execsql_test 5.5.1 {
+ DROP TABLE IF EXISTS x;
+ DELETE FROM "fuzzer [x] rules table";
+ INSERT INTO "fuzzer [x] rules table" VALUES(-1, 'x', 'y', 2);
+}
+do_catchsql_test 5.5.2 {
+ CREATE VIRTUAL TABLE x USING fuzzer('fuzzer [x] rules table');
+} {1 {fuzzer: ruleset must be between 0 and 2147483647}}
+
+do_execsql_test 5.5.3 {
+ DROP TABLE IF EXISTS x;
+ DELETE FROM "fuzzer [x] rules table";
+ INSERT INTO "fuzzer [x] rules table" VALUES((1<<32)+100, 'x', 'y', 2);
+}
+do_catchsql_test 5.5.4 {
+ CREATE VIRTUAL TABLE x USING fuzzer('fuzzer [x] rules table');
+} {1 {fuzzer: ruleset must be between 0 and 2147483647}}
+
+#-------------------------------------------------------------------------
+# This test uses a fuzzer table with many rules. There is one rule to
+# map each possible two character string, where characters are lower-case
+# letters used in the English language, to all other possible two character
+# strings. In total, (26^4)-(26^2) mappings (the subtracted term represents
+# the no-op mappings discarded automatically by the fuzzer).
+#
+#
+do_execsql_test 6.1.1 {
+ DROP TABLE IF EXISTS x1;
+ DROP TABLE IF EXISTS x1_rules;
+ CREATE TABLE x1_rules(ruleset, cFrom, cTo, cost);
+}
+puts "This test is slow - perhaps around 7 seconds on an average pc"
+do_test 6.1.2 {
+ set LETTERS {a b c d e f g h i j k l m n o p q r s t u v w x y z}
+ set cost 1
+ db transaction {
+ foreach c1 $LETTERS {
+ foreach c2 $LETTERS {
+ foreach c3 $LETTERS {
+ foreach c4 $LETTERS {
+ db eval {INSERT INTO x1_rules VALUES(0, $c1||$c2, $c3||$c4, $cost)}
+ set cost [expr ($cost%1000) + 1]
+ }
+ }
+ }
+ }
+ db eval {UPDATE x1_rules SET cost = 20 WHERE cost<20 AND cFrom!='xx'}
+ }
+} {}
+
+do_execsql_test 6.2 {
+ SELECT count(*) FROM x1_rules WHERE cTo!=cFrom;
+} [expr 26*26*26*26 - 26*26]
+
+do_execsql_test 6.2.1 {
+ CREATE VIRTUAL TABLE x1 USING fuzzer(x1_rules);
+ SELECT word FROM x1 WHERE word MATCH 'xx' LIMIT 10;
+} {xx hw hx hy hz ia ib ic id ie}
+do_execsql_test 6.2.2 {
+ SELECT cTo FROM x1_rules WHERE cFrom='xx'
+ ORDER BY cost asc, rowid asc LIMIT 9;
+} {hw hx hy hz ia ib ic id ie}
+
+#-------------------------------------------------------------------------
+# Test using different types of quotes with CREATE VIRTUAL TABLE
+# arguments.
+#
+do_execsql_test 7.1 {
+ CREATE TABLE [x2 "rules] (a, b, c, d);
+ INSERT INTO [x2 "rules] VALUES(0, 'a', 'b', 5);
+}
+foreach {tn sql} {
+ 1 { CREATE VIRTUAL TABLE x2 USING fuzzer( [x2 "rules] ) }
+ 2 { CREATE VIRTUAL TABLE x2 USING fuzzer( "x2 ""rules" ) }
+ 3 { CREATE VIRTUAL TABLE x2 USING fuzzer( 'x2 "rules' ) }
+ 4 { CREATE VIRTUAL TABLE x2 USING fuzzer( `x2 "rules` ) }
+} {
+ do_execsql_test 7.2.$tn.1 { DROP TABLE IF EXISTS x2 }
+ do_execsql_test 7.2.$tn.2 $sql
+ do_execsql_test 7.2.$tn.3 {
+ SELECT word FROM x2 WHERE word MATCH 'aaa'
+ } {aaa baa aba aab bab abb bba bbb}
+}
+
+#-------------------------------------------------------------------------
+# Test using a fuzzer table in different contexts.
+#
+do_execsql_test 8.1 {
+ CREATE TABLE x3_rules(rule_set, cFrom, cTo, cost);
+ INSERT INTO x3_rules VALUES(2, 'a', 'x', 10);
+ INSERT INTO x3_rules VALUES(2, 'a', 'y', 9);
+ INSERT INTO x3_rules VALUES(2, 'a', 'z', 8);
+ CREATE VIRTUAL TABLE x3 USING fuzzer(x3_rules);
+}
+
+do_execsql_test 8.2.1 {
+ SELECT cFrom, cTo, word
+ FROM x3_rules CROSS JOIN x3
+ WHERE word MATCH 'a' AND cost=distance AND ruleset=2;
+} {a x x a y y a z z}
+
+do_execsql_test 8.2.2 {
+ SELECT cFrom, cTo, word
+ FROM x3 CROSS JOIN x3_rules
+ WHERE word MATCH 'a' AND cost=distance AND ruleset=2;
+} {a z z a y y a x x}
+
+do_execsql_test 8.2.3 {
+ SELECT cFrom, cTo, word
+ FROM x3_rules, x3
+ WHERE word MATCH 'a' AND cost=distance AND ruleset=2;
+} {a z z a y y a x x}
+
+do_execsql_test 8.2.4 {
+ SELECT cFrom, cTo, word
+ FROM x3, x3_rules
+ WHERE word MATCH 'a' AND cost=distance AND ruleset=2;
+} {a z z a y y a x x}
+
+do_execsql_test 8.2.5 {
+ CREATE INDEX i1 ON x3_rules(cost);
+ SELECT cFrom, cTo, word
+ FROM x3_rules, x3
+ WHERE word MATCH 'a' AND cost=distance AND ruleset=2;
+} {a z z a y y a x x}
+
+do_execsql_test 8.2.5 {
+ SELECT word FROM x3_rules, x3 WHERE word MATCH x3_rules.cFrom AND ruleset=2;
+} {a z y x a z y x a z y x}
+
+do_execsql_test 8.2.6 {
+ SELECT word FROM x3_rules, x3
+ WHERE word MATCH x3_rules.cFrom
+ AND ruleset=2
+ AND x3_rules.cost=8;
+} {a z y x}
+
+do_execsql_test 8.2.7 {
+ CREATE TABLE t1(a, b);
+ CREATE INDEX i2 ON t1(b);
+ SELECT word, distance FROM x3, t1
+ WHERE x3.word MATCH t1.a AND ruleset=2 AND distance=t1.b;
+} {}
+
+do_execsql_test 8.2.8 {
+ INSERT INTO x3_rules VALUES(1, 'a', 't', 5);
+ INSERT INTO x3_rules VALUES(1, 'a', 'u', 4);
+ INSERT INTO x3_rules VALUES(1, 'a', 'v', 3);
+ DROP TABLE x3;
+ CREATE VIRTUAL TABLE x3 USING fuzzer(x3_rules);
+ SELECT * FROM x3_rules;
+} {
+ 2 a x 10
+ 2 a y 9
+ 2 a z 8
+ 1 a t 5
+ 1 a u 4
+ 1 a v 3
+}
+
+do_catchsql_test 8.2.9 {
+ SELECT word FROM x3 WHERE ruleset=2 AND word MATCH 'a' AND WORD MATCH 'b';
+} {1 {unable to use function MATCH in the requested context}}
+
+do_execsql_test 8.2.10 {
+ SELECT word FROM x3 WHERE ruleset=1 AND word MATCH 'a'
+} {a v u t}
+
+# The term "ruleset<=1" is not handled by the fuzzer module. Instead, it
+# is handled by SQLite, which assumes that all rows have a NULL value in
+# the ruleset column. Since NULL<=1 is never true, this query returns
+# no rows.
+do_execsql_test 8.2.11 {
+ SELECT word FROM x3 WHERE ruleset<=1 AND word MATCH 'a'
+} {}
+
+do_execsql_test 8.2.12 {
+ SELECT word FROM x3 WHERE ruleset=1 AND word MATCH 'a' ORDER BY distance ASC;
+} {a v u t}
+
+do_execsql_test 8.2.13 {
+ SELECT word FROM x3 WHERE ruleset=1 AND word MATCH 'a' ORDER BY distance DESC;
+} {t u v a}
+
+do_execsql_test 8.2.13 {
+ SELECT word FROM x3 WHERE ruleset=1 AND word MATCH 'a' ORDER BY word ASC;
+} {a t u v}
+
+do_execsql_test 8.2.14 {
+ SELECT word FROM x3 WHERE ruleset=1 AND word MATCH 'a' ORDER BY word DESC;
+} {v u t a}
+
+#-------------------------------------------------------------------------
+#
+do_execsql_test 9.1 {
+ CREATE TABLE x4_rules(a, b, c, d);
+ INSERT INTO x4_rules VALUES(0, 'a', 'b', 10);
+ INSERT INTO x4_rules VALUES(0, 'a', 'c', 11);
+ INSERT INTO x4_rules VALUES(0, 'bx', 'zz', 20);
+ INSERT INTO x4_rules VALUES(0, 'cx', 'yy', 15);
+ INSERT INTO x4_rules VALUES(0, 'zz', '!!', 50);
+ CREATE VIRTUAL TABLE x4 USING fuzzer(x4_rules);
+}
+
+do_execsql_test 9.2 {
+ SELECT word, distance FROM x4 WHERE word MATCH 'ax';
+} {ax 0 bx 10 cx 11 yy 26 zz 30 !! 80}
+
+
+do_execsql_test 10.1 {
+ CREATE TABLE x5_rules(a, b, c, d);
+ CREATE VIRTUAL TABLE x5 USING fuzzer(x5_rules);
+}
+
+do_execsql_test 10.2 {
+ SELECT word, distance FROM x5 WHERE word MATCH
+ 'aaaaaaaaaXaaaaaaaaaXaaaaaaaaaXaaaaaaaaaXaaaaaaaaa' ||
+ 'aaaaaaaaaXaaaaaaaaaXaaaaaaaaaXaaaaaaaaaXaaaaaaaaa' ||
+ 'aaaaaaaaaXaaaaaaaaaXaaaaaaaaaXaaaaaaaaaXaaaaaaaaa'
+} {}
+
+do_execsql_test 10.3 {
+ INSERT INTO x5_rules VALUES(0, 'a', '0.1.2.3.4.5.6.7.8.9.a', 1);
+ DROP TABLE x5;
+ CREATE VIRTUAL TABLE x5 USING fuzzer(x5_rules);
+ SELECT length(word) FROM x5 WHERE word MATCH 'a' LIMIT 50;
+} {1 21 41 61 81}
finish_test
+
+
diff --git a/test/fuzzerfault.test b/test/fuzzerfault.test
new file mode 100644
index 0000000..067da7f
--- /dev/null
+++ b/test/fuzzerfault.test
@@ -0,0 +1,92 @@
+# 2012 February 21
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for TCL interface to the
+# SQLite library.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+ifcapable !vtab { finish_test ; return }
+set ::testprefix fuzzerfault
+
+register_fuzzer_module db
+
+do_test 1-pre1 {
+ execsql {
+ CREATE TABLE x1_rules(ruleset, cFrom, cTo, cost);
+ INSERT INTO x1_rules VALUES(0, 'a', 'b', 1);
+ INSERT INTO x1_rules VALUES(0, 'a', 'c', 2);
+ INSERT INTO x1_rules VALUES(0, 'a', 'd', 3);
+ }
+ faultsim_save_and_close
+} {}
+do_faultsim_test 1 -prep {
+ faultsim_restore_and_reopen
+ register_fuzzer_module db
+} -body {
+ execsql {
+ CREATE VIRTUAL TABLE x1 USING fuzzer(x1_rules);
+ SELECT word FROM x1 WHERE word MATCH 'xax';
+ }
+} -test {
+ faultsim_test_result {0 {xax xbx xcx xdx}} \
+ {1 {vtable constructor failed: x1}}
+}
+
+do_test 2-pre1 {
+ faultsim_delete_and_reopen
+ register_fuzzer_module db
+ execsql {
+ CREATE TABLE x2_rules(ruleset, cFrom, cTo, cost);
+ INSERT INTO x2_rules VALUES(0, 'a', 'x', 1);
+ INSERT INTO x2_rules VALUES(0, 'b', 'x', 2);
+ INSERT INTO x2_rules VALUES(0, 'c', 'x', 3);
+ CREATE VIRTUAL TABLE x2 USING fuzzer(x2_rules);
+ }
+ faultsim_save_and_close
+} {}
+
+do_faultsim_test 2 -prep {
+ faultsim_restore_and_reopen
+ register_fuzzer_module db
+} -body {
+ execsql {
+ SELECT count(*) FROM x2 WHERE word MATCH 'abc';
+ }
+} -test {
+ faultsim_test_result {0 8} {1 {vtable constructor failed: x2}}
+}
+
+do_test 3-pre1 {
+ faultsim_delete_and_reopen
+ execsql {
+ CREATE TABLE x1_rules(ruleset, cFrom, cTo, cost);
+ INSERT INTO x1_rules VALUES(0, 'a',
+ '123456789012345678901234567890a1234567890123456789', 10
+ );
+ }
+ faultsim_save_and_close
+} {}
+
+do_faultsim_test 3 -prep {
+ faultsim_restore_and_reopen
+ register_fuzzer_module db
+} -body {
+ execsql {
+ CREATE VIRTUAL TABLE x1 USING fuzzer(x1_rules);
+ SELECT count(*) FROM (SELECT * FROM x1 WHERE word MATCH 'a' LIMIT 2);
+ }
+} -test {
+ faultsim_test_result {0 2} {1 {vtable constructor failed: x1}}
+}
+
+
+finish_test
diff --git a/test/in.test b/test/in.test
index 2c38a0f..3b23f04 100644
--- a/test/in.test
+++ b/test/in.test
@@ -258,17 +258,29 @@ do_test in-7.5 {
SELECT a FROM t1 WHERE a IN (5) AND b NOT IN ();
}
} {5}
-do_test in-7.6 {
+do_test in-7.6.1 {
execsql {
SELECT a FROM ta WHERE a IN ();
}
} {}
+do_test in-7.6.2 {
+ db status step
+} {0}
do_test in-7.7 {
execsql {
SELECT a FROM ta WHERE a NOT IN ();
}
} {1 2 3 4 6 8 10}
+do_test in-7.8.1 {
+ execsql {
+ SELECT * FROM ta LEFT JOIN tb ON (ta.b=tb.b) WHERE ta.a IN ();
+ }
+} {}
+do_test in-7.8.2 {
+ db status step
+} {0}
+
do_test in-8.1 {
execsql {
SELECT b FROM t1 WHERE a IN ('hello','there')
@@ -431,6 +443,7 @@ do_test in-12.9 {
} {1 {SELECTs to the left and right of INTERSECT do not have the same number of result columns}}
}
+ifcapable compound {
do_test in-12.10 {
catchsql {
SELECT * FROM t2 WHERE a IN (
@@ -459,6 +472,7 @@ do_test in-12.13 {
);
}
} {1 {only a single result allowed for a SELECT that is part of an expression}}
+}; #ifcapable compound
#------------------------------------------------------------------------
diff --git a/test/incrblob.test b/test/incrblob.test
index 388c4ba..1880128 100644
--- a/test/incrblob.test
+++ b/test/incrblob.test
@@ -473,15 +473,9 @@ if {[permutation] != "memsubsys1"} {
flush $::blob
} {}
- # At this point rollback should be illegal (because
- # there is an open blob channel). But commit is also illegal because
- # the open blob is read-write.
+ # At this point commit should be illegal (because
+ # there is an open blob channel).
#
- do_test incrblob-6.10 {
- catchsql {
- ROLLBACK;
- } db2
- } {1 {cannot rollback transaction - SQL statements in progress}}
do_test incrblob-6.11 {
catchsql {
COMMIT;
diff --git a/test/incrblob4.test b/test/incrblob4.test
new file mode 100644
index 0000000..a96356b
--- /dev/null
+++ b/test/incrblob4.test
@@ -0,0 +1,90 @@
+# 2012 March 23
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+#
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+ifcapable {!incrblob} { finish_test ; return }
+set testprefix incrblob4
+
+proc create_t1 {} {
+ execsql {
+ PRAGMA page_size = 1024;
+ CREATE TABLE t1(k INTEGER PRIMARY KEY, v);
+ }
+}
+
+proc populate_t1 {} {
+ set data [list a b c d e f g h i j k l m n o p q r s t u v w x y z]
+ foreach d $data {
+ set blob [string repeat $d 900]
+ execsql { INSERT INTO t1(v) VALUES($blob) }
+ }
+}
+
+
+do_test 1.1 {
+ create_t1
+ populate_t1
+} {}
+
+do_test 1.2 {
+ set blob [db incrblob t1 v 5]
+ read $blob 10
+} {eeeeeeeeee}
+
+do_test 1.3 {
+ execsql { DELETE FROM t1 }
+ populate_t1
+} {}
+
+
+
+do_test 2.1 {
+ reset_db
+ create_t1
+ populate_t1
+} {}
+
+do_test 2.2 {
+ set blob [db incrblob t1 v 10]
+ read $blob 10
+} {jjjjjjjjjj}
+
+do_test 2.3 {
+ set new [string repeat % 900]
+ execsql { DELETE FROM t1 WHERE k=10 }
+ execsql { DELETE FROM t1 WHERE k=9 }
+ execsql { INSERT INTO t1(v) VALUES($new) }
+} {}
+
+
+
+do_test 3.1 {
+ reset_db
+ create_t1
+ populate_t1
+} {}
+
+do_test 3.2 {
+ set blob [db incrblob t1 v 20]
+ read $blob 10
+} {tttttttttt}
+
+do_test 3.3 {
+ set new [string repeat % 900]
+ execsql { UPDATE t1 SET v = $new WHERE k = 20 }
+ execsql { DELETE FROM t1 WHERE k=19 }
+ execsql { INSERT INTO t1(v) VALUES($new) }
+} {}
+
+finish_test
+
diff --git a/test/incrvacuum2.test b/test/incrvacuum2.test
index e67a086..6e8e1be 100644
--- a/test/incrvacuum2.test
+++ b/test/incrvacuum2.test
@@ -191,7 +191,7 @@ ifcapable wal {
PRAGMA wal_checkpoint;
}
file size test.db-wal
- } {1640}
+ } [expr {32+2*(512+24)}]
do_test 4.3 {
db close
@@ -205,7 +205,7 @@ ifcapable wal {
if {$newsz>$maxsz} {set maxsz $newsz}
}
set maxsz
- } {2176}
+ } [expr {32+3*(512+24)}]
}
finish_test
diff --git a/test/insert.test b/test/insert.test
index 9ea9cd7..e00b9a8 100644
--- a/test/insert.test
+++ b/test/insert.test
@@ -386,6 +386,23 @@ do_test insert-9.2 {
}
} {1 1 2 2 3 3 12 101 13 102 16 103}
+# Multiple VALUES clauses
+#
+ifcapable compound {
+ do_test insert-10.1 {
+ execsql {
+ CREATE TABLE t10(a,b,c);
+ INSERT INTO t10 VALUES(1,2,3), (4,5,6), (7,8,9);
+ SELECT * FROM t10;
+ }
+ } {1 2 3 4 5 6 7 8 9}
+ do_test insert-10.2 {
+ catchsql {
+ INSERT INTO t10 VALUES(11,12,13), (14,15);
+ }
+ } {1 {all VALUES must have the same number of terms}}
+}
+
integrity_check insert-99.0
finish_test
diff --git a/test/insert4.test b/test/insert4.test
index cb02b9d..f4a45c1 100644
--- a/test/insert4.test
+++ b/test/insert4.test
@@ -386,4 +386,179 @@ ifcapable foreignkey {
} {1}
}
+# Ticket [676bc02b87176125635cb174d110b431581912bb]
+# Make sure INTEGER PRIMARY KEY ON CONFLICT ... works with the xfer
+# optimization.
+#
+do_test insert4-8.1 {
+ execsql {
+ DROP TABLE IF EXISTS t1;
+ DROP TABLE IF EXISTS t2;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY ON CONFLICT REPLACE, b);
+ CREATE TABLE t2(x INTEGER PRIMARY KEY ON CONFLICT REPLACE, y);
+ INSERT INTO t1 VALUES(1,2);
+ INSERT INTO t2 VALUES(1,3);
+ INSERT INTO t1 SELECT * FROM t2;
+ SELECT * FROM t1;
+ }
+} {1 3}
+do_test insert4-8.2 {
+ execsql {
+ DROP TABLE IF EXISTS t1;
+ DROP TABLE IF EXISTS t2;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY ON CONFLICT REPLACE, b);
+ CREATE TABLE t2(x, y);
+ INSERT INTO t1 VALUES(1,2);
+ INSERT INTO t2 VALUES(1,3);
+ INSERT INTO t1 SELECT * FROM t2;
+ SELECT * FROM t1;
+ }
+} {1 3}
+do_test insert4-8.3 {
+ execsql {
+ DROP TABLE IF EXISTS t1;
+ DROP TABLE IF EXISTS t2;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY ON CONFLICT IGNORE, b);
+ CREATE TABLE t2(x INTEGER PRIMARY KEY ON CONFLICT IGNORE, y);
+ INSERT INTO t1 VALUES(1,2);
+ INSERT INTO t2 VALUES(1,3);
+ INSERT INTO t1 SELECT * FROM t2;
+ SELECT * FROM t1;
+ }
+} {1 2}
+do_test insert4-8.4 {
+ execsql {
+ DROP TABLE IF EXISTS t1;
+ DROP TABLE IF EXISTS t2;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY ON CONFLICT IGNORE, b);
+ CREATE TABLE t2(x, y);
+ INSERT INTO t1 VALUES(1,2);
+ INSERT INTO t2 VALUES(1,3);
+ INSERT INTO t1 SELECT * FROM t2;
+ SELECT * FROM t1;
+ }
+} {1 2}
+do_test insert4-8.5 {
+ execsql {
+ DROP TABLE IF EXISTS t1;
+ DROP TABLE IF EXISTS t2;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY ON CONFLICT FAIL, b);
+ CREATE TABLE t2(x INTEGER PRIMARY KEY ON CONFLICT FAIL, y);
+ INSERT INTO t1 VALUES(1,2);
+ INSERT INTO t2 VALUES(-99,100);
+ INSERT INTO t2 VALUES(1,3);
+ SELECT * FROM t1;
+ }
+ catchsql {
+ INSERT INTO t1 SELECT * FROM t2;
+ }
+} {1 {PRIMARY KEY must be unique}}
+do_test insert4-8.6 {
+ execsql {
+ SELECT * FROM t1;
+ }
+} {-99 100 1 2}
+do_test insert4-8.7 {
+ execsql {
+ DROP TABLE IF EXISTS t1;
+ DROP TABLE IF EXISTS t2;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY ON CONFLICT ABORT, b);
+ CREATE TABLE t2(x INTEGER PRIMARY KEY ON CONFLICT ABORT, y);
+ INSERT INTO t1 VALUES(1,2);
+ INSERT INTO t2 VALUES(-99,100);
+ INSERT INTO t2 VALUES(1,3);
+ SELECT * FROM t1;
+ }
+ catchsql {
+ INSERT INTO t1 SELECT * FROM t2;
+ }
+} {1 {PRIMARY KEY must be unique}}
+do_test insert4-8.8 {
+ execsql {
+ SELECT * FROM t1;
+ }
+} {1 2}
+do_test insert4-8.9 {
+ execsql {
+ DROP TABLE IF EXISTS t1;
+ DROP TABLE IF EXISTS t2;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY ON CONFLICT ROLLBACK, b);
+ CREATE TABLE t2(x INTEGER PRIMARY KEY ON CONFLICT ROLLBACK, y);
+ INSERT INTO t1 VALUES(1,2);
+ INSERT INTO t2 VALUES(-99,100);
+ INSERT INTO t2 VALUES(1,3);
+ SELECT * FROM t1;
+ }
+ catchsql {
+ BEGIN;
+ INSERT INTO t1 VALUES(2,3);
+ INSERT INTO t1 SELECT * FROM t2;
+ }
+} {1 {PRIMARY KEY must be unique}}
+do_test insert4-8.10 {
+ catchsql {COMMIT}
+} {1 {cannot commit - no transaction is active}}
+do_test insert4-8.11 {
+ execsql {
+ SELECT * FROM t1;
+ }
+} {1 2}
+
+do_test insert4-8.21 {
+ execsql {
+ DROP TABLE IF EXISTS t1;
+ DROP TABLE IF EXISTS t2;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY ON CONFLICT REPLACE, b);
+ CREATE TABLE t2(x INTEGER PRIMARY KEY ON CONFLICT REPLACE, y);
+ INSERT INTO t2 VALUES(1,3);
+ INSERT INTO t1 SELECT * FROM t2;
+ SELECT * FROM t1;
+ }
+} {1 3}
+do_test insert4-8.22 {
+ execsql {
+ DROP TABLE IF EXISTS t1;
+ DROP TABLE IF EXISTS t2;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY ON CONFLICT IGNORE, b);
+ CREATE TABLE t2(x INTEGER PRIMARY KEY ON CONFLICT IGNORE, y);
+ INSERT INTO t2 VALUES(1,3);
+ INSERT INTO t1 SELECT * FROM t2;
+ SELECT * FROM t1;
+ }
+} {1 3}
+do_test insert4-8.23 {
+ execsql {
+ DROP TABLE IF EXISTS t1;
+ DROP TABLE IF EXISTS t2;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY ON CONFLICT ABORT, b);
+ CREATE TABLE t2(x INTEGER PRIMARY KEY ON CONFLICT ABORT, y);
+ INSERT INTO t2 VALUES(1,3);
+ INSERT INTO t1 SELECT * FROM t2;
+ SELECT * FROM t1;
+ }
+} {1 3}
+do_test insert4-8.24 {
+ execsql {
+ DROP TABLE IF EXISTS t1;
+ DROP TABLE IF EXISTS t2;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY ON CONFLICT FAIL, b);
+ CREATE TABLE t2(x INTEGER PRIMARY KEY ON CONFLICT FAIL, y);
+ INSERT INTO t2 VALUES(1,3);
+ INSERT INTO t1 SELECT * FROM t2;
+ SELECT * FROM t1;
+ }
+} {1 3}
+do_test insert4-8.25 {
+ execsql {
+ DROP TABLE IF EXISTS t1;
+ DROP TABLE IF EXISTS t2;
+ CREATE TABLE t1(a INTEGER PRIMARY KEY ON CONFLICT ROLLBACK, b);
+ CREATE TABLE t2(x INTEGER PRIMARY KEY ON CONFLICT ROLLBACK, y);
+ INSERT INTO t2 VALUES(1,3);
+ INSERT INTO t1 SELECT * FROM t2;
+ SELECT * FROM t1;
+ }
+} {1 3}
+
+
finish_test
diff --git a/test/io.test b/test/io.test
index 58caeee..9363b0c 100644
--- a/test/io.test
+++ b/test/io.test
@@ -146,11 +146,15 @@ do_test io-2.2 {
# written because page 1 - the change-counter page - is written using
# an out-of-band method that bypasses the write counter.
#
+# UPDATE: As of [05f98d4eec] (adding SQLITE_DBSTATUS_CACHE_WRITE), the
+# second write is also counted. So this now reports two writes and a
+# single fsync.
+#
sqlite3_simulate_device -char atomic
do_test io-2.3 {
execsql { INSERT INTO abc VALUES(3, 4) }
list [nWrite db] [nSync]
-} {1 1}
+} {2 1}
# Test that the journal file is not created and the change-counter is
# updated when the atomic-write optimization is used.
diff --git a/test/ioerr2.test b/test/ioerr2.test
index 325c0ba..5150ace 100644
--- a/test/ioerr2.test
+++ b/test/ioerr2.test
@@ -130,7 +130,7 @@ do_test ioerr2-5 {
}
} msg]
list $rc $msg
-} {1 {callback requested query abort}}
+} {1 {abort due to ROLLBACK}}
if {$::tcl_platform(platform) == "unix"} {
# Cause the call to xAccess used by [pragma temp_store_directory] to
diff --git a/test/join6.test b/test/join6.test
index 4f65dcb..7fbf508 100644
--- a/test/join6.test
+++ b/test/join6.test
@@ -124,26 +124,28 @@ do_test join6-3.6 {
}
} {1 91 92 3 93 5 91 2 93 94 4 95 6 99}
-do_test join6-4.1 {
- execsql {
- SELECT * FROM
- (SELECT 1 AS a, 91 AS x, 92 AS y UNION SELECT 2, 93, 94)
- NATURAL JOIN t2 NATURAL JOIN t3
- }
-} {1 91 92 3 93 5}
-do_test join6-4.2 {
- execsql {
- SELECT * FROM t1 NATURAL JOIN
- (SELECT 3 AS b, 92 AS y, 93 AS z UNION SELECT 4, 94, 95)
- NATURAL JOIN t3
- }
-} {1 91 92 3 93 5}
-do_test join6-4.3 {
- execsql {
- SELECT * FROM t1 NATURAL JOIN t2 NATURAL JOIN
- (SELECT 5 AS c, 91 AS x, 93 AS z UNION SELECT 6, 99, 95)
- }
-} {1 91 92 3 93 5}
+ifcapable compound {
+ do_test join6-4.1 {
+ execsql {
+ SELECT * FROM
+ (SELECT 1 AS a, 91 AS x, 92 AS y UNION SELECT 2, 93, 94)
+ NATURAL JOIN t2 NATURAL JOIN t3
+ }
+ } {1 91 92 3 93 5}
+ do_test join6-4.2 {
+ execsql {
+ SELECT * FROM t1 NATURAL JOIN
+ (SELECT 3 AS b, 92 AS y, 93 AS z UNION SELECT 4, 94, 95)
+ NATURAL JOIN t3
+ }
+ } {1 91 92 3 93 5}
+ do_test join6-4.3 {
+ execsql {
+ SELECT * FROM t1 NATURAL JOIN t2 NATURAL JOIN
+ (SELECT 5 AS c, 91 AS x, 93 AS z UNION SELECT 6, 99, 95)
+ }
+ } {1 91 92 3 93 5}
+}
diff --git a/test/journal2.test b/test/journal2.test
index 25ce941..8f9b4d0 100644
--- a/test/journal2.test
+++ b/test/journal2.test
@@ -34,7 +34,7 @@ proc a_string {n} {
# characteristics flags to "SAFE_DELETE".
#
testvfs tvfs -default 1
-tvfs devchar undeletable_when_open
+tvfs devchar {undeletable_when_open powersafe_overwrite}
# Set up a hook so that each time a journal file is opened, closed or
# deleted, the method name ("xOpen", "xClose" or "xDelete") and the final
@@ -231,4 +231,3 @@ ifcapable wal {
tvfs delete
finish_test
-
diff --git a/test/journal3.test b/test/journal3.test
index f1bf89f..939cc27 100644
--- a/test/journal3.test
+++ b/test/journal3.test
@@ -22,7 +22,9 @@ source $testdir/malloc_common.tcl
#
if {$::tcl_platform(platform) == "unix"} {
- set umask [exec /bin/sh -c umask]
+ # Changed on 2012-02-13: umask is deliberately ignored for -wal, -journal,
+ # and -shm files.
+ #set umask [exec /bin/sh -c umask]
faultsim_delete_and_reopen
do_test journal3-1.1 { execsql { CREATE TABLE tx(y, z) } } {}
@@ -33,7 +35,8 @@ if {$::tcl_platform(platform) == "unix"} {
4 00755
} {
db close
- set effective [format %.5o [expr $permissions & ~$umask]]
+ #set effective [format %.5o [expr $permissions & ~$umask]]
+ set effective $permissions
do_test journal3-1.2.$tn.1 {
catch { forcedelete test.db-journal }
file attributes test.db -permissions $permissions
diff --git a/test/malloc5.test b/test/malloc5.test
index 71f56bb..c02f65e 100644
--- a/test/malloc5.test
+++ b/test/malloc5.test
@@ -352,7 +352,7 @@ do_test malloc5-6.3.1 {
do_test malloc5-6.3.2 {
# Try to release 7700 bytes. This should release all the
# non-dirty pages held by db2.
- sqlite3_release_memory [expr 7*1100]
+ sqlite3_release_memory [expr 7*1132]
list [nPage db] [nPage db2]
} {10 3}
do_test malloc5-6.3.3 {
@@ -366,7 +366,7 @@ do_test malloc5-6.3.4 {
# the rest of the db cache. But the db2 cache remains intact, because
# SQLite tries to avoid calling sync().
if {$::tcl_platform(wordSize)==8} {
- sqlite3_release_memory 10177
+ sqlite3_release_memory 10500
} else {
sqlite3_release_memory 9900
}
diff --git a/test/memsubsys1.test b/test/memsubsys1.test
index 7eecf08..e14a1b3 100644
--- a/test/memsubsys1.test
+++ b/test/memsubsys1.test
@@ -68,7 +68,7 @@ proc reset_highwater_marks {} {
sqlite3_status SQLITE_STATUS_PARSER_STACK 1
}
-set xtra_size 256
+set xtra_size 290
# Test 1: Both PAGECACHE and SCRATCH are shut down.
#
@@ -97,9 +97,11 @@ reset_highwater_marks
build_test_db memsubsys1-2 {PRAGMA page_size=1024}
#show_memstats
set MEMORY_MANAGEMENT $sqlite_options(memorymanage)
-do_test memsubsys1-2.3 {
- set pg_ovfl [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 0] 2]
-} [expr ($TEMP_STORE>1 || $MEMORY_MANAGEMENT==0)*1024]
+ifcapable !malloc_usable_size {
+ do_test memsubsys1-2.3 {
+ set pg_ovfl [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 0] 2]
+ } [expr ($TEMP_STORE>1 || $MEMORY_MANAGEMENT==0)*1024]
+}
do_test memsubsys1-2.4 {
set pg_used [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_USED 0] 2]
} 20
diff --git a/test/minmax4.test b/test/minmax4.test
new file mode 100644
index 0000000..0d8305b
--- /dev/null
+++ b/test/minmax4.test
@@ -0,0 +1,150 @@
+# 2012 February 02
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Test for queries of the form:
+#
+# SELECT p, max(q) FROM t1;
+#
+# Demonstration that the value returned for p is on the same row as
+# the maximum q.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+ifcapable !compound {
+ finish_test
+ return
+}
+
+do_test minmax4-1.1 {
+ db eval {
+ CREATE TABLE t1(p,q);
+ SELECT p, max(q) FROM t1;
+ }
+} {{} {}}
+do_test minmax4-1.2 {
+ db eval {
+ SELECT p, min(q) FROM t1;
+ }
+} {{} {}}
+do_test minmax4-1.3 {
+ db eval {
+ INSERT INTO t1 VALUES(1,2);
+ SELECT p, max(q) FROM t1;
+ }
+} {1 2}
+do_test minmax4-1.4 {
+ db eval {
+ SELECT p, min(q) FROM t1;
+ }
+} {1 2}
+do_test minmax4-1.5 {
+ db eval {
+ INSERT INTO t1 VALUES(3,4);
+ SELECT p, max(q) FROM t1;
+ }
+} {3 4}
+do_test minmax4-1.6 {
+ db eval {
+ SELECT p, min(q) FROM t1;
+ }
+} {1 2}
+do_test minmax4-1.7 {
+ db eval {
+ INSERT INTO t1 VALUES(5,0);
+ SELECT p, max(q) FROM t1;
+ }
+} {3 4}
+do_test minmax4-1.8 {
+ db eval {
+ SELECT p, min(q) FROM t1;
+ }
+} {5 0}
+do_test minmax4-1.9 {
+ db eval {
+ INSERT INTO t1 VALUES(6,1);
+ SELECT p, max(q) FROM t1;
+ }
+} {3 4}
+do_test minmax4-1.10 {
+ db eval {
+ SELECT p, min(q) FROM t1;
+ }
+} {5 0}
+do_test minmax4-1.11 {
+ db eval {
+ INSERT INTO t1 VALUES(7,NULL);
+ SELECT p, max(q) FROM t1;
+ }
+} {3 4}
+do_test minmax4-1.12 {
+ db eval {
+ SELECT p, min(q) FROM t1;
+ }
+} {5 0}
+do_test minmax4-1.13 {
+ db eval {
+ DELETE FROM t1 WHERE q IS NOT NULL;
+ SELECT p, max(q) FROM t1;
+ }
+} {7 {}}
+do_test minmax4-1.14 {
+ db eval {
+ SELECT p, min(q) FROM t1;
+ }
+} {7 {}}
+
+do_test minmax4-2.1 {
+ db eval {
+ CREATE TABLE t2(a,b,c);
+ INSERT INTO t2 VALUES
+ (1,null,2),
+ (1,2,3),
+ (1,1,4),
+ (2,3,5);
+ SELECT a, max(b), c FROM t2 GROUP BY a ORDER BY a;
+ }
+} {1 2 3 2 3 5}
+do_test minmax4-2.2 {
+ db eval {
+ SELECT a, min(b), c FROM t2 GROUP BY a ORDER BY a;
+ }
+} {1 1 4 2 3 5}
+do_test minmax4-2.3 {
+ db eval {
+ SELECT a, min(b), avg(b), count(b), c FROM t2 GROUP BY a ORDER BY a DESC;
+ }
+} {2 3 3.0 1 5 1 1 1.5 2 4}
+do_test minmax4-2.4 {
+ db eval {
+ SELECT a, min(b), max(b), c FROM t2 GROUP BY a ORDER BY a;
+ }
+} {1 1 2 3 2 3 3 5}
+do_test minmax4-2.5 {
+ db eval {
+ SELECT a, max(b), min(b), c FROM t2 GROUP BY a ORDER BY a;
+ }
+} {1 2 1 4 2 3 3 5}
+do_test minmax4-2.6 {
+ db eval {
+ SELECT a, max(b), b, max(c), c FROM t2 GROUP BY a ORDER BY a;
+ }
+} {1 2 1 4 4 2 3 3 5 5}
+do_test minmax4-2.7 {
+ db eval {
+ SELECT a, min(b), b, min(c), c FROM t2 GROUP BY a ORDER BY a;
+ }
+} {1 1 {} 2 2 2 3 3 5 5}
+
+
+
+finish_test
diff --git a/test/misc7.test b/test/misc7.test
index 9dee327..146dca0 100644
--- a/test/misc7.test
+++ b/test/misc7.test
@@ -151,6 +151,12 @@ db2 close
# Test that nothing goes horribly wrong when attaching a database
# after the omit_readlock pragma has been exercised.
#
+# Note: The PRAGMA omit_readlock was an early hack to disable the
+# fcntl() calls for read-only databases so that read-only databases could
+# be read on broken NFS systems. That pragma has now been removed.
+# (Use the unix-none VFS as a replacement, if needed.) But these tests
+# do not really depend on omit_readlock, so we left them in place.
+#
do_test misc7-7.1 {
forcedelete test2.db
forcedelete test2.db-journal
@@ -477,7 +483,7 @@ do_test misc7-20.1 {
# Try to open a really long file name.
#
do_test misc7-21.1 {
- set zFile [file join [pwd] "[string repeat abcde 104].db"]
+ set zFile [file join [get_pwd] "[string repeat abcde 104].db"]
set rc [catch {sqlite3 db2 $zFile} msg]
list $rc $msg
} {1 {unable to open database file}}
diff --git a/test/multiplex.test b/test/multiplex.test
index 3abdcf4..32c87d9 100644
--- a/test/multiplex.test
+++ b/test/multiplex.test
@@ -14,6 +14,16 @@ set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/malloc_common.tcl
+# The tests in this file assume that SQLite is compiled without
+# ENABLE_8_3_NAMES.
+#
+ifcapable 8_3_names {
+ puts -nonewline "SQLite compiled with SQLITE_ENABLE_8_3_NAMES. "
+ puts "Skipping tests multiplex-*."
+ finish_test
+ return
+}
+
set g_chunk_size [ expr ($::SQLITE_MAX_PAGE_SIZE*16384) ]
set g_max_chunks 32
@@ -24,7 +34,7 @@ set g_max_chunks 32
# file name with the chunk number.
proc multiplex_name {name chunk} {
if {$chunk==0} { return $name }
- set num [format "%02d" $chunk]
+ set num [format "%03d" $chunk]
ifcapable {multiplex_ext_overwrite} {
set name [string range $name 0 [expr [string length $name]-2-1]]
}
@@ -146,6 +156,9 @@ sqlite3_multiplex_initialize "" 1
multiplex_set db main 32768 16
forcedelete test.x
+foreach f [glob -nocomplain {test.x*[0-9][0-9][0-9]}] {
+ forcedelete $f
+}
do_test multiplex-2.1.2 {
sqlite3 db test.x
execsql {
@@ -182,12 +195,17 @@ do_test multiplex-2.4.2 {
execsql { INSERT INTO t1 VALUES(3, randomblob(1100)) }
} {}
do_test multiplex-2.4.4 { file size [multiplex_name test.x 0] } {7168}
-do_test multiplex-2.4.99 {
+do_test multiplex-2.4.5 {
db close
+ sqlite3 db test.x
+ db eval vacuum
+ db close
+ glob test.x*
+} {test.x}
+do_test multiplex-2.4.99 {
sqlite3_multiplex_shutdown
} {SQLITE_OK}
-
do_test multiplex-2.5.1 {
multiplex_delete test.x
sqlite3_multiplex_initialize "" 1
diff --git a/test/multiplex2.test b/test/multiplex2.test
new file mode 100644
index 0000000..bdfc05b
--- /dev/null
+++ b/test/multiplex2.test
@@ -0,0 +1,70 @@
+# 2010 October 29
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+source $testdir/malloc_common.tcl
+source $testdir/lock_common.tcl
+
+
+do_multiclient_test tn {
+ code1 { catch { sqlite3_multiplex_initialize "" 0 } }
+ code2 { catch { sqlite3_multiplex_initialize "" 0 } }
+
+ code1 { db close }
+ code2 { db2 close }
+
+ code1 { sqlite3 db test.db -vfs multiplex }
+ code2 { sqlite3 db2 test.db -vfs multiplex }
+
+ code1 { sqlite3_multiplex_control db main chunk_size [expr 1024*1024] }
+ code2 { sqlite3_multiplex_control db2 main chunk_size [expr 1024*1024] }
+
+ sql1 {
+ CREATE TABLE t1(a, b);
+ INSERT INTO t1 VALUES(randomblob(10), randomblob(4000)); -- 1
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 2
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 4
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 8
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 16
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 32
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 64
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 128
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 256
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 512
+ SELECT count(*) FROM t1;
+ }
+
+ do_test multiplex-1.$tn.1 { sql1 { SELECT count(*) FROM t1 } } 512
+ do_test multiplex-1.$tn.2 { sql2 { SELECT count(*) FROM t1 } } 512
+ sql2 { DELETE FROM t1 ; VACUUM }
+ do_test multiplex-1.$tn.3 { sql1 { SELECT count(*) FROM t1 } } 0
+
+ sql1 {
+ INSERT INTO t1 VALUES(randomblob(10), randomblob(4000)); -- 1
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 2
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 4
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 8
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 16
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 32
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 64
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 128
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 256
+ INSERT INTO t1 SELECT randomblob(10), randomblob(4000) FROM t1; -- 512
+ SELECT count(*) FROM t1;
+ }
+
+ do_test multiplex-1.$tn.4 { sql2 { SELECT count(*) FROM t1 } } 512
+}
+
+catch { sqlite3_multiplex_shutdown }
+finish_test
diff --git a/test/multiplex3.test b/test/multiplex3.test
new file mode 100644
index 0000000..c1e741a
--- /dev/null
+++ b/test/multiplex3.test
@@ -0,0 +1,166 @@
+
+# 2011 December 13
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# This file contains tests for error (IO, OOM etc.) handling when using
+# the multiplexor extension with 8.3 filenames.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+source $testdir/malloc_common.tcl
+set ::testprefix multiplex3
+
+ifcapable !8_3_names {
+ puts -nonewline "SQLite compiled without SQLITE_ENABLE_8_3_NAMES. "
+ puts "Skipping tests multiplex3-*."
+ finish_test
+ return
+}
+
+db close
+sqlite3_shutdown
+sqlite3_config_uri 1
+autoinstall_test_functions
+
+sqlite3_multiplex_initialize "" 1
+
+proc destroy_vfs_stack {} {
+ generic_unregister stack
+ sqlite3_multiplex_shutdown
+}
+
+proc multiplex_delete_db {} {
+ forcedelete test.db
+ for {set i 1} {$i <= 1000} {incr i} {
+ forcedelete test.[format %03d $i]
+ }
+}
+
+# Procs to save and restore the current multiplexed database.
+#
+proc multiplex_save_db {} {
+ foreach f [glob -nocomplain sv_test.*] { forcedelete $f }
+ foreach f [glob -nocomplain test.*] { forcecopy $f "sv_$f" }
+}
+proc multiplex_restore_db {} {
+ foreach f [glob -nocomplain test.*] {forcedelete $f}
+ foreach f [glob -nocomplain sv_test.*] {forcecopy $f [string range $f 3 end]} }
+
+proc setup_and_save_db {} {
+ multiplex_delete_db
+ sqlite3 db file:test.db?8_3_names=1
+ sqlite3_multiplex_control db main chunk_size [expr 256*1024]
+ execsql {
+ CREATE TABLE t1(a PRIMARY KEY, b);
+ INSERT INTO t1 VALUES(randomblob(15), randomblob(2000));
+ INSERT INTO t1 SELECT randomblob(15), randomblob(2000) FROM t1; -- 2
+ INSERT INTO t1 SELECT randomblob(15), randomblob(2000) FROM t1; -- 4
+ INSERT INTO t1 SELECT randomblob(15), randomblob(2000) FROM t1; -- 8
+ INSERT INTO t1 SELECT randomblob(15), randomblob(2000) FROM t1; -- 16
+ INSERT INTO t1 SELECT randomblob(15), randomblob(2000) FROM t1; -- 32
+ INSERT INTO t1 SELECT randomblob(15), randomblob(2000) FROM t1; -- 64
+ INSERT INTO t1 SELECT randomblob(15), randomblob(2000) FROM t1; -- 128
+ INSERT INTO t1 SELECT randomblob(15), randomblob(2000) FROM t1; -- 256
+ INSERT INTO t1 SELECT randomblob(15), randomblob(2000) FROM t1; -- 512
+ }
+ set ::cksum1 [execsql {SELECT md5sum(a, b) FROM t1 ORDER BY a}]
+ db close
+ multiplex_save_db
+}
+
+do_test 1.0 { setup_and_save_db } {}
+do_faultsim_test 1 -prep {
+ multiplex_restore_db
+ sqlite3 db file:test.db?8_3_names=1
+ sqlite3_multiplex_control db main chunk_size [expr 256*1024]
+} -body {
+ execsql {
+ UPDATE t1 SET a=randomblob(12), b=randomblob(1500) WHERE (rowid%32)=0
+ }
+} -test {
+ faultsim_test_result {0 {}}
+ if {$testrc!=0} {
+ set cksum2 [execsql {SELECT md5sum(a, b) FROM t1 ORDER BY a}]
+ if {$cksum2 != $::cksum1} { error "data mismatch" }
+ }
+}
+
+#-------------------------------------------------------------------------
+# The following tests verify that hot-journal rollback works. As follows:
+#
+# 1. Create a large database.
+# 2. Set the pager cache to be very small.
+# 3. Open a transaction.
+# 4. Run the following 100 times:
+# a. Update a row.
+# b. Copy all files on disk to a new db location, including the journal.
+# c. Verify that the new db can be opened and that the content matches
+# the database created in step 1 (proving the journal was rolled
+# back).
+
+do_test 2.0 {
+ setup_and_save_db
+ multiplex_restore_db
+ sqlite3 db file:test.db?8_3_names=1
+ execsql { PRAGMA cache_size = 10 }
+ execsql { BEGIN }
+} {}
+
+for {set iTest 1} {$iTest<=100} {incr iTest} {
+ do_test 2.$iTest {
+ execsql {
+ UPDATE t1 SET a=randomblob(12), b=randomblob(1400) WHERE rowid=5*$iTest
+ }
+ foreach f [glob -nocomplain test.*] {forcecopy $f "xx_$f"}
+ sqlite3 db2 file:xx_test.db?8_3_names=1
+ execsql {SELECT md5sum(a, b) FROM t1 ORDER BY a} db2
+ } $::cksum1
+
+ db2 close
+}
+catch { db close }
+
+
+do_test 3.0 { setup_and_save_db } {}
+do_faultsim_test 3 -faults ioerr-trans* -prep {
+
+ forcedelete test2.db
+ set fd [open test2.wal w]
+ seek $fd 4095
+ puts -nonewline $fd x
+ close $fd
+
+ multiplex_restore_db
+ sqlite3 db file:test.db?8_3_names=1
+ sqlite3 db2 file:test2.db?8_3_names=1
+ sqlite3_multiplex_control db main chunk_size [expr 256*1024]
+ sqlite3_multiplex_control db2 main chunk_size [expr 256*1024]
+} -body {
+ sqlite3_backup B db2 main db main
+ B step 100000
+ set rc [B finish]
+ if { [string match SQLITE_IOERR_* $rc] } {error "disk I/O error"}
+ set rc
+} -test {
+ faultsim_test_result {0 SQLITE_OK}
+ if {$testrc==0} {
+ set cksum2 [execsql {SELECT md5sum(a, b) FROM t1 ORDER BY a} db2]
+ if {$cksum2 != $::cksum1} { error "data mismatch" }
+ }
+ catch { B finish }
+ catch { db close }
+ catch { db2 close }
+}
+
+catch { db close }
+sqlite3_multiplex_shutdown
+finish_test
diff --git a/test/pager1.test b/test/pager1.test
index 0226fe4..9c62e87 100644
--- a/test/pager1.test
+++ b/test/pager1.test
@@ -54,6 +54,8 @@ do_not_use_codec
# pager1-16.*: Varying sqlite3_vfs.mxPathname
#
# pager1-17.*: Tests related to "PRAGMA omit_readlock"
+# (The omit_readlock pragma has been removed and so have
+# these tests.)
#
# pager1-18.*: Test that the pager layer responds correctly if the b-tree
# requests an invalid page number (due to db corruption).
@@ -460,7 +462,7 @@ do_test pager1.4.2.3 {
} {64 ok}
do_test pager1.4.2.4 {
faultsim_restore_and_reopen
- hexio_write test.db-journal [expr [file size test.db-journal]-20] 123456
+ hexio_write test.db-journal [expr [file size test.db-journal]-30] 123456
execsql {
SELECT count(*) FROM t1;
PRAGMA integrity_check;
@@ -468,7 +470,7 @@ do_test pager1.4.2.4 {
} {4 ok}
do_test pager1.4.2.5 {
faultsim_restore_and_reopen
- hexio_write test.db-journal [expr [file size test.db-journal]-20] 123456
+ hexio_write test.db-journal [expr [file size test.db-journal]-30] 123456
foreach f [glob test.db-mj*] { forcedelete $f }
execsql {
SELECT count(*) FROM t1;
@@ -533,7 +535,7 @@ proc copy_on_mj_delete {method filename args} {
return SQLITE_OK
}
-set pwd [pwd]
+set pwd [get_pwd]
foreach {tn1 tcl} {
1 { set prefix "test.db" }
2 {
@@ -885,6 +887,24 @@ do_test pager1.4.7.3 {
delete_file test.db-journal
file exists test.db-journal
} {0}
+do_test pager1.4.8.1 {
+ catch {file attributes test.db -permissions r--------}
+ catch {file attributes test.db -readonly 1}
+ sqlite3 db test.db
+ db eval { SELECT * FROM t1 }
+ sqlite3_db_readonly db main
+} {1}
+do_test pager1.4.8.2 {
+ sqlite3_db_readonly db xyz
+} {-1}
+do_test pager1.4.8.3 {
+ db close
+ catch {file attributes test.db -readonly 0}
+ catch {file attributes test.db -permissions rw-rw-rw-} msg
+ sqlite3 db test.db
+ db eval { SELECT * FROM t1 }
+ sqlite3_db_readonly db main
+} {0}
#-------------------------------------------------------------------------
# The following tests deal with multi-file commits.
@@ -990,8 +1010,19 @@ do_test pager1-5.4.1 {
INSERT INTO t2 VALUES(85, 'Gorbachev');
COMMIT;
}
- set ::max_journal
-} [expr 2615+[string length [pwd]]]
+
+ # The size of the journal file is now:
+ #
+ # 1) 512 byte header +
+ # 2) 2 * (1024+8) byte records +
+ # 3) 20+N bytes of master-journal pointer, where N is the size of
+ # the master-journal name encoded as utf-8 with no nul term.
+ #
+ set mj_pointer [expr {
+ 20 + [string length [get_pwd]] + [string length "/test.db-mjXXXXXX9XX"]
+ }]
+ expr {$::max_journal==(512+2*(1024+8)+$mj_pointer)}
+} 1
do_test pager1-5.4.2 {
set ::max_journal 0
execsql {
@@ -1001,8 +1032,16 @@ do_test pager1-5.4.2 {
DELETE FROM t2 WHERE b = 'Lenin';
COMMIT;
}
- set ::max_journal
-} [expr 3111+[string length [pwd]]]
+
+ # In synchronous=full mode, the master-journal pointer is not written
+ # directly after the last record in the journal file. Instead, it is
+ # written starting at the next (in this case 512 byte) sector boundary.
+ #
+ set mj_pointer [expr {
+ 20 + [string length [get_pwd]] + [string length "/test.db-mjXXXXXX9XX"]
+ }]
+ expr {$::max_journal==(((512+2*(1024+8)+511)/512)*512 + $mj_pointer)}
+} 1
db close
tv delete
@@ -1312,6 +1351,7 @@ foreach sectorsize {
4096 8192 16384 32768 65536 131072 262144
} {
tv sectorsize $sectorsize
+ tv devchar {}
set eff $sectorsize
if {$sectorsize < 512} { set eff 512 }
if {$sectorsize > 65536} { set eff 65536 }
@@ -1688,75 +1728,6 @@ for {set ii [expr $::file_len-5]} {$ii < [expr $::file_len+20]} {incr ii} {
tv delete
}
-#-------------------------------------------------------------------------
-# Test "PRAGMA omit_readlock".
-#
-# pager1-17.$tn.1.*: Test that if a second connection has an open
-# read-transaction, it is not usually possible to write
-# the database.
-#
-# pager1-17.$tn.2.*: Test that if the second connection was opened with
-# the SQLITE_OPEN_READONLY flag, and
-# "PRAGMA omit_readlock = 1" is executed before attaching
-# the database and opening a read-transaction on it, it is
-# possible to write the db.
-#
-# pager1-17.$tn.3.*: Test that if the second connection was *not* opened with
-# the SQLITE_OPEN_READONLY flag, executing
-# "PRAGMA omit_readlock = 1" has no effect.
-#
-do_multiclient_test tn {
- do_test pager1-17.$tn.1.1 {
- sql1 {
- CREATE TABLE t1(a, b);
- INSERT INTO t1 VALUES(1, 2);
- }
- sql2 {
- BEGIN;
- SELECT * FROM t1;
- }
- } {1 2}
- do_test pager1-17.$tn.1.2 {
- csql1 { INSERT INTO t1 VALUES(3, 4) }
- } {1 {database is locked}}
- do_test pager1-17.$tn.1.3 {
- sql2 { COMMIT }
- sql1 { INSERT INTO t1 VALUES(3, 4) }
- } {}
-
- do_test pager1-17.$tn.2.1 {
- code2 {
- db2 close
- sqlite3 db2 :memory: -readonly 1
- }
- sql2 {
- PRAGMA omit_readlock = 1;
- ATTACH 'test.db' AS two;
- BEGIN;
- SELECT * FROM t1;
- }
- } {1 2 3 4}
- do_test pager1-17.$tn.2.2 { sql1 "INSERT INTO t1 VALUES(5, 6)" } {}
- do_test pager1-17.$tn.2.3 { sql2 "SELECT * FROM t1" } {1 2 3 4}
- do_test pager1-17.$tn.2.4 { sql2 "COMMIT ; SELECT * FROM t1" } {1 2 3 4 5 6}
-
- do_test pager1-17.$tn.3.1 {
- code2 {
- db2 close
- sqlite3 db2 :memory:
- }
- sql2 {
- PRAGMA omit_readlock = 1;
- ATTACH 'test.db' AS two;
- BEGIN;
- SELECT * FROM t1;
- }
- } {1 2 3 4 5 6}
- do_test pager1-17.$tn.3.2 {
- csql1 { INSERT INTO t1 VALUES(3, 4) }
- } {1 {database is locked}}
- do_test pager1-17.$tn.3.3 { sql2 COMMIT } {}
-}
#-------------------------------------------------------------------------
# Test the pagers response to the b-tree layer requesting illegal page
@@ -1797,7 +1768,7 @@ do_test pager1-18.2 {
catchsql { SELECT count(*) FROM t1 } db2
} {1 {database disk image is malformed}}
db2 close
-do_test pager1-18.3 {
+do_test pager1-18.3.1 {
execsql {
CREATE TABLE t2(x);
INSERT INTO t2 VALUES(a_string(5000));
@@ -1805,13 +1776,38 @@ do_test pager1-18.3 {
set pgno [expr ([file size test.db] / 1024)-2]
hexio_write test.db [expr ($pgno-1)*1024] 00000000
sqlite3 db2 test.db
- catchsql { SELECT length(x) FROM t2 } db2
+ # even though x is malformed, because typeof() does
+ # not load the content of x, the error is not noticed.
+ catchsql { SELECT typeof(x) FROM t2 } db2
+} {0 text}
+do_test pager1-18.3.2 {
+ # in this case, the value of x is loaded and so the error is
+ # detected
+ catchsql { SELECT length(x||'') FROM t2 } db2
+} {1 {database disk image is malformed}}
+db2 close
+do_test pager1-18.3.3 {
+ execsql {
+ DELETE FROM t2;
+ INSERT INTO t2 VALUES(randomblob(5000));
+ }
+ set pgno [expr ([file size test.db] / 1024)-2]
+ hexio_write test.db [expr ($pgno-1)*1024] 00000000
+ sqlite3 db2 test.db
+ # even though x is malformed, because length() and typeof() do
+ # not load the content of x, the error is not noticed.
+ catchsql { SELECT length(x), typeof(x) FROM t2 } db2
+} {0 {5000 blob}}
+do_test pager1-18.3.4 {
+ # in this case, the value of x is loaded and so the error is
+ # detected
+ catchsql { SELECT length(x||'') FROM t2 } db2
} {1 {database disk image is malformed}}
db2 close
do_test pager1-18.4 {
hexio_write test.db [expr ($pgno-1)*1024] 90000000
sqlite3 db2 test.db
- catchsql { SELECT length(x) FROM t2 } db2
+ catchsql { SELECT length(x||'') FROM t2 } db2
} {1 {database disk image is malformed}}
db2 close
do_test pager1-18.5 {
diff --git a/test/permutations.test b/test/permutations.test
index 7c3b026..3165ea3 100644
--- a/test/permutations.test
+++ b/test/permutations.test
@@ -110,8 +110,8 @@ set allquicktests [test_set $alltests -exclude {
speed4p.test sqllimits1.test tkt2686.test thread001.test thread002.test
thread003.test thread004.test thread005.test trans2.test vacuum3.test
incrvacuum_ioerr.test autovacuum_crash.test btree8.test shared_err.test
- vtab_err.test walslow.test walcrash.test
- walthread.test rtree3.test indexfault.test
+ vtab_err.test walslow.test walcrash.test walcrash3.test
+ walthread.test rtree3.test indexfault.test
}]
if {[info exists ::env(QUICKTEST_INCLUDE)]} {
set allquicktests [concat $allquicktests $::env(QUICKTEST_INCLUDE)]
@@ -142,7 +142,7 @@ test_suite "valgrind" -prefix "" -description {
Run the "veryquick" test suite with a couple of multi-process tests (that
fail under valgrind) omitted.
} -files [
- test_set $allquicktests -exclude *malloc* *ioerr* *fault*
+ test_set $allquicktests -exclude *malloc* *ioerr* *fault* wal.test
] -initialize {
set ::G(valgrind) 1
} -shutdown {
@@ -184,8 +184,8 @@ test_suite "fts3" -prefix "" -description {
fts3aux1.test fts3comp1.test fts3auto.test
fts4aa.test fts4content.test
fts3conf.test fts3prefix.test fts3fault2.test fts3corrupt.test
- fts3corrupt2.test
- fts3first.test
+ fts3corrupt2.test fts3first.test fts4langid.test fts4merge.test
+ fts4check.test
}
@@ -530,6 +530,8 @@ test_suite "inmemory_journal" -description {
# Exclude stmt.test, which expects sub-journals to use temporary files.
stmt.test
+ zerodamage.test
+
# WAL mode is different.
wal* tkt-2d1a5c67d.test backcompat.test
}]
diff --git a/test/pragma.test b/test/pragma.test
index 0cad25a..bb10327 100644
--- a/test/pragma.test
+++ b/test/pragma.test
@@ -99,7 +99,7 @@ do_test pragma-1.5 {
PRAGMA default_cache_size;
PRAGMA synchronous;
}
-} [list 4321 $DFLT_CACHE_SZ 0]
+} [list -4321 $DFLT_CACHE_SZ 0]
do_test pragma-1.6 {
execsql {
PRAGMA synchronous=ON;
@@ -107,7 +107,7 @@ do_test pragma-1.6 {
PRAGMA default_cache_size;
PRAGMA synchronous;
}
-} [list 4321 $DFLT_CACHE_SZ 1]
+} [list -4321 $DFLT_CACHE_SZ 1]
do_test pragma-1.7 {
db close
sqlite3 db test.db
@@ -990,7 +990,7 @@ do_test pragma-9.4 {
} {}
ifcapable wsd {
do_test pragma-9.5 {
- set pwd [string map {' ''} [file nativename [pwd]]]
+ set pwd [string map {' ''} [file nativename [get_pwd]]]
execsql "
PRAGMA temp_store_directory='$pwd';
"
@@ -999,7 +999,7 @@ ifcapable wsd {
execsql {
PRAGMA temp_store_directory;
}
- } [list [file nativename [pwd]]]
+ } [list [file nativename [get_pwd]]]
do_test pragma-9.7 {
catchsql {
PRAGMA temp_store_directory='/NON/EXISTENT/PATH/FOOBAR';
@@ -1489,4 +1489,26 @@ foreach {temp_setting val} {
} $val
}
+# The SQLITE_FCNTL_PRAGMA logic, with error handling.
+#
+db close
+testvfs tvfs
+sqlite3 db test.db -vfs tvfs
+do_test pragma-19.1 {
+ catchsql {PRAGMA error}
+} {1 {SQL logic error or missing database}}
+do_test pragma-19.2 {
+ catchsql {PRAGMA error='This is the error message'}
+} {1 {This is the error message}}
+do_test pragma-19.3 {
+ catchsql {PRAGMA error='7 This is the error message'}
+} {1 {This is the error message}}
+do_test pragma-19.4 {
+ catchsql {PRAGMA error=7}
+} {1 {out of memory}}
+do_test pragma-19.5 {
+ file tail [lindex [execsql {PRAGMA filename}] 0]
+} {test.db}
+
+
finish_test
diff --git a/test/quota-glob.test b/test/quota-glob.test
new file mode 100644
index 0000000..28c813c
--- /dev/null
+++ b/test/quota-glob.test
@@ -0,0 +1,87 @@
+# 2011 December 1
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Tests for the glob-style string compare operator embedded in the
+# quota shim.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+catch { unset testnum }
+catch { unset pattern }
+catch { unset text }
+catch { unset ans }
+
+foreach {testnum pattern text ans} {
+ 1 abcdefg abcdefg 1
+ 2 abcdefG abcdefg 0
+ 3 abcdef abcdefg 0
+ 4 abcdefgh abcdefg 0
+ 5 abcdef? abcdefg 1
+ 6 abcdef? abcdef 0
+ 7 abcdef? abcdefgh 0
+ 8 abcdefg abcdef? 0
+ 9 abcdef? abcdef? 1
+ 10 abc/def abc/def 1
+ 11 abc//def abc/def 0
+ 12 */abc/* x/abc/y 1
+ 13 */abc/* /abc/ 1
+ 14 */abc/* x///a/ab/abc 0
+ 15 */abc/* x//a/ab/abc/ 1
+ 16 */abc/* x///a/ab/abc 0
+ 17 */abc/* x//a/ab/abc/ 1
+ 18 **/abc/** x//a/ab/abc/ 1
+ 19 *?/abc/*? x//a/ab/abc/y 1
+ 20 ?*/abc/?* x//a/ab/abc/y 1
+ 21 {abc[cde]efg} abcbefg 0
+ 22 {abc[cde]efg} abccefg 1
+ 23 {abc[cde]efg} abcdefg 1
+ 24 {abc[cde]efg} abceefg 1
+ 25 {abc[cde]efg} abcfefg 0
+ 26 {abc[^cde]efg} abcbefg 1
+ 27 {abc[^cde]efg} abccefg 0
+ 28 {abc[^cde]efg} abcdefg 0
+ 29 {abc[^cde]efg} abceefg 0
+ 30 {abc[^cde]efg} abcfefg 1
+ 31 {abc[c-e]efg} abcbefg 0
+ 32 {abc[c-e]efg} abccefg 1
+ 33 {abc[c-e]efg} abcdefg 1
+ 34 {abc[c-e]efg} abceefg 1
+ 35 {abc[c-e]efg} abcfefg 0
+ 36 {abc[^c-e]efg} abcbefg 1
+ 37 {abc[^c-e]efg} abccefg 0
+ 38 {abc[^c-e]efg} abcdefg 0
+ 39 {abc[^c-e]efg} abceefg 0
+ 40 {abc[^c-e]efg} abcfefg 1
+ 41 {abc[c-e]efg} abc-efg 0
+ 42 {abc[-ce]efg} abc-efg 1
+ 43 {abc[ce-]efg} abc-efg 1
+ 44 {abc[][*?]efg} {abc]efg} 1
+ 45 {abc[][*?]efg} {abc*efg} 1
+ 46 {abc[][*?]efg} {abc?efg} 1
+ 47 {abc[][*?]efg} {abc[efg} 1
+ 48 {abc[^][*?]efg} {abc]efg} 0
+ 49 {abc[^][*?]efg} {abc*efg} 0
+ 50 {abc[^][*?]efg} {abc?efg} 0
+ 51 {abc[^][*?]efg} {abc[efg} 0
+ 52 {abc[^][*?]efg} {abcdefg} 1
+ 53 {*[xyz]efg} {abcxefg} 1
+ 54 {*[xyz]efg} {abcwefg} 0
+} {
+ do_test quota-glob-$testnum.1 {
+ sqlite3_quota_glob $::pattern $::text
+ } $::ans
+ do_test quota-glob-$testnum.2 {
+ sqlite3_quota_glob $::pattern [string map {/ \\} $::text]
+ } $::ans
+}
+finish_test
diff --git a/test/quota.test b/test/quota.test
index 49b403f..ec89086 100644
--- a/test/quota.test
+++ b/test/quota.test
@@ -14,6 +14,8 @@ set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/malloc_common.tcl
+unset -nocomplain defaultVfs
+set defaultVfs [file_control_vfsname db]
db close
do_test quota-1.1 { sqlite3_quota_initialize nosuchvfs 1 } {SQLITE_ERROR}
@@ -48,6 +50,7 @@ do_test quota-1.8 { sqlite3_quota_shutdown } {SQLITE_OK}
#
sqlite3_quota_initialize "" 1
+unset -nocomplain quota_request_ok
proc quota_check {filename limitvar size} {
upvar $limitvar limit
@@ -73,6 +76,9 @@ do_test quota-2.1.2 {
}
set ::quota
} {}
+do_test quota-2.1.2.1 {
+ file_control_vfsname db
+} quota/$defaultVfs
do_test quota-2.1.3 { file size test.db } {4096}
do_test quota-2.1.4 {
catchsql { INSERT INTO t1 VALUES(3, randomblob(1100)) }
@@ -215,7 +221,7 @@ do_test quota-3.3.1 {
execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db2a
execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db2b
set ::quota
-} [list [file join [pwd] test.db] 5120]
+} [list [file join [get_pwd] test.db] 5120]
do_test quota-3.2.X {
foreach db {db1a db2a db2b db1b} { catch { $db close } }
diff --git a/test/quota2.test b/test/quota2.test
new file mode 100644
index 0000000..5bb50d7
--- /dev/null
+++ b/test/quota2.test
@@ -0,0 +1,271 @@
+# 2011 December 1
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+source $testdir/malloc_common.tcl
+
+db close
+sqlite3_quota_initialize "" 1
+
+foreach dir {quota2a/x1 quota2a/x2 quota2a quota2b quota2c} {
+ file delete -force $dir
+}
+foreach dir {quota2a quota2a/x1 quota2a/x2 quota2b quota2c} {
+ file mkdir $dir
+}
+
+# The standard_path procedure converts a pathname into a standard format
+# that is the same across platforms.
+#
+unset -nocomplain ::quota_pwd ::quota_mapping
+set ::quota_pwd [string map {\\ /} [get_pwd]]
+set ::quota_mapping [list $::quota_pwd PWD]
+proc standard_path {x} {
+ set x [string map {\\ /} $x]
+ return [string map $::quota_mapping $x]
+}
+
+# The quota_check procedure is a callback from the quota handler.
+# It has three arguments which are (1) the full pathname of the file
+# that has gone over quota, (2) the quota limit, (3) the requested
+# new quota size to cover the last write. These three values are
+# appended to the global variable $::quota. The filename is processed
+# to convert every \ character into / and to change the name of the
+# working directory to PWD.
+#
+# The quota is increased to the request if the ::quota_request_ok
+# global variable is true.
+#
+set ::quota {}
+set ::quota_request_ok 0
+
+proc quota_check {filename limitvar size} {
+ upvar $limitvar limit
+ lappend ::quota [standard_path $filename] [set limit] $size
+ if {$::quota_request_ok} {set limit $size}
+}
+
+sqlite3_quota_set */quota2a/* 4000 quota_check
+sqlite3_quota_set */quota2b/* 5000 quota_check
+
+unset -nocomplain bigtext
+for {set i 1} {$i<=1000} {incr i} {
+ if {$i%10==0} {
+ append bigtext [format "%06d\n" $i]
+ } else {
+ append bigtext [format "%06d " $i]
+ }
+}
+
+catch { unset h1 }
+catch { unset x }
+do_test quota2-1.1 {
+ set ::h1 [sqlite3_quota_fopen quota2a/xyz.txt w+b]
+ sqlite3_quota_fwrite $::h1 1 7000 $bigtext
+} {4000}
+do_test quota2-1.2 {
+ set ::quota
+} {PWD/quota2a/xyz.txt 4000 7000}
+do_test quota2-1.2.1 {
+ sqlite3_quota_file_size $::h1
+} {4000}
+do_test quota2-1.2.2 {
+ sqlite3_quota_fflush $::h1 1
+ sqlite3_quota_file_truesize $::h1
+} {4000}
+do_test quota2-1.3 {
+ sqlite3_quota_rewind $::h1
+ set ::x [sqlite3_quota_fread $::h1 1001 7]
+ string length $::x
+} {3003}
+do_test quota2-1.4 {
+ string match $::x [string range $::bigtext 0 3002]
+} {1}
+do_test quota2-1.5 {
+ sqlite3_quota_fseek $::h1 0 SEEK_END
+ sqlite3_quota_ftell $::h1
+} {4000}
+do_test quota2-1.6 {
+ sqlite3_quota_fseek $::h1 -100 SEEK_END
+ sqlite3_quota_ftell $::h1
+} {3900}
+do_test quota2-1.7 {
+ sqlite3_quota_fseek $::h1 -100 SEEK_CUR
+ sqlite3_quota_ftell $::h1
+} {3800}
+do_test quota2-1.8 {
+ sqlite3_quota_fseek $::h1 50 SEEK_CUR
+ sqlite3_quota_ftell $::h1
+} {3850}
+do_test quota2-1.9 {
+ sqlite3_quota_fseek $::h1 50 SEEK_SET
+ sqlite3_quota_ftell $::h1
+} {50}
+do_test quota2-1.10 {
+ sqlite3_quota_rewind $::h1
+ sqlite3_quota_ftell $::h1
+} {0}
+do_test quota2-1.11 {
+ standard_path [sqlite3_quota_dump]
+} {{*/quota2b/* 5000 0} {*/quota2a/* 4000 4000 {PWD/quota2a/xyz.txt 4000 1 0}}}
+do_test quota2-1.12 {
+ sqlite3_quota_ftruncate $::h1 3500
+ sqlite3_quota_file_size $::h1
+} {3500}
+do_test quota2-1.13 {
+ sqlite3_quota_file_truesize $::h1
+} {3500}
+do_test quota2-1.14 {
+ standard_path [sqlite3_quota_dump]
+} {{*/quota2b/* 5000 0} {*/quota2a/* 4000 3500 {PWD/quota2a/xyz.txt 3500 1 0}}}
+do_test quota2-1.15 {
+ sqlite3_quota_fseek $::h1 0 SEEK_END
+ sqlite3_quota_ftell $::h1
+} {3500}
+do_test quota2-1.16 {
+ sqlite3_quota_fwrite $::h1 1 7000 $bigtext
+} {500}
+do_test quota2-1.17 {
+ sqlite3_quota_ftell $::h1
+} {4000}
+do_test quota2-1.18 {
+ sqlite3_quota_file_size $::h1
+} {4000}
+do_test quota2-1.19 {
+ sqlite3_quota_fflush $::h1 1
+ sqlite3_quota_file_truesize $::h1
+} {4000}
+do_test quota2-1.20 {
+ sqlite3_quota_fclose $::h1
+ standard_path [sqlite3_quota_dump]
+} {{*/quota2b/* 5000 0} {*/quota2a/* 4000 4000 {PWD/quota2a/xyz.txt 4000 0 0}}}
+do_test quota2-1.21 {
+ sqlite3_quota_remove quota2a/xyz.txt
+ standard_path [sqlite3_quota_dump]
+} {{*/quota2b/* 5000 0} {*/quota2a/* 4000 0}}
+
+
+
+set quota {}
+do_test quota2-2.1 {
+ set ::h1 [sqlite3_quota_fopen quota2c/xyz.txt w+b]
+ sqlite3_quota_fwrite $::h1 1 7000 $bigtext
+} {7000}
+do_test quota2-2.2 {
+ set ::quota
+} {}
+do_test quota2-2.3 {
+ sqlite3_quota_rewind $::h1
+ set ::x [sqlite3_quota_fread $::h1 1001 7]
+ string length $::x
+} {6006}
+do_test quota2-2.4 {
+ string match $::x [string range $::bigtext 0 6005]
+} {1}
+do_test quota2-2.5 {
+ sqlite3_quota_fseek $::h1 0 SEEK_END
+ sqlite3_quota_ftell $::h1
+} {7000}
+do_test quota2-2.6 {
+ sqlite3_quota_fseek $::h1 -100 SEEK_END
+ sqlite3_quota_ftell $::h1
+} {6900}
+do_test quota2-2.7 {
+ sqlite3_quota_fseek $::h1 -100 SEEK_CUR
+ sqlite3_quota_ftell $::h1
+} {6800}
+do_test quota2-2.8 {
+ sqlite3_quota_fseek $::h1 50 SEEK_CUR
+ sqlite3_quota_ftell $::h1
+} {6850}
+do_test quota2-2.9 {
+ sqlite3_quota_fseek $::h1 50 SEEK_SET
+ sqlite3_quota_ftell $::h1
+} {50}
+do_test quota2-2.10 {
+ sqlite3_quota_rewind $::h1
+ sqlite3_quota_ftell $::h1
+} {0}
+do_test quota2-2.11 {
+ standard_path [sqlite3_quota_dump]
+} {{*/quota2b/* 5000 0} {*/quota2a/* 4000 0}}
+do_test quota2-2.12 {
+ sqlite3_quota_fclose $::h1
+ standard_path [sqlite3_quota_dump]
+} {{*/quota2b/* 5000 0} {*/quota2a/* 4000 0}}
+
+do_test quota2-3.1 {
+ sqlite3_quota_set */quota2b/* 0 quota_check
+ set ::h1 [sqlite3_quota_fopen quota2a/x1/a.txt a]
+ sqlite3_quota_fwrite $::h1 10 10 $bigtext
+} {10}
+do_test quota2-3.2 {
+ standard_path [sqlite3_quota_dump]
+} {{*/quota2a/* 4000 100 {PWD/quota2a/x1/a.txt 100 1 0}}}
+do_test quota2-3.3a {
+ sqlite3_quota_fflush $::h1 0
+ standard_path [sqlite3_quota_dump]
+} {{*/quota2a/* 4000 100 {PWD/quota2a/x1/a.txt 100 1 0}}}
+do_test quota2-3.3b {
+ sqlite3_quota_fflush $::h1 1
+ standard_path [sqlite3_quota_dump]
+} {{*/quota2a/* 4000 100 {PWD/quota2a/x1/a.txt 100 1 0}}}
+do_test quota2-3.3c {
+ sqlite3_quota_fflush $::h1
+ standard_path [sqlite3_quota_dump]
+} {{*/quota2a/* 4000 100 {PWD/quota2a/x1/a.txt 100 1 0}}}
+do_test quota2-3.4 {
+ sqlite3_quota_fclose $::h1
+ standard_path [sqlite3_quota_dump]
+} {{*/quota2a/* 4000 100 {PWD/quota2a/x1/a.txt 100 0 0}}}
+do_test quota2-3.5 {
+ set ::h2 [sqlite3_quota_fopen quota2a/x2/b.txt a]
+ sqlite3_quota_fwrite $::h2 10 20 $bigtext
+ standard_path [sqlite3_quota_dump]
+} {{*/quota2a/* 4000 300 {PWD/quota2a/x2/b.txt 200 1 0} {PWD/quota2a/x1/a.txt 100 0 0}}}
+do_test quota2-3.6 {
+ set ::h3 [sqlite3_quota_fopen quota2a/x1/c.txt a]
+ sqlite3_quota_fwrite $::h3 10 50 $bigtext
+ standard_path [sqlite3_quota_dump]
+} {{*/quota2a/* 4000 800 {PWD/quota2a/x1/c.txt 500 1 0} {PWD/quota2a/x2/b.txt 200 1 0} {PWD/quota2a/x1/a.txt 100 0 0}}}
+do_test quota2-3.7 {
+ file exists quota2a/x1/a.txt
+} {1}
+do_test quota2-3.8 {
+ file exists quota2a/x2/b.txt
+} {1}
+do_test quota2-3.9 {
+ file exists quota2a/x1/c.txt
+} {1}
+do_test quota2-3.10 {
+ sqlite3_quota_remove quota2a/x1
+ standard_path [sqlite3_quota_dump]
+} {{*/quota2a/* 4000 700 {PWD/quota2a/x1/c.txt 500 1 1} {PWD/quota2a/x2/b.txt 200 1 0}}}
+do_test quota2-3.11 {
+ sqlite3_quota_fclose $::h2
+ sqlite3_quota_fclose $::h3
+ standard_path [sqlite3_quota_dump]
+} {{*/quota2a/* 4000 200 {PWD/quota2a/x2/b.txt 200 0 0}}}
+do_test quota2-3.12 {
+ file exists quota2a/x1/a.txt
+} {0}
+do_test quota2-3.13 {
+ file exists quota2a/x2/b.txt
+} {1}
+do_test quota2-3.14 {
+ file exists quota2a/x1/c.txt
+} {0}
+
+catch { sqlite3_quota_shutdown }
+catch { unset quota_request_ok }
+finish_test
diff --git a/test/randexpr1.test b/test/randexpr1.test
index 10a1d17..7a98f0b 100644
--- a/test/randexpr1.test
+++ b/test/randexpr1.test
@@ -22,6 +22,11 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+ifcapable !compound {
+ finish_test
+ return
+}
+
# Create test data
#
do_test randexpr1-1.1 {
diff --git a/test/savepoint.test b/test/savepoint.test
index 3bbbaaa..015d97f 100644
--- a/test/savepoint.test
+++ b/test/savepoint.test
@@ -303,11 +303,19 @@ ifcapable incrblob {
execsql {SAVEPOINT abc}
catchsql {ROLLBACK TO def}
} {1 {no such savepoint: def}}
- do_test savepoint-5.3.2 {
+ do_test savepoint-5.3.2.1 {
execsql {SAVEPOINT def}
set fd [db incrblob -readonly blobs x 1]
+ set rc [catch {seek $fd 0;read $fd} res]
+ lappend rc $res
+ } {0 {hellontyeight character blob}}
+ do_test savepoint-5.3.2.2 {
catchsql {ROLLBACK TO def}
- } {1 {cannot rollback savepoint - SQL statements in progress}}
+ } {0 {}}
+ do_test savepoint-5.3.2.3 {
+ set rc [catch {seek $fd 0; read $fd} res]
+ set rc
+ } {1}
do_test savepoint-5.3.3 {
catchsql {RELEASE def}
} {0 {}}
@@ -649,10 +657,8 @@ if {[wal_is_wal_mode]==0} {
CREATE TABLE main.t1(x, y);
CREATE TABLE aux1.t2(x, y);
CREATE TABLE aux2.t3(x, y);
- SELECT name FROM sqlite_master
- UNION ALL
- SELECT name FROM aux1.sqlite_master
- UNION ALL
+ SELECT name FROM sqlite_master;
+ SELECT name FROM aux1.sqlite_master;
SELECT name FROM aux2.sqlite_master;
}
} {t1 t2 t3}
@@ -691,7 +697,7 @@ if {[wal_is_wal_mode]==0} {
execsql { PRAGMA lock_status }
} [list main reserved temp $templockstate aux1 reserved aux2 reserved]
do_test savepoint-10.2.9 {
- execsql { SELECT 'a', * FROM t1 UNION ALL SELECT 'b', * FROM t3 }
+ execsql { SELECT 'a', * FROM t1 ; SELECT 'b', * FROM t3 }
} {a 1 2 b 3 4}
do_test savepoint-10.2.9 {
execsql {
diff --git a/test/savepoint7.test b/test/savepoint7.test
new file mode 100644
index 0000000..bc99187
--- /dev/null
+++ b/test/savepoint7.test
@@ -0,0 +1,96 @@
+# 2012 March 31
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Focus on the interaction between RELEASE and ROLLBACK TO with
+# pending query aborts. See ticket [27ca74af3c083f787a1c44b11fbb7c53bdbbcf1e].
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# The RELEASE of an inner savepoint should not effect pending queries.
+#
+do_test savepoint7-1.1 {
+ db eval {
+ CREATE TABLE t1(a,b,c);
+ CREATE TABLE t2(x,y,z);
+ INSERT INTO t1 VALUES(1,2,3);
+ INSERT INTO t1 VALUES(4,5,6);
+ INSERT INTO t1 VALUES(7,8,9);
+ SAVEPOINT x1;
+ }
+ db eval {SELECT * FROM t1} {
+ db eval {
+ SAVEPOINT x2;
+ INSERT INTO t2 VALUES($a,$b,$c);
+ RELEASE x2;
+ }
+ }
+ db eval {SELECT * FROM t2; RELEASE x1}
+} {1 2 3 4 5 6 7 8 9}
+
+do_test savepoint7-1.2 {
+ db eval {DELETE FROM t2;}
+ db eval {SELECT * FROM t1} {
+ db eval {
+ SAVEPOINT x2;
+ INSERT INTO t2 VALUES($a,$b,$c);
+ RELEASE x2;
+ }
+ }
+ db eval {SELECT * FROM t2}
+} {1 2 3 4 5 6 7 8 9}
+
+do_test savepoint7-1.3 {
+ db eval {DELETE FROM t2; BEGIN;}
+ db eval {SELECT * FROM t1} {
+ db eval {
+ SAVEPOINT x2;
+ INSERT INTO t2 VALUES($a,$b,$c);
+ RELEASE x2;
+ }
+ }
+ db eval {SELECT * FROM t2; ROLLBACK;}
+} {1 2 3 4 5 6 7 8 9}
+
+# However, a ROLLBACK of an inner savepoint will abort all queries, including
+# queries in outer contexts.
+#
+do_test savepoint7-2.1 {
+ db eval {DELETE FROM t2; SAVEPOINT x1;}
+ set rc [catch {
+ db eval {SELECT * FROM t1} {
+ db eval {
+ SAVEPOINT x2;
+ INSERT INTO t2 VALUES($a,$b,$c);
+ ROLLBACK TO x2;
+ }
+ }
+ } msg]
+ db eval {RELEASE x1}
+ list $rc $msg [db eval {SELECT * FROM t2}]
+} {1 {callback requested query abort} {}}
+
+do_test savepoint7-2.2 {
+ db eval {DELETE FROM t2;}
+ set rc [catch {
+ db eval {SELECT * FROM t1} {
+ db eval {
+ SAVEPOINT x2;
+ INSERT INTO t2 VALUES($a,$b,$c);
+ ROLLBACK TO x2;
+ }
+ }
+ } msg]
+ list $rc $msg [db eval {SELECT * FROM t2}]
+} {1 {callback requested query abort} {}}
+
+finish_test
diff --git a/test/schema5.test b/test/schema5.test
new file mode 100644
index 0000000..6dea5e8
--- /dev/null
+++ b/test/schema5.test
@@ -0,0 +1,69 @@
+# 2010 September 28
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# This file checks corner cases in the CREATE TABLE syntax to make
+# sure that legacy syntax (syntax that is disallowed according to the
+# syntax diagrams) is still accepted, so that older databases that use
+# that syntax can still be read.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+# Table constraints should be separated by commas, but they do not have
+# to be.
+#
+do_test schema5-1.1 {
+ db eval {
+ CREATE TABLE t1(a,b,c, PRIMARY KEY(a) UNIQUE (a) CONSTRAINT one);
+ INSERT INTO t1 VALUES(1,2,3);
+ SELECT * FROM t1;
+ }
+} {1 2 3}
+do_test schema5-1.2 {
+ catchsql {INSERT INTO t1 VALUES(1,3,4);}
+} {1 {column a is not unique}}
+do_test schema5-1.3 {
+ db eval {
+ DROP TABLE t1;
+ CREATE TABLE t1(a,b,c,
+ CONSTRAINT one PRIMARY KEY(a) CONSTRAINT two CHECK(b<10) UNIQUE(b)
+ CONSTRAINT three
+ );
+ INSERT INTO t1 VALUES(1,2,3);
+ SELECT * FROM t1;
+ }
+} {1 2 3}
+do_test schema5-1.4 {
+ catchsql {INSERT INTO t1 VALUES(10,11,12);}
+} {1 {constraint two failed}}
+do_test schema5-1.5 {
+ db eval {
+ DROP TABLE t1;
+ CREATE TABLE t1(a,b,c,
+ UNIQUE(a) CONSTRAINT one,
+ PRIMARY KEY(b,c) CONSTRAINT two
+ );
+ INSERT INTO t1 VALUES(1,2,3);
+ }
+} {}
+do_test schema5-1.6 {
+ catchsql {INSERT INTO t1 VALUES(1,3,4)}
+} {1 {column a is not unique}}
+do_test schema5-1.7 {
+ catchsql {INSERT INTO t1 VALUES(10,2,3)}
+} {1 {columns b, c are not unique}}
+
+
+
+
+
+finish_test
diff --git a/test/select1.test b/test/select1.test
index 73b0e40..852b52e 100644
--- a/test/select1.test
+++ b/test/select1.test
@@ -1066,5 +1066,11 @@ if {[db one {PRAGMA locking_mode}]=="normal"} {
execsql { SELECT 2 IN (SELECT a FROM t1) }
} {1}
}
+
+# Crash bug reported on the mailing list on 2012-02-23
+#
+do_test select1-16.1 {
+ catchsql {SELECT 1 FROM (SELECT *)}
+} {1 {no tables specified}}
finish_test
diff --git a/test/select4.test b/test/select4.test
index dff0b90..e205b37 100644
--- a/test/select4.test
+++ b/test/select4.test
@@ -805,4 +805,23 @@ do_test select4-12.1 {
} ;# ifcapable compound
+
+# Ticket [3557ad65a076c] - Incorrect DISTINCT processing with an
+# indexed query using IN.
+#
+do_test select4-13.1 {
+ sqlite3 db test.db
+ db eval {
+ CREATE TABLE t13(a,b);
+ INSERT INTO t13 VALUES(1,1);
+ INSERT INTO t13 VALUES(2,1);
+ INSERT INTO t13 VALUES(3,1);
+ INSERT INTO t13 VALUES(2,2);
+ INSERT INTO t13 VALUES(3,2);
+ INSERT INTO t13 VALUES(4,2);
+ CREATE INDEX t13ab ON t13(a,b);
+ SELECT DISTINCT b from t13 WHERE a IN (1,2,3);
+ }
+} {1 2}
+
finish_test
diff --git a/test/select9.test b/test/select9.test
index 085dee0..9f54014 100644
--- a/test/select9.test
+++ b/test/select9.test
@@ -415,5 +415,40 @@ do_test select9-4.X {
}
} {}
+# Testing to make sure that queries involving a view of a compound select
+# are planned efficiently. This detects a problem reported on the mailing
+# list on 2012-04-26. See
+#
+# http://www.mail-archive.com/sqlite-users%40sqlite.org/msg69746.html
+#
+# For additional information.
+#
+do_test select9-5.1 {
+ db eval {
+ CREATE TABLE t51(x, y);
+ CREATE TABLE t52(x, y);
+ CREATE VIEW v5 as
+ SELECT x, y FROM t51
+ UNION ALL
+ SELECT x, y FROM t52;
+ CREATE INDEX t51x ON t51(x);
+ CREATE INDEX t52x ON t52(x);
+ EXPLAIN QUERY PLAN
+ SELECT * FROM v5 WHERE x='12345' ORDER BY y;
+ }
+} {~/SCAN TABLE/} ;# Uses indices with "*"
+do_test select9-5.2 {
+ db eval {
+ EXPLAIN QUERY PLAN
+ SELECT x, y FROM v5 WHERE x='12345' ORDER BY y;
+ }
+} {~/SCAN TABLE/} ;# Uses indices with "x, y"
+do_test select9-5.3 {
+ db eval {
+ EXPLAIN QUERY PLAN
+ SELECT x, y FROM v5 WHERE +x='12345' ORDER BY y;
+ }
+} {/SCAN TABLE/} ;# Full table scan if the "+x" prevents index usage.
+
finish_test
diff --git a/test/selectB.test b/test/selectB.test
index b9d979a..05ec9c6 100644
--- a/test/selectB.test
+++ b/test/selectB.test
@@ -194,19 +194,28 @@ do_test selectB-3.0 {
}
} {}
-for {set ii 3} {$ii <= 4} {incr ii} {
+for {set ii 3} {$ii <= 6} {incr ii} {
- if {$ii == 4} {
- do_test selectB-4.0 {
- execsql {
- CREATE INDEX i1 ON t1(a);
- CREATE INDEX i2 ON t1(b);
- CREATE INDEX i3 ON t1(c);
- CREATE INDEX i4 ON t2(d);
- CREATE INDEX i5 ON t2(e);
- CREATE INDEX i6 ON t2(f);
- }
- } {}
+ switch $ii {
+ 4 {
+ optimization_control db query-flattener off
+ }
+ 5 {
+ optimization_control db query-flattener on
+ do_test selectB-5.0 {
+ execsql {
+ CREATE INDEX i1 ON t1(a);
+ CREATE INDEX i2 ON t1(b);
+ CREATE INDEX i3 ON t1(c);
+ CREATE INDEX i4 ON t2(d);
+ CREATE INDEX i5 ON t2(e);
+ CREATE INDEX i6 ON t2(f);
+ }
+ } {}
+ }
+ 6 {
+ optimization_control db query-flattener off
+ }
}
do_test selectB-$ii.1 {
@@ -371,11 +380,47 @@ for {set ii 3} {$ii <= 4} {incr ii} {
}
} {2 4 6 3 6 9 8 10 12 12 15 18 14 16 18 21 24 27}
- do_test selectB-$ii.21 {
+ do_test selectB-$ii.22 {
execsql {
SELECT * FROM (SELECT 345 UNION ALL SELECT d FROM t2) ORDER BY 1;
}
} {3 12 21 345}
+
+ do_test selectB-$ii.23 {
+ execsql {
+ SELECT x, y FROM (
+ SELECT a AS x, b AS y FROM t1
+ UNION ALL
+ SELECT a*10 + 0.1, f*10 + 0.1 FROM t1 JOIN t2 ON (c=d)
+ UNION ALL
+ SELECT a*100, b*100 FROM t1
+ ) ORDER BY 1;
+ }
+ } {2 4 8 10 14 16 80.1 180.1 200 400 800 1000 1400 1600}
+
+ do_test selectB-$ii.24 {
+ execsql {
+ SELECT x, y FROM (
+ SELECT a AS x, b AS y FROM t1
+ UNION ALL
+ SELECT a*10 + 0.1, f*10 + 0.1 FROM t1 LEFT JOIN t2 ON (c=d)
+ UNION ALL
+ SELECT a*100, b*100 FROM t1
+ ) ORDER BY 1;
+ }
+ } {2 4 8 10 14 16 20.1 {} 80.1 180.1 140.1 {} 200 400 800 1000 1400 1600}
+
+ do_test selectB-$ii.25 {
+ execsql {
+ SELECT x+y FROM (
+ SELECT a AS x, b AS y FROM t1
+ UNION ALL
+ SELECT a*10 + 0.1, f*10 + 0.1 FROM t1 LEFT JOIN t2 ON (c=d)
+ UNION ALL
+ SELECT a*100, b*100 FROM t1
+ ) WHERE y+x NOT NULL ORDER BY 1;
+ }
+ } {6 18 30 260.2 600 1800 3000}
}
finish_test
diff --git a/test/selectC.test b/test/selectC.test
index fd38405..dedac41 100644
--- a/test/selectC.test
+++ b/test/selectC.test
@@ -151,7 +151,7 @@ do_test selectC-1.14.2 {
# The following query used to leak memory. Verify that has been fixed.
#
-ifcapable trigger {
+ifcapable trigger&&compound {
do_test selectC-2.1 {
catchsql {
CREATE TABLE t21a(a,b);
diff --git a/test/shared2.test b/test/shared2.test
index d40c9a2..5bde8cf 100644
--- a/test/shared2.test
+++ b/test/shared2.test
@@ -79,48 +79,6 @@ do_test shared2-1.3 {
list $a $count
} {32 64}
-#---------------------------------------------------------------------------
-# These tests, shared2.2.*, test the outcome when data is added to or
-# removed from a table due to a rollback while a read-uncommitted
-# cursor is scanning it.
-#
-do_test shared2-2.1 {
- execsql {
- INSERT INTO numbers VALUES(1, 'Medium length text field');
- INSERT INTO numbers VALUES(2, 'Medium length text field');
- INSERT INTO numbers VALUES(3, 'Medium length text field');
- INSERT INTO numbers VALUES(4, 'Medium length text field');
- BEGIN;
- DELETE FROM numbers WHERE (a%2)=0;
- } db1
- set res [list]
- db2 eval {
- SELECT a FROM numbers ORDER BY a;
- } {
- lappend res $a
- if {$a==3} {
- execsql {ROLLBACK} db1
- }
- }
- set res
-} {1 3 4}
-do_test shared2-2.2 {
- execsql {
- BEGIN;
- INSERT INTO numbers VALUES(5, 'Medium length text field');
- INSERT INTO numbers VALUES(6, 'Medium length text field');
- } db1
- set res [list]
- db2 eval {
- SELECT a FROM numbers ORDER BY a;
- } {
- lappend res $a
- if {$a==5} {
- execsql {ROLLBACK} db1
- }
- }
- set res
-} {1 2 3 4 5}
db1 close
db2 close
diff --git a/test/shell1.test b/test/shell1.test
new file mode 100644
index 0000000..0cafc35
--- /dev/null
+++ b/test/shell1.test
@@ -0,0 +1,733 @@
+# 2009 Nov 11
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# The focus of this file is testing the CLI shell tool.
+#
+#
+
+# Test plan:
+#
+# shell1-1.*: Basic command line option handling.
+# shell1-2.*: Basic "dot" command token parsing.
+# shell1-3.*: Basic test that "dot" command can be called.
+#
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+if {$tcl_platform(platform)=="windows"} {
+ set CLI "sqlite3.exe"
+} else {
+ set CLI "./sqlite3"
+}
+if {![file executable $CLI]} {
+ finish_test
+ return
+}
+db close
+forcedelete test.db test.db-journal test.db-wal
+sqlite3 db test.db
+
+#----------------------------------------------------------------------------
+# Test cases shell1-1.*: Basic command line option handling.
+#
+
+# invalid option
+do_test shell1-1.1.1 {
+ set res [catchcmd "-bad test.db" ""]
+ set rc [lindex $res 0]
+ list $rc \
+ [regexp {Error: unknown option: -bad} $res]
+} {1 1}
+# error on extra options
+do_test shell1-1.1.2 {
+ set res [catchcmd "-bad test.db \"select 3\" \"select 4\"" ""]
+ set rc [lindex $res 0]
+ list $rc \
+ [regexp {Error: too many options: "select 4"} $res]
+} {1 1}
+# error on extra options
+do_test shell1-1.1.3 {
+ set res [catchcmd "-bad FOO test.db BAD" ".quit"]
+ set rc [lindex $res 0]
+ list $rc \
+ [regexp {Error: too many options: "BAD"} $res]
+} {1 1}
+
+# -help
+do_test shell1-1.2.1 {
+ set res [catchcmd "-help test.db" ""]
+ set rc [lindex $res 0]
+ list $rc \
+ [regexp {Usage} $res] \
+ [regexp {\-init} $res] \
+ [regexp {\-version} $res]
+} {1 1 1 1}
+
+# -init filename read/process named file
+do_test shell1-1.3.1 {
+ catchcmd "-init FOO test.db" ""
+} {0 {}}
+do_test shell1-1.3.2 {
+ set res [catchcmd "-init FOO test.db .quit BAD" ""]
+ set rc [lindex $res 0]
+ list $rc \
+ [regexp {Error: too many options: "BAD"} $res]
+} {1 1}
+
+# -echo print commands before execution
+do_test shell1-1.4.1 {
+ catchcmd "-echo test.db" ""
+} {0 {}}
+
+# -[no]header turn headers on or off
+do_test shell1-1.5.1 {
+ catchcmd "-header test.db" ""
+} {0 {}}
+do_test shell1-1.5.2 {
+ catchcmd "-noheader test.db" ""
+} {0 {}}
+
+# -bail stop after hitting an error
+do_test shell1-1.6.1 {
+ catchcmd "-bail test.db" ""
+} {0 {}}
+
+# -interactive force interactive I/O
+do_test shell1-1.7.1 {
+ set res [catchcmd "-interactive test.db" ".quit"]
+ set rc [lindex $res 0]
+ list $rc \
+ [regexp {SQLite version} $res] \
+ [regexp {Enter SQL statements} $res]
+} {0 1 1}
+
+# -batch force batch I/O
+do_test shell1-1.8.1 {
+ catchcmd "-batch test.db" ""
+} {0 {}}
+
+# -column set output mode to 'column'
+do_test shell1-1.9.1 {
+ catchcmd "-column test.db" ""
+} {0 {}}
+
+# -csv set output mode to 'csv'
+do_test shell1-1.10.1 {
+ catchcmd "-csv test.db" ""
+} {0 {}}
+
+# -html set output mode to HTML
+do_test shell1-1.11.1 {
+ catchcmd "-html test.db" ""
+} {0 {}}
+
+# -line set output mode to 'line'
+do_test shell1-1.12.1 {
+ catchcmd "-line test.db" ""
+} {0 {}}
+
+# -list set output mode to 'list'
+do_test shell1-1.13.1 {
+ catchcmd "-list test.db" ""
+} {0 {}}
+
+# -separator 'x' set output field separator (|)
+do_test shell1-1.14.1 {
+ catchcmd "-separator 'x' test.db" ""
+} {0 {}}
+do_test shell1-1.14.2 {
+ catchcmd "-separator x test.db" ""
+} {0 {}}
+do_test shell1-1.14.3 {
+ set res [catchcmd "-separator" ""]
+ set rc [lindex $res 0]
+ list $rc \
+ [regexp {Error: missing argument for option: -separator} $res]
+} {1 1}
+
+# -stats print memory stats before each finalize
+do_test shell1-1.14b.1 {
+ catchcmd "-stats test.db" ""
+} {0 {}}
+
+# -nullvalue 'text' set text string for NULL values
+do_test shell1-1.15.1 {
+ catchcmd "-nullvalue 'x' test.db" ""
+} {0 {}}
+do_test shell1-1.15.2 {
+ catchcmd "-nullvalue x test.db" ""
+} {0 {}}
+do_test shell1-1.15.3 {
+ set res [catchcmd "-nullvalue" ""]
+ set rc [lindex $res 0]
+ list $rc \
+ [regexp {Error: missing argument for option: -nullvalue} $res]
+} {1 1}
+
+# -version show SQLite version
+do_test shell1-1.16.1 {
+ set x [catchcmd "-version test.db" ""]
+} {/3.[0-9.]+ 20\d\d-[01]\d-\d\d \d\d:\d\d:\d\d [0-9a-f]+/}
+
+#----------------------------------------------------------------------------
+# Test cases shell1-2.*: Basic "dot" command token parsing.
+#
+
+# check first token handling
+do_test shell1-2.1.1 {
+ catchcmd "test.db" ".foo"
+} {1 {Error: unknown command or invalid arguments: "foo". Enter ".help" for help}}
+do_test shell1-2.1.2 {
+ catchcmd "test.db" ".\"foo OFF\""
+} {1 {Error: unknown command or invalid arguments: "foo OFF". Enter ".help" for help}}
+do_test shell1-2.1.3 {
+ catchcmd "test.db" ".\'foo OFF\'"
+} {1 {Error: unknown command or invalid arguments: "foo OFF". Enter ".help" for help}}
+
+# unbalanced quotes
+do_test shell1-2.2.1 {
+ catchcmd "test.db" ".\"foo OFF"
+} {1 {Error: unknown command or invalid arguments: "foo OFF". Enter ".help" for help}}
+do_test shell1-2.2.2 {
+ catchcmd "test.db" ".\'foo OFF"
+} {1 {Error: unknown command or invalid arguments: "foo OFF". Enter ".help" for help}}
+do_test shell1-2.2.3 {
+ catchcmd "test.db" ".explain \"OFF"
+} {0 {}}
+do_test shell1-2.2.4 {
+ catchcmd "test.db" ".explain \'OFF"
+} {0 {}}
+do_test shell1-2.2.5 {
+ catchcmd "test.db" ".mode \"insert FOO"
+} {1 {Error: mode should be one of: column csv html insert line list tabs tcl}}
+do_test shell1-2.2.6 {
+ catchcmd "test.db" ".mode \'insert FOO"
+} {1 {Error: mode should be one of: column csv html insert line list tabs tcl}}
+
+# check multiple tokens, and quoted tokens
+do_test shell1-2.3.1 {
+ catchcmd "test.db" ".explain 1"
+} {0 {}}
+do_test shell1-2.3.2 {
+ catchcmd "test.db" ".explain on"
+} {0 {}}
+do_test shell1-2.3.3 {
+ catchcmd "test.db" ".explain \"1 2 3\""
+} {0 {}}
+do_test shell1-2.3.4 {
+ catchcmd "test.db" ".explain \"OFF\""
+} {0 {}}
+do_test shell1-2.3.5 {
+ catchcmd "test.db" ".\'explain\' \'OFF\'"
+} {0 {}}
+do_test shell1-2.3.6 {
+ catchcmd "test.db" ".explain \'OFF\'"
+} {0 {}}
+do_test shell1-2.3.7 {
+ catchcmd "test.db" ".\'explain\' \'OFF\'"
+} {0 {}}
+
+# check quoted args are unquoted
+do_test shell1-2.4.1 {
+ catchcmd "test.db" ".mode FOO"
+} {1 {Error: mode should be one of: column csv html insert line list tabs tcl}}
+do_test shell1-2.4.2 {
+ catchcmd "test.db" ".mode csv"
+} {0 {}}
+do_test shell1-2.4.2 {
+ catchcmd "test.db" ".mode \"csv\""
+} {0 {}}
+
+
+#----------------------------------------------------------------------------
+# Test cases shell1-3.*: Basic test that "dot" command can be called.
+#
+
+# .backup ?DB? FILE Backup DB (default "main") to FILE
+do_test shell1-3.1.1 {
+ catchcmd "test.db" ".backup"
+} {1 {Error: unknown command or invalid arguments: "backup". Enter ".help" for help}}
+do_test shell1-3.1.2 {
+ catchcmd "test.db" ".backup FOO"
+} {0 {}}
+do_test shell1-3.1.3 {
+ catchcmd "test.db" ".backup FOO BAR"
+} {1 {Error: unknown database FOO}}
+do_test shell1-3.1.4 {
+ # too many arguments
+ catchcmd "test.db" ".backup FOO BAR BAD"
+} {1 {Error: unknown command or invalid arguments: "backup". Enter ".help" for help}}
+
+# .bail ON|OFF Stop after hitting an error. Default OFF
+do_test shell1-3.2.1 {
+ catchcmd "test.db" ".bail"
+} {1 {Error: unknown command or invalid arguments: "bail". Enter ".help" for help}}
+do_test shell1-3.2.2 {
+ catchcmd "test.db" ".bail ON"
+} {0 {}}
+do_test shell1-3.2.3 {
+ catchcmd "test.db" ".bail OFF"
+} {0 {}}
+do_test shell1-3.2.4 {
+ # too many arguments
+ catchcmd "test.db" ".bail OFF BAD"
+} {1 {Error: unknown command or invalid arguments: "bail". Enter ".help" for help}}
+
+# .databases List names and files of attached databases
+do_test shell1-3.3.1 {
+ catchcmd "-csv test.db" ".databases"
+} "/0 +.*main +[string map {/ .} [string range [pwd] 0 10]].*/"
+do_test shell1-3.3.2 {
+ # too many arguments
+ catchcmd "test.db" ".databases BAD"
+} {1 {Error: unknown command or invalid arguments: "databases". Enter ".help" for help}}
+
+# .dump ?TABLE? ... Dump the database in an SQL text format
+# If TABLE specified, only dump tables matching
+# LIKE pattern TABLE.
+do_test shell1-3.4.1 {
+ set res [catchcmd "test.db" ".dump"]
+ list [regexp {BEGIN TRANSACTION;} $res] \
+ [regexp {COMMIT;} $res]
+} {1 1}
+do_test shell1-3.4.2 {
+ set res [catchcmd "test.db" ".dump FOO"]
+ list [regexp {BEGIN TRANSACTION;} $res] \
+ [regexp {COMMIT;} $res]
+} {1 1}
+do_test shell1-3.4.3 {
+ # too many arguments
+ catchcmd "test.db" ".dump FOO BAD"
+} {1 {Error: unknown command or invalid arguments: "dump". Enter ".help" for help}}
+
+# .echo ON|OFF Turn command echo on or off
+do_test shell1-3.5.1 {
+ catchcmd "test.db" ".echo"
+} {1 {Error: unknown command or invalid arguments: "echo". Enter ".help" for help}}
+do_test shell1-3.5.2 {
+ catchcmd "test.db" ".echo ON"
+} {0 {}}
+do_test shell1-3.5.3 {
+ catchcmd "test.db" ".echo OFF"
+} {0 {}}
+do_test shell1-3.5.4 {
+ # too many arguments
+ catchcmd "test.db" ".echo OFF BAD"
+} {1 {Error: unknown command or invalid arguments: "echo". Enter ".help" for help}}
+
+# .exit Exit this program
+do_test shell1-3.6.1 {
+ catchcmd "test.db" ".exit"
+} {0 {}}
+do_test shell1-3.6.2 {
+ # too many arguments
+ catchcmd "test.db" ".exit BAD"
+} {1 {Error: unknown command or invalid arguments: "exit". Enter ".help" for help}}
+
+# .explain ON|OFF Turn output mode suitable for EXPLAIN on or off.
+do_test shell1-3.7.1 {
+ catchcmd "test.db" ".explain"
+ # explain is the exception to the booleans. without an option, it turns it on.
+} {0 {}}
+do_test shell1-3.7.2 {
+ catchcmd "test.db" ".explain ON"
+} {0 {}}
+do_test shell1-3.7.3 {
+ catchcmd "test.db" ".explain OFF"
+} {0 {}}
+do_test shell1-3.7.4 {
+ # too many arguments
+ catchcmd "test.db" ".explain OFF BAD"
+} {1 {Error: unknown command or invalid arguments: "explain". Enter ".help" for help}}
+
+
+# .header(s) ON|OFF Turn display of headers on or off
+do_test shell1-3.9.1 {
+ catchcmd "test.db" ".header"
+} {1 {Error: unknown command or invalid arguments: "header". Enter ".help" for help}}
+do_test shell1-3.9.2 {
+ catchcmd "test.db" ".header ON"
+} {0 {}}
+do_test shell1-3.9.3 {
+ catchcmd "test.db" ".header OFF"
+} {0 {}}
+do_test shell1-3.9.4 {
+ # too many arguments
+ catchcmd "test.db" ".header OFF BAD"
+} {1 {Error: unknown command or invalid arguments: "header". Enter ".help" for help}}
+
+do_test shell1-3.9.5 {
+ catchcmd "test.db" ".headers"
+} {1 {Error: unknown command or invalid arguments: "headers". Enter ".help" for help}}
+do_test shell1-3.9.6 {
+ catchcmd "test.db" ".headers ON"
+} {0 {}}
+do_test shell1-3.9.7 {
+ catchcmd "test.db" ".headers OFF"
+} {0 {}}
+do_test shell1-3.9.8 {
+ # too many arguments
+ catchcmd "test.db" ".headers OFF BAD"
+} {1 {Error: unknown command or invalid arguments: "headers". Enter ".help" for help}}
+
+# .help Show this message
+do_test shell1-3.10.1 {
+ set res [catchcmd "test.db" ".help"]
+ # look for a few of the possible help commands
+ list [regexp {.help} $res] \
+ [regexp {.quit} $res] \
+ [regexp {.show} $res]
+} {1 1 1}
+do_test shell1-3.10.2 {
+ # we allow .help to take extra args (it is help after all)
+ set res [catchcmd "test.db" ".help BAD"]
+ # look for a few of the possible help commands
+ list [regexp {.help} $res] \
+ [regexp {.quit} $res] \
+ [regexp {.show} $res]
+} {1 1 1}
+
+# .import FILE TABLE Import data from FILE into TABLE
+do_test shell1-3.11.1 {
+ catchcmd "test.db" ".import"
+} {1 {Error: unknown command or invalid arguments: "import". Enter ".help" for help}}
+do_test shell1-3.11.2 {
+ catchcmd "test.db" ".import FOO"
+} {1 {Error: unknown command or invalid arguments: "import". Enter ".help" for help}}
+do_test shell1-3.11.2b {
+ catchcmd "test.db" ".import FOO BAR"
+} {1 {Error: no such table: BAR}}
+do_test shell1-3.11.3 {
+ # too many arguments
+ catchcmd "test.db" ".import FOO BAR BAD"
+} {1 {Error: unknown command or invalid arguments: "import". Enter ".help" for help}}
+
+# .indices ?TABLE? Show names of all indices
+# If TABLE specified, only show indices for tables
+# matching LIKE pattern TABLE.
+do_test shell1-3.12.1 {
+ catchcmd "test.db" ".indices"
+} {0 {}}
+do_test shell1-3.12.2 {
+ catchcmd "test.db" ".indices FOO"
+} {0 {}}
+do_test shell1-3.12.3 {
+ # too many arguments
+ catchcmd "test.db" ".indices FOO BAD"
+} {1 {Error: unknown command or invalid arguments: "indices". Enter ".help" for help}}
+
+# .mode MODE ?TABLE? Set output mode where MODE is one of:
+# csv Comma-separated values
+# column Left-aligned columns. (See .width)
+# html HTML <table> code
+# insert SQL insert statements for TABLE
+# line One value per line
+# list Values delimited by .separator string
+# tabs Tab-separated values
+# tcl TCL list elements
+do_test shell1-3.13.1 {
+ catchcmd "test.db" ".mode"
+} {1 {Error: unknown command or invalid arguments: "mode". Enter ".help" for help}}
+do_test shell1-3.13.2 {
+ catchcmd "test.db" ".mode FOO"
+} {1 {Error: mode should be one of: column csv html insert line list tabs tcl}}
+do_test shell1-3.13.3 {
+ catchcmd "test.db" ".mode csv"
+} {0 {}}
+do_test shell1-3.13.4 {
+ catchcmd "test.db" ".mode column"
+} {0 {}}
+do_test shell1-3.13.5 {
+ catchcmd "test.db" ".mode html"
+} {0 {}}
+do_test shell1-3.13.6 {
+ catchcmd "test.db" ".mode insert"
+} {0 {}}
+do_test shell1-3.13.7 {
+ catchcmd "test.db" ".mode line"
+} {0 {}}
+do_test shell1-3.13.8 {
+ catchcmd "test.db" ".mode list"
+} {0 {}}
+do_test shell1-3.13.9 {
+ catchcmd "test.db" ".mode tabs"
+} {0 {}}
+do_test shell1-3.13.10 {
+ catchcmd "test.db" ".mode tcl"
+} {0 {}}
+do_test shell1-3.13.11 {
+ # too many arguments
+ catchcmd "test.db" ".mode tcl BAD"
+} {1 {Error: invalid arguments: "BAD". Enter ".help" for help}}
+
+# don't allow partial mode type matches
+do_test shell1-3.13.12 {
+ catchcmd "test.db" ".mode l"
+} {1 {Error: mode should be one of: column csv html insert line list tabs tcl}}
+do_test shell1-3.13.13 {
+ catchcmd "test.db" ".mode li"
+} {1 {Error: mode should be one of: column csv html insert line list tabs tcl}}
+do_test shell1-3.13.14 {
+ catchcmd "test.db" ".mode lin"
+} {1 {Error: mode should be one of: column csv html insert line list tabs tcl}}
+
+# .nullvalue STRING Print STRING in place of NULL values
+do_test shell1-3.14.1 {
+ catchcmd "test.db" ".nullvalue"
+} {1 {Error: unknown command or invalid arguments: "nullvalue". Enter ".help" for help}}
+do_test shell1-3.14.2 {
+ catchcmd "test.db" ".nullvalue FOO"
+} {0 {}}
+do_test shell1-3.14.3 {
+ # too many arguments
+ catchcmd "test.db" ".nullvalue FOO BAD"
+} {1 {Error: unknown command or invalid arguments: "nullvalue". Enter ".help" for help}}
+
+# .output FILENAME Send output to FILENAME
+do_test shell1-3.15.1 {
+ catchcmd "test.db" ".output"
+} {1 {Error: unknown command or invalid arguments: "output". Enter ".help" for help}}
+do_test shell1-3.15.2 {
+ catchcmd "test.db" ".output FOO"
+} {0 {}}
+do_test shell1-3.15.3 {
+ # too many arguments
+ catchcmd "test.db" ".output FOO BAD"
+} {1 {Error: unknown command or invalid arguments: "output". Enter ".help" for help}}
+
+# .output stdout Send output to the screen
+do_test shell1-3.16.1 {
+ catchcmd "test.db" ".output stdout"
+} {0 {}}
+do_test shell1-3.16.2 {
+ # too many arguments
+ catchcmd "test.db" ".output stdout BAD"
+} {1 {Error: unknown command or invalid arguments: "output". Enter ".help" for help}}
+
+# .prompt MAIN CONTINUE Replace the standard prompts
+do_test shell1-3.17.1 {
+ catchcmd "test.db" ".prompt"
+} {1 {Error: unknown command or invalid arguments: "prompt". Enter ".help" for help}}
+do_test shell1-3.17.2 {
+ catchcmd "test.db" ".prompt FOO"
+} {0 {}}
+do_test shell1-3.17.3 {
+ catchcmd "test.db" ".prompt FOO BAR"
+} {0 {}}
+do_test shell1-3.17.4 {
+ # too many arguments
+ catchcmd "test.db" ".prompt FOO BAR BAD"
+} {1 {Error: unknown command or invalid arguments: "prompt". Enter ".help" for help}}
+
+# .quit Exit this program
+do_test shell1-3.18.1 {
+ catchcmd "test.db" ".quit"
+} {0 {}}
+do_test shell1-3.18.2 {
+ # too many arguments
+ catchcmd "test.db" ".quit BAD"
+} {1 {Error: unknown command or invalid arguments: "quit". Enter ".help" for help}}
+
+# .read FILENAME Execute SQL in FILENAME
+do_test shell1-3.19.1 {
+ catchcmd "test.db" ".read"
+} {1 {Error: unknown command or invalid arguments: "read". Enter ".help" for help}}
+do_test shell1-3.19.2 {
+ file delete -force FOO
+ catchcmd "test.db" ".read FOO"
+} {1 {Error: cannot open "FOO"}}
+do_test shell1-3.19.3 {
+ # too many arguments
+ catchcmd "test.db" ".read FOO BAD"
+} {1 {Error: unknown command or invalid arguments: "read". Enter ".help" for help}}
+
+# .restore ?DB? FILE Restore content of DB (default "main") from FILE
+do_test shell1-3.20.1 {
+ catchcmd "test.db" ".restore"
+} {1 {Error: unknown command or invalid arguments: "restore". Enter ".help" for help}}
+do_test shell1-3.20.2 {
+ catchcmd "test.db" ".restore FOO"
+} {0 {}}
+do_test shell1-3.20.3 {
+ catchcmd "test.db" ".restore FOO BAR"
+} {1 {Error: unknown database FOO}}
+do_test shell1-3.20.4 {
+ # too many arguments
+ catchcmd "test.db" ".restore FOO BAR BAD"
+} {1 {Error: unknown command or invalid arguments: "restore". Enter ".help" for help}}
+
+# .schema ?TABLE? Show the CREATE statements
+# If TABLE specified, only show tables matching
+# LIKE pattern TABLE.
+do_test shell1-3.21.1 {
+ catchcmd "test.db" ".schema"
+} {0 {}}
+do_test shell1-3.21.2 {
+ catchcmd "test.db" ".schema FOO"
+} {0 {}}
+do_test shell1-3.21.3 {
+ # too many arguments
+ catchcmd "test.db" ".schema FOO BAD"
+} {1 {Error: unknown command or invalid arguments: "schema". Enter ".help" for help}}
+
+do_test shell1-3.21.4 {
+ catchcmd "test.db" {
+ CREATE TABLE t1(x);
+ CREATE VIEW v2 AS SELECT x+1 AS y FROM t1;
+ CREATE VIEW v1 AS SELECT y+1 FROM v2;
+ }
+ catchcmd "test.db" ".schema"
+} {0 {CREATE TABLE t1(x);
+CREATE VIEW v2 AS SELECT x+1 AS y FROM t1;
+CREATE VIEW v1 AS SELECT y+1 FROM v2;}}
+db eval {DROP VIEW v1; DROP VIEW v2; DROP TABLE t1;}
+
+# .separator STRING Change separator used by output mode and .import
+do_test shell1-3.22.1 {
+ catchcmd "test.db" ".separator"
+} {1 {Error: unknown command or invalid arguments: "separator". Enter ".help" for help}}
+do_test shell1-3.22.2 {
+ catchcmd "test.db" ".separator FOO"
+} {0 {}}
+do_test shell1-3.22.3 {
+ # too many arguments
+ catchcmd "test.db" ".separator FOO BAD"
+} {1 {Error: unknown command or invalid arguments: "separator". Enter ".help" for help}}
+
+# .show Show the current values for various settings
+do_test shell1-3.23.1 {
+ set res [catchcmd "test.db" ".show"]
+ list [regexp {echo:} $res] \
+ [regexp {explain:} $res] \
+ [regexp {headers:} $res] \
+ [regexp {mode:} $res] \
+ [regexp {nullvalue:} $res] \
+ [regexp {output:} $res] \
+ [regexp {separator:} $res] \
+ [regexp {stats:} $res] \
+ [regexp {width:} $res]
+} {1 1 1 1 1 1 1 1 1}
+do_test shell1-3.23.2 {
+ # too many arguments
+ catchcmd "test.db" ".show BAD"
+} {1 {Error: unknown command or invalid arguments: "show". Enter ".help" for help}}
+
+# .stats ON|OFF Turn stats on or off
+do_test shell1-3.23b.1 {
+ catchcmd "test.db" ".stats"
+} {1 {Error: unknown command or invalid arguments: "stats". Enter ".help" for help}}
+do_test shell1-3.23b.2 {
+ catchcmd "test.db" ".stats ON"
+} {0 {}}
+do_test shell1-3.23b.3 {
+ catchcmd "test.db" ".stats OFF"
+} {0 {}}
+do_test shell1-3.23b.4 {
+ # too many arguments
+ catchcmd "test.db" ".stats OFF BAD"
+} {1 {Error: unknown command or invalid arguments: "stats". Enter ".help" for help}}
+
+# .tables ?TABLE? List names of tables
+# If TABLE specified, only list tables matching
+# LIKE pattern TABLE.
+do_test shell1-3.24.1 {
+ catchcmd "test.db" ".tables"
+} {0 {}}
+do_test shell1-3.24.2 {
+ catchcmd "test.db" ".tables FOO"
+} {0 {}}
+do_test shell1-3.24.3 {
+ # too many arguments
+ catchcmd "test.db" ".tables FOO BAD"
+} {1 {Error: unknown command or invalid arguments: "tables". Enter ".help" for help}}
+
+# .timeout MS Try opening locked tables for MS milliseconds
+do_test shell1-3.25.1 {
+ catchcmd "test.db" ".timeout"
+} {1 {Error: unknown command or invalid arguments: "timeout". Enter ".help" for help}}
+do_test shell1-3.25.2 {
+ catchcmd "test.db" ".timeout zzz"
+ # this should be treated the same as a '0' timeout
+} {0 {}}
+do_test shell1-3.25.3 {
+ catchcmd "test.db" ".timeout 1"
+} {0 {}}
+do_test shell1-3.25.4 {
+ # too many arguments
+ catchcmd "test.db" ".timeout 1 BAD"
+} {1 {Error: unknown command or invalid arguments: "timeout". Enter ".help" for help}}
+
+# .width NUM NUM ... Set column widths for "column" mode
+do_test shell1-3.26.1 {
+ catchcmd "test.db" ".width"
+} {1 {Error: unknown command or invalid arguments: "width". Enter ".help" for help}}
+do_test shell1-3.26.2 {
+ catchcmd "test.db" ".width xxx"
+ # this should be treated the same as a '0' width for col 1
+} {0 {}}
+do_test shell1-3.26.3 {
+ catchcmd "test.db" ".width xxx yyy"
+ # this should be treated the same as a '0' width for col 1 and 2
+} {0 {}}
+do_test shell1-3.26.4 {
+ catchcmd "test.db" ".width 1 1"
+ # this should be treated the same as a '1' width for col 1 and 2
+} {0 {}}
+
+# .timer ON|OFF Turn the CPU timer measurement on or off
+do_test shell1-3.27.1 {
+ catchcmd "test.db" ".timer"
+} {1 {Error: unknown command or invalid arguments: "timer". Enter ".help" for help}}
+do_test shell1-3.27.2 {
+ catchcmd "test.db" ".timer ON"
+} {0 {}}
+do_test shell1-3.27.3 {
+ catchcmd "test.db" ".timer OFF"
+} {0 {}}
+do_test shell1-3.27.4 {
+ # too many arguments
+ catchcmd "test.db" ".timer OFF BAD"
+} {1 {Error: unknown command or invalid arguments: "timer". Enter ".help" for help}}
+
+do_test shell1-3-28.1 {
+ catchcmd test.db \
+ ".log stdout\nSELECT coalesce(sqlite_log(123,'hello'),'456');"
+} "0 {(123) hello\n456}"
+
+# Test the output of the ".dump" command
+#
+do_test shell1-4.1 {
+ db eval {
+ CREATE TABLE t1(x);
+ INSERT INTO t1 VALUES(null), (1), (2.25), ('hello'), (x'807f');
+ }
+ catchcmd test.db {.dump}
+} {0 {PRAGMA foreign_keys=OFF;
+BEGIN TRANSACTION;
+CREATE TABLE t1(x);
+INSERT INTO "t1" VALUES(NULL);
+INSERT INTO "t1" VALUES(1);
+INSERT INTO "t1" VALUES(2.25);
+INSERT INTO "t1" VALUES('hello');
+INSERT INTO "t1" VALUES(X'807F');
+COMMIT;}}
+
+# Test the output of ".mode insert"
+#
+do_test shell1-4.2 {
+ catchcmd test.db ".mode insert t1\nselect * from t1;"
+} {0 {INSERT INTO t1 VALUES(NULL);
+INSERT INTO t1 VALUES(1);
+INSERT INTO t1 VALUES(2.25);
+INSERT INTO t1 VALUES('hello');
+INSERT INTO t1 VALUES(X'807f');}}
+
+
+finish_test
diff --git a/test/shell2.test b/test/shell2.test
new file mode 100644
index 0000000..8260932
--- /dev/null
+++ b/test/shell2.test
@@ -0,0 +1,197 @@
+# 2009 Nov 11
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# The focus of this file is testing the CLI shell tool.
+#
+# $Id: shell2.test,v 1.7 2009/07/17 16:54:48 shaneh Exp $
+#
+
+# Test plan:
+#
+# shell2-1.*: Misc. test of various tickets and reported errors.
+#
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+if {$tcl_platform(platform)=="windows"} {
+ set CLI "sqlite3.exe"
+} else {
+ set CLI "./sqlite3"
+}
+if {![file executable $CLI]} {
+ finish_test
+ return
+}
+db close
+forcedelete test.db test.db-journal test.db-wal
+sqlite3 db test.db
+
+
+#----------------------------------------------------------------------------
+# shell2-1.*: Misc. test of various tickets and reported errors.
+#
+
+# Batch mode not creating databases.
+# Reported on mailing list by Ken Zalewski.
+# Ticket [aeff892c57].
+do_test shell2-1.1.1 {
+ file delete -force foo.db
+ set rc [ catchcmd "-batch foo.db" "CREATE TABLE t1(a);" ]
+ set fexist [file exist foo.db]
+ list $rc $fexist
+} {{0 {}} 1}
+
+# Shell silently ignores extra parameters.
+# Ticket [f5cb008a65].
+do_test shell2-1.2.1 {
+ set rc [catch { eval exec $CLI \":memory:\" \"select 3\" \"select 4\" } msg]
+ list $rc \
+ [regexp {Error: too many options: "select 4"} $msg]
+} {1 1}
+
+# Test a problem reported on the mailing list. The shell was at one point
+# returning the generic SQLITE_ERROR message ("SQL error or missing database")
+# instead of the "too many levels..." message in the test below.
+#
+do_test shell2-1.3 {
+ catchcmd "-batch test.db" {
+ PRAGMA recursive_triggers = ON;
+ CREATE TABLE t5(a PRIMARY KEY, b, c);
+ INSERT INTO t5 VALUES(1, 2, 3);
+ CREATE TRIGGER au_tble AFTER UPDATE ON t5 BEGIN
+ UPDATE OR IGNORE t5 SET a = new.a, c = 10;
+ END;
+
+ UPDATE OR REPLACE t5 SET a = 4 WHERE a = 1;
+ }
+} {1 {Error: near line 9: too many levels of trigger recursion}}
+
+
+
+# Shell not echoing all commands with echo on.
+# Ticket [eb620916be].
+
+# Test with echo off
+# NB. whitespace is important
+do_test shell2-1.4.1 {
+ file delete -force foo.db
+ catchcmd "foo.db" {CREATE TABLE foo(a);
+INSERT INTO foo(a) VALUES(1);
+SELECT * FROM foo;}
+} {0 1}
+
+# Test with echo on using command line option
+# NB. whitespace is important
+do_test shell2-1.4.2 {
+ file delete -force foo.db
+ catchcmd "-echo foo.db" {CREATE TABLE foo(a);
+INSERT INTO foo(a) VALUES(1);
+SELECT * FROM foo;}
+} {0 {CREATE TABLE foo(a);
+INSERT INTO foo(a) VALUES(1);
+SELECT * FROM foo;
+1}}
+
+# Test with echo on using dot command
+# NB. whitespace is important
+do_test shell2-1.4.3 {
+ file delete -force foo.db
+ catchcmd "foo.db" {.echo ON
+CREATE TABLE foo(a);
+INSERT INTO foo(a) VALUES(1);
+SELECT * FROM foo;}
+} {0 {CREATE TABLE foo(a);
+INSERT INTO foo(a) VALUES(1);
+SELECT * FROM foo;
+1}}
+
+# Test with echo on using dot command and
+# turning off mid-processing.
+# NB. whitespace is important
+do_test shell2-1.4.4 {
+ file delete -force foo.db
+ catchcmd "foo.db" {.echo ON
+CREATE TABLE foo(a);
+.echo OFF
+INSERT INTO foo(a) VALUES(1);
+SELECT * FROM foo;}
+} {0 {CREATE TABLE foo(a);
+.echo OFF
+1}}
+
+# Test with echo on using dot command and
+# multiple commands per line.
+# NB. whitespace is important
+do_test shell2-1.4.5 {
+ file delete -force foo.db
+ catchcmd "foo.db" {.echo ON
+CREATE TABLE foo1(a);
+INSERT INTO foo1(a) VALUES(1);
+CREATE TABLE foo2(b);
+INSERT INTO foo2(b) VALUES(1);
+SELECT * FROM foo1; SELECT * FROM foo2;
+INSERT INTO foo1(a) VALUES(2); INSERT INTO foo2(b) VALUES(2);
+SELECT * FROM foo1; SELECT * FROM foo2;
+}
+} {0 {CREATE TABLE foo1(a);
+INSERT INTO foo1(a) VALUES(1);
+CREATE TABLE foo2(b);
+INSERT INTO foo2(b) VALUES(1);
+SELECT * FROM foo1;
+1
+SELECT * FROM foo2;
+1
+INSERT INTO foo1(a) VALUES(2);
+INSERT INTO foo2(b) VALUES(2);
+SELECT * FROM foo1;
+1
+2
+SELECT * FROM foo2;
+1
+2}}
+
+# Test with echo on and headers on using dot command and
+# multiple commands per line.
+# NB. whitespace is important
+do_test shell2-1.4.6 {
+ file delete -force foo.db
+ catchcmd "foo.db" {.echo ON
+.headers ON
+CREATE TABLE foo1(a);
+INSERT INTO foo1(a) VALUES(1);
+CREATE TABLE foo2(b);
+INSERT INTO foo2(b) VALUES(1);
+SELECT * FROM foo1; SELECT * FROM foo2;
+INSERT INTO foo1(a) VALUES(2); INSERT INTO foo2(b) VALUES(2);
+SELECT * FROM foo1; SELECT * FROM foo2;
+}
+} {0 {.headers ON
+CREATE TABLE foo1(a);
+INSERT INTO foo1(a) VALUES(1);
+CREATE TABLE foo2(b);
+INSERT INTO foo2(b) VALUES(1);
+SELECT * FROM foo1;
+a
+1
+SELECT * FROM foo2;
+b
+1
+INSERT INTO foo1(a) VALUES(2);
+INSERT INTO foo2(b) VALUES(2);
+SELECT * FROM foo1;
+a
+1
+2
+SELECT * FROM foo2;
+b
+1
+2}}
+
+finish_test
diff --git a/test/shell3.test b/test/shell3.test
new file mode 100644
index 0000000..d02177b
--- /dev/null
+++ b/test/shell3.test
@@ -0,0 +1,97 @@
+# 2009 Dec 16
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# The focus of this file is testing the CLI shell tool.
+#
+# $Id: shell3.test,v 1.7 2009/07/17 16:54:48 shaneh Exp $
+#
+
+# Test plan:
+#
+# shell3-1.*: Basic tests for running SQL statements from command line.
+# shell3-2.*: Basic tests for running SQL file from command line.
+#
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+if {$tcl_platform(platform)=="windows"} {
+ set CLI "sqlite3.exe"
+} else {
+ set CLI "./sqlite3"
+}
+if {![file executable $CLI]} {
+ finish_test
+ return
+}
+db close
+forcedelete test.db test.db-journal test.db-wal
+sqlite3 db test.db
+
+#----------------------------------------------------------------------------
+# shell3-1.*: Basic tests for running SQL statements from command line.
+#
+
+# Run SQL statement from command line
+do_test shell3-1.1 {
+ file delete -force foo.db
+ set rc [ catchcmd "foo.db \"CREATE TABLE t1(a);\"" ]
+ set fexist [file exist foo.db]
+ list $rc $fexist
+} {{0 {}} 1}
+do_test shell3-1.2 {
+ catchcmd "foo.db" ".tables"
+} {0 t1}
+do_test shell3-1.3 {
+ catchcmd "foo.db \"DROP TABLE t1;\""
+} {0 {}}
+do_test shell3-1.4 {
+ catchcmd "foo.db" ".tables"
+} {0 {}}
+do_test shell3-1.5 {
+ catchcmd "foo.db \"CREATE TABLE t1(a); DROP TABLE t1;\""
+} {0 {}}
+do_test shell3-1.6 {
+ catchcmd "foo.db" ".tables"
+} {0 {}}
+do_test shell3-1.7 {
+ catchcmd "foo.db \"CREATE TABLE\""
+} {1 {Error: near "TABLE": syntax error}}
+
+#----------------------------------------------------------------------------
+# shell3-2.*: Basic tests for running SQL file from command line.
+#
+
+# Run SQL file from command line
+do_test shell3-2.1 {
+ file delete -force foo.db
+ set rc [ catchcmd "foo.db" "CREATE TABLE t1(a);" ]
+ set fexist [file exist foo.db]
+ list $rc $fexist
+} {{0 {}} 1}
+do_test shell3-2.2 {
+ catchcmd "foo.db" ".tables"
+} {0 t1}
+do_test shell3-2.3 {
+ catchcmd "foo.db" "DROP TABLE t1;"
+} {0 {}}
+do_test shell3-2.4 {
+ catchcmd "foo.db" ".tables"
+} {0 {}}
+do_test shell3-2.5 {
+ catchcmd "foo.db" "CREATE TABLE t1(a); DROP TABLE t1;"
+} {0 {}}
+do_test shell3-2.6 {
+ catchcmd "foo.db" ".tables"
+} {0 {}}
+do_test shell3-2.7 {
+ catchcmd "foo.db" "CREATE TABLE"
+} {1 {Error: incomplete SQL: CREATE TABLE}}
+
+finish_test
diff --git a/test/shell4.test b/test/shell4.test
new file mode 100644
index 0000000..5af44c8
--- /dev/null
+++ b/test/shell4.test
@@ -0,0 +1,116 @@
+# 2010 July 28
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# The focus of this file is testing the CLI shell tool.
+# These tests are specific to the .stats command.
+#
+# $Id: shell4.test,v 1.7 2009/07/17 16:54:48 shaneh Exp $
+#
+
+# Test plan:
+#
+# shell4-1.*: Basic tests specific to the "stats" command.
+#
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+if {$tcl_platform(platform)=="windows"} {
+ set CLI "sqlite3.exe"
+} else {
+ set CLI "./sqlite3"
+}
+if {![file executable $CLI]} {
+ finish_test
+ return
+}
+db close
+forcedelete test.db test.db-journal test.db-wal
+sqlite3 db test.db
+
+#----------------------------------------------------------------------------
+# Test cases shell4-1.*: Tests specific to the "stats" command.
+#
+
+# should default to off
+do_test shell4-1.1.1 {
+ set res [catchcmd "test.db" ".show"]
+ list [regexp {stats: off} $res]
+} {1}
+
+do_test shell4-1.1.2 {
+ set res [catchcmd "test.db" ".show"]
+ list [regexp {stats: on} $res]
+} {0}
+
+# -stats should turn it on
+do_test shell4-1.2.1 {
+ set res [catchcmd "-stats test.db" ".show"]
+ list [regexp {stats: on} $res]
+} {1}
+
+do_test shell4-1.2.2 {
+ set res [catchcmd "-stats test.db" ".show"]
+ list [regexp {stats: off} $res]
+} {0}
+
+# .stats ON|OFF Turn stats on or off
+do_test shell4-1.3.1 {
+ catchcmd "test.db" ".stats"
+} {1 {Error: unknown command or invalid arguments: "stats". Enter ".help" for help}}
+do_test shell4-1.3.2 {
+ catchcmd "test.db" ".stats ON"
+} {0 {}}
+do_test shell4-1.3.3 {
+ catchcmd "test.db" ".stats OFF"
+} {0 {}}
+do_test shell4-1.3.4 {
+ # too many arguments
+ catchcmd "test.db" ".stats OFF BAD"
+} {1 {Error: unknown command or invalid arguments: "stats". Enter ".help" for help}}
+
+# NB. whitespace is important
+do_test shell4-1.4.1 {
+ set res [catchcmd "test.db" {.show}]
+ list [regexp {stats: off} $res]
+} {1}
+
+do_test shell4-1.4.2 {
+ set res [catchcmd "test.db" {.stats ON
+.show
+}]
+ list [regexp {stats: on} $res]
+} {1}
+
+do_test shell4-1.4.3 {
+ set res [catchcmd "test.db" {.stats OFF
+.show
+}]
+ list [regexp {stats: off} $res]
+} {1}
+
+# make sure stats not present when off
+do_test shell4-1.5.1 {
+ set res [catchcmd "test.db" {SELECT 1;}]
+ list [regexp {Memory Used} $res] \
+ [regexp {Heap Usage} $res] \
+ [regexp {Autoindex Inserts} $res]
+} {0 0 0}
+
+# make sure stats are present when on
+do_test shell4-1.5.2 {
+ set res [catchcmd "test.db" {.stats ON
+SELECT 1;
+}]
+ list [regexp {Memory Used} $res] \
+ [regexp {Heap Usage} $res] \
+ [regexp {Autoindex Inserts} $res]
+} {1 1 1}
+
+finish_test
diff --git a/test/shell5.test b/test/shell5.test
new file mode 100644
index 0000000..d90cedf
--- /dev/null
+++ b/test/shell5.test
@@ -0,0 +1,229 @@
+# 2010 August 4
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# The focus of this file is testing the CLI shell tool.
+# These tests are specific to the .import command.
+#
+# $Id: shell5.test,v 1.7 2009/07/17 16:54:48 shaneh Exp $
+#
+
+# Test plan:
+#
+# shell5-1.*: Basic tests specific to the ".import" command.
+#
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+if {$tcl_platform(platform)=="windows"} {
+ set CLI "sqlite3.exe"
+} else {
+ set CLI "./sqlite3"
+}
+if {![file executable $CLI]} {
+ finish_test
+ return
+}
+db close
+forcedelete test.db test.db-journal test.db-wal
+sqlite3 db test.db
+
+#----------------------------------------------------------------------------
+# Test cases shell5-1.*: Basic handling of the .import and .separator commands.
+#
+
+# .import FILE TABLE Import data from FILE into TABLE
+do_test shell5-1.1.1 {
+ catchcmd "test.db" ".import"
+} {1 {Error: unknown command or invalid arguments: "import". Enter ".help" for help}}
+do_test shell5-1.1.2 {
+ catchcmd "test.db" ".import FOO"
+} {1 {Error: unknown command or invalid arguments: "import". Enter ".help" for help}}
+do_test shell5-1.1.2b {
+ catchcmd "test.db" ".import FOO BAR"
+} {1 {Error: no such table: BAR}}
+do_test shell5-1.1.3 {
+ # too many arguments
+ catchcmd "test.db" ".import FOO BAR BAD"
+} {1 {Error: unknown command or invalid arguments: "import". Enter ".help" for help}}
+
+# .separator STRING Change separator used by output mode and .import
+do_test shell5-1.2.1 {
+ catchcmd "test.db" ".separator"
+} {1 {Error: unknown command or invalid arguments: "separator". Enter ".help" for help}}
+do_test shell5-1.2.2 {
+ catchcmd "test.db" ".separator FOO"
+} {0 {}}
+do_test shell5-1.2.3 {
+ # too many arguments
+ catchcmd "test.db" ".separator FOO BAD"
+} {1 {Error: unknown command or invalid arguments: "separator". Enter ".help" for help}}
+
+# separator should default to "|"
+do_test shell5-1.3.1 {
+ set res [catchcmd "test.db" ".show"]
+ list [regexp {separator: \"\|\"} $res]
+} {1}
+
+# set separator to different value.
+# check that .show reports new value
+do_test shell5-1.3.2 {
+ set res [catchcmd "test.db" {.separator ,
+.show}]
+ list [regexp {separator: \",\"} $res]
+} {1}
+
+# import file doesn't exist
+do_test shell5-1.4.1 {
+ file delete -force FOO
+ set res [catchcmd "test.db" {CREATE TABLE t1(a, b);
+.import FOO t1}]
+} {1 {Error: cannot open "FOO"}}
+
+# empty import file
+do_test shell5-1.4.2 {
+ file delete -force shell5.csv
+ set in [open shell5.csv w]
+ close $in
+ set res [catchcmd "test.db" {.import shell5.csv t1
+SELECT COUNT(*) FROM t1;}]
+} {0 0}
+
+# import file with 1 row, 1 column (expecting 2 cols)
+do_test shell5-1.4.3 {
+ set in [open shell5.csv w]
+ puts $in "1"
+ close $in
+ set res [catchcmd "test.db" {.import shell5.csv t1}]
+} {1 {Error: shell5.csv line 1: expected 2 columns of data but found 1}}
+
+# import file with 1 row, 3 columns (expecting 2 cols)
+do_test shell5-1.4.4 {
+ set in [open shell5.csv w]
+ puts $in "1|2|3"
+ close $in
+ set res [catchcmd "test.db" {.import shell5.csv t1}]
+} {1 {Error: shell5.csv line 1: expected 2 columns of data but found 3}}
+
+# import file with 1 row, 2 columns
+do_test shell5-1.4.5 {
+ set in [open shell5.csv w]
+ puts $in "1|2"
+ close $in
+ set res [catchcmd "test.db" {.import shell5.csv t1
+SELECT COUNT(*) FROM t1;}]
+} {0 1}
+
+# import file with 2 rows, 2 columns
+# note we end up with 3 rows because of the 1 row
+# imported above.
+do_test shell5-1.4.6 {
+ set in [open shell5.csv w]
+ puts $in "2|3"
+ puts $in "3|4"
+ close $in
+ set res [catchcmd "test.db" {.import shell5.csv t1
+SELECT COUNT(*) FROM t1;}]
+} {0 3}
+
+# import file with 1 row, 2 columns, using a comma
+do_test shell5-1.4.7 {
+ set in [open shell5.csv w]
+ puts $in "4,5"
+ close $in
+ set res [catchcmd "test.db" {.separator ,
+.import shell5.csv t1
+SELECT COUNT(*) FROM t1;}]
+} {0 4}
+
+# import file with 1 row, 2 columns, text data
+do_test shell5-1.4.8.1 {
+ set in [open shell5.csv w]
+ puts $in "5|Now is the time for all good men to come to the aid of their country."
+ close $in
+ set res [catchcmd "test.db" {.import shell5.csv t1
+SELECT COUNT(*) FROM t1;}]
+} {0 5}
+
+do_test shell5-1.4.8.2 {
+ catchcmd "test.db" {SELECT b FROM t1 WHERE a='5';}
+} {0 {Now is the time for all good men to come to the aid of their country.}}
+
+# import file with 1 row, 2 columns, quoted text data
+# note that currently sqlite doesn't support quoted fields, and
+# imports the entire field, quotes and all.
+do_test shell5-1.4.9.1 {
+ set in [open shell5.csv w]
+ puts $in "6|'Now is the time for all good men to come to the aid of their country.'"
+ close $in
+ set res [catchcmd "test.db" {.import shell5.csv t1
+SELECT COUNT(*) FROM t1;}]
+} {0 6}
+
+do_test shell5-1.4.9.2 {
+ catchcmd "test.db" {SELECT b FROM t1 WHERE a='6';}
+} {0 {'Now is the time for all good men to come to the aid of their country.'}}
+
+# import file with 1 row, 2 columns, quoted text data
+do_test shell5-1.4.10.1 {
+ set in [open shell5.csv w]
+ puts $in "7|\"Now is the time for all good men to come to the aid of their country.\""
+ close $in
+ set res [catchcmd "test.db" {.import shell5.csv t1
+SELECT COUNT(*) FROM t1;}]
+} {0 7}
+
+do_test shell5-1.4.10.2 {
+ catchcmd "test.db" {SELECT b FROM t1 WHERE a='7';}
+} {0 {Now is the time for all good men to come to the aid of their country.}}
+
+# check importing very long field
+do_test shell5-1.5.1 {
+ set str [string repeat X 999]
+ set in [open shell5.csv w]
+ puts $in "8|$str"
+ close $in
+ set res [catchcmd "test.db" {.import shell5.csv t1
+SELECT length(b) FROM t1 WHERE a='8';}]
+} {0 999}
+
+# try importing into a table with a large number of columns.
+# This is limited by SQLITE_MAX_VARIABLE_NUMBER, which defaults to 999.
+set cols 999
+do_test shell5-1.6.1 {
+ set sql {CREATE TABLE t2(}
+ set data {}
+ for {set i 1} {$i<$cols} {incr i} {
+ append sql "c$i,"
+ append data "$i|"
+ }
+ append sql "c$cols);"
+ append data "$cols"
+ catchcmd "test.db" $sql
+ set in [open shell5.csv w]
+ puts $in $data
+ close $in
+ set res [catchcmd "test.db" {.import shell5.csv t2
+SELECT COUNT(*) FROM t2;}]
+} {0 1}
+
+# try importing a large number of rows
+set rows 999999
+do_test shell5-1.7.1 {
+ set in [open shell5.csv w]
+ for {set i 1} {$i<=$rows} {incr i} {
+ puts $in $i
+ }
+ close $in
+ set res [catchcmd "test.db" {CREATE TABLE t3(a);
+.import shell5.csv t3
+SELECT COUNT(*) FROM t3;}]
+} [list 0 $rows]
+
+finish_test
diff --git a/test/shrink.test b/test/shrink.test
new file mode 100644
index 0000000..e03ebee
--- /dev/null
+++ b/test/shrink.test
@@ -0,0 +1,43 @@
+# 2011 November 16
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# This file contains test cases for sqlite3_db_release_memory and
+# the PRAGMA shrink_memory statement.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+unset -nocomplain baseline
+do_test shrink-1.1 {
+ db eval {
+ PRAGMA cache_size = 2000;
+ CREATE TABLE t1(x,y);
+ INSERT INTO t1 VALUES(randomblob(1000000),1);
+ }
+ set ::baseline [sqlite3_memory_used]
+ sqlite3_db_release_memory db
+ expr {$::baseline > [sqlite3_memory_used]+500000}
+} {1}
+do_test shrink-1.2 {
+ set baseline [sqlite3_memory_used]
+ db eval {
+ UPDATE t1 SET y=y+1;
+ }
+ expr {$::baseline+500000 < [sqlite3_memory_used]}
+} {1}
+do_test shrink-1.3 {
+ set baseline [sqlite3_memory_used]
+ db eval {PRAGMA shrink_memory}
+ expr {$::baseline > [sqlite3_memory_used]+500000}
+} {1}
+
+finish_test
diff --git a/test/stat.test b/test/stat.test
index 21726eb..926d9b7 100644
--- a/test/stat.test
+++ b/test/stat.test
@@ -15,7 +15,7 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
-ifcapable !vtab {
+ifcapable !vtab||!compound {
finish_test
return
}
diff --git a/test/subquery.test b/test/subquery.test
index 169ceda..d9d2952 100644
--- a/test/subquery.test
+++ b/test/subquery.test
@@ -331,6 +331,91 @@ do_test subquery-3.3.5 {
}
} {1 1 2 1}
+# The following tests check for aggregate subqueries in an aggregate
+# query.
+#
+do_test subquery-3.4.1 {
+ execsql {
+ CREATE TABLE t34(x,y);
+ INSERT INTO t34 VALUES(106,4), (107,3), (106,5), (107,5);
+ SELECT a.x, avg(a.y)
+ FROM t34 AS a
+ GROUP BY a.x
+ HAVING NOT EXISTS( SELECT b.x, avg(b.y)
+ FROM t34 AS b
+ GROUP BY b.x
+ HAVING avg(a.y) > avg(b.y));
+ }
+} {107 4.0}
+do_test subquery-3.4.2 {
+ execsql {
+ SELECT a.x, avg(a.y) AS avg1
+ FROM t34 AS a
+ GROUP BY a.x
+ HAVING NOT EXISTS( SELECT b.x, avg(b.y) AS avg2
+ FROM t34 AS b
+ GROUP BY b.x
+ HAVING avg1 > avg2);
+ }
+} {107 4.0}
+do_test subquery-3.4.3 {
+ execsql {
+ SELECT
+ a.x,
+ avg(a.y),
+ NOT EXISTS ( SELECT b.x, avg(b.y)
+ FROM t34 AS b
+ GROUP BY b.x
+ HAVING avg(a.y) > avg(b.y)),
+ EXISTS ( SELECT c.x, avg(c.y)
+ FROM t34 AS c
+ GROUP BY c.x
+ HAVING avg(a.y) > avg(c.y))
+ FROM t34 AS a
+ GROUP BY a.x
+ ORDER BY a.x;
+ }
+} {106 4.5 0 1 107 4.0 1 0}
+
+do_test subquery-3.5.1 {
+ execsql {
+ CREATE TABLE t35a(x); INSERT INTO t35a VALUES(1),(2),(3);
+ CREATE TABLE t35b(y); INSERT INTO t35b VALUES(98), (99);
+ SELECT max((SELECT avg(y) FROM t35b)) FROM t35a;
+ }
+} {98.5}
+do_test subquery-3.5.2 {
+ execsql {
+ SELECT max((SELECT count(y) FROM t35b)) FROM t35a;
+ }
+} {2}
+do_test subquery-3.5.3 {
+ execsql {
+ SELECT max((SELECT count() FROM t35b)) FROM t35a;
+ }
+} {2}
+do_test subquery-3.5.4 {
+ catchsql {
+ SELECT max((SELECT count(x) FROM t35b)) FROM t35a;
+ }
+} {1 {misuse of aggregate: count()}}
+do_test subquery-3.5.5 {
+ catchsql {
+ SELECT max((SELECT count(x) FROM t35b)) FROM t35a;
+ }
+} {1 {misuse of aggregate: count()}}
+do_test subquery-3.5.6 {
+ catchsql {
+ SELECT max((SELECT a FROM (SELECT count(x) AS a FROM t35b))) FROM t35a;
+ }
+} {1 {misuse of aggregate: count()}}
+do_test subquery-3.5.7 {
+ execsql {
+ SELECT max((SELECT a FROM (SELECT count(y) AS a FROM t35b))) FROM t35a;
+ }
+} {2}
+
+
#------------------------------------------------------------------
# These tests - subquery-4.* - use the TCL statement cache to try
# and expose bugs to do with re-using statements that have been
diff --git a/test/superlock.test b/test/superlock.test
index 8155d92..8199d52 100644
--- a/test/superlock.test
+++ b/test/superlock.test
@@ -76,7 +76,10 @@ do_catchsql_test 3.4 { INSERT INTO t1 VALUES(5, 6)} {1 {database is locked}}
do_catchsql_test 3.5 { PRAGMA wal_checkpoint } {0 {1 -1 -1}}
do_test 3.6 { unlock } {}
-do_execsql_test 4.1 { PRAGMA wal_checkpoint } {0 2 2}
+# At this point the WAL file consists of a single frame only - written
+# by test case 3.1. If the ZERO_DAMAGE flag were not set, it would consist
+# of two frames - the frame written by 3.1 and a padding frame.
+do_execsql_test 4.1 { PRAGMA wal_checkpoint } {0 1 1}
do_test 4.2 { sqlite3demo_superlock unlock test.db } {unlock}
do_catchsql_test 4.3 { SELECT * FROM t1 } {1 {database is locked}}
diff --git a/test/syscall.test b/test/syscall.test
index 201bd63..d841a9a 100644
--- a/test/syscall.test
+++ b/test/syscall.test
@@ -59,7 +59,8 @@ do_test 2.1.2 { test_syscall exists nosuchcall } 0
foreach s {
open close access getcwd stat fstat ftruncate
fcntl read pread write pwrite fchmod fallocate
- pread64 pwrite64 unlink openDirectory
+ pread64 pwrite64 unlink openDirectory mkdir rmdir
+ statvfs fchown umask
} {
if {[test_syscall exists $s]} {lappend syscall_list $s}
}
diff --git a/test/tclsqlite.test b/test/tclsqlite.test
index 0ed0602..c8b0303 100644
--- a/test/tclsqlite.test
+++ b/test/tclsqlite.test
@@ -25,7 +25,7 @@ source $testdir/tester.tcl
if {[sqlite3 -has-codec]} {
set r "sqlite_orig HANDLE FILENAME ?-key CODEC-KEY?"
} else {
- set r "sqlite_orig HANDLE FILENAME ?-vfs VFSNAME? ?-readonly BOOLEAN? ?-create BOOLEAN? ?-nomutex BOOLEAN? ?-fullmutex BOOLEAN?"
+ set r "sqlite_orig HANDLE FILENAME ?-vfs VFSNAME? ?-readonly BOOLEAN? ?-create BOOLEAN? ?-nomutex BOOLEAN? ?-fullmutex BOOLEAN? ?-uri BOOLEAN?"
}
do_test tcl-1.1 {
set v [catch {sqlite3 bogus} msg]
diff --git a/test/tester.tcl b/test/tester.tcl
index 3c34b45..07eebcb 100644
--- a/test/tester.tcl
+++ b/test/tester.tcl
@@ -19,6 +19,7 @@
#
# Commands to manipulate the db and the file-system at a high level:
#
+# get_pwd
# copy_file FROM TO
# delete_file FILENAME
# drop_all_tables ?DB?
@@ -57,7 +58,7 @@
# Commands providing a lower level interface to the global test counters:
#
# set_test_counter COUNTER ?VALUE?
-# omit_test TESTNAME REASON
+# omit_test TESTNAME REASON ?APPEND?
# fail_test TESTNAME
# incr_ntest
#
@@ -148,6 +149,24 @@ proc getFileRetryDelay {} {
return $::G(file-retry-delay)
}
+# Return the string representing the name of the current directory. On
+# Windows, the result is "normalized" to whatever our parent command shell
+# is using to prevent case-mismatch issues.
+#
+proc get_pwd {} {
+ if {$::tcl_platform(platform) eq "windows"} {
+ #
+ # NOTE: Cannot use [file normalize] here because it would alter the
+ # case of the result to what Tcl considers canonical, which would
+ # defeat the purpose of this procedure.
+ #
+ return [string map [list \\ /] \
+ [string trim [exec -- $::env(ComSpec) /c echo %CD%]]]
+ } else {
+ return [pwd]
+ }
+}
+
# Copy file $from into $to. This is used because some versions of
# TCL for windows (notably the 8.4.1 binary package shipped with the
# current mingw release) have a broken "file copy" command.
@@ -274,6 +293,7 @@ if {[info exists cmdlinearg]==0} {
# --file-retries=N
# --file-retry-delay=N
# --start=[$permutation:]$testfile
+ # --match=$pattern
#
set cmdlinearg(soft-heap-limit) 0
set cmdlinearg(maxerror) 1000
@@ -283,7 +303,8 @@ if {[info exists cmdlinearg]==0} {
set cmdlinearg(soak) 0
set cmdlinearg(file-retries) 0
set cmdlinearg(file-retry-delay) 0
- set cmdlinearg(start) ""
+ set cmdlinearg(start) ""
+ set cmdlinearg(match) ""
set leftover [list]
foreach a $argv {
@@ -336,6 +357,12 @@ if {[info exists cmdlinearg]==0} {
}
if {$::G(start:file) == ""} {unset ::G(start:file)}
}
+ {^-+match=.+$} {
+ foreach {dummy cmdlinearg(match)} [split $a =] break
+
+ set ::G(match) $cmdlinearg(match)
+ if {$::G(match) == ""} {unset ::G(match)}
+ }
default {
lappend leftover $a
}
@@ -414,9 +441,11 @@ if {0==[info exists ::SLAVE]} {
# Record the fact that a sequence of tests were omitted.
#
-proc omit_test {name reason} {
+proc omit_test {name reason {append 1}} {
set omitList [set_test_counter omit_list]
- lappend omitList [list $name $reason]
+ if {$append} {
+ lappend omitList [list $name $reason]
+ }
set_test_counter omit_list $omitList
}
@@ -445,7 +474,6 @@ proc incr_ntest {} {
# Invoke the do_test procedure to run a single test
#
proc do_test {name cmd expected} {
-
global argv cmdlinearg
fix_testname name
@@ -471,18 +499,47 @@ proc do_test {name cmd expected} {
incr_ntest
puts -nonewline $name...
flush stdout
- if {[catch {uplevel #0 "$cmd;\n"} result]} {
- puts "\nError: $result"
- fail_test $name
- } elseif {[string compare $result $expected]} {
- puts "\nExpected: \[$expected\]\n Got: \[$result\]"
- fail_test $name
+
+ if {![info exists ::G(match)] || [string match $::G(match) $name]} {
+ if {[catch {uplevel #0 "$cmd;\n"} result]} {
+ puts "\nError: $result"
+ fail_test $name
+ } else {
+ if {[regexp {^~?/.*/$} $expected]} {
+ if {[string index $expected 0]=="~"} {
+ set re [string range $expected 2 end-1]
+ set ok [expr {![regexp $re $result]}]
+ } else {
+ set re [string range $expected 1 end-1]
+ set ok [regexp $re $result]
+ }
+ } else {
+ set ok [expr {[string compare $result $expected]==0}]
+ }
+ if {!$ok} {
+ puts "\nExpected: \[$expected\]\n Got: \[$result\]"
+ fail_test $name
+ } else {
+ puts " Ok"
+ }
+ }
} else {
- puts " Ok"
+ puts " Omitted"
+ omit_test $name "pattern mismatch" 0
}
flush stdout
}
+proc catchcmd {db {cmd ""}} {
+ global CLI
+ set out [open cmds.txt w]
+ puts $out $cmd
+ close $out
+ set line "exec $CLI $db < cmds.txt"
+ set rc [catch { eval $line } msg]
+ list $rc $msg
+}
+
proc filepath_normalize {p} {
# test cases should be written to assume "unix"-like file paths
if {$::tcl_platform(platform)!="unix"} {
@@ -968,7 +1025,7 @@ proc crashsql {args} {
# $crashfile gets compared to the native filename in
# cfSync(), which can be different then what TCL uses by
# default, so here we force it to the "nativename" format.
- set cfile [string map {\\ \\\\} [file nativename [file join [pwd] $crashfile]]]
+ set cfile [string map {\\ \\\\} [file nativename [file join [get_pwd] $crashfile]]]
set f [open crash.tcl w]
puts $f "sqlite3_crash_enable 1"
@@ -1557,5 +1614,8 @@ proc db_delete_and_reopen {{file test.db}} {
# to non-zero, then set the global variable $AUTOVACUUM to 1.
set AUTOVACUUM $sqlite_options(default_autovacuum)
+# Make sure the FTS enhanced query syntax is disabled.
+set sqlite_fts3_enable_parentheses 0
+
source $testdir/thread_common.tcl
source $testdir/malloc_common.tcl
diff --git a/test/tkt-02a8e81d44.test b/test/tkt-02a8e81d44.test
index 4a48fb0..7ca9866 100644
--- a/test/tkt-02a8e81d44.test
+++ b/test/tkt-02a8e81d44.test
@@ -17,6 +17,11 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+ifcapable !compound {
+ finish_test
+ return
+}
+
do_test tkt-02a838-1.1 {
execsql {
CREATE TABLE t1(a);
diff --git a/test/tkt-2a5629202f.test b/test/tkt-2a5629202f.test
new file mode 100644
index 0000000..037f100
--- /dev/null
+++ b/test/tkt-2a5629202f.test
@@ -0,0 +1,71 @@
+# 2012 April 19
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# The tests in this file were used while developing the SQLite 4 code.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set testprefix tkt-2a5629202f
+
+# This procedure executes the SQL. Then it checks to see if the OP_Sort
+# opcode was executed. If an OP_Sort did occur, then "sort" is appended
+# to the result. If no OP_Sort happened, then "nosort" is appended.
+#
+# This procedure is used to check to make sure sorting is or is not
+# occurring as expected.
+#
+proc cksort {sql} {
+ set data [execsql $sql]
+ if {[db status sort]} {set x sort} {set x nosort}
+ lappend data $x
+ return $data
+}
+
+do_execsql_test 1.1 {
+ CREATE TABLE t8(b TEXT, c TEXT);
+ INSERT INTO t8 VALUES('a', 'one');
+ INSERT INTO t8 VALUES('b', 'two');
+ INSERT INTO t8 VALUES(NULL, 'three');
+ INSERT INTO t8 VALUES(NULL, 'four');
+}
+
+do_execsql_test 1.2 {
+ SELECT coalesce(b, 'null') || '/' || c FROM t8 x ORDER BY x.b, x.c
+} {null/four null/three a/one b/two}
+
+do_execsql_test 1.3 {
+ CREATE UNIQUE INDEX i1 ON t8(b);
+ SELECT coalesce(b, 'null') || '/' || c FROM t8 x ORDER BY x.b, x.c
+} {null/four null/three a/one b/two}
+
+#-------------------------------------------------------------------------
+#
+
+do_execsql_test 2.1 {
+ CREATE TABLE t2(a, b NOT NULL, c);
+ CREATE UNIQUE INDEX t2ab ON t2(a, b);
+ CREATE UNIQUE INDEX t2ba ON t2(b, a);
+}
+
+do_test 2.2 {
+ cksort { SELECT * FROM t2 WHERE a = 10 ORDER BY a, b, c }
+} {nosort}
+
+do_test 2.3 {
+ cksort { SELECT * FROM t2 WHERE b = 10 ORDER BY a, b, c }
+} {sort}
+
+do_test 2.4 {
+ cksort { SELECT * FROM t2 WHERE a IS NULL ORDER BY a, b, c }
+} {sort}
+
+finish_test
+
diff --git a/test/tkt-385a5b56b9.test b/test/tkt-385a5b56b9.test
new file mode 100644
index 0000000..614e82d
--- /dev/null
+++ b/test/tkt-385a5b56b9.test
@@ -0,0 +1,54 @@
+# 2012 April 02
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# The tests in this file were used while developing the SQLite 4 code.
+#
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set testprefix tkt-385a5b56b9
+
+do_execsql_test 1.0 {
+ CREATE TABLE t1(x, y);
+ INSERT INTO t1 VALUES(1, NULL);
+ INSERT INTO t1 VALUES(2, NULL);
+ INSERT INTO t1 VALUES(1, NULL);
+}
+
+do_execsql_test 1.1 { SELECT DISTINCT x, y FROM t1 } {1 {} 2 {}}
+do_execsql_test 1.2 { CREATE UNIQUE INDEX i1 ON t1(x, y) }
+do_execsql_test 1.3 { SELECT DISTINCT x, y FROM t1 } {1 {} 2 {}}
+
+
+#-------------------------------------------------------------------------
+
+do_execsql_test 2.0 {
+ CREATE TABLE t2(x, y NOT NULL);
+ CREATE UNIQUE INDEX t2x ON t2(x);
+ CREATE UNIQUE INDEX t2y ON t2(y);
+}
+
+do_eqp_test 2.1 { SELECT DISTINCT x FROM t2 } {
+ 0 0 0 {SCAN TABLE t2 USING COVERING INDEX t2x (~1000000 rows)}
+}
+
+do_eqp_test 2.2 { SELECT DISTINCT y FROM t2 } {
+ 0 0 0 {SCAN TABLE t2 (~1000000 rows)}
+}
+
+do_eqp_test 2.3 { SELECT DISTINCT x, y FROM t2 WHERE y=10 } {
+ 0 0 0 {SEARCH TABLE t2 USING INDEX t2y (y=?) (~1 rows)}
+}
+
+do_eqp_test 2.4 { SELECT DISTINCT x, y FROM t2 WHERE x=10 } {
+ 0 0 0 {SEARCH TABLE t2 USING INDEX t2x (x=?) (~1 rows)}
+}
+
+finish_test
+
diff --git a/test/tkt-38cb5df375.test b/test/tkt-38cb5df375.test
index 47b0b55..e5e0267 100644
--- a/test/tkt-38cb5df375.test
+++ b/test/tkt-38cb5df375.test
@@ -16,6 +16,11 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+ifcapable !compound {
+ finish_test
+ return
+}
+
do_test tkt-38cb5df375.0 {
execsql {
CREATE TABLE t1(a);
diff --git a/test/tkt-3a77c9714e.test b/test/tkt-3a77c9714e.test
new file mode 100644
index 0000000..6eaec16
--- /dev/null
+++ b/test/tkt-3a77c9714e.test
@@ -0,0 +1,73 @@
+# 2011 December 06
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# This file implements tests to verify that ticket [3a77c9714e] has been
+# fixed.
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+ifcapable !compound {
+ finish_test
+ return
+}
+
+set testprefix "tkt-3a77c9714e"
+
+do_execsql_test 1.1 {
+ CREATE TABLE t1(t1_id INTEGER PRIMARY KEY, t1_title TEXT);
+ CREATE TABLE t2(t2_id INTEGER PRIMARY KEY, t2_title TEXT);
+ CREATE TABLE t3(t3_id INTEGER PRIMARY KEY, t3_title TEXT);
+
+ INSERT INTO t1 (t1_id, t1_title) VALUES (888, 'ABCDEF');
+ INSERT INTO t2 (t2_id, t2_title) VALUES (999, 'ABCDEF');
+ INSERT INTO t3 (t3_id, t3_title) VALUES (999, 'ABCDEF');
+}
+
+do_execsql_test 1.2 {
+ SELECT t1_title, t2_title
+ FROM t1 LEFT JOIN t2
+ WHERE
+ t2_id = (SELECT t3_id FROM
+ ( SELECT t3_id FROM t3 WHERE t3_title=t1_title LIMIT 500 )
+ )
+} {ABCDEF ABCDEF}
+
+do_execsql_test 2.1 {
+ CREATE TABLE [Beginnings] (
+ [Id] INTEGER PRIMARY KEY AUTOINCREMENT,[Title] TEXT, [EndingId] INTEGER
+ );
+ CREATE TABLE [Endings] (Id INT,Title TEXT,EndingId INT);
+ INSERT INTO Beginnings (Id, Title, EndingId) VALUES (1, 'FACTOR', 18);
+ INSERT INTO Beginnings (Id, Title, EndingId) VALUES (2, 'SWIMM', 18);
+ INSERT INTO Endings (Id, Title, EndingId) VALUES (1, 'ING', 18);
+}
+
+do_execsql_test 2.2 {
+ SELECT
+ SrcWord, Beginnings.Title
+ FROM
+ (SELECT 'FACTORING' AS SrcWord UNION SELECT 'SWIMMING' AS SrcWord )
+ LEFT JOIN
+ Beginnings
+ WHERE Beginnings.Id= (
+ SELECT BeginningId FROM (
+ SELECT SrcWord, B.Id as BeginningId, B.Title || E.Title As Connected
+ FROM Beginnings B LEFT JOIN Endings E ON B.EndingId=E.EndingId
+ WHERE Connected=SrcWord LIMIT 1
+ )
+ )
+} {FACTORING FACTOR SWIMMING SWIMM}
+
+
+finish_test
+
diff --git a/test/tkt-7bbfb7d442.test b/test/tkt-7bbfb7d442.test
new file mode 100644
index 0000000..dcb9b16
--- /dev/null
+++ b/test/tkt-7bbfb7d442.test
@@ -0,0 +1,156 @@
+# 2011 December 9
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# This file implements tests to verify that ticket [7bbfb7d442] has been
+# fixed.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set testprefix tkt-7bbfb7d442
+
+do_execsql_test 1.1 {
+ CREATE TABLE t1(a, b);
+ INSERT INTO t1 VALUES(1, 'one');
+ INSERT INTO t1 VALUES(2, 'two');
+ INSERT INTO t1 VALUES(3, 'three');
+
+ CREATE TABLE t2(c, d);
+ INSERT INTO t2 VALUES('one', 'I');
+ INSERT INTO t2 VALUES('two', 'II');
+ INSERT INTO t2 VALUES('three', 'III');
+
+ CREATE TABLE t3(t3_a PRIMARY KEY, t3_d);
+ CREATE TRIGGER t3t AFTER INSERT ON t3 WHEN new.t3_d IS NULL BEGIN
+ UPDATE t3 SET t3_d = (
+ SELECT d FROM
+ (SELECT * FROM t2 WHERE (new.t3_a%2)=(rowid%2) LIMIT 10),
+ (SELECT * FROM t1 WHERE (new.t3_a%2)=(rowid%2) LIMIT 10)
+ WHERE a = new.t3_a AND b = c
+ ) WHERE t3_a = new.t3_a;
+ END;
+}
+
+do_execsql_test 1.2 {
+ INSERT INTO t3(t3_a) VALUES(1);
+ INSERT INTO t3(t3_a) VALUES(2);
+ INSERT INTO t3(t3_a) VALUES(3);
+ SELECT * FROM t3;
+} {1 I 2 II 3 III}
+
+do_execsql_test 1.3 { DELETE FROM t3 }
+
+ifcapable compound {
+ do_execsql_test 1.4 {
+ INSERT INTO t3(t3_a) SELECT 1 UNION SELECT 2 UNION SELECT 3;
+ SELECT * FROM t3;
+ } {1 I 2 II 3 III}
+}
+
+
+
+#-------------------------------------------------------------------------
+# The following test case - 2.* - is from the original bug report as
+# posted to the mailing list.
+#
+do_execsql_test 2.1 {
+ CREATE TABLE InventoryControl (
+ InventoryControlId INTEGER PRIMARY KEY AUTOINCREMENT,
+ SKU INTEGER NOT NULL,
+ Variant INTEGER NOT NULL DEFAULT 0,
+ ControlDate DATE NOT NULL,
+ ControlState INTEGER NOT NULL DEFAULT -1,
+ DeliveredQty VARCHAR(30)
+ );
+
+ CREATE TRIGGER TGR_InventoryControl_AfterInsert
+ AFTER INSERT ON InventoryControl
+ FOR EACH ROW WHEN NEW.ControlState=-1 BEGIN
+
+ INSERT OR REPLACE INTO InventoryControl(
+ InventoryControlId,SKU,Variant,ControlDate,ControlState,DeliveredQty
+ ) SELECT
+ T1.InventoryControlId AS InventoryControlId,
+ T1.SKU AS SKU,
+ T1.Variant AS Variant,
+ T1.ControlDate AS ControlDate,
+ 1 AS ControlState,
+ COALESCE(T2.DeliveredQty,0) AS DeliveredQty
+ FROM (
+ SELECT
+ NEW.InventoryControlId AS InventoryControlId,
+ II.SKU AS SKU,
+ II.Variant AS Variant,
+ COALESCE(LastClosedIC.ControlDate,NEW.ControlDate) AS ControlDate
+ FROM
+ InventoryItem II
+ LEFT JOIN
+ InventoryControl LastClosedIC
+ ON LastClosedIC.InventoryControlId IN ( SELECT 99999 )
+ WHERE
+ II.SKU=NEW.SKU AND
+ II.Variant=NEW.Variant
+ ) T1
+ LEFT JOIN (
+ SELECT
+ TD.SKU AS SKU,
+ TD.Variant AS Variant,
+ 10 AS DeliveredQty
+ FROM
+ TransactionDetail TD
+ WHERE
+ TD.SKU=NEW.SKU AND
+ TD.Variant=NEW.Variant
+ ) T2
+ ON T2.SKU=T1.SKU AND
+ T2.Variant=T1.Variant;
+ END;
+
+ CREATE TABLE InventoryItem (
+ SKU INTEGER NOT NULL,
+ Variant INTEGER NOT NULL DEFAULT 0,
+ DeptCode INTEGER NOT NULL,
+ GroupCode INTEGER NOT NULL,
+ ItemDescription VARCHAR(120) NOT NULL,
+ PRIMARY KEY(SKU, Variant)
+ );
+
+ INSERT INTO InventoryItem VALUES(220,0,1,170,'Scoth Tampon Recurer');
+ INSERT INTO InventoryItem VALUES(31,0,1,110,'Fromage');
+
+ CREATE TABLE TransactionDetail (
+ TransactionId INTEGER NOT NULL,
+ SKU INTEGER NOT NULL,
+ Variant INTEGER NOT NULL DEFAULT 0,
+ PRIMARY KEY(TransactionId, SKU, Variant)
+ );
+ INSERT INTO TransactionDetail(TransactionId, SKU, Variant) VALUES(44, 31, 0);
+
+
+ INSERT INTO InventoryControl(SKU, Variant, ControlDate) SELECT
+ II.SKU AS SKU, II.Variant AS Variant, '2011-08-30' AS ControlDate
+ FROM InventoryItem II;
+}
+
+do_execsql_test 2.2 {
+ SELECT SKU, DeliveredQty FROM InventoryControl WHERE SKU=31
+} {31 10}
+
+do_execsql_test 2.3 {
+ SELECT CASE WHEN DeliveredQty=10 THEN "TEST PASSED!" ELSE "TEST FAILED!" END
+ FROM InventoryControl WHERE SKU=31;
+} {{TEST PASSED!}}
+
+
+finish_test
+
+
diff --git a/test/tkt-80ba201079.test b/test/tkt-80ba201079.test
index 95e99b5..0122e95 100644
--- a/test/tkt-80ba201079.test
+++ b/test/tkt-80ba201079.test
@@ -164,11 +164,13 @@ do_execsql_test 303 {
(b='B' AND c IN (SELECT c FROM t1))
} {A B C D E}
-do_execsql_test 304 {
- SELECT * FROM t1, t2 WHERE
- (a='A' AND d='E') OR
- (b='B' AND c IN (SELECT 'B' UNION SELECT 'C' UNION SELECT 'D'))
-} {A B C D E}
+ifcapable compound {
+ do_execsql_test 304 {
+ SELECT * FROM t1, t2 WHERE
+ (a='A' AND d='E') OR
+ (b='B' AND c IN (SELECT 'B' UNION SELECT 'C' UNION SELECT 'D'))
+ } {A B C D E}
+}
do_execsql_test 305 {
SELECT * FROM t1, t2 WHERE
@@ -182,10 +184,12 @@ do_execsql_test 306 {
(a='A' AND d='E')
} {A B C D E}
-do_execsql_test 307 {
- SELECT * FROM t1, t2 WHERE
- (b='B' AND c IN (SELECT 'B' UNION SELECT 'C' UNION SELECT 'D')) OR
- (a='A' AND d='E')
-} {A B C D E}
+ifcapable compound {
+ do_execsql_test 307 {
+ SELECT * FROM t1, t2 WHERE
+ (b='B' AND c IN (SELECT 'B' UNION SELECT 'C' UNION SELECT 'D')) OR
+ (a='A' AND d='E')
+ } {A B C D E}
+}
finish_test
diff --git a/test/tkt-94c04eaadb.test b/test/tkt-94c04eaadb.test
index cce8a98..0063de6 100644
--- a/test/tkt-94c04eaadb.test
+++ b/test/tkt-94c04eaadb.test
@@ -27,7 +27,7 @@ do_test tkt-94c94-1.1 {
# Grow the file to larger than 4096MB (2^32 bytes)
db close
-if {[catch {fake_big_file 4096 [pwd]/test.db} msg]} {
+if {[catch {fake_big_file 4096 [get_pwd]/test.db} msg]} {
puts "**** Unable to create a file larger than 4096 MB. *****"
finish_test
return
diff --git a/test/tkt-b72787b1.test b/test/tkt-b72787b1.test
index 11ea41e..dea3f49 100644
--- a/test/tkt-b72787b1.test
+++ b/test/tkt-b72787b1.test
@@ -35,6 +35,11 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+ifcapable !compound {
+ finish_test
+ return
+}
+
unset -nocomplain ::STMT
proc runsql {} {
db eval {CREATE TABLE IF NOT EXISTS t4(q)}
diff --git a/test/tkt-bdc6bbbb38.test b/test/tkt-bdc6bbbb38.test
new file mode 100644
index 0000000..8b0d55d
--- /dev/null
+++ b/test/tkt-bdc6bbbb38.test
@@ -0,0 +1,90 @@
+# 2012 May 11
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library.
+#
+# This file implements tests to verify that ticket [bdc6bbbb38] has been
+# fixed.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set testprefix tkt-bdc6bbbb38
+
+# If SQLITE_ENABLE_FTS3 is not defined, omit this file.
+ifcapable !fts3 { finish_test ; return }
+set sqlite_fts3_enable_parentheses 1
+
+foreach {tn idxdir} {1 ASC 2 DESC} {
+ execsql { DROP TABLE IF EXISTS t2 }
+
+ do_execsql_test $tn.1.1 "CREATE VIRTUAL TABLE t2 USING fts4(x, order=$idxdir)"
+ do_execsql_test $tn.1.2 { INSERT INTO t2 VALUES('a b c') }
+
+ do_execsql_test $tn.1.3 {
+ SELECT offsets(t2) FROM t2 WHERE t2 MATCH 'a AND d OR b' ORDER BY docid ASC
+ } {
+ {0 0 0 1 0 2 2 1}
+ }
+ do_execsql_test $tn.1.4 {
+ SELECT snippet(t2,'[',']') FROM t2 WHERE t2 MATCH 'a AND d OR b'
+ ORDER BY docid ASC
+ } {
+ {[a] [b] c}
+ }
+ do_execsql_test $tn.1.5 { INSERT INTO t2 VALUES('a c d') }
+ do_execsql_test $tn.1.6 {
+ SELECT offsets(t2) FROM t2 WHERE t2 MATCH 'a AND d OR b' ORDER BY docid ASC
+ } {
+ {0 0 0 1 0 2 2 1}
+ {0 0 0 1 0 1 4 1}
+ }
+ do_execsql_test $tn.1.7 {
+ SELECT snippet(t2,'[',']') FROM t2 WHERE t2 MATCH 'a AND d OR b'
+ ORDER BY docid ASC
+ } {
+ {[a] [b] c}
+ {[a] c [d]}
+ }
+
+ execsql { DROP TABLE IF EXISTS t3 }
+ do_execsql_test $tn.2.1 "CREATE VIRTUAL TABLE t3 USING fts4(x, order=$idxdir)"
+ do_execsql_test $tn.2.2 { INSERT INTO t3 VALUES('a c d') }
+ do_execsql_test $tn.2.3 {
+ SELECT offsets(t3) FROM t3 WHERE t3 MATCH 'a AND d OR b' ORDER BY docid DESC
+ } {
+ {0 0 0 1 0 1 4 1}
+ }
+ do_execsql_test $tn.2.4 {
+ SELECT snippet(t3,'[',']') FROM t3 WHERE t3 MATCH 'a AND d OR b'
+ ORDER BY docid DESC
+ } {
+ {[a] c [d]}
+ }
+ do_execsql_test $tn.2.5 {
+ INSERT INTO t3 VALUES('a b c');
+ }
+ do_execsql_test $tn.2.6 {
+ SELECT offsets(t3) FROM t3 WHERE t3 MATCH 'a AND d OR b' ORDER BY docid DESC
+ } {
+ {0 0 0 1 0 2 2 1}
+ {0 0 0 1 0 1 4 1}
+ }
+ do_execsql_test $tn.2.7 {
+ SELECT snippet(t3,'[',']') FROM t3 WHERE t3 MATCH 'a AND d OR b'
+ ORDER BY docid DESC
+ } {
+ {[a] [b] c}
+ {[a] c [d]}
+ }
+}
+
+set sqlite_fts3_enable_parentheses 0
+finish_test
diff --git a/test/tkt-d82e3f3721.test b/test/tkt-d82e3f3721.test
index 31f7d34..da932d1 100644
--- a/test/tkt-d82e3f3721.test
+++ b/test/tkt-d82e3f3721.test
@@ -17,6 +17,11 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+ifcapable !compound {
+ finish_test
+ return
+}
+
do_test tkt-d82e3-1.1 {
db eval {
CREATE TABLE t1(a INTEGER PRIMARY KEY AUTOINCREMENT, b);
diff --git a/test/tkt-f777251dc7a.test b/test/tkt-f777251dc7a.test
index 6f0b43f..af6f71a 100644
--- a/test/tkt-f777251dc7a.test
+++ b/test/tkt-f777251dc7a.test
@@ -17,6 +17,11 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+ifcapable !compound {
+ finish_test
+ return
+}
+
do_test tkt-f7772-1.1 {
execsql {
CREATE TEMP TABLE t1(x UNIQUE);
@@ -37,7 +42,7 @@ do_test tkt-f7772-1.2 {
BEGIN IMMEDIATE;
SELECT x, force_rollback(), EXISTS(SELECT 1 FROM t3 WHERE w=x) FROM t2;
}
-} {1 {callback requested query abort}}
+} {1 {abort due to ROLLBACK}}
do_test tkt-f7772-1.3 {
sqlite3_get_autocommit db
} {1}
diff --git a/test/tkt3527.test b/test/tkt3527.test
index 34e9e61..d9b1dad 100644
--- a/test/tkt3527.test
+++ b/test/tkt3527.test
@@ -18,6 +18,11 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+ifcapable !compound {
+ finish_test
+ return
+}
+
do_test tkt3527-1.1 {
db eval {
CREATE TABLE Element (
diff --git a/test/tkt3773.test b/test/tkt3773.test
index 0dc414e..3f5a1a3 100644
--- a/test/tkt3773.test
+++ b/test/tkt3773.test
@@ -18,6 +18,11 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+ifcapable !compound {
+ finish_test
+ return
+}
+
do_test tkt3773-1.1 {
db eval {
CREATE TABLE t1(a,b);
diff --git a/test/tkt3838.test b/test/tkt3838.test
index 5dfc2b8..fa937ac 100644
--- a/test/tkt3838.test
+++ b/test/tkt3838.test
@@ -38,4 +38,21 @@ do_realnum_test tkt3838-1.1 {
}
} {2 999 9e+99 xyzzy}
+ifcapable trigger {
+ do_test tkt3838-1.2 {
+ db eval {
+ CREATE TABLE log(y);
+ CREATE TRIGGER r1 AFTER INSERT ON T1 BEGIN
+ INSERT INTO log VALUES(new.x);
+ END;
+ INSERT INTO t1(x) VALUES(123);
+ ALTER TABLE T1 RENAME TO XYZ2;
+ INSERT INTO xyz2(x) VALUES(456);
+ ALTER TABLE xyz2 RENAME TO pqr3;
+ INSERT INTO pqr3(x) VALUES(789);
+ SELECT * FROM log;
+ }
+ } {123 456 789}
+}
+
finish_test
diff --git a/test/trace2.test b/test/trace2.test
index 42738db..2f7ae7d 100644
--- a/test/trace2.test
+++ b/test/trace2.test
@@ -130,22 +130,23 @@ ifcapable fts3 {
"INSERT INTO x1 VALUES('North northwest wind between 8 and 14 mph');"
"-- INSERT INTO 'main'.'x1_content' VALUES(?,(?))"
"-- REPLACE INTO 'main'.'x1_docsize' VALUES(?,?)"
- "-- SELECT value FROM 'main'.'x1_stat' WHERE id=0"
- "-- REPLACE INTO 'main'.'x1_stat' VALUES(0,?)"
+ "-- SELECT value FROM 'main'.'x1_stat' WHERE id=?"
+ "-- REPLACE INTO 'main'.'x1_stat' VALUES(?,?)"
"-- SELECT (SELECT max(idx) FROM 'main'.'x1_segdir' WHERE level = ?) + 1"
"-- SELECT coalesce((SELECT max(blockid) FROM 'main'.'x1_segments') + 1, 1)"
- "-- INSERT INTO 'main'.'x1_segdir' VALUES(?,?,?,?,?,?)"
+ "-- REPLACE INTO 'main'.'x1_segdir' VALUES(?,?,?,?,?,?)"
}
do_trace_test 2.3 {
INSERT INTO x1(x1) VALUES('optimize');
} {
"INSERT INTO x1(x1) VALUES('optimize');"
+ "-- SELECT DISTINCT level / (1024 * ?) FROM 'main'.'x1_segdir'"
"-- SELECT idx, start_block, leaves_end_block, end_block, root FROM 'main'.'x1_segdir' WHERE level BETWEEN ? AND ?ORDER BY level DESC, idx ASC"
"-- SELECT max(level) FROM 'main'.'x1_segdir' WHERE level BETWEEN ? AND ?"
"-- SELECT coalesce((SELECT max(blockid) FROM 'main'.'x1_segments') + 1, 1)"
"-- DELETE FROM 'main'.'x1_segdir' WHERE level BETWEEN ? AND ?"
- "-- INSERT INTO 'main'.'x1_segdir' VALUES(?,?,?,?,?,?)"
+ "-- REPLACE INTO 'main'.'x1_segdir' VALUES(?,?,?,?,?,?)"
}
}
diff --git a/test/trans3.test b/test/trans3.test
index ab7db6a..d5b316b 100644
--- a/test/trans3.test
+++ b/test/trans3.test
@@ -64,14 +64,13 @@ do_test trans3-1.5 {
}
} errmsg]
lappend x $errmsg
-} {1 {cannot rollback transaction - SQL statements in progress}}
+} {1 {abort due to ROLLBACK}}
do_test trans3-1.6 {
set ::ecode
-} {SQLITE_BUSY}
+} {}
do_test trans3-1.7 {
- db eval COMMIT
db eval {SELECT * FROM t1}
-} {1 2 3 4 5}
+} {1 2 3 4}
unset -nocomplain ecode
finish_test
diff --git a/test/trigger1.test b/test/trigger1.test
index dc344d4..9d917bd 100644
--- a/test/trigger1.test
+++ b/test/trigger1.test
@@ -29,7 +29,7 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
-ifcapable {!trigger} {
+ifcapable !trigger||!compound {
finish_test
return
}
@@ -290,10 +290,22 @@ ifcapable tempdb {
SELECT * FROM t2;
}
} {1 {no such table: main.t2}}
- do_test trigger-3.6 {
+ do_test trigger-3.6.1 {
catchsql {
DROP TRIGGER r1;
CREATE TEMP TRIGGER r1 AFTER INSERT ON t1 BEGIN
+ INSERT INTO t2 VALUES(NEW.a,NEW.b), (NEW.b*100, NEW.a*100);
+ END;
+ INSERT INTO t1 VALUES(1,2);
+ SELECT * FROM t2;
+ }
+ } {0 {1 2 200 100}}
+ do_test trigger-3.6.2 {
+ catchsql {
+ DROP TRIGGER r1;
+ DELETE FROM t1;
+ DELETE FROM t2;
+ CREATE TEMP TRIGGER r1 AFTER INSERT ON t1 BEGIN
INSERT INTO t2 VALUES(NEW.a,NEW.b);
END;
INSERT INTO t1 VALUES(1,2);
diff --git a/test/unixexcl.test b/test/unixexcl.test
index 057ae0a..0147e6b 100644
--- a/test/unixexcl.test
+++ b/test/unixexcl.test
@@ -80,4 +80,49 @@ do_multiclient_test tn {
} {0 {hello world}}
}
+do_multiclient_test tn {
+ do_test unixexcl-3.$tn.1 {
+ code1 { db close; sqlite3 db file:test.db?psow=0 -vfs unix-excl -uri 1 }
+ code2 { db2 close; sqlite3 db2 file:test.db?psow=0 -vfs unix-excl -uri 1 }
+ sql1 {
+ PRAGMA auto_vacuum = 0;
+ PRAGMA journal_mode = WAL;
+ CREATE TABLE t1(a, b);
+ INSERT INTO t1 VALUES(1, 2);
+ }
+ } {wal}
+
+ if {$tn==1} {
+ do_test unixexcl-3.$tn.1.multiproc {
+ csql2 { SELECT * FROM t1; }
+ } {1 {database is locked}}
+ } else {
+ do_test unixexcl-3.$tn.1.singleproc {
+ sql2 { SELECT * FROM t1; }
+ } {1 2}
+
+ do_test unixexcl-3.$tn.2 {
+ sql2 {
+ BEGIN;
+ SELECT * FROM t1;
+ }
+ } {1 2}
+ do_test unixexcl-3.$tn.3 {
+ sql1 { PRAGMA wal_checkpoint; INSERT INTO t1 VALUES(3, 4); }
+ } {0 3 3}
+ do_test unixexcl-3.$tn.4 {
+ sql2 { SELECT * FROM t1; }
+ } {1 2}
+ do_test unixexcl-3.$tn.5 {
+ sql1 { SELECT * FROM t1; }
+ } {1 2 3 4}
+ do_test unixexcl-3.$tn.6 {
+ sql2 { COMMIT; SELECT * FROM t1; }
+ } {1 2 3 4}
+ do_test unixexcl-3.$tn.7 {
+ sql1 { PRAGMA wal_checkpoint; }
+ } {0 4 4}
+ }
+}
+
finish_test
diff --git a/test/uri.test b/test/uri.test
index 90074d0..93a32b7 100644
--- a/test/uri.test
+++ b/test/uri.test
@@ -54,9 +54,9 @@ foreach {tn uri file} {
if {$tcl_platform(platform)=="windows"} {
if {$tn>14} break
- set uri [string map [list PWD /[pwd]] $uri]
+ set uri [string map [list PWD /[get_pwd]] $uri]
} else {
- set uri [string map [list PWD [pwd]] $uri]
+ set uri [string map [list PWD [get_pwd]] $uri]
}
if {[file isdir $file]} {error "$file is a directory"}
@@ -274,9 +274,9 @@ foreach {tn uri res} {
} {
if {$tcl_platform(platform)=="windows"} {
- set uri [string map [list PWD [string range [pwd] 3 end]] $uri]
+ set uri [string map [list PWD [string range [get_pwd] 3 end]] $uri]
} else {
- set uri [string map [list PWD [string range [pwd] 1 end]] $uri]
+ set uri [string map [list PWD [string range [get_pwd] 1 end]] $uri]
}
do_test 6.$tn {
diff --git a/test/vtab1.test b/test/vtab1.test
index 16f1b43..38aec09 100644
--- a/test/vtab1.test
+++ b/test/vtab1.test
@@ -15,6 +15,7 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
+set testprefix vtab1
ifcapable !vtab||!schema_pragmas {
finish_test
@@ -43,6 +44,9 @@ ifcapable !vtab||!schema_pragmas {
#
# vtab1-14.*: Test 'IN' constraints - i.e. "SELECT * FROM t1 WHERE id IN(...)"
#
+# vtab1-18.*: Check that the LIKE optimization is not applied when the lhs
+# is a virtual table column.
+#
#----------------------------------------------------------------------
@@ -51,7 +55,7 @@ ifcapable !vtab||!schema_pragmas {
# We cannot create a virtual table if the module has not been registered.
#
-do_test vtab1-1.1 {
+do_test vtab1-1.1.1 {
explain {
CREATE VIRTUAL TABLE t1 USING echo;
}
@@ -59,6 +63,11 @@ do_test vtab1-1.1 {
CREATE VIRTUAL TABLE t1 USING echo;
}
} {1 {no such module: echo}}
+do_test vtab1-1.1.2 {
+ catchsql {
+ CREATE VIRTUAL TABLE IF NOT EXISTS t1 USING echo;
+ }
+} {1 {no such module: echo}}
do_test vtab1-1.2 {
execsql {
SELECT name FROM sqlite_master ORDER BY 1
@@ -75,11 +84,16 @@ register_echo_module [sqlite3_connection_pointer db]
# The "echo" module does not invoke sqlite3_declare_vtab() if it is
# passed zero arguments.
#
-do_test vtab1-1.3 {
+do_test vtab1-1.3.1 {
catchsql {
CREATE VIRTUAL TABLE t1 USING echo;
}
} {1 {vtable constructor did not declare schema: t1}}
+do_test vtab1-1.3.2 {
+ catchsql {
+ CREATE VIRTUAL TABLE IF NOT EXISTS t1 USING echo;
+ }
+} {1 {vtable constructor did not declare schema: t1}}
do_test vtab1-1.4 {
execsql {
SELECT name FROM sqlite_master ORDER BY 1
@@ -90,11 +104,16 @@ do_test vtab1-1.4 {
# the virtual table if it is passed an argument that does not correspond
# to an existing real table in the same database.
#
-do_test vtab1-1.5 {
+do_test vtab1-1.5.1 {
catchsql {
CREATE VIRTUAL TABLE t1 USING echo(no_such_table);
}
} {1 {vtable constructor failed: t1}}
+do_test vtab1-1.5.2 {
+ catchsql {
+ CREATE VIRTUAL TABLE IF NOT EXISTS t1 USING echo(no_such_table);
+ }
+} {1 {vtable constructor failed: t1}}
do_test vtab1-1.6 {
execsql {
SELECT name FROM sqlite_master ORDER BY 1
@@ -128,17 +147,27 @@ do_test vtab-1.2152.4 {
# select an illegal table-name (i.e a reserved name or the name of a
# table that already exists).
#
-do_test vtab1-1.7 {
+do_test vtab1-1.7.1 {
catchsql {
CREATE VIRTUAL TABLE sqlite_master USING echo;
}
} {1 {object name reserved for internal use: sqlite_master}}
-do_test vtab1-1.8 {
+do_test vtab1-1.7.2 {
+ catchsql {
+ CREATE VIRTUAL TABLE IF NOT EXISTS sqlite_master USING echo;
+ }
+} {1 {object name reserved for internal use: sqlite_master}}
+do_test vtab1-1.8.1 {
catchsql {
CREATE TABLE treal(a, b, c);
CREATE VIRTUAL TABLE treal USING echo(treal);
}
} {1 {table treal already exists}}
+do_test vtab1-1.8.2 {
+ catchsql {
+ CREATE VIRTUAL TABLE IF NOT EXISTS treal USING echo(treal);
+ }
+} {0 {}}
do_test vtab1-1.9 {
execsql {
DROP TABLE treal;
@@ -1193,5 +1222,57 @@ do_test vtab1-17.1 {
}
} {}
+#-------------------------------------------------------------------------
+# The following tests - vtab1-18.* - test that the optimization of LIKE
+# constraints in where.c plays well with virtual tables.
+#
+# 18.1.*: Case-insensitive LIKE.
+# 18.2.*: Case-sensitive LIKE.
+#
unset -nocomplain echo_module_begin_fail
+
+do_execsql_test 18.1.0 {
+ CREATE TABLE t6(a, b TEXT);
+ CREATE INDEX i6 ON t6(b, a);
+ INSERT INTO t6 VALUES(1, 'Peter');
+ INSERT INTO t6 VALUES(2, 'Andrew');
+ INSERT INTO t6 VALUES(3, 'James');
+ INSERT INTO t6 VALUES(4, 'John');
+ INSERT INTO t6 VALUES(5, 'Phillip');
+ INSERT INTO t6 VALUES(6, 'Bartholomew');
+ CREATE VIRTUAL TABLE e6 USING echo(t6);
+}
+
+foreach {tn sql res filter} {
+ 1.1 "SELECT a FROM e6 WHERE b>'James'" {4 1 5}
+ {xFilter {SELECT rowid, * FROM 't6' WHERE b > ?} James}
+
+ 1.2 "SELECT a FROM e6 WHERE b>='J' AND b<'K'" {3 4}
+ {xFilter {SELECT rowid, * FROM 't6' WHERE b >= ? AND b < ?} J K}
+
+ 1.3 "SELECT a FROM e6 WHERE b LIKE 'J%'" {3 4}
+ {xFilter {SELECT rowid, * FROM 't6'}}
+
+ 1.4 "SELECT a FROM e6 WHERE b LIKE 'j%'" {3 4}
+ {xFilter {SELECT rowid, * FROM 't6'}}
+} {
+ set echo_module {}
+ do_execsql_test 18.$tn.1 $sql $res
+ do_test 18.$tn.2 { lrange $::echo_module 2 end } $filter
+}
+
+do_execsql_test 18.2.0 { PRAGMA case_sensitive_like = ON }
+foreach {tn sql res filter} {
+ 2.1 "SELECT a FROM e6 WHERE b LIKE 'J%'" {3 4}
+ {xFilter {SELECT rowid, * FROM 't6'}}
+
+ 2.2 "SELECT a FROM e6 WHERE b LIKE 'j%'" {}
+ {xFilter {SELECT rowid, * FROM 't6'}}
+} {
+ set echo_module {}
+ do_execsql_test 18.$tn.1 $sql $res
+ do_test 18.$tn.2 { lrange $::echo_module 2 end } $filter
+}
+do_execsql_test 18.2.x { PRAGMA case_sensitive_like = OFF }
+
finish_test
diff --git a/test/vtabD.test b/test/vtabD.test
index 509ba45..589f518 100644
--- a/test/vtabD.test
+++ b/test/vtabD.test
@@ -49,17 +49,15 @@ do_test vtabD-1.5 {
do_test vtabD-1.6 {
execsql { SELECT * FROM tv1 WHERE a < 500 OR b = 810000 }
} [execsql {
- SELECT * FROM t1 WHERE a < 500
- UNION ALL
- SELECT * FROM t1 WHERE b = 810000 AND NOT (a < 500)
+ SELECT * FROM t1 WHERE a < 500;
+ SELECT * FROM t1 WHERE b = 810000 AND NOT (a < 500);
}]
do_test vtabD-1.7 {
execsql { SELECT * FROM tv1 WHERE a < 90000 OR b = 8100000000 }
} [execsql {
- SELECT * FROM t1 WHERE a < 90000
- UNION ALL
- SELECT * FROM t1 WHERE b = 8100000000 AND NOT (a < 90000)
+ SELECT * FROM t1 WHERE a < 90000;
+ SELECT * FROM t1 WHERE b = 8100000000 AND NOT (a < 90000);
}]
if {[working_64bit_int]} {
diff --git a/test/vtab_shared.test b/test/vtab_shared.test
index ce2e432..6a76e27 100644
--- a/test/vtab_shared.test
+++ b/test/vtab_shared.test
@@ -124,23 +124,25 @@ breakpoint
execsql { SELECT * FROM t3 } db2
} {1 2 3 4 5 6}
-do_test vtab_shared-1.12.1 {
- db close
- execsql {
- SELECT * FROM t1 UNION ALL
- SELECT * FROM t2 UNION ALL
- SELECT * FROM t3
- } db2
-} {1 2 3 4 5 6 1 2 3 4 5 6 1 2 3 4 5 6}
-do_test vtab_shared-1.12.2 {
- sqlite3 db test.db
- register_echo_module [sqlite3_connection_pointer db]
- execsql {
- SELECT * FROM t1 UNION ALL
- SELECT * FROM t2 UNION ALL
- SELECT * FROM t3
- } db
-} {1 2 3 4 5 6 1 2 3 4 5 6 1 2 3 4 5 6}
+ifcapable compound {
+ do_test vtab_shared-1.12.1 {
+ db close
+ execsql {
+ SELECT * FROM t1 UNION ALL
+ SELECT * FROM t2 UNION ALL
+ SELECT * FROM t3
+ } db2
+ } {1 2 3 4 5 6 1 2 3 4 5 6 1 2 3 4 5 6}
+ do_test vtab_shared-1.12.2 {
+ sqlite3 db test.db
+ register_echo_module [sqlite3_connection_pointer db]
+ execsql {
+ SELECT * FROM t1 UNION ALL
+ SELECT * FROM t2 UNION ALL
+ SELECT * FROM t3
+ } db
+ } {1 2 3 4 5 6 1 2 3 4 5 6 1 2 3 4 5 6}
+}
# Try a rename or two.
#
diff --git a/test/wal.test b/test/wal.test
index 056becf..32b2608 100644
--- a/test/wal.test
+++ b/test/wal.test
@@ -416,6 +416,7 @@ do_test wal-8.3 {
do_test wal-9.1 {
reopen_db
execsql {
+ PRAGMA cache_size=2000;
CREATE TABLE t1(x PRIMARY KEY);
INSERT INTO t1 VALUES(blob(900));
INSERT INTO t1 VALUES(blob(900));
@@ -545,7 +546,7 @@ do_multiclient_test tn {
} {1 2 3 4 5 6 7 8 9 10}
do_test wal-10.$tn.12 {
catchsql { PRAGMA wal_checkpoint }
- } {0 {0 13 13}} ;# Reader no longer block checkpoints
+ } {0 {0 7 7}} ;# Reader no longer block checkpoints
do_test wal-10.$tn.13 {
execsql { INSERT INTO t1 VALUES(11, 12) }
sql2 {SELECT * FROM t1}
@@ -555,7 +556,7 @@ do_multiclient_test tn {
#
do_test wal-10.$tn.14 {
catchsql { PRAGMA wal_checkpoint }
- } {0 {0 15 13}}
+ } {0 {0 8 7}}
# The following series of test cases used to verify another blocking
# case in WAL - a case which no longer blocks.
@@ -565,10 +566,10 @@ do_multiclient_test tn {
} {1 2 3 4 5 6 7 8 9 10 11 12}
do_test wal-10.$tn.16 {
catchsql { PRAGMA wal_checkpoint }
- } {0 {0 15 15}}
+ } {0 {0 8 8}}
do_test wal-10.$tn.17 {
execsql { PRAGMA wal_checkpoint }
- } {0 15 15}
+ } {0 8 8}
do_test wal-10.$tn.18 {
sql3 { BEGIN; SELECT * FROM t1 }
} {1 2 3 4 5 6 7 8 9 10 11 12}
@@ -591,13 +592,13 @@ do_multiclient_test tn {
#
do_test wal-10.$tn.23 {
execsql { PRAGMA wal_checkpoint }
- } {0 17 17}
+ } {0 9 9}
do_test wal-10.$tn.24 {
sql2 { BEGIN; SELECT * FROM t1; }
} {1 2 3 4 5 6 7 8 9 10 11 12 13 14}
do_test wal-10.$tn.25 {
execsql { PRAGMA wal_checkpoint }
- } {0 17 17}
+ } {0 9 9}
do_test wal-10.$tn.26 {
catchsql { INSERT INTO t1 VALUES(15, 16) }
} {0 {}}
@@ -614,11 +615,11 @@ do_multiclient_test tn {
do_test wal-10.$tn.29 {
execsql { INSERT INTO t1 VALUES(19, 20) }
catchsql { PRAGMA wal_checkpoint }
- } {0 {0 6 0}}
+ } {0 {0 3 0}}
do_test wal-10.$tn.30 {
code3 { sqlite3_finalize $::STMT }
execsql { PRAGMA wal_checkpoint }
- } {0 6 0}
+ } {0 3 0}
# At one point, if a reader failed to upgrade to a writer because it
# was reading an old snapshot, the write-locks were not being released.
@@ -657,7 +658,7 @@ do_multiclient_test tn {
} {a b c d}
do_test wal-10.$tn.36 {
catchsql { PRAGMA wal_checkpoint }
- } {0 {0 16 16}}
+ } {0 {0 8 8}}
do_test wal-10.$tn.36 {
sql3 { INSERT INTO t1 VALUES('e', 'f') }
sql2 { SELECT * FROM t1 }
@@ -665,7 +666,7 @@ do_multiclient_test tn {
do_test wal-10.$tn.37 {
sql2 COMMIT
execsql { PRAGMA wal_checkpoint }
- } {0 18 18}
+ } {0 9 9}
}
#-------------------------------------------------------------------------
@@ -1039,7 +1040,7 @@ foreach {tn ckpt_cmd ckpt_res ckpt_main ckpt_aux} {
5 {sqlite3_wal_checkpoint db aux} SQLITE_OK 0 1
6 {sqlite3_wal_checkpoint db temp} SQLITE_OK 0 0
7 {db eval "PRAGMA main.wal_checkpoint"} {0 10 10} 1 0
- 8 {db eval "PRAGMA aux.wal_checkpoint"} {0 16 16} 0 1
+ 8 {db eval "PRAGMA aux.wal_checkpoint"} {0 13 13} 0 1
9 {db eval "PRAGMA temp.wal_checkpoint"} {0 -1 -1} 0 0
} {
do_test wal-16.$tn.1 {
@@ -1053,7 +1054,8 @@ foreach {tn ckpt_cmd ckpt_res ckpt_main ckpt_aux} {
PRAGMA aux.auto_vacuum = 0;
PRAGMA main.journal_mode = WAL;
PRAGMA aux.journal_mode = WAL;
- PRAGMA synchronous = NORMAL;
+ PRAGMA main.synchronous = NORMAL;
+ PRAGMA aux.synchronous = NORMAL;
}
} {wal wal}
@@ -1071,7 +1073,7 @@ foreach {tn ckpt_cmd ckpt_res ckpt_main ckpt_aux} {
} [list [expr 1*1024] [wal_file_size 10 1024]]
do_test wal-16.$tn.3 {
list [file size test2.db] [file size test2.db-wal]
- } [list [expr 1*1024] [wal_file_size 16 1024]]
+ } [list [expr 1*1024] [wal_file_size 13 1024]]
do_test wal-16.$tn.4 [list eval $ckpt_cmd] $ckpt_res
@@ -1081,7 +1083,7 @@ foreach {tn ckpt_cmd ckpt_res ckpt_main ckpt_aux} {
do_test wal-16.$tn.6 {
list [file size test2.db] [file size test2.db-wal]
- } [list [expr ($ckpt_aux ? 7 : 1)*1024] [wal_file_size 16 1024]]
+ } [list [expr ($ckpt_aux ? 7 : 1)*1024] [wal_file_size 13 1024]]
catch { db close }
}
@@ -1124,6 +1126,7 @@ foreach {tn sectorsize logsize} "
execsql {
PRAGMA auto_vacuum = 0;
PRAGMA page_size = 512;
+ PRAGMA cache_size = -2000;
PRAGMA journal_mode = WAL;
PRAGMA synchronous = FULL;
}
@@ -1220,10 +1223,11 @@ proc logcksum {ckv1 ckv2 blob} {
upvar $ckv1 c1
upvar $ckv2 c2
- set scanpattern I*
- if {$::tcl_platform(byteOrder) eq "littleEndian"} {
- set scanpattern i*
- }
+ # Since the magic number at the start of the -wal file header is
+ # 931071618 that indicates that the content should always be read as
+ # little-endian.
+ #
+ set scanpattern i*
binary scan $blob $scanpattern values
foreach {v1 v2} $values {
@@ -1474,7 +1478,7 @@ foreach pgsz {512 1024 2048 4096 8192 16384 32768 65536} {
# Test that when 1 or more pages are recovered from a WAL file,
# sqlite3_log() is invoked to report this to the user.
#
-set walfile [file nativename [file join [pwd] test.db-wal]]
+set walfile [file nativename [file join [get_pwd] test.db-wal]]
catch {db close}
forcedelete test.db
do_test wal-23.1 {
@@ -1550,9 +1554,13 @@ ifcapable autovacuum {
}
file size test.db
} [expr 3 * 1024]
+
+ # WAL file now contains a single frame - the new root page for table t1.
+ # It would be two frames (the new root page and a padding frame) if the
+ # ZERO_DAMAGE flag were not set.
do_test 24.5 {
file size test.db-wal
- } 2128
+ } [wal_file_size 1 1024]
}
db close
diff --git a/test/wal2.test b/test/wal2.test
index f488706..f30c011 100644
--- a/test/wal2.test
+++ b/test/wal2.test
@@ -46,6 +46,7 @@ proc set_tvfs_hdr {file args} {
}
set blob [tvfs shm $file]
+ if {$::tcl_platform(byteOrder)=="bigEndian"} {set fmt I} {set fmt i}
if {[llength $args]} {
set ia [lindex $args 0]
@@ -54,11 +55,11 @@ proc set_tvfs_hdr {file args} {
set ib [lindex $args 1]
}
binary scan $blob a[expr $nHdr*2]a* dummy tail
- set blob [binary format i${nInt}i${nInt}a* $ia $ib $tail]
+ set blob [binary format ${fmt}${nInt}${fmt}${nInt}a* $ia $ib $tail]
tvfs shm $file $blob
}
- binary scan $blob i${nInt} ints
+ binary scan $blob ${fmt}${nInt} ints
return $ints
}
@@ -361,7 +362,9 @@ do_test wal2-4.1 {
INSERT INTO data VALUES('need xShmOpen to see this');
PRAGMA wal_checkpoint;
}
-} {wal 0 5 5}
+ # Three pages in the WAL file at this point: One copy of page 1 and two
+ # of the root page for table "data".
+} {wal 0 3 3}
do_test wal2-4.2 {
db close
testvfs tvfs -noshm 1
@@ -730,7 +733,7 @@ do_test wal2-6.5.1 {
INSERT INTO t2 VALUES('I', 'II');
PRAGMA journal_mode;
}
-} {wal exclusive 0 3 3 wal}
+} {wal exclusive 0 2 2 wal}
do_test wal2-6.5.2 {
execsql {
PRAGMA locking_mode = normal;
@@ -741,7 +744,7 @@ do_test wal2-6.5.2 {
} {normal exclusive I II III IV}
do_test wal2-6.5.3 {
execsql { PRAGMA wal_checkpoint }
-} {0 4 4}
+} {0 2 2}
db close
proc lock_control {method filename handle spec} {
@@ -1040,7 +1043,10 @@ tvfs delete
#
if {$::tcl_platform(platform) == "unix"} {
faultsim_delete_and_reopen
- set umask [exec /bin/sh -c umask]
+ # Changed on 2012-02-13: umask is deliberately ignored for -wal files.
+ #set umask [exec /bin/sh -c umask]
+ set umask 0
+
do_test wal2-12.1 {
sqlite3 db test.db
@@ -1176,14 +1182,15 @@ if {$::tcl_platform(platform) == "unix"} {
# Test that "PRAGMA checkpoint_fullsync" appears to be working.
#
foreach {tn sql reslist} {
- 1 { } {8 0 3 0 5 0}
- 2 { PRAGMA checkpoint_fullfsync = 1 } {8 4 3 2 5 2}
- 3 { PRAGMA checkpoint_fullfsync = 0 } {8 0 3 0 5 0}
+ 1 { } {10 0 4 0 6 0}
+ 2 { PRAGMA checkpoint_fullfsync = 1 } {10 4 4 2 6 2}
+ 3 { PRAGMA checkpoint_fullfsync = 0 } {10 0 4 0 6 0}
} {
faultsim_delete_and_reopen
execsql {PRAGMA auto_vacuum = 0}
execsql $sql
+ do_execsql_test wal2-14.$tn.0 { PRAGMA page_size = 4096 } {}
do_execsql_test wal2-14.$tn.1 { PRAGMA journal_mode = WAL } {wal}
set sqlite_sync_count 0
@@ -1192,14 +1199,14 @@ foreach {tn sql reslist} {
do_execsql_test wal2-14.$tn.2 {
PRAGMA wal_autocheckpoint = 10;
CREATE TABLE t1(a, b); -- 2 wal syncs
- INSERT INTO t1 VALUES(1, 2); -- 1 wal sync
+ INSERT INTO t1 VALUES(1, 2); -- 2 wal sync
PRAGMA wal_checkpoint; -- 1 wal sync, 1 db sync
BEGIN;
INSERT INTO t1 VALUES(3, 4);
INSERT INTO t1 VALUES(5, 6);
- COMMIT; -- 1 wal sync
+ COMMIT; -- 2 wal sync
PRAGMA wal_checkpoint; -- 1 wal sync, 1 db sync
- } {10 0 5 5 0 2 2}
+ } {10 0 3 3 0 1 1}
do_test wal2-14.$tn.3 {
cond_incr_sync_count 1
@@ -1233,22 +1240,22 @@ catch { db close }
# PRAGMA fullfsync
# PRAGMA synchronous
#
-foreach {tn settings commit_sync ckpt_sync} {
- 1 {0 0 off} {0 0} {0 0}
- 2 {0 0 normal} {0 0} {2 0}
- 3 {0 0 full} {1 0} {2 0}
-
- 4 {0 1 off} {0 0} {0 0}
- 5 {0 1 normal} {0 0} {0 2}
- 6 {0 1 full} {0 1} {0 2}
-
- 7 {1 0 off} {0 0} {0 0}
- 8 {1 0 normal} {0 0} {0 2}
- 9 {1 0 full} {1 0} {0 2}
-
- 10 {1 1 off} {0 0} {0 0}
- 11 {1 1 normal} {0 0} {0 2}
- 12 {1 1 full} {0 1} {0 2}
+foreach {tn settings restart_sync commit_sync ckpt_sync} {
+ 1 {0 0 off} {0 0} {0 0} {0 0}
+ 2 {0 0 normal} {1 0} {0 0} {2 0}
+ 3 {0 0 full} {2 0} {1 0} {2 0}
+
+ 4 {0 1 off} {0 0} {0 0} {0 0}
+ 5 {0 1 normal} {0 1} {0 0} {0 2}
+ 6 {0 1 full} {0 2} {0 1} {0 2}
+
+ 7 {1 0 off} {0 0} {0 0} {0 0}
+ 8 {1 0 normal} {1 0} {0 0} {0 2}
+ 9 {1 0 full} {2 0} {1 0} {0 2}
+
+ 10 {1 1 off} {0 0} {0 0} {0 0}
+ 11 {1 1 normal} {0 1} {0 0} {0 2}
+ 12 {1 1 full} {0 2} {0 1} {0 2}
} {
forcedelete test.db
@@ -1261,30 +1268,40 @@ foreach {tn settings commit_sync ckpt_sync} {
sqlite3 db test.db
do_execsql_test 15.$tn.1 "
+ PRAGMA page_size = 4096;
CREATE TABLE t1(x);
+ PRAGMA wal_autocheckpoint = OFF;
PRAGMA journal_mode = WAL;
PRAGMA checkpoint_fullfsync = [lindex $settings 0];
PRAGMA fullfsync = [lindex $settings 1];
PRAGMA synchronous = [lindex $settings 2];
- " {wal}
+ " {0 wal}
+if { $tn==2} breakpoint
do_test 15.$tn.2 {
set sync(normal) 0
set sync(full) 0
execsql { INSERT INTO t1 VALUES('abc') }
list $::sync(normal) $::sync(full)
- } $commit_sync
+ } $restart_sync
do_test 15.$tn.3 {
set sync(normal) 0
set sync(full) 0
- execsql { INSERT INTO t1 VALUES('def') }
+ execsql { INSERT INTO t1 VALUES('abc') }
list $::sync(normal) $::sync(full)
} $commit_sync
do_test 15.$tn.4 {
set sync(normal) 0
set sync(full) 0
+ execsql { INSERT INTO t1 VALUES('def') }
+ list $::sync(normal) $::sync(full)
+ } $commit_sync
+
+ do_test 15.$tn.5 {
+ set sync(normal) 0
+ set sync(full) 0
execsql { PRAGMA wal_checkpoint }
list $::sync(normal) $::sync(full)
} $ckpt_sync
diff --git a/test/wal3.test b/test/wal3.test
index ea5e705..ccab93e 100644
--- a/test/wal3.test
+++ b/test/wal3.test
@@ -217,6 +217,7 @@ foreach {tn syncmode synccount} {
execsql "PRAGMA synchronous = $syncmode"
execsql { PRAGMA journal_mode = WAL }
+ execsql { CREATE TABLE filler(a,b,c); }
set ::syncs [list]
T filter xSync
@@ -428,7 +429,7 @@ do_test wal3-6.1.2 {
} {o t t f}
do_test wal3-6.1.3 {
execsql { PRAGMA wal_checkpoint } db2
-} {0 7 7}
+} {0 4 4}
# At this point the log file has been fully checkpointed. However,
# connection [db3] holds a lock that prevents the log from being wrapped.
@@ -517,7 +518,7 @@ proc lock_callback {method file handle spec} {
}
do_test wal3-6.2.2 {
execsql { PRAGMA wal_checkpoint }
-} {0 7 7}
+} {0 4 4}
do_test wal3-6.2.3 {
set ::R
} {h h l b}
@@ -627,7 +628,7 @@ do_test wal3-8.1 {
INSERT INTO b VALUES('Markazi');
PRAGMA wal_checkpoint;
}
-} {wal 0 9 9}
+} {wal 0 5 5}
do_test wal3-8.2 {
execsql { SELECT * FROM b }
} {Tehran Qom Markazi}
diff --git a/test/wal5.test b/test/wal5.test
index ad6bcfc..6eceed5 100644
--- a/test/wal5.test
+++ b/test/wal5.test
@@ -197,9 +197,9 @@ foreach {testprefix do_wal_checkpoint} {
INSERT INTO t2 VALUES(1, 2);
}
} {}
- do_test 2.2.$tn.2 { file_page_counts } {1 5 1 5}
- do_test 2.1.$tn.3 { code1 { do_wal_checkpoint db } } {0 5 5}
- do_test 2.1.$tn.4 { file_page_counts } {2 5 2 5}
+ do_test 2.2.$tn.2 { file_page_counts } {1 3 1 3}
+ do_test 2.1.$tn.3 { code1 { do_wal_checkpoint db } } {0 3 3}
+ do_test 2.1.$tn.4 { file_page_counts } {2 3 2 3}
}
do_multiclient_test tn {
@@ -213,10 +213,10 @@ foreach {testprefix do_wal_checkpoint} {
INSERT INTO t2 VALUES(3, 4);
}
} {}
- do_test 2.2.$tn.2 { file_page_counts } {1 5 1 7}
+ do_test 2.2.$tn.2 { file_page_counts } {1 3 1 4}
do_test 2.2.$tn.3 { sql2 { BEGIN; SELECT * FROM t1 } } {1 2}
- do_test 2.2.$tn.4 { code1 { do_wal_checkpoint db -mode restart } } {1 5 5}
- do_test 2.2.$tn.5 { file_page_counts } {2 5 2 7}
+ do_test 2.2.$tn.4 { code1 { do_wal_checkpoint db -mode restart } } {1 3 3}
+ do_test 2.2.$tn.5 { file_page_counts } {2 3 2 4}
}
do_multiclient_test tn {
@@ -229,13 +229,13 @@ foreach {testprefix do_wal_checkpoint} {
INSERT INTO t2 VALUES(1, 2);
}
} {}
- do_test 2.3.$tn.2 { file_page_counts } {1 5 1 5}
+ do_test 2.3.$tn.2 { file_page_counts } {1 3 1 3}
do_test 2.3.$tn.3 { sql2 { BEGIN; SELECT * FROM t1 } } {1 2}
do_test 2.3.$tn.4 { sql1 { INSERT INTO t1 VALUES(3, 4) } } {}
do_test 2.3.$tn.5 { sql1 { INSERT INTO t2 VALUES(3, 4) } } {}
- do_test 2.3.$tn.6 { file_page_counts } {1 7 1 7}
- do_test 2.3.$tn.7 { code1 { do_wal_checkpoint db -mode full } } {1 7 5}
- do_test 2.3.$tn.8 { file_page_counts } {1 7 2 7}
+ do_test 2.3.$tn.6 { file_page_counts } {1 4 1 4}
+ do_test 2.3.$tn.7 { code1 { do_wal_checkpoint db -mode full } } {1 4 3}
+ do_test 2.3.$tn.8 { file_page_counts } {1 4 2 4}
}
# Check that checkpoints block on the correct locks. And respond correctly
@@ -256,18 +256,18 @@ foreach {testprefix do_wal_checkpoint} {
# processes holding all three types of locks.
#
foreach {tn1 checkpoint busy_on ckpt_expected expected} {
- 1 PASSIVE - {0 5 5} -
- 2 TYPO - {0 5 5} -
-
- 3 FULL - {0 7 7} 2
- 4 FULL 1 {1 5 5} 1
- 5 FULL 2 {1 7 5} 2
- 6 FULL 3 {0 7 7} 2
-
- 7 RESTART - {0 7 7} 3
- 8 RESTART 1 {1 5 5} 1
- 9 RESTART 2 {1 7 5} 2
- 10 RESTART 3 {1 7 7} 3
+ 1 PASSIVE - {0 3 3} -
+ 2 TYPO - {0 3 3} -
+
+ 3 FULL - {0 4 4} 2
+ 4 FULL 1 {1 3 3} 1
+ 5 FULL 2 {1 4 3} 2
+ 6 FULL 3 {0 4 4} 2
+
+ 7 RESTART - {0 4 4} 3
+ 8 RESTART 1 {1 3 3} 1
+ 9 RESTART 2 {1 4 3} 2
+ 10 RESTART 3 {1 4 4} 3
} {
do_multiclient_test tn {
diff --git a/test/wal8.test b/test/wal8.test
new file mode 100644
index 0000000..4b97de7
--- /dev/null
+++ b/test/wal8.test
@@ -0,0 +1,90 @@
+# 2012 February 28
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+# This file implements regression tests for SQLite library. The
+# focus of this file is testing the operation of the library in
+# "PRAGMA journal_mode=WAL" mode.
+#
+# Specifically, it tests the case where a connection opens an empty
+# file. Then, another connection opens the same file and initializes
+# the connection as a WAL database. Following this, the first connection
+# executes a "PRAGMA page_size = XXX" command to set its expected page
+# size, and then queries the database.
+#
+# This is an unusual case, as normally SQLite is able to glean the page
+# size from the database file as soon as it is opened (even before the
+# first read transaction is executed), and the "PRAGMA page_size = XXX"
+# is a no-op.
+#
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set ::testprefix wal8
+
+db close
+forcedelete test.db test.db-wal
+
+sqlite3 db test.db
+sqlite3 db2 test.db
+
+do_test 1.0 {
+ execsql {
+ PRAGMA journal_mode = wal;
+ CREATE TABLE t1(a, b);
+ INSERT INTO t1 VALUES(1, 2);
+ } db2
+} {wal}
+
+do_catchsql_test 1.1 {
+ PRAGMA page_size = 4096;
+ VACUUM;
+} {0 {}}
+
+db close
+db2 close
+forcedelete test.db test.db-wal
+
+sqlite3 db test.db
+sqlite3 db2 test.db
+
+do_test 2.0 {
+ execsql {
+ CREATE TABLE t1(a, b);
+ INSERT INTO t1 VALUES(1, 2);
+ PRAGMA journal_mode = wal;
+ } db2
+} {wal}
+
+do_catchsql_test 2.1 {
+ PRAGMA page_size = 4096;
+ VACUUM;
+} {0 {}}
+
+db close
+db2 close
+forcedelete test.db test.db-wal
+
+sqlite3 db test.db
+sqlite3 db2 test.db
+
+do_test 3.0 {
+ execsql {
+ PRAGMA journal_mode = wal;
+ CREATE TABLE t1(a, b);
+ INSERT INTO t1 VALUES(1, 2);
+ } db2
+} {wal}
+
+do_execsql_test 3.1 {
+ PRAGMA page_size = 4096;
+ SELECT name FROM sqlite_master;
+} {t1}
+
+finish_test
+
diff --git a/test/walbig.test b/test/walbig.test
index 092db23..c43b7e2 100644
--- a/test/walbig.test
+++ b/test/walbig.test
@@ -52,7 +52,7 @@ do_test walbig-1.0 {
} {wal}
db close
-if {[catch {fake_big_file 5000 [pwd]/test.db}]} {
+if {[catch {fake_big_file 5000 [get_pwd]/test.db}]} {
puts "**** Unable to create a file larger than 5000 MB. *****"
finish_test
return
diff --git a/test/walcrash.test b/test/walcrash.test
index cfce5fe..adc4841 100644
--- a/test/walcrash.test
+++ b/test/walcrash.test
@@ -76,7 +76,7 @@ for {set i 1} {$i < $REPEATS} {incr i} {
for {set i 1} {$i < $REPEATS} {incr i} {
forcedelete test.db test.db-wal
do_test walcrash-2.$i.1 {
- crashsql -delay 4 -file test.db-wal -seed [incr seed] {
+ crashsql -delay 5 -file test.db-wal -seed [incr seed] {
PRAGMA journal_mode = WAL;
CREATE TABLE t1(a PRIMARY KEY, b);
INSERT INTO t1 VALUES(1, 2);
@@ -147,7 +147,7 @@ for {set i 1} {$i < $REPEATS} {incr i} {
forcedelete test2.db test2.db-wal
do_test walcrash-4.$i.1 {
- crashsql -delay 3 -file test.db-wal -seed [incr seed] -blocksize 4096 {
+ crashsql -delay 4 -file test.db-wal -seed [incr seed] -blocksize 4096 {
PRAGMA journal_mode = WAL;
PRAGMA page_size = 1024;
CREATE TABLE t1(a PRIMARY KEY, b);
@@ -175,7 +175,7 @@ for {set i 1} {$i < $REPEATS} {incr i} {
forcedelete test2.db test2.db-wal
do_test walcrash-5.$i.1 {
- crashsql -delay 11 -file test.db-wal -seed [incr seed] -blocksize 4096 {
+ crashsql -delay 13 -file test.db-wal -seed [incr seed] -blocksize 4096 {
PRAGMA journal_mode = WAL;
PRAGMA page_size = 1024;
BEGIN;
@@ -216,7 +216,7 @@ for {set i 1} {$i < $REPEATS} {incr i} {
forcedelete test2.db test2.db-wal
do_test walcrash-6.$i.1 {
- crashsql -delay 12 -file test.db-wal -seed [incr seed] -blocksize 512 {
+ crashsql -delay 14 -file test.db-wal -seed [incr seed] -blocksize 512 {
PRAGMA journal_mode = WAL;
PRAGMA page_size = 1024;
BEGIN;
@@ -234,9 +234,9 @@ for {set i 1} {$i < $REPEATS} {incr i} {
INSERT INTO t1 SELECT randomblob(900) FROM t1 LIMIT 4; /* 32 */
PRAGMA wal_checkpoint;
- INSERT INTO t1 VALUES(randomblob(900));
- INSERT INTO t1 VALUES(randomblob(900));
- INSERT INTO t1 VALUES(randomblob(900));
+ INSERT INTO t1 VALUES(randomblob(9000));
+ INSERT INTO t1 VALUES(randomblob(9000));
+ INSERT INTO t1 VALUES(randomblob(9000));
}
} {1 {child process exited abnormally}}
diff --git a/test/walcrash3.test b/test/walcrash3.test
new file mode 100644
index 0000000..c2c9a6d
--- /dev/null
+++ b/test/walcrash3.test
@@ -0,0 +1,129 @@
+# 2011 December 16
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# This test simulates an application crash immediately following a
+# system call to truncate a file. Specifically, the system call that
+# truncates the WAL file if "PRAGMA journal_size_limit" is configured.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+
+ifcapable !wal {finish_test ; return }
+set testprefix walcrash3
+
+db close
+testvfs tvfs
+tvfs filter {xTruncate xWrite}
+tvfs script tvfs_callback
+proc tvfs_callback {args} {}
+
+sqlite3 db test.db -vfs tvfs
+do_execsql_test 1.1 {
+ PRAGMA page_size = 1024;
+ PRAGMA journal_mode = WAL;
+ PRAGMA wal_autocheckpoint = 128;
+ PRAGMA journal_size_limit = 16384;
+
+ CREATE TABLE t1(a BLOB, b BLOB, UNIQUE(a, b));
+ INSERT INTO t1 VALUES(randomblob(10), randomblob(1000));
+} {wal 128 16384}
+
+proc tvfs_callback {method file arglist} {
+ if {$::state==1} {
+ foreach f [glob -nocomplain xx_test.*] { forcedelete $f }
+ foreach f [glob -nocomplain test.*] { forcecopy $f "xx_$f" }
+ set ::state 2
+ }
+ if {$::state==0 && $method=="xTruncate" && [file tail $file]=="test.db-wal"} {
+ set ::state 1
+ }
+}
+
+for {set i 2} {$i<1000} {incr i} {
+
+ # If the WAL file is truncated within the following, within the following
+ # xWrite call the [tvfs_callback] makes a copy of the database and WAL
+ # files set sets $::state to 2. So that the copied files are in the same
+ # state as the real database and WAL files would be if an application crash
+ # occurred immediately following the xTruncate().
+ #
+ set ::state 0
+ do_execsql_test 1.$i.1 {
+ INSERT INTO t1 VALUES(randomblob(10), randomblob(1000));
+ }
+
+ # If a copy was made, open it and run the integrity-check.
+ #
+ if {$::state==2} {
+ sqlite3 db2 xx_test.db
+ do_test 1.$i.2 { execsql { PRAGMA integrity_check } db2 } "ok"
+ do_test 1.$i.3 { execsql { SELECT count(*) FROM t1 } db2 } [expr $i-1]
+ db2 close
+ }
+}
+catch { db close }
+tvfs delete
+
+#--------------------------------------------------------------------------
+#
+catch { db close }
+forcedelete test.db
+
+do_test 2.1 {
+ sqlite3 db test.db
+ execsql {
+ PRAGMA page_size = 512;
+ PRAGMA journal_mode = WAL;
+ PRAGMA wal_autocheckpoint = 128;
+ CREATE TABLE t1(a PRIMARY KEY, b);
+ INSERT INTO t1 VALUES(randomblob(25), randomblob(200));
+ }
+
+ for {set i 0} {$i < 1500} {incr i} {
+ execsql { INSERT INTO t1 VALUES(randomblob(25), randomblob(200)) }
+ }
+
+ db_save
+ db close
+} {}
+
+set nInitialErr [set_test_counter errors]
+for {set i 2} {$i<10000 && [set_test_counter errors]==$nInitialErr} {incr i} {
+
+ do_test 2.$i.1 {
+ catch { db close }
+ db_restore
+ crashsql -delay 2 -file test.db-wal -seed $i {
+ SELECT * FROM sqlite_master;
+ PRAGMA synchronous = full;
+ PRAGMA wal_checkpoint;
+ BEGIN;
+ INSERT INTO t1 VALUES(randomblob(26), randomblob(200));
+ INSERT INTO t1 VALUES(randomblob(26), randomblob(200));
+ INSERT INTO t1 VALUES(randomblob(26), randomblob(200));
+ INSERT INTO t1 VALUES(randomblob(26), randomblob(200));
+ INSERT INTO t1 VALUES(randomblob(26), randomblob(200));
+ INSERT INTO t1 VALUES(randomblob(26), randomblob(200));
+ INSERT INTO t1 VALUES(randomblob(26), randomblob(200));
+ INSERT INTO t1 VALUES(randomblob(26), randomblob(200));
+ COMMIT;
+ }
+ } {1 {child process exited abnormally}}
+
+ do_test 2.$i.2 {
+ sqlite3 db test.db
+ execsql { PRAGMA integrity_check }
+ } {ok}
+}
+
+finish_test
+
diff --git a/test/walfault.test b/test/walfault.test
index 1b71d78..6f9aedd 100644
--- a/test/walfault.test
+++ b/test/walfault.test
@@ -123,7 +123,6 @@ do_faultsim_test walfault-3 -prep {
faultsim_test_result {0 {}}
}
-
#--------------------------------------------------------------------------
#
if {[permutation] != "inmemory_journal"} {
@@ -141,7 +140,9 @@ if {[permutation] != "inmemory_journal"} {
SELECT * FROM t1;
}
} -test {
- faultsim_test_result {0 {wal 0 7 7 a b}}
+ # Update: The following changed from {0 {wal 0 7 7 a b}} as a result
+ # of PSOW being set by default.
+ faultsim_test_result {0 {wal 0 5 5 a b}}
faultsim_integrity_check
}
}
@@ -542,10 +543,11 @@ do_faultsim_test walfault-14 -prep {
INSERT INTO abc VALUES(randomblob(1500));
}
} -test {
- faultsim_test_result {0 {0 10 10}}
+ faultsim_test_result {0 {0 9 9}}
faultsim_integrity_check
set nRow [db eval {SELECT count(*) FROM abc}]
if {!(($nRow==2 && $testrc) || $nRow==3)} { error "Bad db content" }
}
+finish_test
finish_test
diff --git a/test/walpersist.test b/test/walpersist.test
index 175dcbf..692728d 100644
--- a/test/walpersist.test
+++ b/test/walpersist.test
@@ -67,7 +67,60 @@ do_test walpersist-1.11 {
list [file exists test.db] [file exists test.db-wal] [file exists test.db-shm]
} {1 1 1}
-
+# Make sure the journal_size_limit works to limit the size of the
+# persisted wal file. In persistent-wal mode, any non-negative
+# journal_size_limit causes the WAL file to be truncated to zero bytes
+# when closing.
+#
+forcedelete test.db test.db-shm test.db-wal
+do_test walpersist-2.1 {
+ sqlite3 db test.db
+ db eval {
+ PRAGMA journal_mode=WAL;
+ PRAGMA wal_autocheckpoint=OFF;
+ PRAGMA journal_size_limit=12000;
+ CREATE TABLE t1(x);
+ INSERT INTO t1 VALUES(randomblob(50000));
+ UPDATE t1 SET x=randomblob(50000);
+ }
+ expr {[file size test.db-wal]>100000}
+} {1}
+do_test walpersist-2.2 {
+ file_control_persist_wal db 1
+ db close
+ concat [file exists test.db-wal] [file size test.db-wal]
+} {1 0}
+do_test walpersist-2.3 {
+ sqlite3 db test.db
+ execsql { PRAGMA integrity_check }
+} {ok}
+do_test 3.1 {
+ catch {db close}
+ forcedelete test.db test.db-shm test.db-wal
+ sqlite3 db test.db
+ execsql {
+ PRAGMA page_size = 1024;
+ PRAGMA journal_mode = WAL;
+ PRAGMA wal_autocheckpoint=128;
+ PRAGMA journal_size_limit=16384;
+ CREATE TABLE t1(a, b, PRIMARY KEY(a, b));
+ }
+} {wal 128 16384}
+do_test 3.2 {
+ for {set i 0} {$i<200} {incr i} {
+ execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) }
+ }
+ file_control_persist_wal db 1
+ db close
+} {}
+do_test walpersist-3.3 {
+ file size test.db-wal
+} {0}
+do_test walpersist-3.4 {
+ sqlite3 db test.db
+ execsql { PRAGMA integrity_check }
+} {ok}
+
finish_test
diff --git a/test/where.test b/test/where.test
index 9145bcc..3826a5f 100644
--- a/test/where.test
+++ b/test/where.test
@@ -1105,15 +1105,17 @@ do_test where-14.4 {
}
} {1/1 1/4 4/1 4/4 nosort}
do_test where-14.5 {
+ # This test case changed from "nosort" to "sort". See ticket 2a5629202f.
cksort {
SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.b, x.a||x.b
}
-} {4/1 4/4 1/1 1/4 nosort}
+} {4/1 4/4 1/1 1/4 sort}
do_test where-14.6 {
+ # This test case changed from "nosort" to "sort". See ticket 2a5629202f.
cksort {
SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.b, x.a||x.b DESC
}
-} {4/1 4/4 1/1 1/4 nosort}
+} {4/1 4/4 1/1 1/4 sort}
do_test where-14.7 {
cksort {
SELECT x.a || '/' || y.a FROM t8 x, t8 y ORDER BY x.b, y.a||y.b
diff --git a/test/where7.test b/test/where7.test
index ffb7173..b6cd7cc 100644
--- a/test/where7.test
+++ b/test/where7.test
@@ -23339,7 +23339,7 @@ do_execsql_test where7-3.1 {
OR t301.c8 = 1407424651264000)
ORDER BY t302.c5 LIMIT 200;
} {
- 0 0 1 {SEARCH TABLE t301 USING COVERING INDEX t301_c4 (c4=?) (~5 rows)}
+ 0 0 1 {SEARCH TABLE t301 USING COVERING INDEX t301_c4 (c4=?) (~10 rows)}
0 0 1 {SEARCH TABLE t301 USING INTEGER PRIMARY KEY (rowid=?) (~1 rows)}
0 1 0 {SEARCH TABLE t302 USING INDEX t302_c8_c3 (c8=? AND c3>?) (~2 rows)}
0 0 0 {USE TEMP B-TREE FOR ORDER BY}
diff --git a/test/where9.test b/test/where9.test
index b4a2d8d..23260a6 100644
--- a/test/where9.test
+++ b/test/where9.test
@@ -15,7 +15,7 @@
set testdir [file dirname $argv0]
source $testdir/tester.tcl
-ifcapable !or_opt {
+ifcapable !or_opt||!compound {
finish_test
return
}
@@ -364,7 +364,7 @@ ifcapable explain {
} {
0 0 0 {SEARCH TABLE t1 USING INTEGER PRIMARY KEY (rowid=?) (~1 rows)}
0 1 1 {SEARCH TABLE t2 USING INDEX t2d (d=?) (~2 rows)}
- 0 1 1 {SEARCH TABLE t2 USING COVERING INDEX t2f (f=?) (~5 rows)}
+ 0 1 1 {SEARCH TABLE t2 USING COVERING INDEX t2f (f=?) (~10 rows)}
}
do_execsql_test where9-3.2 {
EXPLAIN QUERY PLAN
@@ -374,7 +374,7 @@ ifcapable explain {
} {
0 0 0 {SEARCH TABLE t1 USING INTEGER PRIMARY KEY (rowid=?) (~1 rows)}
0 1 1 {SEARCH TABLE t2 USING INDEX t2d (d=?) (~2 rows)}
- 0 1 1 {SEARCH TABLE t2 USING COVERING INDEX t2f (f=?) (~5 rows)}
+ 0 1 1 {SEARCH TABLE t2 USING COVERING INDEX t2f (f=?) (~10 rows)}
}
}
@@ -453,8 +453,8 @@ ifcapable explain {
do_execsql_test where9-5.1 {
EXPLAIN QUERY PLAN SELECT a FROM t1 WHERE b>1000 AND (c=31031 OR d IS NULL)
} {
- 0 0 0 {SEARCH TABLE t1 USING INDEX t1c (c=?) (~2 rows)}
- 0 0 0 {SEARCH TABLE t1 USING INDEX t1d (d=?) (~2 rows)}
+ 0 0 0 {SEARCH TABLE t1 USING INDEX t1c (c=?) (~3 rows)}
+ 0 0 0 {SEARCH TABLE t1 USING INDEX t1d (d=?) (~3 rows)}
}
# In contrast, b=1000 is preferred over any OR-clause.
@@ -856,5 +856,25 @@ do_test where9-7.3.2 {
}
} {79 81}
+# Fix for ticket [b7c8682cc17f32903f03a610bd0d35ffd3c1e6e4]
+# "Incorrect result from LEFT JOIN with OR in the WHERE clause"
+#
+do_test where9-8.1 {
+ db eval {
+ CREATE TABLE t81(a INTEGER PRIMARY KEY, b, c, d);
+ CREATE TABLE t82(x INTEGER PRIMARY KEY, y);
+ CREATE TABLE t83(p INTEGER PRIMARY KEY, q);
+
+ INSERT INTO t81 VALUES(2,3,4,5);
+ INSERT INTO t81 VALUES(3,4,5,6);
+ INSERT INTO t82 VALUES(2,4);
+ INSERT INTO t83 VALUES(5,55);
+
+ SELECT *
+ FROM t81 LEFT JOIN t82 ON y=b JOIN t83
+ WHERE c==p OR d==p
+ ORDER BY +a;
+ }
+} {2 3 4 5 {} {} 5 55 3 4 5 6 2 4 5 55}
finish_test
diff --git a/test/whereC.test b/test/whereC.test
new file mode 100644
index 0000000..9fa1bba
--- /dev/null
+++ b/test/whereC.test
@@ -0,0 +1,70 @@
+# 2011 November 16
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set testprefix whereC
+
+do_execsql_test 1.0 {
+ CREATE TABLE t1(i INTEGER PRIMARY KEY, a, b INTEGER);
+
+ INSERT INTO t1 VALUES(1, 1, 1);
+ INSERT INTO t1 VALUES(2, 1, 1);
+ INSERT INTO t1 VALUES(3, 1, 2);
+ INSERT INTO t1 VALUES(4, 1, 2);
+ INSERT INTO t1 VALUES(5, 1, 2);
+ INSERT INTO t1 VALUES(6, 1, 3);
+ INSERT INTO t1 VALUES(7, 1, 3);
+
+ INSERT INTO t1 VALUES(8, 2, 1);
+ INSERT INTO t1 VALUES(9, 2, 1);
+ INSERT INTO t1 VALUES(10, 2, 2);
+ INSERT INTO t1 VALUES(11, 2, 2);
+ INSERT INTO t1 VALUES(12, 2, 2);
+ INSERT INTO t1 VALUES(13, 2, 3);
+ INSERT INTO t1 VALUES(14, 2, 3);
+
+ INSERT INTO t1 VALUES(15, 2, 1);
+ INSERT INTO t1 VALUES(16, 2, 1);
+ INSERT INTO t1 VALUES(17, 2, 2);
+ INSERT INTO t1 VALUES(18, 2, 2);
+ INSERT INTO t1 VALUES(19, 2, 2);
+ INSERT INTO t1 VALUES(20, 2, 3);
+ INSERT INTO t1 VALUES(21, 2, 3);
+
+ CREATE INDEX i1 ON t1(a, b);
+}
+
+foreach {tn sql res} {
+ 1 "SELECT i FROM t1 WHERE a=1 AND b=2 AND i>3" {4 5}
+ 2 "SELECT i FROM t1 WHERE rowid='12'" {12}
+ 3 "SELECT i FROM t1 WHERE a=1 AND b='2'" {3 4 5}
+ 4 "SELECT i FROM t1 WHERE a=1 AND b='2' AND i>'3'" {4 5}
+ 5 "SELECT i FROM t1 WHERE a=1 AND b='2' AND i<5" {3 4}
+ 6 "SELECT i FROM t1 WHERE a=2 AND b=2 AND i<12" {10 11}
+ 7 "SELECT i FROM t1 WHERE a IN(1, 2) AND b=2 AND i<11" {3 4 5 10}
+ 8 "SELECT i FROM t1 WHERE a=2 AND b=2 AND i BETWEEN 10 AND 12" {10 11 12}
+ 9 "SELECT i FROM t1 WHERE a=2 AND b=2 AND i BETWEEN 11 AND 12" {11 12}
+ 10 "SELECT i FROM t1 WHERE a=2 AND b=2 AND i BETWEEN 10 AND 11" {10 11}
+ 11 "SELECT i FROM t1 WHERE a=2 AND b=2 AND i BETWEEN 12 AND 10" {}
+ 12 "SELECT i FROM t1 WHERE a=2 AND b=2 AND i<NULL" {}
+ 13 "SELECT i FROM t1 WHERE a=2 AND b=2 AND i>=NULL" {}
+ 14 "SELECT i FROM t1 WHERE a=1 AND b='2' AND i<4.5" {3 4}
+} {
+ do_execsql_test 1.$tn.1 $sql $res
+ do_execsql_test 1.$tn.2 "$sql ORDER BY i ASC" [lsort -integer -inc $res]
+ do_execsql_test 1.$tn.3 "$sql ORDER BY i DESC" [lsort -integer -dec $res]
+}
+
+
+finish_test
+
diff --git a/test/zerodamage.test b/test/zerodamage.test
new file mode 100644
index 0000000..3d18c8d
--- /dev/null
+++ b/test/zerodamage.test
@@ -0,0 +1,119 @@
+# 2011 December 21
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# This file implements tests of the SQLITE_IOCAP_POWERSAFE_OVERWRITE property
+# and the SQLITE_FCNTL_POWERSAFE_OVERWRITE file-control for manipulating it.
+#
+# The name of this file comes from the fact that we used to call the
+# POWERSAFE_OVERWRITE property ZERO_DAMAGE.
+#
+
+set testdir [file dirname $argv0]
+source $testdir/tester.tcl
+set testprefix wal5
+
+ifcapable !vtab {
+ finish_test
+ return
+}
+
+# POWERSAFE_OVERWRITE defaults to true
+#
+do_test zerodamage-1.0 {
+ file_control_powersafe_overwrite db -1
+} {0 1}
+
+# Check the ability to turn zero-damage on and off.
+#
+do_test zerodamage-1.1 {
+ file_control_powersafe_overwrite db 0
+ file_control_powersafe_overwrite db -1
+} {0 0}
+do_test zerodamage-1.2 {
+ file_control_powersafe_overwrite db 1
+ file_control_powersafe_overwrite db -1
+} {0 1}
+
+# Run a transaction with zero-damage on, a small page size and a much larger
+# sectorsize. Verify that the maximum journal size is small - that the
+# rollback journal is not being padded.
+#
+do_test zerodamage-2.0 {
+ db close
+ testvfs tv -default 1
+ tv sectorsize 8192
+ sqlite3 db file:test.db?psow=TRUE -uri 1
+ unset -nocomplain ::max_journal_size
+ set ::max_journal_size 0
+ proc xDeleteCallback {method file args} {
+ set sz [file size $file]
+ if {$sz>$::max_journal_size} {set ::max_journal_size $sz}
+ }
+ tv filter xDelete
+ tv script xDeleteCallback
+ register_wholenumber_module db
+ db eval {
+ PRAGMA page_size=1024;
+ PRAGMA journal_mode=DELETE;
+ PRAGMA cache_size=5;
+ CREATE VIRTUAL TABLE nums USING wholenumber;
+ CREATE TABLE t1(x, y);
+ INSERT INTO t1 SELECT value, randomblob(100) FROM nums
+ WHERE value BETWEEN 1 AND 400;
+ }
+ set ::max_journal_size 0
+ db eval {
+ UPDATE t1 SET y=randomblob(50) WHERE x=123;
+ }
+ concat [file_control_powersafe_overwrite db -1] [set ::max_journal_size]
+} {0 1 2576}
+
+# Repeat the previous step with zero-damage turned off. This time the
+# maximum rollback journal size should be much larger.
+#
+do_test zerodamage-2.1 {
+ set ::max_journal_size 0
+ db close
+ sqlite3 db file:test.db?psow=FALSE -uri 1
+ db eval {
+ UPDATE t1 SET y=randomblob(50) WHERE x=124;
+ }
+ concat [file_control_powersafe_overwrite db -1] [set ::max_journal_size]
+} {0 0 24704}
+
+# Run a WAL-mode transaction with POWERSAFE_OVERWRITE on to verify that the
+# WAL file does not get too big.
+#
+do_test zerodamage-3.0 {
+ db eval {
+ PRAGMA journal_mode=WAL;
+ }
+ db close
+ sqlite3 db file:test.db?psow=TRUE -uri 1
+ db eval {
+ UPDATE t1 SET y=randomblob(50) WHERE x=124;
+ }
+ file size test.db-wal
+} {1080}
+
+# Repeat the previous with POWERSAFE_OVERWRITE off. Verify that the WAL file
+# is padded.
+#
+do_test zerodamage-3.1 {
+ db close
+ sqlite3 db file:test.db?psow=FALSE -uri 1
+ db eval {
+ UPDATE t1 SET y=randomblob(50) WHERE x=124;
+ }
+ file size test.db-wal
+} {8416}
+
+finish_test