Overview

Comment:      Avoid running some particularly time-consuming tests as part of veryquick.test.
SHA1:         f465944b75a800ddc6920229ad32c2f3
User & Date:  dan 2016-02-04 17:31:03.368
Context

2016-02-04
  19:45  Further improve performance of unindexed fts5 prefix queries. (check-in: c9c6457d8e user: dan tags: trunk)
  17:31  Avoid running some particularly time-consuming tests as part of veryquick.test. (check-in: f465944b75 user: dan tags: trunk)
  11:48  Remove unnecessary sets of db->mallocFailed. (check-in: b787165b25 user: drh tags: trunk)
Changes
Changes to test/fuzzer1.test.
[Lines 19-32 of the new file; one line of the old version is removed in this region:]

ifcapable !vtab {
  finish_test
  return
}

set ::testprefix fuzzer1

load_static_extension db fuzzer

# Check configuration errors.
#
do_catchsql_test fuzzer1-1.1 {
  CREATE VIRTUAL TABLE f USING fuzzer;
} {1 {fuzzer: wrong number of CREATE VIRTUAL TABLE arguments}}

[Lines 1643-1656 of the new file; a large block of the old file is deleted in this region:]

  DELETE FROM "fuzzer [x] rules table";
  INSERT INTO "fuzzer [x] rules table" VALUES((1<<32)+100, 'x', 'y', 2);
}
do_catchsql_test 5.5.4 {
  CREATE VIRTUAL TABLE x USING fuzzer('fuzzer [x] rules table');
} {1 {fuzzer: ruleset must be between 0 and 2147483647}}

#-------------------------------------------------------------------------
# Test using different types of quotes with CREATE VIRTUAL TABLE
# arguments.
#
do_execsql_test 7.1 {
  CREATE TABLE [x2 "rules] (a, b, c, d);
  INSERT INTO [x2 "rules] VALUES(0, 'a', 'b', 5);
Added test/fuzzer2.test.
# 2016 February 4
#
# The author disclaims copyright to this source code.  In place of
# a legal notice, here is a blessing:
#
#    May you do good and not evil.
#    May you find forgiveness for yourself and forgive others.
#    May you share freely, never taking more than you give.
#
#***********************************************************************
# The focus of the tests is the word-fuzzer virtual table. The tests
# in this file are slower than those in fuzzer1.test. So this file does
# not run as part of veryquick.test etc.
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl

ifcapable !vtab {
  finish_test
  return
}

set ::testprefix fuzzer2

load_static_extension db fuzzer

#-------------------------------------------------------------------------
# This test uses a fuzzer table with many rules. There is one rule to
# map each possible two character string, where characters are lower-case
# letters used in the English language, to all other possible two character
# strings. In total, (26^4)-(26^2) mappings (the subtracted term represents
# the no-op mappings discarded automatically by the fuzzer).
#
#
do_execsql_test 1.1.1 {
  DROP TABLE IF EXISTS x1;
  DROP TABLE IF EXISTS x1_rules;
  CREATE TABLE x1_rules(ruleset, cFrom, cTo, cost);
}

puts "This test is slow - perhaps around 7 seconds on an average pc"
do_test 1.1.2 {
  set LETTERS {a b c d e f g h i j k l m n o p q r s t u v w x y z}
  set cost 1
  db transaction {
    foreach c1 $LETTERS {
      foreach c2 $LETTERS {
        foreach c3 $LETTERS {
          foreach c4 $LETTERS {
            db eval {INSERT INTO x1_rules VALUES(0, $c1||$c2, $c3||$c4, $cost)}
            set cost [expr ($cost%1000) + 1]
          }
        }
      }
    }
    db eval {UPDATE x1_rules SET cost = 20 WHERE cost<20 AND cFrom!='xx'}
  }
} {}

do_execsql_test 1.2 {
  SELECT count(*) FROM x1_rules WHERE cTo!=cFrom;
} [expr 26*26*26*26 - 26*26]

do_execsql_test 1.2.1 {
  CREATE VIRTUAL TABLE x1 USING fuzzer(x1_rules);
  SELECT word FROM x1 WHERE word MATCH 'xx' LIMIT 10;
} {xx hw hx hy hz ia ib ic id ie}

do_execsql_test 1.2.2 {
  SELECT cTo FROM x1_rules WHERE cFrom='xx' ORDER BY cost asc, rowid asc LIMIT 9;
} {hw hx hy hz ia ib ic id ie}

finish_test
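For reference, the rule count that test 1.2 above expects can be sanity-checked directly in a Tcl shell (a side note, not part of the new file): there are 26^4 two-character to two-character mappings, minus the 26^2 no-op rules the fuzzer discards.

puts [expr {26*26*26*26 - 26*26}]   ;# prints 456300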
Changes to test/mmap1.test.
[Lines 24-53 of the new file; a five-line comment describing register_rblob_code is added:]

  db_enter $db
  array set stats [btree_pager_stats $bt]
  db_leave $db
  # puts [array get stats]
  return $stats(read)
}

# Return a Tcl script that registers a user-defined scalar function
# named rblob() with database handle $dbname. The function returns a
# sequence of pseudo-random blobs based on seed value $seed.
#
proc register_rblob_code {dbname seed} {
  return [subst -nocommands {
    set ::rcnt $seed
    proc rblob {n} {
      set ::rcnt [expr (([set ::rcnt] << 3) + [set ::rcnt] + 456) & 0xFFFFFFFF]
      set str [format %.8x [expr [set ::rcnt] ^ 0xbdf20da3]]
      string range [string repeat [set str] [expr [set n]/4]] 1 [set n]
    }
    $dbname func rblob rblob
  }]
}

# For cases 1.1 and 1.4, the number of pages read using xRead() is 4 on
# unix and 9 on windows. The difference is that windows only ever maps
# an integer number of OS pages (i.e. creates mappings that are a multiple
# of 4KB in size). Whereas on unix any sized mapping may be created.
#
foreach {t mmap_size nRead c2init} {

[Lines 270-278 of the new file; a large block of the old file is deleted in this region:]

  sqlite3_column_text $::STMT 0
} $bbb

do_test 5.5 {
  sqlite3_finalize $::STMT
} SQLITE_OK

finish_test
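As a usage sketch of the helper documented above (a hypothetical snippet mirroring how mmap4.test below consumes it; the table name is illustrative), the script returned by register_rblob_code is evaluated in the interpreter that owns the database handle, after which rblob() is callable from SQL:

# Define and register rblob() for the handle named "db", seeded with 0.
eval [register_rblob_code db 0]
db eval { CREATE TABLE blobs(x) }
db eval { INSERT INTO blobs VALUES( rblob(5000) ) }  ;# one 5000-byte pseudo-random blob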
Added test/mmap4.test.
# 2016 February 04
#
# The author disclaims copyright to this source code.  In place of
# a legal notice, here is a blessing:
#
#    May you do good and not evil.
#    May you find forgiveness for yourself and forgive others.
#    May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file tests the effect of the mmap() or mremap() system calls
# returning an error on the library.
#
# If either mmap() or mremap() fails, SQLite should log an error
# message, then continue accessing the database using read() and
# write() exclusively.
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl

ifcapable !mmap {
  finish_test
  return
}
source $testdir/lock_common.tcl
set testprefix mmap4

# Return a Tcl script that registers a user-defined scalar function
# named rblob() with database handle $dbname. The function returns a
# sequence of pseudo-random blobs based on seed value $seed.
#
proc register_rblob_code {dbname seed} {
  return [subst -nocommands {
    set ::rcnt $seed
    proc rblob {n} {
      set ::rcnt [expr (([set ::rcnt] << 3) + [set ::rcnt] + 456) & 0xFFFFFFFF]
      set str [format %.8x [expr [set ::rcnt] ^ 0xbdf20da3]]
      string range [string repeat [set str] [expr [set n]/4]] 1 [set n]
    }
    $dbname func rblob rblob
  }]
}

#-------------------------------------------------------------------------
# Test various mmap_size settings.
#
foreach {tn1 mmap1 mmap2} {
   1     6144    167773
   2    18432    140399
   3    43008    401302
   4    92160    253899
   5   190464         2
   6   387072    752431
   7   780288    291143
   8  1566720    594306
   9  3139584    829137
  10  6285312    793963
  11 12576768   1015590
} {
  do_multiclient_test tn {
    sql1 {
      CREATE TABLE t1(a PRIMARY KEY);
      CREATE TABLE t2(x);
      INSERT INTO t2 VALUES('');
    }

    code1 [register_rblob_code db  0]
    code2 [register_rblob_code db2 444]

    sql1 "PRAGMA mmap_size = $mmap1"
    sql2 "PRAGMA mmap_size = $mmap2"

    do_test $tn1.$tn {
      for {set i 1} {$i <= 100} {incr i} {
        if {$i % 2} {
          set c1 sql1
          set c2 sql2
        } else {
          set c1 sql2
          set c2 sql1
        }

        $c1 {
          INSERT INTO t1 VALUES( rblob(5000) );
          UPDATE t2 SET x = (SELECT md5sum(a) FROM t1);
        }

        set res [$c2 {
          SELECT count(*) FROM t1;
          SELECT x == (SELECT md5sum(a) FROM t1) FROM t2;
          PRAGMA integrity_check;
        }]
        if {$res != [list $i 1 ok]} {
          do_test $tn1.$tn.$i { set ::res } [list $i 1 ok]
        }
      }
      set res 1
    } {1}
  }
}

finish_test
Changes to test/permutations.test.
[Lines 109-130 of the new file; the list of quick-test files is extended, adding walprotocol.test, mmap4.test and fuzzer2.test among others:]

  speed1.test speed1p.test speed2.test speed3.test speed4.test
  speed4p.test sqllimits1.test tkt2686.test thread001.test thread002.test
  thread003.test thread004.test thread005.test
  trans2.test vacuum3.test incrvacuum_ioerr.test autovacuum_crash.test
  btree8.test shared_err.test vtab_err.test
  walslow.test walcrash.test walcrash3.test
  walthread.test rtree3.test indexfault.test securedel2.test
  sort3.test sort4.test fts4growth.test fts4growth2.test
  bigsort.test rbu.test walprotocol.test mmap4.test fuzzer2.test
  walcrash2.test e_fkey.test backup.test
  fts4merge.test fts4merge2.test fts4merge4.test fts4check.test
  fts3cov.test fts3snippet.test fts3corrupt2.test fts3an.test
  fts3defer.test fts4langid.test fts3sort.test fts5unicode.test
  rtree4.test
}]

if {[info exists ::env(QUICKTEST_INCLUDE)]} {
  set allquicktests [concat $allquicktests $::env(QUICKTEST_INCLUDE)]
}
if {[info exists ::env(QUICKTEST_OMIT)]} {
  foreach x [split $::env(QUICKTEST_OMIT) ,] {
    regsub -all \\y$x\\y $allquicktests {} allquicktests

[Lines 153-207 of the new file; this region is modified and extended:]

lappend ::testsuitelist xxx

test_suite "veryquick" -prefix "" -description {
  "Very" quick test suite. Runs in minutes on a workstation.
  This test suite is the same as the "quick" tests, except that some files
  that test malloc and IO errors are omitted.
} -files [
  test_set $allquicktests -exclude *malloc* *ioerr* *fault* *bigfile* *_err*
]

test_suite "extraquick" -prefix "" -description {
  "Extra" quick test suite. Runs in a few minutes on a workstation.
  This test suite is the same as the "veryquick" tests, except that
  slower tests are omitted.
} -files [
  test_set $allquicktests -exclude *malloc* *ioerr* *fault* *bigfile* *_err* \
      wal3.test fts4merge* sort2.test mmap1.test walcrash* \
      percentile.test where8m.test walcksum.test savepoint3.test \
      fuzzer1.test fuzzer3.test fts3expr3.test
]

test_suite "mmap" -prefix "mm-" -description {
  Similar to veryquick. Except with memory mapping enabled.
} -presql {
  pragma mmap_size = 268435456;
} -files [
  test_set $allquicktests -exclude *malloc* *ioerr* *fault* -include malloc.test
]

test_suite "valgrind" -prefix "" -description {
  Run the "veryquick" test suite with a couple of multi-process tests (that
  fail under valgrind) omitted.
} -files [
  test_set $allquicktests -exclude *malloc* *ioerr* *fault* *_err* wal.test \
      shell*.test crash8.test atof1.test selectG.test \
      tkt-fc62af4523.test numindex1.test
] -initialize {
  set ::G(valgrind) 1
} -shutdown {
  unset -nocomplain ::G(valgrind)
}

test_suite "valgrind-nolookaside" -prefix "" -description {
  Run the "veryquick" test suite with a couple of multi-process tests (that
  fail under valgrind) omitted.
} -files [
  test_set $allquicktests -exclude *malloc* *ioerr* *fault* *_err* \
      wal.test atof1.test
] -initialize {
  set ::G(valgrind) 1
  catch {db close}
  sqlite3_shutdown
  sqlite3_config_lookaside 0 0
  sqlite3_initialize
  autoinstall_test_functions

[Lines 270-284 of the new file; one line changed:]

]

test_suite "nofaultsim" -prefix "" -description {
  "Very" quick test suite. Runs in less than 5 minutes on a workstation.
  This test suite is the same as the "quick" tests, except that some files
  that test malloc and IO errors are omitted.
} -files [
  test_set $allquicktests -exclude *malloc* *ioerr* *fault* *_err*
] -initialize {
  catch {db close}
  sqlite3_shutdown
  install_malloc_faultsim 0
  sqlite3_initialize
  autoinstall_test_functions
} -shutdown {
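A related usage note (an assumption drawn from the QUICKTEST_OMIT handling visible above, not something this check-in adds): individual files can also be dropped from a run by listing them, comma-separated, in the QUICKTEST_OMIT environment variable before permutations.test builds its file lists, for example:

set ::env(QUICKTEST_OMIT) "fuzzer2.test,mmap4.test,walprotocol.test"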
Name change from test/savepoint3.test to test/savepointfault.test.
[Lines 1-39 of the renamed file:]

# 2008 December 15
#
# The author disclaims copyright to this source code.  In place of
# a legal notice, here is a blessing:
#
#    May you do good and not evil.
#    May you find forgiveness for yourself and forgive others.
#    May you share freely, never taking more than you give.
#
#***********************************************************************
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/malloc_common.tcl

set testprefix savepointfault

do_malloc_test 1 -sqlprep {
  CREATE TABLE t1(a, b, c);
  INSERT INTO t1 VALUES(1, 2, 3);
} -sqlbody {
  SAVEPOINT one;
  INSERT INTO t1 VALUES(4, 5, 6);
  SAVEPOINT two;
  DELETE FROM t1;
  ROLLBACK TO two;
  RELEASE one;
}

do_malloc_test 2 -sqlprep {
  PRAGMA cache_size = 10;
  CREATE TABLE t1(a, b, c);
  INSERT INTO t1 VALUES(randstr(400,400), randstr(400,400), randstr(400,400));
  INSERT INTO t1 SELECT randstr(400,400), randstr(400,400), randstr(400,400) FROM t1;
  INSERT INTO t1 SELECT randstr(400,400), randstr(400,400), randstr(400,400) FROM t1;

[Lines 56-90 of the renamed file; a couple of lines changed:]

  SAVEPOINT two;
  DELETE FROM t1 WHERE rowid > 10;
  ROLLBACK TO two;
  ROLLBACK TO one;
  RELEASE one;
}

do_ioerr_test 3 -sqlprep {
  CREATE TABLE t1(a, b, c);
  INSERT INTO t1 VALUES(1, randstr(1000,1000), randstr(1000,1000));
  INSERT INTO t1 VALUES(2, randstr(1000,1000), randstr(1000,1000));
} -sqlbody {
  BEGIN;
  UPDATE t1 SET a = 3 WHERE a = 1;
  SAVEPOINT one;
  UPDATE t1 SET a = 4 WHERE a = 2;
  COMMIT;
} -cleanup {
  db eval {
    SAVEPOINT one;
    RELEASE one;
  }
}

# The following test does a really big savepoint rollback. One involving
# more than 4000 pages. The idea is to get a specific sqlite3BitvecSet()
# operation in pagerPlaybackSavepoint() to fail.
#do_malloc_test 4 -sqlprep {
#  BEGIN;
#  CREATE TABLE t1(a, b);
#  CREATE INDEX i1 ON t1(a);
#  CREATE INDEX i2 ON t1(b);
#  INSERT INTO t1 VALUES(randstr(500,500), randstr(500,500));          -- 1
#  INSERT INTO t1 VALUES(randstr(500,500), randstr(500,500));          -- 2
#  INSERT INTO t1 SELECT randstr(500,500), randstr(500,500) FROM t1;   -- 4

[Lines 104-118 of the renamed file; one line changed:]

#} -sqlbody {
#  ROLLBACK TO abc;
#}

# Cause a specific malloc in savepoint rollback code to fail.
#
do_malloc_test 4 -start 7 -sqlprep {
  PRAGMA auto_vacuum = incremental;
  PRAGMA cache_size = 1000;

  CREATE TABLE t1(a, b);
  CREATE TABLE t2(a, b);
  CREATE TABLE t3(a, b);
  INSERT INTO t1 VALUES(1, randstr(500,500));
Changes to test/sort.test.
[Lines 487-500 of the new file; a large block of the old file is deleted in this region:]

  SELECT a, b FROM t10 ORDER BY a;
} [db eval {SELECT a, b FROM t10 ORDER BY a, b}]
do_execsql_test sort-13.3 {
  PRAGMA cache_size = 5;
  SELECT a, b FROM t10 ORDER BY a;
} [db eval {SELECT a, b FROM t10 ORDER BY a, b}]

#-------------------------------------------------------------------------
#
foreach {tn mmap_limit nWorker tmpstore coremutex fakeheap softheaplimit} {
  1          0       3    file     true    false             0
  2          0       3    file     true     true             0
  3          0       0    file     true    false             0
  4    1000000       3    file     true    false             0
Changes to test/sort2.test.
[Lines 27-40 of the new file; one line of the old version removed:]

  1 { }
  2 {
    catch { db close }
    reset_db
    catch { db eval {PRAGMA threads=7} }
  }
} {

  eval $script

  do_execsql_test $tn.1 {
    PRAGMA cache_size = 5;
    WITH r(x,y) AS (
      SELECT 1, randomblob(100)
      UNION ALL

[Lines 62-88 of the new file; lines added and changed - the slow $tn.3 case is now guarded by isquick:]

  do_execsql_test $tn.2.3 {
    CREATE UNIQUE INDEX i2 ON t1(a);
  }

  do_execsql_test $tn.2.4 { PRAGMA integrity_check } {ok}

  # Because it uses so much data, this test can take 12-13 seconds even on
  # a modern workstation. So it is omitted from "veryquick" and other
  # permutations.test tests.
  if {[isquick]==0} {
    do_execsql_test $tn.3 {
      PRAGMA cache_size = 5;
      WITH r(x,y) AS (
        SELECT 1, randomblob(100)
        UNION ALL
        SELECT x+1, randomblob(100) FROM r
        LIMIT 1000000
      )
      SELECT count(x), length(y) FROM r GROUP BY (x%5)
    } {
      200000 100 200000 100 200000 100 200000 100 200000 100
    }
  }
}

finish_test
Changes to test/sort3.test.
[Lines 14-103 of the new file; a large block of new tests is added:]

# configured to use mmap(), but the temporary files generated by the
# sorter are too large to be completely mapped.
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix sort3

#-------------------------------------------------------------------------
# Sort some large ( > 4KiB) records.
#
proc cksum {x} {
  set i1 1
  set i2 2
  binary scan $x c* L
  foreach {a b} $L {
    set i1 [expr (($i2<<3) + $a) & 0x7FFFFFFF]
    set i2 [expr (($i1<<3) + $b) & 0x7FFFFFFF]
  }
  list $i1 $i2
}
db func cksum cksum

do_execsql_test 1.0 {
  PRAGMA cache_size = 5;
  CREATE TABLE t11(a, b);
  INSERT INTO t11 VALUES(randomblob(5000), NULL);
  INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --2
  INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --3
  INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --4
  INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --5
  INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --6
  INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --7
  INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --8
  INSERT INTO t11 SELECT randomblob(5000), NULL FROM t11; --9
  UPDATE t11 SET b = cksum(a);
}

foreach {tn mmap_limit} {
  1 0
  2 1000000
} {
  do_test 1.$tn {
    sqlite3_test_control SQLITE_TESTCTRL_SORTER_MMAP db $mmap_limit
    set prev ""
    db eval { SELECT * FROM t11 ORDER BY b } {
      if {$b != [cksum $a]} {error "checksum failed"}
      if {[string compare $b $prev] < 0} {error "sort failed"}
      set prev $b
    }
    set {} {}
  } {}
}

# Sort roughly 20MB of data. Once with a mmap limit of 5MB and once without.
#
foreach {itest limit} {
  1 5000000
  2 0x7FFFFFFF
} {
  sqlite3_test_control SQLITE_TESTCTRL_SORTER_MMAP db $limit
  do_execsql_test 2.$itest {
    WITH r(x,y) AS (
      SELECT 1, randomblob(1000)
      UNION ALL
      SELECT x+1, randomblob(1000) FROM r LIMIT 20000
    )
    SELECT count(*), sum(length(y)) FROM r GROUP BY (x%5);
  } {
    4000 4000000 4000 4000000 4000 4000000 4000 4000000 4000 4000000
  }
}

# Sort more than 2GB of data. At one point this was causing a problem.
# This test might take one minute or more to run.
#
do_execsql_test 3 {
  PRAGMA cache_size = 20000;
  WITH r(x,y) AS (
    SELECT 1, randomblob(1000)
    UNION ALL
    SELECT x+1, randomblob(1000) FROM r LIMIT 2200000
  )
Changes to test/tester.tcl.
[Lines 1900-1919 of the new file; the six-line isquick proc is added:]

  set perm
}

proc presql {} {
  set presql ""
  catch {set presql $::G(perm:presql)}
  set presql
}

proc isquick {} {
  set ret 0
  catch {set ret $::G(isquick)}
  set ret
}

#-------------------------------------------------------------------------
#
proc slave_test_script {script} {

  # Create the interpreter used to run the test script.
  interp create tinterp
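The new isquick helper is what sort2.test above keys off. A minimal sketch of the intended pattern (hypothetical test name and expected value; it assumes the quick-style permutations set ::G(isquick)):

# Run an expensive case only when not inside a "quick" style permutation.
if {[isquick]==0} {
  do_execsql_test slow-1.0 { SELECT count(*) FROM t1 } {1000000}
}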
Changes to test/wal.test.
[Lines 820-833 of the new file; a large block of the old file is deleted in this region:]

  forcecopy test.db-wal test2.db-wal
  sqlite3_wal db2 test2.db
  execsql { SELECT * FROM t2 } db2
} {B 2}
db2 close
db close

#-------------------------------------------------------------------------
# Check a fun corruption case has been fixed.
#
# The problem was that after performing a checkpoint using a connection
# that had an out-of-date pager-cache, the next time the connection was
# used it did not realize the cache was out-of-date and proceeded to
# operate with an inconsistent cache. Leading to corruption.
Changes to test/wal3.test.
[Lines 57-71 of the new file:]

    INSERT INTO t1 SELECT a_string(800) FROM t1;           /* 1024 */
    INSERT INTO t1 SELECT a_string(800) FROM t1;           /* 2048 */
    INSERT INTO t1 SELECT a_string(800) FROM t1 LIMIT 1970; /* 4018 */
    COMMIT;
    PRAGMA cache_size = 10;
  }
  set x [wal_frame_count test.db-wal 1024]
  if {[permutation]=="memsubsys1"} {
    if {$x==4251 || $x==4290} {set x 4056}
  }
  set x
} 4056

for {set i 1} {$i < 50} {incr i} {

[Lines 234-247 of the new file; a large block of the old file is deleted in this region:]

    set ::syncs
  } $synccount

  db close
  T delete
}

#-------------------------------------------------------------------------
# Only one client may run recovery at a time. Test this mechanism.
#
# When client-2 tries to open a read transaction while client-1 is
# running recovery, it fails to obtain a lock on an aReadMark[] slot
# (because they are all locked by recovery). It then tries to obtain

[Lines 552-565 of the new file; a large block of the old file is deleted in this region:]

  set ::locks
} {{5 1 lock shared} {5 1 unlock shared} {4 1 lock shared} {4 1 unlock shared}}

db close
db2 close
T delete

#-------------------------------------------------------------------------
# When a connection opens a read-lock on the database, it searches for
# an aReadMark[] slot that is already set to the mxFrame value for the
# new transaction. If it cannot find one, it attempts to obtain an
# exclusive lock on an aReadMark[] slot for the purposes of modifying
# the value, then drops back to a shared-lock for the duration of the
Changes to test/walcksum.test.
[Lines 330-338 of the new file; a large block of the old file is deleted in this region:]

    PRAGMA integrity_check;
    SELECT count(*) FROM t1;
  } db2
} {ok 256}

catch { db close }
catch { db2 close }

finish_test
Added test/walprotocol.test.
# 2016 February 4
#
# The author disclaims copyright to this source code.  In place of
# a legal notice, here is a blessing:
#
#    May you do good and not evil.
#    May you find forgiveness for yourself and forgive others.
#    May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The
# focus of this file is testing the operation of the library in
# "PRAGMA journal_mode=WAL" mode.
#
# More specifically, it tests "locking protocol" errors - errors that
# may be caused if one or more SQLite clients does not follow the expected
# locking protocol when accessing a wal-mode database. These tests take
# quite a while to run.
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/lock_common.tcl
source $testdir/wal_common.tcl
ifcapable !wal {finish_test ; return }

set testprefix walprotocol

#-------------------------------------------------------------------------
# When recovering the contents of a WAL file, a process obtains the WRITER
# lock, then locks all other bytes before commencing recovery. If it fails
# to lock all other bytes (because some other process is holding a read
# lock) it should retry up to 100 times. Then return SQLITE_PROTOCOL to the
# caller. Test this (test case 1.3).
#
# Also test the effect of hitting an SQLITE_BUSY while attempting to obtain
# the WRITER lock (should be the same). Test case 1.4.
#
do_execsql_test 1.0 {
  PRAGMA journal_mode = wal;
  CREATE TABLE x(y);
  INSERT INTO x VALUES('z');
} {wal}

proc lock_callback {method filename handle lock} {
  lappend ::locks $lock
}
do_test 1.1 {
  testvfs T
  T filter xShmLock
  T script lock_callback
  set ::locks [list]
  sqlite3 db test.db -vfs T
  execsql { SELECT * FROM x }
  lrange $::locks 0 3
} [list {0 1 lock exclusive} {1 7 lock exclusive} \
        {1 7 unlock exclusive} {0 1 unlock exclusive} \
]
do_test 1.2 {
  db close
  set ::locks [list]
  sqlite3 db test.db -vfs T
  execsql { SELECT * FROM x }
  lrange $::locks 0 3
} [list {0 1 lock exclusive} {1 7 lock exclusive} \
        {1 7 unlock exclusive} {0 1 unlock exclusive} \
]
proc lock_callback {method filename handle lock} {
  if {$lock == "1 7 lock exclusive"} { return SQLITE_BUSY }
  return SQLITE_OK
}
puts "# Warning: This next test case causes SQLite to call xSleep(1) 100 times."
puts "# Normally this equates to a delay of roughly 10 seconds, but if SQLite"
puts "# is built on unix without HAVE_USLEEP defined, it may be much longer."
do_test 1.3 {
  db close
  set ::locks [list]
  sqlite3 db test.db -vfs T
  catchsql { SELECT * FROM x }
} {1 {locking protocol}}

puts "# Warning: Same again!"
proc lock_callback {method filename handle lock} {
  if {$lock == "0 1 lock exclusive"} { return SQLITE_BUSY }
  return SQLITE_OK
}
do_test 1.4 {
  db close
  set ::locks [list]
  sqlite3 db test.db -vfs T
  catchsql { SELECT * FROM x }
} {1 {locking protocol}}
db close
T delete

#-------------------------------------------------------------------------
#
do_test 2.1 {
  forcedelete test.db test.db-journal test.db wal
  sqlite3 db test.db
  sqlite3 db2 test.db
  execsql {
    PRAGMA auto_vacuum = off;
    PRAGMA journal_mode = WAL;
    CREATE TABLE b(c);
    INSERT INTO b VALUES('Tehran');
    INSERT INTO b VALUES('Qom');
    INSERT INTO b VALUES('Markazi');
    PRAGMA wal_checkpoint;
  }
} {wal 0 5 5}
do_test 2.2 {
  execsql { SELECT * FROM b }
} {Tehran Qom Markazi}
do_test 2.3 {
  db eval { SELECT * FROM b } {
    db eval { INSERT INTO b VALUES('Qazvin') }
    set r [db2 eval { SELECT * FROM b }]
    break
  }
  set r
} {Tehran Qom Markazi Qazvin}
do_test 2.4 {
  execsql {
    INSERT INTO b VALUES('Gilan');
    INSERT INTO b VALUES('Ardabil');
  }
} {}
db2 close

faultsim_save_and_close
testvfs T -default 1
faultsim_restore_and_reopen

T filter xShmLock
T script lock_callback
proc lock_callback {method file handle spec} {
  if {$spec == "1 7 unlock exclusive"} {
    T filter {}
    set ::r [catchsql { SELECT * FROM b } db2]
  }
}
sqlite3 db test.db
sqlite3 db2 test.db
do_test 2.5 {
  execsql { SELECT * FROM b }
} {Tehran Qom Markazi Qazvin Gilan Ardabil}
do_test 2.6 {
  set ::r
} {1 {locking protocol}}

db close
db2 close
faultsim_restore_and_reopen
sqlite3 db2 test.db

T filter xShmLock
T script lock_callback
proc lock_callback {method file handle spec} {
  if {$spec == "1 7 unlock exclusive"} {
    T filter {}
    set ::r [catchsql { SELECT * FROM b } db2]
  }
}
unset ::r
do_test 2.7 {
  execsql { SELECT * FROM b }
} {Tehran Qom Markazi Qazvin Gilan Ardabil}
do_test 2.8 {
  set ::r
} {1 {locking protocol}}

db close
db2 close
T delete
finish_test
Changes to test/walslow.test.
[Lines 12-31 of the new file; four lines added:]

# focus of this file is testing the operation of the library in
# "PRAGMA journal_mode=WAL" mode. The tests in this file use
# brute force methods, so may take a while to run.
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/wal_common.tcl
source $testdir/lock_common.tcl

ifcapable !wal {finish_test ; return }
set testprefix walslow

proc reopen_db {} {
  catch { db close }
  forcedelete test.db test.db-wal
  sqlite3 db test.db
  execsql { PRAGMA journal_mode = wal }
}

[Lines 69-231 of the new file; a large block of new tests is added:]

    do_test walslow-1.seed=$seed.$iTest.4 {
      execsql { SELECT count(*) FROM t1 WHERE a!=b } db2
    } [execsql { SELECT count(*) FROM t1 WHERE a!=b }]

    db2 close
  }
}

#-------------------------------------------------------------------------
# Test case walslow-3.* tests that the checksum calculation detects single
# byte changes to frame or frame-header data and considers the frame
# invalid as a result.
#
reset_db
do_test 3.1 {
  execsql {
    PRAGMA synchronous = NORMAL;
    PRAGMA page_size = 1024;

    CREATE TABLE t1(a, b);
    INSERT INTO t1 VALUES(1, randomblob(300));
    INSERT INTO t1 VALUES(2, randomblob(300));
    PRAGMA journal_mode = WAL;
    INSERT INTO t1 VALUES(3, randomblob(300));
  }
  file size test.db-wal
} [wal_file_size 1 1024]

do_test 3.2 {
  forcecopy test.db-wal test2.db-wal
  forcecopy test.db test2.db
  sqlite3 db2 test2.db
  execsql { SELECT a FROM t1 } db2
} {1 2 3}
db2 close
forcecopy test.db test2.db

foreach incr {1 2 3 20 40 60 80 100 120 140 160 180 200 220 240 253 254 255} {
  do_test 3.3.$incr {
    set FAIL 0
    for {set iOff 0} {$iOff < [wal_file_size 1 1024]} {incr iOff} {

      forcecopy test.db-wal test2.db-wal
      set fd [open test2.db-wal r+]
      fconfigure $fd -encoding binary
      fconfigure $fd -translation binary

      seek $fd $iOff
      binary scan [read $fd 1] c x
      seek $fd $iOff
      puts -nonewline $fd [binary format c [expr {($x+$incr)&0xFF}]]
      close $fd

      sqlite3 db2 test2.db
      if { [execsql { SELECT a FROM t1 } db2] != "1 2" } {set FAIL 1}
      db2 close
    }
    set FAIL
  } {0}
}

#-------------------------------------------------------------------------
# Test large log summaries.
#
# In this case "large" usually means a log file that requires a wal-index
# mapping larger than 64KB (the default initial allocation). A 64KB wal-index
# is large enough for a log file that contains approximately 13100 frames.
# So the following tests create logs containing at least this many frames.
#
# 4.1.*: This test case creates a very large log file within the
#        file-system (around 200MB). The log file does not contain
#        any valid frames. Test that the database file can still be
#        opened and queried, and that the invalid log file causes no
#        problems.
#
# 4.2.*: Test that a process may create a large log file and query
#        the database (including the log file that it itself created).
#
# 4.3.*: Test that if a very large log file is created, and then a
#        second connection is opened on the database file, it is possible
#        to query the database (and the very large log) using the
#        second connection.
#
# 4.4.*: Same test as wal-13.3.*. Except in this case the second
#        connection is opened by an external process.
#
set ::blobcnt 0
proc blob {nByte} {
  incr ::blobcnt
  return [string range [string repeat "${::blobcnt}x" $nByte] 1 $nByte]
}

reset_db
do_execsql_test 4.1 {
  PRAGMA journal_mode = wal;
  CREATE TABLE t1(x, y);
  INSERT INTO "t1" VALUES('A',0);
  CREATE TABLE t2(x, y);
  INSERT INTO "t2" VALUES('B',2);
} {wal}
db close

do_test 4.1.1 {
  list [file exists test.db] [file exists test.db-wal]
} {1 0}
do_test 4.1.2 {
  set fd [open test.db-wal w]
  seek $fd [expr 200*1024*1024]
  puts $fd ""
  close $fd
  sqlite3 db test.db
  execsql { SELECT * FROM t2 }
} {B 2}
do_test 4.1.3 {
  db close
  file exists test.db-wal
} {0}

do_test 4.2.1 {
  sqlite3 db test.db
  execsql { SELECT count(*) FROM t2 }
} {1}
do_test 4.2.2 {
  db function blob blob
  for {set i 0} {$i < 16} {incr i} {
    execsql { INSERT INTO t2 SELECT blob(400), blob(400) FROM t2 }
  }
  execsql { SELECT count(*) FROM t2 }
} [expr int(pow(2, 16))]
do_test 4.2.3 {
  expr [file size test.db-wal] > [wal_file_size 33000 1024]
} 1

do_multiclient_test tn {
  incr tn 2

  do_test 4.$tn.0 {
    sql1 {
      PRAGMA journal_mode = WAL;
      CREATE TABLE t1(x);
      INSERT INTO t1 SELECT randomblob(800);
    }
    sql1 { SELECT count(*) FROM t1 }
  } {1}

  for {set ii 1} {$ii<16} {incr ii} {
    do_test 4.$tn.$ii.a {
      sql2 { INSERT INTO t1 SELECT randomblob(800) FROM t1 }
      sql2 { SELECT count(*) FROM t1 }
    } [expr (1<<$ii)]
    do_test 4.$tn.$ii.b {
      sql1 { SELECT count(*) FROM t1 }
    } [expr (1<<$ii)]
    do_test 4.$tn.$ii.c {
      sql1 { SELECT count(*) FROM t1 }
    } [expr (1<<$ii)]
    do_test 4.$tn.$ii.d {
      sql1 { PRAGMA integrity_check }
    } {ok}
  }
}

finish_test
Name change from test/where8m.test to test/wherefault.test.
[Lines 1-49 of the renamed file; the header comment and test names are updated:]

# 2008 December 23
#
# The author disclaims copyright to this source code.  In place of
# a legal notice, here is a blessing:
#
#    May you do good and not evil.
#    May you find forgiveness for yourself and forgive others.
#    May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library. The focus
# is testing of where.c. More specifically, the focus is on handling OOM
# errors within the code that optimizes WHERE clauses that feature the
# OR operator.
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/malloc_common.tcl

set testprefix wherefault

do_malloc_test 1 -sqlprep {
  CREATE TABLE t1(a, b, c);
  CREATE INDEX i1 ON t1(a);
  CREATE INDEX i2 ON t1(b);
} -sqlbody {
  SELECT c FROM t1
  WHERE
    a = 2 OR b = 'three' OR a = 4 OR b = 'five' OR a = 6 OR
    b = 'seven' OR a = 8 OR b = 'nine' OR a = 10
  ORDER BY rowid;

  SELECT c FROM t1 WHERE a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6;
  SELECT c FROM t1 WHERE a BETWEEN 1 AND 3 AND b < 5 AND b > 2 AND c = 4;
}

do_malloc_test 2 -tclprep {
  db eval {
    BEGIN;
    CREATE TABLE t1(a, b, c);
    CREATE INDEX i1 ON t1(a);
    CREATE INDEX i2 ON t1(b);
  }
  for {set i 0} {$i < 1000} {incr i} {