Overview
Comment: On Windows, make sure the current directory value used by the test suite is 'normalized' to what the parent command shell sees. Also, clean the test directories used by the quota2.test file.
SHA1: 82bcd7ec1531f6d71c079578434c58d3
User & Date: mistachkin 2012-03-08 20:00:36.181
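The problem being fixed is easiest to see with a small sketch. The directory spellings below are made up; the %CD% query is the same one used by the new get_pwd helper added to test/tester.tcl in the diff below.

    # Hypothetical Windows session: Tcl's [pwd] and the parent cmd.exe can
    # disagree about the spelling of the same directory, e.g.
    #   [pwd] -> C:/Work/SQLite
    #   %CD%  -> c:\work\sqlite
    # Tests that compare paths as strings then fail even though both names
    # refer to the same place. Asking the shell itself removes the ambiguity:
    set fromShell [string trim [exec -- $::env(ComSpec) /c echo %CD%]]
    set fromTcl   [pwd]
    if {[string map {\\ /} $fromShell] ne $fromTcl} {
      puts "note: shell and Tcl disagree about the current directory"
    }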
Context
2012-03-08
20:28  On Windows, make sure the returned test current directory value does not contain any backslashes. (check-in: efee39e64b user: mistachkin tags: trunk)
20:22  Merge and manually resolve testing updates from trunk. (check-in: 5eecdb44dd user: mistachkin tags: winrt)
20:00  On Windows, make sure the current directory value used by the test suite is 'normalized' to what the parent command shell sees. Also, clean the test directories used by the quota2.test file. (check-in: 82bcd7ec15 user: mistachkin tags: trunk)
2012-03-05
16:24  Fix a problem compiling the test code in fts3_test.c when SQLITE_ENABLE_FTS3 is not defined. (check-in: b00ccda307 user: dan tags: trunk)
Changes
Changes to Makefile.in.
     rm -f *.lo *.la *.o sqlite3$(TEXE) libsqlite3.la
     rm -f sqlite3.h opcodes.*
     rm -rf .libs .deps
     rm -f lemon$(BEXE) lempar.c parse.* sqlite*.tar.gz
     rm -f mkkeywordhash$(BEXE) keywordhash.h
     rm -f $(PUBLISH)
     rm -f *.da *.bb *.bbg gmon.out
+    rm -rf quota2a quota2b quota2c
     rm -rf tsrc .target_source
     rm -f tclsqlite3$(TEXE)
     rm -f testfixture$(TEXE) test.db
     rm -f sqlite3.dll sqlite3.lib sqlite3.exp sqlite3.def
     rm -f sqlite3.c
     rm -f sqlite3_analyzer$(TEXE) sqlite3_analyzer.c
Changes to Makefile.msc.
     del /Q *.lo *.ilk *.lib *.obj *.pdb sqlite3.exe libsqlite3.lib
     del /Q *.da *.bb *.bbg gmon.out
     del /Q sqlite3.h opcodes.c opcodes.h
     del /Q lemon.exe lempar.c parse.*
     del /Q mkkeywordhash.exe keywordhash.h
     -rmdir /Q/S .deps
     -rmdir /Q/S .libs
+    -rmdir /Q/S quota2a
+    -rmdir /Q/S quota2b
+    -rmdir /Q/S quota2c
     -rmdir /Q/S tsrc
     del /Q .target_source
     del /Q tclsqlite3.exe
     del /Q testfixture.exe testfixture.exp test.db
     del /Q sqlite3.dll sqlite3.lib sqlite3.exp sqlite3.def
     del /Q sqlite3.c
     del /Q sqlite3_analyzer.exe sqlite3_analyzer.exp sqlite3_analyzer.c
Changes to Makefile.vxworks.
 ./testfixture$(EXE) $(TOP)/test/loadext.test

 clean:
     rm -f *.o sqlite3$(EXE) libsqlite3.a sqlite3.h opcodes.*
     rm -f lemon lempar.c parse.* sqlite*.tar.gz mkkeywordhash keywordhash.h
     rm -f $(PUBLISH)
     rm -f *.da *.bb *.bbg gmon.out
+    rm -rf quota2a quota2b quota2c
     rm -rf tsrc target_source
     rm -f testloadext.dll libtestloadext.so
     rm -f sqlite3.c fts?amal.c tclsqlite3.c
     rm -f $(SHPREFIX)sqlite3.$(SO)
Changes to main.mk.
 clean:
     rm -f *.o sqlite3 sqlite3.exe libsqlite3.a sqlite3.h opcodes.*
     rm -f lemon lemon.exe lempar.c parse.* sqlite*.tar.gz
     rm -f mkkeywordhash mkkeywordhash.exe keywordhash.h
     rm -f $(PUBLISH)
     rm -f *.da *.bb *.bbg gmon.out
+    rm -rf quota2a quota2b quota2c
     rm -rf tsrc target_source
     rm -f testloadext.dll libtestloadext.so
     rm -f amalgamation-testfixture amalgamation-testfixture.exe
     rm -f fts3-testfixture fts3-testfixture.exe
     rm -f testfixture testfixture.exe
     rm -f threadtest3 threadtest3.exe
     rm -f sqlite3.c fts?amal.c tclsqlite3.c
Changes to src/test6.c.
   int nName = strlen(zName);
   int nCrashFile = strlen(zCrashFile);

   if( nCrashFile>0 && zCrashFile[nCrashFile-1]=='*' ){
     nCrashFile--;
     if( nName>nCrashFile ) nName = nCrashFile;
   }
+#ifdef TRACE_CRASHTEST
+  printf("cfSync(): nName = %d, nCrashFile = %d, zName = %s, zCrashFile = %s\n",
+         nName, nCrashFile, zName, zCrashFile);
+#endif
   if( nName==nCrashFile && 0==memcmp(zName, zCrashFile, nName) ){
+#ifdef TRACE_CRASHTEST
+    printf("cfSync(): name matched, g.iCrash = %d\n", g.iCrash);
+#endif
     if( (--g.iCrash)==0 ) isCrash = 1;
   }

   return writeListSync(pCrash, isCrash);
 }

 /*
Changes to test/bigfile.test.
 } $::MAGIC_SUM

 # Try to create a large file - a file that is larger than 2^32 bytes.
 # If this fails, it means that the system being tested does not support
 # large files. So skip all of the remaining tests in this file.
 #
 db close
-if {[catch {fake_big_file 4096 [pwd]/test.db} msg]} {
+if {[catch {fake_big_file 4096 [get_pwd]/test.db} msg]} {
   puts "**** Unable to create a file larger than 4096 MB. *****"
   finish_test
   return
 }
 hexio_write test.db 28 00000000

 do_test bigfile-1.2 {
︙
   sqlite3 db test.db
   execsql {
     SELECT md5sum(x) FROM t1;
   }
 } $::MAGIC_SUM
 db close
-if {[catch {fake_big_file 8192 [pwd]/test.db}]} {
+if {[catch {fake_big_file 8192 [get_pwd]/test.db}]} {
   puts "**** Unable to create a file larger than 8192 MB. *****"
   finish_test
   return
 }
 hexio_write test.db 28 00000000

 do_test bigfile-1.5 {
︙
 do_test bigfile-1.9 {
   execsql {
     SELECT md5sum(x) FROM t2;
   }
 } $::MAGIC_SUM
 db close
-if {[catch {fake_big_file 16384 [pwd]/test.db}]} {
+if {[catch {fake_big_file 16384 [get_pwd]/test.db}]} {
   puts "**** Unable to create a file larger than 16384 MB. *****"
   finish_test
   return
 }
 hexio_write test.db 28 00000000

 do_test bigfile-1.10 {
Changes to test/bigfile2.test.
 }

 # Pad the file out to 4GB in size. Then clear the file-size field in the
 # db header. This will cause SQLite to assume that the first 4GB of pages
 # are actually in use and new pages will be appended to the file.
 #
 db close
-if {[catch {fake_big_file 4096 [pwd]/test.db} msg]} {
+if {[catch {fake_big_file 4096 [get_pwd]/test.db} msg]} {
   puts "**** Unable to create a file larger than 4096 MB. *****"
   finish_test
   return
 }
 hexio_write test.db 28 00000000

 do_test 1.2 {
Changes to test/crash5.test.
       INSERT INTO t1 VALUES('1111111111', '2222222222', $c);
     }
     db close

     do_test crash5-$ii.$jj.1 {
       crashsql -delay 1 -file test.db-journal -seed $ii -tclbody [join [list \
         [list set iFail $jj] {
-          sqlite3_crashparams 0 [file join [pwd] test.db-journal]
+          sqlite3_crashparams 0 [file join [get_pwd] test.db-journal]

           # Begin a transaction and evaluate a "CREATE INDEX" statement
           # with the iFail'th malloc() set to fail. This operation will
           # have to move the current contents of page 4 (the overflow
           # page) to make room for the new root page. The bug is that
           # if malloc() fails at a particular point in sqlite3PagerMovepage(),
           # sqlite mistakenly thinks that the page being moved (page 4) has
︙
           # the transaction was not rolled back, then the sqlite cache now
           # has a dirty page 4 that it incorrectly believes is already safely
           # in the synced part of the journal file. When
           # sqlite3_release_memory() is called sqlite tries to free memory
           # by writing page 4 out to the db file. If it crashes later on,
           # before syncing the journal... Corruption!
           #
-          sqlite3_crashparams 1 [file join [pwd] test.db-journal]
+          sqlite3_crashparams 1 [file join [get_pwd] test.db-journal]
           sqlite3_release_memory 8092
         }]] {}
       expr 1
     } {1}
     sqlite3 db test.db
     do_test crash5-$ii.$jj.2 {
Changes to test/e_uri.test.
 # EVIDENCE-OF: R-17482-00398 If the authority is not an empty string or
 # "localhost", an error is returned to the caller.
 #
 if {$tcl_platform(platform) == "unix"} {
   set flags [list SQLITE_OPEN_READWRITE SQLITE_OPEN_CREATE SQLITE_OPEN_URI]
   foreach {tn uri error} "
-    1    {file://localhost[pwd]/test.db}        {not an error}
-    2    {file://[pwd]/test.db}                 {not an error}
-    3    {file://x[pwd]/test.db}                {invalid uri authority: x}
-    4    {file://invalid[pwd]/test.db}          {invalid uri authority: invalid}
+    1    {file://localhost[get_pwd]/test.db}    {not an error}
+    2    {file://[get_pwd]/test.db}             {not an error}
+    3    {file://x[get_pwd]/test.db}            {invalid uri authority: x}
+    4    {file://invalid[get_pwd]/test.db}      {invalid uri authority: invalid}
   " {
     do_test 2.$tn {
       set DB [sqlite3_open_v2 $uri $flags ""]
       set e [sqlite3_errmsg $DB]
       sqlite3_close $DB
       set e
     } $error
   }
 }

 # EVIDENCE-OF: R-45981-25528 The fragment component of a URI, if
 # present, is ignored.
 #
 # It is difficult to test that something is ignored correctly. So these tests
 # just show that adding a fragment does not interfere with the pathname or
 # parameters passed through to the VFS xOpen() methods.
 #
 foreach {tn uri parse} "
-  1    {file:test.db#abc}           {[pwd]/test.db {}}
-  2    {file:test.db?a=b#abc}       {[pwd]/test.db {a b}}
-  3    {file:test.db?a=b#?c=d}      {[pwd]/test.db {a b}}
+  1    {file:test.db#abc}           {[get_pwd]/test.db {}}
+  2    {file:test.db?a=b#abc}       {[get_pwd]/test.db {a b}}
+  3    {file:test.db?a=b#?c=d}      {[get_pwd]/test.db {a b}}
 " {
   do_filepath_test 3.$tn { parse_uri $uri } $parse
 }

 # EVIDENCE-OF: R-62557-09390 SQLite uses the path component of the URI
 # as the name of the disk file which contains the database.
 #
 # EVIDENCE-OF: R-28659-11035 If the path begins with a '/' character,
 # then it is interpreted as an absolute path.
 #
 # EVIDENCE-OF: R-46234-61323 If the path does not begin with a '/'
 # (meaning that the authority section is omitted from the URI) then the
 # path is interpreted as a relative path.
 #
 foreach {tn uri parse} "
-  1   {file:test.db}             {[pwd]/test.db {}}
+  1   {file:test.db}             {[get_pwd]/test.db {}}
   2   {file:/test.db}            {/test.db {}}
   3   {file:///test.db}          {/test.db {}}
   4   {file://localhost/test.db} {/test.db {}}
   5   {file:/a/b/c/test.db}      {/a/b/c/test.db {}}
 " {
   do_filepath_test 4.$tn { parse_uri $uri } $parse
 }
Changes to test/filectrl.test.
 do_test filectrl-1.4 {
   sqlite3 db test.db
   file_control_lasterrno_test db
 } {}
 do_test filectrl-1.5 {
   db close
   sqlite3 db test_control_lockproxy.db
-  file_control_lockproxy_test db [pwd]
+  file_control_lockproxy_test db [get_pwd]
 } {}
 db close
 forcedelete .test_control_lockproxy.db-conch test.proxy
 finish_test
Changes to test/misc7.test.
 do_test misc7-20.1 {
   sqlite3_global_recover
 } {SQLITE_OK}

 # Try to open a really long file name.
 #
 do_test misc7-21.1 {
-  set zFile [file join [pwd] "[string repeat abcde 104].db"]
+  set zFile [file join [get_pwd] "[string repeat abcde 104].db"]
   set rc [catch {sqlite3 db2 $zFile} msg]
   list $rc $msg
 } {1 {unable to open database file}}

 db close
 forcedelete test.db
Changes to test/pager1.test.
   if {[string match *mj* [file tail $filename]]} {
     set ::mj_filename_length [string length $filename]
     faultsim_save
   }
   return SQLITE_OK
 }

-set pwd [pwd]
+set pwd [get_pwd]
 foreach {tn1 tcl} {
   1 { set prefix "test.db" }
   2 {
     # This test depends on the underlying VFS being able to open paths
     # 512 bytes in length. The idea is to create a hot-journal file that
     # contains a master-journal pointer so large that it could contain
     # a valid page record (if the file page-size is 512 bytes). So as to
︙
   #
   #   1) 512 byte header +
   #   2) 2 * (1024+8) byte records +
   #   3) 20+N bytes of master-journal pointer, where N is the size of
   #      the master-journal name encoded as utf-8 with no nul term.
   #
   set mj_pointer [expr {
-    20 + [string length [pwd]] + [string length "/test.db-mjXXXXXX9XX"]
+    20 + [string length [get_pwd]] + [string length "/test.db-mjXXXXXX9XX"]
   }]
   expr {$::max_journal==(512+2*(1024+8)+$mj_pointer)}
 } 1
 do_test pager1-5.4.2 {
   set ::max_journal 0
   execsql {
     PRAGMA synchronous = full;
     BEGIN;
       DELETE FROM t1 WHERE b = 'Lenin';
       DELETE FROM t2 WHERE b = 'Lenin';
     COMMIT;
   }

   # In synchronous=full mode, the master-journal pointer is not written
   # directly after the last record in the journal file. Instead, it is
   # written starting at the next (in this case 512 byte) sector boundary.
   #
   set mj_pointer [expr {
-    20 + [string length [pwd]] + [string length "/test.db-mjXXXXXX9XX"]
+    20 + [string length [get_pwd]] + [string length "/test.db-mjXXXXXX9XX"]
   }]
   expr {$::max_journal==(((512+2*(1024+8)+511)/512)*512 + $mj_pointer)}
 } 1
 db close
 tv delete

 do_test pager1-5.5.1 {
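The expected journal sizes above are plain arithmetic over the length of the current directory name. As a worked example, assuming a ten-character directory such as /home/user (a real run substitutes the length of [get_pwd]):

    # Assumed directory name of length 10 ("/home/user"); the test itself
    # uses [string length [get_pwd]] instead of a constant.
    set pwdlen     10
    set mj_pointer [expr {20 + $pwdlen + [string length "/test.db-mjXXXXXX9XX"]}]  ;# 20 + 10 + 20 = 50
    set expected   [expr {512 + 2*(1024+8) + $mj_pointer}]                         ;# 512 + 2064 + 50 = 2626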
Changes to test/pragma.test.
 do_test pragma-9.4 {
   execsql {
     PRAGMA temp_store_directory;
   }
 } {}
 ifcapable wsd {
   do_test pragma-9.5 {
-    set pwd [string map {' ''} [file nativename [pwd]]]
+    set pwd [string map {' ''} [file nativename [get_pwd]]]
     execsql "
       PRAGMA temp_store_directory='$pwd';
     "
   } {}
   do_test pragma-9.6 {
     execsql {
       PRAGMA temp_store_directory;
     }
-  } [list [file nativename [pwd]]]
+  } [list [file nativename [get_pwd]]]
   do_test pragma-9.7 {
     catchsql {
       PRAGMA temp_store_directory='/NON/EXISTENT/PATH/FOOBAR';
     }
   } {1 {not a writable directory}}
   do_test pragma-9.8 {
     execsql {
Changes to test/quota.test.
 sqlite3_quota_set * 4096 quota_callback
 do_test quota-3.3.1 {
   execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db1a
   execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db1b
   execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db2a
   execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db2b
   set ::quota
-} [list [file join [pwd] test.db] 5120]
+} [list [file join [get_pwd] test.db] 5120]
 do_test quota-3.2.X {
   foreach db {db1a db2a db2b db1b} { catch { $db close } }
   sqlite3_quota_set * 0 {}
 } {SQLITE_OK}

 #-------------------------------------------------------------------------
Changes to test/quota2.test.
   file mkdir $dir
 }

 # The standard_path procedure converts a pathname into a standard format
 # that is the same across platforms.
 #
 unset -nocomplain ::quota_pwd ::quota_mapping
-set ::quota_pwd [string map {\\ /} [pwd]]
+set ::quota_pwd [string map {\\ /} [get_pwd]]
 set ::quota_mapping [list $::quota_pwd PWD]
 proc standard_path {x} {
   set x [string map {\\ /} $x]
   return [string map $::quota_mapping $x]
 }

 # The quota_check procedure is a callback from the quota handler.
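standard_path, shown above, makes expected results machine-independent by mapping the real working directory onto the literal token PWD. A small illustration with a made-up directory; in the test file itself ::quota_pwd is derived from [get_pwd]:

    # Made-up working directory; quota2.test computes this from [get_pwd].
    set ::quota_pwd     "C:/work/sqlite"
    set ::quota_mapping [list $::quota_pwd PWD]

    proc standard_path {x} {
      set x [string map {\\ /} $x]
      return [string map $::quota_mapping $x]
    }

    # Backslashes are flattened and the directory collapses to PWD:
    puts [standard_path {C:\work\sqlite\quota2a\test.db}]   ;# prints PWD/quota2a/test.db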
Changes to test/tester.tcl.
 #-------------------------------------------------------------------------
 # The commands provided by the code in this file to help with creating
 # test cases are as follows:
 #
 # Commands to manipulate the db and the file-system at a high level:
 #
+#   get_pwd
 #   copy_file FROM TO
 #   delete_file FILENAME
 #   drop_all_tables ?DB?
 #   forcecopy FROM TO
 #   forcedelete FILENAME
 #
 # Test the capability of the SQLite version built into the interpreter to
︙
   #   failed [file] operations. A value of zero or less means "do not
   #   wait".
   #
     return 100; # TODO: Good default?
   }
   return $::G(file-retry-delay)
 }

+# Return the string representing the name of the current directory. On
+# Windows, the result is "normalized" to whatever our parent command shell
+# is using to prevent case-mismatch issues.
+#
+proc get_pwd {} {
+  if {$::tcl_platform(platform) eq "windows"} {
+    return [string trim [exec -- $::env(ComSpec) /c echo %CD%]]
+  } else {
+    return [pwd]
+  }
+}
+
 # Copy file $from into $to. This is used because some versions of
 # TCL for windows (notably the 8.4.1 binary package shipped with the
 # current mingw release) have a broken "file copy" command.
 #
 proc copy_file {from to} {
   do_copy_file false $from $to
︙
   if {$crashfile eq ""} {
     error "Compulsory option -file missing"
   }

   # $crashfile gets compared to the native filename in
   # cfSync(), which can be different then what TCL uses by
   # default, so here we force it to the "nativename" format.
-  set cfile [string map {\\ \\\\} [file nativename [file join [pwd] $crashfile]]]
+  set cfile [string map {\\ \\\\} [file nativename [file join [get_pwd] $crashfile]]]

   set f [open crash.tcl w]
   puts $f "sqlite3_crash_enable 1"
   puts $f "sqlite3_crashparams $blocksize $dc $crashdelay $cfile"
   puts $f "sqlite3_test_control_pending_byte $::sqlite_pending_byte"
   puts $f "sqlite3 db test.db -vfs crash"
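The crashsql change above and the new tracing in src/test6.c are two views of the same requirement: cfSync() compares the incoming file name against the configured crash file with memcmp(), so the string handed to sqlite3_crashparams must be spelled exactly as the OS will later report it. A hedged sketch of how a crash target path is now formed; the file name here is illustrative, crashsql derives the real one from its -file option:

    # Illustrative crash-file name only.  [get_pwd] keeps the directory
    # portion consistent with the parent shell, and [file nativename]
    # produces the form that cfSync() will see at sync time.
    set crashfile test.db-journal
    set cfile [string map {\\ \\\\} [file nativename [file join [get_pwd] $crashfile]]]
    puts "crash target: $cfile"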
Changes to test/tkt-94c04eaadb.test.
 # Create a database.
 do_test tkt-94c94-1.1 {
   execsql { CREATE TABLE t1(a, b) }
 } {}

 # Grow the file to larger than 4096MB (2^32 bytes)
 db close
-if {[catch {fake_big_file 4096 [pwd]/test.db} msg]} {
+if {[catch {fake_big_file 4096 [get_pwd]/test.db} msg]} {
   puts "**** Unable to create a file larger than 4096 MB. *****"
   finish_test
   return
 }

 # Switch to async mode.
 sqlite3async_initialize "" 1
Changes to test/uri.test.
   15   test.db?mork=1#boris                 test.db?mork=1#boris
   16   file://localhostPWD/test.db%3Fhello  test.db?hello
 } {

   if {$tcl_platform(platform)=="windows"} {
     if {$tn>14} break
-    set uri [string map [list PWD /[pwd]] $uri]
+    set uri [string map [list PWD /[get_pwd]] $uri]
   } else {
-    set uri [string map [list PWD [pwd]] $uri]
+    set uri [string map [list PWD [get_pwd]] $uri]
   }

   if {[file isdir $file]} {error "$file is a directory"}
   forcedelete $file
   do_test 1.$tn.1 { file exists $file } 0
   set DB [sqlite3_open $uri]
   do_test 1.$tn.2 { file exists $file } 1
︙
   3  "file:/PWD/test.db"               {not an error}
   4  "file://l%6Fcalhost/PWD/test.db"  {invalid uri authority: l%6Fcalhost}
   5  "file://lbcalhost/PWD/test.db"    {invalid uri authority: lbcalhost}
   6  "file://x/PWD/test.db"            {invalid uri authority: x}
 } {

   if {$tcl_platform(platform)=="windows"} {
-    set uri [string map [list PWD [string range [pwd] 3 end]] $uri]
+    set uri [string map [list PWD [string range [get_pwd] 3 end]] $uri]
   } else {
-    set uri [string map [list PWD [string range [pwd] 1 end]] $uri]
+    set uri [string map [list PWD [string range [get_pwd] 1 end]] $uri]
   }

   do_test 6.$tn {
     set DB [sqlite3_open $uri]
     sqlite3_errmsg $DB
   } $res

   catch { sqlite3_close $DB }
Changes to test/wal.test.
   }]
 }

 #-------------------------------------------------------------------------
 # Test that when 1 or more pages are recovered from a WAL file,
 # sqlite3_log() is invoked to report this to the user.
 #
-set walfile [file nativename [file join [pwd] test.db-wal]]
+set walfile [file nativename [file join [get_pwd] test.db-wal]]
 catch {db close}
 forcedelete test.db
 do_test wal-23.1 {
   faultsim_delete_and_reopen
   execsql {
     CREATE TABLE t1(a, b);
     PRAGMA journal_mode = WAL;
Changes to test/walbig.test.
     INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1;
     INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1;
     INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1;
   }
 } {wal}

 db close
-if {[catch {fake_big_file 5000 [pwd]/test.db}]} {
+if {[catch {fake_big_file 5000 [get_pwd]/test.db}]} {
   puts "**** Unable to create a file larger than 5000 MB. *****"
   finish_test
   return
 }
 hexio_write test.db 28 00000000

 sqlite3 db test.db