Overview
Comment:      Merge and manually resolve testing updates from trunk.
Downloads:    Tarball | ZIP archive
Timelines:    family | ancestors | descendants | both | winrt
Files:        files | file ages | folders
SHA1:         5eecdb44dd809e91002b8ecf59ada72f
User & Date:  mistachkin 2012-03-08 20:22:42.680
Context
2012-03-08

20:39  Merge test current directory value fix from trunk and fix uri tests.
       (check-in: 88963c33c1 user: mistachkin tags: winrt)
20:22  Merge and manually resolve testing updates from trunk.
       (check-in: 5eecdb44dd user: mistachkin tags: winrt)
20:00  On Windows, make sure the current directory value used by the test suite
       is 'normalized' to what the parent command shell sees. Also, clean the
       test directories used by the quota2.test file.
       (check-in: 82bcd7ec15 user: mistachkin tags: trunk)

2012-03-07

20:11  When compiled for WinRT, use the CreateFile2 and LoadPackagedLibrary
       functions instead of CreateFile and LoadLibrary.
       (check-in: 27d6942ca2 user: mistachkin tags: winrt)
Changes

In the hunks below, a line prefixed with ">" was added by this check-in and a
line prefixed with "|" had its content changed; unprefixed lines are unchanged
context. Each hunk is introduced by the line ranges it occupies in the old and
new versions of the file.
Changes to Makefile.in.
(old lines 930-943, new lines 930-944)

      rm -f *.lo *.la *.o sqlite3$(TEXE) libsqlite3.la
      rm -f sqlite3.h opcodes.*
      rm -rf .libs .deps
      rm -f lemon$(BEXE) lempar.c parse.* sqlite*.tar.gz
      rm -f mkkeywordhash$(BEXE) keywordhash.h
      rm -f $(PUBLISH)
      rm -f *.da *.bb *.bbg gmon.out
>     rm -rf quota2a quota2b quota2c
      rm -rf tsrc .target_source
      rm -f tclsqlite3$(TEXE)
      rm -f testfixture$(TEXE) test.db
      rm -f sqlite3.dll sqlite3.lib sqlite3.exp sqlite3.def
      rm -f sqlite3.c
      rm -f sqlite3_analyzer$(TEXE) sqlite3_analyzer.c
Changes to Makefile.msc.
(old lines 981-994, new lines 981-997)

      del /Q *.lo *.ilk *.lib *.obj *.pdb sqlite3.exe libsqlite3.lib
      del /Q *.da *.bb *.bbg gmon.out
      del /Q sqlite3.h opcodes.c opcodes.h
      del /Q lemon.exe lempar.c parse.*
      del /Q mkkeywordhash.exe keywordhash.h
      -rmdir /Q/S .deps
      -rmdir /Q/S .libs
>     -rmdir /Q/S quota2a
>     -rmdir /Q/S quota2b
>     -rmdir /Q/S quota2c
      -rmdir /Q/S tsrc
      del /Q .target_source
      del /Q tclsqlite3.exe
      del /Q testfixture.exe testfixture.exp test.db
      del /Q sqlite3.dll sqlite3.lib sqlite3.exp sqlite3.def
      del /Q sqlite3.c
      del /Q sqlite3_analyzer.exe sqlite3_analyzer.exp sqlite3_analyzer.c
Changes to Makefile.vxworks.
(old lines 653-663, new lines 653-664)

      ./testfixture$(EXE) $(TOP)/test/loadext.test

  clean:
      rm -f *.o sqlite3$(EXE) libsqlite3.a sqlite3.h opcodes.*
      rm -f lemon lempar.c parse.* sqlite*.tar.gz mkkeywordhash keywordhash.h
      rm -f $(PUBLISH)
      rm -f *.da *.bb *.bbg gmon.out
>     rm -rf quota2a quota2b quota2c
      rm -rf tsrc target_source
      rm -f testloadext.dll libtestloadext.so
      rm -f sqlite3.c fts?amal.c tclsqlite3.c
      rm -f $(SHPREFIX)sqlite3.$(SO)
Changes to main.mk.
(old lines 597-610, new lines 597-611)

  clean:
      rm -f *.o sqlite3 sqlite3.exe libsqlite3.a sqlite3.h opcodes.*
      rm -f lemon lemon.exe lempar.c parse.* sqlite*.tar.gz
      rm -f mkkeywordhash mkkeywordhash.exe keywordhash.h
      rm -f $(PUBLISH)
      rm -f *.da *.bb *.bbg gmon.out
>     rm -rf quota2a quota2b quota2c
      rm -rf tsrc target_source
      rm -f testloadext.dll libtestloadext.so
      rm -f amalgamation-testfixture amalgamation-testfixture.exe
      rm -f fts3-testfixture fts3-testfixture.exe
      rm -f testfixture testfixture.exe
      rm -f threadtest3 threadtest3.exe
      rm -f sqlite3.c fts?amal.c tclsqlite3.c
Changes to src/test6.c.
(old lines 471-486, new lines 471-494; the added printf() calls identify the function as cfSync())

    int nName = strlen(zName);
    int nCrashFile = strlen(zCrashFile);
    if( nCrashFile>0 && zCrashFile[nCrashFile-1]=='*' ){
      nCrashFile--;
      if( nName>nCrashFile ) nName = nCrashFile;
    }

> #ifdef TRACE_CRASHTEST
>   printf("cfSync(): nName = %d, nCrashFile = %d, zName = %s, zCrashFile = %s\n",
>          nName, nCrashFile, zName, zCrashFile);
> #endif
    if( nName==nCrashFile && 0==memcmp(zName, zCrashFile, nName) ){
> #ifdef TRACE_CRASHTEST
>     printf("cfSync(): name matched, g.iCrash = %d\n", g.iCrash);
> #endif
      if( (--g.iCrash)==0 ) isCrash = 1;
    }

    return writeListSync(pCrash, isCrash);
  }

  /*
Changes to test/bigfile.test.
(old lines 67-73, new lines 67-81)

  } $::MAGIC_SUM

  # Try to create a large file - a file that is larger than 2^32 bytes.
  # If this fails, it means that the system being tested does not support
  # large files. So skip all of the remaining tests in this file.
  #
  db close
| if {[catch {fake_big_file 4096 [get_pwd]/test.db} msg]} {
    puts "**** Unable to create a file larger than 4096 MB. *****"
    finish_test
    return
  }

  hexio_write test.db 28 00000000
  do_test bigfile-1.2 {

(old lines 107-113, new lines 107-121)

    sqlite3 db test.db
    execsql {
      SELECT md5sum(x) FROM t1;
    }
  } $::MAGIC_SUM

  db close
| if {[catch {fake_big_file 8192 [get_pwd]/test.db}]} {
    puts "**** Unable to create a file larger than 8192 MB. *****"
    finish_test
    return
  }

  hexio_write test.db 28 00000000
  do_test bigfile-1.5 {

(old lines 146-152, new lines 146-160)

  do_test bigfile-1.9 {
    execsql {
      SELECT md5sum(x) FROM t2;
    }
  } $::MAGIC_SUM

  db close
| if {[catch {fake_big_file 16384 [get_pwd]/test.db}]} {
    puts "**** Unable to create a file larger than 16384 MB. *****"
    finish_test
    return
  }

  hexio_write test.db 28 00000000
  do_test bigfile-1.10 {
Changes to test/bigfile2.test.
(old lines 27-33, new lines 27-41)

  }

  # Pad the file out to 4GB in size. Then clear the file-size field in the
  # db header. This will cause SQLite to assume that the first 4GB of pages
  # are actually in use and new pages will be appended to the file.
  #
  db close
| if {[catch {fake_big_file 4096 [get_pwd]/test.db} msg]} {
    puts "**** Unable to create a file larger than 4096 MB. *****"
    finish_test
    return
  }

  hexio_write test.db 28 00000000
  do_test 1.2 {
Changes to test/crash5.test.
(old lines 43-49, new lines 43-57)

      INSERT INTO t1 VALUES('1111111111', '2222222222', $c);
    }
    db close

    do_test crash5-$ii.$jj.1 {
      crashsql -delay 1 -file test.db-journal -seed $ii -tclbody [join [list \
        [list set iFail $jj] {
|       sqlite3_crashparams 0 [file join [get_pwd] test.db-journal]

        # Begin a transaction and evaluate a "CREATE INDEX" statement
        # with the iFail'th malloc() set to fail. This operation will
        # have to move the current contents of page 4 (the overflow
        # page) to make room for the new root page. The bug is that
        # if malloc() fails at a particular point in sqlite3PagerMovepage(),
        # sqlite mistakenly thinks that the page being moved (page 4) has

(old lines 85-91, new lines 85-99)

        # the transaction was not rolled back, then the sqlite cache now
        # has a dirty page 4 that it incorrectly believes is already safely
        # in the synced part of the journal file. When
        # sqlite3_release_memory() is called sqlite tries to free memory
        # by writing page 4 out to the db file. If it crashes later on,
        # before syncing the journal... Corruption!
        #
|       sqlite3_crashparams 1 [file join [get_pwd] test.db-journal]
        sqlite3_release_memory 8092
      }]] {}
      expr 1
    } {1}

    sqlite3 db test.db
    do_test crash5-$ii.$jj.2 {
Changes to test/filectrl.test.
(old lines 30-36, new lines 30-41)

  do_test filectrl-1.4 {
    sqlite3 db test.db
    file_control_lasterrno_test db
  } {}
  do_test filectrl-1.5 {
    db close
    sqlite3 db test_control_lockproxy.db
|   file_control_lockproxy_test db [get_pwd]
  } {}
  db close
  forcedelete .test_control_lockproxy.db-conch test.proxy
  finish_test
Changes to test/misc7.test.
(old lines 479-485, new lines 479-493)

  do_test misc7-20.1 {
    sqlite3_global_recover
  } {SQLITE_OK}

  # Try to open a really long file name.
  #
  do_test misc7-21.1 {
|   set zFile [file join [get_pwd] "[string repeat abcde 104].db"]
    set rc [catch {sqlite3 db2 $zFile} msg]
    list $rc $msg
  } {1 {unable to open database file}}

  db close
  forcedelete test.db
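The expected failure in misc7-21.1 is driven purely by the length of the generated name. As a quick sanity check of the sizes involved (not part of the check-in; it assumes tester.tcl has been sourced so that get_pwd exists):

    # "abcde" repeated 104 times plus ".db" gives a 523-character file name, so
    # the joined path easily exceeds common operating-system path limits.
    set zFile [file join [get_pwd] "[string repeat abcde 104].db"]
    puts [string length $zFile]   ;# 523 + length of the current directory + 1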
Changes to test/pager1.test.
(old lines 520-526, new lines 520-534)

  db close

  # Set up a VFS that snapshots the file-system just before a master journal
  # file is deleted to commit a multi-file transaction. Specifically, the
  # file-system is saved just before the xDelete() call to remove the
  # master journal file from the file-system.
  #
| set pwd [get_pwd]
  testvfs tv -default 1
  tv script copy_on_mj_delete
  set ::mj_filename_length 0
  proc copy_on_mj_delete {method filename args} {
    if {[string match *mj* [file tail $filename]]} {
      #
      # NOTE: Is the file name relative? If so, add the length of the current

(old lines 1015-1021, new lines 1015-1029)

    #
    # NOTE: For item 3 above, if the current SQLite VFS lacks the concept of a
    #       current directory, the length of the current directory name plus 1
    #       character for the directory separator character are NOT counted as
    #       part of the total size; otherwise, they are.
    #
    ifcapable curdir {
|     set mj_pointer [expr {$mj_pointer + [string length [get_pwd]] + 1}]
    }
    expr {$::max_journal==(512+2*(1024+8)+$mj_pointer)}
  } 1
  do_test pager1-5.4.2 {
    set ::max_journal 0
    execsql {
      PRAGMA synchronous = full;

(old lines 1043-1049, new lines 1043-1057)

    #
    # NOTE: If the current SQLite VFS lacks the concept of a current directory,
    #       the length of the current directory name plus 1 character for the
    #       directory separator character are NOT counted as part of the total
    #       size; otherwise, they are.
    #
    ifcapable curdir {
|     set mj_pointer [expr {$mj_pointer + [string length [get_pwd]] + 1}]
    }
    expr {$::max_journal==(((512+2*(1024+8)+511)/512)*512 + $mj_pointer)}
  } 1
  db close
  tv delete
  do_test pager1-5.5.1 {
Changes to test/pragma.test.
(old lines 986-992, new lines 986-1009)

  do_test pragma-9.4 {
    execsql {
      PRAGMA temp_store_directory;
    }
  } {}
  ifcapable wsd {
    do_test pragma-9.5 {
|     set pwd [string map {' ''} [file nativename [get_pwd]]]
      execsql "
        PRAGMA temp_store_directory='$pwd';
      "
    } {}
    do_test pragma-9.6 {
      execsql {
        PRAGMA temp_store_directory;
      }
|   } [list [file nativename [get_pwd]]]
    do_test pragma-9.7 {
      catchsql {
        PRAGMA temp_store_directory='/NON/EXISTENT/PATH/FOOBAR';
      }
    } {1 {not a writable directory}}
    do_test pragma-9.8 {
      execsql {
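The [string map {' ''}] step in pragma-9.5 exists because the directory name is spliced into a single-quoted SQL literal, so any single quote in the path has to be doubled. A small illustration with a made-up directory name (not taken from the check-in):

    # Hypothetical directory name containing a single quote.
    set dir {C:\It's Here}
    # Doubling every ' keeps the substituted value a valid SQL string literal.
    set escaped [string map {' ''} $dir]
    puts "PRAGMA temp_store_directory='$escaped';"
    # prints: PRAGMA temp_store_directory='C:\It''s Here';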
Changes to test/quota.test.
(old lines 224-230, new lines 224-238)

  sqlite3_quota_set * 4096 quota_callback
  do_test quota-3.3.1 {
    execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db1a
    execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db1b
    execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db2a
    execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db2b
    set ::quota
| } [list [file join [get_pwd] test.db] 5120]
  do_test quota-3.2.X {
    foreach db {db1a db2a db2b db1b} { catch { $db close } }
    sqlite3_quota_set * 0 {}
  } {SQLITE_OK}

  #-------------------------------------------------------------------------
Changes to test/quota2.test.
(old lines 31-37, new lines 31-45)

    file mkdir $dir
  }

  # The standard_path procedure converts a pathname into a standard format
  # that is the same across platforms.
  #
  unset -nocomplain ::quota_pwd ::quota_mapping
| set ::quota_pwd [string map {\\ /} [get_pwd]]
  set ::quota_mapping [list $::quota_pwd PWD]
  proc standard_path {x} {
    set x [string map {\\ /} $x]
    return [string map $::quota_mapping $x]
  }

  # The quota_check procedure is a callback from the quota handler.
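Taken together, ::quota_pwd, ::quota_mapping and standard_path let the quota tests write their expected file names as PWD/... regardless of platform. A rough, self-contained sketch of the behaviour (the directory value is hypothetical and stands in for [get_pwd]):

    # Pretend the parent shell's current directory is C:\Work\SQLite.
    set ::quota_pwd     {C:/Work/SQLite}            ;# [string map {\\ /} [get_pwd]]
    set ::quota_mapping [list $::quota_pwd PWD]

    proc standard_path {x} {
      set x [string map {\\ /} $x]                  ;# backslashes become forward slashes
      return [string map $::quota_mapping $x]       ;# the pwd prefix becomes "PWD"
    }

    puts [standard_path {C:\Work\SQLite\quota2a\test.db}]  ;# prints PWD/quota2a/test.db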
Changes to test/tester.tcl.
(old lines 17-30, new lines 17-31)

  # The commands provided by the code in this file to help with creating
  # test cases are as follows:
  #
  # Commands to manipulate the db and the file-system at a high level:
  #
  #   is_relative_file
  #   test_pwd
> #   get_pwd
  #   copy_file FROM TO
  #   delete_file FILENAME
  #   drop_all_tables ?DB?
  #   forcecopy FROM TO
  #   forcedelete FILENAME
  #
  # Test the capability of the SQLite version built into the interpreter to

(old lines 145-158, new lines 146-171)

  #   failed [file] operations. A value of zero or less means "do not
  #   wait".
  #
      return 100; # TODO: Good default?
    }
    return $::G(file-retry-delay)
  }

> # Return the string representing the name of the current directory. On
> # Windows, the result is "normalized" to whatever our parent command shell
> # is using to prevent case-mismatch issues.
> #
> proc get_pwd {} {
>   if {$::tcl_platform(platform) eq "windows"} {
>     return [string trim [exec -- $::env(ComSpec) /c echo %CD%]]
>   } else {
>     return [pwd]
>   }
> }

  # Copy file $from into $to. This is used because some versions of
  # TCL for windows (notably the 8.4.1 binary package shipped with the
  # current mingw release) have a broken "file copy" command.
  #
  proc copy_file {from to} {
    do_copy_file false $from $to
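This new get_pwd helper is central to the merged fix: the test files touched by this check-in switch from Tcl's built-in pwd to it, so that path comparisons use whatever casing the parent command shell reports. A small illustration of the mismatch it papers over (hypothetical output; only meaningful on Windows):

    # On Windows, Tcl's [pwd] and the parent cmd.exe shell can disagree about
    # the letter case of the current directory (e.g. c:/work/sqlite versus
    # C:\Work\SQLite), which breaks string comparisons against paths reported
    # by the OS. get_pwd sides with the shell.
    if {$::tcl_platform(platform) eq "windows"} {
      puts "Tcl  pwd  : [pwd]"
      puts "Shell %CD%: [string trim [exec -- $::env(ComSpec) /c echo %CD%]]"
    }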
(old lines 213-219, new lines 226-240)

    } else {
      set suffix2 $suffix1
    }
  } else {
    set suffix1 ""; set suffix2 ""
  }
  ifcapable curdir {
|   return "[get_pwd]$suffix1"
  } else {
    return $suffix2
  }
  }

  # Delete a file or directory
  #

(old lines 1010-1016, new lines 1023-1037)

    if {$crashfile eq ""} {
      error "Compulsory option -file missing"
    }

    # $crashfile gets compared to the native filename in
    # cfSync(), which can be different then what TCL uses by
    # default, so here we force it to the "nativename" format.
|   set cfile [string map {\\ \\\\} [file nativename [file join [get_pwd] $crashfile]]]

    set f [open crash.tcl w]
    puts $f "sqlite3_crash_enable 1"
    puts $f "sqlite3_crashparams $blocksize $dc $crashdelay $cfile"
    puts $f "sqlite3_test_control_pending_byte $::sqlite_pending_byte"
    puts $f "sqlite3 db test.db -vfs crash"
Changes to test/tkt-94c04eaadb.test.
(old lines 23-29, new lines 23-37)

  # Create a database.
  do_test tkt-94c94-1.1 {
    execsql { CREATE TABLE t1(a, b) }
  } {}

  # Grow the file to larger than 4096MB (2^32 bytes)
  db close
| if {[catch {fake_big_file 4096 [get_pwd]/test.db} msg]} {
    puts "**** Unable to create a file larger than 4096 MB. *****"
    finish_test
    return
  }

  # Switch to async mode.
  sqlite3async_initialize "" 1
Changes to test/uri.test.
(old lines 272-278, new lines 272-288)

    3 "file:/PWD/test.db" {not an error}
    4 "file://l%6Fcalhost/PWD/test.db" {invalid uri authority: l%6Fcalhost}
    5 "file://lbcalhost/PWD/test.db" {invalid uri authority: lbcalhost}
    6 "file://x/PWD/test.db" {invalid uri authority: x}
  } {

    if {$tcl_platform(platform)=="windows"} {
|     set uri [string map [list PWD [string range [get_pwd] 3 end]] $uri]
    } else {
|     set uri [string map [list PWD [string range [get_pwd] 1 end]] $uri]
    }

    do_test 6.$tn {
      set DB [sqlite3_open $uri]
      sqlite3_errmsg $DB
    } $res
    catch { sqlite3_close $DB }
Changes to test/wal.test.
(old lines 1474-1480, new lines 1474-1488)

  }

  #-------------------------------------------------------------------------
  # Test that when 1 or more pages are recovered from a WAL file,
  # sqlite3_log() is invoked to report this to the user.
  #
  ifcapable curdir {
|   set walfile [file nativename [file join [get_pwd] test.db-wal]]
  } else {
    set walfile test.db-wal
  }
  catch {db close}
  forcedelete test.db
  do_test wal-23.1 {
    faultsim_delete_and_reopen
Changes to test/walbig.test.
(old lines 48-54, new lines 48-62)

      INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1;
      INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1;
      INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1;
    }
  } {wal}

  db close
| if {[catch {fake_big_file 5000 [get_pwd]/test.db}]} {
    puts "**** Unable to create a file larger than 5000 MB. *****"
    finish_test
    return
  }

  hexio_write test.db 28 00000000
  sqlite3 db test.db