Overview
Comment:      Merge trunk changes with experimental branch.
SHA1:         acd26b8b746980c344db017a0e96dbd9
User & Date:  dan 2010-08-05 16:22:50.000
Context
2010-08-05
18:53  Add comments describing UNKNOWN_LOCK to pager.c. Improve some other comments in the same file. (check-in: 54eff6de9d user: dan tags: experimental)
16:22  Merge trunk changes with experimental branch. (check-in: acd26b8b74 user: dan tags: experimental)
16:08  Catch an error code that was not being propagated back to the caller. (check-in: 800f496929 user: dan tags: experimental)
11:56  Make the size of a Bitvec object 512 bytes on all platforms, instead of having the size depend on the size of a pointer. This makes testing easier. (check-in: ca479f3de2 user: drh tags: trunk)
Changes
Changes to src/bitvec.c.
︙

** Bitvec object is the number of pages in the database file at the
** start of a transaction, and is thus usually less than a few thousand,
** but can be as large as 2 billion for a really big database.
*/
#include "sqliteInt.h"

/* Size of the Bitvec structure in bytes. */
#define BITVEC_SZ        512

/* Round the union size down to the nearest pointer boundary, since that's how
** it will be aligned within the Bitvec struct. */
#define BITVEC_USIZE     (((BITVEC_SZ-(3*sizeof(u32)))/sizeof(Bitvec*))*sizeof(Bitvec*))

/* Type of the array "element" for the bitmap representation.
** Should be a power of 2, and ideally, evenly divide into BITVEC_USIZE.

︙
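For reference, a small standalone sketch (not part of the check-in) of the arithmetic behind BITVEC_USIZE: with BITVEC_SZ now fixed at 512 bytes, the usable union size is still rounded down to a multiple of sizeof(Bitvec*), so it evaluates to 496 on a typical 64-bit build and 500 on a 32-bit build, even though the overall structure size no longer varies. The typedefs below are local stand-ins so the expression compiles outside of sqliteInt.h.

/* Standalone illustration of the BITVEC_USIZE rounding; u32 and Bitvec are
** stand-ins for the types defined in sqliteInt.h. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef struct Bitvec Bitvec;   /* opaque; only a pointer to it is sized */

#define BITVEC_SZ     512
#define BITVEC_USIZE  (((BITVEC_SZ-(3*sizeof(u32)))/sizeof(Bitvec*))*sizeof(Bitvec*))

int main(void){
  /* (512 - 3*4)/8*8 = 496 with 8-byte pointers; (512 - 3*4)/4*4 = 500 with 4-byte pointers. */
  printf("sizeof(Bitvec*)=%zu  BITVEC_USIZE=%zu\n",
         sizeof(Bitvec*), (size_t)BITVEC_USIZE);
  return 0;
}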
Changes to src/btree.c.
︙

      }
#endif
    }
    p->inTrans = (wrflag?TRANS_WRITE:TRANS_READ);
    if( p->inTrans>pBt->inTransaction ){
      pBt->inTransaction = p->inTrans;
    }
    if( wrflag ){
      MemPage *pPage1 = pBt->pPage1;
#ifndef SQLITE_OMIT_SHARED_CACHE
      assert( !pBt->pWriter );
      pBt->pWriter = p;
      pBt->isExclusive = (u8)(wrflag>1);
#endif

      /* If the db-size header field is incorrect (as it may be if an old
      ** client has been writing the database file), update it now. Doing
      ** this sooner rather than later means the database size can safely
      ** re-read the database size from page 1 if a savepoint or transaction
      ** rollback occurs within the transaction.
      */
      if( pBt->nPage!=get4byte(&pPage1->aData[28]) ){
        rc = sqlite3PagerWrite(pPage1->pDbPage);
        if( rc==SQLITE_OK ){
          put4byte(&pPage1->aData[28], pBt->nPage);
        }
      }
    }
  }

trans_begun:
  if( rc==SQLITE_OK && wrflag ){
    /* This call makes sure that the pager has the correct number of
    ** open savepoints. If the second parameter is greater than 0 and

︙

    assert( iSavepoint>=0 || (iSavepoint==-1 && op==SAVEPOINT_ROLLBACK) );
    sqlite3BtreeEnter(p);
    rc = sqlite3PagerSavepoint(pBt->pPager, op, iSavepoint);
    if( rc==SQLITE_OK ){
      if( iSavepoint<0 && pBt->initiallyEmpty ) pBt->nPage = 0;
      rc = newDatabase(pBt);
      pBt->nPage = get4byte(28 + pBt->pPage1->aData);

      /* The database size was written into the offset 28 of the header
      ** when the transaction started, so we know that the value at offset
      ** 28 is nonzero. */
      assert( pBt->nPage>0 );
    }
    sqlite3BtreeLeave(p);
  }
  return rc;
}

/*

︙
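The get4byte()/put4byte() calls above act on the database-size field of the file header: a 4-byte big-endian page count stored at byte offset 28 of page 1. A minimal standalone sketch of that encoding, using local helper names rather than the SQLite macros:

/* Sketch of a big-endian 32-bit header field, mirroring what get4byte()/
** put4byte() do for the db-size field at offset 28.  read4/write4 are
** illustrative names, not SQLite functions. */
#include <stdio.h>
#include <stdint.h>

static uint32_t read4(const unsigned char *p){
  return ((uint32_t)p[0]<<24) | ((uint32_t)p[1]<<16)
       | ((uint32_t)p[2]<<8)  |  (uint32_t)p[3];
}
static void write4(unsigned char *p, uint32_t v){
  p[0] = (unsigned char)(v>>24);
  p[1] = (unsigned char)(v>>16);
  p[2] = (unsigned char)(v>>8);
  p[3] = (unsigned char)(v);
}

int main(void){
  unsigned char hdr[100] = {0};   /* stand-in for the 100-byte database header */
  write4(&hdr[28], 16);           /* a 16-page database: bytes 00 00 00 10     */
  printf("db size = %u pages\n", read4(&hdr[28]));
  return 0;
}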
Changes to src/where.c.
︙

  for(i=iFrom=0, pLevel=pWInfo->a; i<nTabList; i++, pLevel++){
    WhereCost bestPlan;         /* Most efficient plan seen so far */
    Index *pIdx;                /* Index for FROM table at pTabItem */
    int j;                      /* For looping over FROM tables */
    int bestJ = -1;             /* The value of j */
    Bitmask m;                  /* Bitmask value for j or bestJ */
    int isOptimal;              /* Iterator for optimal/non-optimal search */
    int nUnconstrained;         /* Number tables without INDEXED BY */
    Bitmask notIndexed;         /* Mask of tables that cannot use an index */

    memset(&bestPlan, 0, sizeof(bestPlan));
    bestPlan.rCost = SQLITE_BIG_DBL;

    /* Loop through the remaining entries in the FROM clause to find the
    ** next nested loop. The loop tests all FROM clause entries
    ** either once or twice.

︙
    ** The best strategy is to iterate through table t1 first. However it
    ** is not possible to determine this with a simple greedy algorithm.
    ** However, since the cost of a linear scan through table t2 is the same
    ** as the cost of a linear scan through table t1, a simple greedy
    ** algorithm may choose to use t2 for the outer loop, which is a much
    ** costlier approach.
    */
    nUnconstrained = 0;
    notIndexed = 0;
    for(isOptimal=(iFrom<nTabList-1); isOptimal>=0; isOptimal--){
      Bitmask mask;             /* Mask of tables not yet ready */

      for(j=iFrom, pTabItem=&pTabList->a[j]; j<nTabList; j++, pTabItem++){
        int doNotReorder;    /* True if this table should not be reordered */
        WhereCost sCost;     /* Cost information from best[Virtual]Index() */
        ExprList *pOrderBy;  /* ORDER BY clause for index to optimize */

        doNotReorder = (pTabItem->jointype & (JT_LEFT|JT_CROSS))!=0;
        if( j!=iFrom && doNotReorder ) break;
        m = getMask(pMaskSet, pTabItem->iCursor);
        if( (m & notReady)==0 ){
          if( j==iFrom ) iFrom++;
          continue;
        }
        mask = (isOptimal ? m : notReady);
        pOrderBy = ((i==0 && ppOrderBy )?*ppOrderBy:0);
        if( pTabItem->pIndex==0 ) nUnconstrained++;

        assert( pTabItem->pTab );
#ifndef SQLITE_OMIT_VIRTUALTABLE
        if( IsVirtual(pTabItem->pTab) ){
          sqlite3_index_info **pp = &pWInfo->a[j].pIdxInfo;
          bestVirtualIndex(pParse, pWC, pTabItem, mask, pOrderBy, &sCost, pp);
        }else
#endif
        {
          bestBtreeIndex(pParse, pWC, pTabItem, mask, pOrderBy, &sCost);
        }
        assert( isOptimal || (sCost.used&notReady)==0 );

        /* If an INDEXED BY clause is present, then the plan must use that
        ** index if it uses any index at all */
        assert( pTabItem->pIndex==0
                || (sCost.plan.wsFlags & WHERE_NOT_FULLSCAN)==0
                || sCost.plan.u.pIdx==pTabItem->pIndex );

        if( isOptimal && (sCost.plan.wsFlags & WHERE_NOT_FULLSCAN)==0 ){
          notIndexed |= m;
        }

        /* Conditions under which this table becomes the best so far:
        **
        **   (1) The table must not depend on other tables that have not
        **       yet run.
        **
        **   (2) A full-table-scan plan cannot supercede another plan unless
        **       it is an "optimal" plan as defined above.
        **
        **   (3) All tables have an INDEXED BY clause or this table lacks an
        **       INDEXED BY clause or this table uses the specific
        **       index specified by its INDEXED BY clause.  This rule ensures
        **       that a best-so-far is always selected even if an impossible
        **       combination of INDEXED BY clauses are given.  The error
        **       will be detected and relayed back to the application later.
        **       The NEVER() comes about because rule (2) above prevents
        **       An indexable full-table-scan from reaching rule (3).
        **
        **   (4) The plan cost must be lower than prior plans or else the
        **       cost must be the same and the number of rows must be lower.
        */
        if( (sCost.used&notReady)==0                       /* (1) */
            && (bestJ<0 || (notIndexed&m)!=0               /* (2) */
                || (sCost.plan.wsFlags & WHERE_NOT_FULLSCAN)!=0)
            && (nUnconstrained==0 || pTabItem->pIndex==0   /* (3) */
                || NEVER((sCost.plan.wsFlags & WHERE_NOT_FULLSCAN)!=0))
            && (bestJ<0 || sCost.rCost<bestPlan.rCost      /* (4) */
                || (sCost.rCost<=bestPlan.rCost && sCost.nRow<bestPlan.nRow))
        ){
          WHERETRACE(("... best so far with cost=%g and nRow=%g\n",
                      sCost.rCost, sCost.nRow));
          bestPlan = sCost;
          bestJ = j;
        }
        if( doNotReorder ) break;

︙
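As a rough illustration of condition (1) above (a toy model, not SQLite code): each FROM-clause table owns one bit of a Bitmask, notReady keeps a bit set for every table that has not yet been given a position in the nested loop, and a candidate plan is admissible only when none of the tables it depends on are still marked not-ready.

/* Toy model of the (sCost.used & notReady)==0 admissibility test; the table
** numbering and dependency masks are invented for illustration. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t Bitmask;

int main(void){
  Bitmask notReady = 0x7;   /* tables 0, 1 and 2 are all still unplaced        */
  Bitmask usedT0   = 0x0;   /* the plan for table 0 depends on no other table  */
  Bitmask usedT2   = 0x1;   /* the plan for table 2 needs a value from table 0 */

  printf("t0 admissible now: %s\n", (usedT0 & notReady)==0 ? "yes" : "no");
  printf("t2 admissible now: %s\n", (usedT2 & notReady)==0 ? "yes" : "no");

  notReady &= ~(Bitmask)0x1;            /* place table 0 in the outer loop     */
  printf("t2 admissible after t0 is placed: %s\n",
         (usedT2 & notReady)==0 ? "yes" : "no");
  return 0;
}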
Changes to test/filefmt.test.
︙

    sqlite3 db test.db
    catchsql { SELECT count(*) FROM sqlite_master }
  } {1 {file is encrypted or is not a database}}
}

#-------------------------------------------------------------------------
# The following block of tests - filefmt-2.* - test that versions 3.7.0
# and later can read and write databases that have been modified or created
# by 3.6.23.1 and earlier. The difference is that 3.7.0 stores the size of
# the database in the database file header, whereas 3.6.23.1 always derives
# this from the size of the file.
#
db close
file delete -force test.db

set a_string_counter 1
proc a_string {n} {
  incr ::a_string_counter
  string range [string repeat "${::a_string_counter}." $n] 1 $n
}
sqlite3 db test.db
db func a_string a_string

do_execsql_test filefmt-2.1.1 {
  PRAGMA page_size = 1024;
  PRAGMA auto_vacuum = 0;
  CREATE TABLE t1(a);
  CREATE INDEX i1 ON t1(a);
  INSERT INTO t1 VALUES(a_string(3000));
  CREATE TABLE t2(a);
  INSERT INTO t2 VALUES(1);
} {}
do_test filefmt-2.1.2 { hexio_read test.db 28 4 } {00000009}

do_test filefmt-2.1.3 {
  sql36231 { INSERT INTO t1 VALUES(a_string(3000)) }
} {}

do_execsql_test filefmt-2.1.4 { INSERT INTO t2 VALUES(2) } {}
integrity_check filefmt-2.1.5
do_test filefmt-2.1.6 { hexio_read test.db 28 4 } {00000010}

db close
file delete -force test.db
sqlite3 db test.db
db func a_string a_string

do_execsql_test filefmt-2.2.1 {
  PRAGMA page_size = 1024;
  PRAGMA auto_vacuum = 0;
  CREATE TABLE t1(a);
  CREATE INDEX i1 ON t1(a);
  INSERT INTO t1 VALUES(a_string(3000));
  CREATE TABLE t2(a);
  INSERT INTO t2 VALUES(1);
} {}
do_test filefmt-2.2.2 { hexio_read test.db 28 4 } {00000009}

do_test filefmt-2.2.3 {
  sql36231 { INSERT INTO t1 VALUES(a_string(3000)) }
} {}

do_execsql_test filefmt-2.2.4 {
  PRAGMA integrity_check;
  BEGIN;
    INSERT INTO t2 VALUES(2);
    SAVEPOINT a;
      INSERT INTO t2 VALUES(3);
    ROLLBACK TO a;
} {ok}

integrity_check filefmt-2.2.5
do_execsql_test filefmt-2.2.6 { COMMIT } {}

db close
sqlite3 db test.db
integrity_check filefmt-2.2.7

finish_test
Changes to test/indexedby.test.
︙

#
do_test indexedby-4.1 {
  EQP { SELECT * FROM t1, t2 WHERE a = c }
} {0 0 {TABLE t1} 1 1 {TABLE t2 WITH INDEX i3}}
do_test indexedby-4.2 {
  EQP { SELECT * FROM t1 INDEXED BY i1, t2 WHERE a = c }
} {0 1 {TABLE t2} 1 0 {TABLE t1 WITH INDEX i1}}

do_test indexedby-4.3 {
  catchsql { SELECT * FROM t1 INDEXED BY i1, t2 INDEXED BY i3 WHERE a=c }
} {1 {cannot use index: i1}}
do_test indexedby-4.4 {
  catchsql { SELECT * FROM t2 INDEXED BY i3, t1 INDEXED BY i1 WHERE a=c }
} {1 {cannot use index: i3}}

# Test embedding an INDEXED BY in a CREATE VIEW statement. This block
# also tests that nothing bad happens if an index refered to by
# a CREATE VIEW statement is dropped and recreated.
#
do_test indexedby-5.1 {
  execsql {

︙
Changes to test/pagerfault.test.
︙

      sqlite3 db test.db
      execsql { PRAGMA integrity_check }
    } {ok}
    db close
  }
}

#-------------------------------------------------------------------------
# When a 3.7.0 client opens a write-transaction on a database file that
# has been appended to or truncated by a pre-370 client, it updates
# the db-size in the file header immediately. This test case provokes
# errors during that operation.
#
do_test pagerfault-22-pre1 {
  faultsim_delete_and_reopen
  db func a_string a_string
  execsql {
    PRAGMA page_size = 1024;
    PRAGMA auto_vacuum = 0;
    CREATE TABLE t1(a);
    CREATE INDEX i1 ON t1(a);
    INSERT INTO t1 VALUES(a_string(3000));
    CREATE TABLE t2(a);
    INSERT INTO t2 VALUES(1);
  }
  db close
  sql36231 { INSERT INTO t1 VALUES(a_string(3000)) }
  faultsim_save_and_close
} {}
do_faultsim_test pagerfault-22 -prep {
  faultsim_restore_and_reopen
} -body {
  execsql { INSERT INTO t2 VALUES(2) }
  execsql { SELECT * FROM t2 }
} -test {
  faultsim_test_result {0 {1 2}}
  faultsim_integrity_check
}

finish_test
Changes to test/tester.tcl.
︙

  # Add some info to the output.
  #
  puts "Time: $tail $ms ms"
  show_memstats
}

# Open a new connection on database test.db and execute the SQL script
# supplied as an argument. Before returning, close the new connection and
# restore the 4 byte fields starting at header offsets 28, 92 and 96
# to the values they held before the SQL was executed. This simulates
# a write by a pre-3.7.0 client.
#
proc sql36231 {sql} {
  set B [hexio_read test.db 92 8]
  set A [hexio_read test.db 28 4]
  sqlite3 db36231 test.db
  catch { db36231 func a_string a_string }
  execsql $sql db36231
  db36231 close
  hexio_write test.db 28 $A
  hexio_write test.db 92 $B
  return ""
}

# If the library is compiled with the SQLITE_DEFAULT_AUTOVACUUM macro set
# to non-zero, then set the global variable $AUTOVACUUM to 1.
set AUTOVACUUM $sqlite_options(default_autovacuum)

source $testdir/thread_common.tcl
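At the byte level, sql36231 amounts to snapshotting the 4 bytes at offset 28 and the 8 bytes starting at offset 92 before the write and restoring them afterwards, so the header looks as if an old client had written the file. The following C sketch only illustrates that idea; it is not part of the test harness, and run_sql is a hypothetical placeholder for the actual write.

/* Illustrative only: save and restore the header fields that a pre-3.7.0
** client would never have updated, so the file looks "old" after the write. */
#include <stdio.h>

static void run_sql(const char *path, const char *sql){ (void)path; (void)sql; }

int simulate_pre370_write(const char *path, const char *sql){
  unsigned char at28[4], at92[8];
  FILE *f = fopen(path, "r+b");
  if( f==0 ) return 1;

  /* Snapshot offset 28 (db size) and the two 4-byte fields at 92 and 96. */
  if( fseek(f, 28, SEEK_SET)!=0
   || fread(at28, 1, sizeof(at28), f)!=sizeof(at28)
   || fseek(f, 92, SEEK_SET)!=0
   || fread(at92, 1, sizeof(at92), f)!=sizeof(at92) ){
    fclose(f);
    return 1;
  }
  fclose(f);

  run_sql(path, sql);                   /* the write made by a current library */

  /* Put the saved bytes back, as hexio_write does in the Tcl proc above. */
  f = fopen(path, "r+b");
  if( f==0 ) return 1;
  fseek(f, 28, SEEK_SET);
  fwrite(at28, 1, sizeof(at28), f);
  fseek(f, 92, SEEK_SET);
  fwrite(at92, 1, sizeof(at92), f);
  fclose(f);
  return 0;
}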
Changes to test/where3.test.
︙

do_test where3-2.7 {
  queryplan {
    SELECT * FROM tA, tB, tC LEFT JOIN tD ON dpk=cx WHERE cpk=bx AND apk=cx
  }
} {tB {} tC * tA * tD *}

# Ticket [13f033c865f878953]
# If the outer loop must be a full table scan, do not let ANALYZE trick
# the planner into use a table for the outer loop that might be indexable
# if held until an inner loop.
#
do_test where3-3.0 {
  execsql {
    CREATE TABLE t301(a INTEGER PRIMARY KEY,b,c);
    CREATE INDEX t301c ON t301(c);
    INSERT INTO t301 VALUES(1,2,3);
    CREATE TABLE t302(x, y);
    ANALYZE;
    explain query plan SELECT * FROM t302, t301 WHERE t302.x=5 AND t301.a=t302.y;
  }
} {0 0 {TABLE t302} 1 1 {TABLE t301 USING PRIMARY KEY}}
do_test where3-3.1 {
  execsql {
    explain query plan SELECT * FROM t301, t302 WHERE t302.x=5 AND t301.a=t302.y;
  }
} {0 1 {TABLE t302} 1 0 {TABLE t301 USING PRIMARY KEY}}

# Verify that when there are multiple tables in a join which must be
# full table scans that the query planner attempts put the table with
# the fewest number of output rows as the outer loop.
#
do_test where3-4.0 {
  execsql {
    CREATE TABLE t400(a INTEGER PRIMARY KEY, b, c);
    CREATE TABLE t401(p INTEGER PRIMARY KEY, q, r);
    CREATE TABLE t402(x INTEGER PRIMARY KEY, y, z);
    EXPLAIN QUERY PLAN
    SELECT * FROM t400, t401, t402 WHERE t402.z GLOB 'abc*';
  }
} {0 2 {TABLE t402} 1 0 {TABLE t400} 2 1 {TABLE t401}}
do_test where3-4.1 {
  execsql {
    EXPLAIN QUERY PLAN
    SELECT * FROM t400, t401, t402 WHERE t401.r GLOB 'abc*';
  }
} {0 1 {TABLE t401} 1 0 {TABLE t400} 2 2 {TABLE t402}}
do_test where3-4.2 {
  execsql {
    EXPLAIN QUERY PLAN
    SELECT * FROM t400, t401, t402 WHERE t400.c GLOB 'abc*';
  }
} {0 0 {TABLE t400} 1 1 {TABLE t401} 2 2 {TABLE t402}}

finish_test