Overview
Comment: | Merge trunk changes into the fts4-incr-merge branch. |
---|---|
Downloads: | Tarball | ZIP archive |
Timelines: | family | ancestors | descendants | both | fts4-incr-merge |
Files: | files | file ages | folders |
SHA1: | f61d5fb0281381228eb1a12a233bacae |
User & Date: | drh 2012-03-20 17:04:17.255 |
Context
2012-03-21
14:34 | Add fts4merge3.test, for testing that older versions of FTS4 may interoperate with incr-merge capable versions. (check-in: 903ec5126d user: dan tags: fts4-incr-merge)
2012-03-20
17:04 | Merge trunk changes into the fts4-incr-merge branch. (check-in: f61d5fb028 user: drh tags: fts4-incr-merge)
15:10 | Remove the _SafeInit() entry points from the TCL interface. They have long been no-ops. Removing them completely avoids confusion as to why they don't work. (check-in: 0fb26c7bfa user: drh tags: trunk)
2012-03-17
16:56 | Fix various incorrect and missing comments and other style issues in and around the FTS incremental merge code. (check-in: 7aabb62c8c user: dan tags: fts4-incr-merge)
Changes
Changes to Makefile.in.
︙ | ︙ | |||
930 931 932 933 934 935 936 937 938 939 940 941 942 943 | rm -f *.lo *.la *.o sqlite3$(TEXE) libsqlite3.la rm -f sqlite3.h opcodes.* rm -rf .libs .deps rm -f lemon$(BEXE) lempar.c parse.* sqlite*.tar.gz rm -f mkkeywordhash$(BEXE) keywordhash.h rm -f $(PUBLISH) rm -f *.da *.bb *.bbg gmon.out rm -rf tsrc .target_source rm -f tclsqlite3$(TEXE) rm -f testfixture$(TEXE) test.db rm -f sqlite3.dll sqlite3.lib sqlite3.exp sqlite3.def rm -f sqlite3.c rm -f sqlite3_analyzer$(TEXE) sqlite3_analyzer.c | > | 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 | rm -f *.lo *.la *.o sqlite3$(TEXE) libsqlite3.la rm -f sqlite3.h opcodes.* rm -rf .libs .deps rm -f lemon$(BEXE) lempar.c parse.* sqlite*.tar.gz rm -f mkkeywordhash$(BEXE) keywordhash.h rm -f $(PUBLISH) rm -f *.da *.bb *.bbg gmon.out rm -rf quota2a quota2b quota2c rm -rf tsrc .target_source rm -f tclsqlite3$(TEXE) rm -f testfixture$(TEXE) test.db rm -f sqlite3.dll sqlite3.lib sqlite3.exp sqlite3.def rm -f sqlite3.c rm -f sqlite3_analyzer$(TEXE) sqlite3_analyzer.c |
︙ | ︙ |
Changes to Makefile.msc.
︙ | ︙ | |||
981 982 983 984 985 986 987 988 989 990 991 992 993 994 | del /Q *.lo *.ilk *.lib *.obj *.pdb sqlite3.exe libsqlite3.lib del /Q *.da *.bb *.bbg gmon.out del /Q sqlite3.h opcodes.c opcodes.h del /Q lemon.exe lempar.c parse.* del /Q mkkeywordhash.exe keywordhash.h -rmdir /Q/S .deps -rmdir /Q/S .libs -rmdir /Q/S tsrc del /Q .target_source del /Q tclsqlite3.exe del /Q testfixture.exe testfixture.exp test.db del /Q sqlite3.dll sqlite3.lib sqlite3.exp sqlite3.def del /Q sqlite3.c del /Q sqlite3_analyzer.exe sqlite3_analyzer.exp sqlite3_analyzer.c | > > > | 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 | del /Q *.lo *.ilk *.lib *.obj *.pdb sqlite3.exe libsqlite3.lib del /Q *.da *.bb *.bbg gmon.out del /Q sqlite3.h opcodes.c opcodes.h del /Q lemon.exe lempar.c parse.* del /Q mkkeywordhash.exe keywordhash.h -rmdir /Q/S .deps -rmdir /Q/S .libs -rmdir /Q/S quota2a -rmdir /Q/S quota2b -rmdir /Q/S quota2c -rmdir /Q/S tsrc del /Q .target_source del /Q tclsqlite3.exe del /Q testfixture.exe testfixture.exp test.db del /Q sqlite3.dll sqlite3.lib sqlite3.exp sqlite3.def del /Q sqlite3.c del /Q sqlite3_analyzer.exe sqlite3_analyzer.exp sqlite3_analyzer.c |
︙ | ︙ |
Changes to Makefile.vxworks.
︙ | ︙ | |||
653 654 655 656 657 658 659 660 661 662 663 | ./testfixture$(EXE) $(TOP)/test/loadext.test clean: rm -f *.o sqlite3$(EXE) libsqlite3.a sqlite3.h opcodes.* rm -f lemon lempar.c parse.* sqlite*.tar.gz mkkeywordhash keywordhash.h rm -f $(PUBLISH) rm -f *.da *.bb *.bbg gmon.out rm -rf tsrc target_source rm -f testloadext.dll libtestloadext.so rm -f sqlite3.c fts?amal.c tclsqlite3.c rm -f $(SHPREFIX)sqlite3.$(SO) | > | 653 654 655 656 657 658 659 660 661 662 663 664 | ./testfixture$(EXE) $(TOP)/test/loadext.test clean: rm -f *.o sqlite3$(EXE) libsqlite3.a sqlite3.h opcodes.* rm -f lemon lempar.c parse.* sqlite*.tar.gz mkkeywordhash keywordhash.h rm -f $(PUBLISH) rm -f *.da *.bb *.bbg gmon.out rm -rf quota2a quota2b quota2c rm -rf tsrc target_source rm -f testloadext.dll libtestloadext.so rm -f sqlite3.c fts?amal.c tclsqlite3.c rm -f $(SHPREFIX)sqlite3.$(SO) |
Changes to ext/fts3/fts3.c.
︙ | ︙ | |||
737 738 739 740 741 742 743 | ** The pointer returned points to memory obtained from sqlite3_malloc(). It ** is the callers responsibility to call sqlite3_free() to release this ** memory. */ static char *fts3QuoteId(char const *zInput){ int nRet; char *zRet; | | | 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 | ** The pointer returned points to memory obtained from sqlite3_malloc(). It ** is the callers responsibility to call sqlite3_free() to release this ** memory. */ static char *fts3QuoteId(char const *zInput){ int nRet; char *zRet; nRet = 2 + (int)strlen(zInput)*2 + 1; zRet = sqlite3_malloc(nRet); if( zRet ){ int i; char *z = zRet; *(z++) = '"'; for(i=0; zInput[i]; i++){ if( zInput[i]=='"' ) *(z++) = '"'; |
︙ | ︙ | |||
993 994 995 996 997 998 999 | /* Loop through the returned columns. Set nStr to the number of bytes of ** space required to store a copy of each column name, including the ** nul-terminator byte. */ nCol = sqlite3_column_count(pStmt); for(i=0; i<nCol; i++){ const char *zCol = sqlite3_column_name(pStmt, i); | | | | 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 | /* Loop through the returned columns. Set nStr to the number of bytes of ** space required to store a copy of each column name, including the ** nul-terminator byte. */ nCol = sqlite3_column_count(pStmt); for(i=0; i<nCol; i++){ const char *zCol = sqlite3_column_name(pStmt, i); nStr += (int)strlen(zCol) + 1; } /* Allocate and populate the array to return. */ azCol = (const char **)sqlite3_malloc(sizeof(char *) * nCol + nStr); if( azCol==0 ){ rc = SQLITE_NOMEM; }else{ char *p = (char *)&azCol[nCol]; for(i=0; i<nCol; i++){ const char *zCol = sqlite3_column_name(pStmt, i); int n = (int)strlen(zCol)+1; memcpy(p, zCol, n); azCol[i] = p; p += n; } } sqlite3_finalize(pStmt); |
︙ | ︙ | |||
1219 1220 1221 1222 1223 1224 1225 | /* If a languageid= option was specified, remove the language id ** column from the aCol[] array. */ if( rc==SQLITE_OK && zLanguageid ){ int j; for(j=0; j<nCol; j++){ if( sqlite3_stricmp(zLanguageid, aCol[j])==0 ){ | > | | 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 | /* If a languageid= option was specified, remove the language id ** column from the aCol[] array. */ if( rc==SQLITE_OK && zLanguageid ){ int j; for(j=0; j<nCol; j++){ if( sqlite3_stricmp(zLanguageid, aCol[j])==0 ){ int k; for(k=j; k<nCol; k++) aCol[k] = aCol[k+1]; nCol--; break; } } } } } |
︙ | ︙ | |||
2326 2327 2328 2329 2330 2331 2332 | fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i2); fts3PoslistCopy(&p, &p2); fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); } } *paOut = aOut; | | | 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 | fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i2); fts3PoslistCopy(&p, &p2); fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); } } *paOut = aOut; *pnOut = (int)(p-aOut); assert( *pnOut<=n1+n2+FTS3_VARINT_MAX-1 ); return SQLITE_OK; } /* ** This function does a "phrase" merge of two doclists. In a phrase merge, ** the output contains a copy of each position from the right-hand input |
︙ | ︙ | |||
2390 2391 2392 2393 2394 2395 2396 | fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1); }else{ fts3PoslistCopy(0, &p2); fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); } } | | | 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 | fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1); }else{ fts3PoslistCopy(0, &p2); fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); } } *pnRight = (int)(p - aOut); } /* ** Argument pList points to a position list nList bytes in size. This ** function checks to see if the position list contains any entries for ** a token in position 0 (of any column). If so, it writes argument iDelta ** to the output buffer pOut, followed by a position list consisting only |
︙ | ︙ | |||
3771 3772 3773 3774 3775 3776 3777 | char *p1 = aPoslist; char *p2 = aOut; assert( iPrev>=0 ); fts3PoslistPhraseMerge(&aOut, iToken-iPrev, 0, 1, &p1, &p2); sqlite3_free(aPoslist); aPoslist = pList; | | | 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 | char *p1 = aPoslist; char *p2 = aOut; assert( iPrev>=0 ); fts3PoslistPhraseMerge(&aOut, iToken-iPrev, 0, 1, &p1, &p2); sqlite3_free(aPoslist); aPoslist = pList; nPoslist = (int)(aOut - aPoslist); if( nPoslist==0 ){ sqlite3_free(aPoslist); pPhrase->doclist.pList = 0; pPhrase->doclist.nList = 0; return SQLITE_OK; } } |
︙ | ︙ | |||
3815 3816 3817 3818 3819 3820 3821 | sqlite3_free(aPoslist); return SQLITE_NOMEM; } pPhrase->doclist.pList = aOut; if( fts3PoslistPhraseMerge(&aOut, nDistance, 0, 1, &p1, &p2) ){ pPhrase->doclist.bFreeList = 1; | | | 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 | sqlite3_free(aPoslist); return SQLITE_NOMEM; } pPhrase->doclist.pList = aOut; if( fts3PoslistPhraseMerge(&aOut, nDistance, 0, 1, &p1, &p2) ){ pPhrase->doclist.bFreeList = 1; pPhrase->doclist.nList = (int)(aOut - pPhrase->doclist.pList); }else{ sqlite3_free(aOut); pPhrase->doclist.pList = 0; pPhrase->doclist.nList = 0; } sqlite3_free(aPoslist); } |
︙ | ︙ | |||
3911 3912 3913 3914 3915 3916 3917 | iDocid += (iMul * iDelta); pNext = pDocid; fts3PoslistCopy(0, &pDocid); while( pDocid<pEnd && *pDocid==0 ) pDocid++; iMul = (bDescIdx ? -1 : 1); } | | | | 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 | iDocid += (iMul * iDelta); pNext = pDocid; fts3PoslistCopy(0, &pDocid); while( pDocid<pEnd && *pDocid==0 ) pDocid++; iMul = (bDescIdx ? -1 : 1); } *pnList = (int)(pEnd - pNext); *ppIter = pNext; *piDocid = iDocid; }else{ int iMul = (bDescIdx ? -1 : 1); sqlite3_int64 iDelta; fts3GetReverseVarint(&p, aDoclist, &iDelta); *piDocid -= (iMul * iDelta); if( p==aDoclist ){ *pbEof = 1; }else{ char *pSave = p; fts3ReversePoslist(aDoclist, &p); *pnList = (int)(pSave - p); } *ppIter = p; } } /* ** Attempt to move the phrase iterator to point to the next matching docid. |
︙ | ︙ | |||
3985 3986 3987 3988 3989 3990 3991 | if( pTab->bDescIdx==0 || pDL->pNextDocid==0 ){ pDL->iDocid += iDelta; }else{ pDL->iDocid -= iDelta; } pDL->pList = pIter; fts3PoslistCopy(0, &pIter); | | | 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 | if( pTab->bDescIdx==0 || pDL->pNextDocid==0 ){ pDL->iDocid += iDelta; }else{ pDL->iDocid -= iDelta; } pDL->pList = pIter; fts3PoslistCopy(0, &pIter); pDL->nList = (int)(pIter - pDL->pList); /* pIter now points just past the 0x00 that terminates the position- ** list for document pDL->iDocid. However, if this position-list was ** edited in place by fts3EvalNearTrim(), then pIter may not actually ** point to the start of the next docid value. The following line deals ** with this case by advancing pIter past the zero-padding added by ** fts3EvalNearTrim(). */ |
︙ | ︙ | |||
4343 4344 4345 4346 4347 4348 4349 | rc = SQLITE_NOMEM; }else{ int ii; Fts3TokenAndCost *pTC = aTC; Fts3Expr **ppOr = apOr; fts3EvalTokenCosts(pCsr, 0, pCsr->pExpr, &pTC, &ppOr, &rc); | | | | 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 | rc = SQLITE_NOMEM; }else{ int ii; Fts3TokenAndCost *pTC = aTC; Fts3Expr **ppOr = apOr; fts3EvalTokenCosts(pCsr, 0, pCsr->pExpr, &pTC, &ppOr, &rc); nToken = (int)(pTC-aTC); nOr = (int)(ppOr-apOr); if( rc==SQLITE_OK ){ rc = fts3EvalSelectDeferred(pCsr, 0, aTC, nToken); for(ii=0; rc==SQLITE_OK && ii<nOr; ii++){ rc = fts3EvalSelectDeferred(pCsr, apOr[ii], aTC, nToken); } } |
︙ | ︙ | |||
4416 4417 4418 4419 4420 4421 4422 | assert( pPhrase->doclist.pList ); p2 = pOut = pPhrase->doclist.pList; res = fts3PoslistNearMerge( &pOut, aTmp, nParam1, nParam2, paPoslist, &p2 ); if( res ){ | | | 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 | assert( pPhrase->doclist.pList ); p2 = pOut = pPhrase->doclist.pList; res = fts3PoslistNearMerge( &pOut, aTmp, nParam1, nParam2, paPoslist, &p2 ); if( res ){ nNew = (int)(pOut - pPhrase->doclist.pList) - 1; assert( pPhrase->doclist.pList[nNew]=='\0' ); assert( nNew<=pPhrase->doclist.nList && nNew>0 ); memset(&pPhrase->doclist.pList[nNew], 0, pPhrase->doclist.nList - nNew); pPhrase->doclist.nList = nNew; *paPoslist = pPhrase->doclist.pList; *pnToken = pPhrase->nToken; } |
︙ | ︙ |
Changes to ext/fts3/fts3_aux.c.
︙ | ︙ | |||
75 76 77 78 79 80 81 | *pzErr = sqlite3_mprintf( "wrong number of arguments to fts4aux constructor" ); return SQLITE_ERROR; } zDb = argv[1]; | | | | 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 | *pzErr = sqlite3_mprintf( "wrong number of arguments to fts4aux constructor" ); return SQLITE_ERROR; } zDb = argv[1]; nDb = (int)strlen(zDb); zFts3 = argv[3]; nFts3 = (int)strlen(zFts3); rc = sqlite3_declare_vtab(db, FTS3_TERMS_SCHEMA); if( rc!=SQLITE_OK ) return rc; nByte = sizeof(Fts3auxTable) + sizeof(Fts3Table) + nDb + nFts3 + 2; p = (Fts3auxTable *)sqlite3_malloc(nByte); if( !p ) return SQLITE_NOMEM; |
︙ | ︙ |
Changes to ext/fts3/fts3_porter.c.
︙ | ︙ | |||
626 627 628 629 630 631 632 633 634 635 636 637 638 639 | static const sqlite3_tokenizer_module porterTokenizerModule = { 0, porterCreate, porterDestroy, porterOpen, porterClose, porterNext, }; /* ** Allocate a new porter tokenizer. Return a pointer to the new ** tokenizer in *ppModule */ void sqlite3Fts3PorterTokenizerModule( | > | 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 | static const sqlite3_tokenizer_module porterTokenizerModule = { 0, porterCreate, porterDestroy, porterOpen, porterClose, porterNext, 0 }; /* ** Allocate a new porter tokenizer. Return a pointer to the new ** tokenizer in *ppModule */ void sqlite3Fts3PorterTokenizerModule( |
︙ | ︙ |
Changes to ext/fts3/fts3_tokenizer1.c.
︙ | ︙ | |||
214 215 216 217 218 219 220 221 222 223 224 225 226 227 | static const sqlite3_tokenizer_module simpleTokenizerModule = { 0, simpleCreate, simpleDestroy, simpleOpen, simpleClose, simpleNext, }; /* ** Allocate a new simple tokenizer. Return a pointer to the new ** tokenizer in *ppModule */ void sqlite3Fts3SimpleTokenizerModule( | > | 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 | static const sqlite3_tokenizer_module simpleTokenizerModule = { 0, simpleCreate, simpleDestroy, simpleOpen, simpleClose, simpleNext, 0, }; /* ** Allocate a new simple tokenizer. Return a pointer to the new ** tokenizer in *ppModule */ void sqlite3Fts3SimpleTokenizerModule( |
︙ | ︙ |
Changes to ext/rtree/rtree.c.
︙ | ︙ | |||
3052 3053 3054 3055 3056 3057 3058 | *pzErr = sqlite3_mprintf("%s", aErrMsg[iErr]); return SQLITE_ERROR; } sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1); /* Allocate the sqlite3_vtab structure */ | | | | 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 | *pzErr = sqlite3_mprintf("%s", aErrMsg[iErr]); return SQLITE_ERROR; } sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1); /* Allocate the sqlite3_vtab structure */ nDb = (int)strlen(argv[1]); nName = (int)strlen(argv[2]); pRtree = (Rtree *)sqlite3_malloc(sizeof(Rtree)+nDb+nName+2); if( !pRtree ){ return SQLITE_NOMEM; } memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2); pRtree->nBusy = 1; pRtree->base.pModule = &rtreeModule; |
︙ | ︙ | |||
3148 3149 3150 3151 3152 3153 3154 | char zCell[512]; int nCell = 0; RtreeCell cell; int jj; nodeGetCell(&tree, &node, ii, &cell); sqlite3_snprintf(512-nCell,&zCell[nCell],"%lld", cell.iRowid); | | | | 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 | char zCell[512]; int nCell = 0; RtreeCell cell; int jj; nodeGetCell(&tree, &node, ii, &cell); sqlite3_snprintf(512-nCell,&zCell[nCell],"%lld", cell.iRowid); nCell = (int)strlen(zCell); for(jj=0; jj<tree.nDim*2; jj++){ sqlite3_snprintf(512-nCell,&zCell[nCell]," %f",(double)cell.aCoord[jj].f); nCell = (int)strlen(zCell); } if( zText ){ char *zTextNew = sqlite3_mprintf("%s {%s}", zText, zCell); sqlite3_free(zText); zText = zTextNew; }else{ |
︙ | ︙ |
Changes to main.mk.
︙ | ︙ | |||
597 598 599 600 601 602 603 604 605 606 607 608 609 610 | clean: rm -f *.o sqlite3 sqlite3.exe libsqlite3.a sqlite3.h opcodes.* rm -f lemon lemon.exe lempar.c parse.* sqlite*.tar.gz rm -f mkkeywordhash mkkeywordhash.exe keywordhash.h rm -f $(PUBLISH) rm -f *.da *.bb *.bbg gmon.out rm -rf tsrc target_source rm -f testloadext.dll libtestloadext.so rm -f amalgamation-testfixture amalgamation-testfixture.exe rm -f fts3-testfixture fts3-testfixture.exe rm -f testfixture testfixture.exe rm -f threadtest3 threadtest3.exe rm -f sqlite3.c fts?amal.c tclsqlite3.c | > | 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 | clean: rm -f *.o sqlite3 sqlite3.exe libsqlite3.a sqlite3.h opcodes.* rm -f lemon lemon.exe lempar.c parse.* sqlite*.tar.gz rm -f mkkeywordhash mkkeywordhash.exe keywordhash.h rm -f $(PUBLISH) rm -f *.da *.bb *.bbg gmon.out rm -rf quota2a quota2b quota2c rm -rf tsrc target_source rm -f testloadext.dll libtestloadext.so rm -f amalgamation-testfixture amalgamation-testfixture.exe rm -f fts3-testfixture fts3-testfixture.exe rm -f testfixture testfixture.exe rm -f threadtest3 threadtest3.exe rm -f sqlite3.c fts?amal.c tclsqlite3.c |
︙ | ︙ |
Changes to src/analyze.c.
︙ | ︙ | |||
928 929 930 931 932 933 934 935 936 937 938 939 940 941 | sqlite3_stmt *pStmt = 0; /* An SQL statement being run */ char *zSql; /* Text of the SQL statement */ Index *pPrevIdx = 0; /* Previous index in the loop */ int idx = 0; /* slot in pIdx->aSample[] for next sample */ int eType; /* Datatype of a sample */ IndexSample *pSample; /* A slot in pIdx->aSample[] */ if( !sqlite3FindTable(db, "sqlite_stat3", zDb) ){ return SQLITE_OK; } zSql = sqlite3MPrintf(db, "SELECT idx,count(*) FROM %Q.sqlite_stat3" " GROUP BY idx", zDb); | > | 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 | sqlite3_stmt *pStmt = 0; /* An SQL statement being run */ char *zSql; /* Text of the SQL statement */ Index *pPrevIdx = 0; /* Previous index in the loop */ int idx = 0; /* slot in pIdx->aSample[] for next sample */ int eType; /* Datatype of a sample */ IndexSample *pSample; /* A slot in pIdx->aSample[] */ assert( db->lookaside.bEnabled==0 ); if( !sqlite3FindTable(db, "sqlite_stat3", zDb) ){ return SQLITE_OK; } zSql = sqlite3MPrintf(db, "SELECT idx,count(*) FROM %Q.sqlite_stat3" " GROUP BY idx", zDb); |
︙ | ︙ | |||
954 955 956 957 958 959 960 | zIndex = (char *)sqlite3_column_text(pStmt, 0); if( zIndex==0 ) continue; nSample = sqlite3_column_int(pStmt, 1); pIdx = sqlite3FindIndex(db, zIndex, zDb); if( pIdx==0 ) continue; assert( pIdx->nSample==0 ); pIdx->nSample = nSample; | | | 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 | zIndex = (char *)sqlite3_column_text(pStmt, 0); if( zIndex==0 ) continue; nSample = sqlite3_column_int(pStmt, 1); pIdx = sqlite3FindIndex(db, zIndex, zDb); if( pIdx==0 ) continue; assert( pIdx->nSample==0 ); pIdx->nSample = nSample; pIdx->aSample = sqlite3DbMallocZero(db, nSample*sizeof(IndexSample)); pIdx->avgEq = pIdx->aiRowEst[1]; if( pIdx->aSample==0 ){ db->mallocFailed = 1; sqlite3_finalize(pStmt); return SQLITE_NOMEM; } } |
︙ | ︙ | |||
1027 1028 1029 1030 1031 1032 1033 | sqlite3_column_text(pStmt, 4) ); int n = z ? sqlite3_column_bytes(pStmt, 4) : 0; pSample->nByte = n; if( n < 1){ pSample->u.z = 0; }else{ | | | 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 | sqlite3_column_text(pStmt, 4) ); int n = z ? sqlite3_column_bytes(pStmt, 4) : 0; pSample->nByte = n; if( n < 1){ pSample->u.z = 0; }else{ pSample->u.z = sqlite3DbMallocRaw(db, n); if( pSample->u.z==0 ){ db->mallocFailed = 1; sqlite3_finalize(pStmt); return SQLITE_NOMEM; } memcpy(pSample->u.z, z, n); } |
︙ | ︙ | |||
1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 | sqlite3DbFree(db, zSql); } /* Load the statistics from the sqlite_stat3 table. */ #ifdef SQLITE_ENABLE_STAT3 if( rc==SQLITE_OK ){ rc = loadStat3(db, sInfo.zDatabase); } #endif if( rc==SQLITE_NOMEM ){ db->mallocFailed = 1; } return rc; } #endif /* SQLITE_OMIT_ANALYZE */ | > > > | 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 | sqlite3DbFree(db, zSql); } /* Load the statistics from the sqlite_stat3 table. */ #ifdef SQLITE_ENABLE_STAT3 if( rc==SQLITE_OK ){ int lookasideEnabled = db->lookaside.bEnabled; db->lookaside.bEnabled = 0; rc = loadStat3(db, sInfo.zDatabase); db->lookaside.bEnabled = lookasideEnabled; } #endif if( rc==SQLITE_NOMEM ){ db->mallocFailed = 1; } return rc; } #endif /* SQLITE_OMIT_ANALYZE */ |
Changes to src/main.c.
︙ | ︙ | |||
2700 2701 2702 2703 2704 2705 2706 | } /* ** Invoke the xFileControl method on a particular database. */ int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, void *pArg){ int rc = SQLITE_ERROR; | > | | < < < < < < < < | | | | | | | | | | | | | | | | | < | 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 | } /* ** Invoke the xFileControl method on a particular database. */ int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, void *pArg){ int rc = SQLITE_ERROR; Btree *pBtree; sqlite3_mutex_enter(db->mutex); pBtree = sqlite3DbNameToBtree(db, zDbName); if( pBtree ){ Pager *pPager; sqlite3_file *fd; sqlite3BtreeEnter(pBtree); pPager = sqlite3BtreePager(pBtree); assert( pPager!=0 ); fd = sqlite3PagerFile(pPager); assert( fd!=0 ); if( op==SQLITE_FCNTL_FILE_POINTER ){ *(sqlite3_file**)pArg = fd; rc = SQLITE_OK; }else if( fd->pMethods ){ rc = sqlite3OsFileControl(fd, op, pArg); }else{ rc = SQLITE_NOTFOUND; } sqlite3BtreeLeave(pBtree); } sqlite3_mutex_leave(db->mutex); return rc; } /* ** Interface to the testing logic. |
︙ | ︙ | |||
3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 | const char *z = sqlite3_uri_parameter(zFilename, zParam); sqlite3_int64 v; if( z && sqlite3Atoi64(z, &v, sqlite3Strlen30(z), SQLITE_UTF8)==SQLITE_OK ){ bDflt = v; } return bDflt; } /* ** Return the filename of the database associated with a database ** connection. */ const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName){ | > > > > > > > > > > > > > > > < < | | | | > > > > > > | | 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 | const char *z = sqlite3_uri_parameter(zFilename, zParam); sqlite3_int64 v; if( z && sqlite3Atoi64(z, &v, sqlite3Strlen30(z), SQLITE_UTF8)==SQLITE_OK ){ bDflt = v; } return bDflt; } /* ** Return the Btree pointer identified by zDbName. Return NULL if not found. */ Btree *sqlite3DbNameToBtree(sqlite3 *db, const char *zDbName){ int i; for(i=0; i<db->nDb; i++){ if( db->aDb[i].pBt && (zDbName==0 || sqlite3StrICmp(zDbName, db->aDb[i].zName)==0) ){ return db->aDb[i].pBt; } } return 0; } /* ** Return the filename of the database associated with a database ** connection. */ const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName){ Btree *pBt = sqlite3DbNameToBtree(db, zDbName); return pBt ? sqlite3BtreeGetFilename(pBt) : 0; } /* ** Return 1 if database is read-only or 0 if read/write. Return -1 if ** no such database exists. */ int sqlite3_db_readonly(sqlite3 *db, const char *zDbName){ Btree *pBt = sqlite3DbNameToBtree(db, zDbName); return pBt ? sqlite3PagerIsreadonly(sqlite3BtreePager(pBt)) : -1; } |
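The two main.c hunks above introduce the internal helper sqlite3DbNameToBtree() and rewrite sqlite3_file_control(), sqlite3_db_filename() and the new sqlite3_db_readonly() in terms of it. As a minimal sketch (not part of the check-in; the "main" database name and the printing are illustrative assumptions), the refactored public entry points can be exercised like this:

#include <stdio.h>
#include "sqlite3.h"

/* Fetch the low-level sqlite3_file and the on-disk path of the "main"
** database.  SQLITE_FCNTL_FILE_POINTER is handled directly inside the
** refactored sqlite3_file_control(), which stores an sqlite3_file* in pArg. */
static void show_main_db(sqlite3 *db){
  sqlite3_file *fd = 0;
  const char *zFile;
  if( sqlite3_file_control(db, "main", SQLITE_FCNTL_FILE_POINTER, &fd)==SQLITE_OK ){
    printf("sqlite3_file handle: %p\n", (void*)fd);
  }
  zFile = sqlite3_db_filename(db, "main");
  printf("database file: %s\n", (zFile && zFile[0]) ? zFile : "(none)");
}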
Changes to src/sqlite.h.in.
︙ | ︙ | |||
4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 | ** ^The filename returned by this function is the output of the ** xFullPathname method of the [VFS]. ^In other words, the filename ** will be an absolute pathname, even if the filename used ** to open the database originally was a URI or relative pathname. */ const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName); /* ** CAPI3REF: Find the next prepared statement ** ** ^This interface returns a pointer to the next [prepared statement] after ** pStmt associated with the [database connection] pDb. ^If pStmt is NULL ** then this interface returns a pointer to the first prepared statement ** associated with the database connection pDb. ^If no prepared statement | > > > > > > > > > | 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 | ** ^The filename returned by this function is the output of the ** xFullPathname method of the [VFS]. ^In other words, the filename ** will be an absolute pathname, even if the filename used ** to open the database originally was a URI or relative pathname. */ const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName); /* ** CAPI3REF: Determine if a database is read-only ** ** ^The sqlite3_db_readonly(D,N) interface returns 1 if the database N ** of connection D is read-only, 0 if it is read/write, or -1 if N is not ** the name of a database on connection D. */ int sqlite3_db_readonly(sqlite3 *db, const char *zDbName); /* ** CAPI3REF: Find the next prepared statement ** ** ^This interface returns a pointer to the next [prepared statement] after ** pStmt associated with the [database connection] pDb. ^If pStmt is NULL ** then this interface returns a pointer to the first prepared statement ** associated with the database connection pDb. ^If no prepared statement |
︙ | ︙ |
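The sqlite.h.in hunk above adds the header documentation for the new sqlite3_db_readonly() interface. A small usage sketch that follows that documentation (the helper name and messages are assumptions for illustration only):

#include <stdio.h>
#include "sqlite3.h"

/* Report whether the named database on connection db is writable.
** Per the documentation added above: 1 = read-only, 0 = read/write,
** -1 = no attached database with that name. */
static void report_readonly(sqlite3 *db, const char *zDbName){
  switch( sqlite3_db_readonly(db, zDbName) ){
    case 1:  printf("%s: read-only\n", zDbName);         break;
    case 0:  printf("%s: read/write\n", zDbName);        break;
    default: printf("%s: no such database\n", zDbName);  break;
  }
}

The test1.c and pager1.test hunks later in this check-in exercise the same interface through the sqlite3_db_readonly Tcl test command.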
Changes to src/sqliteInt.h.
︙ | ︙ | |||
2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 | void sqlite3AddCheckConstraint(Parse*, Expr*); void sqlite3AddColumnType(Parse*,Token*); void sqlite3AddDefaultValue(Parse*,ExprSpan*); void sqlite3AddCollateType(Parse*, Token*); void sqlite3EndTable(Parse*,Token*,Token*,Select*); int sqlite3ParseUri(const char*,const char*,unsigned int*, sqlite3_vfs**,char**,char **); int sqlite3CodeOnce(Parse *); Bitvec *sqlite3BitvecCreate(u32); int sqlite3BitvecTest(Bitvec*, u32); int sqlite3BitvecSet(Bitvec*, u32); void sqlite3BitvecClear(Bitvec*, u32, void*); void sqlite3BitvecDestroy(Bitvec*); | > | 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 | void sqlite3AddCheckConstraint(Parse*, Expr*); void sqlite3AddColumnType(Parse*,Token*); void sqlite3AddDefaultValue(Parse*,ExprSpan*); void sqlite3AddCollateType(Parse*, Token*); void sqlite3EndTable(Parse*,Token*,Token*,Select*); int sqlite3ParseUri(const char*,const char*,unsigned int*, sqlite3_vfs**,char**,char **); Btree *sqlite3DbNameToBtree(sqlite3*,const char*); int sqlite3CodeOnce(Parse *); Bitvec *sqlite3BitvecCreate(u32); int sqlite3BitvecTest(Bitvec*, u32); int sqlite3BitvecSet(Bitvec*, u32); void sqlite3BitvecClear(Bitvec*, u32, void*); void sqlite3BitvecDestroy(Bitvec*); |
︙ | ︙ |
Changes to src/tclsqlite.c.
︙ | ︙ | |||
3104 3105 3106 3107 3108 3109 3110 | */ Tcl_CreateObjCommand(interp, "sqlite", (Tcl_ObjCmdProc*)DbMain, 0, 0); #endif return TCL_OK; } EXTERN int Tclsqlite3_Init(Tcl_Interp *interp){ return Sqlite3_Init(interp); } | < < < < > > > > < < < < | 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 | */ Tcl_CreateObjCommand(interp, "sqlite", (Tcl_ObjCmdProc*)DbMain, 0, 0); #endif return TCL_OK; } EXTERN int Tclsqlite3_Init(Tcl_Interp *interp){ return Sqlite3_Init(interp); } EXTERN int Sqlite3_Unload(Tcl_Interp *interp, int flags){ return TCL_OK; } EXTERN int Tclsqlite3_Unload(Tcl_Interp *interp, int flags){ return TCL_OK; } /* Because it accesses the file-system and uses persistent state, SQLite ** is not considered appropriate for safe interpreters. Hence, we deliberately ** omit the _SafeInit() interfaces. */ #ifndef SQLITE_3_SUFFIX_ONLY int Sqlite_Init(Tcl_Interp *interp){ return Sqlite3_Init(interp); } int Tclsqlite_Init(Tcl_Interp *interp){ return Sqlite3_Init(interp); } int Sqlite_Unload(Tcl_Interp *interp, int flags){ return TCL_OK; } int Tclsqlite_Unload(Tcl_Interp *interp, int flags){ return TCL_OK; } #endif #ifdef TCLSH /***************************************************************************** ** All of the code that follows is used to build standalone TCL interpreters ** that are statically linked with SQLite. Enable these by compiling ** with -DTCLSH=n where n can be 1 or 2. An n of 1 generates a standard |
︙ | ︙ |
Changes to src/test1.c.
︙ | ︙ | |||
4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 | return TCL_ERROR; } if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; zDbName = Tcl_GetString(objv[2]); Tcl_AppendResult(interp, sqlite3_db_filename(db, zDbName), (void*)0); return TCL_OK; } /* ** Usage: sqlite3_soft_heap_limit ?N? ** ** Query or set the soft heap limit for the current thread. The ** limit is only changed if the N is present. The previous limit ** is returned. | > > > > > > > > > > > > > > > > > > > > > > > > | 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 | return TCL_ERROR; } if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; zDbName = Tcl_GetString(objv[2]); Tcl_AppendResult(interp, sqlite3_db_filename(db, zDbName), (void*)0); return TCL_OK; } /* ** Usage: sqlite3_db_readonly DB DBNAME ** ** Return 1 or 0 if DBNAME is readonly or not. Return -1 if DBNAME does ** not exist. */ static int test_db_readonly( void * clientData, Tcl_Interp *interp, int objc, Tcl_Obj *CONST objv[] ){ sqlite3 *db; const char *zDbName; if( objc!=3 ){ Tcl_WrongNumArgs(interp, 1, objv, "DB DBNAME"); return TCL_ERROR; } if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; zDbName = Tcl_GetString(objv[2]); Tcl_SetObjResult(interp, Tcl_NewIntObj(sqlite3_db_readonly(db, zDbName))); return TCL_OK; } /* ** Usage: sqlite3_soft_heap_limit ?N? ** ** Query or set the soft heap limit for the current thread. The ** limit is only changed if the N is present. The previous limit ** is returned. |
︙ | ︙ | |||
6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 | { "sqlite3_stmt_readonly", test_stmt_readonly ,0 }, { "sqlite3_stmt_busy", test_stmt_busy ,0 }, { "uses_stmt_journal", uses_stmt_journal ,0 }, { "sqlite3_release_memory", test_release_memory, 0}, { "sqlite3_db_release_memory", test_db_release_memory, 0}, { "sqlite3_db_filename", test_db_filename, 0}, { "sqlite3_soft_heap_limit", test_soft_heap_limit, 0}, { "sqlite3_thread_cleanup", test_thread_cleanup, 0}, { "sqlite3_pager_refcounts", test_pager_refcounts, 0}, { "sqlite3_load_extension", test_load_extension, 0}, { "sqlite3_enable_load_extension", test_enable_load, 0}, { "sqlite3_extended_result_codes", test_extended_result_codes, 0}, | > | 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 | { "sqlite3_stmt_readonly", test_stmt_readonly ,0 }, { "sqlite3_stmt_busy", test_stmt_busy ,0 }, { "uses_stmt_journal", uses_stmt_journal ,0 }, { "sqlite3_release_memory", test_release_memory, 0}, { "sqlite3_db_release_memory", test_db_release_memory, 0}, { "sqlite3_db_filename", test_db_filename, 0}, { "sqlite3_db_readonly", test_db_readonly, 0}, { "sqlite3_soft_heap_limit", test_soft_heap_limit, 0}, { "sqlite3_thread_cleanup", test_thread_cleanup, 0}, { "sqlite3_pager_refcounts", test_pager_refcounts, 0}, { "sqlite3_load_extension", test_load_extension, 0}, { "sqlite3_enable_load_extension", test_enable_load, 0}, { "sqlite3_extended_result_codes", test_extended_result_codes, 0}, |
︙ | ︙ |
Changes to src/test6.c.
︙ | ︙ | |||
471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 | int nName = strlen(zName); int nCrashFile = strlen(zCrashFile); if( nCrashFile>0 && zCrashFile[nCrashFile-1]=='*' ){ nCrashFile--; if( nName>nCrashFile ) nName = nCrashFile; } if( nName==nCrashFile && 0==memcmp(zName, zCrashFile, nName) ){ if( (--g.iCrash)==0 ) isCrash = 1; } return writeListSync(pCrash, isCrash); } /* | > > > > > > > > | 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 | int nName = strlen(zName); int nCrashFile = strlen(zCrashFile); if( nCrashFile>0 && zCrashFile[nCrashFile-1]=='*' ){ nCrashFile--; if( nName>nCrashFile ) nName = nCrashFile; } #ifdef TRACE_CRASHTEST printf("cfSync(): nName = %d, nCrashFile = %d, zName = %s, zCrashFile = %s\n", nName, nCrashFile, zName, zCrashFile); #endif if( nName==nCrashFile && 0==memcmp(zName, zCrashFile, nName) ){ #ifdef TRACE_CRASHTEST printf("cfSync(): name matched, g.iCrash = %d\n", g.iCrash); #endif if( (--g.iCrash)==0 ) isCrash = 1; } return writeListSync(pCrash, isCrash); } /* |
︙ | ︙ |
Changes to src/test8.c.
︙ | ︙ | |||
827 828 829 830 831 832 833 | pConstraint = &pIdxInfo->aConstraint[ii]; pUsage = &pIdxInfo->aConstraintUsage[ii]; if( !isIgnoreUsable && !pConstraint->usable ) continue; iCol = pConstraint->iColumn; | | | < < < | 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 | pConstraint = &pIdxInfo->aConstraint[ii]; pUsage = &pIdxInfo->aConstraintUsage[ii]; if( !isIgnoreUsable && !pConstraint->usable ) continue; iCol = pConstraint->iColumn; if( iCol<0 || pVtab->aIndex[iCol] ){ char *zCol = iCol>=0 ? pVtab->aCol[iCol] : "rowid"; char *zOp = 0; useIdx = 1; switch( pConstraint->op ){ case SQLITE_INDEX_CONSTRAINT_EQ: zOp = "="; break; case SQLITE_INDEX_CONSTRAINT_LT: zOp = "<"; break; case SQLITE_INDEX_CONSTRAINT_GT: zOp = ">"; break; |
︙ | ︙ | |||
866 867 868 869 870 871 872 | } } /* If there is only one term in the ORDER BY clause, and it is ** on a column that this virtual table has an index for, then consume ** the ORDER BY clause. */ | | > > | < < < | 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 | } } /* If there is only one term in the ORDER BY clause, and it is ** on a column that this virtual table has an index for, then consume ** the ORDER BY clause. */ if( pIdxInfo->nOrderBy==1 && ( pIdxInfo->aOrderBy->iColumn<0 || pVtab->aIndex[pIdxInfo->aOrderBy->iColumn]) ){ int iCol = pIdxInfo->aOrderBy->iColumn; char *zCol = iCol>=0 ? pVtab->aCol[iCol] : "rowid"; char *zDir = pIdxInfo->aOrderBy->desc?"DESC":"ASC"; zNew = sqlite3_mprintf(" ORDER BY %s %s", zCol, zDir); string_concat(&zQuery, zNew, 1, &rc); pIdxInfo->orderByConsumed = 1; } appendToEchoModule(pVtab->interp, "xBestIndex");; appendToEchoModule(pVtab->interp, zQuery); |
︙ | ︙ |
Changes to src/util.c.
︙ | ︙ | |||
212 213 214 215 216 217 218 | /* Convenient short-hand */ #define UpperToLower sqlite3UpperToLower /* ** Some systems have stricmp(). Others have strcasecmp(). Because ** there is no consistency, we will define our own. ** | | > | | | < | 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 | /* Convenient short-hand */ #define UpperToLower sqlite3UpperToLower /* ** Some systems have stricmp(). Others have strcasecmp(). Because ** there is no consistency, we will define our own. ** ** IMPLEMENTATION-OF: R-30243-02494 The sqlite3_stricmp() and ** sqlite3_strnicmp() APIs allow applications and extensions to compare ** the contents of two buffers containing UTF-8 strings in a ** case-independent fashion, using the same definition of "case ** independence" that SQLite uses internally when comparing identifiers. */ int sqlite3_stricmp(const char *zLeft, const char *zRight){ register unsigned char *a, *b; a = (unsigned char *)zLeft; b = (unsigned char *)zRight; while( *a!=0 && UpperToLower[*a]==UpperToLower[*b]){ a++; b++; } return UpperToLower[*a] - UpperToLower[*b]; |
︙ | ︙ |
Changes to src/where.c.
︙ | ︙ | |||
3798 3799 3800 3801 3802 3803 3804 | ** Generate code for the start of the iLevel-th loop in the WHERE clause ** implementation described by pWInfo. */ static Bitmask codeOneLoopStart( WhereInfo *pWInfo, /* Complete information about the WHERE clause */ int iLevel, /* Which level of pWInfo->a[] should be coded */ u16 wctrlFlags, /* One of the WHERE_* flags defined in sqliteInt.h */ | | < | 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 | ** Generate code for the start of the iLevel-th loop in the WHERE clause ** implementation described by pWInfo. */ static Bitmask codeOneLoopStart( WhereInfo *pWInfo, /* Complete information about the WHERE clause */ int iLevel, /* Which level of pWInfo->a[] should be coded */ u16 wctrlFlags, /* One of the WHERE_* flags defined in sqliteInt.h */ Bitmask notReady /* Which tables are currently available */ ){ int j, k; /* Loop counters */ int iCur; /* The VDBE cursor for the table */ int addrNxt; /* Where to jump to continue with the next IN case */ int omitTable; /* True if we use the index only */ int bRev; /* True if we need to scan in reverse order */ WhereLevel *pLevel; /* The where level to be coded */ |
︙ | ︙ | |||
4338 4339 4340 4341 4342 4343 4344 4345 4346 | } iRetInit = sqlite3VdbeAddOp2(v, OP_Integer, 0, regReturn); /* If the original WHERE clause is z of the form: (x1 OR x2 OR ...) AND y ** Then for every term xN, evaluate as the subexpression: xN AND z ** That way, terms in y that are factored into the disjunction will ** be picked up by the recursive calls to sqlite3WhereBegin() below. */ if( pWC->nTerm>1 ){ | > > > > > > > > > > > > | > | > > | 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 | } iRetInit = sqlite3VdbeAddOp2(v, OP_Integer, 0, regReturn); /* If the original WHERE clause is z of the form: (x1 OR x2 OR ...) AND y ** Then for every term xN, evaluate as the subexpression: xN AND z ** That way, terms in y that are factored into the disjunction will ** be picked up by the recursive calls to sqlite3WhereBegin() below. ** ** Actually, each subexpression is converted to "xN AND w" where w is ** the "interesting" terms of z - terms that did not originate in the ** ON or USING clause of a LEFT JOIN, and terms that are usable as ** indices. */ if( pWC->nTerm>1 ){ int iTerm; for(iTerm=0; iTerm<pWC->nTerm; iTerm++){ Expr *pExpr = pWC->a[iTerm].pExpr; if( ExprHasProperty(pExpr, EP_FromJoin) ) continue; if( pWC->a[iTerm].wtFlags & (TERM_VIRTUAL|TERM_ORINFO) ) continue; if( (pWC->a[iTerm].eOperator & WO_ALL)==0 ) continue; pExpr = sqlite3ExprDup(pParse->db, pExpr, 0); pAndExpr = sqlite3ExprAnd(pParse->db, pAndExpr, pExpr); } if( pAndExpr ){ pAndExpr = sqlite3PExpr(pParse, TK_AND, 0, pAndExpr, 0); } } for(ii=0; ii<pOrWc->nTerm; ii++){ WhereTerm *pOrTerm = &pOrWc->a[ii]; if( pOrTerm->leftCursor==iCur || pOrTerm->eOperator==WO_AND ){ WhereInfo *pSubWInfo; /* Info for single OR-term scan */ Expr *pOrExpr = pOrTerm->pExpr; |
︙ | ︙ | |||
4383 4384 4385 4386 4387 4388 4389 | if( pSubWInfo->untestedTerms ) untestedTerms = 1; /* Finish the loop through table entries that match term pOrTerm. */ sqlite3WhereEnd(pSubWInfo); } } } | > > | > | 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 | if( pSubWInfo->untestedTerms ) untestedTerms = 1; /* Finish the loop through table entries that match term pOrTerm. */ sqlite3WhereEnd(pSubWInfo); } } } if( pAndExpr ){ pAndExpr->pLeft = 0; sqlite3ExprDelete(pParse->db, pAndExpr); } sqlite3VdbeChangeP1(v, iRetInit, sqlite3VdbeCurrentAddr(v)); sqlite3VdbeAddOp2(v, OP_Goto, 0, pLevel->addrBrk); sqlite3VdbeResolveLabel(v, iLoopBody); if( pWInfo->nLevel>1 ) sqlite3StackFree(pParse->db, pOrTab); if( !untestedTerms ) disableTerm(pLevel, pTerm); }else |
︙ | ︙ | |||
5039 5040 5041 5042 5043 5044 5045 | ** loop below generates code for a single nested loop of the VM ** program. */ notReady = ~(Bitmask)0; for(i=0; i<nTabList; i++){ pLevel = &pWInfo->a[i]; explainOneScan(pParse, pTabList, pLevel, i, pLevel->iFrom, wctrlFlags); | | | 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 | ** loop below generates code for a single nested loop of the VM ** program. */ notReady = ~(Bitmask)0; for(i=0; i<nTabList; i++){ pLevel = &pWInfo->a[i]; explainOneScan(pParse, pTabList, pLevel, i, pLevel->iFrom, wctrlFlags); notReady = codeOneLoopStart(pWInfo, i, wctrlFlags, notReady); pWInfo->iContinue = pLevel->addrCont; } #ifdef SQLITE_TEST /* For testing and debugging use only */ /* Record in the query plan information about the current table ** and the index used to access it (if any). If the table itself ** is not used, its name is just '{}'. If no index is used |
︙ | ︙ |
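The where.c hunks above extend the OR-clause optimization: as the new comment in the code explains, each disjunct xN of a WHERE clause of the form "(x1 OR x2 OR ...) AND y" is now planned as "xN AND w", where w collects the index-usable terms of y that did not originate in the ON or USING clause of a LEFT JOIN. A hedged sketch of the kind of query this helps (schema, index names and literal values are hypothetical, not taken from the check-in):

#include "sqlite3.h"

/* With the change above, the two OR branches are planned roughly as
** "a=5 AND c=11" and "b=7 AND c=11", so each branch can use one of the
** two-column indexes below instead of an index on a or b alone. */
static int or_opt_demo(sqlite3 *db){
  return sqlite3_exec(db,
      "CREATE TABLE t(a,b,c);"
      "CREATE INDEX t_ac ON t(a,c);"
      "CREATE INDEX t_bc ON t(b,c);"
      "SELECT * FROM t WHERE (a=5 OR b=7) AND c=11;",
      0, 0, 0);
}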
Changes to test/bigfile.test.
︙ | ︙ | |||
65 66 67 68 69 70 71 | } $::MAGIC_SUM # Try to create a large file - a file that is larger than 2^32 bytes. # If this fails, it means that the system being tested does not support # large files. So skip all of the remaining tests in this file. # db close | | | 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 | } $::MAGIC_SUM # Try to create a large file - a file that is larger than 2^32 bytes. # If this fails, it means that the system being tested does not support # large files. So skip all of the remaining tests in this file. # db close if {[catch {fake_big_file 4096 [get_pwd]/test.db} msg]} { puts "**** Unable to create a file larger than 4096 MB. *****" finish_test return } hexio_write test.db 28 00000000 do_test bigfile-1.2 { |
︙ | ︙ | |||
105 106 107 108 109 110 111 | sqlite3 db test.db execsql { SELECT md5sum(x) FROM t1; } } $::MAGIC_SUM db close | | | 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 | sqlite3 db test.db execsql { SELECT md5sum(x) FROM t1; } } $::MAGIC_SUM db close if {[catch {fake_big_file 8192 [get_pwd]/test.db}]} { puts "**** Unable to create a file larger than 8192 MB. *****" finish_test return } hexio_write test.db 28 00000000 do_test bigfile-1.5 { |
︙ | ︙ | |||
144 145 146 147 148 149 150 | do_test bigfile-1.9 { execsql { SELECT md5sum(x) FROM t2; } } $::MAGIC_SUM db close | | | 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 | do_test bigfile-1.9 { execsql { SELECT md5sum(x) FROM t2; } } $::MAGIC_SUM db close if {[catch {fake_big_file 16384 [get_pwd]/test.db}]} { puts "**** Unable to create a file larger than 16384 MB. *****" finish_test return } hexio_write test.db 28 00000000 do_test bigfile-1.10 { |
︙ | ︙ |
Changes to test/bigfile2.test.
︙ | ︙ | |||
25 26 27 28 29 30 31 | } # Pad the file out to 4GB in size. Then clear the file-size field in the # db header. This will cause SQLite to assume that the first 4GB of pages # are actually in use and new pages will be appended to the file. # db close | | | 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 | } # Pad the file out to 4GB in size. Then clear the file-size field in the # db header. This will cause SQLite to assume that the first 4GB of pages # are actually in use and new pages will be appended to the file. # db close if {[catch {fake_big_file 4096 [get_pwd]/test.db} msg]} { puts "**** Unable to create a file larger than 4096 MB. *****" finish_test return } hexio_write test.db 28 00000000 do_test 1.2 { |
︙ | ︙ |
Changes to test/crash5.test.
︙ | ︙ | |||
43 44 45 46 47 48 49 | INSERT INTO t1 VALUES('1111111111', '2222222222', $c); } db close do_test crash5-$ii.$jj.1 { crashsql -delay 1 -file test.db-journal -seed $ii -tclbody [join [list \ [list set iFail $jj] { | | | 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 | INSERT INTO t1 VALUES('1111111111', '2222222222', $c); } db close do_test crash5-$ii.$jj.1 { crashsql -delay 1 -file test.db-journal -seed $ii -tclbody [join [list \ [list set iFail $jj] { sqlite3_crashparams 0 [file join [get_pwd] test.db-journal] # Begin a transaction and evaluate a "CREATE INDEX" statement # with the iFail'th malloc() set to fail. This operation will # have to move the current contents of page 4 (the overflow # page) to make room for the new root page. The bug is that # if malloc() fails at a particular point in sqlite3PagerMovepage(), # sqlite mistakenly thinks that the page being moved (page 4) has |
︙ | ︙ | |||
85 86 87 88 89 90 91 | # the transaction was not rolled back, then the sqlite cache now # has a dirty page 4 that it incorrectly believes is already safely # in the synced part of the journal file. When # sqlite3_release_memory() is called sqlite tries to free memory # by writing page 4 out to the db file. If it crashes later on, # before syncing the journal... Corruption! # | | | 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 | # the transaction was not rolled back, then the sqlite cache now # has a dirty page 4 that it incorrectly believes is already safely # in the synced part of the journal file. When # sqlite3_release_memory() is called sqlite tries to free memory # by writing page 4 out to the db file. If it crashes later on, # before syncing the journal... Corruption! # sqlite3_crashparams 1 [file join [get_pwd] test.db-journal] sqlite3_release_memory 8092 }]] {} expr 1 } {1} sqlite3 db test.db do_test crash5-$ii.$jj.2 { |
︙ | ︙ |
Changes to test/e_insert.test.
︙ | ︙ | |||
46 47 48 49 50 51 52 | CREATE TABLE a4(c UNIQUE, d); } {} proc do_insert_tests {args} { uplevel do_select_tests $args } | | | 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 | CREATE TABLE a4(c UNIQUE, d); } {} proc do_insert_tests {args} { uplevel do_select_tests $args } # EVIDENCE-OF: R-21350-31508 -- syntax diagram insert-stmt # do_insert_tests e_insert-0 { 1 "INSERT INTO a1 DEFAULT VALUES" {} 2 "INSERT INTO main.a1 DEFAULT VALUES" {} 3 "INSERT OR ROLLBACK INTO main.a1 DEFAULT VALUES" {} 4 "INSERT OR ROLLBACK INTO a1 DEFAULT VALUES" {} 5 "INSERT OR ABORT INTO main.a1 DEFAULT VALUES" {} |
︙ | ︙ | |||
119 120 121 122 123 124 125 126 127 128 129 130 131 132 | 64 "INSERT OR REPLACE INTO a1 (b, a) SELECT c, b FROM a2" {} 65 "INSERT OR FAIL INTO main.a1 (b, a) SELECT c, b FROM a2" {} 66 "INSERT OR FAIL INTO a1 (b, a) SELECT c, b FROM a2" {} 67 "INSERT OR FAIL INTO main.a1 (b, a) SELECT c, b FROM a2" {} 68 "INSERT OR IGNORE INTO a1 (b, a) SELECT c, b FROM a2" {} 69 "REPLACE INTO a1 (b, a) SELECT c, b FROM a2" {} 70 "REPLACE INTO main.a1 (b, a) SELECT c, b FROM a2" {} } delete_all_data # EVIDENCE-OF: R-20288-20462 The first form (with the "VALUES" keyword) # creates a single new row in an existing table. # | > > > > > > > > > > > > > > | 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 | 64 "INSERT OR REPLACE INTO a1 (b, a) SELECT c, b FROM a2" {} 65 "INSERT OR FAIL INTO main.a1 (b, a) SELECT c, b FROM a2" {} 66 "INSERT OR FAIL INTO a1 (b, a) SELECT c, b FROM a2" {} 67 "INSERT OR FAIL INTO main.a1 (b, a) SELECT c, b FROM a2" {} 68 "INSERT OR IGNORE INTO a1 (b, a) SELECT c, b FROM a2" {} 69 "REPLACE INTO a1 (b, a) SELECT c, b FROM a2" {} 70 "REPLACE INTO main.a1 (b, a) SELECT c, b FROM a2" {} 71 "INSERT INTO a1 (b, a) VALUES(1, 2),(3,4)" {} 72 "INSERT INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {} 73 "INSERT OR ROLLBACK INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {} 74 "INSERT OR ROLLBACK INTO a1 (b, a) VALUES(1, 2),(3,4)" {} 75 "INSERT OR ABORT INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {} 76 "INSERT OR ABORT INTO a1 (b, a) VALUES(1, 2),(3,4)" {} 77 "INSERT OR REPLACE INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {} 78 "INSERT OR REPLACE INTO a1 (b, a) VALUES(1, 2),(3,4)" {} 79 "INSERT OR FAIL INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {} 80 "INSERT OR FAIL INTO a1 (b, a) VALUES(1, 2),(3,4)" {} 81 "INSERT OR FAIL INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {} 82 "INSERT OR IGNORE INTO a1 (b, a) VALUES(1, 2),(3,4)" {} 83 "REPLACE INTO a1 (b, a) VALUES(1, 2),(3,4)" {} 84 "REPLACE INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {} } delete_all_data # EVIDENCE-OF: R-20288-20462 The first form (with the "VALUES" keyword) # creates a single new row in an existing table. # |
︙ | ︙ |
Changes to test/e_uri.test.
︙ | ︙ | |||
127 128 129 130 131 132 133 | # EVIDENCE-OF: R-17482-00398 If the authority is not an empty string or # "localhost", an error is returned to the caller. # if {$tcl_platform(platform) == "unix"} { set flags [list SQLITE_OPEN_READWRITE SQLITE_OPEN_CREATE SQLITE_OPEN_URI] foreach {tn uri error} " | | | | | | | | | | 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 | # EVIDENCE-OF: R-17482-00398 If the authority is not an empty string or # "localhost", an error is returned to the caller. # if {$tcl_platform(platform) == "unix"} { set flags [list SQLITE_OPEN_READWRITE SQLITE_OPEN_CREATE SQLITE_OPEN_URI] foreach {tn uri error} " 1 {file://localhost[get_pwd]/test.db} {not an error} 2 {file://[get_pwd]/test.db} {not an error} 3 {file://x[get_pwd]/test.db} {invalid uri authority: x} 4 {file://invalid[get_pwd]/test.db} {invalid uri authority: invalid} " { do_test 2.$tn { set DB [sqlite3_open_v2 $uri $flags ""] set e [sqlite3_errmsg $DB] sqlite3_close $DB set e } $error } } # EVIDENCE-OF: R-45981-25528 The fragment component of a URI, if # present, is ignored. # # It is difficult to test that something is ignored correctly. So these tests # just show that adding a fragment does not interfere with the pathname or # parameters passed through to the VFS xOpen() methods. # foreach {tn uri parse} " 1 {file:test.db#abc} {[get_pwd]/test.db {}} 2 {file:test.db?a=b#abc} {[get_pwd]/test.db {a b}} 3 {file:test.db?a=b#?c=d} {[get_pwd]/test.db {a b}} " { do_filepath_test 3.$tn { parse_uri $uri } $parse } # EVIDENCE-OF: R-62557-09390 SQLite uses the path component of the URI # as the name of the disk file which contains the database. # # EVIDENCE-OF: R-28659-11035 If the path begins with a '/' character, # then it is interpreted as an absolute path. # # EVIDENCE-OF: R-46234-61323 If the path does not begin with a '/' # (meaning that the authority section is omitted from the URI) then the # path is interpreted as a relative path. # foreach {tn uri parse} " 1 {file:test.db} {[get_pwd]/test.db {}} 2 {file:/test.db} {/test.db {}} 3 {file:///test.db} {/test.db {}} 4 {file://localhost/test.db} {/test.db {}} 5 {file:/a/b/c/test.db} {/a/b/c/test.db {}} " { do_filepath_test 4.$tn { parse_uri $uri } $parse } |
︙ | ︙ |
Changes to test/filectrl.test.
︙ | ︙ | |||
30 31 32 33 34 35 36 | do_test filectrl-1.4 { sqlite3 db test.db file_control_lasterrno_test db } {} do_test filectrl-1.5 { db close sqlite3 db test_control_lockproxy.db | | | 30 31 32 33 34 35 36 37 38 39 40 41 | do_test filectrl-1.4 { sqlite3 db test.db file_control_lasterrno_test db } {} do_test filectrl-1.5 { db close sqlite3 db test_control_lockproxy.db file_control_lockproxy_test db [get_pwd] } {} db close forcedelete .test_control_lockproxy.db-conch test.proxy finish_test |
Changes to test/fts4langid.test.
︙ | ︙ | |||
478 479 480 481 482 483 484 | } {1 2 5} do_execsql_test 5.4.$lid.5 { SELECT count(*) FROM t6_segdir; SELECT count(*) FROM t6_segments; } {4 4} } | < < | 478 479 480 481 482 483 484 485 | } {1 2 5} do_execsql_test 5.4.$lid.5 { SELECT count(*) FROM t6_segdir; SELECT count(*) FROM t6_segments; } {4 4} } finish_test |
Changes to test/ioerr2.test.
︙ | ︙ | |||
126 127 128 129 130 131 132 | set ::sqlite_io_error_pending $::N set sql {UPDATE t2 SET b = randstr(400,400)} foreach {::go res} [catchsql $sql] {} } } } msg] list $rc $msg | | | 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 | set ::sqlite_io_error_pending $::N set sql {UPDATE t2 SET b = randstr(400,400)} foreach {::go res} [catchsql $sql] {} } } } msg] list $rc $msg } {1 {abort due to ROLLBACK}} if {$::tcl_platform(platform) == "unix"} { # Cause the call to xAccess used by [pragma temp_store_directory] to # determine if the specified directory is writable to fail. This causes # SQLite to report "not a writable directory", which is probably the # right answer. # |
︙ | ︙ |
Changes to test/misc7.test.
︙ | ︙ | |||
479 480 481 482 483 484 485 | do_test misc7-20.1 { sqlite3_global_recover } {SQLITE_OK} # Try to open a really long file name. # do_test misc7-21.1 { | | | 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 | do_test misc7-20.1 { sqlite3_global_recover } {SQLITE_OK} # Try to open a really long file name. # do_test misc7-21.1 { set zFile [file join [get_pwd] "[string repeat abcde 104].db"] set rc [catch {sqlite3 db2 $zFile} msg] list $rc $msg } {1 {unable to open database file}} db close forcedelete test.db |
︙ | ︙ |
Changes to test/pager1.test.
︙ | ︙ | |||
531 532 533 534 535 536 537 | if {[string match *mj* [file tail $filename]]} { set ::mj_filename_length [string length $filename] faultsim_save } return SQLITE_OK } | | | 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 | if {[string match *mj* [file tail $filename]]} { set ::mj_filename_length [string length $filename] faultsim_save } return SQLITE_OK } set pwd [get_pwd] foreach {tn1 tcl} { 1 { set prefix "test.db" } 2 { # This test depends on the underlying VFS being able to open paths # 512 bytes in length. The idea is to create a hot-journal file that # contains a master-journal pointer so large that it could contain # a valid page record (if the file page-size is 512 bytes). So as to |
︙ | ︙ | |||
883 884 885 886 887 888 889 890 891 892 893 894 895 896 | do_test pager1.4.7.3 { db close catch {file attributes test.db-journal -permissions rw-rw-rw-} catch {file attributes test.db-journal -readonly 0} delete_file test.db-journal file exists test.db-journal } {0} #------------------------------------------------------------------------- # The following tests deal with multi-file commits. # # pager1-5.1.*: The case where a multi-file cannot be committed because # another connection is holding a SHARED lock on one of the # files. After the SHARED lock is removed, the COMMIT succeeds. | > > > > > > > > > > > > > > > > > > | 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 | do_test pager1.4.7.3 { db close catch {file attributes test.db-journal -permissions rw-rw-rw-} catch {file attributes test.db-journal -readonly 0} delete_file test.db-journal file exists test.db-journal } {0} do_test pager1.4.8.1 { catch {file attributes test.db -permissions r--------} catch {file attributes test.db -readonly 1} sqlite3 db test.db db eval { SELECT * FROM t1 } sqlite3_db_readonly db main } {1} do_test pager1.4.8.2 { sqlite3_db_readonly db xyz } {-1} do_test pager1.4.8.3 { db close catch {file attributes test.db -readonly 0} catch {file attributes test.db -permissions rw-rw-rw-} msg sqlite3 db test.db db eval { SELECT * FROM t1 } sqlite3_db_readonly db main } {0} #------------------------------------------------------------------------- # The following tests deal with multi-file commits. # # pager1-5.1.*: The case where a multi-file cannot be committed because # another connection is holding a SHARED lock on one of the # files. After the SHARED lock is removed, the COMMIT succeeds. |
︙ | ︙ | |||
997 998 999 1000 1001 1002 1003 | # # 1) 512 byte header + # 2) 2 * (1024+8) byte records + # 3) 20+N bytes of master-journal pointer, where N is the size of # the master-journal name encoded as utf-8 with no nul term. # set mj_pointer [expr { | | | | 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 | # # 1) 512 byte header + # 2) 2 * (1024+8) byte records + # 3) 20+N bytes of master-journal pointer, where N is the size of # the master-journal name encoded as utf-8 with no nul term. # set mj_pointer [expr { 20 + [string length [get_pwd]] + [string length "/test.db-mjXXXXXX9XX"] }] expr {$::max_journal==(512+2*(1024+8)+$mj_pointer)} } 1 do_test pager1-5.4.2 { set ::max_journal 0 execsql { PRAGMA synchronous = full; BEGIN; DELETE FROM t1 WHERE b = 'Lenin'; DELETE FROM t2 WHERE b = 'Lenin'; COMMIT; } # In synchronous=full mode, the master-journal pointer is not written # directly after the last record in the journal file. Instead, it is # written starting at the next (in this case 512 byte) sector boundary. # set mj_pointer [expr { 20 + [string length [get_pwd]] + [string length "/test.db-mjXXXXXX9XX"] }] expr {$::max_journal==(((512+2*(1024+8)+511)/512)*512 + $mj_pointer)} } 1 db close tv delete do_test pager1-5.5.1 { |
︙ | ︙ |
Changes to test/pragma.test.
︙ | ︙ | |||
986 987 988 989 990 991 992 | do_test pragma-9.4 { execsql { PRAGMA temp_store_directory; } } {} ifcapable wsd { do_test pragma-9.5 { | | | | 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 | do_test pragma-9.4 { execsql { PRAGMA temp_store_directory; } } {} ifcapable wsd { do_test pragma-9.5 { set pwd [string map {' ''} [file nativename [get_pwd]]] execsql " PRAGMA temp_store_directory='$pwd'; " } {} do_test pragma-9.6 { execsql { PRAGMA temp_store_directory; } } [list [file nativename [get_pwd]]] do_test pragma-9.7 { catchsql { PRAGMA temp_store_directory='/NON/EXISTENT/PATH/FOOBAR'; } } {1 {not a writable directory}} do_test pragma-9.8 { execsql { |
︙ | ︙ |
Changes to test/quota.test.
︙ | ︙ | |||
217 218 219 220 221 222 223 | sqlite3_quota_set * 4096 quota_callback do_test quota-3.3.1 { execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db1a execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db1b execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db2a execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db2b set ::quota | | | 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 | sqlite3_quota_set * 4096 quota_callback do_test quota-3.3.1 { execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db1a execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db1b execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db2a execsql { INSERT INTO t1 VALUES(randomblob(500), randomblob(500)) } db2b set ::quota } [list [file join [get_pwd] test.db] 5120] do_test quota-3.2.X { foreach db {db1a db2a db2b db1b} { catch { $db close } } sqlite3_quota_set * 0 {} } {SQLITE_OK} #------------------------------------------------------------------------- |
︙ | ︙ |
Changes to test/quota2.test.
︙ | ︙ | |||
24 25 26 27 28 29 30 | file mkdir $dir } # The standard_path procedure converts a pathname into a standard format # that is the same across platforms. # unset -nocomplain ::quota_pwd ::quota_mapping | | | 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 | file mkdir $dir } # The standard_path procedure converts a pathname into a standard format # that is the same across platforms. # unset -nocomplain ::quota_pwd ::quota_mapping set ::quota_pwd [string map {\\ /} [get_pwd]] set ::quota_mapping [list $::quota_pwd PWD] proc standard_path {x} { set x [string map {\\ /} $x] return [string map $::quota_mapping $x] } # The quota_check procedure is a callback from the quota handler. |
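Given the mapping set up above, standard_path strips both the platform's backslashes and the absolute location of the build directory, so expected results can be written portably. For example, with an assumed working directory of C:/work/sqlite:

    set ::quota_pwd     C:/work/sqlite
    set ::quota_mapping [list $::quota_pwd PWD]
    standard_path {C:\work\sqlite\quota2a\test.db}    ;# => PWD/quota2a/test.db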
︙ | ︙ |
Changes to test/tester.tcl.
︙ | ︙ | |||
15 16 17 18 19 20 21 22 23 24 25 26 27 28 | #------------------------------------------------------------------------- # The commands provided by the code in this file to help with creating # test cases are as follows: # # Commands to manipulate the db and the file-system at a high level: # # copy_file FROM TO # delete_file FILENAME # drop_all_tables ?DB? # forcecopy FROM TO # forcedelete FILENAME # # Test the capability of the SQLite version built into the interpreter to | > | 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 | #------------------------------------------------------------------------- # The commands provided by the code in this file to help with creating # test cases are as follows: # # Commands to manipulate the db and the file-system at a high level: # # get_pwd # copy_file FROM TO # delete_file FILENAME # drop_all_tables ?DB? # forcecopy FROM TO # forcedelete FILENAME # # Test the capability of the SQLite version built into the interpreter to |
︙ | ︙ | |||
143 144 145 146 147 148 149 150 151 152 153 154 155 156 | # failed [file] operations. A value of zero or less means "do not # wait". # return 100; # TODO: Good default? } return $::G(file-retry-delay) } # Copy file $from into $to. This is used because some versions of # TCL for windows (notably the 8.4.1 binary package shipped with the # current mingw release) have a broken "file copy" command. # proc copy_file {from to} { do_copy_file false $from $to | > > > > > > > > > > > > > > > > > > | 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 | # failed [file] operations. A value of zero or less means "do not # wait". # return 100; # TODO: Good default? } return $::G(file-retry-delay) } # Return the string representing the name of the current directory. On # Windows, the result is "normalized" to whatever our parent command shell # is using to prevent case-mismatch issues. # proc get_pwd {} { if {$::tcl_platform(platform) eq "windows"} { # # NOTE: Cannot use [file normalize] here because it would alter the # case of the result to what Tcl considers canonical, which would # defeat the purpose of this procedure. # return [string map [list \\ /] \ [string trim [exec -- $::env(ComSpec) /c echo %CD%]]] } else { return [pwd] } } # Copy file $from into $to. This is used because some versions of # TCL for windows (notably the 8.4.1 binary package shipped with the # current mingw release) have a broken "file copy" command. # proc copy_file {from to} { do_copy_file false $from $to |
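Callers elsewhere in this check-in simply substitute [get_pwd] for [pwd] when building absolute paths; on Windows the returned string keeps whatever case the parent cmd.exe reports, so later comparisons against native filenames do not trip over drive-letter case. For example (this exact line appears in the wal.test change below):

    set walfile [file nativename [file join [get_pwd] test.db-wal]]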
︙ | ︙ | |||
980 981 982 983 984 985 986 | if {$crashfile eq ""} { error "Compulsory option -file missing" } # $crashfile gets compared to the native filename in # cfSync(), which can be different than what TCL uses by # default, so here we force it to the "nativename" format. | | | 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 | if {$crashfile eq ""} { error "Compulsory option -file missing" } # $crashfile gets compared to the native filename in # cfSync(), which can be different than what TCL uses by # default, so here we force it to the "nativename" format. set cfile [string map {\\ \\\\} [file nativename [file join [get_pwd] $crashfile]]] set f [open crash.tcl w] puts $f "sqlite3_crash_enable 1" puts $f "sqlite3_crashparams $blocksize $dc $crashdelay $cfile" puts $f "sqlite3_test_control_pending_byte $::sqlite_pending_byte" puts $f "sqlite3 db test.db -vfs crash" |
︙ | ︙ |
Changes to test/tkt-94c04eaadb.test.
︙ | ︙ | |||
23 24 25 26 27 28 29 | # Create a database. do_test tkt-94c94-1.1 { execsql { CREATE TABLE t1(a, b) } } {} # Grow the file to larger than 4096MB (2^32 bytes) db close | | | 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 | # Create a database. do_test tkt-94c94-1.1 { execsql { CREATE TABLE t1(a, b) } } {} # Grow the file to larger than 4096MB (2^32 bytes) db close if {[catch {fake_big_file 4096 [get_pwd]/test.db} msg]} { puts "**** Unable to create a file larger than 4096 MB. *****" finish_test return } # Switch to async mode. sqlite3async_initialize "" 1 |
︙ | ︙ |
Changes to test/trace2.test.
︙ | ︙ | |||
130 131 132 133 134 135 136 | "INSERT INTO x1 VALUES('North northwest wind between 8 and 14 mph');" "-- INSERT INTO 'main'.'x1_content' VALUES(?,(?))" "-- REPLACE INTO 'main'.'x1_docsize' VALUES(?,?)" "-- SELECT value FROM 'main'.'x1_stat' WHERE id=0" "-- REPLACE INTO 'main'.'x1_stat' VALUES(0,?)" "-- SELECT (SELECT max(idx) FROM 'main'.'x1_segdir' WHERE level = ?) + 1" "-- SELECT coalesce((SELECT max(blockid) FROM 'main'.'x1_segments') + 1, 1)" | | > | | 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 | "INSERT INTO x1 VALUES('North northwest wind between 8 and 14 mph');" "-- INSERT INTO 'main'.'x1_content' VALUES(?,(?))" "-- REPLACE INTO 'main'.'x1_docsize' VALUES(?,?)" "-- SELECT value FROM 'main'.'x1_stat' WHERE id=0" "-- REPLACE INTO 'main'.'x1_stat' VALUES(0,?)" "-- SELECT (SELECT max(idx) FROM 'main'.'x1_segdir' WHERE level = ?) + 1" "-- SELECT coalesce((SELECT max(blockid) FROM 'main'.'x1_segments') + 1, 1)" "-- REPLACE INTO 'main'.'x1_segdir' VALUES(?,?,?,?,?,?)" } do_trace_test 2.3 { INSERT INTO x1(x1) VALUES('optimize'); } { "INSERT INTO x1(x1) VALUES('optimize');" "-- SELECT DISTINCT level / (1024 * ?) FROM 'main'.'x1_segdir'" "-- SELECT idx, start_block, leaves_end_block, end_block, root FROM 'main'.'x1_segdir' WHERE level BETWEEN ? AND ?ORDER BY level DESC, idx ASC" "-- SELECT max(level) FROM 'main'.'x1_segdir' WHERE level BETWEEN ? AND ?" "-- SELECT coalesce((SELECT max(blockid) FROM 'main'.'x1_segments') + 1, 1)" "-- DELETE FROM 'main'.'x1_segdir' WHERE level BETWEEN ? AND ?" "-- REPLACE INTO 'main'.'x1_segdir' VALUES(?,?,?,?,?,?)" } } finish_test |
Changes to test/uri.test.
︙ | ︙ | |||
50 51 52 53 54 55 56 | 15 test.db?mork=1#boris test.db?mork=1#boris 16 file://localhostPWD/test.db%3Fhello test.db?hello } { if {$tcl_platform(platform)=="windows"} { if {$tn>14} break | | | | 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 | 15 test.db?mork=1#boris test.db?mork=1#boris 16 file://localhostPWD/test.db%3Fhello test.db?hello } { if {$tcl_platform(platform)=="windows"} { if {$tn>14} break set uri [string map [list PWD /[get_pwd]] $uri] } else { set uri [string map [list PWD [get_pwd]] $uri] } if {[file isdir $file]} {error "$file is a directory"} forcedelete $file do_test 1.$tn.1 { file exists $file } 0 set DB [sqlite3_open $uri] do_test 1.$tn.2 { file exists $file } 1 |
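On unix the PWD token expands to [get_pwd] directly; with an assumed working directory of /home/user/build, template 16 is resolved roughly as follows:

    set uri "file://localhostPWD/test.db%3Fhello"
    string map [list PWD [get_pwd]] $uri
    # => file://localhost/home/user/build/test.db%3Fhello
    # which the test then opens, creating the file "test.db?hello" in that directory.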
︙ | ︙ | |||
270 271 272 273 274 275 276 | 3 "file:/PWD/test.db" {not an error} 4 "file://l%6Fcalhost/PWD/test.db" {invalid uri authority: l%6Fcalhost} 5 "file://lbcalhost/PWD/test.db" {invalid uri authority: lbcalhost} 6 "file://x/PWD/test.db" {invalid uri authority: x} } { if {$tcl_platform(platform)=="windows"} { | | | | 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 | 3 "file:/PWD/test.db" {not an error} 4 "file://l%6Fcalhost/PWD/test.db" {invalid uri authority: l%6Fcalhost} 5 "file://lbcalhost/PWD/test.db" {invalid uri authority: lbcalhost} 6 "file://x/PWD/test.db" {invalid uri authority: x} } { if {$tcl_platform(platform)=="windows"} { set uri [string map [list PWD [string range [get_pwd] 3 end]] $uri] } else { set uri [string map [list PWD [string range [get_pwd] 1 end]] $uri] } do_test 6.$tn { set DB [sqlite3_open $uri] sqlite3_errmsg $DB } $res catch { sqlite3_close $DB } |
︙ | ︙ |
Changes to test/wal.test.
︙ | ︙ | |||
1473 1474 1475 1476 1477 1478 1479 | }] } #------------------------------------------------------------------------- # Test that when 1 or more pages are recovered from a WAL file, # sqlite3_log() is invoked to report this to the user. # | | | 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 | }] } #------------------------------------------------------------------------- # Test that when 1 or more pages are recovered from a WAL file, # sqlite3_log() is invoked to report this to the user. # set walfile [file nativename [file join [get_pwd] test.db-wal]] catch {db close} forcedelete test.db do_test wal-23.1 { faultsim_delete_and_reopen execsql { CREATE TABLE t1(a, b); PRAGMA journal_mode = WAL; |
︙ | ︙ |
Changes to test/walbig.test.
︙ | ︙ | |||
48 49 50 51 52 53 54 | INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1; INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1; INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1; } } {wal} db close | | | 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 | INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1; INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1; INSERT INTO t1 SELECT a_string(300), a_string(500) FROM t1; } } {wal} db close if {[catch {fake_big_file 5000 [get_pwd]/test.db}]} { puts "**** Unable to create a file larger than 5000 MB. *****" finish_test return } hexio_write test.db 28 00000000 sqlite3 db test.db |
︙ | ︙ |
Changes to test/where7.test.
︙ | ︙ | |||
23335 23336 23337 23338 23339 23340 23341 | FROM t302 JOIN t301 ON t302.c8 = t301.c8 WHERE t302.c2 = 19571 AND t302.c3 > 1287603136 AND (t301.c4 = 1407449685622784 OR t301.c8 = 1407424651264000) ORDER BY t302.c5 LIMIT 200; } { | | | 23335 23336 23337 23338 23339 23340 23341 23342 23343 23344 23345 23346 23347 23348 | FROM t302 JOIN t301 ON t302.c8 = t301.c8 WHERE t302.c2 = 19571 AND t302.c3 > 1287603136 AND (t301.c4 = 1407449685622784 OR t301.c8 = 1407424651264000) ORDER BY t302.c5 LIMIT 200; } { 0 0 1 {SEARCH TABLE t301 USING COVERING INDEX t301_c4 (c4=?) (~10 rows)} 0 0 1 {SEARCH TABLE t301 USING INTEGER PRIMARY KEY (rowid=?) (~1 rows)} 0 1 0 {SEARCH TABLE t302 USING INDEX t302_c8_c3 (c8=? AND c3>?) (~2 rows)} 0 0 0 {USE TEMP B-TREE FOR ORDER BY} } finish_test |
Changes to test/where9.test.
︙ | ︙ | |||
360 361 362 363 364 365 366 | do_execsql_test where9-3.1 { EXPLAIN QUERY PLAN SELECT t2.a FROM t1, t2 WHERE t1.a=80 AND ((t1.c=t2.c AND t1.d=t2.d) OR t1.f=t2.f) } { 0 0 0 {SEARCH TABLE t1 USING INTEGER PRIMARY KEY (rowid=?) (~1 rows)} 0 1 1 {SEARCH TABLE t2 USING INDEX t2d (d=?) (~2 rows)} | | | | 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 | do_execsql_test where9-3.1 { EXPLAIN QUERY PLAN SELECT t2.a FROM t1, t2 WHERE t1.a=80 AND ((t1.c=t2.c AND t1.d=t2.d) OR t1.f=t2.f) } { 0 0 0 {SEARCH TABLE t1 USING INTEGER PRIMARY KEY (rowid=?) (~1 rows)} 0 1 1 {SEARCH TABLE t2 USING INDEX t2d (d=?) (~2 rows)} 0 1 1 {SEARCH TABLE t2 USING COVERING INDEX t2f (f=?) (~10 rows)} } do_execsql_test where9-3.2 { EXPLAIN QUERY PLAN SELECT coalesce(t2.a,9999) FROM t1 LEFT JOIN t2 ON (t1.c+1=t2.c AND t1.d=t2.d) OR (t1.f||'x')=t2.f WHERE t1.a=80 } { 0 0 0 {SEARCH TABLE t1 USING INTEGER PRIMARY KEY (rowid=?) (~1 rows)} 0 1 1 {SEARCH TABLE t2 USING INDEX t2d (d=?) (~2 rows)} 0 1 1 {SEARCH TABLE t2 USING COVERING INDEX t2f (f=?) (~10 rows)} } } # Make sure that INDEXED BY and multi-index OR clauses play well with # one another. # do_test where9-4.1 { |
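The only change to these expected plans is "USING INDEX" becoming "USING COVERING INDEX" for t2f: a covering index is one that already contains every column the query needs from its table, so SQLite can answer from the index b-tree without fetching the corresponding table rows. A sketch of the idea, assuming t2 is declared with a as its INTEGER PRIMARY KEY and t2f is an index on t2(f), as the plans above suggest:

    # SELECT t2.a ... WHERE t1.f=t2.f
    #   t2f supplies f (the indexed column) plus the rowid, which is t2.a here, so
    #   the lookup is reported as: SEARCH TABLE t2 USING COVERING INDEX t2f (f=?)
    # The t2d branch presumably still has to consult the table for the remaining
    #   columns (c in this query), so it stays: SEARCH TABLE t2 USING INDEX t2d (d=?)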
︙ | ︙ | |||
449 450 451 452 453 454 455 | ifcapable explain { # The (c=31031 OR d IS NULL) clause is preferred over b>1000 because # the former is an equality test which is expected to return fewer rows. # do_execsql_test where9-5.1 { EXPLAIN QUERY PLAN SELECT a FROM t1 WHERE b>1000 AND (c=31031 OR d IS NULL) } { | | | | 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 | ifcapable explain { # The (c=31031 OR d IS NULL) clause is preferred over b>1000 because # the former is an equality test which is expected to return fewer rows. # do_execsql_test where9-5.1 { EXPLAIN QUERY PLAN SELECT a FROM t1 WHERE b>1000 AND (c=31031 OR d IS NULL) } { 0 0 0 {SEARCH TABLE t1 USING INDEX t1c (c=?) (~3 rows)} 0 0 0 {SEARCH TABLE t1 USING INDEX t1d (d=?) (~3 rows)} } # In contrast, b=1000 is preferred over any OR-clause. # do_execsql_test where9-5.2 { EXPLAIN QUERY PLAN SELECT a FROM t1 WHERE b=1000 AND (c=31031 OR d IS NULL) } { |
︙ | ︙ | |||
852 853 854 855 856 857 858 | } {79 81 scan 0 sort 1} do_test where9-7.3.2 { execsql { SELECT a FROM t6 WHERE (x='y' OR y='y') AND c=27027 ORDER BY a; } } {79 81} | > > | > > > > > > > > > > > > > > > > > > | 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 | } {79 81 scan 0 sort 1} do_test where9-7.3.2 { execsql { SELECT a FROM t6 WHERE (x='y' OR y='y') AND c=27027 ORDER BY a; } } {79 81} # Fix for ticket [b7c8682cc17f32903f03a610bd0d35ffd3c1e6e4] # "Incorrect result from LEFT JOIN with OR in the WHERE clause" # do_test where9-8.1 { db eval { CREATE TABLE t81(a INTEGER PRIMARY KEY, b, c, d); CREATE TABLE t82(x INTEGER PRIMARY KEY, y); CREATE TABLE t83(p INTEGER PRIMARY KEY, q); INSERT INTO t81 VALUES(2,3,4,5); INSERT INTO t81 VALUES(3,4,5,6); INSERT INTO t82 VALUES(2,4); INSERT INTO t83 VALUES(5,55); SELECT * FROM t81 LEFT JOIN t82 ON y=b JOIN t83 WHERE c==p OR d==p ORDER BY +a; } } {2 3 4 5 {} {} 5 55 3 4 5 6 2 4 5 55} finish_test |
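The expected output of the new where9-8.1 case can be traced by hand from the INSERT statements it runs:

    # t81 rows: (a=2,b=3,c=4,d=5) and (a=3,b=4,c=5,d=6); t82: (x=2,y=4); t83: (p=5,q=55)
    # a=2: no t82.y equals b=3, so the LEFT JOIN supplies NULLs; d=5 matches t83.p=5
    #        => 2 3 4 5 {} {} 5 55
    # a=3: t82 row (2,4) satisfies y=b; c=5 matches t83.p=5
    #        => 3 4 5 6 2 4 5 55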