Overview
Comment:      Merge all recent trunk changes into the sessions branch.
Downloads:    Tarball | ZIP archive
Timelines:    family | ancestors | descendants | both | sessions
Files:        files | file ages | folders
SHA1:         fb9b9987de965e194fef56bca563ee65
User & Date:  drh 2012-03-30 17:30:33.409
Context
2012-04-18
  01:41  Import all the latest trunk changes into the sessions branch. (check-in: 87a0eab5d9 user: drh tags: sessions)

2012-03-30
  17:30  Merge all recent trunk changes into the sessions branch. (check-in: fb9b9987de user: drh tags: sessions)
  16:44  Avoid using the OVERLAPPED struct on WinCE. (check-in: 196ca3a8b0 user: mistachkin tags: trunk)

2012-03-19
  11:17  Merge the latest trunk changes into the sessions branch. (check-in: 2277c70b6f user: dan tags: sessions)
Changes
Changes to VERSION.
  1   3.7.12
Changes to ext/fts3/fts3.c.
︙ | ︙ | |||
66 67 68 69 70 71 72 | ** ** FTS3 used to optionally store character offsets using a compile-time ** option. But that functionality is no longer supported. ** ** A doclist is stored like this: ** ** array { | | | 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 | ** ** FTS3 used to optionally store character offsets using a compile-time ** option. But that functionality is no longer supported. ** ** A doclist is stored like this: ** ** array { ** varint docid; (delta from previous doclist) ** array { (position list for column 0) ** varint position; (2 more than the delta from previous position) ** } ** array { ** varint POS_COLUMN; (marks start of position list for new column) ** varint column; (index of new column) ** array { |
︙ | ︙ | |||
97 98 99 100 101 102 103 | ** value: 123 5 9 1 1 14 35 0 234 72 0 ** ** The 123 value is the first docid. For column zero in this document ** there are two matches at positions 3 and 10 (5-2 and 9-2+3). The 1 ** at D signals the start of a new column; the 1 at E indicates that the ** new column is column number 1. There are two positions at 12 and 45 ** (14-2 and 35-2+12). The 0 at H indicate the end-of-document. The | | | | 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 | ** value: 123 5 9 1 1 14 35 0 234 72 0 ** ** The 123 value is the first docid. For column zero in this document ** there are two matches at positions 3 and 10 (5-2 and 9-2+3). The 1 ** at D signals the start of a new column; the 1 at E indicates that the ** new column is column number 1. There are two positions at 12 and 45 ** (14-2 and 35-2+12). The 0 at H indicate the end-of-document. The ** 234 at I is the delta to next docid (357). It has one position 70 ** (72-2) and then terminates with the 0 at K. ** ** A "position-list" is the list of positions for multiple columns for ** a single docid. A "column-list" is the set of positions for a single ** column. Hence, a position-list consists of one or more column-lists, ** a document record consists of a docid followed by a position-list and ** a doclist consists of one or more document records. ** |
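As an aside (not part of this check-in): the delta rules described in the comment above can be checked with a small throw-away program. Every value in the example doclist "123 5 9 1 1 14 35 0 234 72 0" fits in one byte, so a plain byte read stands in for the real varint decoder; this is an illustration of the encoding only, not FTS3 code.

#include <stdio.h>

int main(void){
  unsigned char a[] = {123, 5, 9, 1, 1, 14, 35, 0, 234, 72, 0};
  int i = 0;
  long long iDocid = 0;

  while( i < (int)sizeof(a) ){
    int iCol = 0;                  /* Current column number */
    int iPos = 0;                  /* Previous position in this column */
    iDocid += a[i++];              /* Docid is a delta from the previous docid */
    printf("docid %lld\n", iDocid);
    while( i < (int)sizeof(a) && a[i] != 0 ){
      if( a[i] == 1 ){             /* POS_COLUMN: next value is the column index */
        i++;
        iCol = a[i++];
        iPos = 0;
        printf("  column %d\n", iCol);
      }else{
        iPos += a[i++] - 2;        /* Position is (value - 2) plus previous position */
        printf("    position %d\n", iPos);
      }
    }
    i++;                           /* Skip the 0x00 end-of-document marker */
  }
  return 0;
}

Running it prints docid 123 with column-0 positions 3 and 10 and column-1 positions 12 and 45, then docid 357 with a single position 70, matching the walk-through in the comment.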
︙ | ︙ | |||
565 566 567 568 569 570 571 572 573 574 575 576 577 578 | } sqlite3_free(zSql); sqlite3_free(zCols); *pRc = rc; } } /* ** Create the backing store tables (%_content, %_segments and %_segdir) ** required by the FTS3 table passed as the only argument. This is done ** as part of the vtab xCreate() method. ** ** If the p->bHasDocsize boolean is true (indicating that this is an | > > > > > > > > > > > > | 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 | } sqlite3_free(zSql); sqlite3_free(zCols); *pRc = rc; } } /* ** Create the %_stat table if it does not already exist. */ void sqlite3Fts3CreateStatTable(int *pRc, Fts3Table *p){ fts3DbExec(pRc, p->db, "CREATE TABLE IF NOT EXISTS %Q.'%q_stat'" "(id INTEGER PRIMARY KEY, value BLOB);", p->zDb, p->zName ); if( (*pRc)==SQLITE_OK ) p->bHasStat = 1; } /* ** Create the backing store tables (%_content, %_segments and %_segdir) ** required by the FTS3 table passed as the only argument. This is done ** as part of the vtab xCreate() method. ** ** If the p->bHasDocsize boolean is true (indicating that this is an |
︙ | ︙ | |||
626 627 628 629 630 631 632 633 | ); if( p->bHasDocsize ){ fts3DbExec(&rc, db, "CREATE TABLE %Q.'%q_docsize'(docid INTEGER PRIMARY KEY, size BLOB);", p->zDb, p->zName ); } if( p->bHasStat ){ | > | < < < | 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 | ); if( p->bHasDocsize ){ fts3DbExec(&rc, db, "CREATE TABLE %Q.'%q_docsize'(docid INTEGER PRIMARY KEY, size BLOB);", p->zDb, p->zName ); } assert( p->bHasStat==p->bFts4 ); if( p->bHasStat ){ sqlite3Fts3CreateStatTable(&rc, p); } return rc; } /* ** Store the current database page-size in bytes in p->nPgsz. ** |
︙ | ︙ | |||
737 738 739 740 741 742 743 | ** The pointer returned points to memory obtained from sqlite3_malloc(). It ** is the callers responsibility to call sqlite3_free() to release this ** memory. */ static char *fts3QuoteId(char const *zInput){ int nRet; char *zRet; | | | 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 | ** The pointer returned points to memory obtained from sqlite3_malloc(). It ** is the callers responsibility to call sqlite3_free() to release this ** memory. */ static char *fts3QuoteId(char const *zInput){ int nRet; char *zRet; nRet = 2 + (int)strlen(zInput)*2 + 1; zRet = sqlite3_malloc(nRet); if( zRet ){ int i; char *z = zRet; *(z++) = '"'; for(i=0; zInput[i]; i++){ if( zInput[i]=='"' ) *(z++) = '"'; |
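A standalone sketch of the quoting rule used by fts3QuoteId() above: wrap the identifier in double-quotes and double any embedded double-quote character. The buffer size mirrors the 2 + strlen()*2 + 1 bound computed on the patched line. This is an illustrative re-implementation using plain malloc(), not the FTS3 function itself.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a newly allocated copy of zIn quoted as an SQL identifier:
** surrounding double-quotes added, embedded '"' characters doubled. */
static char *quoteId(const char *zIn){
  /* Worst case every input byte is '"' (written twice), plus the two
  ** surrounding quotes and the nul terminator. */
  char *zOut = malloc(2 + strlen(zIn)*2 + 1);
  if( zOut ){
    char *z = zOut;
    int i;
    *(z++) = '"';
    for(i=0; zIn[i]; i++){
      if( zIn[i]=='"' ) *(z++) = '"';
      *(z++) = zIn[i];
    }
    *(z++) = '"';
    *z = '\0';
  }
  return zOut;
}

int main(void){
  char *z = quoteId("col\"name");
  if( z ) printf("%s\n", z);       /* prints "col""name" */
  free(z);
  return 0;
}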
︙ | ︙ | |||
993 994 995 996 997 998 999 | /* Loop through the returned columns. Set nStr to the number of bytes of ** space required to store a copy of each column name, including the ** nul-terminator byte. */ nCol = sqlite3_column_count(pStmt); for(i=0; i<nCol; i++){ const char *zCol = sqlite3_column_name(pStmt, i); | | | | 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 | /* Loop through the returned columns. Set nStr to the number of bytes of ** space required to store a copy of each column name, including the ** nul-terminator byte. */ nCol = sqlite3_column_count(pStmt); for(i=0; i<nCol; i++){ const char *zCol = sqlite3_column_name(pStmt, i); nStr += (int)strlen(zCol) + 1; } /* Allocate and populate the array to return. */ azCol = (const char **)sqlite3_malloc(sizeof(char *) * nCol + nStr); if( azCol==0 ){ rc = SQLITE_NOMEM; }else{ char *p = (char *)&azCol[nCol]; for(i=0; i<nCol; i++){ const char *zCol = sqlite3_column_name(pStmt, i); int n = (int)strlen(zCol)+1; memcpy(p, zCol, n); azCol[i] = p; p += n; } } sqlite3_finalize(pStmt); |
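The hunk above packs the azCol[] pointer array and the column-name strings into a single sqlite3_malloc() allocation, with the string bytes stored immediately after the nCol pointers. A minimal standalone sketch of the same layout, using plain malloc() and an invented list of names for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void){
  const char *aIn[] = { "docid", "content", "langid" };   /* Invented names */
  int nCol = 3;
  int nStr = 0;
  int i;
  const char **azCol;

  /* Total bytes of string data, including nul terminators. */
  for(i=0; i<nCol; i++) nStr += (int)strlen(aIn[i]) + 1;

  /* One allocation: nCol pointers followed by the string bytes. */
  azCol = malloc(sizeof(char *) * nCol + nStr);
  if( azCol ){
    char *p = (char *)&azCol[nCol];        /* String area starts here */
    for(i=0; i<nCol; i++){
      int n = (int)strlen(aIn[i]) + 1;
      memcpy(p, aIn[i], n);
      azCol[i] = p;
      p += n;
    }
    for(i=0; i<nCol; i++) printf("%s\n", azCol[i]);
    free((void *)azCol);                   /* Releases pointers and strings together */
  }
  return 0;
}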
︙ | ︙ | |||
1219 1220 1221 1222 1223 1224 1225 | /* If a languageid= option was specified, remove the language id ** column from the aCol[] array. */ if( rc==SQLITE_OK && zLanguageid ){ int j; for(j=0; j<nCol; j++){ if( sqlite3_stricmp(zLanguageid, aCol[j])==0 ){ | > | | 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 | /* If a languageid= option was specified, remove the language id ** column from the aCol[] array. */ if( rc==SQLITE_OK && zLanguageid ){ int j; for(j=0; j<nCol; j++){ if( sqlite3_stricmp(zLanguageid, aCol[j])==0 ){ int k; for(k=j; k<nCol; k++) aCol[k] = aCol[k+1]; nCol--; break; } } } } } |
︙ | ︙ | |||
1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 | p->nColumn = nCol; p->nPendingData = 0; p->azColumn = (char **)&p[1]; p->pTokenizer = pTokenizer; p->nMaxPendingData = FTS3_MAX_PENDING_DATA; p->bHasDocsize = (isFts4 && bNoDocsize==0); p->bHasStat = isFts4; p->bDescIdx = bDescIdx; p->zContentTbl = zContent; p->zLanguageid = zLanguageid; zContent = 0; zLanguageid = 0; TESTONLY( p->inTransaction = -1 ); TESTONLY( p->mxSavepoint = -1 ); | > > | 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 | p->nColumn = nCol; p->nPendingData = 0; p->azColumn = (char **)&p[1]; p->pTokenizer = pTokenizer; p->nMaxPendingData = FTS3_MAX_PENDING_DATA; p->bHasDocsize = (isFts4 && bNoDocsize==0); p->bHasStat = isFts4; p->bFts4 = isFts4; p->bDescIdx = bDescIdx; p->bAutoincrmerge = 0xff; /* 0xff means setting unknown */ p->zContentTbl = zContent; p->zLanguageid = zLanguageid; zContent = 0; zLanguageid = 0; TESTONLY( p->inTransaction = -1 ); TESTONLY( p->mxSavepoint = -1 ); |
︙ | ︙ | |||
1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 | /* If this is an xCreate call, create the underlying tables in the ** database. TODO: For xConnect(), it could verify that said tables exist. */ if( isCreate ){ rc = fts3CreateTables(p); } /* Figure out the page-size for the database. This is required in order to ** estimate the cost of loading large doclists from the database. */ fts3DatabasePageSize(&rc, p); p->nNodeSize = p->nPgsz-35; /* Declare the table schema to SQLite. */ | > > > > > > > > > > | 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 | /* If this is an xCreate call, create the underlying tables in the ** database. TODO: For xConnect(), it could verify that said tables exist. */ if( isCreate ){ rc = fts3CreateTables(p); } /* Check to see if a legacy fts3 table has been "upgraded" by the ** addition of a %_stat table so that it can use incremental merge. */ if( !isFts4 && !isCreate ){ int rc2 = SQLITE_OK; fts3DbExec(&rc2, db, "SELECT 1 FROM %Q.'%q_stat' WHERE id=2", p->zDb, p->zName); if( rc2==SQLITE_OK ) p->bHasStat = 1; } /* Figure out the page-size for the database. This is required in order to ** estimate the cost of loading large doclists from the database. */ fts3DatabasePageSize(&rc, p); p->nNodeSize = p->nPgsz-35; /* Declare the table schema to SQLite. */ |
︙ | ︙ | |||
2326 2327 2328 2329 2330 2331 2332 | fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i2); fts3PoslistCopy(&p, &p2); fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); } } *paOut = aOut; | | | 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 | fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i2); fts3PoslistCopy(&p, &p2); fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); } } *paOut = aOut; *pnOut = (int)(p-aOut); assert( *pnOut<=n1+n2+FTS3_VARINT_MAX-1 ); return SQLITE_OK; } /* ** This function does a "phrase" merge of two doclists. In a phrase merge, ** the output contains a copy of each position from the right-hand input |
︙ | ︙ | |||
2390 2391 2392 2393 2394 2395 2396 | fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1); }else{ fts3PoslistCopy(0, &p2); fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); } } | | | 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 | fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1); }else{ fts3PoslistCopy(0, &p2); fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); } } *pnRight = (int)(p - aOut); } /* ** Argument pList points to a position list nList bytes in size. This ** function checks to see if the position list contains any entries for ** a token in position 0 (of any column). If so, it writes argument iDelta ** to the output buffer pOut, followed by a position list consisting only |
︙ | ︙ | |||
2666 2667 2668 2669 2670 2671 2672 | /* ** Set up a cursor object for iterating through a full-text index or a ** single level therein. */ int sqlite3Fts3SegReaderCursor( Fts3Table *p, /* FTS3 table handle */ | | < < < < < | 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 | /* ** Set up a cursor object for iterating through a full-text index or a ** single level therein. */ int sqlite3Fts3SegReaderCursor( Fts3Table *p, /* FTS3 table handle */ int iLangid, /* Language-id to search */ int iIndex, /* Index to search (from 0 to p->nIndex-1) */ int iLevel, /* Level of segments to scan */ const char *zTerm, /* Term to query for */ int nTerm, /* Size of zTerm in bytes */ int isPrefix, /* True for a prefix search */ int isScan, /* True to scan from zTerm to EOF */ Fts3MultiSegReader *pCsr /* Cursor object to populate */ ){ assert( iIndex>=0 && iIndex<p->nIndex ); assert( iLevel==FTS3_SEGCURSOR_ALL || iLevel==FTS3_SEGCURSOR_PENDING || iLevel>=0 ); assert( iLevel<FTS3_SEGDIR_MAXLEVEL ); assert( FTS3_SEGCURSOR_ALL<0 && FTS3_SEGCURSOR_PENDING<0 ); assert( isPrefix==0 || isScan==0 ); memset(pCsr, 0, sizeof(Fts3MultiSegReader)); return fts3SegReaderCursor( p, iLangid, iIndex, iLevel, zTerm, nTerm, isPrefix, isScan, pCsr ); } /* ** In addition to its current configuration, have the Fts3MultiSegReader |
︙ | ︙ | |||
2954 2955 2956 2957 2958 2959 2960 | return SQLITE_NOMEM; } pCsr->iLangid = 0; if( nVal==2 ) pCsr->iLangid = sqlite3_value_int(apVal[1]); rc = sqlite3Fts3ExprParse(p->pTokenizer, pCsr->iLangid, | | | 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 | return SQLITE_NOMEM; } pCsr->iLangid = 0; if( nVal==2 ) pCsr->iLangid = sqlite3_value_int(apVal[1]); rc = sqlite3Fts3ExprParse(p->pTokenizer, pCsr->iLangid, p->azColumn, p->bFts4, p->nColumn, iCol, zQuery, -1, &pCsr->pExpr ); if( rc!=SQLITE_OK ){ if( rc==SQLITE_ERROR ){ static const char *zErr = "malformed MATCH expression: [%s]"; p->base.zErrMsg = sqlite3_mprintf(zErr, zQuery); } return rc; |
︙ | ︙ | |||
3097 3098 3099 3100 3101 3102 3103 | } /* ** Implementation of xSync() method. Flush the contents of the pending-terms ** hash-table to the database. */ static int fts3SyncMethod(sqlite3_vtab *pVtab){ | > > > > > > > > > > > > > > > > > > > > > > > | > > > > > > > > > > > | | > | 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 | } /* ** Implementation of xSync() method. Flush the contents of the pending-terms ** hash-table to the database. */ static int fts3SyncMethod(sqlite3_vtab *pVtab){ /* Following an incremental-merge operation, assuming that the input ** segments are not completely consumed (the usual case), they are updated ** in place to remove the entries that have already been merged. This ** involves updating the leaf block that contains the smallest unmerged ** entry and each block (if any) between the leaf and the root node. So ** if the height of the input segment b-trees is N, and input segments ** are merged eight at a time, updating the input segments at the end ** of an incremental-merge requires writing (8*(1+N)) blocks. N is usually ** small - often between 0 and 2. So the overhead of the incremental ** merge is somewhere between 8 and 24 blocks. To avoid this overhead ** dwarfing the actual productive work accomplished, the incremental merge ** is only attempted if it will write at least 64 leaf blocks. Hence ** nMinMerge. ** ** Of course, updating the input segments also involves deleting a bunch ** of blocks from the segments table. But this is not considered overhead ** as it would also be required by a crisis-merge that used the same input ** segments. */ const u32 nMinMerge = 64; /* Minimum amount of incr-merge work to do */ Fts3Table *p = (Fts3Table*)pVtab; int rc = sqlite3Fts3PendingTermsFlush(p); if( rc==SQLITE_OK && p->bAutoincrmerge==1 && p->nLeafAdd>(nMinMerge/16) ){ int mxLevel = 0; /* Maximum relative level value in db */ int A; /* Incr-merge parameter A */ rc = sqlite3Fts3MaxLevel(p, &mxLevel); assert( rc==SQLITE_OK || mxLevel==0 ); A = p->nLeafAdd * mxLevel; A += (A/2); if( A>(int)nMinMerge ) rc = sqlite3Fts3Incrmerge(p, A, 8); } sqlite3Fts3SegmentsClose(p); return rc; } /* ** Implementation of xBegin() method. This is a no-op. */ static int fts3BeginMethod(sqlite3_vtab *pVtab){ Fts3Table *p = (Fts3Table*)pVtab; UNUSED_PARAMETER(pVtab); assert( p->pSegments==0 ); assert( p->nPendingData==0 ); assert( p->inTransaction!=1 ); TESTONLY( p->inTransaction = 1 ); TESTONLY( p->mxSavepoint = -1; ); p->nLeafAdd = 0; return SQLITE_OK; } /* ** Implementation of xCommit() method. This is a no-op. The contents of ** the pending-terms hash-table have already been flushed into the database ** by fts3SyncMethod(). |
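A worked example of the xSync() heuristic above, with invented numbers: suppose a transaction wrote nLeafAdd = 30 leaf blocks, the largest relative level in the index is mxLevel = 3, and the automerge flag is set.

#include <stdio.h>

int main(void){
  const unsigned nMinMerge = 64;          /* Same constant as fts3SyncMethod() */
  int nLeafAdd = 30;                      /* Invented: leaf blocks added this transaction */
  int mxLevel = 3;                        /* Invented: max relative level in the db */
  int A;

  if( nLeafAdd > (int)(nMinMerge/16) ){   /* 30 > 4: enough new content to bother */
    A = nLeafAdd * mxLevel;               /* A = 90 */
    A += (A/2);                           /* A = 135 */
    if( A > (int)nMinMerge ){             /* 135 > 64: do the incremental merge */
      printf("would call sqlite3Fts3Incrmerge(p, %d, 8)\n", A);
    }
  }
  return 0;
}

So in this case roughly 135 units of incremental-merge work would be requested, merging up to eight segments at a time; a transaction that added only a handful of leaf blocks would skip the merge entirely.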
︙ | ︙ | |||
3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 | /* ** The xSavepoint() method. ** ** Flush the contents of the pending-terms table to disk. */ static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ UNUSED_PARAMETER(iSavepoint); assert( ((Fts3Table *)pVtab)->inTransaction ); assert( ((Fts3Table *)pVtab)->mxSavepoint < iSavepoint ); TESTONLY( ((Fts3Table *)pVtab)->mxSavepoint = iSavepoint ); | > > | > > | 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 | /* ** The xSavepoint() method. ** ** Flush the contents of the pending-terms table to disk. */ static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ int rc = SQLITE_OK; UNUSED_PARAMETER(iSavepoint); assert( ((Fts3Table *)pVtab)->inTransaction ); assert( ((Fts3Table *)pVtab)->mxSavepoint < iSavepoint ); TESTONLY( ((Fts3Table *)pVtab)->mxSavepoint = iSavepoint ); if( ((Fts3Table *)pVtab)->bIgnoreSavepoint==0 ){ rc = fts3SyncMethod(pVtab); } return rc; } /* ** The xRelease() method. ** ** This is a no-op. */ |
︙ | ︙ | |||
3771 3772 3773 3774 3775 3776 3777 | char *p1 = aPoslist; char *p2 = aOut; assert( iPrev>=0 ); fts3PoslistPhraseMerge(&aOut, iToken-iPrev, 0, 1, &p1, &p2); sqlite3_free(aPoslist); aPoslist = pList; | | | 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 | char *p1 = aPoslist; char *p2 = aOut; assert( iPrev>=0 ); fts3PoslistPhraseMerge(&aOut, iToken-iPrev, 0, 1, &p1, &p2); sqlite3_free(aPoslist); aPoslist = pList; nPoslist = (int)(aOut - aPoslist); if( nPoslist==0 ){ sqlite3_free(aPoslist); pPhrase->doclist.pList = 0; pPhrase->doclist.nList = 0; return SQLITE_OK; } } |
︙ | ︙ | |||
3815 3816 3817 3818 3819 3820 3821 | sqlite3_free(aPoslist); return SQLITE_NOMEM; } pPhrase->doclist.pList = aOut; if( fts3PoslistPhraseMerge(&aOut, nDistance, 0, 1, &p1, &p2) ){ pPhrase->doclist.bFreeList = 1; | | | 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 | sqlite3_free(aPoslist); return SQLITE_NOMEM; } pPhrase->doclist.pList = aOut; if( fts3PoslistPhraseMerge(&aOut, nDistance, 0, 1, &p1, &p2) ){ pPhrase->doclist.bFreeList = 1; pPhrase->doclist.nList = (int)(aOut - pPhrase->doclist.pList); }else{ sqlite3_free(aOut); pPhrase->doclist.pList = 0; pPhrase->doclist.nList = 0; } sqlite3_free(aPoslist); } |
︙ | ︙ | |||
3911 3912 3913 3914 3915 3916 3917 | iDocid += (iMul * iDelta); pNext = pDocid; fts3PoslistCopy(0, &pDocid); while( pDocid<pEnd && *pDocid==0 ) pDocid++; iMul = (bDescIdx ? -1 : 1); } | | | | 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 | iDocid += (iMul * iDelta); pNext = pDocid; fts3PoslistCopy(0, &pDocid); while( pDocid<pEnd && *pDocid==0 ) pDocid++; iMul = (bDescIdx ? -1 : 1); } *pnList = (int)(pEnd - pNext); *ppIter = pNext; *piDocid = iDocid; }else{ int iMul = (bDescIdx ? -1 : 1); sqlite3_int64 iDelta; fts3GetReverseVarint(&p, aDoclist, &iDelta); *piDocid -= (iMul * iDelta); if( p==aDoclist ){ *pbEof = 1; }else{ char *pSave = p; fts3ReversePoslist(aDoclist, &p); *pnList = (int)(pSave - p); } *ppIter = p; } } /* ** Attempt to move the phrase iterator to point to the next matching docid. |
︙ | ︙ | |||
3985 3986 3987 3988 3989 3990 3991 | if( pTab->bDescIdx==0 || pDL->pNextDocid==0 ){ pDL->iDocid += iDelta; }else{ pDL->iDocid -= iDelta; } pDL->pList = pIter; fts3PoslistCopy(0, &pIter); | | | 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 | if( pTab->bDescIdx==0 || pDL->pNextDocid==0 ){ pDL->iDocid += iDelta; }else{ pDL->iDocid -= iDelta; } pDL->pList = pIter; fts3PoslistCopy(0, &pIter); pDL->nList = (int)(pIter - pDL->pList); /* pIter now points just past the 0x00 that terminates the position- ** list for document pDL->iDocid. However, if this position-list was ** edited in place by fts3EvalNearTrim(), then pIter may not actually ** point to the start of the next docid value. The following line deals ** with this case by advancing pIter past the zero-padding added by ** fts3EvalNearTrim(). */ |
︙ | ︙ | |||
4326 4327 4328 4329 4330 4331 4332 | int nToken = 0; int nOr = 0; /* Allocate a MultiSegReader for each token in the expression. */ fts3EvalAllocateReaders(pCsr, pCsr->pExpr, &nToken, &nOr, &rc); /* Determine which, if any, tokens in the expression should be deferred. */ | | | | | 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 | int nToken = 0; int nOr = 0; /* Allocate a MultiSegReader for each token in the expression. */ fts3EvalAllocateReaders(pCsr, pCsr->pExpr, &nToken, &nOr, &rc); /* Determine which, if any, tokens in the expression should be deferred. */ if( rc==SQLITE_OK && nToken>1 && pTab->bFts4 ){ Fts3TokenAndCost *aTC; Fts3Expr **apOr; aTC = (Fts3TokenAndCost *)sqlite3_malloc( sizeof(Fts3TokenAndCost) * nToken + sizeof(Fts3Expr *) * nOr * 2 ); apOr = (Fts3Expr **)&aTC[nToken]; if( !aTC ){ rc = SQLITE_NOMEM; }else{ int ii; Fts3TokenAndCost *pTC = aTC; Fts3Expr **ppOr = apOr; fts3EvalTokenCosts(pCsr, 0, pCsr->pExpr, &pTC, &ppOr, &rc); nToken = (int)(pTC-aTC); nOr = (int)(ppOr-apOr); if( rc==SQLITE_OK ){ rc = fts3EvalSelectDeferred(pCsr, 0, aTC, nToken); for(ii=0; rc==SQLITE_OK && ii<nOr; ii++){ rc = fts3EvalSelectDeferred(pCsr, apOr[ii], aTC, nToken); } } |
︙ | ︙ | |||
4416 4417 4418 4419 4420 4421 4422 | assert( pPhrase->doclist.pList ); p2 = pOut = pPhrase->doclist.pList; res = fts3PoslistNearMerge( &pOut, aTmp, nParam1, nParam2, paPoslist, &p2 ); if( res ){ | | | 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 | assert( pPhrase->doclist.pList ); p2 = pOut = pPhrase->doclist.pList; res = fts3PoslistNearMerge( &pOut, aTmp, nParam1, nParam2, paPoslist, &p2 ); if( res ){ nNew = (int)(pOut - pPhrase->doclist.pList) - 1; assert( pPhrase->doclist.pList[nNew]=='\0' ); assert( nNew<=pPhrase->doclist.nList && nNew>0 ); memset(&pPhrase->doclist.pList[nNew], 0, pPhrase->doclist.nList - nNew); pPhrase->doclist.nList = nNew; *paPoslist = pPhrase->doclist.pList; *pnToken = pPhrase->nToken; } |
︙ | ︙ | |||
5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 | memset(&pPhrase->doclist, 0, sizeof(Fts3Doclist)); for(i=0; i<pPhrase->nToken; i++){ fts3SegReaderCursorFree(pPhrase->aToken[i].pSegcsr); pPhrase->aToken[i].pSegcsr = 0; } } } /* ** Return SQLITE_CORRUPT_VTAB. */ #ifdef SQLITE_DEBUG int sqlite3Fts3Corrupt(){ return SQLITE_CORRUPT_VTAB; | > | 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 | memset(&pPhrase->doclist, 0, sizeof(Fts3Doclist)); for(i=0; i<pPhrase->nToken; i++){ fts3SegReaderCursorFree(pPhrase->aToken[i].pSegcsr); pPhrase->aToken[i].pSegcsr = 0; } } } /* ** Return SQLITE_CORRUPT_VTAB. */ #ifdef SQLITE_DEBUG int sqlite3Fts3Corrupt(){ return SQLITE_CORRUPT_VTAB; |
︙ | ︙ |
Changes to ext/fts3/fts3Int.h.
︙ | ︙ | |||
63 64 65 66 67 68 69 70 71 72 73 74 75 76 | */ #define SizeofArray(X) ((int)(sizeof(X)/sizeof(X[0]))) #ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) #endif /* ** Maximum length of a varint encoded integer. The varint format is different ** from that used by SQLite, so the maximum length is 10, not 9. */ #define FTS3_VARINT_MAX 10 | > > > | 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 | */ #define SizeofArray(X) ((int)(sizeof(X)/sizeof(X[0]))) #ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) #endif #ifndef MAX # define MAX(x,y) ((x)>(y)?(x):(y)) #endif /* ** Maximum length of a varint encoded integer. The varint format is different ** from that used by SQLite, so the maximum length is 10, not 9. */ #define FTS3_VARINT_MAX 10 |
︙ | ︙ | |||
117 118 119 120 121 122 123 | ** false. */ #ifdef SQLITE_COVERAGE_TEST # define ALWAYS(x) (1) # define NEVER(X) (0) #else # define ALWAYS(x) (x) | | > | 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 | ** false. */ #ifdef SQLITE_COVERAGE_TEST # define ALWAYS(x) (1) # define NEVER(X) (0) #else # define ALWAYS(x) (x) # define NEVER(x) (x) #endif /* ** Internal types used by SQLite. */ typedef unsigned char u8; /* 1-byte (or larger) unsigned integer */ typedef short int i16; /* 2-byte (or larger) signed integer */ typedef unsigned int u32; /* 4-byte unsigned integer */ typedef sqlite3_uint64 u64; /* 8-byte unsigned integer */ typedef sqlite3_int64 i64; /* 8-byte signed integer */ /* ** Macro used to suppress compiler warnings for unused parameters. */ #define UNUSED_PARAMETER(x) (void)(x) /* |
︙ | ︙ | |||
189 190 191 192 193 194 195 196 197 198 199 | const char *zDb; /* logical database name */ const char *zName; /* virtual table name */ int nColumn; /* number of named columns in virtual table */ char **azColumn; /* column names. malloced */ sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ char *zContentTbl; /* content=xxx option, or NULL */ char *zLanguageid; /* languageid=xxx option, or NULL */ /* Precompiled statements used by the implementation. Each of these ** statements is run and reset within a single virtual table API call. */ | > > | > > | < | < < > | < > > > > > > | 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 | const char *zDb; /* logical database name */ const char *zName; /* virtual table name */ int nColumn; /* number of named columns in virtual table */ char **azColumn; /* column names. malloced */ sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ char *zContentTbl; /* content=xxx option, or NULL */ char *zLanguageid; /* languageid=xxx option, or NULL */ u8 bAutoincrmerge; /* True if automerge=1 */ u32 nLeafAdd; /* Number of leaf blocks added this trans */ /* Precompiled statements used by the implementation. Each of these ** statements is run and reset within a single virtual table API call. */ sqlite3_stmt *aStmt[37]; char *zReadExprlist; char *zWriteExprlist; int nNodeSize; /* Soft limit for node size */ u8 bFts4; /* True for FTS4, false for FTS3 */ u8 bHasStat; /* True if %_stat table exists */ u8 bHasDocsize; /* True if %_docsize table exists */ u8 bDescIdx; /* True if doclists are in reverse order */ u8 bIgnoreSavepoint; /* True to ignore xSavepoint invocations */ int nPgsz; /* Page size for host database */ char *zSegmentsTbl; /* Name of %_segments table */ sqlite3_blob *pSegments; /* Blob handle open on %_segments table */ /* ** The following array of hash tables is used to buffer pending index ** updates during transactions. All pending updates buffered at any one ** time must share a common language-id (see the FTS4 langid= feature). ** The current language id is stored in variable iPrevLangid. ** ** A single FTS4 table may have multiple full-text indexes. For each index ** there is an entry in the aIndex[] array. Index 0 is an index of all the ** terms that appear in the document set. Each subsequent index in aIndex[] ** is an index of prefixes of a specific length. ** ** Variable nPendingData contains an estimate the memory consumed by the ** pending data structures, including hash table overhead, but not including ** malloc overhead. When nPendingData exceeds nMaxPendingData, all hash ** tables are flushed to disk. Variable iPrevDocid is the docid of the most ** recently inserted record. */ int nIndex; /* Size of aIndex[] */ struct Fts3Index { int nPrefix; /* Prefix length (0 for main terms index) */ Fts3Hash hPending; /* Pending terms table for this index */ } *aIndex; int nMaxPendingData; /* Max pending data before flush to disk */ |
︙ | ︙ | |||
417 418 419 420 421 422 423 424 425 426 427 428 429 430 | int sqlite3Fts3SelectDocsize(Fts3Table *, sqlite3_int64, sqlite3_stmt **); void sqlite3Fts3FreeDeferredTokens(Fts3Cursor *); int sqlite3Fts3DeferToken(Fts3Cursor *, Fts3PhraseToken *, int); int sqlite3Fts3CacheDeferredDoclists(Fts3Cursor *); void sqlite3Fts3FreeDeferredDoclists(Fts3Cursor *); void sqlite3Fts3SegmentsClose(Fts3Table *); /* Special values interpreted by sqlite3SegReaderCursor() */ #define FTS3_SEGCURSOR_PENDING -1 #define FTS3_SEGCURSOR_ALL -2 int sqlite3Fts3SegReaderStart(Fts3Table*, Fts3MultiSegReader*, Fts3SegFilter*); int sqlite3Fts3SegReaderStep(Fts3Table *, Fts3MultiSegReader *); | > | 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 | int sqlite3Fts3SelectDocsize(Fts3Table *, sqlite3_int64, sqlite3_stmt **); void sqlite3Fts3FreeDeferredTokens(Fts3Cursor *); int sqlite3Fts3DeferToken(Fts3Cursor *, Fts3PhraseToken *, int); int sqlite3Fts3CacheDeferredDoclists(Fts3Cursor *); void sqlite3Fts3FreeDeferredDoclists(Fts3Cursor *); void sqlite3Fts3SegmentsClose(Fts3Table *); int sqlite3Fts3MaxLevel(Fts3Table *, int *); /* Special values interpreted by sqlite3SegReaderCursor() */ #define FTS3_SEGCURSOR_PENDING -1 #define FTS3_SEGCURSOR_ALL -2 int sqlite3Fts3SegReaderStart(Fts3Table*, Fts3MultiSegReader*, Fts3SegFilter*); int sqlite3Fts3SegReaderStep(Fts3Table *, Fts3MultiSegReader *); |
︙ | ︙ | |||
468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 | /* Output values. Valid only after Fts3SegReaderStep() returns SQLITE_ROW. */ char *zTerm; /* Pointer to term buffer */ int nTerm; /* Size of zTerm in bytes */ char *aDoclist; /* Pointer to doclist buffer */ int nDoclist; /* Size of aDoclist[] in bytes */ }; /* fts3.c */ int sqlite3Fts3PutVarint(char *, sqlite3_int64); int sqlite3Fts3GetVarint(const char *, sqlite_int64 *); int sqlite3Fts3GetVarint32(const char *, int *); int sqlite3Fts3VarintLen(sqlite3_uint64); void sqlite3Fts3Dequote(char *); void sqlite3Fts3DoclistPrev(int,char*,int,char**,sqlite3_int64*,int*,u8*); int sqlite3Fts3EvalPhraseStats(Fts3Cursor *, Fts3Expr *, u32 *); int sqlite3Fts3FirstFilter(sqlite3_int64, char *, int, char *); /* fts3_tokenizer.c */ const char *sqlite3Fts3NextToken(const char *, int *); int sqlite3Fts3InitHashTable(sqlite3 *, Fts3Hash *, const char *); int sqlite3Fts3InitTokenizer(Fts3Hash *pHash, const char *, sqlite3_tokenizer **, char ** ); | > > > | 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 | /* Output values. Valid only after Fts3SegReaderStep() returns SQLITE_ROW. */ char *zTerm; /* Pointer to term buffer */ int nTerm; /* Size of zTerm in bytes */ char *aDoclist; /* Pointer to doclist buffer */ int nDoclist; /* Size of aDoclist[] in bytes */ }; int sqlite3Fts3Incrmerge(Fts3Table*,int,int); /* fts3.c */ int sqlite3Fts3PutVarint(char *, sqlite3_int64); int sqlite3Fts3GetVarint(const char *, sqlite_int64 *); int sqlite3Fts3GetVarint32(const char *, int *); int sqlite3Fts3VarintLen(sqlite3_uint64); void sqlite3Fts3Dequote(char *); void sqlite3Fts3DoclistPrev(int,char*,int,char**,sqlite3_int64*,int*,u8*); int sqlite3Fts3EvalPhraseStats(Fts3Cursor *, Fts3Expr *, u32 *); int sqlite3Fts3FirstFilter(sqlite3_int64, char *, int, char *); void sqlite3Fts3CreateStatTable(int*, Fts3Table*); /* fts3_tokenizer.c */ const char *sqlite3Fts3NextToken(const char *, int *); int sqlite3Fts3InitHashTable(sqlite3 *, Fts3Hash *, const char *); int sqlite3Fts3InitTokenizer(Fts3Hash *pHash, const char *, sqlite3_tokenizer **, char ** ); |
︙ | ︙ |
Changes to ext/fts3/fts3_aux.c.
︙ | ︙ | |||
75 76 77 78 79 80 81 | *pzErr = sqlite3_mprintf( "wrong number of arguments to fts4aux constructor" ); return SQLITE_ERROR; } zDb = argv[1]; | | | | 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 | *pzErr = sqlite3_mprintf( "wrong number of arguments to fts4aux constructor" ); return SQLITE_ERROR; } zDb = argv[1]; nDb = (int)strlen(zDb); zFts3 = argv[3]; nFts3 = (int)strlen(zFts3); rc = sqlite3_declare_vtab(db, FTS3_TERMS_SCHEMA); if( rc!=SQLITE_OK ) return rc; nByte = sizeof(Fts3auxTable) + sizeof(Fts3Table) + nDb + nFts3 + 2; p = (Fts3auxTable *)sqlite3_malloc(nByte); if( !p ) return SQLITE_NOMEM; |
︙ | ︙ |
Changes to ext/fts3/fts3_snippet.c.
︙ | ︙ | |||
790 791 792 793 794 795 796 | static int fts3MatchinfoCheck( Fts3Table *pTab, char cArg, char **pzErr ){ if( (cArg==FTS3_MATCHINFO_NPHRASE) || (cArg==FTS3_MATCHINFO_NCOL) | | | | 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 | static int fts3MatchinfoCheck( Fts3Table *pTab, char cArg, char **pzErr ){ if( (cArg==FTS3_MATCHINFO_NPHRASE) || (cArg==FTS3_MATCHINFO_NCOL) || (cArg==FTS3_MATCHINFO_NDOC && pTab->bFts4) || (cArg==FTS3_MATCHINFO_AVGLENGTH && pTab->bFts4) || (cArg==FTS3_MATCHINFO_LENGTH && pTab->bHasDocsize) || (cArg==FTS3_MATCHINFO_LCS) || (cArg==FTS3_MATCHINFO_HITS) ){ return SQLITE_OK; } *pzErr = sqlite3_mprintf("unrecognized matchinfo request: %c", cArg); |
︙ | ︙ |
Changes to ext/fts3/fts3_term.c.
︙ | ︙ | |||
69 70 71 72 73 74 75 76 77 78 79 80 81 82 | int nDb; /* Result of strlen(zDb) */ int nFts3; /* Result of strlen(zFts3) */ int nByte; /* Bytes of space to allocate here */ int rc; /* value returned by declare_vtab() */ Fts3termTable *p; /* Virtual table object to return */ int iIndex = 0; if( argc==5 ){ iIndex = atoi(argv[4]); argc--; } /* The user should specify a single argument - the name of an fts3 table. */ if( argc!=4 ){ | > | 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 | int nDb; /* Result of strlen(zDb) */ int nFts3; /* Result of strlen(zFts3) */ int nByte; /* Bytes of space to allocate here */ int rc; /* value returned by declare_vtab() */ Fts3termTable *p; /* Virtual table object to return */ int iIndex = 0; UNUSED_PARAMETER(pCtx); if( argc==5 ){ iIndex = atoi(argv[4]); argc--; } /* The user should specify a single argument - the name of an fts3 table. */ if( argc!=4 ){ |
︙ | ︙ | |||
227 228 229 230 231 232 233 | pCsr->pNext += sqlite3Fts3GetVarint(pCsr->pNext, &v); pCsr->iCol = 0; pCsr->iPos = 0; } if( v==1 ){ pCsr->pNext += sqlite3Fts3GetVarint(pCsr->pNext, &v); | | | | 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 | pCsr->pNext += sqlite3Fts3GetVarint(pCsr->pNext, &v); pCsr->iCol = 0; pCsr->iPos = 0; } if( v==1 ){ pCsr->pNext += sqlite3Fts3GetVarint(pCsr->pNext, &v); pCsr->iCol += (int)v; pCsr->iPos = 0; pCsr->pNext += sqlite3Fts3GetVarint(pCsr->pNext, &v); } pCsr->iPos += (int)(v - 2); return SQLITE_OK; } /* ** xFilter - Initialize a cursor to point at the start of its data. */ |
︙ | ︙ | |||
353 354 355 356 357 358 359 | fts3termRowidMethod, /* xRowid */ 0, /* xUpdate */ 0, /* xBegin */ 0, /* xSync */ 0, /* xCommit */ 0, /* xRollback */ 0, /* xFindFunction */ | | > > > | 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 | fts3termRowidMethod, /* xRowid */ 0, /* xUpdate */ 0, /* xBegin */ 0, /* xSync */ 0, /* xCommit */ 0, /* xRollback */ 0, /* xFindFunction */ 0, /* xRename */ 0, /* xSavepoint */ 0, /* xRelease */ 0 /* xRollbackTo */ }; int rc; /* Return code */ rc = sqlite3_create_module(db, "fts4term", &fts3term_module, 0); return rc; } #endif #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ |
Changes to ext/fts3/fts3_test.c.
︙ | ︙ | |||
156 157 158 159 160 161 162 163 164 165 166 167 168 169 | NearDocument doc = {0, 0}; Tcl_Obj **apDocToken; Tcl_Obj *pRet; Tcl_Obj *pPhrasecount = 0; Tcl_Obj **apExprToken; int nExprToken; /* Must have 3 or more arguments. */ if( objc<3 || (objc%2)==0 ){ Tcl_WrongNumArgs(interp, 1, objv, "DOCUMENT EXPR ?OPTION VALUE?..."); rc = TCL_ERROR; goto near_match_out; } | > > | 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 | NearDocument doc = {0, 0}; Tcl_Obj **apDocToken; Tcl_Obj *pRet; Tcl_Obj *pPhrasecount = 0; Tcl_Obj **apExprToken; int nExprToken; UNUSED_PARAMETER(clientData); /* Must have 3 or more arguments. */ if( objc<3 || (objc%2)==0 ){ Tcl_WrongNumArgs(interp, 1, objv, "DOCUMENT EXPR ?OPTION VALUE?..."); rc = TCL_ERROR; goto near_match_out; } |
︙ | ︙ | |||
310 311 312 313 314 315 316 317 318 319 320 321 322 323 | test_fts3_node_chunksize = iArg1; test_fts3_node_chunk_threshold = iArg2; } Tcl_SetObjResult(interp, pRet); Tcl_DecrRefCount(pRet); #endif return TCL_OK; } #ifdef SQLITE_ENABLE_FTS3 /************************************************************************** ** Beginning of test tokenizer code. ** | > | 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 | test_fts3_node_chunksize = iArg1; test_fts3_node_chunk_threshold = iArg2; } Tcl_SetObjResult(interp, pRet); Tcl_DecrRefCount(pRet); #endif UNUSED_PARAMETER(clientData); return TCL_OK; } #ifdef SQLITE_ENABLE_FTS3 /************************************************************************** ** Beginning of test tokenizer code. ** |
︙ | ︙ | |||
348 349 350 351 352 353 354 355 356 357 358 359 360 361 | } test_tokenizer_cursor; static int testTokenizerCreate( int argc, const char * const *argv, sqlite3_tokenizer **ppTokenizer ){ test_tokenizer *pNew; pNew = sqlite3_malloc(sizeof(test_tokenizer)); if( !pNew ) return SQLITE_NOMEM; memset(pNew, 0, sizeof(test_tokenizer)); *ppTokenizer = (sqlite3_tokenizer *)pNew; return SQLITE_OK; | > > | 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 | } test_tokenizer_cursor; static int testTokenizerCreate( int argc, const char * const *argv, sqlite3_tokenizer **ppTokenizer ){ test_tokenizer *pNew; UNUSED_PARAMETER(argc); UNUSED_PARAMETER(argv); pNew = sqlite3_malloc(sizeof(test_tokenizer)); if( !pNew ) return SQLITE_NOMEM; memset(pNew, 0, sizeof(test_tokenizer)); *ppTokenizer = (sqlite3_tokenizer *)pNew; return SQLITE_OK; |
︙ | ︙ | |||
503 504 505 506 507 508 509 510 511 512 513 514 515 516 | Tcl_WrongNumArgs(interp, 1, objv, ""); return TCL_ERROR; } Tcl_SetObjResult(interp, Tcl_NewByteArrayObj( (const unsigned char *)&pPtr, sizeof(sqlite3_tokenizer_module *) )); #endif return TCL_OK; } /* ** End of tokenizer code. **************************************************************************/ | > | 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 | Tcl_WrongNumArgs(interp, 1, objv, ""); return TCL_ERROR; } Tcl_SetObjResult(interp, Tcl_NewByteArrayObj( (const unsigned char *)&pPtr, sizeof(sqlite3_tokenizer_module *) )); #endif UNUSED_PARAMETER(clientData); return TCL_OK; } /* ** End of tokenizer code. **************************************************************************/ |
︙ | ︙ |
Changes to ext/fts3/fts3_write.c.
︙ | ︙ | |||
20 21 22 23 24 25 26 27 28 29 30 31 32 33 | #include "fts3Int.h" #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) #include <string.h> #include <assert.h> #include <stdlib.h> /* ** When full-text index nodes are loaded from disk, the buffer that they ** are loaded into has the following number of bytes of padding at the end ** of it. i.e. if a full-text index node is 900 bytes in size, then a buffer ** of 920 bytes is allocated for it. ** ** This means that if we have a pointer into a buffer containing node data, | > > > | 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 | #include "fts3Int.h" #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) #include <string.h> #include <assert.h> #include <stdlib.h> #define FTS_MAX_APPENDABLE_HEIGHT 16 /* ** When full-text index nodes are loaded from disk, the buffer that they ** are loaded into has the following number of bytes of padding at the end ** of it. i.e. if a full-text index node is 900 bytes in size, then a buffer ** of 920 bytes is allocated for it. ** ** This means that if we have a pointer into a buffer containing node data, |
︙ | ︙ | |||
58 59 60 61 62 63 64 65 66 67 68 69 70 71 | int test_fts3_node_chunk_threshold = (4*1024)*4; # define FTS3_NODE_CHUNKSIZE test_fts3_node_chunksize # define FTS3_NODE_CHUNK_THRESHOLD test_fts3_node_chunk_threshold #else # define FTS3_NODE_CHUNKSIZE (4*1024) # define FTS3_NODE_CHUNK_THRESHOLD (FTS3_NODE_CHUNKSIZE*4) #endif typedef struct PendingList PendingList; typedef struct SegmentNode SegmentNode; typedef struct SegmentWriter SegmentWriter; /* ** An instance of the following data structure is used to build doclists | > > > > > > > > > > > > > > > > > > > > > > > | 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 | int test_fts3_node_chunk_threshold = (4*1024)*4; # define FTS3_NODE_CHUNKSIZE test_fts3_node_chunksize # define FTS3_NODE_CHUNK_THRESHOLD test_fts3_node_chunk_threshold #else # define FTS3_NODE_CHUNKSIZE (4*1024) # define FTS3_NODE_CHUNK_THRESHOLD (FTS3_NODE_CHUNKSIZE*4) #endif /* ** The two values that may be meaningfully bound to the :1 parameter in ** statements SQL_REPLACE_STAT and SQL_SELECT_STAT. */ #define FTS_STAT_DOCTOTAL 0 #define FTS_STAT_INCRMERGEHINT 1 #define FTS_STAT_AUTOINCRMERGE 2 /* ** If FTS_LOG_MERGES is defined, call sqlite3_log() to report each automatic ** and incremental merge operation that takes place. This is used for ** debugging FTS only, it should not usually be turned on in production ** systems. */ #ifdef FTS3_LOG_MERGES static void fts3LogMerge(int nMerge, sqlite3_int64 iAbsLevel){ sqlite3_log(SQLITE_OK, "%d-way merge from level %d", nMerge, (int)iAbsLevel); } #else #define fts3LogMerge(x, y) #endif typedef struct PendingList PendingList; typedef struct SegmentNode SegmentNode; typedef struct SegmentWriter SegmentWriter; /* ** An instance of the following data structure is used to build doclists |
︙ | ︙ | |||
220 221 222 223 224 225 226 | #define SQL_SELECT_SEGDIR_MAX_LEVEL 15 #define SQL_DELETE_SEGDIR_LEVEL 16 #define SQL_DELETE_SEGMENTS_RANGE 17 #define SQL_CONTENT_INSERT 18 #define SQL_DELETE_DOCSIZE 19 #define SQL_REPLACE_DOCSIZE 20 #define SQL_SELECT_DOCSIZE 21 | | | | > > > | | | > > > > | 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 | #define SQL_SELECT_SEGDIR_MAX_LEVEL 15 #define SQL_DELETE_SEGDIR_LEVEL 16 #define SQL_DELETE_SEGMENTS_RANGE 17 #define SQL_CONTENT_INSERT 18 #define SQL_DELETE_DOCSIZE 19 #define SQL_REPLACE_DOCSIZE 20 #define SQL_SELECT_DOCSIZE 21 #define SQL_SELECT_STAT 22 #define SQL_REPLACE_STAT 23 #define SQL_SELECT_ALL_PREFIX_LEVEL 24 #define SQL_DELETE_ALL_TERMS_SEGDIR 25 #define SQL_DELETE_SEGDIR_RANGE 26 #define SQL_SELECT_ALL_LANGID 27 #define SQL_FIND_MERGE_LEVEL 28 #define SQL_MAX_LEAF_NODE_ESTIMATE 29 #define SQL_DELETE_SEGDIR_ENTRY 30 #define SQL_SHIFT_SEGDIR_ENTRY 31 #define SQL_SELECT_SEGDIR 32 #define SQL_CHOMP_SEGDIR 33 #define SQL_SEGMENT_IS_APPENDABLE 34 #define SQL_SELECT_INDEXES 35 #define SQL_SELECT_MXLEVEL 36 /* ** This function is used to obtain an SQLite prepared statement handle ** for the statement identified by the second argument. If successful, ** *pp is set to the requested statement handle and SQLITE_OK returned. ** Otherwise, an SQLite error code is returned and *pp is set to 0. ** |
︙ | ︙ | |||
257 258 259 260 261 262 263 | /* 2 */ "DELETE FROM %Q.'%q_content'", /* 3 */ "DELETE FROM %Q.'%q_segments'", /* 4 */ "DELETE FROM %Q.'%q_segdir'", /* 5 */ "DELETE FROM %Q.'%q_docsize'", /* 6 */ "DELETE FROM %Q.'%q_stat'", /* 7 */ "SELECT %s WHERE rowid=?", /* 8 */ "SELECT (SELECT max(idx) FROM %Q.'%q_segdir' WHERE level = ?) + 1", | | | | | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 | /* 2 */ "DELETE FROM %Q.'%q_content'", /* 3 */ "DELETE FROM %Q.'%q_segments'", /* 4 */ "DELETE FROM %Q.'%q_segdir'", /* 5 */ "DELETE FROM %Q.'%q_docsize'", /* 6 */ "DELETE FROM %Q.'%q_stat'", /* 7 */ "SELECT %s WHERE rowid=?", /* 8 */ "SELECT (SELECT max(idx) FROM %Q.'%q_segdir' WHERE level = ?) + 1", /* 9 */ "REPLACE INTO %Q.'%q_segments'(blockid, block) VALUES(?, ?)", /* 10 */ "SELECT coalesce((SELECT max(blockid) FROM %Q.'%q_segments') + 1, 1)", /* 11 */ "REPLACE INTO %Q.'%q_segdir' VALUES(?,?,?,?,?,?)", /* Return segments in order from oldest to newest.*/ /* 12 */ "SELECT idx, start_block, leaves_end_block, end_block, root " "FROM %Q.'%q_segdir' WHERE level = ? ORDER BY idx ASC", /* 13 */ "SELECT idx, start_block, leaves_end_block, end_block, root " "FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?" "ORDER BY level DESC, idx ASC", /* 14 */ "SELECT count(*) FROM %Q.'%q_segdir' WHERE level = ?", /* 15 */ "SELECT max(level) FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?", /* 16 */ "DELETE FROM %Q.'%q_segdir' WHERE level = ?", /* 17 */ "DELETE FROM %Q.'%q_segments' WHERE blockid BETWEEN ? AND ?", /* 18 */ "INSERT INTO %Q.'%q_content' VALUES(%s)", /* 19 */ "DELETE FROM %Q.'%q_docsize' WHERE docid = ?", /* 20 */ "REPLACE INTO %Q.'%q_docsize' VALUES(?,?)", /* 21 */ "SELECT size FROM %Q.'%q_docsize' WHERE docid=?", /* 22 */ "SELECT value FROM %Q.'%q_stat' WHERE id=?", /* 23 */ "REPLACE INTO %Q.'%q_stat' VALUES(?,?)", /* 24 */ "", /* 25 */ "", /* 26 */ "DELETE FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?", /* 27 */ "SELECT DISTINCT level / (1024 * ?) FROM %Q.'%q_segdir'", /* This statement is used to determine which level to read the input from ** when performing an incremental merge. It returns the absolute level number ** of the oldest level in the db that contains at least ? segments. Or, ** if no level in the FTS index contains more than ? segments, the statement ** returns zero rows. */ /* 28 */ "SELECT level FROM %Q.'%q_segdir' GROUP BY level HAVING count(*)>=?" " ORDER BY (level %% 1024) ASC LIMIT 1", /* Estimate the upper limit on the number of leaf nodes in a new segment ** created by merging the oldest :2 segments from absolute level :1. See ** function sqlite3Fts3Incrmerge() for details. */ /* 29 */ "SELECT 2 * total(1 + leaves_end_block - start_block) " " FROM %Q.'%q_segdir' WHERE level = ? AND idx < ?", /* SQL_DELETE_SEGDIR_ENTRY ** Delete the %_segdir entry on absolute level :1 with index :2. */ /* 30 */ "DELETE FROM %Q.'%q_segdir' WHERE level = ? AND idx = ?", /* SQL_SHIFT_SEGDIR_ENTRY ** Modify the idx value for the segment with idx=:3 on absolute level :2 ** to :1. */ /* 31 */ "UPDATE %Q.'%q_segdir' SET idx = ? WHERE level=? 
AND idx=?", /* SQL_SELECT_SEGDIR ** Read a single entry from the %_segdir table. The entry from absolute ** level :1 with index value :2. */ /* 32 */ "SELECT idx, start_block, leaves_end_block, end_block, root " "FROM %Q.'%q_segdir' WHERE level = ? AND idx = ?", /* SQL_CHOMP_SEGDIR ** Update the start_block (:1) and root (:2) fields of the %_segdir ** entry located on absolute level :3 with index :4. */ /* 33 */ "UPDATE %Q.'%q_segdir' SET start_block = ?, root = ?" "WHERE level = ? AND idx = ?", /* SQL_SEGMENT_IS_APPENDABLE ** Return a single row if the segment with end_block=? is appendable. Or ** no rows otherwise. */ /* 34 */ "SELECT 1 FROM %Q.'%q_segments' WHERE blockid=? AND block IS NULL", /* SQL_SELECT_INDEXES ** Return the list of valid segment indexes for absolute level ? */ /* 35 */ "SELECT idx FROM %Q.'%q_segdir' WHERE level=? ORDER BY 1 ASC", /* SQL_SELECT_MXLEVEL ** Return the largest relative level in the FTS index or indexes. */ /* 36 */ "SELECT max( level %% 1024 ) FROM %Q.'%q_segdir'" }; int rc = SQLITE_OK; sqlite3_stmt *pStmt; assert( SizeofArray(azSql)==SizeofArray(p->aStmt) ); assert( eStmt<SizeofArray(azSql) && eStmt>=0 ); |
︙ | ︙ | |||
321 322 323 324 325 326 327 328 329 330 | for(i=0; rc==SQLITE_OK && i<nParam; i++){ rc = sqlite3_bind_value(pStmt, i+1, apVal[i]); } } *pp = pStmt; return rc; } static int fts3SelectDocsize( Fts3Table *pTab, /* FTS3 table handle */ | > < < < | < | < > > | > > > > > > > > > > > > | | 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 | for(i=0; rc==SQLITE_OK && i<nParam; i++){ rc = sqlite3_bind_value(pStmt, i+1, apVal[i]); } } *pp = pStmt; return rc; } static int fts3SelectDocsize( Fts3Table *pTab, /* FTS3 table handle */ sqlite3_int64 iDocid, /* Docid to bind for SQL_SELECT_DOCSIZE */ sqlite3_stmt **ppStmt /* OUT: Statement handle */ ){ sqlite3_stmt *pStmt = 0; /* Statement requested from fts3SqlStmt() */ int rc; /* Return code */ rc = fts3SqlStmt(pTab, SQL_SELECT_DOCSIZE, &pStmt, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pStmt, 1, iDocid); rc = sqlite3_step(pStmt); if( rc!=SQLITE_ROW || sqlite3_column_type(pStmt, 0)!=SQLITE_BLOB ){ rc = sqlite3_reset(pStmt); if( rc==SQLITE_OK ) rc = FTS_CORRUPT_VTAB; pStmt = 0; }else{ rc = SQLITE_OK; } } *ppStmt = pStmt; return rc; } int sqlite3Fts3SelectDoctotal( Fts3Table *pTab, /* Fts3 table handle */ sqlite3_stmt **ppStmt /* OUT: Statement handle */ ){ sqlite3_stmt *pStmt = 0; int rc; rc = fts3SqlStmt(pTab, SQL_SELECT_STAT, &pStmt, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int(pStmt, 1, FTS_STAT_DOCTOTAL); if( sqlite3_step(pStmt)!=SQLITE_ROW || sqlite3_column_type(pStmt, 0)!=SQLITE_BLOB ){ rc = sqlite3_reset(pStmt); if( rc==SQLITE_OK ) rc = FTS_CORRUPT_VTAB; pStmt = 0; } } *ppStmt = pStmt; return rc; } int sqlite3Fts3SelectDocsize( Fts3Table *pTab, /* Fts3 table handle */ sqlite3_int64 iDocid, /* Docid to read size data for */ sqlite3_stmt **ppStmt /* OUT: Statement handle */ ){ return fts3SelectDocsize(pTab, iDocid, ppStmt); } /* ** Similar to fts3SqlStmt(). Except, after binding the parameters in ** array apVal[] to the SQL statement identified by eStmt, the statement ** is executed. ** |
︙ | ︙ | |||
454 455 456 457 458 459 460 | ** Language 1 indexes are allocated immediately following language 0. ** ** So, for a system with nPrefix prefix indexes configured, the block of ** absolute levels that corresponds to language-id iLangid and index ** iIndex starts at absolute level ((iLangid * (nPrefix+1) + iIndex) * 1024). */ static sqlite3_int64 getAbsoluteLevel( | | | | | < | 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 | ** Language 1 indexes are allocated immediately following language 0. ** ** So, for a system with nPrefix prefix indexes configured, the block of ** absolute levels that corresponds to language-id iLangid and index ** iIndex starts at absolute level ((iLangid * (nPrefix+1) + iIndex) * 1024). */ static sqlite3_int64 getAbsoluteLevel( Fts3Table *p, /* FTS3 table handle */ int iLangid, /* Language id */ int iIndex, /* Index in p->aIndex[] */ int iLevel /* Level of segments */ ){ sqlite3_int64 iBase; /* First absolute level for iLangid/iIndex */ assert( iLangid>=0 ); assert( p->nIndex>0 ); assert( iIndex>=0 && iIndex<p->nIndex ); iBase = ((sqlite3_int64)iLangid * p->nIndex + iIndex) * FTS3_SEGDIR_MAXLEVEL; return iBase + iLevel; } /* ** Set *ppStmt to a statement handle that may be used to iterate through ** all rows in the %_segdir table, from oldest to newest. If successful, ** return SQLITE_OK. If an error occurs while preparing the statement, ** return an SQLite error code. ** |
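A worked example of the mapping implemented by getAbsoluteLevel() above, for an invented table with one terms index plus two prefix indexes (nIndex == 3) and FTS3_SEGDIR_MAXLEVEL == 1024:

#include <stdio.h>

#define FTS3_SEGDIR_MAXLEVEL 1024

static long long absoluteLevel(int iLangid, int nIndex, int iIndex, int iLevel){
  long long iBase = ((long long)iLangid * nIndex + iIndex) * FTS3_SEGDIR_MAXLEVEL;
  return iBase + iLevel;
}

int main(void){
  printf("%lld\n", absoluteLevel(0, 3, 0, 0));  /* language 0, terms index, level 0 -> 0    */
  printf("%lld\n", absoluteLevel(0, 3, 2, 2));  /* language 0, 2nd prefix index, level 2 -> 2050 */
  printf("%lld\n", absoluteLevel(1, 3, 0, 0));  /* language 1, terms index, level 0 -> 3072 */
  return 0;
}

Each (language-id, index) pair therefore owns its own block of 1024 consecutive absolute levels, so segments belonging to different languages or prefix indexes never share a level.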
︙ | ︙ | |||
1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 | if( rc==SQLITE_OK ){ /* If iNext is FTS3_MERGE_COUNT, indicating that level iLevel is already ** full, merge all segments in level iLevel into a single iLevel+1 ** segment and allocate (newly freed) index 0 at level iLevel. Otherwise, ** if iNext is less than FTS3_MERGE_COUNT, allocate index iNext. */ if( iNext>=FTS3_MERGE_COUNT ){ rc = fts3SegmentMerge(p, iLangid, iIndex, iLevel); *piIdx = 0; }else{ *piIdx = iNext; } } | > | 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 | if( rc==SQLITE_OK ){ /* If iNext is FTS3_MERGE_COUNT, indicating that level iLevel is already ** full, merge all segments in level iLevel into a single iLevel+1 ** segment and allocate (newly freed) index 0 at level iLevel. Otherwise, ** if iNext is less than FTS3_MERGE_COUNT, allocate index iNext. */ if( iNext>=FTS3_MERGE_COUNT ){ fts3LogMerge(16, getAbsoluteLevel(p, iLangid, iIndex, iLevel)); rc = fts3SegmentMerge(p, iLangid, iIndex, iLevel); *piIdx = 0; }else{ *piIdx = iNext; } } |
︙ | ︙ | |||
1081 1082 1083 1084 1085 1086 1087 | char **paBlob, /* OUT: Blob data in malloc'd buffer */ int *pnBlob, /* OUT: Size of blob data */ int *pnLoad /* OUT: Bytes actually loaded */ ){ int rc; /* Return code */ /* pnBlob must be non-NULL. paBlob may be NULL or non-NULL. */ | | | 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 | char **paBlob, /* OUT: Blob data in malloc'd buffer */ int *pnBlob, /* OUT: Size of blob data */ int *pnLoad /* OUT: Bytes actually loaded */ ){ int rc; /* Return code */ /* pnBlob must be non-NULL. paBlob may be NULL or non-NULL. */ assert( pnBlob ); if( p->pSegments ){ rc = sqlite3_blob_reopen(p->pSegments, iBlockid); }else{ if( 0==p->zSegmentsTbl ){ p->zSegmentsTbl = sqlite3_mprintf("%s_segments", p->zName); if( 0==p->zSegmentsTbl ) return SQLITE_NOMEM; |
︙ | ︙ | |||
1422 1423 1424 1425 1426 1427 1428 | ){ Fts3Table *p = (Fts3Table*)pCsr->base.pVtab; int nOvfl = 0; int ii; int rc = SQLITE_OK; int pgsz = p->nPgsz; | | | 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 | ){ Fts3Table *p = (Fts3Table*)pCsr->base.pVtab; int nOvfl = 0; int ii; int rc = SQLITE_OK; int pgsz = p->nPgsz; assert( p->bFts4 ); assert( pgsz>0 ); for(ii=0; rc==SQLITE_OK && ii<pMsr->nSegment; ii++){ Fts3SegReader *pReader = pMsr->apSegment[ii]; if( !fts3SegReaderIsPending(pReader) && !fts3SegReaderIsRootOnly(pReader) ){ |
︙ | ︙ | |||
1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 | sqlite3_bind_int64(pStmt, 1, iBlock); sqlite3_bind_blob(pStmt, 2, z, n, SQLITE_STATIC); sqlite3_step(pStmt); rc = sqlite3_reset(pStmt); } return rc; } /* ** Insert a record into the %_segdir table. */ static int fts3WriteSegdir( Fts3Table *p, /* Virtual table handle */ sqlite3_int64 iLevel, /* Value for "level" field (absolute level) */ | > > > > > > > > > > > > > > > > > > > > > | 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 | sqlite3_bind_int64(pStmt, 1, iBlock); sqlite3_bind_blob(pStmt, 2, z, n, SQLITE_STATIC); sqlite3_step(pStmt); rc = sqlite3_reset(pStmt); } return rc; } /* ** Find the largest relative level number in the table. If successful, set ** *pnMax to this value and return SQLITE_OK. Otherwise, if an error occurs, ** set *pnMax to zero and return an SQLite error code. */ int sqlite3Fts3MaxLevel(Fts3Table *p, int *pnMax){ int rc; int mxLevel = 0; sqlite3_stmt *pStmt = 0; rc = fts3SqlStmt(p, SQL_SELECT_MXLEVEL, &pStmt, 0); if( rc==SQLITE_OK ){ if( SQLITE_ROW==sqlite3_step(pStmt) ){ mxLevel = sqlite3_column_int(pStmt, 0); } rc = sqlite3_reset(pStmt); } *pnMax = mxLevel; return rc; } /* ** Insert a record into the %_segdir table. */ static int fts3WriteSegdir( Fts3Table *p, /* Virtual table handle */ sqlite3_int64 iLevel, /* Value for "level" field (absolute level) */ |
︙ | ︙ | |||
2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 | if( nData>0 && nData+nReq>p->nNodeSize ){ int rc; /* The current leaf node is full. Write it out to the database. */ rc = fts3WriteSegment(p, pWriter->iFree++, pWriter->aData, nData); if( rc!=SQLITE_OK ) return rc; /* Add the current term to the interior node tree. The term added to ** the interior tree must: ** ** a) be greater than the largest term on the leaf node just written ** to the database (still available in pWriter->zTerm), and ** | > | 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 | if( nData>0 && nData+nReq>p->nNodeSize ){ int rc; /* The current leaf node is full. Write it out to the database. */ rc = fts3WriteSegment(p, pWriter->iFree++, pWriter->aData, nData); if( rc!=SQLITE_OK ) return rc; p->nLeafAdd++; /* Add the current term to the interior node tree. The term added to ** the interior tree must: ** ** a) be greater than the largest term on the leaf node just written ** to the database (still available in pWriter->zTerm), and ** |
︙ | ︙ | |||
2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 | p, iLevel, iIdx, pWriter->iFirst, iLastLeaf, iLast, zRoot, nRoot); } }else{ /* The entire tree fits on the root node. Write it to the segdir table. */ rc = fts3WriteSegdir( p, iLevel, iIdx, 0, 0, 0, pWriter->aData, pWriter->nData); } return rc; } /* ** Release all memory held by the SegmentWriter object passed as the ** first argument. */ | > | 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 | p, iLevel, iIdx, pWriter->iFirst, iLastLeaf, iLast, zRoot, nRoot); } }else{ /* The entire tree fits on the root node. Write it to the segdir table. */ rc = fts3WriteSegdir( p, iLevel, iIdx, 0, 0, 0, pWriter->aData, pWriter->nData); } p->nLeafAdd++; return rc; } /* ** Release all memory held by the SegmentWriter object passed as the ** first argument. */ |
︙ | ︙ | |||
2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 | getAbsoluteLevel(p, iLangid, iIndex, FTS3_SEGDIR_MAXLEVEL-1) ); if( SQLITE_ROW==sqlite3_step(pStmt) ){ *pnMax = sqlite3_column_int64(pStmt, 0); } return sqlite3_reset(pStmt); } /* ** This function is used after merging multiple segments into a single large ** segment to delete the old, now redundant, segment b-trees. Specifically, ** it: ** ** 1) Deletes all %_segments entries for the segments associated with | > > > > > > > > > > > > > > > > > > > > > > > | 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 | getAbsoluteLevel(p, iLangid, iIndex, FTS3_SEGDIR_MAXLEVEL-1) ); if( SQLITE_ROW==sqlite3_step(pStmt) ){ *pnMax = sqlite3_column_int64(pStmt, 0); } return sqlite3_reset(pStmt); } /* ** Delete all entries in the %_segments table associated with the segment ** opened with seg-reader pSeg. This function does not affect the contents ** of the %_segdir table. */ static int fts3DeleteSegment( Fts3Table *p, /* FTS table handle */ Fts3SegReader *pSeg /* Segment to delete */ ){ int rc = SQLITE_OK; /* Return code */ if( pSeg->iStartBlock ){ sqlite3_stmt *pDelete; /* SQL statement to delete rows */ rc = fts3SqlStmt(p, SQL_DELETE_SEGMENTS_RANGE, &pDelete, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pDelete, 1, pSeg->iStartBlock); sqlite3_bind_int64(pDelete, 2, pSeg->iEndBlock); sqlite3_step(pDelete); rc = sqlite3_reset(pDelete); } } return rc; } /* ** This function is used after merging multiple segments into a single large ** segment to delete the old, now redundant, segment b-trees. Specifically, ** it: ** ** 1) Deletes all %_segments entries for the segments associated with |
︙ | ︙ | |||
2307 2308 2309 2310 2311 2312 2313 | Fts3Table *p, /* Virtual table handle */ int iLangid, /* Language id */ int iIndex, /* Index for p->aIndex */ int iLevel, /* Level of %_segdir entries to delete */ Fts3SegReader **apSegment, /* Array of SegReader objects */ int nReader /* Size of array apSegment */ ){ | | | < | < < < < < < | 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 | Fts3Table *p, /* Virtual table handle */ int iLangid, /* Language id */ int iIndex, /* Index for p->aIndex */ int iLevel, /* Level of %_segdir entries to delete */ Fts3SegReader **apSegment, /* Array of SegReader objects */ int nReader /* Size of array apSegment */ ){ int rc = SQLITE_OK; /* Return Code */ int i; /* Iterator variable */ sqlite3_stmt *pDelete = 0; /* SQL statement to delete rows */ for(i=0; rc==SQLITE_OK && i<nReader; i++){ rc = fts3DeleteSegment(p, apSegment[i]); } if( rc!=SQLITE_OK ){ return rc; } assert( iLevel>=0 || iLevel==FTS3_SEGCURSOR_ALL ); if( iLevel==FTS3_SEGCURSOR_ALL ){ |
︙ | ︙ | |||
2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 | /* ** Flush the contents of pendingTerms to level 0 segments. */ int sqlite3Fts3PendingTermsFlush(Fts3Table *p){ int rc = SQLITE_OK; int i; for(i=0; rc==SQLITE_OK && i<p->nIndex; i++){ rc = fts3SegmentMerge(p, p->iPrevLangid, i, FTS3_SEGCURSOR_PENDING); if( rc==SQLITE_DONE ) rc = SQLITE_OK; } sqlite3Fts3PendingTermsClear(p); return rc; } /* ** Encode N integers as varints into a blob. */ static void fts3EncodeIntArray( | > > > > > > > > > > > > > > > > > | 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 | /* ** Flush the contents of pendingTerms to level 0 segments. */ int sqlite3Fts3PendingTermsFlush(Fts3Table *p){ int rc = SQLITE_OK; int i; for(i=0; rc==SQLITE_OK && i<p->nIndex; i++){ rc = fts3SegmentMerge(p, p->iPrevLangid, i, FTS3_SEGCURSOR_PENDING); if( rc==SQLITE_DONE ) rc = SQLITE_OK; } sqlite3Fts3PendingTermsClear(p); /* Determine the auto-incr-merge setting if unknown. If enabled, ** estimate the number of leaf blocks of content to be written */ if( rc==SQLITE_OK && p->bHasStat && p->bAutoincrmerge==0xff && p->nLeafAdd>0 ){ sqlite3_stmt *pStmt = 0; rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pStmt, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int(pStmt, 1, FTS_STAT_AUTOINCRMERGE); rc = sqlite3_step(pStmt); p->bAutoincrmerge = (rc==SQLITE_ROW && sqlite3_column_int(pStmt, 0)); rc = sqlite3_reset(pStmt); } } return rc; } /* ** Encode N integers as varints into a blob. */ static void fts3EncodeIntArray( |
︙ | ︙ | |||
3010 3011 3012 3013 3014 3015 3016 | if( *pRC ) return; a = sqlite3_malloc( (sizeof(u32)+10)*nStat ); if( a==0 ){ *pRC = SQLITE_NOMEM; return; } pBlob = (char*)&a[nStat]; | | > | 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 | if( *pRC ) return; a = sqlite3_malloc( (sizeof(u32)+10)*nStat ); if( a==0 ){ *pRC = SQLITE_NOMEM; return; } pBlob = (char*)&a[nStat]; rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pStmt, 0); if( rc ){ sqlite3_free(a); *pRC = rc; return; } sqlite3_bind_int(pStmt, 1, FTS_STAT_DOCTOTAL); if( sqlite3_step(pStmt)==SQLITE_ROW ){ fts3DecodeIntArray(nStat, a, sqlite3_column_blob(pStmt, 0), sqlite3_column_bytes(pStmt, 0)); }else{ memset(a, 0, sizeof(u32)*(nStat) ); } |
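As a rough illustration of the statistics blob handled above, the following standalone sketch reads the FTS_STAT_DOCTOTAL row back out of the %_stat shadow table and decodes its varints using only the public SQLite API. It assumes the FTS table is named "t1" (so the shadow table is "t1_stat"), takes the numeric row id as a parameter because the value of FTS_STAT_DOCTOTAL is not shown in this change, and assumes the usual FTS varint encoding of seven bits per byte with the least-significant group first; treat it as an illustrative sketch rather than part of this check-in.

#include <stdio.h>
#include <sqlite3.h>

/* Decode one FTS-style varint: 7 bits per byte, least-significant group
** first, high bit set on every byte except the last.  Returns the number
** of bytes consumed.  (Assumed encoding - see the note above.) */
static int readVarint(const unsigned char *p, sqlite3_int64 *pVal){
  sqlite3_int64 v = 0;
  int shift = 0;
  int n = 0;
  do{
    v |= (sqlite3_int64)(p[n] & 0x7f) << shift;
    shift += 7;
  }while( p[n++] & 0x80 );
  *pVal = v;
  return n;
}

/* Print every value stored in the doc-total row of the (hypothetical)
** "t1_stat" shadow table.  iDocTotalId is the id of the FTS_STAT_DOCTOTAL
** row, supplied by the caller. */
static int printDocTotals(sqlite3 *db, int iDocTotalId){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
      "SELECT value FROM 't1_stat' WHERE id=?1", -1, &pStmt, 0);
  if( rc==SQLITE_OK ){
    sqlite3_bind_int(pStmt, 1, iDocTotalId);
    if( sqlite3_step(pStmt)==SQLITE_ROW ){
      const unsigned char *a = sqlite3_column_blob(pStmt, 0);
      int n = sqlite3_column_bytes(pStmt, 0);
      int i = 0;
      int iVal = 0;
      while( i<n ){
        sqlite3_int64 v = 0;
        i += readVarint(&a[i], &v);
        printf("value[%d] = %lld\n", iVal++, (long long)v);
      }
    }
    rc = sqlite3_finalize(pStmt);
  }
  return rc;
}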
︙ | ︙ | |||
3039 3040 3041 3042 3043 3044 3045 | x = 0; }else{ x = x + aSzIns[i] - aSzDel[i]; } a[i+1] = x; } fts3EncodeIntArray(nStat, a, pBlob, &nBlob); | | > | | 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 | x = 0; }else{ x = x + aSzIns[i] - aSzDel[i]; } a[i+1] = x; } fts3EncodeIntArray(nStat, a, pBlob, &nBlob); rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pStmt, 0); if( rc ){ sqlite3_free(a); *pRC = rc; return; } sqlite3_bind_int(pStmt, 1, FTS_STAT_DOCTOTAL); sqlite3_bind_blob(pStmt, 2, pBlob, nBlob, SQLITE_STATIC); sqlite3_step(pStmt); *pRC = sqlite3_reset(pStmt); sqlite3_free(a); } /* ** Merge the entire database so that there is one segment for each |
︙ | ︙ | |||
3150 3151 3152 3153 3154 3155 3156 | }else{ nEntry++; for(iCol=0; iCol<=p->nColumn; iCol++){ aSzIns[iCol] += aSz[iCol]; } } } | | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 3298 3299 3300 3301 3302 3303 3304 3305 3306 
4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 | }else{ nEntry++; for(iCol=0; iCol<=p->nColumn; iCol++){ aSzIns[iCol] += aSz[iCol]; } } } if( p->bFts4 ){ fts3UpdateDocTotals(&rc, p, aSzIns, aSzDel, nEntry); } sqlite3_free(aSz); if( pStmt ){ int rc2 = sqlite3_finalize(pStmt); if( rc==SQLITE_OK ){ rc = rc2; } } } return rc; } /* ** This function opens a cursor used to read the input data for an ** incremental merge operation. Specifically, it opens a cursor to scan ** the oldest nSeg segments (idx=0 through idx=(nSeg-1)) in absolute ** level iAbsLevel. 
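**
** For example, if absolute level iAbsLevel currently contains segments
** with idx values 0, 1, 2 and 3, calling this function with nSeg==2 opens
** a cursor over the contents of segments 0 and 1 only; segments 2 and 3
** are left untouched for a later incremental-merge step.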
*/ static int fts3IncrmergeCsr( Fts3Table *p, /* FTS3 table handle */ sqlite3_int64 iAbsLevel, /* Absolute level to open */ int nSeg, /* Number of segments to merge */ Fts3MultiSegReader *pCsr /* Cursor object to populate */ ){ int rc; /* Return Code */ sqlite3_stmt *pStmt = 0; /* Statement used to read %_segdir entry */ int nByte; /* Bytes allocated at pCsr->apSegment[] */ /* Allocate space for the Fts3MultiSegReader.aCsr[] array */ memset(pCsr, 0, sizeof(*pCsr)); nByte = sizeof(Fts3SegReader *) * nSeg; pCsr->apSegment = (Fts3SegReader **)sqlite3_malloc(nByte); if( pCsr->apSegment==0 ){ rc = SQLITE_NOMEM; }else{ memset(pCsr->apSegment, 0, nByte); rc = fts3SqlStmt(p, SQL_SELECT_LEVEL, &pStmt, 0); } if( rc==SQLITE_OK ){ int i; int rc2; sqlite3_bind_int64(pStmt, 1, iAbsLevel); assert( pCsr->nSegment==0 ); for(i=0; rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW && i<nSeg; i++){ rc = sqlite3Fts3SegReaderNew(i, 0, sqlite3_column_int64(pStmt, 1), /* segdir.start_block */ sqlite3_column_int64(pStmt, 2), /* segdir.leaves_end_block */ sqlite3_column_int64(pStmt, 3), /* segdir.end_block */ sqlite3_column_blob(pStmt, 4), /* segdir.root */ sqlite3_column_bytes(pStmt, 4), /* segdir.root */ &pCsr->apSegment[i] ); pCsr->nSegment++; } rc2 = sqlite3_reset(pStmt); if( rc==SQLITE_OK ) rc = rc2; } return rc; } typedef struct IncrmergeWriter IncrmergeWriter; typedef struct NodeWriter NodeWriter; typedef struct Blob Blob; typedef struct NodeReader NodeReader; /* ** An instance of the following structure is used as a dynamic buffer ** to build up nodes or other blobs of data in. ** ** The function blobGrowBuffer() is used to extend the allocation. */ struct Blob { char *a; /* Pointer to allocation */ int n; /* Number of valid bytes of data in a[] */ int nAlloc; /* Allocated size of a[] (nAlloc>=n) */ }; /* ** This structure is used to build up buffers containing segment b-tree ** nodes (blocks). */ struct NodeWriter { sqlite3_int64 iBlock; /* Current block id */ Blob key; /* Last key written to the current block */ Blob block; /* Current block image */ }; /* ** An object of this type contains the state required to create or append ** to an appendable b-tree segment. */ struct IncrmergeWriter { int nLeafEst; /* Space allocated for leaf blocks */ int nWork; /* Number of leaf pages flushed */ sqlite3_int64 iAbsLevel; /* Absolute level of input segments */ int iIdx; /* Index of *output* segment in iAbsLevel+1 */ sqlite3_int64 iStart; /* Block number of first allocated block */ sqlite3_int64 iEnd; /* Block number of last allocated block */ NodeWriter aNodeWriter[FTS_MAX_APPENDABLE_HEIGHT]; }; /* ** An object of the following type is used to read data from a single ** FTS segment node. See the following functions: ** ** nodeReaderInit() ** nodeReaderNext() ** nodeReaderRelease() */ struct NodeReader { const char *aNode; int nNode; int iOff; /* Current offset within aNode[] */ /* Output variables. Containing the current node entry. */ sqlite3_int64 iChild; /* Pointer to child node */ Blob term; /* Current term */ const char *aDoclist; /* Pointer to doclist */ int nDoclist; /* Size of doclist in bytes */ }; /* ** If *pRc is not SQLITE_OK when this function is called, it is a no-op. ** Otherwise, if the allocation at pBlob->a is not already at least nMin ** bytes in size, extend (realloc) it to be so. ** ** If an OOM error occurs, set *pRc to SQLITE_NOMEM and leave pBlob->a ** unmodified. Otherwise, if the allocation succeeds, update pBlob->nAlloc ** to reflect the new size of the pBlob->a[] buffer. 
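**
** Note that the buffer only ever grows. For example, calling
** blobGrowBuffer(pBlob, 100, &rc) on a zeroed Blob and then
** blobGrowBuffer(pBlob, 50, &rc) leaves pBlob->nAlloc set to 100; the
** second call is a no-op because the allocation is already large enough.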
*/ static void blobGrowBuffer(Blob *pBlob, int nMin, int *pRc){ if( *pRc==SQLITE_OK && nMin>pBlob->nAlloc ){ int nAlloc = nMin; char *a = (char *)sqlite3_realloc(pBlob->a, nAlloc); if( a ){ pBlob->nAlloc = nAlloc; pBlob->a = a; }else{ *pRc = SQLITE_NOMEM; } } } /* ** Attempt to advance the node-reader object passed as the first argument to ** the next entry on the node. ** ** Return an error code if an error occurs (SQLITE_NOMEM is possible). ** Otherwise return SQLITE_OK. If there is no next entry on the node ** (e.g. because the current entry is the last) set NodeReader->aNode to ** NULL to indicate EOF. Otherwise, populate the NodeReader structure output ** variables for the new entry. */ static int nodeReaderNext(NodeReader *p){ int bFirst = (p->term.n==0); /* True for first term on the node */ int nPrefix = 0; /* Bytes to copy from previous term */ int nSuffix = 0; /* Bytes to append to the prefix */ int rc = SQLITE_OK; /* Return code */ assert( p->aNode ); if( p->iChild && bFirst==0 ) p->iChild++; if( p->iOff>=p->nNode ){ /* EOF */ p->aNode = 0; }else{ if( bFirst==0 ){ p->iOff += sqlite3Fts3GetVarint32(&p->aNode[p->iOff], &nPrefix); } p->iOff += sqlite3Fts3GetVarint32(&p->aNode[p->iOff], &nSuffix); blobGrowBuffer(&p->term, nPrefix+nSuffix, &rc); if( rc==SQLITE_OK ){ memcpy(&p->term.a[nPrefix], &p->aNode[p->iOff], nSuffix); p->term.n = nPrefix+nSuffix; p->iOff += nSuffix; if( p->iChild==0 ){ p->iOff += sqlite3Fts3GetVarint32(&p->aNode[p->iOff], &p->nDoclist); p->aDoclist = &p->aNode[p->iOff]; p->iOff += p->nDoclist; } } } assert( p->iOff<=p->nNode ); return rc; } /* ** Release all dynamic resources held by node-reader object *p. */ static void nodeReaderRelease(NodeReader *p){ sqlite3_free(p->term.a); } /* ** Initialize a node-reader object to read the node in buffer aNode/nNode. ** ** If successful, SQLITE_OK is returned and the NodeReader object set to ** point to the first entry on the node (if any). Otherwise, an SQLite ** error code is returned. */ static int nodeReaderInit(NodeReader *p, const char *aNode, int nNode){ memset(p, 0, sizeof(NodeReader)); p->aNode = aNode; p->nNode = nNode; /* Figure out if this is a leaf or an internal node. */ if( p->aNode[0] ){ /* An internal node. */ p->iOff = 1 + sqlite3Fts3GetVarint(&p->aNode[1], &p->iChild); }else{ p->iOff = 1; } return nodeReaderNext(p); } /* ** This function is called while writing an FTS segment each time a leaf o ** node is finished and written to disk. The key (zTerm/nTerm) is guaranteed ** to be greater than the largest key on the node just written, but smaller ** than or equal to the first key that will be written to the next leaf ** node. ** ** The block id of the leaf node just written to disk may be found in ** (pWriter->aNodeWriter[0].iBlock) when this function is called. */ static int fts3IncrmergePush( Fts3Table *p, /* Fts3 table handle */ IncrmergeWriter *pWriter, /* Writer object */ const char *zTerm, /* Term to write to internal node */ int nTerm /* Bytes at zTerm */ ){ sqlite3_int64 iPtr = pWriter->aNodeWriter[0].iBlock; int iLayer; assert( nTerm>0 ); for(iLayer=1; ALWAYS(iLayer<FTS_MAX_APPENDABLE_HEIGHT); iLayer++){ sqlite3_int64 iNextPtr = 0; NodeWriter *pNode = &pWriter->aNodeWriter[iLayer]; int rc = SQLITE_OK; int nPrefix; int nSuffix; int nSpace; /* Figure out how much space the key will consume if it is written to ** the current node of layer iLayer. Due to the prefix compression, ** the space required changes depending on which node the key is to ** be added to. 
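**
** For example, if the key most recently written to this node is "merge"
** and the new key is "merges", then nPrefix==5, nSuffix==1 and the new
** entry consumes sqlite3Fts3VarintLen(5) + sqlite3Fts3VarintLen(1) + 1
** == 3 bytes of space on the node.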
*/ nPrefix = fts3PrefixCompress(pNode->key.a, pNode->key.n, zTerm, nTerm); nSuffix = nTerm - nPrefix; nSpace = sqlite3Fts3VarintLen(nPrefix); nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix; if( pNode->key.n==0 || (pNode->block.n + nSpace)<=p->nNodeSize ){ /* If the current node of layer iLayer contains zero keys, or if adding ** the key to it will not cause it to grow to larger than nNodeSize ** bytes in size, write the key here. */ Blob *pBlk = &pNode->block; if( pBlk->n==0 ){ blobGrowBuffer(pBlk, p->nNodeSize, &rc); if( rc==SQLITE_OK ){ pBlk->a[0] = (char)iLayer; pBlk->n = 1 + sqlite3Fts3PutVarint(&pBlk->a[1], iPtr); } } blobGrowBuffer(pBlk, pBlk->n + nSpace, &rc); blobGrowBuffer(&pNode->key, nTerm, &rc); if( rc==SQLITE_OK ){ if( pNode->key.n ){ pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nPrefix); } pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nSuffix); memcpy(&pBlk->a[pBlk->n], &zTerm[nPrefix], nSuffix); pBlk->n += nSuffix; memcpy(pNode->key.a, zTerm, nTerm); pNode->key.n = nTerm; } }else{ /* Otherwise, flush the the current node of layer iLayer to disk. ** Then allocate a new, empty sibling node. The key will be written ** into the parent of this node. */ rc = fts3WriteSegment(p, pNode->iBlock, pNode->block.a, pNode->block.n); assert( pNode->block.nAlloc>=p->nNodeSize ); pNode->block.a[0] = (char)iLayer; pNode->block.n = 1 + sqlite3Fts3PutVarint(&pNode->block.a[1], iPtr+1); iNextPtr = pNode->iBlock; pNode->iBlock++; pNode->key.n = 0; } if( rc!=SQLITE_OK || iNextPtr==0 ) return rc; iPtr = iNextPtr; } assert( 0 ); return 0; } /* ** Append a term and (optionally) doclist to the FTS segment node currently ** stored in blob *pNode. The node need not contain any terms, but the ** header must be written before this function is called. ** ** A node header is a single 0x00 byte for a leaf node, or a height varint ** followed by the left-hand-child varint for an internal node. ** ** The term to be appended is passed via arguments zTerm/nTerm. For a ** leaf node, the doclist is passed as aDoclist/nDoclist. For an internal ** node, both aDoclist and nDoclist must be passed 0. ** ** If the size of the value in blob pPrev is zero, then this is the first ** term written to the node. Otherwise, pPrev contains a copy of the ** previous term. Before this function returns, it is updated to contain a ** copy of zTerm/nTerm. ** ** It is assumed that the buffer associated with pNode is already large ** enough to accommodate the new entry. The buffer associated with pPrev ** is extended by this function if requrired. ** ** If an error (i.e. OOM condition) occurs, an SQLite error code is ** returned. Otherwise, SQLITE_OK. */ static int fts3AppendToNode( Blob *pNode, /* Current node image to append to */ Blob *pPrev, /* Buffer containing previous term written */ const char *zTerm, /* New term to write */ int nTerm, /* Size of zTerm in bytes */ const char *aDoclist, /* Doclist (or NULL) to write */ int nDoclist /* Size of aDoclist in bytes */ ){ int rc = SQLITE_OK; /* Return code */ int bFirst = (pPrev->n==0); /* True if this is the first term written */ int nPrefix; /* Size of term prefix in bytes */ int nSuffix; /* Size of term suffix in bytes */ /* Node must have already been started. There must be a doclist for a ** leaf node, and there must not be a doclist for an internal node. 
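**
** For illustration, appending the term "abc" (with a 2-byte doclist) and
** then "abd" (with a 3-byte doclist) to a leaf node that so far contains
** only its 0x00 header byte produces the following layout:
**
**   00                      (node header: leaf)
**   03 'a' 'b' 'c'          (first term: suffix length then suffix bytes)
**   02 <2 doclist bytes>    (doclist size then doclist)
**   02 01 'd'               (second term: nPrefix==2, nSuffix==1)
**   03 <3 doclist bytes>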
*/ assert( pNode->n>0 ); assert( (pNode->a[0]=='\0')==(aDoclist!=0) ); blobGrowBuffer(pPrev, nTerm, &rc); if( rc!=SQLITE_OK ) return rc; nPrefix = fts3PrefixCompress(pPrev->a, pPrev->n, zTerm, nTerm); nSuffix = nTerm - nPrefix; memcpy(pPrev->a, zTerm, nTerm); pPrev->n = nTerm; if( bFirst==0 ){ pNode->n += sqlite3Fts3PutVarint(&pNode->a[pNode->n], nPrefix); } pNode->n += sqlite3Fts3PutVarint(&pNode->a[pNode->n], nSuffix); memcpy(&pNode->a[pNode->n], &zTerm[nPrefix], nSuffix); pNode->n += nSuffix; if( aDoclist ){ pNode->n += sqlite3Fts3PutVarint(&pNode->a[pNode->n], nDoclist); memcpy(&pNode->a[pNode->n], aDoclist, nDoclist); pNode->n += nDoclist; } assert( pNode->n<=pNode->nAlloc ); return SQLITE_OK; } /* ** Append the current term and doclist pointed to by cursor pCsr to the ** appendable b-tree segment opened for writing by pWriter. ** ** Return SQLITE_OK if successful, or an SQLite error code otherwise. */ static int fts3IncrmergeAppend( Fts3Table *p, /* Fts3 table handle */ IncrmergeWriter *pWriter, /* Writer object */ Fts3MultiSegReader *pCsr /* Cursor containing term and doclist */ ){ const char *zTerm = pCsr->zTerm; int nTerm = pCsr->nTerm; const char *aDoclist = pCsr->aDoclist; int nDoclist = pCsr->nDoclist; int rc = SQLITE_OK; /* Return code */ int nSpace; /* Total space in bytes required on leaf */ int nPrefix; /* Size of prefix shared with previous term */ int nSuffix; /* Size of suffix (nTerm - nPrefix) */ NodeWriter *pLeaf; /* Object used to write leaf nodes */ pLeaf = &pWriter->aNodeWriter[0]; nPrefix = fts3PrefixCompress(pLeaf->key.a, pLeaf->key.n, zTerm, nTerm); nSuffix = nTerm - nPrefix; nSpace = sqlite3Fts3VarintLen(nPrefix); nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix; nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist; /* If the current block is not empty, and if adding this term/doclist ** to the current block would make it larger than Fts3Table.nNodeSize ** bytes, write this block out to the database. */ if( pLeaf->block.n>0 && (pLeaf->block.n + nSpace)>p->nNodeSize ){ rc = fts3WriteSegment(p, pLeaf->iBlock, pLeaf->block.a, pLeaf->block.n); pWriter->nWork++; /* Add the current term to the parent node. The term added to the ** parent must: ** ** a) be greater than the largest term on the leaf node just written ** to the database (still available in pLeaf->key), and ** ** b) be less than or equal to the term about to be added to the new ** leaf node (zTerm/nTerm). ** ** In other words, it must be the prefix of zTerm 1 byte longer than ** the common prefix (if any) of zTerm and pWriter->zTerm. */ if( rc==SQLITE_OK ){ rc = fts3IncrmergePush(p, pWriter, zTerm, nPrefix+1); } /* Advance to the next output block */ pLeaf->iBlock++; pLeaf->key.n = 0; pLeaf->block.n = 0; nPrefix = 0; nSuffix = nTerm; nSpace = 1; nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix; nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist; } blobGrowBuffer(&pLeaf->block, pLeaf->block.n + nSpace, &rc); if( rc==SQLITE_OK ){ if( pLeaf->block.n==0 ){ pLeaf->block.n = 1; pLeaf->block.a[0] = '\0'; } rc = fts3AppendToNode( &pLeaf->block, &pLeaf->key, zTerm, nTerm, aDoclist, nDoclist ); } return rc; } /* ** This function is called to release all dynamic resources held by the ** merge-writer object pWriter, and if no error has occurred, to flush ** all outstanding node buffers held by pWriter to disk. ** ** If *pRc is not SQLITE_OK when this function is called, then no attempt ** is made to write any data to disk. Instead, this function serves only ** to release outstanding resources. 
** ** Otherwise, if *pRc is initially SQLITE_OK and an error occurs while ** flushing buffers to disk, *pRc is set to an SQLite error code before ** returning. */ static void fts3IncrmergeRelease( Fts3Table *p, /* FTS3 table handle */ IncrmergeWriter *pWriter, /* Merge-writer object */ int *pRc /* IN/OUT: Error code */ ){ int i; /* Used to iterate through non-root layers */ int iRoot; /* Index of root in pWriter->aNodeWriter */ NodeWriter *pRoot; /* NodeWriter for root node */ int rc = *pRc; /* Error code */ /* Set iRoot to the index in pWriter->aNodeWriter[] of the output segment ** root node. If the segment fits entirely on a single leaf node, iRoot ** will be set to 0. If the root node is the parent of the leaves, iRoot ** will be 1. And so on. */ for(iRoot=FTS_MAX_APPENDABLE_HEIGHT-1; iRoot>=0; iRoot--){ NodeWriter *pNode = &pWriter->aNodeWriter[iRoot]; if( pNode->block.n>0 ) break; assert( *pRc || pNode->block.nAlloc==0 ); assert( *pRc || pNode->key.nAlloc==0 ); sqlite3_free(pNode->block.a); sqlite3_free(pNode->key.a); } /* Empty output segment. This is a no-op. */ if( iRoot<0 ) return; /* The entire output segment fits on a single node. Normally, this means ** the node would be stored as a blob in the "root" column of the %_segdir ** table. However, this is not permitted in this case. The problem is that ** space has already been reserved in the %_segments table, and so the ** start_block and end_block fields of the %_segdir table must be populated. ** And, by design or by accident, released versions of FTS cannot handle ** segments that fit entirely on the root node with start_block!=0. ** ** Instead, create a synthetic root node that contains nothing but a ** pointer to the single content node. So that the segment consists of a ** single leaf and a single interior (root) node. ** ** Todo: Better might be to defer allocating space in the %_segments ** table until we are sure it is needed. */ if( iRoot==0 ){ Blob *pBlock = &pWriter->aNodeWriter[1].block; blobGrowBuffer(pBlock, 1 + FTS3_VARINT_MAX, &rc); if( rc==SQLITE_OK ){ pBlock->a[0] = 0x01; pBlock->n = 1 + sqlite3Fts3PutVarint( &pBlock->a[1], pWriter->aNodeWriter[0].iBlock ); } iRoot = 1; } pRoot = &pWriter->aNodeWriter[iRoot]; /* Flush all currently outstanding nodes to disk. */ for(i=0; i<iRoot; i++){ NodeWriter *pNode = &pWriter->aNodeWriter[i]; if( pNode->block.n>0 && rc==SQLITE_OK ){ rc = fts3WriteSegment(p, pNode->iBlock, pNode->block.a, pNode->block.n); } sqlite3_free(pNode->block.a); sqlite3_free(pNode->key.a); } /* Write the %_segdir record. */ if( rc==SQLITE_OK ){ rc = fts3WriteSegdir(p, pWriter->iAbsLevel+1, /* level */ pWriter->iIdx, /* idx */ pWriter->iStart, /* start_block */ pWriter->aNodeWriter[0].iBlock, /* leaves_end_block */ pWriter->iEnd, /* end_block */ pRoot->block.a, pRoot->block.n /* root */ ); } sqlite3_free(pRoot->block.a); sqlite3_free(pRoot->key.a); *pRc = rc; } /* ** Compare the term in buffer zLhs (size in bytes nLhs) with that in ** zRhs (size in bytes nRhs) using memcmp. If one term is a prefix of ** the other, it is considered to be smaller than the other. ** ** Return -ve if zLhs is smaller than zRhs, 0 if it is equal, or +ve ** if it is greater. */ static int fts3TermCmp( const char *zLhs, int nLhs, /* LHS of comparison */ const char *zRhs, int nRhs /* RHS of comparison */ ){ int nCmp = MIN(nLhs, nRhs); int res; res = memcmp(zLhs, zRhs, nCmp); if( res==0 ) res = nLhs - nRhs; return res; } /* ** Query to see if the entry in the %_segments table with blockid iEnd is ** NULL. 
If no error occurs and the entry is NULL, set *pbRes 1 before ** returning. Otherwise, set *pbRes to 0. ** ** Or, if an error occurs while querying the database, return an SQLite ** error code. The final value of *pbRes is undefined in this case. ** ** This is used to test if a segment is an "appendable" segment. If it ** is, then a NULL entry has been inserted into the %_segments table ** with blockid %_segdir.end_block. */ static int fts3IsAppendable(Fts3Table *p, sqlite3_int64 iEnd, int *pbRes){ int bRes = 0; /* Result to set *pbRes to */ sqlite3_stmt *pCheck = 0; /* Statement to query database with */ int rc; /* Return code */ rc = fts3SqlStmt(p, SQL_SEGMENT_IS_APPENDABLE, &pCheck, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pCheck, 1, iEnd); if( SQLITE_ROW==sqlite3_step(pCheck) ) bRes = 1; rc = sqlite3_reset(pCheck); } *pbRes = bRes; return rc; } /* ** This function is called when initializing an incremental-merge operation. ** It checks if the existing segment with index value iIdx at absolute level ** (iAbsLevel+1) can be appended to by the incremental merge. If it can, the ** merge-writer object *pWriter is initialized to write to it. ** ** An existing segment can be appended to by an incremental merge if: ** ** * It was initially created as an appendable segment (with all required ** space pre-allocated), and ** ** * The first key read from the input (arguments zKey and nKey) is ** greater than the largest key currently stored in the potential ** output segment. */ static int fts3IncrmergeLoad( Fts3Table *p, /* Fts3 table handle */ sqlite3_int64 iAbsLevel, /* Absolute level of input segments */ int iIdx, /* Index of candidate output segment */ const char *zKey, /* First key to write */ int nKey, /* Number of bytes in nKey */ IncrmergeWriter *pWriter /* Populate this object */ ){ int rc; /* Return code */ sqlite3_stmt *pSelect = 0; /* SELECT to read %_segdir entry */ rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR, &pSelect, 0); if( rc==SQLITE_OK ){ sqlite3_int64 iStart = 0; /* Value of %_segdir.start_block */ sqlite3_int64 iLeafEnd = 0; /* Value of %_segdir.leaves_end_block */ sqlite3_int64 iEnd = 0; /* Value of %_segdir.end_block */ const char *aRoot = 0; /* Pointer to %_segdir.root buffer */ int nRoot = 0; /* Size of aRoot[] in bytes */ int rc2; /* Return code from sqlite3_reset() */ int bAppendable = 0; /* Set to true if segment is appendable */ /* Read the %_segdir entry for index iIdx absolute level (iAbsLevel+1) */ sqlite3_bind_int64(pSelect, 1, iAbsLevel+1); sqlite3_bind_int(pSelect, 2, iIdx); if( sqlite3_step(pSelect)==SQLITE_ROW ){ iStart = sqlite3_column_int64(pSelect, 1); iLeafEnd = sqlite3_column_int64(pSelect, 2); iEnd = sqlite3_column_int64(pSelect, 3); nRoot = sqlite3_column_bytes(pSelect, 4); aRoot = sqlite3_column_blob(pSelect, 4); }else{ return sqlite3_reset(pSelect); } /* Check for the zero-length marker in the %_segments table */ rc = fts3IsAppendable(p, iEnd, &bAppendable); /* Check that zKey/nKey is larger than the largest key the candidate */ if( rc==SQLITE_OK && bAppendable ){ char *aLeaf = 0; int nLeaf = 0; rc = sqlite3Fts3ReadBlock(p, iLeafEnd, &aLeaf, &nLeaf, 0); if( rc==SQLITE_OK ){ NodeReader reader; for(rc = nodeReaderInit(&reader, aLeaf, nLeaf); rc==SQLITE_OK && reader.aNode; rc = nodeReaderNext(&reader) ){ assert( reader.aNode ); } if( fts3TermCmp(zKey, nKey, reader.term.a, reader.term.n)<=0 ){ bAppendable = 0; } nodeReaderRelease(&reader); } sqlite3_free(aLeaf); } if( rc==SQLITE_OK && bAppendable ){ /* It is possible to append to this segment. 
Set up the IncrmergeWriter ** object to do so. */ int i; int nHeight = (int)aRoot[0]; NodeWriter *pNode; pWriter->nLeafEst = (int)((iEnd - iStart) + 1)/FTS_MAX_APPENDABLE_HEIGHT; pWriter->iStart = iStart; pWriter->iEnd = iEnd; pWriter->iAbsLevel = iAbsLevel; pWriter->iIdx = iIdx; for(i=nHeight+1; i<FTS_MAX_APPENDABLE_HEIGHT; i++){ pWriter->aNodeWriter[i].iBlock = pWriter->iStart + i*pWriter->nLeafEst; } pNode = &pWriter->aNodeWriter[nHeight]; pNode->iBlock = pWriter->iStart + pWriter->nLeafEst*nHeight; blobGrowBuffer(&pNode->block, MAX(nRoot, p->nNodeSize), &rc); if( rc==SQLITE_OK ){ memcpy(pNode->block.a, aRoot, nRoot); pNode->block.n = nRoot; } for(i=nHeight; i>=0 && rc==SQLITE_OK; i--){ NodeReader reader; pNode = &pWriter->aNodeWriter[i]; rc = nodeReaderInit(&reader, pNode->block.a, pNode->block.n); while( reader.aNode && rc==SQLITE_OK ) rc = nodeReaderNext(&reader); blobGrowBuffer(&pNode->key, reader.term.n, &rc); if( rc==SQLITE_OK ){ memcpy(pNode->key.a, reader.term.a, reader.term.n); pNode->key.n = reader.term.n; if( i>0 ){ char *aBlock = 0; int nBlock = 0; pNode = &pWriter->aNodeWriter[i-1]; pNode->iBlock = reader.iChild; rc = sqlite3Fts3ReadBlock(p, reader.iChild, &aBlock, &nBlock, 0); blobGrowBuffer(&pNode->block, MAX(nBlock, p->nNodeSize), &rc); if( rc==SQLITE_OK ){ memcpy(pNode->block.a, aBlock, nBlock); pNode->block.n = nBlock; } sqlite3_free(aBlock); } } nodeReaderRelease(&reader); } } rc2 = sqlite3_reset(pSelect); if( rc==SQLITE_OK ) rc = rc2; } return rc; } /* ** Determine the largest segment index value that exists within absolute ** level iAbsLevel+1. If no error occurs, set *piIdx to this value plus ** one before returning SQLITE_OK. Or, if there are no segments at all ** within level iAbsLevel, set *piIdx to zero. ** ** If an error occurs, return an SQLite error code. The final value of ** *piIdx is undefined in this case. */ static int fts3IncrmergeOutputIdx( Fts3Table *p, /* FTS Table handle */ sqlite3_int64 iAbsLevel, /* Absolute index of input segments */ int *piIdx /* OUT: Next free index at iAbsLevel+1 */ ){ int rc; sqlite3_stmt *pOutputIdx = 0; /* SQL used to find output index */ rc = fts3SqlStmt(p, SQL_NEXT_SEGMENT_INDEX, &pOutputIdx, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pOutputIdx, 1, iAbsLevel+1); sqlite3_step(pOutputIdx); *piIdx = sqlite3_column_int(pOutputIdx, 0); rc = sqlite3_reset(pOutputIdx); } return rc; } /* ** Allocate an appendable output segment on absolute level iAbsLevel+1 ** with idx value iIdx. ** ** In the %_segdir table, a segment is defined by the values in three ** columns: ** ** start_block ** leaves_end_block ** end_block ** ** When an appendable segment is allocated, it is estimated that the ** maximum number of leaf blocks that may be required is the sum of the ** number of leaf blocks consumed by the input segments, plus the number ** of input segments, multiplied by two. This value is stored in stack ** variable nLeafEst. ** ** A total of 16*nLeafEst blocks are allocated when an appendable segment ** is created ((1 + end_block - start_block)==16*nLeafEst). The contiguous ** array of leaf nodes starts at the first block allocated. The array ** of interior nodes that are parents of the leaf nodes start at block ** (start_block + (1 + end_block - start_block) / 16). And so on. ** ** In the actual code below, the value "16" is replaced with the ** pre-processor macro FTS_MAX_APPENDABLE_HEIGHT. 
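**
** A worked example: if the input segments contain 90 leaf blocks in total
** and there are 5 input segments, then nLeafEst = (90+5)*2 = 190 and the
** output segment reserves 16*190 = 3040 blocks. With start_block == S,
** blocks S through S+189 are reserved for leaf nodes, blocks S+190 through
** S+379 for their parents, and so on up the tree, with end_block == S+3039.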
*/ static int fts3IncrmergeWriter( Fts3Table *p, /* Fts3 table handle */ sqlite3_int64 iAbsLevel, /* Absolute level of input segments */ int iIdx, /* Index of new output segment */ Fts3MultiSegReader *pCsr, /* Cursor that data will be read from */ IncrmergeWriter *pWriter /* Populate this object */ ){ int rc; /* Return Code */ int i; /* Iterator variable */ int nLeafEst = 0; /* Blocks allocated for leaf nodes */ sqlite3_stmt *pLeafEst = 0; /* SQL used to determine nLeafEst */ sqlite3_stmt *pFirstBlock = 0; /* SQL used to determine first block */ /* Calculate nLeafEst. */ rc = fts3SqlStmt(p, SQL_MAX_LEAF_NODE_ESTIMATE, &pLeafEst, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pLeafEst, 1, iAbsLevel); sqlite3_bind_int64(pLeafEst, 2, pCsr->nSegment); if( SQLITE_ROW==sqlite3_step(pLeafEst) ){ nLeafEst = sqlite3_column_int(pLeafEst, 0); } rc = sqlite3_reset(pLeafEst); } if( rc!=SQLITE_OK ) return rc; /* Calculate the first block to use in the output segment */ rc = fts3SqlStmt(p, SQL_NEXT_SEGMENTS_ID, &pFirstBlock, 0); if( rc==SQLITE_OK ){ if( SQLITE_ROW==sqlite3_step(pFirstBlock) ){ pWriter->iStart = sqlite3_column_int64(pFirstBlock, 0); pWriter->iEnd = pWriter->iStart - 1; pWriter->iEnd += nLeafEst * FTS_MAX_APPENDABLE_HEIGHT; } rc = sqlite3_reset(pFirstBlock); } if( rc!=SQLITE_OK ) return rc; /* Insert the marker in the %_segments table to make sure nobody tries ** to steal the space just allocated. This is also used to identify ** appendable segments. */ rc = fts3WriteSegment(p, pWriter->iEnd, 0, 0); if( rc!=SQLITE_OK ) return rc; pWriter->iAbsLevel = iAbsLevel; pWriter->nLeafEst = nLeafEst; pWriter->iIdx = iIdx; /* Set up the array of NodeWriter objects */ for(i=0; i<FTS_MAX_APPENDABLE_HEIGHT; i++){ pWriter->aNodeWriter[i].iBlock = pWriter->iStart + i*pWriter->nLeafEst; } return SQLITE_OK; } /* ** Remove an entry from the %_segdir table. This involves running the ** following two statements: ** ** DELETE FROM %_segdir WHERE level = :iAbsLevel AND idx = :iIdx ** UPDATE %_segdir SET idx = idx - 1 WHERE level = :iAbsLevel AND idx > :iIdx ** ** The DELETE statement removes the specific %_segdir level. The UPDATE ** statement ensures that the remaining segments have contiguously allocated ** idx values. */ static int fts3RemoveSegdirEntry( Fts3Table *p, /* FTS3 table handle */ sqlite3_int64 iAbsLevel, /* Absolute level to delete from */ int iIdx /* Index of %_segdir entry to delete */ ){ int rc; /* Return code */ sqlite3_stmt *pDelete = 0; /* DELETE statement */ rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_ENTRY, &pDelete, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pDelete, 1, iAbsLevel); sqlite3_bind_int(pDelete, 2, iIdx); sqlite3_step(pDelete); rc = sqlite3_reset(pDelete); } return rc; } /* ** One or more segments have just been removed from absolute level iAbsLevel. ** Update the 'idx' values of the remaining segments in the level so that ** the idx values are a contiguous sequence starting from 0. 
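**
** For example, if the level previously contained segments with idx values
** 0, 1, 2, 3 and 4, and segments 1 and 3 have just been removed, the
** remaining segments with idx values 0, 2 and 4 are renumbered 0, 1 and 2.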
*/ static int fts3RepackSegdirLevel( Fts3Table *p, /* FTS3 table handle */ sqlite3_int64 iAbsLevel /* Absolute level to repack */ ){ int rc; /* Return code */ int *aIdx = 0; /* Array of remaining idx values */ int nIdx = 0; /* Valid entries in aIdx[] */ int nAlloc = 0; /* Allocated size of aIdx[] */ int i; /* Iterator variable */ sqlite3_stmt *pSelect = 0; /* Select statement to read idx values */ sqlite3_stmt *pUpdate = 0; /* Update statement to modify idx values */ rc = fts3SqlStmt(p, SQL_SELECT_INDEXES, &pSelect, 0); if( rc==SQLITE_OK ){ int rc2; sqlite3_bind_int64(pSelect, 1, iAbsLevel); while( SQLITE_ROW==sqlite3_step(pSelect) ){ if( nIdx>=nAlloc ){ int *aNew; nAlloc += 16; aNew = sqlite3_realloc(aIdx, nAlloc*sizeof(int)); if( !aNew ){ rc = SQLITE_NOMEM; break; } aIdx = aNew; } aIdx[nIdx++] = sqlite3_column_int(pSelect, 0); } rc2 = sqlite3_reset(pSelect); if( rc==SQLITE_OK ) rc = rc2; } if( rc==SQLITE_OK ){ rc = fts3SqlStmt(p, SQL_SHIFT_SEGDIR_ENTRY, &pUpdate, 0); } if( rc==SQLITE_OK ){ sqlite3_bind_int64(pUpdate, 2, iAbsLevel); } assert( p->bIgnoreSavepoint==0 ); p->bIgnoreSavepoint = 1; for(i=0; rc==SQLITE_OK && i<nIdx; i++){ if( aIdx[i]!=i ){ sqlite3_bind_int(pUpdate, 3, aIdx[i]); sqlite3_bind_int(pUpdate, 1, i); sqlite3_step(pUpdate); rc = sqlite3_reset(pUpdate); } } p->bIgnoreSavepoint = 0; sqlite3_free(aIdx); return rc; } static void fts3StartNode(Blob *pNode, int iHeight, sqlite3_int64 iChild){ pNode->a[0] = (char)iHeight; if( iChild ){ assert( pNode->nAlloc>=1+sqlite3Fts3VarintLen(iChild) ); pNode->n = 1 + sqlite3Fts3PutVarint(&pNode->a[1], iChild); }else{ assert( pNode->nAlloc>=1 ); pNode->n = 1; } } /* ** The first two arguments are a pointer to and the size of a segment b-tree ** node. The node may be a leaf or an internal node. ** ** This function creates a new node image in blob object *pNew by copying ** all terms that are greater than or equal to zTerm/nTerm (for leaf nodes) ** or greater than zTerm/nTerm (for internal nodes) from aNode/nNode. */ static int fts3TruncateNode( const char *aNode, /* Current node image */ int nNode, /* Size of aNode in bytes */ Blob *pNew, /* OUT: Write new node image here */ const char *zTerm, /* Omit all terms smaller than this */ int nTerm, /* Size of zTerm in bytes */ sqlite3_int64 *piBlock /* OUT: Block number in next layer down */ ){ NodeReader reader; /* Reader object */ Blob prev = {0, 0, 0}; /* Previous term written to new node */ int rc = SQLITE_OK; /* Return code */ int bLeaf = aNode[0]=='\0'; /* True for a leaf node */ /* Allocate required output space */ blobGrowBuffer(pNew, nNode, &rc); if( rc!=SQLITE_OK ) return rc; pNew->n = 0; /* Populate new node buffer */ for(rc = nodeReaderInit(&reader, aNode, nNode); rc==SQLITE_OK && reader.aNode; rc = nodeReaderNext(&reader) ){ if( pNew->n==0 ){ int res = fts3TermCmp(reader.term.a, reader.term.n, zTerm, nTerm); if( res<0 || (bLeaf==0 && res==0) ) continue; fts3StartNode(pNew, (int)aNode[0], reader.iChild); *piBlock = reader.iChild; } rc = fts3AppendToNode( pNew, &prev, reader.term.a, reader.term.n, reader.aDoclist, reader.nDoclist ); if( rc!=SQLITE_OK ) break; } if( pNew->n==0 ){ fts3StartNode(pNew, (int)aNode[0], reader.iChild); *piBlock = reader.iChild; } assert( pNew->n<=pNew->nAlloc ); nodeReaderRelease(&reader); sqlite3_free(prev.a); return rc; } /* ** Remove all terms smaller than zTerm/nTerm from segment iIdx in absolute ** level iAbsLevel. 
This may involve deleting entries from the %_segments ** table, and modifying existing entries in both the %_segments and %_segdir ** tables. ** ** SQLITE_OK is returned if the segment is updated successfully. Or an ** SQLite error code otherwise. */ static int fts3TruncateSegment( Fts3Table *p, /* FTS3 table handle */ sqlite3_int64 iAbsLevel, /* Absolute level of segment to modify */ int iIdx, /* Index within level of segment to modify */ const char *zTerm, /* Remove terms smaller than this */ int nTerm /* Number of bytes in buffer zTerm */ ){ int rc = SQLITE_OK; /* Return code */ Blob root = {0,0,0}; /* New root page image */ Blob block = {0,0,0}; /* Buffer used for any other block */ sqlite3_int64 iBlock = 0; /* Block id */ sqlite3_int64 iNewStart = 0; /* New value for iStartBlock */ sqlite3_int64 iOldStart = 0; /* Old value for iStartBlock */ sqlite3_stmt *pFetch = 0; /* Statement used to fetch segdir */ rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR, &pFetch, 0); if( rc==SQLITE_OK ){ int rc2; /* sqlite3_reset() return code */ sqlite3_bind_int64(pFetch, 1, iAbsLevel); sqlite3_bind_int(pFetch, 2, iIdx); if( SQLITE_ROW==sqlite3_step(pFetch) ){ const char *aRoot = sqlite3_column_blob(pFetch, 4); int nRoot = sqlite3_column_bytes(pFetch, 4); iOldStart = sqlite3_column_int64(pFetch, 1); rc = fts3TruncateNode(aRoot, nRoot, &root, zTerm, nTerm, &iBlock); } rc2 = sqlite3_reset(pFetch); if( rc==SQLITE_OK ) rc = rc2; } while( rc==SQLITE_OK && iBlock ){ char *aBlock = 0; int nBlock = 0; iNewStart = iBlock; rc = sqlite3Fts3ReadBlock(p, iBlock, &aBlock, &nBlock, 0); if( rc==SQLITE_OK ){ rc = fts3TruncateNode(aBlock, nBlock, &block, zTerm, nTerm, &iBlock); } if( rc==SQLITE_OK ){ rc = fts3WriteSegment(p, iNewStart, block.a, block.n); } sqlite3_free(aBlock); } /* Variable iNewStart now contains the first valid leaf node. */ if( rc==SQLITE_OK && iNewStart ){ sqlite3_stmt *pDel = 0; rc = fts3SqlStmt(p, SQL_DELETE_SEGMENTS_RANGE, &pDel, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pDel, 1, iOldStart); sqlite3_bind_int64(pDel, 2, iNewStart-1); sqlite3_step(pDel); rc = sqlite3_reset(pDel); } } if( rc==SQLITE_OK ){ sqlite3_stmt *pChomp = 0; rc = fts3SqlStmt(p, SQL_CHOMP_SEGDIR, &pChomp, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pChomp, 1, iNewStart); sqlite3_bind_blob(pChomp, 2, root.a, root.n, SQLITE_STATIC); sqlite3_bind_int64(pChomp, 3, iAbsLevel); sqlite3_bind_int(pChomp, 4, iIdx); sqlite3_step(pChomp); rc = sqlite3_reset(pChomp); } } sqlite3_free(root.a); sqlite3_free(block.a); return rc; } /* ** This function is called after an incrmental-merge operation has run to ** merge (or partially merge) two or more segments from absolute level ** iAbsLevel. ** ** Each input segment is either removed from the db completely (if all of ** its data was copied to the output segment by the incrmerge operation) ** or modified in place so that it no longer contains those entries that ** have been duplicated in the output segment. */ static int fts3IncrmergeChomp( Fts3Table *p, /* FTS table handle */ sqlite3_int64 iAbsLevel, /* Absolute level containing segments */ Fts3MultiSegReader *pCsr, /* Chomp all segments opened by this cursor */ int *pnRem /* Number of segments not deleted */ ){ int i; int nRem = 0; int rc = SQLITE_OK; for(i=pCsr->nSegment-1; i>=0 && rc==SQLITE_OK; i--){ Fts3SegReader *pSeg = 0; int j; /* Find the Fts3SegReader object with Fts3SegReader.iIdx==i. It is hiding ** somewhere in the pCsr->apSegment[] array. 
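**
** (The array is not sorted by iIdx, so a linear search over all nSegment
** entries is used to locate the reader for segment i.)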
*/ for(j=0; ALWAYS(j<pCsr->nSegment); j++){ pSeg = pCsr->apSegment[j]; if( pSeg->iIdx==i ) break; } assert( j<pCsr->nSegment && pSeg->iIdx==i ); if( pSeg->aNode==0 ){ /* Seg-reader is at EOF. Remove the entire input segment. */ rc = fts3DeleteSegment(p, pSeg); if( rc==SQLITE_OK ){ rc = fts3RemoveSegdirEntry(p, iAbsLevel, pSeg->iIdx); } *pnRem = 0; }else{ /* The incremental merge did not copy all the data from this ** segment to the upper level. The segment is modified in place ** so that it contains no keys smaller than zTerm/nTerm. */ const char *zTerm = pSeg->zTerm; int nTerm = pSeg->nTerm; rc = fts3TruncateSegment(p, iAbsLevel, pSeg->iIdx, zTerm, nTerm); nRem++; } } if( rc==SQLITE_OK && nRem!=pCsr->nSegment ){ rc = fts3RepackSegdirLevel(p, iAbsLevel); } *pnRem = nRem; return rc; } /* ** Store an incr-merge hint in the database. */ static int fts3IncrmergeHintStore(Fts3Table *p, Blob *pHint){ sqlite3_stmt *pReplace = 0; int rc; /* Return code */ rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pReplace, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int(pReplace, 1, FTS_STAT_INCRMERGEHINT); sqlite3_bind_blob(pReplace, 2, pHint->a, pHint->n, SQLITE_STATIC); sqlite3_step(pReplace); rc = sqlite3_reset(pReplace); } return rc; } /* ** Load an incr-merge hint from the database. The incr-merge hint, if one ** exists, is stored in the rowid==1 row of the %_stat table. ** ** If successful, populate blob *pHint with the value read from the %_stat ** table and return SQLITE_OK. Otherwise, if an error occurs, return an ** SQLite error code. */ static int fts3IncrmergeHintLoad(Fts3Table *p, Blob *pHint){ sqlite3_stmt *pSelect = 0; int rc; pHint->n = 0; rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pSelect, 0); if( rc==SQLITE_OK ){ int rc2; sqlite3_bind_int(pSelect, 1, FTS_STAT_INCRMERGEHINT); if( SQLITE_ROW==sqlite3_step(pSelect) ){ const char *aHint = sqlite3_column_blob(pSelect, 0); int nHint = sqlite3_column_bytes(pSelect, 0); if( aHint ){ blobGrowBuffer(pHint, nHint, &rc); if( rc==SQLITE_OK ){ memcpy(pHint->a, aHint, nHint); pHint->n = nHint; } } } rc2 = sqlite3_reset(pSelect); if( rc==SQLITE_OK ) rc = rc2; } return rc; } /* ** If *pRc is not SQLITE_OK when this function is called, it is a no-op. ** Otherwise, append an entry to the hint stored in blob *pHint. Each entry ** consists of two varints, the absolute level number of the input segments ** and the number of input segments. ** ** If successful, leave *pRc set to SQLITE_OK and return. If an error occurs, ** set *pRc to an SQLite error code before returning. */ static void fts3IncrmergeHintPush( Blob *pHint, /* Hint blob to append to */ i64 iAbsLevel, /* First varint to store in hint */ int nInput, /* Second varint to store in hint */ int *pRc /* IN/OUT: Error code */ ){ blobGrowBuffer(pHint, pHint->n + 2*FTS3_VARINT_MAX, pRc); if( *pRc==SQLITE_OK ){ pHint->n += sqlite3Fts3PutVarint(&pHint->a[pHint->n], iAbsLevel); pHint->n += sqlite3Fts3PutVarint(&pHint->a[pHint->n], (i64)nInput); } } /* ** Read the last entry (most recently pushed) from the hint blob *pHint ** and then remove the entry. Write the two values read to *piAbsLevel and ** *pnInput before returning. ** ** If no error occurs, return SQLITE_OK. If the hint blob in *pHint does ** not contain at least two valid varints, return SQLITE_CORRUPT_VTAB. 
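**
** For example, if the hint blob holds the entry (iAbsLevel=2, nInput=4)
** followed by the entry (iAbsLevel=7, nInput=16), its contents are the
** four single-byte varints 0x02 0x04 0x07 0x10. Popping returns
** iAbsLevel==7 and nInput==16, and truncates the blob to 0x02 0x04.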
*/ static int fts3IncrmergeHintPop(Blob *pHint, i64 *piAbsLevel, int *pnInput){ const int nHint = pHint->n; int i; i = pHint->n-2; while( i>0 && (pHint->a[i-1] & 0x80) ) i--; while( i>0 && (pHint->a[i-1] & 0x80) ) i--; pHint->n = i; i += sqlite3Fts3GetVarint(&pHint->a[i], piAbsLevel); i += sqlite3Fts3GetVarint32(&pHint->a[i], pnInput); if( i!=nHint ) return SQLITE_CORRUPT_VTAB; return SQLITE_OK; } /* ** Attempt an incremental merge that writes nMerge leaf blocks. ** ** Incremental merges happen nMin segments at a time. The two ** segments to be merged are the nMin oldest segments (the ones with ** the smallest indexes) in the highest level that contains at least ** nMin segments. Multiple merges might occur in an attempt to write the ** quota of nMerge leaf blocks. */ int sqlite3Fts3Incrmerge(Fts3Table *p, int nMerge, int nMin){ int rc; /* Return code */ int nRem = nMerge; /* Number of leaf pages yet to be written */ Fts3MultiSegReader *pCsr; /* Cursor used to read input data */ Fts3SegFilter *pFilter; /* Filter used with cursor pCsr */ IncrmergeWriter *pWriter; /* Writer object */ int nSeg = 0; /* Number of input segments */ sqlite3_int64 iAbsLevel = 0; /* Absolute level number to work on */ Blob hint = {0, 0, 0}; /* Hint read from %_stat table */ int bDirtyHint = 0; /* True if blob 'hint' has been modified */ /* Allocate space for the cursor, filter and writer objects */ const int nAlloc = sizeof(*pCsr) + sizeof(*pFilter) + sizeof(*pWriter); pWriter = (IncrmergeWriter *)sqlite3_malloc(nAlloc); if( !pWriter ) return SQLITE_NOMEM; pFilter = (Fts3SegFilter *)&pWriter[1]; pCsr = (Fts3MultiSegReader *)&pFilter[1]; rc = fts3IncrmergeHintLoad(p, &hint); while( rc==SQLITE_OK && nRem>0 ){ const i64 nMod = FTS3_SEGDIR_MAXLEVEL * p->nIndex; sqlite3_stmt *pFindLevel = 0; /* SQL used to determine iAbsLevel */ int bUseHint = 0; /* True if attempting to append */ /* Search the %_segdir table for the absolute level with the smallest ** relative level number that contains at least nMin segments, if any. ** If one is found, set iAbsLevel to the absolute level number and ** nSeg to nMin. If no level with at least nMin segments can be found, ** set nSeg to -1. */ rc = fts3SqlStmt(p, SQL_FIND_MERGE_LEVEL, &pFindLevel, 0); sqlite3_bind_int(pFindLevel, 1, nMin); if( sqlite3_step(pFindLevel)==SQLITE_ROW ){ iAbsLevel = sqlite3_column_int64(pFindLevel, 0); nSeg = nMin; }else{ nSeg = -1; } rc = sqlite3_reset(pFindLevel); /* If the hint read from the %_stat table is not empty, check if the ** last entry in it specifies a relative level smaller than or equal ** to the level identified by the block above (if any). If so, this ** iteration of the loop will work on merging at the hinted level. */ if( rc==SQLITE_OK && hint.n ){ int nHint = hint.n; sqlite3_int64 iHintAbsLevel = 0; /* Hint level */ int nHintSeg = 0; /* Hint number of segments */ rc = fts3IncrmergeHintPop(&hint, &iHintAbsLevel, &nHintSeg); if( nSeg<0 || (iAbsLevel % nMod) >= (iHintAbsLevel % nMod) ){ iAbsLevel = iHintAbsLevel; nSeg = nHintSeg; bUseHint = 1; bDirtyHint = 1; }else{ /* This undoes the effect of the HintPop() above - so that no entry ** is removed from the hint blob. */ hint.n = nHint; } } /* If nSeg is less that zero, then there is no level with at least ** nMin segments and no hint in the %_stat table. No work to do. ** Exit early in this case. */ if( nSeg<0 ) break; /* Open a cursor to iterate through the contents of the oldest nSeg ** indexes of absolute level iAbsLevel. 
If this cursor is opened using ** the 'hint' parameters, it is possible that there are less than nSeg ** segments available in level iAbsLevel. In this case, no work is ** done on iAbsLevel - fall through to the next iteration of the loop ** to start work on some other level. */ memset(pWriter, 0, nAlloc); pFilter->flags = FTS3_SEGMENT_REQUIRE_POS; if( rc==SQLITE_OK ){ rc = fts3IncrmergeCsr(p, iAbsLevel, nSeg, pCsr); } if( SQLITE_OK==rc && pCsr->nSegment==nSeg && SQLITE_OK==(rc = sqlite3Fts3SegReaderStart(p, pCsr, pFilter)) && SQLITE_ROW==(rc = sqlite3Fts3SegReaderStep(p, pCsr)) ){ int iIdx = 0; /* Largest idx in level (iAbsLevel+1) */ rc = fts3IncrmergeOutputIdx(p, iAbsLevel, &iIdx); if( rc==SQLITE_OK ){ if( bUseHint && iIdx>0 ){ const char *zKey = pCsr->zTerm; int nKey = pCsr->nTerm; rc = fts3IncrmergeLoad(p, iAbsLevel, iIdx-1, zKey, nKey, pWriter); }else{ rc = fts3IncrmergeWriter(p, iAbsLevel, iIdx, pCsr, pWriter); } } if( rc==SQLITE_OK && pWriter->nLeafEst ){ fts3LogMerge(nSeg, iAbsLevel); do { rc = fts3IncrmergeAppend(p, pWriter, pCsr); if( rc==SQLITE_OK ) rc = sqlite3Fts3SegReaderStep(p, pCsr); if( pWriter->nWork>=nRem && rc==SQLITE_ROW ) rc = SQLITE_OK; }while( rc==SQLITE_ROW ); /* Update or delete the input segments */ if( rc==SQLITE_OK ){ nRem -= (1 + pWriter->nWork); rc = fts3IncrmergeChomp(p, iAbsLevel, pCsr, &nSeg); if( nSeg!=0 ){ bDirtyHint = 1; fts3IncrmergeHintPush(&hint, iAbsLevel, nSeg, &rc); } } } fts3IncrmergeRelease(p, pWriter, &rc); } sqlite3Fts3SegReaderFinish(pCsr); } /* Write the hint values into the %_stat table for the next incr-merger */ if( bDirtyHint && rc==SQLITE_OK ){ rc = fts3IncrmergeHintStore(p, &hint); } sqlite3_free(pWriter); sqlite3_free(hint.a); return rc; } /* ** Convert the text beginning at *pz into an integer and return ** its value. Advance *pz to point to the first character past ** the integer. */ static int fts3Getint(const char **pz){ const char *z = *pz; int i = 0; while( (*z)>='0' && (*z)<='9' ) i = 10*i + *(z++) - '0'; *pz = z; return i; } /* ** Process statements of the form: ** ** INSERT INTO table(table) VALUES('merge=A,B'); ** ** A and B are integers that decode to be the number of leaf pages ** written for the merge, and the minimum number of segments on a level ** before it will be selected for a merge, respectively. */ static int fts3DoIncrmerge( Fts3Table *p, /* FTS3 table handle */ const char *zParam /* Nul-terminated string containing "A,B" */ ){ int rc; int nMin = (FTS3_MERGE_COUNT / 2); int nMerge = 0; const char *z = zParam; /* Read the first integer value */ nMerge = fts3Getint(&z); /* If the first integer value is followed by a ',', read the second ** integer value. */ if( z[0]==',' && z[1]!='\0' ){ z++; nMin = fts3Getint(&z); } if( z[0]!='\0' || nMin<2 ){ rc = SQLITE_ERROR; }else{ rc = SQLITE_OK; if( !p->bHasStat ){ assert( p->bFts4==0 ); sqlite3Fts3CreateStatTable(&rc, p); } if( rc==SQLITE_OK ){ rc = sqlite3Fts3Incrmerge(p, nMerge, nMin); } sqlite3Fts3SegmentsClose(p); } return rc; } /* ** Process statements of the form: ** ** INSERT INTO table(table) VALUES('automerge=X'); ** ** where X is an integer. X==0 means to turn automerge off. X!=0 means ** turn it on. The setting is persistent. 
*/ static int fts3DoAutoincrmerge( Fts3Table *p, /* FTS3 table handle */ const char *zParam /* Nul-terminated string containing boolean */ ){ int rc = SQLITE_OK; sqlite3_stmt *pStmt = 0; p->bAutoincrmerge = fts3Getint(&zParam)!=0; if( !p->bHasStat ){ assert( p->bFts4==0 ); sqlite3Fts3CreateStatTable(&rc, p); if( rc ) return rc; } rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pStmt, 0); if( rc ) return rc;; sqlite3_bind_int(pStmt, 1, FTS_STAT_AUTOINCRMERGE); sqlite3_bind_int(pStmt, 2, p->bAutoincrmerge); sqlite3_step(pStmt); rc = sqlite3_reset(pStmt); return rc; } /* ** Return a 64-bit checksum for the FTS index entry specified by the ** arguments to this function. */ static u64 fts3ChecksumEntry( const char *zTerm, /* Pointer to buffer containing term */ int nTerm, /* Size of zTerm in bytes */ int iLangid, /* Language id for current row */ int iIndex, /* Index (0..Fts3Table.nIndex-1) */ i64 iDocid, /* Docid for current row. */ int iCol, /* Column number */ int iPos /* Position */ ){ int i; u64 ret = (u64)iDocid; ret += (ret<<3) + iLangid; ret += (ret<<3) + iIndex; ret += (ret<<3) + iCol; ret += (ret<<3) + iPos; for(i=0; i<nTerm; i++) ret += (ret<<3) + zTerm[i]; return ret; } /* ** Return a checksum of all entries in the FTS index that correspond to ** language id iLangid. The checksum is calculated by XORing the checksums ** of each individual entry (see fts3ChecksumEntry()) together. ** ** If successful, the checksum value is returned and *pRc set to SQLITE_OK. ** Otherwise, if an error occurs, *pRc is set to an SQLite error code. The ** return value is undefined in this case. */ static u64 fts3ChecksumIndex( Fts3Table *p, /* FTS3 table handle */ int iLangid, /* Language id to return cksum for */ int iIndex, /* Index to cksum (0..p->nIndex-1) */ int *pRc /* OUT: Return code */ ){ Fts3SegFilter filter; Fts3MultiSegReader csr; int rc; u64 cksum = 0; assert( *pRc==SQLITE_OK ); memset(&filter, 0, sizeof(filter)); memset(&csr, 0, sizeof(csr)); filter.flags = FTS3_SEGMENT_REQUIRE_POS|FTS3_SEGMENT_IGNORE_EMPTY; filter.flags |= FTS3_SEGMENT_SCAN; rc = sqlite3Fts3SegReaderCursor( p, iLangid, iIndex, FTS3_SEGCURSOR_ALL, 0, 0, 0, 1,&csr ); if( rc==SQLITE_OK ){ rc = sqlite3Fts3SegReaderStart(p, &csr, &filter); } if( rc==SQLITE_OK ){ while( SQLITE_ROW==(rc = sqlite3Fts3SegReaderStep(p, &csr)) ){ char *pCsr = csr.aDoclist; char *pEnd = &pCsr[csr.nDoclist]; i64 iDocid = 0; i64 iCol = 0; i64 iPos = 0; pCsr += sqlite3Fts3GetVarint(pCsr, &iDocid); while( pCsr<pEnd ){ i64 iVal = 0; pCsr += sqlite3Fts3GetVarint(pCsr, &iVal); if( pCsr<pEnd ){ if( iVal==0 || iVal==1 ){ iCol = 0; iPos = 0; if( iVal ){ pCsr += sqlite3Fts3GetVarint(pCsr, &iCol); }else{ pCsr += sqlite3Fts3GetVarint(pCsr, &iVal); iDocid += iVal; } }else{ iPos += (iVal - 2); cksum = cksum ^ fts3ChecksumEntry( csr.zTerm, csr.nTerm, iLangid, iIndex, iDocid, (int)iCol, (int)iPos ); } } } } } sqlite3Fts3SegReaderFinish(&csr); *pRc = rc; return cksum; } /* ** Check if the contents of the FTS index match the current contents of the ** content table. If no error occurs and the contents do match, set *pbOk ** to true and return SQLITE_OK. Or if the contents do not match, set *pbOk ** to false before returning. ** ** If an error occurs (e.g. an OOM or IO error), return an SQLite error ** code. The final value of *pbOk is undefined in this case. 
*/ static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){ int rc = SQLITE_OK; /* Return code */ u64 cksum1 = 0; /* Checksum based on FTS index contents */ u64 cksum2 = 0; /* Checksum based on %_content contents */ sqlite3_stmt *pAllLangid = 0; /* Statement to return all language-ids */ /* This block calculates the checksum according to the FTS index. */ rc = fts3SqlStmt(p, SQL_SELECT_ALL_LANGID, &pAllLangid, 0); if( rc==SQLITE_OK ){ int rc2; sqlite3_bind_int(pAllLangid, 1, p->nIndex); while( rc==SQLITE_OK && sqlite3_step(pAllLangid)==SQLITE_ROW ){ int iLangid = sqlite3_column_int(pAllLangid, 0); int i; for(i=0; i<p->nIndex; i++){ cksum1 = cksum1 ^ fts3ChecksumIndex(p, iLangid, i, &rc); } } rc2 = sqlite3_reset(pAllLangid); if( rc==SQLITE_OK ) rc = rc2; } /* This block calculates the checksum according to the %_content table */ rc = fts3SqlStmt(p, SQL_SELECT_ALL_LANGID, &pAllLangid, 0); if( rc==SQLITE_OK ){ sqlite3_tokenizer_module const *pModule = p->pTokenizer->pModule; sqlite3_stmt *pStmt = 0; char *zSql; zSql = sqlite3_mprintf("SELECT %s" , p->zReadExprlist); if( !zSql ){ rc = SQLITE_NOMEM; }else{ rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0); sqlite3_free(zSql); } while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ i64 iDocid = sqlite3_column_int64(pStmt, 0); int iLang = langidFromSelect(p, pStmt); int iCol; for(iCol=0; rc==SQLITE_OK && iCol<p->nColumn; iCol++){ const char *zText = (const char *)sqlite3_column_text(pStmt, iCol+1); int nText = sqlite3_column_bytes(pStmt, iCol+1); sqlite3_tokenizer_cursor *pT = 0; rc = sqlite3Fts3OpenTokenizer(p->pTokenizer, iLang, zText, nText, &pT); while( rc==SQLITE_OK ){ char const *zToken; /* Buffer containing token */ int nToken; /* Number of bytes in token */ int iDum1, iDum2; /* Dummy variables */ int iPos; /* Position of token in zText */ rc = pModule->xNext(pT, &zToken, &nToken, &iDum1, &iDum2, &iPos); if( rc==SQLITE_OK ){ int i; cksum2 = cksum2 ^ fts3ChecksumEntry( zToken, nToken, iLang, 0, iDocid, iCol, iPos ); for(i=1; i<p->nIndex; i++){ if( p->aIndex[i].nPrefix<=nToken ){ cksum2 = cksum2 ^ fts3ChecksumEntry( zToken, p->aIndex[i].nPrefix, iLang, i, iDocid, iCol, iPos ); } } } } if( pT ) pModule->xClose(pT); if( rc==SQLITE_DONE ) rc = SQLITE_OK; } } sqlite3_finalize(pStmt); } *pbOk = (cksum1==cksum2); return rc; } /* ** Run the integrity-check. If no error occurs and the current contents of ** the FTS index are correct, return SQLITE_OK. Or, if the contents of the ** FTS index are incorrect, return SQLITE_CORRUPT_VTAB. ** ** Or, if an error (e.g. an OOM or IO error) occurs, return an SQLite ** error code. ** ** The integrity-check works as follows. For each token and indexed token ** prefix in the document set, a 64-bit checksum is calculated (by code ** in fts3ChecksumEntry()) based on the following: ** ** + The index number (0 for the main index, 1 for the first prefix ** index etc.), ** + The token (or token prefix) text itself, ** + The language-id of the row it appears in, ** + The docid of the row it appears in, ** + The column it appears in, and ** + The tokens position within that column. ** ** The checksums for all entries in the index are XORed together to create ** a single checksum for the entire index. ** ** The integrity-check code calculates the same checksum in two ways: ** ** 1. By scanning the contents of the FTS index, and ** 2. By scanning and tokenizing the content table. ** ** If the two checksums are identical, the integrity-check is deemed to have ** passed. 
*/ static int fts3DoIntegrityCheck( Fts3Table *p /* FTS3 table handle */ ){ int rc; int bOk = 0; rc = fts3IntegrityCheck(p, &bOk); if( rc==SQLITE_OK && bOk==0 ) rc = SQLITE_CORRUPT_VTAB; return rc; } /* ** Handle a 'special' INSERT of the form: ** ** "INSERT INTO tbl(tbl) VALUES(<expr>)" ** ** Argument pVal contains the result of <expr>. Currently the only ** meaningful value to insert is the text 'optimize'. */ static int fts3SpecialInsert(Fts3Table *p, sqlite3_value *pVal){ int rc; /* Return Code */ const char *zVal = (const char *)sqlite3_value_text(pVal); int nVal = sqlite3_value_bytes(pVal); if( !zVal ){ return SQLITE_NOMEM; }else if( nVal==8 && 0==sqlite3_strnicmp(zVal, "optimize", 8) ){ rc = fts3DoOptimize(p, 0); }else if( nVal==7 && 0==sqlite3_strnicmp(zVal, "rebuild", 7) ){ rc = fts3DoRebuild(p); }else if( nVal==15 && 0==sqlite3_strnicmp(zVal, "integrity-check", 15) ){ rc = fts3DoIntegrityCheck(p); }else if( nVal>6 && 0==sqlite3_strnicmp(zVal, "merge=", 6) ){ rc = fts3DoIncrmerge(p, &zVal[6]); }else if( nVal>10 && 0==sqlite3_strnicmp(zVal, "automerge=", 10) ){ rc = fts3DoAutoincrmerge(p, &zVal[10]); #ifdef SQLITE_TEST }else if( nVal>9 && 0==sqlite3_strnicmp(zVal, "nodesize=", 9) ){ p->nNodeSize = atoi(&zVal[9]); rc = SQLITE_OK; }else if( nVal>11 && 0==sqlite3_strnicmp(zVal, "maxpending=", 9) ){ p->nMaxPendingData = atoi(&zVal[11]); rc = SQLITE_OK; |
︙ | ︙ | |||
3380 3381 3382 3383 3384 3385 3386 | } /* ** This function does the work for the xUpdate method of FTS3 virtual ** tables. The schema of the virtual table being: ** ** CREATE TABLE <table name>( | | | 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 | } /* ** This function does the work for the xUpdate method of FTS3 virtual ** tables. The schema of the virtual table being: ** ** CREATE TABLE <table name>( ** <user columns>, ** <table name> HIDDEN, ** docid HIDDEN, ** <langid> HIDDEN ** ); ** ** */ |
︙ | ︙ | |||
3512 3513 3514 3515 3516 3517 3518 | } if( p->bHasDocsize ){ fts3InsertDocsize(&rc, p, aSzIns); } nChng++; } | | | 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 | } if( p->bHasDocsize ){ fts3InsertDocsize(&rc, p, aSzIns); } nChng++; } if( p->bFts4 ){ fts3UpdateDocTotals(&rc, p, aSzIns, aSzDel, nChng); } update_out: sqlite3_free(aSzIns); sqlite3Fts3SegmentsClose(p); return rc; |
︙ | ︙ |
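The hint blob used by the incremental-merge code above is simply a stack of (absolute level, input-segment count) pairs, each stored as two FTS3-style varints (7 bits per byte, least-significant byte first, high bit set on every byte except the last). The following standalone sketch is an illustration only, not part of the SQLite sources: it mirrors the push/pop behaviour of fts3IncrmergeHintPush() and fts3IncrmergeHintPop() on a plain byte buffer, and the helper names and fixed-size buffer are assumptions made for the example.

```c
#include <stdio.h>

typedef long long i64;

/* Write v as an FTS3-style varint (7 bits per byte, LSB first). */
static int putVarint(unsigned char *p, i64 v){
  int n = 0;
  do{
    p[n++] = (unsigned char)((v & 0x7f) | 0x80);
    v >>= 7;
  }while( v!=0 );
  p[n-1] &= 0x7f;        /* clear the continuation bit on the final byte */
  return n;
}

/* Read an FTS3-style varint starting at p[0]; return bytes consumed. */
static int getVarint(const unsigned char *p, i64 *pv){
  i64 x = 0, y = 1;
  int n = 0;
  while( p[n] & 0x80 ){ x += y * (p[n] & 0x7f); y <<= 7; n++; }
  x += y * p[n]; n++;
  *pv = x;
  return n;
}

/* Append one (iAbsLevel, nInput) entry to the hint buffer. */
static void hintPush(unsigned char *a, int *pn, i64 iAbsLevel, i64 nInput){
  *pn += putVarint(&a[*pn], iAbsLevel);
  *pn += putVarint(&a[*pn], nInput);
}

/* Pop the most recently pushed entry, as fts3IncrmergeHintPop() does:
** back up over the two varints that end the buffer (a varint ends at any
** byte with the 0x80 bit clear), then re-read them. The real code also
** verifies that exactly the whole tail was consumed and returns
** SQLITE_CORRUPT_VTAB otherwise; that check is omitted here. */
static void hintPop(unsigned char *a, int *pn, i64 *piAbsLevel, i64 *pnInput){
  int i = *pn - 2;
  while( i>0 && (a[i-1] & 0x80) ) i--;
  while( i>0 && (a[i-1] & 0x80) ) i--;
  *pn = i;
  i += getVarint(&a[i], piAbsLevel);
  getVarint(&a[i], pnInput);
}

int main(void){
  unsigned char aHint[64];
  int n = 0;
  i64 lvl, nIn;
  hintPush(aHint, &n, 1027, 4);   /* e.g. absolute level 1027, 4 input segments */
  hintPush(aHint, &n, 2051, 8);
  hintPop(aHint, &n, &lvl, &nIn);
  printf("popped level=%lld nInput=%lld, %d byte(s) left\n", lvl, nIn, n);
  return 0;
}
```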
Added ext/fts3/tool/fts3view.c.
/*
** This program is a debugging and analysis utility that displays
** information about an FTS3 or FTS4 index.
**
** Link this program against the SQLite3 amalgamation with the
** SQLITE_ENABLE_FTS4 compile-time option.  Then run it as:
**
**    fts3view DATABASE
**
** to get a list of all FTS3/4 tables in DATABASE, or do
**
**    fts3view DATABASE TABLE COMMAND ....
**
** to see various aspects of the TABLE table.  Type fts3view with no
** arguments for a list of available COMMANDs.
*/
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include "sqlite3.h"

/*
** Extra command-line arguments:
*/
int nExtra;
char **azExtra;

/*
** Look for a command-line argument.
*/ const char *findOption(const char *zName, int hasArg, const char *zDefault){ int i; const char *zResult = zDefault; for(i=0; i<nExtra; i++){ const char *z = azExtra[i]; while( z[0]=='-' ) z++; if( strcmp(z, zName)==0 ){ int j = 1; if( hasArg==0 || i==nExtra-1 ) j = 0; zResult = azExtra[i+j]; while( i+j<nExtra ){ azExtra[i] = azExtra[i+j+1]; i++; } break; } } return zResult; } /* ** Prepare an SQL query */ static sqlite3_stmt *prepare(sqlite3 *db, const char *zFormat, ...){ va_list ap; char *zSql; sqlite3_stmt *pStmt; int rc; va_start(ap, zFormat); zSql = sqlite3_vmprintf(zFormat, ap); va_end(ap); rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); if( rc ){ fprintf(stderr, "Error: %s\nSQL: %s\n", sqlite3_errmsg(db), zSql); exit(1); } sqlite3_free(zSql); return pStmt; } /* ** Run an SQL statement */ static int runSql(sqlite3 *db, const char *zFormat, ...){ va_list ap; char *zSql; int rc; va_start(ap, zFormat); zSql = sqlite3_vmprintf(zFormat, ap); rc = sqlite3_exec(db, zSql, 0, 0, 0); va_end(ap); return rc; } /* ** Show the table schema */ static void showSchema(sqlite3 *db, const char *zTab){ sqlite3_stmt *pStmt; pStmt = prepare(db, "SELECT sql FROM sqlite_master" " WHERE name LIKE '%q%%'" " ORDER BY 1", zTab); while( sqlite3_step(pStmt)==SQLITE_ROW ){ printf("%s;\n", sqlite3_column_text(pStmt, 0)); } sqlite3_finalize(pStmt); pStmt = prepare(db, "PRAGMA page_size"); while( sqlite3_step(pStmt)==SQLITE_ROW ){ printf("PRAGMA page_size=%s;\n", sqlite3_column_text(pStmt, 0)); } sqlite3_finalize(pStmt); pStmt = prepare(db, "PRAGMA journal_mode"); while( sqlite3_step(pStmt)==SQLITE_ROW ){ printf("PRAGMA journal_mode=%s;\n", sqlite3_column_text(pStmt, 0)); } sqlite3_finalize(pStmt); pStmt = prepare(db, "PRAGMA auto_vacuum"); while( sqlite3_step(pStmt)==SQLITE_ROW ){ const char *zType = "???"; switch( sqlite3_column_int(pStmt, 0) ){ case 0: zType = "OFF"; break; case 1: zType = "FULL"; break; case 2: zType = "INCREMENTAL"; break; } printf("PRAGMA auto_vacuum=%s;\n", zType); } sqlite3_finalize(pStmt); pStmt = prepare(db, "PRAGMA encoding"); while( sqlite3_step(pStmt)==SQLITE_ROW ){ printf("PRAGMA encoding=%s;\n", sqlite3_column_text(pStmt, 0)); } sqlite3_finalize(pStmt); } /* ** Read a 64-bit variable-length integer from memory starting at p[0]. ** Return the number of bytes read, or 0 on error. ** The value is stored in *v. */ int getVarint(const unsigned char *p, sqlite_int64 *v){ const unsigned char *q = p; sqlite_uint64 x = 0, y = 1; while( (*q&0x80)==0x80 && q-(unsigned char *)p<9 ){ x += y * (*q++ & 0x7f); y <<= 7; } x += y * (*q++); *v = (sqlite_int64) x; return (int) (q - (unsigned char *)p); } /* Show the content of the %_stat table */ static void showStat(sqlite3 *db, const char *zTab){ sqlite3_stmt *pStmt; pStmt = prepare(db, "SELECT id, value FROM '%q_stat'", zTab); while( sqlite3_step(pStmt)==SQLITE_ROW ){ printf("stat[%d] =", sqlite3_column_int(pStmt, 0)); switch( sqlite3_column_type(pStmt, 1) ){ case SQLITE_INTEGER: { printf(" %d\n", sqlite3_column_int(pStmt, 1)); break; } case SQLITE_BLOB: { unsigned char *x = (unsigned char*)sqlite3_column_blob(pStmt, 1); int len = sqlite3_column_bytes(pStmt, 1); int i = 0; sqlite3_int64 v; while( i<len ){ i += getVarint(x, &v); printf(" %lld", v); } printf("\n"); break; } } } sqlite3_finalize(pStmt); } /* ** Report on the vocabulary. This creates an fts4aux table with a random ** name, but deletes it in the end. 
*/ static void showVocabulary(sqlite3 *db, const char *zTab){ char *zAux; sqlite3_uint64 r; sqlite3_stmt *pStmt; int nDoc = 0; int nToken = 0; int nOccurrence = 0; int nTop; int n, i; sqlite3_randomness(sizeof(r), &r); zAux = sqlite3_mprintf("viewer_%llx", zTab, r); runSql(db, "BEGIN"); pStmt = prepare(db, "SELECT count(*) FROM %Q", zTab); while( sqlite3_step(pStmt)==SQLITE_ROW ){ nDoc = sqlite3_column_int(pStmt, 0); } sqlite3_finalize(pStmt); printf("Number of documents...................... %9d\n", nDoc); runSql(db, "CREATE VIRTUAL TABLE %s USING fts4aux(%Q)", zAux, zTab); pStmt = prepare(db, "SELECT count(*), sum(occurrences) FROM %s WHERE col='*'", zAux); while( sqlite3_step(pStmt)==SQLITE_ROW ){ nToken = sqlite3_column_int(pStmt, 0); nOccurrence = sqlite3_column_int(pStmt, 1); } sqlite3_finalize(pStmt); printf("Total tokens in all documents............ %9d\n", nOccurrence); printf("Total number of distinct tokens.......... %9d\n", nToken); if( nToken==0 ) goto end_vocab; n = 0; pStmt = prepare(db, "SELECT count(*) FROM %s" " WHERE col='*' AND occurrences==1", zAux); while( sqlite3_step(pStmt)==SQLITE_ROW ){ n = sqlite3_column_int(pStmt, 0); } sqlite3_finalize(pStmt); printf("Tokens used exactly once................. %9d %5.2f%%\n", n, n*100.0/nToken); n = 0; pStmt = prepare(db, "SELECT count(*) FROM %s" " WHERE col='*' AND documents==1", zAux); while( sqlite3_step(pStmt)==SQLITE_ROW ){ n = sqlite3_column_int(pStmt, 0); } sqlite3_finalize(pStmt); printf("Tokens used in only one document......... %9d %5.2f%%\n", n, n*100.0/nToken); if( nDoc>=2000 ){ n = 0; pStmt = prepare(db, "SELECT count(*) FROM %s" " WHERE col='*' AND occurrences<=%d", zAux, nDoc/1000); while( sqlite3_step(pStmt)==SQLITE_ROW ){ n = sqlite3_column_int(pStmt, 0); } sqlite3_finalize(pStmt); printf("Tokens used in 0.1%% or less of docs...... %9d %5.2f%%\n", n, n*100.0/nToken); } if( nDoc>=200 ){ n = 0; pStmt = prepare(db, "SELECT count(*) FROM %s" " WHERE col='*' AND occurrences<=%d", zAux, nDoc/100); while( sqlite3_step(pStmt)==SQLITE_ROW ){ n = sqlite3_column_int(pStmt, 0); } sqlite3_finalize(pStmt); printf("Tokens used in 1%% or less of docs........ %9d %5.2f%%\n", n, n*100.0/nToken); } nTop = atoi(findOption("top", 1, "25")); printf("The %d most common tokens:\n", nTop); pStmt = prepare(db, "SELECT term, documents FROM %s" " WHERE col='*'" " ORDER BY documents DESC, term" " LIMIT %d", zAux, nTop); i = 0; while( sqlite3_step(pStmt)==SQLITE_ROW ){ i++; n = sqlite3_column_int(pStmt, 1); printf(" %2d. 
%-30s %9d docs %5.2f%%\n", i, sqlite3_column_text(pStmt, 0), n, n*100.0/nDoc); } sqlite3_finalize(pStmt); end_vocab: runSql(db, "ROLLBACK"); sqlite3_free(zAux); } /* ** Report on the number and sizes of segments */ static void showSegmentStats(sqlite3 *db, const char *zTab){ sqlite3_stmt *pStmt; int nSeg = 0; sqlite3_int64 szSeg = 0, mxSeg = 0; int nIdx = 0; sqlite3_int64 szIdx = 0, mxIdx = 0; int nRoot = 0; sqlite3_int64 szRoot = 0, mxRoot = 0; sqlite3_int64 mx; int nLeaf; int n; int pgsz; int mxLevel; int i; pStmt = prepare(db, "SELECT count(*), sum(length(block)), max(length(block))" " FROM '%q_segments'", zTab); while( sqlite3_step(pStmt)==SQLITE_ROW ){ nSeg = sqlite3_column_int(pStmt, 0); szSeg = sqlite3_column_int64(pStmt, 1); mxSeg = sqlite3_column_int64(pStmt, 2); } sqlite3_finalize(pStmt); pStmt = prepare(db, "SELECT count(*), sum(length(block)), max(length(block))" " FROM '%q_segments' a JOIN '%q_segdir' b" " WHERE a.blockid BETWEEN b.leaves_end_block+1 AND b.end_block", zTab, zTab); while( sqlite3_step(pStmt)==SQLITE_ROW ){ nIdx = sqlite3_column_int(pStmt, 0); szIdx = sqlite3_column_int64(pStmt, 1); mxIdx = sqlite3_column_int64(pStmt, 2); } sqlite3_finalize(pStmt); pStmt = prepare(db, "SELECT count(*), sum(length(root)), max(length(root))" " FROM '%q_segdir'", zTab); while( sqlite3_step(pStmt)==SQLITE_ROW ){ nRoot = sqlite3_column_int(pStmt, 0); szRoot = sqlite3_column_int64(pStmt, 1); mxRoot = sqlite3_column_int64(pStmt, 2); } sqlite3_finalize(pStmt); printf("Number of segments....................... %9d\n", nSeg+nRoot); printf("Number of leaf segments.................. %9d\n", nSeg-nIdx); printf("Number of index segments................. %9d\n", nIdx); printf("Number of root segments.................. %9d\n", nRoot); printf("Total size of all segments............... %9lld\n", szSeg+szRoot); printf("Total size of all leaf segments.......... %9lld\n", szSeg-szIdx); printf("Total size of all index segments......... %9lld\n", szIdx); printf("Total size of all root segments.......... %9lld\n", szRoot); if( nSeg>0 ){ printf("Average size of all segments............. %11.1f\n", (double)(szSeg+szRoot)/(double)(nSeg+nRoot)); printf("Average size of leaf segments............ %11.1f\n", (double)(szSeg-szIdx)/(double)(nSeg-nIdx)); } if( nIdx>0 ){ printf("Average size of index segments........... %11.1f\n", (double)szIdx/(double)nIdx); } if( nRoot>0 ){ printf("Average size of root segments............ %11.1f\n", (double)szRoot/(double)nRoot); } mx = mxSeg; if( mx<mxRoot ) mx = mxRoot; printf("Maximum segment size..................... %9lld\n", mx); printf("Maximum index segment size............... %9lld\n", mxIdx); printf("Maximum root segment size................ %9lld\n", mxRoot); pStmt = prepare(db, "PRAGMA page_size"); pgsz = 1024; while( sqlite3_step(pStmt)==SQLITE_ROW ){ pgsz = sqlite3_column_int(pStmt, 0); } sqlite3_finalize(pStmt); printf("Database page size....................... %9d\n", pgsz); pStmt = prepare(db, "SELECT count(*)" " FROM '%q_segments' a JOIN '%q_segdir' b" " WHERE a.blockid BETWEEN b.start_block AND b.leaves_end_block" " AND length(a.block)>%d", zTab, zTab, pgsz-45); n = 0; while( sqlite3_step(pStmt)==SQLITE_ROW ){ n = sqlite3_column_int(pStmt, 0); } sqlite3_finalize(pStmt); nLeaf = nSeg - nIdx; printf("Leaf segments larger than %5d bytes.... 
%9d %5.2f%%\n", pgsz-45, n, n*100.0/nLeaf); pStmt = prepare(db, "SELECT max(level%%1024) FROM '%q_segdir'", zTab); mxLevel = 0; while( sqlite3_step(pStmt)==SQLITE_ROW ){ mxLevel = sqlite3_column_int(pStmt, 0); } sqlite3_finalize(pStmt); for(i=0; i<=mxLevel; i++){ pStmt = prepare(db, "SELECT count(*), sum(len), avg(len), max(len), sum(len>%d)," " count(distinct idx)" " FROM (SELECT length(a.block) AS len, idx" " FROM '%q_segments' a JOIN '%q_segdir' b" " WHERE (a.blockid BETWEEN b.start_block" " AND b.leaves_end_block)" " AND (b.level%%1024)==%d)", pgsz-45, zTab, zTab, i); if( sqlite3_step(pStmt)==SQLITE_ROW && (nLeaf = sqlite3_column_int(pStmt, 0))>0 ){ int nIdx = sqlite3_column_int(pStmt, 5); sqlite3_int64 sz; printf("For level %d:\n", i); printf(" Number of indexes...................... %9d\n", nIdx); printf(" Number of leaf segments................ %9d\n", nLeaf); if( nIdx>1 ){ printf(" Average leaf segments per index........ %11.1f\n", (double)nLeaf/(double)nIdx); } printf(" Total size of all leaf segments........ %9lld\n", (sz = sqlite3_column_int64(pStmt, 1))); printf(" Average size of leaf segments.......... %11.1f\n", sqlite3_column_double(pStmt, 2)); if( nIdx>1 ){ printf(" Average leaf segment size per index.... %11.1f\n", (double)sz/(double)nIdx); } printf(" Maximum leaf segment size.............. %9lld\n", sqlite3_column_int64(pStmt, 3)); n = sqlite3_column_int(pStmt, 4); printf(" Leaf segments larger than %5d bytes.. %9d %5.2f%%\n", pgsz-45, n, n*100.0/nLeaf); } sqlite3_finalize(pStmt); } } /* ** Print a single "tree" line of the segdir map output. */ static void printTreeLine(sqlite3_int64 iLower, sqlite3_int64 iUpper){ printf(" tree %9lld", iLower); if( iUpper>iLower ){ printf(" thru %9lld (%lld blocks)", iUpper, iUpper-iLower+1); } printf("\n"); } /* ** Check to see if the block of a %_segments entry is NULL. */ static int isNullSegment(sqlite3 *db, const char *zTab, sqlite3_int64 iBlockId){ sqlite3_stmt *pStmt; int rc = 1; pStmt = prepare(db, "SELECT block IS NULL FROM '%q_segments'" " WHERE blockid=%lld", zTab, iBlockId); if( sqlite3_step(pStmt)==SQLITE_ROW ){ rc = sqlite3_column_int(pStmt, 0); } sqlite3_finalize(pStmt); return rc; } /* ** Show a map of segments derived from the %_segdir table. */ static void showSegdirMap(sqlite3 *db, const char *zTab){ int mxIndex, iIndex; sqlite3_stmt *pStmt = 0; sqlite3_stmt *pStmt2 = 0; int prevLevel; pStmt = prepare(db, "SELECT max(level/1024) FROM '%q_segdir'", zTab); if( sqlite3_step(pStmt)==SQLITE_ROW ){ mxIndex = sqlite3_column_int(pStmt, 0); }else{ mxIndex = 0; } sqlite3_finalize(pStmt); printf("Number of inverted indices............... %3d\n", mxIndex+1); pStmt = prepare(db, "SELECT level, idx, start_block, leaves_end_block, end_block, rowid" " FROM '%q_segdir'" " WHERE level/1024==?" " ORDER BY level DESC, idx", zTab); pStmt2 = prepare(db, "SELECT blockid FROM '%q_segments'" " WHERE blockid BETWEEN ? AND ? 
ORDER BY blockid", zTab); for(iIndex=0; iIndex<=mxIndex; iIndex++){ if( mxIndex>0 ){ printf("**************************** Index %d " "****************************\n", iIndex); } sqlite3_bind_int(pStmt, 1, iIndex); prevLevel = -1; while( sqlite3_step(pStmt)==SQLITE_ROW ){ int iLevel = sqlite3_column_int(pStmt, 0)%1024; int iIdx = sqlite3_column_int(pStmt, 1); sqlite3_int64 iStart = sqlite3_column_int64(pStmt, 2); sqlite3_int64 iLEnd = sqlite3_column_int64(pStmt, 3); sqlite3_int64 iEnd = sqlite3_column_int64(pStmt, 4); char rtag[20]; if( iLevel!=prevLevel ){ printf("level %2d idx %2d", iLevel, iIdx); prevLevel = iLevel; }else{ printf(" idx %2d", iIdx); } sqlite3_snprintf(sizeof(rtag), rtag, "r%lld", sqlite3_column_int64(pStmt,5)); printf(" root %9s\n", rtag); if( iLEnd>iStart ){ sqlite3_int64 iLower, iPrev, iX; if( iLEnd+1<=iEnd ){ sqlite3_bind_int64(pStmt2, 1, iLEnd+1); sqlite3_bind_int64(pStmt2, 2, iEnd); iLower = -1; while( sqlite3_step(pStmt2)==SQLITE_ROW ){ iX = sqlite3_column_int64(pStmt2, 0); if( iLower<0 ){ iLower = iPrev = iX; }else if( iX==iPrev+1 ){ iPrev = iX; }else{ printTreeLine(iLower, iPrev); iLower = iPrev = iX; } } sqlite3_reset(pStmt2); if( iLower>=0 ){ if( iLower==iPrev && iLower==iEnd && isNullSegment(db,zTab,iLower) ){ printf(" null %9lld\n", iLower); }else{ printTreeLine(iLower, iPrev); } } } printf(" leaves %9lld thru %9lld (%lld blocks)\n", iStart, iLEnd, iLEnd - iStart + 1); } } sqlite3_reset(pStmt); } sqlite3_finalize(pStmt); sqlite3_finalize(pStmt2); } /* ** Decode a single segment block and display the results on stdout. */ static void decodeSegment( const unsigned char *aData, /* Content to print */ int nData /* Number of bytes of content */ ){ sqlite3_int64 iChild; sqlite3_int64 iPrefix; sqlite3_int64 nTerm; sqlite3_int64 n; sqlite3_int64 iDocsz; int iHeight; int i = 0; int cnt = 0; char zTerm[1000]; i += getVarint(aData, &n); iHeight = (int)n; printf("height: %d\n", iHeight); if( iHeight>0 ){ i += getVarint(aData+i, &iChild); printf("left-child: %lld\n", iChild); } while( i<nData ){ if( (cnt++)>0 ){ i += getVarint(aData+i, &iPrefix); }else{ iPrefix = 0; } i += getVarint(aData+i, &nTerm); if( iPrefix+nTerm+1 >= sizeof(zTerm) ){ fprintf(stderr, "term to long\n"); exit(1); } memcpy(zTerm+iPrefix, aData+i, nTerm); zTerm[iPrefix+nTerm] = 0; i += nTerm; if( iHeight==0 ){ i += getVarint(aData+i, &iDocsz); printf("term: %-25s doclist %7lld bytes offset %d\n", zTerm, iDocsz, i); i += iDocsz; }else{ printf("term: %-25s child %lld\n", zTerm, ++iChild); } } } /* ** Print a a blob as hex and ascii. */ static void printBlob( const unsigned char *aData, /* Content to print */ int nData /* Number of bytes of content */ ){ int i, j; const char *zOfstFmt; const int perLine = 16; if( (nData&~0xfff)==0 ){ zOfstFmt = " %03x: "; }else if( (nData&~0xffff)==0 ){ zOfstFmt = " %04x: "; }else if( (nData&~0xfffff)==0 ){ zOfstFmt = " %05x: "; }else if( (nData&~0xffffff)==0 ){ zOfstFmt = " %06x: "; }else{ zOfstFmt = " %08x: "; } for(i=0; i<nData; i += perLine){ fprintf(stdout, zOfstFmt, i); for(j=0; j<perLine; j++){ if( i+j>nData ){ fprintf(stdout, " "); }else{ fprintf(stdout,"%02x ", aData[i+j]); } } for(j=0; j<perLine; j++){ if( i+j>nData ){ fprintf(stdout, " "); }else{ fprintf(stdout,"%c", isprint(aData[i+j]) ? 
aData[i+j] : '.'); } } fprintf(stdout,"\n"); } } /* ** Convert text to a 64-bit integer */ static sqlite3_int64 atoi64(const char *z){ sqlite3_int64 v = 0; while( z[0]>='0' && z[0]<='9' ){ v = v*10 + z[0] - '0'; z++; } return v; } /* ** Return a prepared statement which, when stepped, will return in its ** first column the blob associated with segment zId. If zId begins with ** 'r' then it is a rowid of a %_segdir entry. Otherwise it is a ** %_segment entry. */ static sqlite3_stmt *prepareToGetSegment( sqlite3 *db, /* The database */ const char *zTab, /* The FTS3/4 table name */ const char *zId /* ID of the segment to open */ ){ sqlite3_stmt *pStmt; if( zId[0]=='r' ){ pStmt = prepare(db, "SELECT root FROM '%q_segdir' WHERE rowid=%lld", zTab, atoi64(zId+1)); }else{ pStmt = prepare(db, "SELECT block FROM '%q_segments' WHERE blockid=%lld", zTab, atoi64(zId)); } return pStmt; } /* ** Print the content of a segment or of the root of a segdir. The segment ** or root is identified by azExtra[0]. If the first character of azExtra[0] ** is 'r' then the remainder is the integer rowid of the %_segdir entry. ** If the first character of azExtra[0] is not 'r' then, then all of ** azExtra[0] is an integer which is the block number. ** ** If the --raw option is present in azExtra, then a hex dump is provided. ** Otherwise a decoding is shown. */ static void showSegment(sqlite3 *db, const char *zTab){ const unsigned char *aData; int nData; sqlite3_stmt *pStmt; pStmt = prepareToGetSegment(db, zTab, azExtra[0]); if( sqlite3_step(pStmt)!=SQLITE_ROW ){ sqlite3_finalize(pStmt); return; } nData = sqlite3_column_bytes(pStmt, 0); aData = sqlite3_column_blob(pStmt, 0); printf("Segment %s of size %d bytes:\n", azExtra[0], nData); if( findOption("raw", 0, 0)!=0 ){ printBlob(aData, nData); }else{ decodeSegment(aData, nData); } sqlite3_finalize(pStmt); } /* ** Decode a single doclist and display the results on stdout. */ static void decodeDoclist( const unsigned char *aData, /* Content to print */ int nData /* Number of bytes of content */ ){ sqlite3_int64 iPrevDocid = 0; sqlite3_int64 iDocid; sqlite3_int64 iPos; sqlite3_int64 iPrevPos = 0; sqlite3_int64 iCol; int i = 0; while( i<nData ){ i += getVarint(aData+i, &iDocid); printf("docid %lld col0", iDocid+iPrevDocid); iPrevDocid += iDocid; iPrevPos = 0; while( 1 ){ i += getVarint(aData+i, &iPos); if( iPos==1 ){ i += getVarint(aData+i, &iCol); printf(" col%lld", iCol); iPrevPos = 0; }else if( iPos==0 ){ printf("\n"); break; }else{ iPrevPos += iPos - 2; printf(" %lld", iPrevPos); } } } } /* ** Print the content of a doclist. The segment or segdir-root is ** identified by azExtra[0]. If the first character of azExtra[0] ** is 'r' then the remainder is the integer rowid of the %_segdir entry. ** If the first character of azExtra[0] is not 'r' then, then all of ** azExtra[0] is an integer which is the block number. The offset ** into the segment is identified by azExtra[1]. The size of the doclist ** is azExtra[2]. ** ** If the --raw option is present in azExtra, then a hex dump is provided. ** Otherwise a decoding is shown. 
*/ static void showDoclist(sqlite3 *db, const char *zTab){ const unsigned char *aData; sqlite3_int64 offset, nData; sqlite3_stmt *pStmt; offset = atoi64(azExtra[1]); nData = atoi64(azExtra[2]); pStmt = prepareToGetSegment(db, zTab, azExtra[0]); if( sqlite3_step(pStmt)!=SQLITE_ROW ){ sqlite3_finalize(pStmt); return; } aData = sqlite3_column_blob(pStmt, 0); printf("Doclist at %s offset %lld of size %lld bytes:\n", azExtra[0], offset, nData); if( findOption("raw", 0, 0)!=0 ){ printBlob(aData+offset, nData); }else{ decodeDoclist(aData+offset, nData); } sqlite3_finalize(pStmt); } /* ** Show the top N largest segments */ static void listBigSegments(sqlite3 *db, const char *zTab){ int nTop, i; sqlite3_stmt *pStmt; sqlite3_int64 sz; sqlite3_int64 id; nTop = atoi(findOption("top", 1, "25")); printf("The %d largest segments:\n", nTop); pStmt = prepare(db, "SELECT blockid, length(block) AS len FROM '%q_segments'" " ORDER BY 2 DESC, 1" " LIMIT %d", zTab, nTop); i = 0; while( sqlite3_step(pStmt)==SQLITE_ROW ){ i++; id = sqlite3_column_int64(pStmt, 0); sz = sqlite3_column_int64(pStmt, 1); printf(" %2d. %9lld size %lld\n", i, id, sz); } sqlite3_finalize(pStmt); } static void usage(const char *argv0){ fprintf(stderr, "Usage: %s DATABASE\n" " or: %s DATABASE FTS3TABLE ARGS...\n", argv0, argv0); fprintf(stderr, "ARGS:\n" " big-segments [--top N] show the largest segments\n" " doclist BLOCKID OFFSET SIZE [--raw] Decode a doclist\n" " schema FTS table schema\n" " segdir directory of segments\n" " segment BLOCKID [--raw] content of a segment\n" " segment-stats info on segment sizes\n" " stat the %%_stat table\n" " vocabulary [--top N] document vocabulary\n" ); exit(1); } int main(int argc, char **argv){ sqlite3 *db; int rc; const char *zTab; const char *zCmd; if( argc<2 ) usage(argv[0]); rc = sqlite3_open(argv[1], &db); if( rc ){ fprintf(stderr, "Cannot open %s\n", argv[1]); exit(1); } if( argc==2 ){ sqlite3_stmt *pStmt; int cnt = 0; pStmt = prepare(db, "SELECT b.sql" " FROM sqlite_master a, sqlite_master b" " WHERE a.name GLOB '*_segdir'" " AND b.name=substr(a.name,1,length(a.name)-7)" " ORDER BY 1"); while( sqlite3_step(pStmt)==SQLITE_ROW ){ cnt++; printf("%s;\n", sqlite3_column_text(pStmt, 0)); } sqlite3_finalize(pStmt); if( cnt==0 ){ printf("/* No FTS3/4 tables found in database %s */\n", argv[1]); } return 0; } if( argc<4 ) usage(argv[0]); zTab = argv[2]; zCmd = argv[3]; nExtra = argc-4; azExtra = argv+4; if( strcmp(zCmd,"big-segments")==0 ){ listBigSegments(db, zTab); }else if( strcmp(zCmd,"doclist")==0 ){ if( argc<7 ) usage(argv[0]); showDoclist(db, zTab); }else if( strcmp(zCmd,"schema")==0 ){ showSchema(db, zTab); }else if( strcmp(zCmd,"segdir")==0 ){ showSegdirMap(db, zTab); }else if( strcmp(zCmd,"segment")==0 ){ if( argc<5 ) usage(argv[0]); showSegment(db, zTab); }else if( strcmp(zCmd,"segment-stats")==0 ){ showSegmentStats(db, zTab); }else if( strcmp(zCmd,"stat")==0 ){ showStat(db, zTab); }else if( strcmp(zCmd,"vocabulary")==0 ){ showVocabulary(db, zTab); }else{ usage(argv[0]); } return 0; } |
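As a companion to the decodeDoclist() routine above, the sketch below walks the same encoding on a hand-built buffer: a delta-coded docid, position values stored as delta+2, the value 1 introducing a new column number, and 0 terminating the entry. The buffer contents are invented for illustration and the program is standalone, not part of fts3view.c.

```c
#include <stdio.h>

typedef long long i64;

/* FTS3-style varint reader (7 bits per byte, LSB first). */
static int getVarint(const unsigned char *p, i64 *pv){
  i64 x = 0, y = 1;
  int n = 0;
  while( p[n] & 0x80 ){ x += y * (p[n] & 0x7f); y <<= 7; n++; }
  x += y * p[n]; n++;
  *pv = x;
  return n;
}

int main(void){
  /* Hand-built doclist (illustrative values only):
  **   docid 7:            column 0, positions 1 and 4
  **   docid 12 (delta 5): column 2, position 0
  */
  static const unsigned char aDoclist[] = { 7, 3, 5, 0, 5, 1, 2, 2, 0 };
  int nDoclist = (int)sizeof(aDoclist);
  i64 iPrevDocid = 0, iDocid, iVal, iCol, iPrevPos;
  int i = 0;

  while( i<nDoclist ){
    i += getVarint(&aDoclist[i], &iDocid);
    iPrevDocid += iDocid;            /* docids are delta coded */
    printf("docid %lld col0", iPrevDocid);
    iPrevPos = 0;
    while( 1 ){
      i += getVarint(&aDoclist[i], &iVal);
      if( iVal==1 ){                 /* 1 == start of a new column */
        i += getVarint(&aDoclist[i], &iCol);
        printf(" col%lld", iCol);
        iPrevPos = 0;
      }else if( iVal==0 ){           /* 0 == end of this document's list */
        printf("\n");
        break;
      }else{                         /* positions are stored as delta+2 */
        iPrevPos += iVal - 2;
        printf(" %lld", iPrevPos);
      }
    }
  }
  return 0;
}
```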
Changes to ext/rtree/rtree.c.
︙ | ︙ | |||
3052 3053 3054 3055 3056 3057 3058 | *pzErr = sqlite3_mprintf("%s", aErrMsg[iErr]); return SQLITE_ERROR; } sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1); /* Allocate the sqlite3_vtab structure */ | | | | 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 | *pzErr = sqlite3_mprintf("%s", aErrMsg[iErr]); return SQLITE_ERROR; } sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1); /* Allocate the sqlite3_vtab structure */ nDb = (int)strlen(argv[1]); nName = (int)strlen(argv[2]); pRtree = (Rtree *)sqlite3_malloc(sizeof(Rtree)+nDb+nName+2); if( !pRtree ){ return SQLITE_NOMEM; } memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2); pRtree->nBusy = 1; pRtree->base.pModule = &rtreeModule; |
︙ | ︙ | |||
3148 3149 3150 3151 3152 3153 3154 | char zCell[512]; int nCell = 0; RtreeCell cell; int jj; nodeGetCell(&tree, &node, ii, &cell); sqlite3_snprintf(512-nCell,&zCell[nCell],"%lld", cell.iRowid); | | | | 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 | char zCell[512]; int nCell = 0; RtreeCell cell; int jj; nodeGetCell(&tree, &node, ii, &cell); sqlite3_snprintf(512-nCell,&zCell[nCell],"%lld", cell.iRowid); nCell = (int)strlen(zCell); for(jj=0; jj<tree.nDim*2; jj++){ sqlite3_snprintf(512-nCell,&zCell[nCell]," %f",(double)cell.aCoord[jj].f); nCell = (int)strlen(zCell); } if( zText ){ char *zTextNew = sqlite3_mprintf("%s {%s}", zText, zCell); sqlite3_free(zText); zText = zTextNew; }else{ |
︙ | ︙ |
Changes to src/btree.c.
︙ | ︙ | |||
6785 6786 6787 6788 6789 6790 6791 | /* Assert that the caller has been consistent. If this cursor was opened ** expecting an index b-tree, then the caller should be inserting blob ** keys with no associated data. If the cursor was opened expecting an ** intkey table, the caller should be inserting integer keys with a ** blob of associated data. */ assert( (pKey==0)==(pCur->pKeyInfo==0) ); | < < < < < < < > > > > > > > > | 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 | /* Assert that the caller has been consistent. If this cursor was opened ** expecting an index b-tree, then the caller should be inserting blob ** keys with no associated data. If the cursor was opened expecting an ** intkey table, the caller should be inserting integer keys with a ** blob of associated data. */ assert( (pKey==0)==(pCur->pKeyInfo==0) ); /* Save the positions of any other cursors open on this table. ** ** In some cases, the call to btreeMoveto() below is a no-op. For ** example, when inserting data into a table with auto-generated integer ** keys, the VDBE layer invokes sqlite3BtreeLast() to figure out the ** integer key to use. It then calls this function to actually insert the ** data into the intkey B-Tree. In this case btreeMoveto() recognizes ** that the cursor is already where it needs to be and returns without ** doing any work. To avoid thwarting these optimizations, it is important ** not to clear the cursor here. */ rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur); if( rc ) return rc; /* If this is an insert into a table b-tree, invalidate any incrblob ** cursors open on the row being replaced (assuming this is a replace ** operation - if it is not, the following is a no-op). */ if( pCur->pKeyInfo==0 ){ invalidateIncrblobCursors(p, nKey, 0); } if( !loc ){ rc = btreeMoveto(pCur, pKey, nKey, appendBias, &loc); if( rc ) return rc; } assert( pCur->eState==CURSOR_VALID || (pCur->eState==CURSOR_INVALID && loc) ); pPage = pCur->apPage[pCur->iPage]; |
︙ | ︙ | |||
6915 6916 6917 6918 6919 6920 6921 | if( NEVER(pCur->aiIdx[pCur->iPage]>=pCur->apPage[pCur->iPage]->nCell) || NEVER(pCur->eState!=CURSOR_VALID) ){ return SQLITE_ERROR; /* Something has gone awry. */ } | < < < < < < | 6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 | if( NEVER(pCur->aiIdx[pCur->iPage]>=pCur->apPage[pCur->iPage]->nCell) || NEVER(pCur->eState!=CURSOR_VALID) ){ return SQLITE_ERROR; /* Something has gone awry. */ } iCellDepth = pCur->iPage; iCellIdx = pCur->aiIdx[iCellDepth]; pPage = pCur->apPage[iCellDepth]; pCell = findCell(pPage, iCellIdx); /* If the page containing the entry to delete is not a leaf page, move ** the cursor to the largest entry in the tree that is smaller than |
︙ | ︙ | |||
6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 | /* Save the positions of any other cursors open on this table before ** making any modifications. Make the page containing the entry to be ** deleted writable. Then free any overflow pages associated with the ** entry and finally remove the cell itself from within the page. */ rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur); if( rc ) return rc; rc = sqlite3PagerWrite(pPage->pDbPage); if( rc ) return rc; rc = clearCell(pPage, pCell); dropCell(pPage, iCellIdx, cellSizePtr(pPage, pCell), &rc); if( rc ) return rc; /* If the cell deleted was not located on a leaf page, then the cursor | > > > > > > > | 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 | /* Save the positions of any other cursors open on this table before ** making any modifications. Make the page containing the entry to be ** deleted writable. Then free any overflow pages associated with the ** entry and finally remove the cell itself from within the page. */ rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur); if( rc ) return rc; /* If this is a delete operation to remove a row from a table b-tree, ** invalidate any incrblob cursors open on the row being deleted. */ if( pCur->pKeyInfo==0 ){ invalidateIncrblobCursors(p, pCur->info.nKey, 0); } rc = sqlite3PagerWrite(pPage->pDbPage); if( rc ) return rc; rc = clearCell(pPage, pCell); dropCell(pPage, iCellIdx, cellSizePtr(pPage, pCell), &rc); if( rc ) return rc; /* If the cell deleted was not located on a leaf page, then the cursor |
︙ | ︙ | |||
7227 7228 7229 7230 7231 7232 7233 | */ int sqlite3BtreeClearTable(Btree *p, int iTable, int *pnChange){ int rc; BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); assert( p->inTrans==TRANS_WRITE ); | > > > | | | | < < < | 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239 7240 7241 7242 7243 7244 7245 7246 7247 7248 7249 | */ int sqlite3BtreeClearTable(Btree *p, int iTable, int *pnChange){ int rc; BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); assert( p->inTrans==TRANS_WRITE ); rc = saveAllCursors(pBt, (Pgno)iTable, 0); if( SQLITE_OK==rc ){ /* Invalidate all incrblob cursors open on table iTable (assuming iTable ** is the root of a table b-tree - if it is not, the following call is ** a no-op). */ invalidateIncrblobCursors(p, 0, 1); rc = clearDatabasePage(pBt, (Pgno)iTable, 0, pnChange); } sqlite3BtreeLeave(p); return rc; } /* |
︙ | ︙ |
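The invalidateIncrblobCursors() calls added above are observable through the public incremental-blob API: once the row underneath an open blob handle is deleted (or its table cleared), further reads on that handle fail. The program below is a user-level sketch of that behaviour under those assumptions; error handling is abbreviated and the table and column names are made up for the example.

```c
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_blob *pBlob;
  char buf[5];
  int rc;

  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
     "CREATE TABLE t1(a INTEGER PRIMARY KEY, b BLOB);"
     "INSERT INTO t1 VALUES(1, x'68656c6c6f');", 0, 0, 0);

  /* Open an incremental-blob handle on row 1 of t1.b (read-only). */
  rc = sqlite3_blob_open(db, "main", "t1", "b", 1, 0, &pBlob);
  if( rc!=SQLITE_OK ){ fprintf(stderr, "blob_open failed: %d\n", rc); return 1; }

  /* Deleting the row passes through sqlite3BtreeDelete(), which (per the
  ** change above) invalidates any incrblob cursor open on that row. */
  sqlite3_exec(db, "DELETE FROM t1 WHERE a=1", 0, 0, 0);

  rc = sqlite3_blob_read(pBlob, buf, sizeof(buf), 0);
  printf("blob_read after delete -> %d (SQLITE_ABORT is %d)\n", rc, SQLITE_ABORT);

  sqlite3_blob_close(pBlob);
  sqlite3_close(db);
  return 0;
}
```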
Changes to src/build.c.
︙ | ︙ | |||
533 534 535 536 537 538 539 | /* Delete the Table structure itself. */ sqliteDeleteColumnNames(db, pTable); sqlite3DbFree(db, pTable->zName); sqlite3DbFree(db, pTable->zColAff); sqlite3SelectDelete(db, pTable->pSelect); #ifndef SQLITE_OMIT_CHECK | | | 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 | /* Delete the Table structure itself. */ sqliteDeleteColumnNames(db, pTable); sqlite3DbFree(db, pTable->zName); sqlite3DbFree(db, pTable->zColAff); sqlite3SelectDelete(db, pTable->pSelect); #ifndef SQLITE_OMIT_CHECK sqlite3ExprListDelete(db, pTable->pCheck); #endif #ifndef SQLITE_OMIT_VIRTUALTABLE sqlite3VtabClear(db, pTable); #endif sqlite3DbFree(db, pTable); } |
︙ | ︙ | |||
1196 1197 1198 1199 1200 1201 1202 | /* ** Add a new CHECK constraint to the table currently under construction. */ void sqlite3AddCheckConstraint( Parse *pParse, /* Parsing context */ Expr *pCheckExpr /* The check expression */ ){ | < | > > > | | 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 | /* ** Add a new CHECK constraint to the table currently under construction. */ void sqlite3AddCheckConstraint( Parse *pParse, /* Parsing context */ Expr *pCheckExpr /* The check expression */ ){ #ifndef SQLITE_OMIT_CHECK Table *pTab = pParse->pNewTable; if( pTab && !IN_DECLARE_VTAB ){ pTab->pCheck = sqlite3ExprListAppend(pParse, pTab->pCheck, pCheckExpr); if( pParse->constraintName.n ){ sqlite3ExprListSetName(pParse, pTab->pCheck, &pParse->constraintName, 1); } }else #endif { sqlite3ExprDelete(pParse->db, pCheckExpr); } } /* ** Set the collation function of the most recently parsed table column ** to the CollSeq given. */ |
︙ | ︙ | |||
1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 | #ifndef SQLITE_OMIT_CHECK /* Resolve names in all CHECK constraint expressions. */ if( p->pCheck ){ SrcList sSrc; /* Fake SrcList for pParse->pNewTable */ NameContext sNC; /* Name context for pParse->pNewTable */ memset(&sNC, 0, sizeof(sNC)); memset(&sSrc, 0, sizeof(sSrc)); sSrc.nSrc = 1; sSrc.a[0].zName = p->zName; sSrc.a[0].pTab = p; sSrc.a[0].iCursor = -1; sNC.pParse = pParse; sNC.pSrcList = &sSrc; sNC.isCheck = 1; | > > > > | | > | 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 | #ifndef SQLITE_OMIT_CHECK /* Resolve names in all CHECK constraint expressions. */ if( p->pCheck ){ SrcList sSrc; /* Fake SrcList for pParse->pNewTable */ NameContext sNC; /* Name context for pParse->pNewTable */ ExprList *pList; /* List of all CHECK constraints */ int i; /* Loop counter */ memset(&sNC, 0, sizeof(sNC)); memset(&sSrc, 0, sizeof(sSrc)); sSrc.nSrc = 1; sSrc.a[0].zName = p->zName; sSrc.a[0].pTab = p; sSrc.a[0].iCursor = -1; sNC.pParse = pParse; sNC.pSrcList = &sSrc; sNC.isCheck = 1; pList = p->pCheck; for(i=0; i<pList->nExpr; i++){ if( sqlite3ResolveExprNames(&sNC, pList->a[i].pExpr) ){ return; } } } #endif /* !defined(SQLITE_OMIT_CHECK) */ /* If the db->init.busy is 1 it means we are reading the SQL off the ** "sqlite_master" or "sqlite_temp_master" table on the disk. ** So do not write to the disk again. Extract the root page number |
︙ | ︙ | |||
3036 3037 3038 3039 3040 3041 3042 | } exit_drop_index: sqlite3SrcListDelete(db, pName); } /* | | | | | | | > > > | < < > | | 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 | } exit_drop_index: sqlite3SrcListDelete(db, pName); } /* ** pArray is a pointer to an array of objects. Each object in the ** array is szEntry bytes in size. This routine uses sqlite3DbRealloc() ** to extend the array so that there is space for a new object at the end. ** ** When this function is called, *pnEntry contains the current size of ** the array (in entries - so the allocation is ((*pnEntry) * szEntry) bytes ** in total). ** ** If the realloc() is successful (i.e. if no OOM condition occurs), the ** space allocated for the new object is zeroed, *pnEntry updated to ** reflect the new size of the array and a pointer to the new allocation ** returned. *pIdx is set to the index of the new array entry in this case. ** ** Otherwise, if the realloc() fails, *pIdx is set to -1, *pnEntry remains ** unchanged and a copy of pArray returned. */ void *sqlite3ArrayAllocate( sqlite3 *db, /* Connection to notify of malloc failures */ void *pArray, /* Array of objects. Might be reallocated */ int szEntry, /* Size of each object in the array */ int *pnEntry, /* Number of objects currently in use */ int *pIdx /* Write the index of a new slot here */ |
︙ | ︙ |
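The reworked comment on sqlite3ArrayAllocate() above describes a small, reusable contract: grow an array of fixed-size objects by one zeroed slot, report the new slot's index, and on allocation failure leave the array and count untouched while reporting index -1. Below is a simplified standalone analogue of that contract; the function and type names are invented for illustration and it uses plain realloc() rather than sqlite3DbRealloc().

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow pArray (an array of *pnEntry objects of szEntry bytes each) by one
** zeroed slot. On success *pIdx is the index of the new slot and *pnEntry
** is incremented; on OOM *pIdx is set to -1 and the old array is returned
** unchanged. */
static void *arrayAllocate(void *pArray, int szEntry, int *pnEntry, int *pIdx){
  void *pNew = realloc(pArray, (size_t)(*pnEntry + 1) * szEntry);
  if( pNew==0 ){
    *pIdx = -1;                      /* OOM: array and count left as they were */
    return pArray;
  }
  memset((char*)pNew + (size_t)(*pnEntry)*szEntry, 0, szEntry);
  *pIdx = (*pnEntry)++;              /* index of the new, zeroed slot */
  return pNew;
}

typedef struct Point { int x; int y; } Point;

int main(void){
  Point *aPoint = 0;
  int nPoint = 0, idx;
  aPoint = (Point*)arrayAllocate(aPoint, sizeof(Point), &nPoint, &idx);
  if( idx>=0 ){ aPoint[idx].x = 3; aPoint[idx].y = 4; }
  printf("nPoint=%d idx=%d (%d,%d)\n", nPoint, idx, aPoint[idx].x, aPoint[idx].y);
  free(aPoint);
  return 0;
}
```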
Changes to src/delete.c.
︙ | ︙ | |||
377 378 379 380 381 382 383 | /* Collect rowids of every row to be deleted. */ sqlite3VdbeAddOp2(v, OP_Null, 0, iRowSet); pWInfo = sqlite3WhereBegin( pParse, pTabList, pWhere, 0, 0, WHERE_DUPLICATES_OK ); if( pWInfo==0 ) goto delete_from_cleanup; | | | 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 | /* Collect rowids of every row to be deleted. */ sqlite3VdbeAddOp2(v, OP_Null, 0, iRowSet); pWInfo = sqlite3WhereBegin( pParse, pTabList, pWhere, 0, 0, WHERE_DUPLICATES_OK ); if( pWInfo==0 ) goto delete_from_cleanup; regRowid = sqlite3ExprCodeGetColumn(pParse, pTab, -1, iCur, iRowid, 0); sqlite3VdbeAddOp2(v, OP_RowSetAdd, iRowSet, regRowid); if( db->flags & SQLITE_CountRows ){ sqlite3VdbeAddOp2(v, OP_AddImm, memCnt, 1); } sqlite3WhereEnd(pWInfo); /* Delete every item whose key was written to the list during the |
︙ | ︙ |
Changes to src/expr.c.
︙ | ︙ | |||
2028 2029 2030 2031 2032 2033 2034 | /* First replace any existing entry. ** ** Actually, the way the column cache is currently used, we are guaranteed ** that the object will never already be in cache. Verify this guarantee. */ #ifndef NDEBUG for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){ | < < < < < < < < < | 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 | /* First replace any existing entry. ** ** Actually, the way the column cache is currently used, we are guaranteed ** that the object will never already be in cache. Verify this guarantee. */ #ifndef NDEBUG for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){ assert( p->iReg==0 || p->iTable!=iTab || p->iColumn!=iCol ); } #endif /* Find an empty slot and replace it */ for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){ if( p->iReg==0 ){ |
︙ | ︙ | |||
2171 2172 2173 2174 2175 2176 2177 | ** is called. If iColumn<0 then code is generated that extracts the rowid. */ int sqlite3ExprCodeGetColumn( Parse *pParse, /* Parsing and code generating context */ Table *pTab, /* Description of the table we are reading from */ int iColumn, /* Index of the table column */ int iTable, /* The cursor pointing to the table */ | | > > > > | > | 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 | ** is called. If iColumn<0 then code is generated that extracts the rowid. */ int sqlite3ExprCodeGetColumn( Parse *pParse, /* Parsing and code generating context */ Table *pTab, /* Description of the table we are reading from */ int iColumn, /* Index of the table column */ int iTable, /* The cursor pointing to the table */ int iReg, /* Store results here */ u8 p5 /* P5 value for OP_Column */ ){ Vdbe *v = pParse->pVdbe; int i; struct yColCache *p; for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){ if( p->iReg>0 && p->iTable==iTable && p->iColumn==iColumn ){ p->lru = pParse->iCacheCnt++; sqlite3ExprCachePinRegister(pParse, p->iReg); return p->iReg; } } assert( v!=0 ); sqlite3ExprCodeGetColumnOfTable(v, pTab, iTable, iColumn, iReg); if( p5 ){ sqlite3VdbeChangeP5(v, p5); }else{ sqlite3ExprCacheStore(pParse, iTable, iColumn, iReg); } return iReg; } /* ** Clear all column cache entries. */ void sqlite3ExprCacheClear(Parse *pParse){ |
︙ | ︙ | |||
2314 2315 2316 2317 2318 2319 2320 | case TK_COLUMN: { if( pExpr->iTable<0 ){ /* This only happens when coding check constraints */ assert( pParse->ckBase>0 ); inReg = pExpr->iColumn + pParse->ckBase; }else{ inReg = sqlite3ExprCodeGetColumn(pParse, pExpr->pTab, | | > | 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 | case TK_COLUMN: { if( pExpr->iTable<0 ){ /* This only happens when coding check constraints */ assert( pParse->ckBase>0 ); inReg = pExpr->iColumn + pParse->ckBase; }else{ inReg = sqlite3ExprCodeGetColumn(pParse, pExpr->pTab, pExpr->iColumn, pExpr->iTable, target, pExpr->op2); } break; } case TK_INTEGER: { codeInteger(pParse, pExpr, 0, target); break; } |
︙ | ︙ | |||
2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 | sqlite3VdbeResolveLabel(v, endCoalesce); break; } if( pFarg ){ r1 = sqlite3GetTempRange(pParse, nFarg); sqlite3ExprCachePush(pParse); /* Ticket 2ea2425d34be */ sqlite3ExprCodeExprList(pParse, pFarg, r1, 1); sqlite3ExprCachePop(pParse, 1); /* Ticket 2ea2425d34be */ }else{ r1 = 0; } #ifndef SQLITE_OMIT_VIRTUALTABLE | > > > > > > > > > > > > > > > > > > > | 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 | sqlite3VdbeResolveLabel(v, endCoalesce); break; } if( pFarg ){ r1 = sqlite3GetTempRange(pParse, nFarg); /* For length() and typeof() functions with a column argument, ** set the P5 parameter to the OP_Column opcode to OPFLAG_LENGTHARG ** or OPFLAG_TYPEOFARG respectively, to avoid unnecessary data ** loading. */ if( (pDef->flags & (SQLITE_FUNC_LENGTH|SQLITE_FUNC_TYPEOF))!=0 ){ u8 exprOp; assert( nFarg==1 ); assert( pFarg->a[0].pExpr!=0 ); exprOp = pFarg->a[0].pExpr->op; if( exprOp==TK_COLUMN || exprOp==TK_AGG_COLUMN ){ assert( SQLITE_FUNC_LENGTH==OPFLAG_LENGTHARG ); assert( SQLITE_FUNC_TYPEOF==OPFLAG_TYPEOFARG ); testcase( pDef->flags==SQLITE_FUNC_LENGTH ); pFarg->a[0].pExpr->op2 = pDef->flags; } } sqlite3ExprCachePush(pParse); /* Ticket 2ea2425d34be */ sqlite3ExprCodeExprList(pParse, pFarg, r1, 1); sqlite3ExprCachePop(pParse, 1); /* Ticket 2ea2425d34be */ }else{ r1 = 0; } #ifndef SQLITE_OMIT_VIRTUALTABLE |
︙ | ︙ |
Changes to src/func.c.
︙ | ︙ | |||
1538 1539 1540 1541 1542 1543 1544 | FUNCTION(trim, 2, 3, 0, trimFunc ), FUNCTION(min, -1, 0, 1, minmaxFunc ), FUNCTION(min, 0, 0, 1, 0 ), AGGREGATE(min, 1, 0, 1, minmaxStep, minMaxFinalize ), FUNCTION(max, -1, 1, 1, minmaxFunc ), FUNCTION(max, 0, 1, 1, 0 ), AGGREGATE(max, 1, 1, 1, minmaxStep, minMaxFinalize ), | | | | < | < | 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 | FUNCTION(trim, 2, 3, 0, trimFunc ), FUNCTION(min, -1, 0, 1, minmaxFunc ), FUNCTION(min, 0, 0, 1, 0 ), AGGREGATE(min, 1, 0, 1, minmaxStep, minMaxFinalize ), FUNCTION(max, -1, 1, 1, minmaxFunc ), FUNCTION(max, 0, 1, 1, 0 ), AGGREGATE(max, 1, 1, 1, minmaxStep, minMaxFinalize ), FUNCTION2(typeof, 1, 0, 0, typeofFunc, SQLITE_FUNC_TYPEOF), FUNCTION2(length, 1, 0, 0, lengthFunc, SQLITE_FUNC_LENGTH), FUNCTION(substr, 2, 0, 0, substrFunc ), FUNCTION(substr, 3, 0, 0, substrFunc ), FUNCTION(abs, 1, 0, 0, absFunc ), #ifndef SQLITE_OMIT_FLOATING_POINT FUNCTION(round, 1, 0, 0, roundFunc ), FUNCTION(round, 2, 0, 0, roundFunc ), #endif FUNCTION(upper, 1, 0, 0, upperFunc ), FUNCTION(lower, 1, 0, 0, lowerFunc ), FUNCTION(coalesce, 1, 0, 0, 0 ), FUNCTION(coalesce, 0, 0, 0, 0 ), FUNCTION2(coalesce, -1, 0, 0, ifnullFunc, SQLITE_FUNC_COALESCE), FUNCTION(hex, 1, 0, 0, hexFunc ), FUNCTION2(ifnull, 2, 0, 0, ifnullFunc, SQLITE_FUNC_COALESCE), FUNCTION(random, 0, 0, 0, randomFunc ), FUNCTION(randomblob, 1, 0, 0, randomBlob ), FUNCTION(nullif, 2, 0, 1, nullifFunc ), FUNCTION(sqlite_version, 0, 0, 0, versionFunc ), FUNCTION(sqlite_source_id, 0, 0, 0, sourceidFunc ), FUNCTION(sqlite_log, 2, 0, 0, errlogFunc ), #ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS |
︙ | ︙ |
Changes to src/insert.c.
︙ | ︙ | |||
1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 | int nCol; /* Number of columns */ int onError; /* Conflict resolution strategy */ int j1; /* Addresss of jump instruction */ int j2 = 0, j3; /* Addresses of jump instructions */ int regData; /* Register containing first data column */ int iCur; /* Table cursor number */ Index *pIdx; /* Pointer to one of the indices */ int seenReplace = 0; /* True if REPLACE is used to resolve INT PK conflict */ int regOldRowid = (rowidChng && isUpdate) ? rowidChng : regRowid; v = sqlite3GetVdbe(pParse); assert( v!=0 ); assert( pTab->pSelect==0 ); /* This table is not a VIEW */ nCol = pTab->nCol; regData = regRowid + 1; /* Test all NOT NULL constraints. | > > | 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 | int nCol; /* Number of columns */ int onError; /* Conflict resolution strategy */ int j1; /* Addresss of jump instruction */ int j2 = 0, j3; /* Addresses of jump instructions */ int regData; /* Register containing first data column */ int iCur; /* Table cursor number */ Index *pIdx; /* Pointer to one of the indices */ sqlite3 *db; /* Database connection */ int seenReplace = 0; /* True if REPLACE is used to resolve INT PK conflict */ int regOldRowid = (rowidChng && isUpdate) ? rowidChng : regRowid; db = pParse->db; v = sqlite3GetVdbe(pParse); assert( v!=0 ); assert( pTab->pSelect==0 ); /* This table is not a VIEW */ nCol = pTab->nCol; regData = regRowid + 1; /* Test all NOT NULL constraints. |
︙ | ︙ | |||
1188 1189 1190 1191 1192 1193 1194 | case OE_Abort: sqlite3MayAbort(pParse); case OE_Rollback: case OE_Fail: { char *zMsg; sqlite3VdbeAddOp3(v, OP_HaltIfNull, SQLITE_CONSTRAINT, onError, regData+i); | | | 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 | case OE_Abort: sqlite3MayAbort(pParse); case OE_Rollback: case OE_Fail: { char *zMsg; sqlite3VdbeAddOp3(v, OP_HaltIfNull, SQLITE_CONSTRAINT, onError, regData+i); zMsg = sqlite3MPrintf(db, "%s.%s may not be NULL", pTab->zName, pTab->aCol[i].zName); sqlite3VdbeChangeP4(v, -1, zMsg, P4_DYNAMIC); break; } case OE_Ignore: { sqlite3VdbeAddOp2(v, OP_IsNull, regData+i, ignoreDest); break; |
︙ | ︙ | |||
1210 1211 1212 1213 1214 1215 1216 | } } } /* Test all CHECK constraints */ #ifndef SQLITE_OMIT_CHECK | | > | < > > > | | | > | > > > > > | | | > | 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 | } } } /* Test all CHECK constraints */ #ifndef SQLITE_OMIT_CHECK if( pTab->pCheck && (db->flags & SQLITE_IgnoreChecks)==0 ){ ExprList *pCheck = pTab->pCheck; int i; pParse->ckBase = regData; onError = overrideError!=OE_Default ? overrideError : OE_Abort; for(i=0; i<pCheck->nExpr; i++){ int allOk = sqlite3VdbeMakeLabel(v); sqlite3ExprIfTrue(pParse, pCheck->a[i].pExpr, allOk, SQLITE_JUMPIFNULL); if( onError==OE_Ignore ){ sqlite3VdbeAddOp2(v, OP_Goto, 0, ignoreDest); }else{ char *zConsName = pCheck->a[i].zName; if( onError==OE_Replace ) onError = OE_Abort; /* IMP: R-15569-63625 */ if( zConsName ){ zConsName = sqlite3MPrintf(db, "constraint %s failed", zConsName); }else{ zConsName = 0; } sqlite3HaltConstraint(pParse, onError, zConsName, P4_DYNAMIC); } sqlite3VdbeResolveLabel(v, allOk); } } #endif /* !defined(SQLITE_OMIT_CHECK) */ /* If we have an INTEGER PRIMARY KEY, make sure the primary key ** of the new record does not previously exist. Except, if this ** is an UPDATE and the primary key is not changing, that is OK. */ |
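A hedged illustration, not part of this check-in: because the parser now records constraint names (Parse.constraintName) and the code above passes them to sqlite3HaltConstraint(), a violated named CHECK constraint reports which constraint failed. The table t1 and the constraint name x_positive are hypothetical.

  #include <stdio.h>
  #include "sqlite3.h"

  static int demoNamedCheck(sqlite3 *db){
    char *zErr = 0;
    int rc = sqlite3_exec(db,
        "CREATE TABLE t1(x INTEGER, CONSTRAINT x_positive CHECK(x>0));"
        "INSERT INTO t1 VALUES(-1);", 0, 0, &zErr);
    if( rc!=SQLITE_OK ){
      fprintf(stderr, "%s\n", zErr);  /* expected: constraint x_positive failed */
      sqlite3_free(zErr);
    }
    return rc;
  }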
︙ | ︙ | |||
1277 1278 1279 1280 1281 1282 1283 | ** ** REPLACE INTO t(rowid) VALUES($newrowid) ** ** to run without a statement journal if there are no indexes on the ** table. */ Trigger *pTrigger = 0; | | | 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 | ** ** REPLACE INTO t(rowid) VALUES($newrowid) ** ** to run without a statement journal if there are no indexes on the ** table. */ Trigger *pTrigger = 0; if( db->flags&SQLITE_RecTriggers ){ pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0); } if( pTrigger || sqlite3FkRequired(pParse, pTab, 0, 0) ){ sqlite3MultiWrite(pParse); sqlite3GenerateRowDelete( pParse, pTab, baseCur, regRowid, 0, pTrigger, OE_Replace ); |
︙ | ︙ | |||
1376 1377 1378 1379 1380 1381 1382 | case OE_Fail: { int j; StrAccum errMsg; const char *zSep; char *zErr; sqlite3StrAccumInit(&errMsg, 0, 0, 200); | | | 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 | case OE_Fail: { int j; StrAccum errMsg; const char *zSep; char *zErr; sqlite3StrAccumInit(&errMsg, 0, 0, 200); errMsg.db = db; zSep = pIdx->nColumn>1 ? "columns " : "column "; for(j=0; j<pIdx->nColumn; j++){ char *zCol = pTab->aCol[pIdx->aiColumn[j]].zName; sqlite3StrAccumAppend(&errMsg, zSep, -1); zSep = ", "; sqlite3StrAccumAppend(&errMsg, zCol, -1); } |
︙ | ︙ | |||
1400 1401 1402 1403 1404 1405 1406 | sqlite3VdbeAddOp2(v, OP_Goto, 0, ignoreDest); break; } default: { Trigger *pTrigger = 0; assert( onError==OE_Replace ); sqlite3MultiWrite(pParse); | | | 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 | sqlite3VdbeAddOp2(v, OP_Goto, 0, ignoreDest); break; } default: { Trigger *pTrigger = 0; assert( onError==OE_Replace ); sqlite3MultiWrite(pParse); if( db->flags&SQLITE_RecTriggers ){ pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0); } sqlite3GenerateRowDelete( pParse, pTab, baseCur, regR, 0, pTrigger, OE_Replace ); seenReplace = 1; break; |
︙ | ︙ | |||
1730 1731 1732 1733 1734 1735 1736 | if( xferCompatibleIndex(pDestIdx, pSrcIdx) ) break; } if( pSrcIdx==0 ){ return 0; /* pDestIdx has no corresponding index in pSrc */ } } #ifndef SQLITE_OMIT_CHECK | | | 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 | if( xferCompatibleIndex(pDestIdx, pSrcIdx) ) break; } if( pSrcIdx==0 ){ return 0; /* pDestIdx has no corresponding index in pSrc */ } } #ifndef SQLITE_OMIT_CHECK if( pDest->pCheck && sqlite3ExprListCompare(pSrc->pCheck, pDest->pCheck) ){ return 0; /* Tables have different CHECK constraints. Ticket #2252 */ } #endif #ifndef SQLITE_OMIT_FOREIGN_KEY /* Disallow the transfer optimization if the destination table constains ** any foreign key constraints. This is more restrictive than necessary. ** But the main beneficiary of the transfer optimization is the VACUUM |
︙ | ︙ |
Changes to src/os_win.c.
︙ | ︙ | |||
1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 | ){ osSleep(100); /* Wait a little before trying again */ } sqlite3_free(pFile->zDeleteOnClose); } #endif OSTRACE(("CLOSE %d %s\n", pFile->h, rc ? "ok" : "failed")); OpenCounter(-1); return rc ? SQLITE_OK : winLogError(SQLITE_IOERR_CLOSE, osGetLastError(), "winClose", pFile->zPath); } /* ** Read data from a file into a buffer. Return SQLITE_OK if all ** bytes were read successfully and SQLITE_IOERR if anything goes ** wrong. */ static int winRead( sqlite3_file *id, /* File to read from */ void *pBuf, /* Write content into this buffer */ int amt, /* Number of bytes to read */ sqlite3_int64 offset /* Begin reading at this offset */ ){ winFile *pFile = (winFile*)id; /* file handle */ DWORD nRead; /* Number of bytes actually read from file */ int nRetry = 0; /* Number of retrys */ assert( id!=0 ); SimulateIOError(return SQLITE_IOERR_READ); OSTRACE(("READ %d lock=%d\n", pFile->h, pFile->locktype)); if( seekWinFile(pFile, offset) ){ return SQLITE_FULL; } while( !osReadFile(pFile->h, pBuf, amt, &nRead, 0) ){ DWORD lastErrno; if( retryIoerr(&nRetry, &lastErrno) ) continue; pFile->lastErrno = lastErrno; return winLogError(SQLITE_IOERR_READ, pFile->lastErrno, "winRead", pFile->zPath); } logIoerr(nRetry); | > > > > > > > > > > > > > > | 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 | ){ osSleep(100); /* Wait a little before trying again */ } sqlite3_free(pFile->zDeleteOnClose); } #endif OSTRACE(("CLOSE %d %s\n", pFile->h, rc ? "ok" : "failed")); if( rc ){ pFile->h = NULL; } OpenCounter(-1); return rc ? SQLITE_OK : winLogError(SQLITE_IOERR_CLOSE, osGetLastError(), "winClose", pFile->zPath); } /* ** Read data from a file into a buffer. Return SQLITE_OK if all ** bytes were read successfully and SQLITE_IOERR if anything goes ** wrong. */ static int winRead( sqlite3_file *id, /* File to read from */ void *pBuf, /* Write content into this buffer */ int amt, /* Number of bytes to read */ sqlite3_int64 offset /* Begin reading at this offset */ ){ #if !SQLITE_OS_WINCE OVERLAPPED overlapped; /* The offset for ReadFile. */ #endif winFile *pFile = (winFile*)id; /* file handle */ DWORD nRead; /* Number of bytes actually read from file */ int nRetry = 0; /* Number of retrys */ assert( id!=0 ); SimulateIOError(return SQLITE_IOERR_READ); OSTRACE(("READ %d lock=%d\n", pFile->h, pFile->locktype)); #if SQLITE_OS_WINCE if( seekWinFile(pFile, offset) ){ return SQLITE_FULL; } while( !osReadFile(pFile->h, pBuf, amt, &nRead, 0) ){ #else memset(&overlapped, 0, sizeof(OVERLAPPED)); overlapped.Offset = (LONG)(offset & 0xffffffff); overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff); while( !osReadFile(pFile->h, pBuf, amt, &nRead, &overlapped) && osGetLastError()!=ERROR_HANDLE_EOF ){ #endif DWORD lastErrno; if( retryIoerr(&nRetry, &lastErrno) ) continue; pFile->lastErrno = lastErrno; return winLogError(SQLITE_IOERR_READ, pFile->lastErrno, "winRead", pFile->zPath); } logIoerr(nRetry); |
︙ | ︙ | |||
1667 1668 1669 1670 1671 1672 1673 | */ static int winWrite( sqlite3_file *id, /* File to write into */ const void *pBuf, /* The bytes to be written */ int amt, /* Number of bytes to write */ sqlite3_int64 offset /* Offset into the file to begin writing at */ ){ | | > > > > > > > > > > > > > > > > > | > > > > > > > > | 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 | */ static int winWrite( sqlite3_file *id, /* File to write into */ const void *pBuf, /* The bytes to be written */ int amt, /* Number of bytes to write */ sqlite3_int64 offset /* Offset into the file to begin writing at */ ){ int rc = 0; /* True if error has occured, else false */ winFile *pFile = (winFile*)id; /* File handle */ int nRetry = 0; /* Number of retries */ assert( amt>0 ); assert( pFile ); SimulateIOError(return SQLITE_IOERR_WRITE); SimulateDiskfullError(return SQLITE_FULL); OSTRACE(("WRITE %d lock=%d\n", pFile->h, pFile->locktype)); #if SQLITE_OS_WINCE rc = seekWinFile(pFile, offset); if( rc==0 ){ #else { #endif #if !SQLITE_OS_WINCE OVERLAPPED overlapped; /* The offset for WriteFile. */ #endif u8 *aRem = (u8 *)pBuf; /* Data yet to be written */ int nRem = amt; /* Number of bytes yet to be written */ DWORD nWrite; /* Bytes written by each WriteFile() call */ DWORD lastErrno = NO_ERROR; /* Value returned by GetLastError() */ #if !SQLITE_OS_WINCE memset(&overlapped, 0, sizeof(OVERLAPPED)); overlapped.Offset = (LONG)(offset & 0xffffffff); overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff); #endif while( nRem>0 ){ #if SQLITE_OS_WINCE if( !osWriteFile(pFile->h, aRem, nRem, &nWrite, 0) ){ #else if( !osWriteFile(pFile->h, aRem, nRem, &nWrite, &overlapped) ){ #endif if( retryIoerr(&nRetry, &lastErrno) ) continue; break; } if( nWrite<=0 ){ lastErrno = osGetLastError(); break; } #if !SQLITE_OS_WINCE offset += nWrite; overlapped.Offset = (LONG)(offset & 0xffffffff); overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff); #endif aRem += nWrite; nRem -= nWrite; } if( nRem>0 ){ pFile->lastErrno = lastErrno; rc = 1; } |
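A minimal sketch, not part of this check-in, isolating the offset handling used in the non-WinCE branches above: the 64-bit file offset is folded into the OVERLAPPED structure so that ReadFile()/WriteFile() position the handle themselves and no separate seek call is needed. The helper name setOverlappedOffset is hypothetical.

  #include <string.h>
  #include <windows.h>
  #include "sqlite3.h"

  static void setOverlappedOffset(OVERLAPPED *pOvl, sqlite3_int64 iOfst){
    memset(pOvl, 0, sizeof(OVERLAPPED));
    pOvl->Offset     = (LONG)(iOfst & 0xffffffff);        /* low 32 bits */
    pOvl->OffsetHigh = (LONG)((iOfst>>32) & 0x7fffffff);  /* high 31 bits */
  }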
︙ | ︙ |
Changes to src/pager.c.
︙ | ︙ | |||
666 667 668 669 670 671 672 | int pageSize; /* Number of bytes in a page */ Pgno mxPgno; /* Maximum allowed size of the database */ i64 journalSizeLimit; /* Size limit for persistent journal files */ char *zFilename; /* Name of the database file */ char *zJournal; /* Name of the journal file */ int (*xBusyHandler)(void*); /* Function to call when busy */ void *pBusyHandlerArg; /* Context argument for xBusyHandler */ | | | > > > > > > > > > | 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 | int pageSize; /* Number of bytes in a page */ Pgno mxPgno; /* Maximum allowed size of the database */ i64 journalSizeLimit; /* Size limit for persistent journal files */ char *zFilename; /* Name of the database file */ char *zJournal; /* Name of the journal file */ int (*xBusyHandler)(void*); /* Function to call when busy */ void *pBusyHandlerArg; /* Context argument for xBusyHandler */ int aStat[3]; /* Total cache hits, misses and writes */ #ifdef SQLITE_TEST int nRead; /* Database pages read */ #endif void (*xReiniter)(DbPage*); /* Call this routine when reloading pages */ #ifdef SQLITE_HAS_CODEC void *(*xCodec)(void*,void*,Pgno,int); /* Routine for en/decoding data */ void (*xCodecSizeChng)(void*,int,int); /* Notify of page size changes */ void (*xCodecFree)(void*); /* Destructor for the codec */ void *pCodec; /* First argument to xCodec... methods */ #endif char *pTmpSpace; /* Pager.pageSize bytes of space for tmp use */ PCache *pPCache; /* Pointer to page cache object */ #ifndef SQLITE_OMIT_WAL Wal *pWal; /* Write-ahead log used by "journal_mode=wal" */ char *zWal; /* File name for write-ahead log */ #endif }; /* ** Indexes for use with Pager.aStat[]. The Pager.aStat[] array contains ** the values accessed by passing SQLITE_DBSTATUS_CACHE_HIT, CACHE_MISS ** or CACHE_WRITE to sqlite3_db_status(). */ #define PAGER_STAT_HIT 0 #define PAGER_STAT_MISS 1 #define PAGER_STAT_WRITE 2 /* ** The following global variables hold counters used for ** testing purposes only. These variables do not exist in ** a non-testing build. These variables are not thread-safe. */ #ifdef SQLITE_TEST int sqlite3_pager_readdb_count = 0; /* Number of full pages read from DB */ |
︙ | ︙ | |||
2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 | static int pagerWalFrames( Pager *pPager, /* Pager object */ PgHdr *pList, /* List of frames to log */ Pgno nTruncate, /* Database size after this commit */ int isCommit /* True if this is a commit */ ){ int rc; /* Return code */ #if defined(SQLITE_DEBUG) || defined(SQLITE_CHECK_PAGES) PgHdr *p; /* For looping over pages */ #endif assert( pPager->pWal ); assert( pList ); #ifdef SQLITE_DEBUG /* Verify that the page list is in accending order */ for(p=pList; p && p->pDirty; p=p->pDirty){ assert( p->pgno < p->pDirty->pgno ); } #endif if( isCommit ){ /* If a WAL transaction is being committed, there is no point in writing ** any pages with page numbers greater than nTruncate into the WAL file. ** They will never be read by any client. So remove them from the pDirty ** list here. */ PgHdr *p; PgHdr **ppNext = &pList; for(p=pList; (*ppNext = p); p=p->pDirty){ | > > > | > > | > > > > | 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 | static int pagerWalFrames( Pager *pPager, /* Pager object */ PgHdr *pList, /* List of frames to log */ Pgno nTruncate, /* Database size after this commit */ int isCommit /* True if this is a commit */ ){ int rc; /* Return code */ int nList; /* Number of pages in pList */ #if defined(SQLITE_DEBUG) || defined(SQLITE_CHECK_PAGES) PgHdr *p; /* For looping over pages */ #endif assert( pPager->pWal ); assert( pList ); #ifdef SQLITE_DEBUG /* Verify that the page list is in accending order */ for(p=pList; p && p->pDirty; p=p->pDirty){ assert( p->pgno < p->pDirty->pgno ); } #endif assert( pList->pDirty==0 || isCommit ); if( isCommit ){ /* If a WAL transaction is being committed, there is no point in writing ** any pages with page numbers greater than nTruncate into the WAL file. ** They will never be read by any client. So remove them from the pDirty ** list here. */ PgHdr *p; PgHdr **ppNext = &pList; nList = 0; for(p=pList; (*ppNext = p); p=p->pDirty){ if( p->pgno<=nTruncate ){ ppNext = &p->pDirty; nList++; } } assert( pList ); }else{ nList = 1; } pPager->aStat[PAGER_STAT_WRITE] += nList; if( pList->pgno==1 ) pager_write_changecounter(pList); rc = sqlite3WalFrames(pPager->pWal, pPager->pageSize, pList, nTruncate, isCommit, pPager->walSyncFlags ); if( rc==SQLITE_OK && pPager->pBackup ){ PgHdr *p; |
︙ | ︙ | |||
4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 | */ if( pgno==1 ){ memcpy(&pPager->dbFileVers, &pData[24], sizeof(pPager->dbFileVers)); } if( pgno>pPager->dbFileSize ){ pPager->dbFileSize = pgno; } /* Update any backup objects copying the contents of this pager. */ sqlite3BackupUpdate(pPager->pBackup, pgno, (u8*)pList->pData); PAGERTRACE(("STORE %d page %d hash(%08x)\n", PAGERID(pPager), pgno, pager_pagehash(pList))); IOTRACE(("PGOUT %p %d\n", pPager, pgno)); PAGER_INCR(sqlite3_pager_writedb_count); | > < | 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 | */ if( pgno==1 ){ memcpy(&pPager->dbFileVers, &pData[24], sizeof(pPager->dbFileVers)); } if( pgno>pPager->dbFileSize ){ pPager->dbFileSize = pgno; } pPager->aStat[PAGER_STAT_WRITE]++; /* Update any backup objects copying the contents of this pager. */ sqlite3BackupUpdate(pPager->pBackup, pgno, (u8*)pList->pData); PAGERTRACE(("STORE %d page %d hash(%08x)\n", PAGERID(pPager), pgno, pager_pagehash(pList))); IOTRACE(("PGOUT %p %d\n", pPager, pgno)); PAGER_INCR(sqlite3_pager_writedb_count); }else{ PAGERTRACE(("NOSTORE %d page %d\n", PAGERID(pPager), pgno)); } pager_set_pagehash(pList); pList = pList->pDirty; } |
︙ | ︙ | |||
5025 5026 5027 5028 5029 5030 5031 | assert( (*ppPage)->pgno==pgno ); assert( (*ppPage)->pPager==pPager || (*ppPage)->pPager==0 ); if( (*ppPage)->pPager && !noContent ){ /* In this case the pcache already contains an initialized copy of ** the page. Return without further ado. */ assert( pgno<=PAGER_MAX_PGNO && pgno!=PAGER_MJ_PGNO(pPager) ); | | | 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 | assert( (*ppPage)->pgno==pgno ); assert( (*ppPage)->pPager==pPager || (*ppPage)->pPager==0 ); if( (*ppPage)->pPager && !noContent ){ /* In this case the pcache already contains an initialized copy of ** the page. Return without further ado. */ assert( pgno<=PAGER_MAX_PGNO && pgno!=PAGER_MJ_PGNO(pPager) ); pPager->aStat[PAGER_STAT_HIT]++; return SQLITE_OK; }else{ /* The pager cache has created a new page. Its content needs to ** be initialized. */ pPg = *ppPage; |
︙ | ︙ | |||
5067 5068 5069 5070 5071 5072 5073 | testcase( rc==SQLITE_NOMEM ); sqlite3EndBenignMalloc(); } memset(pPg->pData, 0, pPager->pageSize); IOTRACE(("ZERO %p %d\n", pPager, pgno)); }else{ assert( pPg->pPager==pPager ); | | | 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 | testcase( rc==SQLITE_NOMEM ); sqlite3EndBenignMalloc(); } memset(pPg->pData, 0, pPager->pageSize); IOTRACE(("ZERO %p %d\n", pPager, pgno)); }else{ assert( pPg->pPager==pPager ); pPager->aStat[PAGER_STAT_MISS]++; rc = readDbPage(pPg); if( rc!=SQLITE_OK ){ goto pager_acquire_err; } } pager_set_pagehash(pPg); } |
︙ | ︙ | |||
5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 | /* If running in direct mode, write the contents of page 1 to the file. */ if( DIRECT_MODE ){ const void *zBuf; assert( pPager->dbFileSize>0 ); CODEC2(pPager, pPgHdr->pData, 1, 6, rc=SQLITE_NOMEM, zBuf); if( rc==SQLITE_OK ){ rc = sqlite3OsWrite(pPager->fd, zBuf, pPager->pageSize, 0); } if( rc==SQLITE_OK ){ pPager->changeCountDone = 1; } }else{ pPager->changeCountDone = 1; } | > | 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 | /* If running in direct mode, write the contents of page 1 to the file. */ if( DIRECT_MODE ){ const void *zBuf; assert( pPager->dbFileSize>0 ); CODEC2(pPager, pPgHdr->pData, 1, 6, rc=SQLITE_NOMEM, zBuf); if( rc==SQLITE_OK ){ rc = sqlite3OsWrite(pPager->fd, zBuf, pPager->pageSize, 0); pPager->aStat[PAGER_STAT_WRITE]++; } if( rc==SQLITE_OK ){ pPager->changeCountDone = 1; } }else{ pPager->changeCountDone = 1; } |
︙ | ︙ | |||
6095 6096 6097 6098 6099 6100 6101 | static int a[11]; a[0] = sqlite3PcacheRefCount(pPager->pPCache); a[1] = sqlite3PcachePagecount(pPager->pPCache); a[2] = sqlite3PcacheGetCachesize(pPager->pPCache); a[3] = pPager->eState==PAGER_OPEN ? -1 : (int) pPager->dbSize; a[4] = pPager->eState; a[5] = pPager->errCode; | | | | < > < < < < | > > > | | | 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 | static int a[11]; a[0] = sqlite3PcacheRefCount(pPager->pPCache); a[1] = sqlite3PcachePagecount(pPager->pPCache); a[2] = sqlite3PcacheGetCachesize(pPager->pPCache); a[3] = pPager->eState==PAGER_OPEN ? -1 : (int) pPager->dbSize; a[4] = pPager->eState; a[5] = pPager->errCode; a[6] = pPager->aStat[PAGER_STAT_HIT]; a[7] = pPager->aStat[PAGER_STAT_MISS]; a[8] = 0; /* Used to be pPager->nOvfl */ a[9] = pPager->nRead; a[10] = pPager->aStat[PAGER_STAT_WRITE]; return a; } #endif /* ** Parameter eStat must be either SQLITE_DBSTATUS_CACHE_HIT or ** SQLITE_DBSTATUS_CACHE_MISS. Before returning, *pnVal is incremented by the ** current cache hit or miss count, according to the value of eStat. If the ** reset parameter is non-zero, the cache hit or miss count is zeroed before ** returning. */ void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, int *pnVal){ assert( eStat==SQLITE_DBSTATUS_CACHE_HIT || eStat==SQLITE_DBSTATUS_CACHE_MISS || eStat==SQLITE_DBSTATUS_CACHE_WRITE ); assert( SQLITE_DBSTATUS_CACHE_HIT+1==SQLITE_DBSTATUS_CACHE_MISS ); assert( SQLITE_DBSTATUS_CACHE_HIT+2==SQLITE_DBSTATUS_CACHE_WRITE ); assert( PAGER_STAT_HIT==0 && PAGER_STAT_MISS==1 && PAGER_STAT_WRITE==2 ); *pnVal += pPager->aStat[eStat - SQLITE_DBSTATUS_CACHE_HIT]; if( reset ){ pPager->aStat[eStat - SQLITE_DBSTATUS_CACHE_HIT] = 0; } } /* ** Return true if this is an in-memory pager. */ int sqlite3PagerIsMemdb(Pager *pPager){ |
︙ | ︙ |
Changes to src/parse.y.
︙ | ︙ | |||
71 72 73 74 75 76 77 | /* ** An instance of this structure is used to store the LIKE, ** GLOB, NOT LIKE, and NOT GLOB operators. */ struct LikeOp { Token eOperator; /* "like" or "glob" or "regexp" */ | | | 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 | /* ** An instance of this structure is used to store the LIKE, ** GLOB, NOT LIKE, and NOT GLOB operators. */ struct LikeOp { Token eOperator; /* "like" or "glob" or "regexp" */ int bNot; /* True if the NOT keyword is present */ }; /* ** An instance of the following structure describes the event of a ** TRIGGER. "a" is the event type, one of TK_UPDATE, TK_INSERT, ** TK_DELETE, or TK_INSTEAD. If the event is of the form ** |
︙ | ︙ | |||
269 270 271 272 273 274 275 | typename(A) ::= typename(X) ids(Y). {A.z=X.z; A.n=Y.n+(int)(Y.z-X.z);} signed ::= plus_num. signed ::= minus_num. // "carglist" is a list of additional constraints that come after the // column name and column type in a CREATE TABLE statement. // | | | | | 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 | typename(A) ::= typename(X) ids(Y). {A.z=X.z; A.n=Y.n+(int)(Y.z-X.z);} signed ::= plus_num. signed ::= minus_num. // "carglist" is a list of additional constraints that come after the // column name and column type in a CREATE TABLE statement. // carglist ::= carglist cname ccons. carglist ::= . cname ::= CONSTRAINT nm(X). {pParse->constraintName = X;} cname ::= . {pParse->constraintName.n = 0;} ccons ::= DEFAULT term(X). {sqlite3AddDefaultValue(pParse,&X);} ccons ::= DEFAULT LP expr(X) RP. {sqlite3AddDefaultValue(pParse,&X);} ccons ::= DEFAULT PLUS term(X). {sqlite3AddDefaultValue(pParse,&X);} ccons ::= DEFAULT MINUS(A) term(X). { ExprSpan v; v.pExpr = sqlite3PExpr(pParse, TK_UMINUS, X.pExpr, 0, 0); v.zStart = A.z; |
︙ | ︙ | |||
335 336 337 338 339 340 341 | defer_subclause(A) ::= NOT DEFERRABLE init_deferred_pred_opt. {A = 0;} defer_subclause(A) ::= DEFERRABLE init_deferred_pred_opt(X). {A = X;} %type init_deferred_pred_opt {int} init_deferred_pred_opt(A) ::= . {A = 0;} init_deferred_pred_opt(A) ::= INITIALLY DEFERRED. {A = 1;} init_deferred_pred_opt(A) ::= INITIALLY IMMEDIATE. {A = 0;} | < < < | < | < | 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 | defer_subclause(A) ::= NOT DEFERRABLE init_deferred_pred_opt. {A = 0;} defer_subclause(A) ::= DEFERRABLE init_deferred_pred_opt(X). {A = X;} %type init_deferred_pred_opt {int} init_deferred_pred_opt(A) ::= . {A = 0;} init_deferred_pred_opt(A) ::= INITIALLY DEFERRED. {A = 1;} init_deferred_pred_opt(A) ::= INITIALLY IMMEDIATE. {A = 0;} conslist_opt(A) ::= . {A.n = 0; A.z = 0;} conslist_opt(A) ::= COMMA(X) conslist. {A = X;} conslist ::= conslist COMMA cname tcons. conslist ::= cname tcons. tcons ::= PRIMARY KEY LP idxlist(X) autoinc(I) RP onconf(R). {sqlite3AddPrimaryKey(pParse,X,R,I,0);} tcons ::= UNIQUE LP idxlist(X) RP onconf(R). {sqlite3CreateIndex(pParse,0,0,0,X,R,0,0,0,0);} tcons ::= CHECK LP expr(E) RP onconf. {sqlite3AddCheckConstraint(pParse,E.pExpr);} tcons ::= FOREIGN KEY LP idxlist(FA) RP |
︙ | ︙ | |||
877 878 879 880 881 882 883 | {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} expr(A) ::= expr(X) PLUS|MINUS(OP) expr(Y). {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} expr(A) ::= expr(X) STAR|SLASH|REM(OP) expr(Y). {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} expr(A) ::= expr(X) CONCAT(OP) expr(Y). {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} %type likeop {struct LikeOp} | | | | | | | | 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 | {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} expr(A) ::= expr(X) PLUS|MINUS(OP) expr(Y). {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} expr(A) ::= expr(X) STAR|SLASH|REM(OP) expr(Y). {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} expr(A) ::= expr(X) CONCAT(OP) expr(Y). {spanBinaryExpr(&A,pParse,@OP,&X,&Y);} %type likeop {struct LikeOp} likeop(A) ::= LIKE_KW(X). {A.eOperator = X; A.bNot = 0;} likeop(A) ::= NOT LIKE_KW(X). {A.eOperator = X; A.bNot = 1;} likeop(A) ::= MATCH(X). {A.eOperator = X; A.bNot = 0;} likeop(A) ::= NOT MATCH(X). {A.eOperator = X; A.bNot = 1;} expr(A) ::= expr(X) likeop(OP) expr(Y). [LIKE_KW] { ExprList *pList; pList = sqlite3ExprListAppend(pParse,0, Y.pExpr); pList = sqlite3ExprListAppend(pParse,pList, X.pExpr); A.pExpr = sqlite3ExprFunction(pParse, pList, &OP.eOperator); if( OP.bNot ) A.pExpr = sqlite3PExpr(pParse, TK_NOT, A.pExpr, 0, 0); A.zStart = X.zStart; A.zEnd = Y.zEnd; if( A.pExpr ) A.pExpr->flags |= EP_InfixFunc; } expr(A) ::= expr(X) likeop(OP) expr(Y) ESCAPE expr(E). [LIKE_KW] { ExprList *pList; pList = sqlite3ExprListAppend(pParse,0, Y.pExpr); pList = sqlite3ExprListAppend(pParse,pList, X.pExpr); pList = sqlite3ExprListAppend(pParse,pList, E.pExpr); A.pExpr = sqlite3ExprFunction(pParse, pList, &OP.eOperator); if( OP.bNot ) A.pExpr = sqlite3PExpr(pParse, TK_NOT, A.pExpr, 0, 0); A.zStart = X.zStart; A.zEnd = E.zEnd; if( A.pExpr ) A.pExpr->flags |= EP_InfixFunc; } %include { /* Construct an expression node for a unary postfix operator |
︙ | ︙ |
Changes to src/select.c.
︙ | ︙ | |||
4224 4225 4226 4227 4228 4229 4230 | for(i=0; i<sAggInfo.nColumn; i++){ struct AggInfo_col *pCol = &sAggInfo.aCol[i]; if( pCol->iSorterColumn>=j ){ int r1 = j + regBase; int r2; r2 = sqlite3ExprCodeGetColumn(pParse, | | | 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 | for(i=0; i<sAggInfo.nColumn; i++){ struct AggInfo_col *pCol = &sAggInfo.aCol[i]; if( pCol->iSorterColumn>=j ){ int r1 = j + regBase; int r2; r2 = sqlite3ExprCodeGetColumn(pParse, pCol->pTab, pCol->iColumn, pCol->iTable, r1, 0); if( r1!=r2 ){ sqlite3VdbeAddOp2(v, OP_SCopy, r2, r1); } j++; } } regRecord = sqlite3GetTempReg(pParse); |
︙ | ︙ |
Changes to src/shell.c.
︙ | ︙ | |||
64 65 66 67 68 69 70 71 72 73 74 75 76 77 | # define stifle_history(X) #endif #if defined(_WIN32) || defined(WIN32) # include <io.h> #define isatty(h) _isatty(h) #define access(f,m) _access((f),(m)) #else /* Make sure isatty() has a prototype. */ extern int isatty(int); #endif #if defined(_WIN32_WCE) | > > | 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 | # define stifle_history(X) #endif #if defined(_WIN32) || defined(WIN32) # include <io.h> #define isatty(h) _isatty(h) #define access(f,m) _access((f),(m)) #define popen(a,b) _popen((a),(b)) #define pclose(x) _pclose(x) #else /* Make sure isatty() has a prototype. */ extern int isatty(int); #endif #if defined(_WIN32_WCE) |
︙ | ︙ | |||
1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 | fprintf(pArg->out, "Pager Heap Usage: %d bytes\n", iCur); iHiwtr = iCur = -1; sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_HIT, &iCur, &iHiwtr, 1); fprintf(pArg->out, "Page cache hits: %d\n", iCur); iHiwtr = iCur = -1; sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_MISS, &iCur, &iHiwtr, 1); fprintf(pArg->out, "Page cache misses: %d\n", iCur); iHiwtr = iCur = -1; sqlite3_db_status(db, SQLITE_DBSTATUS_SCHEMA_USED, &iCur, &iHiwtr, bReset); fprintf(pArg->out, "Schema Heap Usage: %d bytes\n", iCur); iHiwtr = iCur = -1; sqlite3_db_status(db, SQLITE_DBSTATUS_STMT_USED, &iCur, &iHiwtr, bReset); fprintf(pArg->out, "Statement Heap/Lookaside Usage: %d bytes\n", iCur); } | > > > | 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 | fprintf(pArg->out, "Pager Heap Usage: %d bytes\n", iCur); iHiwtr = iCur = -1; sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_HIT, &iCur, &iHiwtr, 1); fprintf(pArg->out, "Page cache hits: %d\n", iCur); iHiwtr = iCur = -1; sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_MISS, &iCur, &iHiwtr, 1); fprintf(pArg->out, "Page cache misses: %d\n", iCur); iHiwtr = iCur = -1; sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_WRITE, &iCur, &iHiwtr, 1); fprintf(pArg->out, "Page cache writes: %d\n", iCur); iHiwtr = iCur = -1; sqlite3_db_status(db, SQLITE_DBSTATUS_SCHEMA_USED, &iCur, &iHiwtr, bReset); fprintf(pArg->out, "Schema Heap Usage: %d bytes\n", iCur); iHiwtr = iCur = -1; sqlite3_db_status(db, SQLITE_DBSTATUS_STMT_USED, &iCur, &iHiwtr, bReset); fprintf(pArg->out, "Statement Heap/Lookaside Usage: %d bytes\n", iCur); } |
︙ | ︙ | |||
1283 1284 1285 1286 1287 1288 1289 | if( strcmp(zType, "table")==0 ){ sqlite3_stmt *pTableInfo = 0; char *zSelect = 0; char *zTableInfo = 0; char *zTmp = 0; int nRow = 0; | < | | < < < | | 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 | if( strcmp(zType, "table")==0 ){ sqlite3_stmt *pTableInfo = 0; char *zSelect = 0; char *zTableInfo = 0; char *zTmp = 0; int nRow = 0; zTableInfo = appendText(zTableInfo, "PRAGMA table_info(", 0); zTableInfo = appendText(zTableInfo, zTable, '"'); zTableInfo = appendText(zTableInfo, ");", 0); rc = sqlite3_prepare(p->db, zTableInfo, -1, &pTableInfo, 0); free(zTableInfo); if( rc!=SQLITE_OK || !pTableInfo ){ return 1; } zSelect = appendText(zSelect, "SELECT 'INSERT INTO ' || ", 0); /* Always quote the table name, even if it appears to be pure ascii, ** in case it is a keyword. Ex: INSERT INTO "table" ... */ zTmp = appendText(zTmp, zTable, '"'); if( zTmp ){ zSelect = appendText(zSelect, zTmp, '\''); } zSelect = appendText(zSelect, " || ' VALUES(' || ", 0); rc = sqlite3_step(pTableInfo); while( rc==SQLITE_ROW ){ const char *zText = (const char *)sqlite3_column_text(pTableInfo, 1); |
︙ | ︙ | |||
1996 1997 1998 1999 2000 2001 2002 | if( c=='n' && strncmp(azArg[0], "nullvalue", n)==0 && nArg==2 ) { sqlite3_snprintf(sizeof(p->nullvalue), p->nullvalue, "%.*s", (int)ArraySize(p->nullvalue)-1, azArg[1]); }else if( c=='o' && strncmp(azArg[0], "output", n)==0 && nArg==2 ){ if( p->out!=stdout ){ | > > > | > > > > > > > > > > | 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 | if( c=='n' && strncmp(azArg[0], "nullvalue", n)==0 && nArg==2 ) { sqlite3_snprintf(sizeof(p->nullvalue), p->nullvalue, "%.*s", (int)ArraySize(p->nullvalue)-1, azArg[1]); }else if( c=='o' && strncmp(azArg[0], "output", n)==0 && nArg==2 ){ if( p->out!=stdout ){ if( p->outfile[0]=='|' ){ pclose(p->out); }else{ fclose(p->out); } } if( strcmp(azArg[1],"stdout")==0 ){ p->out = stdout; sqlite3_snprintf(sizeof(p->outfile), p->outfile, "stdout"); }else if( azArg[1][0]=='|' ){ p->out = popen(&azArg[1][1], "w"); if( p->out==0 ){ fprintf(stderr,"Error: cannot open pipe \"%s\"\n", &azArg[1][1]); p->out = stdout; rc = 1; }else{ sqlite3_snprintf(sizeof(p->outfile), p->outfile, "%s", azArg[1]); } }else{ p->out = fopen(azArg[1], "wb"); if( p->out==0 ){ fprintf(stderr,"Error: cannot write to \"%s\"\n", azArg[1]); p->out = stdout; rc = 1; } else { |
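Not part of the check-in: a possible interactive session exercising the new pipe form of .output added above (the command after the leading "|" is arbitrary and only illustrative):

  sqlite> .output |sort
  sqlite> SELECT name FROM sqlite_master;
  sqlite> .output stdout

Switching the output back to stdout (or to a file) closes the pipe with pclose() rather than fclose(); on Windows the popen/pclose names map to _popen/_pclose via the defines added near the top of shell.c.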
︙ | ︙ |
Changes to src/sqlite.h.in.
︙ | ︙ | |||
5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 | ** </dd> ** ** [[SQLITE_DBSTATUS_CACHE_MISS]] ^(<dt>SQLITE_DBSTATUS_CACHE_MISS</dt> ** <dd>This parameter returns the number of pager cache misses that have ** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_MISS ** is always 0. ** </dd> ** </dl> */ #define SQLITE_DBSTATUS_LOOKASIDE_USED 0 #define SQLITE_DBSTATUS_CACHE_USED 1 #define SQLITE_DBSTATUS_SCHEMA_USED 2 #define SQLITE_DBSTATUS_STMT_USED 3 #define SQLITE_DBSTATUS_LOOKASIDE_HIT 4 #define SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE 5 #define SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL 6 #define SQLITE_DBSTATUS_CACHE_HIT 7 #define SQLITE_DBSTATUS_CACHE_MISS 8 | > > > > > > > > > > > > | | 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 | ** </dd> ** ** [[SQLITE_DBSTATUS_CACHE_MISS]] ^(<dt>SQLITE_DBSTATUS_CACHE_MISS</dt> ** <dd>This parameter returns the number of pager cache misses that have ** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_MISS ** is always 0. ** </dd> ** ** [[SQLITE_DBSTATUS_CACHE_WRITE]] ^(<dt>SQLITE_DBSTATUS_CACHE_WRITE</dt> ** <dd>This parameter returns the number of dirty cache entries that have ** been written to disk. Specifically, the number of pages written to the ** wal file in wal mode databases, or the number of pages written to the ** database file in rollback mode databases. Any pages written as part of ** transaction rollback or database recovery operations are not included. ** If an IO or other error occurs while writing a page to disk, the effect ** on subsequent SQLITE_DBSTATUS_CACHE_WRITE requests is undefined). ^The ** highwater mark associated with SQLITE_DBSTATUS_CACHE_WRITE is always 0. ** </dd> ** </dl> */ #define SQLITE_DBSTATUS_LOOKASIDE_USED 0 #define SQLITE_DBSTATUS_CACHE_USED 1 #define SQLITE_DBSTATUS_SCHEMA_USED 2 #define SQLITE_DBSTATUS_STMT_USED 3 #define SQLITE_DBSTATUS_LOOKASIDE_HIT 4 #define SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE 5 #define SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL 6 #define SQLITE_DBSTATUS_CACHE_HIT 7 #define SQLITE_DBSTATUS_CACHE_MISS 8 #define SQLITE_DBSTATUS_CACHE_WRITE 9 #define SQLITE_DBSTATUS_MAX 9 /* Largest defined DBSTATUS */ /* ** CAPI3REF: Prepared Statement Status ** ** ^(Each prepared statement maintains various ** [SQLITE_STMTSTATUS counters] that measure the number |
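A minimal usage sketch, not part of this check-in: the new SQLITE_DBSTATUS_CACHE_WRITE counter is read through the existing sqlite3_db_status() interface. An open handle db is assumed.

  #include <stdio.h>
  #include "sqlite3.h"

  static void reportCacheWrites(sqlite3 *db){
    int nCur = 0, nHi = 0;
    if( sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_WRITE, &nCur, &nHi, 0)==SQLITE_OK ){
      /* nCur: dirty pages written to the database or wal file; nHi is always 0 */
      printf("pages written: %d\n", nCur);
    }
  }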
︙ | ︙ |
Changes to src/sqliteInt.h.
︙ | ︙ | |||
1013 1014 1015 1016 1017 1018 1019 | struct FuncDestructor { int nRef; void (*xDestroy)(void *); void *pUserData; }; /* | | > > | | > > | 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 | struct FuncDestructor { int nRef; void (*xDestroy)(void *); void *pUserData; }; /* ** Possible values for FuncDef.flags. Note that the _LENGTH and _TYPEOF ** values must correspond to OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG. There ** are assert() statements in the code to verify this. */ #define SQLITE_FUNC_LIKE 0x01 /* Candidate for the LIKE optimization */ #define SQLITE_FUNC_CASE 0x02 /* Case-sensitive LIKE-type function */ #define SQLITE_FUNC_EPHEM 0x04 /* Ephemeral. Delete with VDBE */ #define SQLITE_FUNC_NEEDCOLL 0x08 /* sqlite3GetFuncCollSeq() might be called */ #define SQLITE_FUNC_COUNT 0x10 /* Built-in count(*) aggregate */ #define SQLITE_FUNC_COALESCE 0x20 /* Built-in coalesce() or ifnull() function */ #define SQLITE_FUNC_LENGTH 0x40 /* Built-in length() function */ #define SQLITE_FUNC_TYPEOF 0x80 /* Built-in typeof() function */ /* ** The following three macros, FUNCTION(), LIKEFUNC() and AGGREGATE() are ** used to create the initializers for the FuncDef structures. ** ** FUNCTION(zName, nArg, iArg, bNC, xFunc) ** Used to create a scalar function definition of a function zName |
︙ | ︙ | |||
1048 1049 1050 1051 1052 1053 1054 | ** that accepts nArg arguments and is implemented by a call to C ** function likeFunc. Argument pArg is cast to a (void *) and made ** available as the function user-data (sqlite3_user_data()). The ** FuncDef.flags variable is set to the value passed as the flags ** parameter. */ #define FUNCTION(zName, nArg, iArg, bNC, xFunc) \ | | > > > | 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 | ** that accepts nArg arguments and is implemented by a call to C ** function likeFunc. Argument pArg is cast to a (void *) and made ** available as the function user-data (sqlite3_user_data()). The ** FuncDef.flags variable is set to the value passed as the flags ** parameter. */ #define FUNCTION(zName, nArg, iArg, bNC, xFunc) \ {nArg, SQLITE_UTF8, (bNC*SQLITE_FUNC_NEEDCOLL), \ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, #zName, 0, 0} #define FUNCTION2(zName, nArg, iArg, bNC, xFunc, extraFlags) \ {nArg, SQLITE_UTF8, (bNC*SQLITE_FUNC_NEEDCOLL)|extraFlags, \ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, #zName, 0, 0} #define STR_FUNCTION(zName, nArg, pArg, bNC, xFunc) \ {nArg, SQLITE_UTF8, bNC*SQLITE_FUNC_NEEDCOLL, \ pArg, 0, xFunc, 0, 0, #zName, 0, 0} #define LIKEFUNC(zName, nArg, arg, flags) \ {nArg, SQLITE_UTF8, flags, (void *)arg, 0, likeFunc, 0, 0, #zName, 0, 0} #define AGGREGATE(zName, nArg, arg, nc, xStep, xFinal) \ |
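Illustration only, not part of the check-in: FUNCTION2() is FUNCTION() with extra bits ORed into FuncDef.flags, which is how OP_Column hints such as SQLITE_FUNC_LENGTH reach the expression coder. The length() entry added to func.c, for example, expands to approximately:

  FUNCTION2(length, 1, 0, 0, lengthFunc, SQLITE_FUNC_LENGTH)
  /* => {1, SQLITE_UTF8, (0*SQLITE_FUNC_NEEDCOLL)|SQLITE_FUNC_LENGTH,
  **     SQLITE_INT_TO_PTR(0), 0, lengthFunc, 0, 0, "length", 0, 0} */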
︙ | ︙ | |||
1278 1279 1280 1281 1282 1283 1284 | Select *pSelect; /* NULL for tables. Points to definition if a view. */ u16 nRef; /* Number of pointers to this Table */ u8 tabFlags; /* Mask of TF_* values */ u8 keyConf; /* What to do in case of uniqueness conflict on iPKey */ FKey *pFKey; /* Linked list of all foreign keys in this table */ char *zColAff; /* String defining the affinity of each column */ #ifndef SQLITE_OMIT_CHECK | | | 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 | Select *pSelect; /* NULL for tables. Points to definition if a view. */ u16 nRef; /* Number of pointers to this Table */ u8 tabFlags; /* Mask of TF_* values */ u8 keyConf; /* What to do in case of uniqueness conflict on iPKey */ FKey *pFKey; /* Linked list of all foreign keys in this table */ char *zColAff; /* String defining the affinity of each column */ #ifndef SQLITE_OMIT_CHECK ExprList *pCheck; /* All CHECK constraints */ #endif #ifndef SQLITE_OMIT_ALTERTABLE int addColOffset; /* Offset in CREATE TABLE stmt to add a new column */ #endif #ifndef SQLITE_OMIT_VIRTUALTABLE VTable *pVTable; /* List of VTable objects. */ int nModuleArg; /* Number of arguments to the module */ |
︙ | ︙ | |||
1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 | ** TK_TRIGGER: 1 -> new, 0 -> old */ ynVar iColumn; /* TK_COLUMN: column index. -1 for rowid. ** TK_VARIABLE: variable number (always >= 1). */ i16 iAgg; /* Which entry in pAggInfo->aCol[] or ->aFunc[] */ i16 iRightJoinTable; /* If EP_FromJoin, the right table of the join */ u8 flags2; /* Second set of flags. EP2_... */ u8 op2; /* If a TK_REGISTER, the original value of Expr.op */ AggInfo *pAggInfo; /* Used by TK_AGG_COLUMN and TK_AGG_FUNCTION */ Table *pTab; /* Table for TK_COLUMN expressions. */ #if SQLITE_MAX_EXPR_DEPTH>0 int nHeight; /* Height of the tree headed by this node */ #endif }; | > | 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 | ** TK_TRIGGER: 1 -> new, 0 -> old */ ynVar iColumn; /* TK_COLUMN: column index. -1 for rowid. ** TK_VARIABLE: variable number (always >= 1). */ i16 iAgg; /* Which entry in pAggInfo->aCol[] or ->aFunc[] */ i16 iRightJoinTable; /* If EP_FromJoin, the right table of the join */ u8 flags2; /* Second set of flags. EP2_... */ u8 op2; /* If a TK_REGISTER, the original value of Expr.op */ /* If TK_COLUMN, the value of p5 for OP_Column */ AggInfo *pAggInfo; /* Used by TK_AGG_COLUMN and TK_AGG_FUNCTION */ Table *pTab; /* Table for TK_COLUMN expressions. */ #if SQLITE_MAX_EXPR_DEPTH>0 int nHeight; /* Height of the tree headed by this node */ #endif }; |
︙ | ︙ | |||
1693 1694 1695 1696 1697 1698 1699 | #define EP_VarSelect 0x0020 /* pSelect is correlated, not constant */ #define EP_DblQuoted 0x0040 /* token.z was originally in "..." */ #define EP_InfixFunc 0x0080 /* True for an infix function: LIKE, GLOB, etc */ #define EP_ExpCollate 0x0100 /* Collating sequence specified explicitly */ #define EP_FixedDest 0x0200 /* Result needed in a specific register */ #define EP_IntValue 0x0400 /* Integer value contained in u.iValue */ #define EP_xIsSelect 0x0800 /* x.pSelect is valid (otherwise x.pList is) */ | | | 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 | #define EP_VarSelect 0x0020 /* pSelect is correlated, not constant */ #define EP_DblQuoted 0x0040 /* token.z was originally in "..." */ #define EP_InfixFunc 0x0080 /* True for an infix function: LIKE, GLOB, etc */ #define EP_ExpCollate 0x0100 /* Collating sequence specified explicitly */ #define EP_FixedDest 0x0200 /* Result needed in a specific register */ #define EP_IntValue 0x0400 /* Integer value contained in u.iValue */ #define EP_xIsSelect 0x0800 /* x.pSelect is valid (otherwise x.pList is) */ #define EP_Hint 0x1000 /* Not used */ #define EP_Reduced 0x2000 /* Expr struct is EXPR_REDUCEDSIZE bytes only */ #define EP_TokenOnly 0x4000 /* Expr struct is EXPR_TOKENONLYSIZE bytes only */ #define EP_Static 0x8000 /* Held in memory not obtained from malloc() */ /* ** The following are the meanings of bits in the Expr.flags2 field. */ |
︙ | ︙ | |||
2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 | yDbMask writeMask; /* Start a write transaction on these databases */ yDbMask cookieMask; /* Bitmask of schema verified databases */ int cookieGoto; /* Address of OP_Goto to cookie verifier subroutine */ int cookieValue[SQLITE_MAX_ATTACHED+2]; /* Values of cookies to verify */ int regRowid; /* Register holding rowid of CREATE TABLE entry */ int regRoot; /* Register holding root page number for new objects */ int nMaxArg; /* Max args passed to user function by sub-program */ #ifndef SQLITE_OMIT_SHARED_CACHE int nTableLock; /* Number of locks in aTableLock */ TableLock *aTableLock; /* Required table locks for shared-cache mode */ #endif AutoincInfo *pAinc; /* Information about AUTOINCREMENT counters */ /* Information used while coding trigger programs. */ | > | 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 | yDbMask writeMask; /* Start a write transaction on these databases */ yDbMask cookieMask; /* Bitmask of schema verified databases */ int cookieGoto; /* Address of OP_Goto to cookie verifier subroutine */ int cookieValue[SQLITE_MAX_ATTACHED+2]; /* Values of cookies to verify */ int regRowid; /* Register holding rowid of CREATE TABLE entry */ int regRoot; /* Register holding root page number for new objects */ int nMaxArg; /* Max args passed to user function by sub-program */ Token constraintName;/* Name of the constraint currently being parsed */ #ifndef SQLITE_OMIT_SHARED_CACHE int nTableLock; /* Number of locks in aTableLock */ TableLock *aTableLock; /* Required table locks for shared-cache mode */ #endif AutoincInfo *pAinc; /* Information about AUTOINCREMENT counters */ /* Information used while coding trigger programs. */ |
︙ | ︙ | |||
2280 2281 2282 2283 2284 2285 2286 | */ struct AuthContext { const char *zAuthContext; /* Put saved Parse.zAuthContext here */ Parse *pParse; /* The Parse structure */ }; /* | | > > > > > | 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 | */ struct AuthContext { const char *zAuthContext; /* Put saved Parse.zAuthContext here */ Parse *pParse; /* The Parse structure */ }; /* ** Bitfield flags for P5 value in various opcodes. ** ** Note that the values for ISNOOP and LENGTHARG are the same. But as ** those bits are never used on the same opcode, the overlap is harmless. */ #define OPFLAG_NCHANGE 0x01 /* Set to update db->nChange */ #define OPFLAG_LASTROWID 0x02 /* Set to update db->lastRowid */ #define OPFLAG_ISUPDATE 0x04 /* This OP_Insert is an sql UPDATE */ #define OPFLAG_APPEND 0x08 /* This is likely to be an append */ #define OPFLAG_USESEEKRESULT 0x10 /* Try to avoid a seek in BtreeInsert() */ #define OPFLAG_CLEARCACHE 0x20 /* Clear pseudo-table cache in OP_Column */ #define OPFLAG_ISNOOP 0x40 /* OP_Delete does pre-update-hook only */ #define OPFLAG_LENGTHARG 0x40 /* OP_Column only used for length() */ #define OPFLAG_TYPEOFARG 0x80 /* OP_Column only used for typeof() */ /* * Each trigger present in the database schema is stored as an instance of * struct Trigger. * * Pointers to instances of struct Trigger are stored in two ways. * 1. In the "trigHash" hash table (part of the sqlite3* that represents the |
︙ | ︙ | |||
2772 2773 2774 2775 2776 2777 2778 | #if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) Expr *sqlite3LimitWhere(Parse *, SrcList *, Expr *, ExprList *, Expr *, Expr *, char *); #endif void sqlite3DeleteFrom(Parse*, SrcList*, Expr*); void sqlite3Update(Parse*, SrcList*, ExprList*, Expr*, int); WhereInfo *sqlite3WhereBegin(Parse*, SrcList*, Expr*, ExprList**,ExprList*,u16); void sqlite3WhereEnd(WhereInfo*); | | | 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 | #if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) Expr *sqlite3LimitWhere(Parse *, SrcList *, Expr *, ExprList *, Expr *, Expr *, char *); #endif void sqlite3DeleteFrom(Parse*, SrcList*, Expr*); void sqlite3Update(Parse*, SrcList*, ExprList*, Expr*, int); WhereInfo *sqlite3WhereBegin(Parse*, SrcList*, Expr*, ExprList**,ExprList*,u16); void sqlite3WhereEnd(WhereInfo*); int sqlite3ExprCodeGetColumn(Parse*, Table*, int, int, int, u8); void sqlite3ExprCodeGetColumnOfTable(Vdbe*, Table*, int, int, int); void sqlite3ExprCodeMove(Parse*, int, int, int); void sqlite3ExprCodeCopy(Parse*, int, int, int); void sqlite3ExprCacheStore(Parse*, int, int, int); void sqlite3ExprCachePush(Parse*); void sqlite3ExprCachePop(Parse*, int); void sqlite3ExprCacheRemove(Parse*, int, int); |
︙ | ︙ |
Changes to src/status.c.
︙ | ︙ | |||
220 221 222 223 224 225 226 | /* ** Set *pCurrent to the total cache hits or misses encountered by all ** pagers the database handle is connected to. *pHighwater is always set ** to zero. */ case SQLITE_DBSTATUS_CACHE_HIT: | | > > | 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 | /* ** Set *pCurrent to the total cache hits or misses encountered by all ** pagers the database handle is connected to. *pHighwater is always set ** to zero. */ case SQLITE_DBSTATUS_CACHE_HIT: case SQLITE_DBSTATUS_CACHE_MISS: case SQLITE_DBSTATUS_CACHE_WRITE:{ int i; int nRet = 0; assert( SQLITE_DBSTATUS_CACHE_MISS==SQLITE_DBSTATUS_CACHE_HIT+1 ); assert( SQLITE_DBSTATUS_CACHE_WRITE==SQLITE_DBSTATUS_CACHE_HIT+2 ); for(i=0; i<db->nDb; i++){ if( db->aDb[i].pBt ){ Pager *pPager = sqlite3BtreePager(db->aDb[i].pBt); sqlite3PagerCacheStat(pPager, op, resetFlag, &nRet); } } |
︙ | ︙ |
Changes to src/tclsqlite.c.
︙ | ︙ | |||
3250 3251 3252 3253 3254 3255 3256 | */ Tcl_CreateObjCommand(interp, "sqlite", (Tcl_ObjCmdProc*)DbMain, 0, 0); #endif return TCL_OK; } EXTERN int Tclsqlite3_Init(Tcl_Interp *interp){ return Sqlite3_Init(interp); } | < < < < > > > > < < < < | 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 | */ Tcl_CreateObjCommand(interp, "sqlite", (Tcl_ObjCmdProc*)DbMain, 0, 0); #endif return TCL_OK; } EXTERN int Tclsqlite3_Init(Tcl_Interp *interp){ return Sqlite3_Init(interp); } EXTERN int Sqlite3_Unload(Tcl_Interp *interp, int flags){ return TCL_OK; } EXTERN int Tclsqlite3_Unload(Tcl_Interp *interp, int flags){ return TCL_OK; } /* Because it accesses the file-system and uses persistent state, SQLite ** is not considered appropriate for safe interpreters. Hence, we deliberately ** omit the _SafeInit() interfaces. */ #ifndef SQLITE_3_SUFFIX_ONLY int Sqlite_Init(Tcl_Interp *interp){ return Sqlite3_Init(interp); } int Tclsqlite_Init(Tcl_Interp *interp){ return Sqlite3_Init(interp); } int Sqlite_Unload(Tcl_Interp *interp, int flags){ return TCL_OK; } int Tclsqlite_Unload(Tcl_Interp *interp, int flags){ return TCL_OK; } #endif #ifdef TCLSH /***************************************************************************** ** All of the code that follows is used to build standalone TCL interpreters ** that are statically linked with SQLite. Enable these by compiling ** with -DTCLSH=n where n can be 1 or 2. An n of 1 generates a standard |
︙ | ︙ |
Changes to src/test1.c.
︙ | ︙ | |||
3259 3260 3261 3262 3263 3264 3265 | #ifndef SQLITE_OMIT_UTF16 sqlite3_stmt *pStmt; int idx; int bytes; char *value; int rc; | | | 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 | #ifndef SQLITE_OMIT_UTF16 sqlite3_stmt *pStmt; int idx; int bytes; char *value; int rc; void (*xDel)(void*) = (objc==6?SQLITE_STATIC:SQLITE_TRANSIENT); Tcl_Obj *oStmt = objv[objc-4]; Tcl_Obj *oN = objv[objc-3]; Tcl_Obj *oString = objv[objc-2]; Tcl_Obj *oBytes = objv[objc-1]; if( objc!=5 && objc!=6){ Tcl_AppendResult(interp, "wrong # args: should be \"", |
︙ | ︙ | |||
3607 3608 3609 3610 3611 3612 3613 | rc = sqlite3_prepare(db, zSql, bytes, &pStmt, objc>=5 ? &zTail : 0); Tcl_ResetResult(interp); if( sqlite3TestErrCode(interp, db, rc) ) return TCL_ERROR; if( zTail && objc>=5 ){ if( bytes>=0 ){ bytes = bytes - (zTail-zSql); } | | | 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 | rc = sqlite3_prepare(db, zSql, bytes, &pStmt, objc>=5 ? &zTail : 0); Tcl_ResetResult(interp); if( sqlite3TestErrCode(interp, db, rc) ) return TCL_ERROR; if( zTail && objc>=5 ){ if( bytes>=0 ){ bytes = bytes - (zTail-zSql); } if( (int)strlen(zTail)<bytes ){ bytes = strlen(zTail); } Tcl_ObjSetVar2(interp, objv[4], 0, Tcl_NewStringObj(zTail, bytes), 0); } if( rc!=SQLITE_OK ){ assert( pStmt==0 ); sprintf(zBuf, "(%d) ", rc); |
︙ | ︙ | |||
5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 | int ok; /* Finished ok */ int err; /* True if an error occurs */ }; #endif #if SQLITE_OS_WIN /* ** The background thread that does file locking. */ static void win32_file_locker(void *pAppData){ struct win32FileLocker *p = (struct win32FileLocker*)pAppData; if( p->evName ){ HANDLE ev = OpenEvent(EVENT_MODIFY_STATE, FALSE, p->evName); | > | 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 | int ok; /* Finished ok */ int err; /* True if an error occurs */ }; #endif #if SQLITE_OS_WIN #include <process.h> /* ** The background thread that does file locking. */ static void win32_file_locker(void *pAppData){ struct win32FileLocker *p = (struct win32FileLocker*)pAppData; if( p->evName ){ HANDLE ev = OpenEvent(EVENT_MODIFY_STATE, FALSE, p->evName); |
︙ | ︙ | |||
6124 6125 6126 6127 6128 6129 6130 | { "sqlite3_column_name16", test_stmt_utf16, (void*)sqlite3_column_name16}, { "add_alignment_test_collations", add_alignment_test_collations, 0 }, #ifndef SQLITE_OMIT_DECLTYPE { "sqlite3_column_decltype16",test_stmt_utf16,(void*)sqlite3_column_decltype16}, #endif #ifdef SQLITE_ENABLE_COLUMN_METADATA {"sqlite3_column_database_name16", | | | 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 | { "sqlite3_column_name16", test_stmt_utf16, (void*)sqlite3_column_name16}, { "add_alignment_test_collations", add_alignment_test_collations, 0 }, #ifndef SQLITE_OMIT_DECLTYPE { "sqlite3_column_decltype16",test_stmt_utf16,(void*)sqlite3_column_decltype16}, #endif #ifdef SQLITE_ENABLE_COLUMN_METADATA {"sqlite3_column_database_name16", test_stmt_utf16, (void*)sqlite3_column_database_name16}, {"sqlite3_column_table_name16", test_stmt_utf16, (void*)sqlite3_column_table_name16}, {"sqlite3_column_origin_name16", test_stmt_utf16, (void*)sqlite3_column_origin_name16}, #endif #endif { "sqlite3_create_collation_v2", test_create_collation_v2, 0 }, { "sqlite3_global_recover", test_global_recover, 0 }, { "working_64bit_int", working_64bit_int, 0 }, |
︙ | ︙ |
Changes to src/test3.c.
︙ | ︙ | |||
461 462 463 464 465 466 467 | } if( Tcl_GetInt(interp, argv[1], (int*)&start) ) return TCL_ERROR; if( Tcl_GetInt(interp, argv[2], (int*)&mult) ) return TCL_ERROR; if( Tcl_GetInt(interp, argv[3], (int*)&count) ) return TCL_ERROR; if( Tcl_GetInt(interp, argv[4], (int*)&incr) ) return TCL_ERROR; in = start; in *= mult; | | | 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 | } if( Tcl_GetInt(interp, argv[1], (int*)&start) ) return TCL_ERROR; if( Tcl_GetInt(interp, argv[2], (int*)&mult) ) return TCL_ERROR; if( Tcl_GetInt(interp, argv[3], (int*)&count) ) return TCL_ERROR; if( Tcl_GetInt(interp, argv[4], (int*)&incr) ) return TCL_ERROR; in = start; in *= mult; for(i=0; i<(int)count; i++){ char zErr[200]; n1 = putVarint(zBuf, in); if( n1>9 || n1<1 ){ sprintf(zErr, "putVarint returned %d - should be between 1 and 9", n1); Tcl_AppendResult(interp, zErr, 0); return TCL_ERROR; } |
︙ | ︙ |
Changes to src/test6.c.
︙ | ︙ | |||
173 174 175 176 177 178 179 | static int writeDbFile(CrashFile *p, u8 *z, i64 iAmt, i64 iOff){ int rc = SQLITE_OK; int iSkip = 0; if( iOff==PENDING_BYTE && (p->flags&SQLITE_OPEN_MAIN_DB) ){ iSkip = 512; } if( (iAmt-iSkip)>0 ){ | | | 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 | static int writeDbFile(CrashFile *p, u8 *z, i64 iAmt, i64 iOff){ int rc = SQLITE_OK; int iSkip = 0; if( iOff==PENDING_BYTE && (p->flags&SQLITE_OPEN_MAIN_DB) ){ iSkip = 512; } if( (iAmt-iSkip)>0 ){ rc = sqlite3OsWrite(p->pRealFile, &z[iSkip], (int)(iAmt-iSkip), iOff+iSkip); } return rc; } /* ** Flush the write-list as if xSync() had been called on file handle ** pFile. If isCrash is true, simulate a crash. |
︙ | ︙ | |||
302 303 304 305 306 307 308 | ); } #endif break; } case 3: { /* Trash sectors */ u8 *zGarbage; | | | | 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 | ); } #endif break; } case 3: { /* Trash sectors */ u8 *zGarbage; int iFirst = (int)(pWrite->iOffset/g.iSectorSize); int iLast = (int)((pWrite->iOffset+pWrite->nBuf-1)/g.iSectorSize); assert(pWrite->zBuf); #ifdef TRACE_CRASHTEST printf("Trashing %d sectors @ sector %d (%s)\n", 1+iLast-iFirst, iFirst, pWrite->pFile->zName ); |
︙ | ︙ | |||
426 427 428 429 430 431 432 | sqlite3_file *pFile, const void *zBuf, int iAmt, sqlite_int64 iOfst ){ CrashFile *pCrash = (CrashFile *)pFile; if( iAmt+iOfst>pCrash->iSize ){ | | | 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 | sqlite3_file *pFile, const void *zBuf, int iAmt, sqlite_int64 iOfst ){ CrashFile *pCrash = (CrashFile *)pFile; if( iAmt+iOfst>pCrash->iSize ){ pCrash->iSize = (int)(iAmt+iOfst); } while( pCrash->iSize>pCrash->nData ){ u8 *zNew; int nNew = (pCrash->nData*2) + 4096; zNew = crash_realloc(pCrash->zData, nNew); if( !zNew ){ return SQLITE_NOMEM; |
︙ | ︙ | |||
450 451 452 453 454 455 456 | /* ** Truncate a crash-file. */ static int cfTruncate(sqlite3_file *pFile, sqlite_int64 size){ CrashFile *pCrash = (CrashFile *)pFile; assert(size>=0); if( pCrash->iSize>size ){ | | | 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 | /* ** Truncate a crash-file. */ static int cfTruncate(sqlite3_file *pFile, sqlite_int64 size){ CrashFile *pCrash = (CrashFile *)pFile; assert(size>=0); if( pCrash->iSize>size ){ pCrash->iSize = (int)size; } return writeListAppend(pFile, size, 0, 0); } /* ** Sync a crash-file. */ |
︙ | ︙ | |||
514 515 516 517 518 519 520 | } static int cfFileControl(sqlite3_file *pFile, int op, void *pArg){ if( op==SQLITE_FCNTL_SIZE_HINT ){ CrashFile *pCrash = (CrashFile *)pFile; i64 nByte = *(i64 *)pArg; if( nByte>pCrash->iSize ){ if( SQLITE_OK==writeListAppend(pFile, nByte, 0, 0) ){ | | | 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 | } static int cfFileControl(sqlite3_file *pFile, int op, void *pArg){ if( op==SQLITE_FCNTL_SIZE_HINT ){ CrashFile *pCrash = (CrashFile *)pFile; i64 nByte = *(i64 *)pArg; if( nByte>pCrash->iSize ){ if( SQLITE_OK==writeListAppend(pFile, nByte, 0, 0) ){ pCrash->iSize = (int)nByte; } } return SQLITE_OK; } return sqlite3OsFileControl(((CrashFile *)pFile)->pRealFile, op, pArg); } |
︙ | ︙ | |||
631 632 633 634 635 636 637 | */ const int isDb = (flags&SQLITE_OPEN_MAIN_DB); i64 iChunk = pWrapper->iSize; if( iChunk>PENDING_BYTE && isDb ){ iChunk = PENDING_BYTE; } memset(pWrapper->zData, 0, pWrapper->nData); | | | | 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 | */ const int isDb = (flags&SQLITE_OPEN_MAIN_DB); i64 iChunk = pWrapper->iSize; if( iChunk>PENDING_BYTE && isDb ){ iChunk = PENDING_BYTE; } memset(pWrapper->zData, 0, pWrapper->nData); rc = sqlite3OsRead(pReal, pWrapper->zData, (int)iChunk, 0); if( SQLITE_OK==rc && pWrapper->iSize>(PENDING_BYTE+512) && isDb ){ i64 iOff = PENDING_BYTE+512; iChunk = pWrapper->iSize - iOff; rc = sqlite3OsRead(pReal, &pWrapper->zData[iOff], (int)iChunk, iOff); } }else{ rc = SQLITE_NOMEM; } } if( rc!=SQLITE_OK && pWrapper->pMethod ){ sqlite3OsClose(pFile); |
︙ | ︙ |
Changes to src/test8.c.
︙ | ︙ | |||
827 828 829 830 831 832 833 | pConstraint = &pIdxInfo->aConstraint[ii]; pUsage = &pIdxInfo->aConstraintUsage[ii]; if( !isIgnoreUsable && !pConstraint->usable ) continue; iCol = pConstraint->iColumn; | | | < < < | 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 | pConstraint = &pIdxInfo->aConstraint[ii]; pUsage = &pIdxInfo->aConstraintUsage[ii]; if( !isIgnoreUsable && !pConstraint->usable ) continue; iCol = pConstraint->iColumn; if( iCol<0 || pVtab->aIndex[iCol] ){ char *zCol = iCol>=0 ? pVtab->aCol[iCol] : "rowid"; char *zOp = 0; useIdx = 1; switch( pConstraint->op ){ case SQLITE_INDEX_CONSTRAINT_EQ: zOp = "="; break; case SQLITE_INDEX_CONSTRAINT_LT: zOp = "<"; break; case SQLITE_INDEX_CONSTRAINT_GT: zOp = ">"; break; |
︙ | ︙ | |||
866 867 868 869 870 871 872 | } } /* If there is only one term in the ORDER BY clause, and it is ** on a column that this virtual table has an index for, then consume ** the ORDER BY clause. */ | | > > | < < < | 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 | } } /* If there is only one term in the ORDER BY clause, and it is ** on a column that this virtual table has an index for, then consume ** the ORDER BY clause. */ if( pIdxInfo->nOrderBy==1 && ( pIdxInfo->aOrderBy->iColumn<0 || pVtab->aIndex[pIdxInfo->aOrderBy->iColumn]) ){ int iCol = pIdxInfo->aOrderBy->iColumn; char *zCol = iCol>=0 ? pVtab->aCol[iCol] : "rowid"; char *zDir = pIdxInfo->aOrderBy->desc?"DESC":"ASC"; zNew = sqlite3_mprintf(" ORDER BY %s %s", zCol, zDir); string_concat(&zQuery, zNew, 1, &rc); pIdxInfo->orderByConsumed = 1; } appendToEchoModule(pVtab->interp, "xBestIndex");; appendToEchoModule(pVtab->interp, zQuery); |
︙ | ︙ |
Changes to src/test_journal.c.
︙ | ︙ | |||
400 401 402 403 404 405 406 | } iTrunk = decodeUint32(aData); } /* Calculate and store a checksum for each page in the database file. */ if( rc==SQLITE_OK ){ int ii; | | | 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 | } iTrunk = decodeUint32(aData); } /* Calculate and store a checksum for each page in the database file. */ if( rc==SQLITE_OK ){ int ii; for(ii=0; rc==SQLITE_OK && ii<(int)pMain->nPage; ii++){ i64 iOff = (i64)(pMain->nPagesize) * (i64)ii; if( iOff==PENDING_BYTE ) continue; rc = sqlite3OsRead(pMain->pReal, aData, pMain->nPagesize, iOff); pMain->aCksum[ii] = genCksum(aData, pMain->nPagesize); if( ii+1==pMain->nPage && rc==SQLITE_IOERR_SHORT_READ ) rc = SQLITE_OK; } } |
︙ | ︙ | |||
462 463 464 465 466 467 468 | */ if( iSize>=(iOff+nSector) ){ rc = sqlite3OsRead(pReal, zBuf, 28, iOff); if( rc!=SQLITE_OK || 0==decodeJournalHdr(zBuf, 0, 0, 0, 0) ){ continue; } } | | | 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 | */ if( iSize>=(iOff+nSector) ){ rc = sqlite3OsRead(pReal, zBuf, 28, iOff); if( rc!=SQLITE_OK || 0==decodeJournalHdr(zBuf, 0, 0, 0, 0) ){ continue; } } nRec = (u32)((iSize-iOff) / (pMain->nPagesize+8)); } /* Read all the records that follow the journal-header just read. */ for(ii=0; rc==SQLITE_OK && ii<nRec && iOff<iSize; ii++){ u32 pgno; rc = sqlite3OsRead(pReal, zBuf, 4, iOff); if( rc==SQLITE_OK ){ |
︙ | ︙ | |||
534 535 536 537 538 539 540 | } if( p->iMaxOff<(iOfst + iAmt) ){ p->iMaxOff = iOfst + iAmt; } } if( p->flags&SQLITE_OPEN_MAIN_DB && p->pWritable ){ | | | | 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 | } if( p->iMaxOff<(iOfst + iAmt) ){ p->iMaxOff = iOfst + iAmt; } } if( p->flags&SQLITE_OPEN_MAIN_DB && p->pWritable ){ if( iAmt<(int)p->nPagesize && p->nPagesize%iAmt==0 && iOfst>=(PENDING_BYTE+512) && iOfst+iAmt<=PENDING_BYTE+p->nPagesize ){ /* No-op. This special case is hit when the backup code is copying a ** to a database with a larger page-size than the source database and ** it needs to fill in the non-locking-region part of the original ** pending-byte page. */ }else{ u32 pgno = (u32)(iOfst/p->nPagesize + 1); assert( (iAmt==1||iAmt==p->nPagesize) && ((iOfst+iAmt)%p->nPagesize)==0 ); assert( pgno<=p->nPage || p->nSync>0 ); assert( pgno>p->nPage || sqlite3BitvecTest(p->pWritable, pgno) ); } } rc = sqlite3OsWrite(p->pReal, zBuf, iAmt, iOfst); |
︙ | ︙ | |||
574 575 576 577 578 579 580 | /* Truncating a journal file. This is the end of a transaction. */ jt_file *pMain = locateDatabaseHandle(p->zName); closeTransaction(pMain); } if( p->flags&SQLITE_OPEN_MAIN_DB && p->pWritable ){ u32 pgno; u32 locking_page = (u32)(PENDING_BYTE/p->nPagesize+1); | | | 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 | /* Truncating a journal file. This is the end of a transaction. */ jt_file *pMain = locateDatabaseHandle(p->zName); closeTransaction(pMain); } if( p->flags&SQLITE_OPEN_MAIN_DB && p->pWritable ){ u32 pgno; u32 locking_page = (u32)(PENDING_BYTE/p->nPagesize+1); for(pgno=(u32)(size/p->nPagesize+1); pgno<=p->nPage; pgno++){ assert( pgno==locking_page || sqlite3BitvecTest(p->pWritable, pgno) ); } } return sqlite3OsTruncate(p->pReal, size); } /* |
︙ | ︙ |
Changes to src/test_malloc.c.
︙ | ︙ | |||
1319 1320 1321 1322 1323 1324 1325 | { "CACHE_USED", SQLITE_DBSTATUS_CACHE_USED }, { "SCHEMA_USED", SQLITE_DBSTATUS_SCHEMA_USED }, { "STMT_USED", SQLITE_DBSTATUS_STMT_USED }, { "LOOKASIDE_HIT", SQLITE_DBSTATUS_LOOKASIDE_HIT }, { "LOOKASIDE_MISS_SIZE", SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE }, { "LOOKASIDE_MISS_FULL", SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL }, { "CACHE_HIT", SQLITE_DBSTATUS_CACHE_HIT }, | | > | 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 | { "CACHE_USED", SQLITE_DBSTATUS_CACHE_USED }, { "SCHEMA_USED", SQLITE_DBSTATUS_SCHEMA_USED }, { "STMT_USED", SQLITE_DBSTATUS_STMT_USED }, { "LOOKASIDE_HIT", SQLITE_DBSTATUS_LOOKASIDE_HIT }, { "LOOKASIDE_MISS_SIZE", SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE }, { "LOOKASIDE_MISS_FULL", SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL }, { "CACHE_HIT", SQLITE_DBSTATUS_CACHE_HIT }, { "CACHE_MISS", SQLITE_DBSTATUS_CACHE_MISS }, { "CACHE_WRITE", SQLITE_DBSTATUS_CACHE_WRITE } }; Tcl_Obj *pResult; if( objc!=4 ){ Tcl_WrongNumArgs(interp, 1, objv, "DB PARAMETER RESETFLAG"); return TCL_ERROR; } if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ) return TCL_ERROR; |
︙ | ︙ |
Changes to src/test_multiplex.c.
︙ | ︙ | |||
525 526 527 528 529 530 531 | const char *zUri = (flags & SQLITE_OPEN_URI) ? zName : 0; /* assign pointers to extra space allocated */ memset(pGroup, 0, sz); pMultiplexOpen->pGroup = pGroup; pGroup->bEnabled = -1; pGroup->bTruncate = sqlite3_uri_boolean(zUri, "truncate", (flags & SQLITE_OPEN_MAIN_DB)==0); | | | 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 | const char *zUri = (flags & SQLITE_OPEN_URI) ? zName : 0; /* assign pointers to extra space allocated */ memset(pGroup, 0, sz); pMultiplexOpen->pGroup = pGroup; pGroup->bEnabled = -1; pGroup->bTruncate = sqlite3_uri_boolean(zUri, "truncate", (flags & SQLITE_OPEN_MAIN_DB)==0); pGroup->szChunk = (int)sqlite3_uri_int64(zUri, "chunksize", SQLITE_MULTIPLEX_CHUNK_SIZE); pGroup->szChunk = (pGroup->szChunk+0xffff)&~0xffff; if( zName ){ char *p = (char *)&pGroup[1]; pGroup->zName = p; memcpy(pGroup->zName, zName, nName+1); pGroup->nName = nName; |
︙ | ︙ | |||
593 594 595 596 597 598 599 | ** just disable the multiplexor altogether. */ rc = pOrigVfs->xAccess(pOrigVfs, pGroup->aReal[1].z, SQLITE_ACCESS_EXISTS, &bExists); bExists = multiplexSubSize(pGroup, 1, &rc)>0; if( rc==SQLITE_OK && bExists && sz==(sz&0xffff0000) && sz>0 && sz!=pGroup->szChunk ){ | | | 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 | ** just disable the multiplexor altogether. */ rc = pOrigVfs->xAccess(pOrigVfs, pGroup->aReal[1].z, SQLITE_ACCESS_EXISTS, &bExists); bExists = multiplexSubSize(pGroup, 1, &rc)>0; if( rc==SQLITE_OK && bExists && sz==(sz&0xffff0000) && sz>0 && sz!=pGroup->szChunk ){ pGroup->szChunk = (int)sz; }else if( rc==SQLITE_OK && !bExists && sz>pGroup->szChunk ){ pGroup->bEnabled = 0; } } } }
︙ | ︙ |
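The test_multiplex.c change above reads the multiplexor chunk size from a "chunksize" URI query parameter via sqlite3_uri_int64() and rounds it up to a 64KB boundary. A minimal sketch of how a test program might open a database through the multiplex shim with an explicit chunk size follows; the file name and chunk value are invented, and it assumes the shim has been registered with sqlite3_multiplex_initialize() as provided by test_multiplex.c.

  #include <stdio.h>
  #include "sqlite3.h"

  /* Declared in src/test_multiplex.h: register the multiplex VFS shim. */
  extern int sqlite3_multiplex_initialize(const char *zOrigVfsName, int makeDefault);

  static int open_with_chunksize(sqlite3 **pDb){
    int rc;

    /* Stack the multiplex shim on top of the default VFS and make it the
    ** default so that the URI parameters below reach it. */
    rc = sqlite3_multiplex_initialize(0, 1);
    if( rc!=SQLITE_OK ) return rc;

    /* SQLITE_OPEN_URI is required for "?chunksize=" to be parsed.  The
    ** value is rounded up to a 64KB boundary by the shim. */
    rc = sqlite3_open_v2("file:test.db?chunksize=1048576", pDb,
             SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_URI, 0);
    if( rc!=SQLITE_OK ){
      fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(*pDb));
    }
    return rc;
  }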
Changes to src/test_onefile.c.
︙ | ︙ | |||
284 285 286 287 288 289 290 | sqlite3_file *pFile, const void *zBuf, int iAmt, sqlite_int64 iOfst ){ tmp_file *pTmp = (tmp_file *)pFile; if( (iAmt+iOfst)>pTmp->nAlloc ){ | | | | | 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 | sqlite3_file *pFile, const void *zBuf, int iAmt, sqlite_int64 iOfst ){ tmp_file *pTmp = (tmp_file *)pFile; if( (iAmt+iOfst)>pTmp->nAlloc ){ int nNew = (int)(2*(iAmt+iOfst+pTmp->nAlloc)); char *zNew = sqlite3_realloc(pTmp->zAlloc, nNew); if( !zNew ){ return SQLITE_NOMEM; } pTmp->zAlloc = zNew; pTmp->nAlloc = nNew; } memcpy(&pTmp->zAlloc[iOfst], zBuf, iAmt); pTmp->nSize = (int)MAX(pTmp->nSize, iOfst+iAmt); return SQLITE_OK; } /* ** Truncate a tmp-file. */ static int tmpTruncate(sqlite3_file *pFile, sqlite_int64 size){ tmp_file *pTmp = (tmp_file *)pFile; pTmp->nSize = (int)MIN(pTmp->nSize, size); return SQLITE_OK; } /* ** Sync a tmp-file. */ static int tmpSync(sqlite3_file *pFile, int flags){ |
︙ | ︙ | |||
414 415 416 417 418 419 420 | rc = SQLITE_IOERR_SHORT_READ; }else if( p->eType==DATABASE_FILE ){ rc = pF->pMethods->xRead(pF, zBuf, iAmt, iOfst+BLOCKSIZE); }else{ /* Journal file. */ int iRem = iAmt; int iBuf = 0; | | | 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 | rc = SQLITE_IOERR_SHORT_READ; }else if( p->eType==DATABASE_FILE ){ rc = pF->pMethods->xRead(pF, zBuf, iAmt, iOfst+BLOCKSIZE); }else{ /* Journal file. */ int iRem = iAmt; int iBuf = 0; int ii = (int)iOfst; while( iRem>0 && rc==SQLITE_OK ){ int iRealOff = pReal->nBlob - BLOCKSIZE*((ii/BLOCKSIZE)+1) + ii%BLOCKSIZE; int iRealAmt = MIN(iRem, BLOCKSIZE - (iRealOff%BLOCKSIZE)); rc = pF->pMethods->xRead(pF, &((char *)zBuf)[iBuf], iRealAmt, iRealOff); ii += iRealAmt; iBuf += iRealAmt; |
︙ | ︙ | |||
449 450 451 452 453 454 455 | if( p->eType==DATABASE_FILE ){ if( (iAmt+iOfst+BLOCKSIZE)>(pReal->nBlob-pReal->nJournal) ){ rc = SQLITE_FULL; }else{ rc = pF->pMethods->xWrite(pF, zBuf, iAmt, iOfst+BLOCKSIZE); if( rc==SQLITE_OK ){ | | | | | | | 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 | if( p->eType==DATABASE_FILE ){ if( (iAmt+iOfst+BLOCKSIZE)>(pReal->nBlob-pReal->nJournal) ){ rc = SQLITE_FULL; }else{ rc = pF->pMethods->xWrite(pF, zBuf, iAmt, iOfst+BLOCKSIZE); if( rc==SQLITE_OK ){ pReal->nDatabase = (int)MAX(pReal->nDatabase, iAmt+iOfst); } } }else{ /* Journal file. */ int iRem = iAmt; int iBuf = 0; int ii = (int)iOfst; while( iRem>0 && rc==SQLITE_OK ){ int iRealOff = pReal->nBlob - BLOCKSIZE*((ii/BLOCKSIZE)+1) + ii%BLOCKSIZE; int iRealAmt = MIN(iRem, BLOCKSIZE - (iRealOff%BLOCKSIZE)); if( iRealOff<(pReal->nDatabase+BLOCKSIZE) ){ rc = SQLITE_FULL; }else{ rc = pF->pMethods->xWrite(pF, &((char *)zBuf)[iBuf], iRealAmt,iRealOff); ii += iRealAmt; iBuf += iRealAmt; iRem -= iRealAmt; } } if( rc==SQLITE_OK ){ pReal->nJournal = (int)MAX(pReal->nJournal, iAmt+iOfst); } } return rc; } /* ** Truncate an fs-file. */ static int fsTruncate(sqlite3_file *pFile, sqlite_int64 size){ fs_file *p = (fs_file *)pFile; fs_real_file *pReal = p->pReal; if( p->eType==DATABASE_FILE ){ pReal->nDatabase = (int)MIN(pReal->nDatabase, size); }else{ pReal->nJournal = (int)MIN(pReal->nJournal, size); } return SQLITE_OK; } /* ** Sync an fs-file. */ |
︙ | ︙ | |||
637 638 639 640 641 642 643 | goto open_out; } if( size==0 ){ rc = pRealFile->pMethods->xWrite(pRealFile, "\0", 1, BLOBSIZE-1); pReal->nBlob = BLOBSIZE; }else{ unsigned char zS[4]; | | | 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 | goto open_out; } if( size==0 ){ rc = pRealFile->pMethods->xWrite(pRealFile, "\0", 1, BLOBSIZE-1); pReal->nBlob = BLOBSIZE; }else{ unsigned char zS[4]; pReal->nBlob = (int)size; rc = pRealFile->pMethods->xRead(pRealFile, zS, 4, 0); pReal->nDatabase = (zS[0]<<24)+(zS[1]<<16)+(zS[2]<<8)+zS[3]; if( rc==SQLITE_OK ){ rc = pRealFile->pMethods->xRead(pRealFile, zS, 4, pReal->nBlob-4); if( zS[0] || zS[1] || zS[2] || zS[3] ){ pReal->nJournal = pReal->nBlob; } |
︙ | ︙ |
Changes to src/test_osinst.c.
︙ | ︙ | |||
238 239 240 241 242 243 244 | } #else static sqlite3_uint64 vfslog_time(){ return 0; } #endif | | | 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 | } #else static sqlite3_uint64 vfslog_time(){ return 0; } #endif static void vfslog_call(sqlite3_vfs *, int, int, sqlite3_int64, int, int, int); static void vfslog_string(sqlite3_vfs *, const char *); /* ** Close an vfslog-file. */ static int vfslogClose(sqlite3_file *pFile){ sqlite3_uint64 t; |
︙ | ︙ | |||
644 645 646 647 648 649 650 | p[3] = v; } static void vfslog_call( sqlite3_vfs *pVfs, int eEvent, int iFileid, | | | | 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 | p[3] = v; } static void vfslog_call( sqlite3_vfs *pVfs, int eEvent, int iFileid, sqlite3_int64 nClick, int return_code, int size, int offset ){ VfslogVfs *p = (VfslogVfs *)pVfs; unsigned char *zRec; if( (24+p->nBuf)>sizeof(p->aBuf) ){ vfslog_flush(p); } zRec = (unsigned char *)&p->aBuf[p->nBuf]; put32bits(&zRec[0], eEvent); put32bits(&zRec[4], iFileid); put32bits(&zRec[8], (unsigned int)(nClick&0xffff)); put32bits(&zRec[12], return_code); put32bits(&zRec[16], size); put32bits(&zRec[20], offset); p->nBuf += 24; } static void vfslog_string(sqlite3_vfs *pVfs, const char *zStr){ |
︙ | ︙ | |||
1039 1040 1041 1042 1043 1044 1045 | switch( i ){ case 0: { sqlite3_result_text(ctx, vfslog_eventname(val), -1, SQLITE_STATIC); break; } case 1: { char *zStr = pCsr->zTransient; | | | 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 | switch( i ){ case 0: { sqlite3_result_text(ctx, vfslog_eventname(val), -1, SQLITE_STATIC); break; } case 1: { char *zStr = pCsr->zTransient; if( val!=0 && val<(unsigned)pCsr->nFile ){ zStr = pCsr->azFile[val]; } sqlite3_result_text(ctx, zStr, -1, SQLITE_TRANSIENT); break; } default: sqlite3_result_int(ctx, val); |
︙ | ︙ |
Changes to src/test_quota.c.
︙ | ︙ | |||
1056 1057 1058 1059 1060 1061 1062 | if( szNew>pGroup->iLimit && pGroup->iLimit>0 ){ if( pGroup->xCallback ){ pGroup->xCallback(pFile->zFilename, &pGroup->iLimit, szNew, pGroup->pArg); } if( szNew>pGroup->iLimit && pGroup->iLimit>0 ){ iEnd = pGroup->iLimit - pGroup->iSize + pFile->iSize; | | | 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 | if( szNew>pGroup->iLimit && pGroup->iLimit>0 ){ if( pGroup->xCallback ){ pGroup->xCallback(pFile->zFilename, &pGroup->iLimit, szNew, pGroup->pArg); } if( szNew>pGroup->iLimit && pGroup->iLimit>0 ){ iEnd = pGroup->iLimit - pGroup->iSize + pFile->iSize; nmemb = (size_t)((iEnd - iOfst)/size); iEnd = iOfst + size*nmemb; szNew = pGroup->iSize - pFile->iSize + iEnd; } } pGroup->iSize = szNew; pFile->iSize = iEnd; quotaLeave(); |
︙ | ︙ |
Changes to src/test_stat.c.
︙ | ︙ | |||
320 321 322 323 324 325 326 | u32 nPayload; /* Bytes of payload total (local+overflow) */ int nLocal; /* Bytes of payload stored locally */ iOff += getVarint32(&aData[iOff], nPayload); if( p->flags==0x0D ){ u64 dummy; iOff += sqlite3GetVarint(&aData[iOff], &dummy); } | | > | | 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 | u32 nPayload; /* Bytes of payload total (local+overflow) */ int nLocal; /* Bytes of payload stored locally */ iOff += getVarint32(&aData[iOff], nPayload); if( p->flags==0x0D ){ u64 dummy; iOff += sqlite3GetVarint(&aData[iOff], &dummy); } if( nPayload>(u32)p->nMxPayload ) p->nMxPayload = nPayload; getLocalPayload(nUsable, p->flags, nPayload, &nLocal); pCell->nLocal = nLocal; assert( nLocal>=0 ); assert( nPayload>=nLocal ); assert( nLocal<=(nUsable-35) ); if( nPayload>(u32)nLocal ){ int j; int nOvfl = ((nPayload - nLocal) + nUsable-4 - 1) / (nUsable - 4); pCell->nLastOvfl = (nPayload-nLocal) - (nOvfl-1) * (nUsable-4); pCell->nOvfl = nOvfl; pCell->aOvfl = sqlite3_malloc(sizeof(u32)*nOvfl); pCell->aOvfl[0] = sqlite3Get4byte(&aData[iOff+nLocal]); for(j=1; j<nOvfl; j++){ |
︙ | ︙ | |||
374 375 376 377 378 379 380 | /* If connected to a ZIPVFS backend, override the page size and ** offset with actual values obtained from ZIPVFS. */ fd = sqlite3PagerFile(pPager); x[0] = pCsr->iPageno; if( sqlite3OsFileControl(fd, 230440, &x)==SQLITE_OK ){ pCsr->iOffset = x[0]; | | | 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 | /* If connected to a ZIPVFS backend, override the page size and ** offset with actual values obtained from ZIPVFS. */ fd = sqlite3PagerFile(pPager); x[0] = pCsr->iPageno; if( sqlite3OsFileControl(fd, 230440, &x)==SQLITE_OK ){ pCsr->iOffset = x[0]; pCsr->szPage = (int)x[1]; } } /* ** Move a statvfs cursor to the next entry in the file. */ static int statNext(sqlite3_vtab_cursor *pCursor){ |
︙ | ︙ | |||
396 397 398 399 400 401 402 | sqlite3_free(pCsr->zPath); pCsr->zPath = 0; if( pCsr->aPage[0].pPg==0 ){ rc = sqlite3_step(pCsr->pStmt); if( rc==SQLITE_ROW ){ int nPage; | | | 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 | sqlite3_free(pCsr->zPath); pCsr->zPath = 0; if( pCsr->aPage[0].pPg==0 ){ rc = sqlite3_step(pCsr->pStmt); if( rc==SQLITE_ROW ){ int nPage; u32 iRoot = (u32)sqlite3_column_int64(pCsr->pStmt, 1); sqlite3PagerPagecount(pPager, &nPage); if( nPage==0 ){ pCsr->isEof = 1; return sqlite3_reset(pCsr->pStmt); } rc = sqlite3PagerGet(pPager, iRoot, &pCsr->aPage[0].pPg); pCsr->aPage[0].iPgno = iRoot; |
︙ | ︙ |
Changes to src/test_wholenumber.c.
︙ | ︙ | |||
29 30 31 32 33 34 35 | #ifndef SQLITE_OMIT_VIRTUALTABLE /* A wholenumber cursor object */ typedef struct wholenumber_cursor wholenumber_cursor; struct wholenumber_cursor { sqlite3_vtab_cursor base; /* Base class - must be first */ | | | | 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 | #ifndef SQLITE_OMIT_VIRTUALTABLE /* A wholenumber cursor object */ typedef struct wholenumber_cursor wholenumber_cursor; struct wholenumber_cursor { sqlite3_vtab_cursor base; /* Base class - must be first */ sqlite3_int64 iValue; /* Current value */ sqlite3_int64 mxValue; /* Maximum value */ }; /* Methods for the wholenumber module */ static int wholenumberConnect( sqlite3 *db, void *pAux, int argc, const char *const*argv, |
︙ | ︙ |
Changes to src/util.c.
︙ | ︙ | |||
212 213 214 215 216 217 218 | /* Convenient short-hand */ #define UpperToLower sqlite3UpperToLower /* ** Some systems have stricmp(). Others have strcasecmp(). Because ** there is no consistency, we will define our own. ** | | > | | | < | 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 | /* Convenient short-hand */ #define UpperToLower sqlite3UpperToLower /* ** Some systems have stricmp(). Others have strcasecmp(). Because ** there is no consistency, we will define our own. ** ** IMPLEMENTATION-OF: R-30243-02494 The sqlite3_stricmp() and ** sqlite3_strnicmp() APIs allow applications and extensions to compare ** the contents of two buffers containing UTF-8 strings in a ** case-independent fashion, using the same definition of "case ** independence" that SQLite uses internally when comparing identifiers. */ int sqlite3_stricmp(const char *zLeft, const char *zRight){ register unsigned char *a, *b; a = (unsigned char *)zLeft; b = (unsigned char *)zRight; while( *a!=0 && UpperToLower[*a]==UpperToLower[*b]){ a++; b++; } return UpperToLower[*a] - UpperToLower[*b]; |
︙ | ︙ |
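The util.c change above extends the comment to cover both sqlite3_stricmp() and sqlite3_strnicmp(), the public case-independent comparison routines (requirement R-30243-02494). A short usage sketch, with made-up strings for illustration:

  #include <assert.h>
  #include "sqlite3.h"

  static void stricmp_demo(void){
    /* Case-independent comparison of complete strings. */
    assert( sqlite3_stricmp("SELECT", "select")==0 );
    assert( sqlite3_stricmp("abc", "abd")<0 );

    /* Compare only the first 6 bytes, again ignoring case. */
    assert( sqlite3_strnicmp("PRAGMA page_size", "pragma", 6)==0 );
  }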
Changes to src/vdbe.c.
︙ | ︙ | |||
2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 | ** if the P4 argument is a P4_MEM use the value of the P4 argument as ** the result. ** ** If the OPFLAG_CLEARCACHE bit is set on P5 and P1 is a pseudo-table cursor, ** then the cache of the cursor is reset prior to extracting the column. ** The first OP_Column against a pseudo-table after the value of the content ** register has changed should have this bit set. */ case OP_Column: { u32 payloadSize; /* Number of bytes in the record */ i64 payloadSize64; /* Number of bytes in the record */ int p1; /* P1 value of the opcode */ int p2; /* column number to retrieve */ VdbeCursor *pC; /* The VDBE cursor */ | > > > > > | 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 | ** if the P4 argument is a P4_MEM use the value of the P4 argument as ** the result. ** ** If the OPFLAG_CLEARCACHE bit is set on P5 and P1 is a pseudo-table cursor, ** then the cache of the cursor is reset prior to extracting the column. ** The first OP_Column against a pseudo-table after the value of the content ** register has changed should have this bit set. ** ** If the OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG bits are set on P5 when ** the result is guaranteed to only be used as the argument of a length() ** or typeof() function, respectively. The loading of large blobs can be ** skipped for length() and all content loading can be skipped for typeof(). */ case OP_Column: { u32 payloadSize; /* Number of bytes in the record */ i64 payloadSize64; /* Number of bytes in the record */ int p1; /* P1 value of the opcode */ int p2; /* column number to retrieve */ VdbeCursor *pC; /* The VDBE cursor */ |
︙ | ︙ | |||
2273 2274 2275 2276 2277 2278 2279 | if( payloadSize <= (u32)avail ){ zRec = zData; pC->aRow = (u8*)zData; }else{ pC->aRow = 0; } } | | | 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 | if( payloadSize <= (u32)avail ){ zRec = zData; pC->aRow = (u8*)zData; }else{ pC->aRow = 0; } } /* The following assert is true in all cases except when ** the database file has been corrupted externally. ** assert( zRec!=0 || avail>=payloadSize || avail>=9 ); */ szHdr = getVarint32((u8*)zData, offset); /* Make sure a corrupt database has not given us an oversize header. ** Do this now to avoid an oversize memory allocation. ** |
︙ | ︙ | |||
2348 2349 2350 2351 2352 2353 2354 | szField = sqlite3VdbeSerialTypeLen(t); offset += szField; if( offset<szField ){ /* True if offset overflows */ zIdx = &zEndHdr[1]; /* Forces SQLITE_CORRUPT return below */ break; } }else{ | | | | | 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 | szField = sqlite3VdbeSerialTypeLen(t); offset += szField; if( offset<szField ){ /* True if offset overflows */ zIdx = &zEndHdr[1]; /* Forces SQLITE_CORRUPT return below */ break; } }else{ /* If i is less that nField, then there are fewer fields in this ** record than SetNumColumns indicated there are columns in the ** table. Set the offset for any extra columns not present in ** the record to 0. This tells code below to store the default value ** for the column instead of deserializing a value from the record. */ aOffset[i] = 0; } } sqlite3VdbeMemRelease(&sMem); sMem.flags = MEM_Null; |
︙ | ︙ | |||
2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 | ** then there are not enough fields in the record to satisfy the ** request. In this case, set the value NULL or to P4 if P4 is ** a pointer to a Mem object. */ if( aOffset[p2] ){ assert( rc==SQLITE_OK ); if( zRec ){ VdbeMemRelease(pDest); sqlite3VdbeSerialGet((u8 *)&zRec[aOffset[p2]], aType[p2], pDest); }else{ | > > > > > > > > > > > > > | | | > | | | | > | | 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 | ** then there are not enough fields in the record to satisfy the ** request. In this case, set the value NULL or to P4 if P4 is ** a pointer to a Mem object. */ if( aOffset[p2] ){ assert( rc==SQLITE_OK ); if( zRec ){ /* This is the common case where the whole row fits on a single page */ VdbeMemRelease(pDest); sqlite3VdbeSerialGet((u8 *)&zRec[aOffset[p2]], aType[p2], pDest); }else{ /* This branch happens only when the row overflows onto multiple pages */ t = aType[p2]; if( (pOp->p5 & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG))!=0 && ((t>=12 && (t&1)==0) || (pOp->p5 & OPFLAG_TYPEOFARG)!=0) ){ /* Content is irrelevant for the typeof() function and for ** the length(X) function if X is a blob. So we might as well use ** bogus content rather than reading content from disk. NULL works ** for text and blob and whatever is in the payloadSize64 variable ** will work for everything else. */ zData = t<12 ? (char*)&payloadSize64 : 0; }else{ len = sqlite3VdbeSerialTypeLen(t); sqlite3VdbeMemMove(&sMem, pDest); rc = sqlite3VdbeMemFromBtree(pCrsr, aOffset[p2], len, pC->isIndex, &sMem); if( rc!=SQLITE_OK ){ goto op_column_out; } zData = sMem.z; } sqlite3VdbeSerialGet((u8*)zData, t, pDest); } pDest->enc = encoding; }else{ if( pOp->p4type==P4_MEM ){ sqlite3VdbeMemShallowCopy(pDest, pOp->p4.pMem, MEM_Static); }else{ MemSetTypeFlag(pDest, MEM_Null); |
︙ | ︙ |
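The new OP_Column comment above describes the OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG hints: when a column value is used only as the argument of length() or typeof(), the VDBE can avoid loading overflow content. The sketch below shows the kind of query that can benefit; the table and column names ("docs", "body") are invented, and whether the hints are actually applied is up to the code generator.

  #include <stdio.h>
  #include "sqlite3.h"

  /* typeof() never needs the column content and length() does not need the
  ** body of a blob, so rows with large overflow payloads can be summarized
  ** without reading their overflow pages. */
  static int show_types_and_lengths(sqlite3 *db){
    sqlite3_stmt *pStmt = 0;
    int rc = sqlite3_prepare_v2(db,
        "SELECT typeof(body), length(body) FROM docs", -1, &pStmt, 0);
    if( rc!=SQLITE_OK ) return rc;
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("%s %d\n", (const char*)sqlite3_column_text(pStmt, 0),
                        sqlite3_column_int(pStmt, 1));
    }
    return sqlite3_finalize(pStmt);
  }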
Changes to src/where.c.
︙ | ︙ | |||
682 683 684 685 686 687 688 | return 0; } #ifdef SQLITE_EBCDIC if( *pnoCase ) return 0; #endif pList = pExpr->x.pList; pLeft = pList->a[1].pExpr; | > | > > | 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 | return 0; } #ifdef SQLITE_EBCDIC if( *pnoCase ) return 0; #endif pList = pExpr->x.pList; pLeft = pList->a[1].pExpr; if( pLeft->op!=TK_COLUMN || sqlite3ExprAffinity(pLeft)!=SQLITE_AFF_TEXT || IsVirtual(pLeft->pTab) ){ /* IMP: R-02065-49465 The left-hand side of the LIKE or GLOB operator must ** be the name of an indexed column with TEXT affinity. */ return 0; } assert( pLeft->iColumn!=(-1) ); /* Because IPK never has AFF_TEXT */ pRight = pList->a[0].pExpr; |
︙ | ︙ | |||
4379 4380 4381 4382 4383 4384 4385 | explainOneScan( pParse, pOrTab, &pSubWInfo->a[0], iLevel, pLevel->iFrom, 0 ); if( (wctrlFlags & WHERE_DUPLICATES_OK)==0 ){ int iSet = ((ii==pOrWc->nTerm-1)?-1:ii); int r; r = sqlite3ExprCodeGetColumn(pParse, pTabItem->pTab, -1, iCur, | | | 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 | explainOneScan( pParse, pOrTab, &pSubWInfo->a[0], iLevel, pLevel->iFrom, 0 ); if( (wctrlFlags & WHERE_DUPLICATES_OK)==0 ){ int iSet = ((ii==pOrWc->nTerm-1)?-1:ii); int r; r = sqlite3ExprCodeGetColumn(pParse, pTabItem->pTab, -1, iCur, regRowid, 0); sqlite3VdbeAddOp4Int(v, OP_RowSetTest, regRowset, sqlite3VdbeCurrentAddr(v)+2, r, iSet); } sqlite3VdbeAddOp2(v, OP_Gosub, regReturn, iLoopBody); /* The pSubWInfo->untestedTerms flag means that this OR term ** contained one or more AND term from a notReady table. The |
︙ | ︙ |
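The where.c change above keeps the LIKE/GLOB optimization away from virtual table columns; the quoted requirement is that the left-hand side be an indexed column with TEXT affinity. A hedged illustration of a schema where the optimization can apply is below; the names are invented, and applicability also depends on the collation of the index and on the case_sensitive_like setting.

  #include "sqlite3.h"

  /* A prefix LIKE on an indexed, NOCASE TEXT column is a candidate for the
  ** optimization; a pattern with a leading wildcard is not. */
  static int like_opt_demo(sqlite3 *db){
    return sqlite3_exec(db,
        "CREATE TABLE contact(name TEXT COLLATE NOCASE, phone TEXT);"
        "CREATE INDEX contact_name ON contact(name);"
        "SELECT phone FROM contact WHERE name LIKE 'sm%';"   /* candidate */
        "SELECT phone FROM contact WHERE name LIKE '%ith';"  /* full scan */
        , 0, 0, 0);
  }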
Changes to test/backcompat.test.
︙ | ︙ | |||
23 24 25 26 27 28 29 30 31 | # for documentation of the available commands. # set testdir [file dirname $argv0] source $testdir/tester.tcl source $testdir/lock_common.tcl source $testdir/malloc_common.tcl db close | > < < < < | < < < < < < < < < < < < < < < < < < < < < | 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 | # for documentation of the available commands. # set testdir [file dirname $argv0] source $testdir/tester.tcl source $testdir/lock_common.tcl source $testdir/malloc_common.tcl source $testdir/bc_common.tcl db close if {"" == [bc_find_binaries backcompat.test]} { finish_test return } proc do_backcompat_test {rv bin1 bin2 script} { forcedelete test.db if {$bin1 != ""} { set ::bc_chan1 [launch_testfixture $bin1] } set ::bc_chan2 [launch_testfixture $bin2] |
︙ | ︙ | |||
89 90 91 92 93 94 95 | catch { close $::bc_chan2 } catch { close $::bc_chan1 } } array set ::incompatible [list] proc do_allbackcompat_test {script} { | | | 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 | catch { close $::bc_chan2 } catch { close $::bc_chan1 } } array set ::incompatible [list] proc do_allbackcompat_test {script} { foreach bin $::BC(binaries) { set nErr [set_test_counter errors] foreach dir {0 1} { set bintag [string map {testfixture {}} $bin] set bintag [string map {\.exe {}} $bintag] if {$bintag == ""} {set bintag self} set ::bcname ".$bintag.$dir." |
︙ | ︙ |
Added test/bc_common.tcl.
> > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 | proc bc_find_binaries {zCaption} { # Search for binaries to test against. Any executable files that match # our naming convention are assumed to be testfixture binaries to test # against. # set binaries [list] set pattern "[file tail [info nameofexec]]?*" if {$::tcl_platform(platform)=="windows"} { set pattern [string map {\.exe {}} $pattern] } foreach file [glob -nocomplain $pattern] { if {[file executable $file] && [file isfile $file]} {lappend binaries $file} } if {[llength $binaries]==0} { puts "WARNING: No historical binaries to test against." puts "WARNING: Omitting backwards-compatibility tests" } foreach bin $binaries { puts -nonewline "Testing against $bin - " flush stdout puts "version [get_version $bin]" } set ::BC(binaries) $binaries return $binaries } proc get_version {binary} { set chan [launch_testfixture $binary] set v [testfixture $chan { sqlite3 -version }] close $chan set v } proc do_bc_test {bin script} { forcedelete test.db set ::bc_chan [launch_testfixture $bin] proc code1 {tcl} { uplevel #0 $tcl } proc code2 {tcl} { testfixture $::bc_chan $tcl } proc sql1 sql { code1 [list db eval $sql] } proc sql2 sql { code2 [list db eval $sql] } code1 { sqlite3 db test.db } code2 { sqlite3 db test.db } set bintag [string map {testfixture {}} $bin] set bintag [string map {\.exe {}} $bintag] if {$bintag == ""} {set bintag self} set saved_prefix $::testprefix append ::testprefix ".$bintag" uplevel $script set ::testprefix $saved_prefix catch { code1 { db close } } catch { code2 { db close } } catch { close $::bc_chan } } proc do_all_bc_test {script} { foreach bin $::BC(binaries) { uplevel [list do_bc_test $bin $script] } } |
Changes to test/check.test.
︙ | ︙ | |||
113 114 115 116 117 118 119 | SELECT * FROM t1; } } {4 11.0} do_test check-2.1 { execsql { CREATE TABLE t2( | | | | | 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 | SELECT * FROM t1; } } {4 11.0} do_test check-2.1 { execsql { CREATE TABLE t2( x INTEGER CONSTRAINT one CHECK( typeof(coalesce(x,0))=="integer" ), y REAL CONSTRAINT two CHECK( typeof(coalesce(y,0.1))=='real' ), z TEXT CONSTRAINT three CHECK( typeof(coalesce(z,''))=='text' ) ); } } {} do_test check-2.2 { execsql { INSERT INTO t2 VALUES(1,2.2,'three'); SELECT * FROM t2; |
︙ | ︙ | |||
137 138 139 140 141 142 143 | SELECT * FROM t2; } } {1 2.2 three {} {} {}} do_test check-2.4 { catchsql { INSERT INTO t2 VALUES(1.1, NULL, NULL); } | | | | | 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 | SELECT * FROM t2; } } {1 2.2 three {} {} {}} do_test check-2.4 { catchsql { INSERT INTO t2 VALUES(1.1, NULL, NULL); } } {1 {constraint one failed}} do_test check-2.5 { catchsql { INSERT INTO t2 VALUES(NULL, 5, NULL); } } {1 {constraint two failed}} do_test check-2.6 { catchsql { INSERT INTO t2 VALUES(NULL, NULL, 3.14159); } } {1 {constraint three failed}} ifcapable subquery { do_test check-3.1 { catchsql { CREATE TABLE t3( x, y, z, CHECK( x<(SELECT min(x) FROM t1) ) |
︙ | ︙ |
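The check.test changes above exercise named CHECK constraints built around typeof(), and this release reports the constraint name in the error ("constraint one failed"). A minimal C sketch of triggering such a failure; the schema is taken from the test, and the exact message text can differ between SQLite versions.

  #include <stdio.h>
  #include "sqlite3.h"

  static void named_check_demo(sqlite3 *db){
    char *zErr = 0;

    sqlite3_exec(db,
        "CREATE TABLE t2("
        "  x INTEGER CONSTRAINT one CHECK( typeof(coalesce(x,0))=='integer' )"
        ");", 0, 0, 0);

    /* 1.1 cannot be stored as an integer, so typeof(x) is 'real' and the
    ** CHECK constraint named "one" fails. */
    if( sqlite3_exec(db, "INSERT INTO t2 VALUES(1.1);", 0, 0, &zErr)!=SQLITE_OK ){
      printf("insert failed: %s\n", zErr);
      sqlite3_free(zErr);
    }
  }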
Changes to test/dbstatus2.test.
1 2 3 4 5 6 7 8 9 10 11 | # 2011 September 20 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #*********************************************************************** # | | | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 | # 2011 September 20 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #*********************************************************************** # # Tests for the sqlite3_db_status() function # set testdir [file dirname $argv0] source $testdir/tester.tcl set ::testprefix dbstatus2 |
︙ | ︙ | |||
28 29 30 31 32 33 34 35 36 37 38 39 40 41 | } proc db_hit_miss {db {reset 0}} { set nHit [sqlite3_db_status $db CACHE_HIT $reset] set nMiss [sqlite3_db_status $db CACHE_MISS $reset] list $nHit $nMiss } do_test 1.1 { db close sqlite3 db test.db expr {[file size test.db] / 1024} } 6 | > > > > | 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 | } proc db_hit_miss {db {reset 0}} { set nHit [sqlite3_db_status $db CACHE_HIT $reset] set nMiss [sqlite3_db_status $db CACHE_MISS $reset] list $nHit $nMiss } proc db_write {db {reset 0}} { sqlite3_db_status $db CACHE_WRITE $reset } do_test 1.1 { db close sqlite3 db test.db expr {[file size test.db] / 1024} } 6 |
︙ | ︙ | |||
68 69 70 71 72 73 74 | set len [string length [read $fd]] close $fd set len } 600 do_test 1.8 { sqlite3_db_status db CACHE_HIT 0 } {0 2 0} do_test 1.9 { sqlite3_db_status db CACHE_MISS 0 } {0 1 0} | > > > > > > > > | > > > > > > > > > > > | 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 | set len [string length [read $fd]] close $fd set len } 600 do_test 1.8 { sqlite3_db_status db CACHE_HIT 0 } {0 2 0} do_test 1.9 { sqlite3_db_status db CACHE_MISS 0 } {0 1 0} do_test 2.1 { db_write db } {0 0 0} do_test 2.2 { execsql { INSERT INTO t1 VALUES(4, randomblob(600)) } db_write db } {0 4 0} do_test 2.3 { db_write db 1 } {0 4 0} do_test 2.4 { db_write db 0 } {0 0 0} do_test 2.5 { db_write db 1 } {0 0 0} do_test 2.6 { execsql { PRAGMA journal_mode = WAL } db_write db 1 } {0 1 0} do_test 2.7 { execsql { INSERT INTO t1 VALUES(5, randomblob(600)) } db_write db } {0 4 0} do_test 2.8 { db_write db 1 } {0 4 0} do_test 2.9 { db_write db 0 } {0 0 0} finish_test |
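The dbstatus2.test additions above cover the SQLITE_DBSTATUS_CACHE_MISS and SQLITE_DBSTATUS_CACHE_WRITE counters. From C they are read through the standard sqlite3_db_status() interface, as in the sketch below; only the surrounding function and variable names are invented, and the high-water element is reported as zero for these counters (the tests above show it as 0).

  #include <stdio.h>
  #include "sqlite3.h"

  static void report_cache_stats(sqlite3 *db){
    int cur, hiwtr;

    /* Pages found in the pager cache. */
    sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_HIT, &cur, &hiwtr, 0);
    printf("cache hits:   %d\n", cur);

    /* Pages that had to be read from the database file. */
    sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_MISS, &cur, &hiwtr, 0);
    printf("cache misses: %d\n", cur);

    /* Pages written back to the database file; the final argument of 1
    ** resets the counter after it has been read. */
    sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_WRITE, &cur, &hiwtr, 1);
    printf("cache writes: %d\n", cur);
  }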
Changes to test/e_insert.test.
︙ | ︙ | |||
46 47 48 49 50 51 52 | CREATE TABLE a4(c UNIQUE, d); } {} proc do_insert_tests {args} { uplevel do_select_tests $args } | | | 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 | CREATE TABLE a4(c UNIQUE, d); } {} proc do_insert_tests {args} { uplevel do_select_tests $args } # EVIDENCE-OF: R-21350-31508 -- syntax diagram insert-stmt # do_insert_tests e_insert-0 { 1 "INSERT INTO a1 DEFAULT VALUES" {} 2 "INSERT INTO main.a1 DEFAULT VALUES" {} 3 "INSERT OR ROLLBACK INTO main.a1 DEFAULT VALUES" {} 4 "INSERT OR ROLLBACK INTO a1 DEFAULT VALUES" {} 5 "INSERT OR ABORT INTO main.a1 DEFAULT VALUES" {} |
︙ | ︙ | |||
119 120 121 122 123 124 125 126 127 128 129 130 131 132 | 64 "INSERT OR REPLACE INTO a1 (b, a) SELECT c, b FROM a2" {} 65 "INSERT OR FAIL INTO main.a1 (b, a) SELECT c, b FROM a2" {} 66 "INSERT OR FAIL INTO a1 (b, a) SELECT c, b FROM a2" {} 67 "INSERT OR FAIL INTO main.a1 (b, a) SELECT c, b FROM a2" {} 68 "INSERT OR IGNORE INTO a1 (b, a) SELECT c, b FROM a2" {} 69 "REPLACE INTO a1 (b, a) SELECT c, b FROM a2" {} 70 "REPLACE INTO main.a1 (b, a) SELECT c, b FROM a2" {} } delete_all_data # EVIDENCE-OF: R-20288-20462 The first form (with the "VALUES" keyword) # creates a single new row in an existing table. # | > > > > > > > > > > > > > > | 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 | 64 "INSERT OR REPLACE INTO a1 (b, a) SELECT c, b FROM a2" {} 65 "INSERT OR FAIL INTO main.a1 (b, a) SELECT c, b FROM a2" {} 66 "INSERT OR FAIL INTO a1 (b, a) SELECT c, b FROM a2" {} 67 "INSERT OR FAIL INTO main.a1 (b, a) SELECT c, b FROM a2" {} 68 "INSERT OR IGNORE INTO a1 (b, a) SELECT c, b FROM a2" {} 69 "REPLACE INTO a1 (b, a) SELECT c, b FROM a2" {} 70 "REPLACE INTO main.a1 (b, a) SELECT c, b FROM a2" {} 71 "INSERT INTO a1 (b, a) VALUES(1, 2),(3,4)" {} 72 "INSERT INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {} 73 "INSERT OR ROLLBACK INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {} 74 "INSERT OR ROLLBACK INTO a1 (b, a) VALUES(1, 2),(3,4)" {} 75 "INSERT OR ABORT INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {} 76 "INSERT OR ABORT INTO a1 (b, a) VALUES(1, 2),(3,4)" {} 77 "INSERT OR REPLACE INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {} 78 "INSERT OR REPLACE INTO a1 (b, a) VALUES(1, 2),(3,4)" {} 79 "INSERT OR FAIL INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {} 80 "INSERT OR FAIL INTO a1 (b, a) VALUES(1, 2),(3,4)" {} 81 "INSERT OR FAIL INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {} 82 "INSERT OR IGNORE INTO a1 (b, a) VALUES(1, 2),(3,4)" {} 83 "REPLACE INTO a1 (b, a) VALUES(1, 2),(3,4)" {} 84 "REPLACE INTO main.a1 (b, a) VALUES(1, 2),(3,4)" {} } delete_all_data # EVIDENCE-OF: R-20288-20462 The first form (with the "VALUES" keyword) # creates a single new row in an existing table. # |
︙ | ︙ |
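Tests 71 through 84 added to e_insert.test above use the multi-row VALUES form of INSERT that this release accepts. A short C sketch using an invented table:

  #include "sqlite3.h"

  /* Insert several rows with a single statement. */
  static int multi_row_insert(sqlite3 *db){
    return sqlite3_exec(db,
        "CREATE TABLE IF NOT EXISTS pts(x, y);"
        "INSERT INTO pts(x, y) VALUES(1, 2), (3, 4), (5, 6);",
        0, 0, 0);
  }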
Changes to test/fts3_common.tcl.
︙ | ︙ | |||
9 10 11 12 13 14 15 16 17 18 19 20 21 22 | # #*********************************************************************** # # This file contains common code used the fts3 tests. At one point # equivalent functionality was implemented in C code. But it is easier # to use Tcl. # #------------------------------------------------------------------------- # USAGE: fts3_integrity_check TBL # # This proc is used to verify that the full-text index is consistent with # the contents of the fts3 table. In other words, it checks that the # data in the %_contents table matches that in the %_segdir and %_segments | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 | # #*********************************************************************** # # This file contains common code used the fts3 tests. At one point # equivalent functionality was implemented in C code. But it is easier # to use Tcl. # #------------------------------------------------------------------------- # INSTRUCTIONS # # The following commands are available: # # fts3_build_db_1 N # Using database handle [db] create an FTS4 table named t1 and populate # it with N rows of data. N must be less than 10,000. Refer to the # header comments above the proc implementation below for details. # # fts3_build_db_2 N # Using database handle [db] create an FTS4 table named t2 and populate # it with N rows of data. N must be less than 100,000. Refer to the # header comments above the proc implementation below for details. # # fts3_integrity_check TBL # TBL must be an FTS table in the database currently opened by handle # [db]. This proc loads and tokenizes all documents within the table, # then checks that the current contents of the FTS index matches the # results. # # fts3_terms TBL WHERE # Todo. # # fts3_doclist TBL TERM WHERE # Todo. # # # #------------------------------------------------------------------------- # USAGE: fts3_build_db_1 SWITCHES N # # Build a sample FTS table in the database opened by database connection # [db]. The name of the new table is "t1". # proc fts3_build_db_1 {args} { set default(-module) fts4 set nArg [llength $args] if {($nArg%2)==0} { error "wrong # args: should be \"fts3_build_db_1 ?switches? 
n\"" } set n [lindex $args [expr $nArg-1]] array set opts [array get default] array set opts [lrange $args 0 [expr $nArg-2]] foreach k [array names opts] { if {0==[info exists default($k)]} { error "unknown option: $k" } } if {$n > 10000} {error "n must be <= 10000"} db eval "CREATE VIRTUAL TABLE t1 USING $opts(-module) (x, y)" set xwords [list zero one two three four five six seven eight nine ten] set ywords [list alpha beta gamma delta epsilon zeta eta theta iota kappa] for {set i 0} {$i < $n} {incr i} { set x "" set y "" set x [list] lappend x [lindex $xwords [expr ($i / 1000) % 10]] lappend x [lindex $xwords [expr ($i / 100) % 10]] lappend x [lindex $xwords [expr ($i / 10) % 10]] lappend x [lindex $xwords [expr ($i / 1) % 10]] set y [list] lappend y [lindex $ywords [expr ($i / 1000) % 10]] lappend y [lindex $ywords [expr ($i / 100) % 10]] lappend y [lindex $ywords [expr ($i / 10) % 10]] lappend y [lindex $ywords [expr ($i / 1) % 10]] db eval { INSERT INTO t1(docid, x, y) VALUES($i, $x, $y) } } } #------------------------------------------------------------------------- # USAGE: fts3_build_db_2 N ARGS # # Build a sample FTS table in the database opened by database connection # [db]. The name of the new table is "t2". # proc fts3_build_db_2 {args} { set default(-module) fts4 set default(-extra) "" set nArg [llength $args] if {($nArg%2)==0} { error "wrong # args: should be \"fts3_build_db_1 ?switches? n\"" } set n [lindex $args [expr $nArg-1]] array set opts [array get default] array set opts [lrange $args 0 [expr $nArg-2]] foreach k [array names opts] { if {0==[info exists default($k)]} { error "unknown option: $k" } } if {$n > 100000} {error "n must be <= 100000"} set sql "CREATE VIRTUAL TABLE t2 USING $opts(-module) (content" if {$opts(-extra) != ""} { append sql ", " $opts(-extra) } append sql ")" db eval $sql set chars [list a b c d e f g h i j k l m n o p q r s t u v w x y z ""] for {set i 0} {$i < $n} {incr i} { set word "" set nChar [llength $chars] append word [lindex $chars [expr {($i / 1) % $nChar}]] append word [lindex $chars [expr {($i / $nChar) % $nChar}]] append word [lindex $chars [expr {($i / ($nChar*$nChar)) % $nChar}]] db eval { INSERT INTO t2(docid, content) VALUES($i, $word) } } } #------------------------------------------------------------------------- # USAGE: fts3_integrity_check TBL # # This proc is used to verify that the full-text index is consistent with # the contents of the fts3 table. In other words, it checks that the # data in the %_contents table matches that in the %_segdir and %_segments |
︙ | ︙ | |||
42 43 44 45 46 47 48 49 50 51 52 53 54 55 | # # proc fts3_integrity_check {tbl} { fts3_read2 $tbl 1 A foreach zTerm [array names A] { foreach doclist $A($zTerm) { set docid 0 while {[string length $doclist]>0} { set iCol 0 set iPos 0 set lPos [list] set lCol [list] | > | 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 | # # proc fts3_integrity_check {tbl} { fts3_read2 $tbl 1 A foreach zTerm [array names A] { #puts $zTerm foreach doclist $A($zTerm) { set docid 0 while {[string length $doclist]>0} { set iCol 0 set iPos 0 set lPos [list] set lCol [list] |
︙ | ︙ | |||
93 94 95 96 97 98 99 | set sql {SELECT fts3_tokenizer_test('simple', $c)} foreach {pos term dummy} [db one $sql] { if {![info exists C($iDoc,$iCol,$pos)]} { set es "Error at docid=$iDoc col=$iCol pos=$pos. Index is missing" lappend errors $es } else { | | | 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 | set sql {SELECT fts3_tokenizer_test('simple', $c)} foreach {pos term dummy} [db one $sql] { if {![info exists C($iDoc,$iCol,$pos)]} { set es "Error at docid=$iDoc col=$iCol pos=$pos. Index is missing" lappend errors $es } else { if {[string compare $C($iDoc,$iCol,$pos) $term]} { set es "Error at docid=$iDoc col=$iCol pos=$pos. Index " append es "has \"$C($iDoc,$iCol,$pos)\", document has \"$term\"" lappend errors $es } unset C($iDoc,$iCol,$pos) } } |
︙ | ︙ | |||
229 230 231 232 233 234 235 | while {[string length $blob] > 0} { set nPrefix [gobble_varint blob] set nSuffix [gobble_varint blob] set zTerm [string range $zPrev 0 [expr $nPrefix-1]] append zTerm [gobble_string blob $nSuffix] | > | > > | < | 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 | while {[string length $blob] > 0} { set nPrefix [gobble_varint blob] set nSuffix [gobble_varint blob] set zTerm [string range $zPrev 0 [expr $nPrefix-1]] append zTerm [gobble_string blob $nSuffix] set nDoclist [gobble_varint blob] set doclist [gobble_string blob $nDoclist] lappend terms $zTerm $doclist set zPrev $zTerm } return $terms } proc fts3_read2 {tbl where varname} { upvar $varname a array unset a db eval " SELECT start_block, leaves_end_block, root FROM ${tbl}_segdir WHERE $where ORDER BY level ASC, idx DESC " { set c 0 binary scan $root c c if {$c==0} { foreach {t d} [fts3_readleaf $root] { lappend a($t) $d } } else { db eval " SELECT block FROM ${tbl}_segments WHERE blockid>=$start_block AND blockid<=$leaves_end_block ORDER BY blockid " { foreach {t d} [fts3_readleaf $block] { lappend a($t) $d } } } } } proc fts3_read {tbl where varname} { upvar $varname a |
︙ | ︙ |
Added test/fts4check.test.
> > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 | # 2012 March 26 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #************************************************************************* # This file implements regression tests for SQLite library. The # focus of this script is testing the FTS 'integrity-check' function, # used to check if the current FTS index accurately reflects the content # of the table. # set testdir [file dirname $argv0] source $testdir/tester.tcl source $testdir/fts3_common.tcl set ::testprefix fts4check # If SQLITE_ENABLE_FTS3 is defined, omit this file. ifcapable !fts3 { finish_test return } # Run the integrity-check on FTS table $tbl using database handle $db. If # the integrity-check passes, return "ok". Otherwise, throw an exception. # proc fts_integrity {db tbl} { $db eval "INSERT INTO $tbl ($tbl) VALUES('integrity-check')" return "ok" } #------------------------------------------------------------------------- # Test cases 1.* # # 1.0: Build a reasonably sized FTS table (5000 rows). # # 1.1: Run the integrity check code to check it passes. # # 1.2: Make a series of minor changes to the underlying FTS data structures # (e.g. delete or insert a row from the %_content table). Check that # this causes the integrity-check code to fail. # # Build an FTS table and check the integrity-check passes. # do_test 1.0 { fts3_build_db_1 5000 } {} do_test 1.1 { fts_integrity db t1 } {ok} # Mess around with the underlying tables. Check that this causes the # integrity-check test to fail. # foreach {tn disruption} { 1 { INSERT INTO t1_content(docid, c0x, c1y) VALUES(NULL, 'a', 'b'); } 2 { DELETE FROM t1_content WHERE docid = (SELECT max(docid) FROM t1_content); } 3 { DELETE FROM t1_segdir WHERE level=0 AND idx=( SELECT max(idx) FROM t1_segdir WHERE level=0 ); } } { do_execsql_test 1.2.1.$tn "BEGIN; $disruption" do_catchsql_test 1.2.2.$tn { INSERT INTO t1 (t1) VALUES('integrity-check') } {1 {database disk image is malformed}} do_execsql_test 1.2.3.$tn "ROLLBACK" } do_test 1.3 { fts_integrity db t1 } {ok} #------------------------------------------------------------------------- # Test cases 2.* # # 2.0: Build a reasonably sized FTS table (20000 rows) that includes # prefix indexes. # # 2.1: Run the integrity check code to check it passes. # # 2.2: Make a series of minor changes to the underlying FTS data structures # (e.g. delete or insert a row from the %_content table). Check that # this causes the integrity-check code to fail. 
# do_test 2.0 { fts3_build_db_2 -extra {prefix="3,1"} 20000 } {} do_test 2.1 { fts_integrity db t2 } {ok} foreach {tn disruption} { 1 { INSERT INTO t2_content VALUES(NULL, 'xyz') } 3 { DELETE FROM t2_segdir WHERE level=0 AND idx=( SELECT max(idx) FROM t2_segdir WHERE level=1024 ); } } { do_execsql_test 2.2.1.$tn "BEGIN; $disruption" do_catchsql_test 2.2.2.$tn { INSERT INTO t2 (t2) VALUES('integrity-check') } {1 {database disk image is malformed}} do_execsql_test 2.2.3.$tn "ROLLBACK" } #------------------------------------------------------------------------- # Test cases 3.* # # 3.0: Build a reasonably sized FTS table (5000 rows) that includes # prefix indexes and uses the languageid= feature. # # 3.1: Run the integrity check code to check it passes. # # 3.2: Make a series of minor changes to the underlying FTS data structures # (e.g. delete or insert a row from the %_content table). Check that # this causes the integrity-check code to fail. # do_test 3.0 { reset_db fts3_build_db_1 5000 execsql { CREATE VIRTUAL TABLE t3 USING fts4(x, y, prefix="2,3", languageid=langid); } foreach docid [execsql {SELECT docid FROM t1 ORDER BY 1 ASC}] { execsql { INSERT INTO t3(x, y, langid) SELECT x, y, (docid%9)*4 FROM t1 WHERE docid=$docid; } } } {} do_test 3.1 { fts_integrity db t3 } {ok} foreach {tn disruption} { 1 { INSERT INTO t3_content(c0x, c1y, langid) VALUES(NULL, 'a', 0); } 2 { UPDATE t3_content SET langid=langid+1 WHERE rowid = ( SELECT max(rowid) FROM t3_content ) } } { do_execsql_test 3.2.1.$tn "BEGIN; $disruption" do_catchsql_test 3.2.2.$tn { INSERT INTO t3 (t3) VALUES('integrity-check') } {1 {database disk image is malformed}} do_execsql_test 3.2.3.$tn "ROLLBACK" } finish_test |
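The new fts4check.test above drives the FTS 'integrity-check' command, which verifies that the inverted index matches the table content and reports SQLITE_CORRUPT ("database disk image is malformed") on a mismatch. From C the command is issued as an ordinary INSERT into the hidden column, sketched here for a hypothetical FTS4 table named "docs":

  #include <stdio.h>
  #include "sqlite3.h"

  /* Returns SQLITE_OK if the full-text index for "docs" is consistent with
  ** the table content, or SQLITE_CORRUPT if the check finds a mismatch. */
  static int fts_check(sqlite3 *db){
    char *zErr = 0;
    int rc = sqlite3_exec(db,
        "INSERT INTO docs(docs) VALUES('integrity-check');", 0, 0, &zErr);
    if( rc!=SQLITE_OK ){
      fprintf(stderr, "integrity-check: %s\n", zErr ? zErr : "error");
      sqlite3_free(zErr);
    }
    return rc;
  }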
Changes to test/fts4langid.test.
︙ | ︙ | |||
468 469 470 471 472 473 474 | do_execsql_test 5.4.$lid.3 { SELECT count(*) FROM t6_segdir; SELECT count(*) FROM t6_segments; } {8 0} do_execsql_test 5.4.$lid.4 { | | > | < < | 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 | do_execsql_test 5.4.$lid.3 { SELECT count(*) FROM t6_segdir; SELECT count(*) FROM t6_segments; } {8 0} do_execsql_test 5.4.$lid.4 { INSERT INTO t6(t6) VALUES('merge=100,3'); INSERT INTO t6(t6) VALUES('merge=100,3'); SELECT docid FROM t6 WHERE t6 MATCH '"zero zero"' AND lid=$lid; } {1 2 5} do_execsql_test 5.4.$lid.5 { SELECT count(*) FROM t6_segdir; SELECT count(*) FROM t6_segments; } {4 4} } finish_test |
Added test/fts4merge.test.
> > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 | # 2012 March 06 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #************************************************************************* # This file implements regression tests for SQLite library. The # focus of this script is testing the incremental merge function. # set testdir [file dirname $argv0] source $testdir/tester.tcl source $testdir/fts3_common.tcl # If SQLITE_ENABLE_FTS3 is defined, omit this file. 
ifcapable !fts3 { finish_test return } proc fts3_integrity_check {tbl} { db eval "INSERT INTO $tbl ($tbl) VALUES('integrity-check')" return "ok" } foreach mod {fts3 fts4} { set ::testprefix fts4merge-$mod reset_db #------------------------------------------------------------------------- # Test cases 1.* # do_test 1.0 { fts3_build_db_1 -module $mod 1004 } {} do_test 1.1 { fts3_integrity_check t1 } {ok} do_execsql_test 1.1 { SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level } { 0 {0 1 2 3 4 5 6 7 8 9 10 11} 1 {0 1 2 3 4 5 6 7 8 9 10 11 12 13} 2 {0 1 2} } for {set i 0} {$i<20} {incr i} { do_execsql_test 1.2.$i.1 { INSERT INTO t1(t1) VALUES('merge=1') } do_test 1.2.$i.2 { fts3_integrity_check t1 } ok do_execsql_test 1.2.$i.3 { SELECT docid FROM t1 WHERE t1 MATCH 'zero one two three' } {123 132 213 231 312 321} } do_execsql_test 1.3 { SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level } { 0 {0 1 2 3} 1 {0 1 2 3 4 5 6} 2 {0 1 2 3} } for {set i 0} {$i<100} {incr i} { do_execsql_test 1.4.$i { INSERT INTO t1(t1) VALUES('merge=1,4') } do_test 1.4.$i.2 { fts3_integrity_check t1 } ok do_execsql_test 1.4.$i.3 { SELECT docid FROM t1 WHERE t1 MATCH 'zero one two three' } {123 132 213 231 312 321} } do_execsql_test 1.5 { SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level } { 2 {0 1} 3 0 } #------------------------------------------------------------------------- # Test cases 2.* test that errors in the xxx part of the 'merge=xxx' are # handled correctly. # do_execsql_test 2.0 "CREATE VIRTUAL TABLE t2 USING $mod" foreach {tn arg} { 1 {merge=abc} 2 {merge=%%%} 3 {merge=,} 4 {merge=5,} 5 {merge=6,%} 6 {merge=6,six} 7 {merge=6,1} 8 {merge=6,0} } { do_catchsql_test 2.$tn { INSERT INTO t2(t2) VALUES($arg); } {1 {SQL logic error or missing database}} } #------------------------------------------------------------------------- # Test cases 3.* # do_test 3.0 { reset_db execsql { PRAGMA page_size = 512 } fts3_build_db_2 -module $mod 30040 } {} do_test 3.1 { fts3_integrity_check t2 } {ok} do_execsql_test 3.2 { SELECT level, group_concat(idx, ' ') FROM t2_segdir GROUP BY level } { 0 {0 1 2 3 4 5 6} 1 {0 1 2 3 4} 2 {0 1 2 3 4} 3 {0 1 2 3 4 5 6} } do_execsql_test 3.3 { INSERT INTO t2(t2) VALUES('merge=1000000,2'); SELECT level, group_concat(idx, ' ') FROM t2_segdir GROUP BY level } { 0 0 2 0 3 0 4 0 6 0 } #------------------------------------------------------------------------- # Test cases 4.* # reset_db do_execsql_test 4.1 " PRAGMA page_size = 512; CREATE VIRTUAL TABLE t4 USING $mod; PRAGMA main.page_size; " {512} do_test 4.2 { foreach x {a c b d e f g h i j k l m n o p} { execsql "INSERT INTO t4 VALUES('[string repeat $x 600]')" } execsql {SELECT level, group_concat(idx, ' ') FROM t4_segdir GROUP BY level} } {0 {0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15}} foreach {tn expect} { 1 "0 {0 1 2 3 4 5 6 7 8 9 10 11 12 13} 1 0" 2 "0 {0 1 2 3 4 5 6 7 8 9 10 11 12} 1 0" 3 "0 {0 1 2 3 4 5 6 7 8 9 10 11} 1 0" 4 "0 {0 1 2 3 4 5 6 7 8 9 10} 1 0" 5 "0 {0 1 2 3 4 5 6 7 8 9} 1 0" 6 "0 {0 1 2 3 4 5 6 7 8} 1 0" 7 "0 {0 1 2 3 4 5 6 7} 1 0" 8 "0 {0 1 2 3 4 5 6} 1 0" 9 "0 {0 1 2 3 4 5} 1 0" } { do_execsql_test 4.3.$tn { INSERT INTO t4(t4) VALUES('merge=1,16'); SELECT level, group_concat(idx, ' ') FROM t4_segdir GROUP BY level; } $expect } do_execsql_test 4.4.1 { SELECT quote(value) FROM t4_stat WHERE rowid=1 } {X'0006'} do_execsql_test 4.4.2 { DELETE FROM t4_stat WHERE rowid=1; INSERT INTO t4(t4) VALUES('merge=1,12'); SELECT level, group_concat(idx, ' ') FROM t4_segdir GROUP BY level; } "0 
{0 1 2 3 4 5} 1 0" #------------------------------------------------------------------------- # Test cases 5.* # # Test that if a crisis-merge occurs that disrupts an ongoing incremental # merge, the next call to "merge=A,B" identifies this and starts a new # incremental merge. There are two scenarios: # # * There are less segments on the input level that the disrupted # incremental merge operated on, or # # * Sufficient segments exist on the input level but the segments # contain keys smaller than the largest key in the potential output # segment. # do_test 5.1 { reset_db fts3_build_db_1 -module $mod 1000 } {} do_execsql_test 5.2 { SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level; } { 0 {0 1 2 3 4 5 6 7} 1 {0 1 2 3 4 5 6 7 8 9 10 11 12 13} 2 {0 1 2} } do_execsql_test 5.3 { INSERT INTO t1(t1) VALUES('merge=1,5'); INSERT INTO t1(t1) VALUES('merge=1,5'); SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level; } { 0 {0 1 2} 1 {0 1 2 3 4 5 6 7 8 9 10 11 12 13 14} 2 {0 1 2 3} } do_execsql_test 5.4 {SELECT quote(value) from t1_stat WHERE rowid=1} {X'0105'} do_test 5.5 { foreach docid [execsql {SELECT docid FROM t1}] { execsql {INSERT INTO t1 SELECT * FROM t1 WHERE docid=$docid} } } {} do_execsql_test 5.6 {SELECT quote(value) from t1_stat WHERE rowid=1} {X'0105'} do_execsql_test 5.7 { SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level; SELECT quote(value) from t1_stat WHERE rowid=1; } { 0 {0 1 2 3 4 5 6 7 8 9 10} 1 {0 1 2 3 4 5 6 7 8 9 10 11 12} 2 {0 1 2 3 4 5 6 7} X'0105' } do_execsql_test 5.8 { INSERT INTO t1(t1) VALUES('merge=1,6'); INSERT INTO t1(t1) VALUES('merge=1,6'); SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level; SELECT quote(value) from t1_stat WHERE rowid=1; } { 0 {0 1 2 3 4} 1 {0 1 2 3 4 5 6 7 8 9 10 11 12 13} 2 {0 1 2 3 4 5 6 7 8} X'0106' } do_test 5.8.1 { fts3_integrity_check t1 } ok do_test 5.9 { set L [expr 16*16*7 + 16*3 + 12] foreach docid [execsql { SELECT docid FROM t1 UNION ALL SELECT docid FROM t1 LIMIT $L }] { execsql {INSERT INTO t1 SELECT * FROM t1 WHERE docid=$docid} } } {} do_execsql_test 5.10 { SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level; SELECT quote(value) from t1_stat WHERE rowid=1; } { 0 0 1 {0 1} 2 0 3 0 X'0106' } do_execsql_test 5.11 { INSERT INTO t1(t1) VALUES('merge=1,6'); SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level; SELECT quote(value) from t1_stat WHERE rowid=1; } { 0 0 1 {0 1} 2 0 3 0 X'' } #------------------------------------------------------------------------- # Test cases 6.* # # At one point the following test caused an assert() to fail (because the # second 'merge=1,2' operation below actually "merges" a single input # segment, which was unexpected). # do_test 6.1 { reset_db set a [string repeat a 900] set b [string repeat b 900] set c [string repeat c 900] set d [string repeat d 900] execsql "CREATE VIRTUAL TABLE t1 USING $mod" execsql { BEGIN; INSERT INTO t1 VALUES($a); INSERT INTO t1 VALUES($b); COMMIT; BEGIN; INSERT INTO t1 VALUES($c); INSERT INTO t1 VALUES($d); COMMIT; } execsql { INSERT INTO t1(t1) VALUES('merge=1,2'); INSERT INTO t1(t1) VALUES('merge=1,2'); } } {} #------------------------------------------------------------------------- # Test cases 7.* # # Test that the value returned by sqlite3_total_changes() increases by # 1 following a no-op "merge=A,B", or by more than 1 if actual work is # performed. 
# do_test 7.0 { reset_db fts3_build_db_1 -module $mod 1000 } {} do_execsql_test 7.1 { SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level } { 0 {0 1 2 3 4 5 6 7} 1 {0 1 2 3 4 5 6 7 8 9 10 11 12 13} 2 {0 1 2} } do_test 7.2 { set x [db total_changes] execsql { INSERT INTO t1(t1) VALUES('merge=2,10') } expr { ([db total_changes] - $x)>1 } } {1} do_test 7.3 { set x [db total_changes] execsql { INSERT INTO t1(t1) VALUES('merge=200,10') } expr { ([db total_changes] - $x)>1 } } {1} do_test 7.4 { set x [db total_changes] execsql { INSERT INTO t1(t1) VALUES('merge=200,10') } expr { ([db total_changes] - $x)>1 } } {0} do_test 7.5 { set x [db total_changes] execsql { INSERT INTO t1(t1) VALUES('merge=200,10') } expr { ([db total_changes] - $x)>1 } } {0} } finish_test |
Added test/fts4merge2.test.
> > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 | set testdir [file dirname $argv0] source $testdir/tester.tcl source $testdir/fts3_common.tcl source $testdir/malloc_common.tcl set ::testprefix fts4merge2 # If SQLITE_ENABLE_FTS3 is defined, omit this file. ifcapable !fts3 { finish_test return } do_test 1.0 { fts3_build_db_1 1000 faultsim_save_and_close } {} do_faultsim_test 1.1 -faults oom-* -prep { faultsim_restore_and_reopen } -body { execsql { INSERT INTO t1(t1) VALUES('merge=32,4') } } -test { faultsim_test_result {0 {}} } do_faultsim_test 1.2 -faults oom-t* -prep { if {$iFail<100} {set iFail 803} faultsim_restore_and_reopen } -body { execsql { INSERT INTO t1(t1) VALUES('merge=1,2') } execsql { INSERT INTO t1(t1) VALUES('merge=1,2') } } -test { faultsim_test_result {0 {}} } finish_test |
Added test/fts4merge3.test.
> > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 | # 2012 March 06 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #************************************************************************* # This file implements regression tests for SQLite library. The # focus of this script is testing the incremental merge function. # set testdir [file dirname $argv0] source $testdir/tester.tcl source $testdir/fts3_common.tcl source $testdir/lock_common.tcl source $testdir/bc_common.tcl set ::testprefix fts4merge3 if {"" == [bc_find_binaries backcompat.test]} { finish_test return } db close do_all_bc_test { sql2 { PRAGMA page_size = 512 } if { 0==[catch { sql2 { CREATE VIRTUAL TABLE x USING fts4 } } ] } { # Build a large database. set msg "this takes around 12 seconds" do_test "1.1 ($msg)" { fts3_build_db_2 20000 } {} # Run some queries on it, using the old and new versions. do_test 1.2 { sql1 "SELECT docid FROM t2 WHERE t2 MATCH 'abc'" } {1485} do_test 1.3 { sql2 "SELECT docid FROM t2 WHERE t2 MATCH 'abc'" } {1485} do_test 1.4 { sql2 "PRAGMA page_count" } {1286} do_test 1.5 { sql2 { SELECT level, count(*) FROM t2_segdir GROUP BY level ORDER BY 1 } } [list 0 15 1 1 2 14 3 4] # Run some incr-merge operations on the db. for {set i 0} {$i<10} {incr i} { do_test 1.6.$i.1 { sql1 { INSERT INTO t2(t2) VALUES('merge=2,2') } } {} do_test 1.6.$i.2 { sql2 "SELECT docid FROM t2 WHERE t2 MATCH 'abc'" } {1485} } do_test 1.7 { sql2 { SELECT level, count(*) FROM t2_segdir GROUP BY level ORDER BY 1 } } [list 0 1 2 18 3 5] # Using the old connection, insert many rows. do_test 1.8 { for {set i 0} {$i < 1500} {incr i} { sql2 "INSERT INTO t2 SELECT content FROM t2 WHERE docid = $i" } } {} do_test 1.9 { sql2 { SELECT level, count(*) FROM t2_segdir GROUP BY level ORDER BY 1 } } [list 0 13 1 13 2 5 3 6] # Run a big incr-merge operation on the db. do_test 1.10 { sql1 { INSERT INTO t2(t2) VALUES('merge=2000,2') } } {} do_test 1.11 { sql2 "SELECT docid FROM t2 WHERE t2 MATCH 'abc'" } {1485 21485} do_test 1.12 { for {set i 0} {$i < 1500} {incr i} { sql2 "INSERT INTO t2 SELECT content FROM t2 WHERE docid = $i" } } {} do_test 1.13 { sql2 "SELECT docid FROM t2 WHERE t2 MATCH 'abc'" } {1485 21485 22985} do_test 1.14 { sql2 "INSERT INTO t2(t2) VALUES('optimize')" sql2 "SELECT docid FROM t2 WHERE t2 MATCH 'abc'" } {1485 21485 22985} do_test 1.15 { sql2 { SELECT level, count(*) FROM t2_segdir GROUP BY level ORDER BY 1 } } {6 1} } } finish_test |
Changes to test/func.test.
︙ | ︙ | |||
1242 1243 1244 1245 1246 1247 1248 1249 1250 | db eval { CREATE TABLE t28(x, y DEFAULT(nosuchfunc(1))); } catchsql { INSERT INTO t28(x) VALUES(1); } } {1 {unknown function: nosuchfunc()}} finish_test | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 | db eval { CREATE TABLE t28(x, y DEFAULT(nosuchfunc(1))); } catchsql { INSERT INTO t28(x) VALUES(1); } } {1 {unknown function: nosuchfunc()}} # Verify that the length() and typeof() functions do not actually load # the content of their argument. # do_test func-29.1 { db eval { CREATE TABLE t29(id INTEGER PRIMARY KEY, x, y); INSERT INTO t29 VALUES(1, 2, 3), (2, NULL, 4), (3, 4.5, 5); INSERT INTO t29 VALUES(4, randomblob(1000000), 6); INSERT INTO t29 VALUES(5, "hello", 7); } db close sqlite3 db test.db sqlite3_db_status db CACHE_MISS 1 db eval {SELECT typeof(x), length(x), typeof(y) FROM t29 ORDER BY id} } {integer 1 integer null {} integer real 3 integer blob 1000000 integer text 5 integer} do_test func-29.2 { set x [lindex [sqlite3_db_status db CACHE_MISS 1] 1] if {$x<5} {set x 1} set x } {1} do_test func-29.3 { db close sqlite3 db test.db sqlite3_db_status db CACHE_MISS 1 db eval {SELECT typeof(+x) FROM t29 ORDER BY id} } {integer null real blob text} do_test func-29.4 { set x [lindex [sqlite3_db_status db CACHE_MISS 1] 1] if {$x>100} {set x many} set x } {many} do_test func-29.5 { db close sqlite3 db test.db sqlite3_db_status db CACHE_MISS 1 db eval {SELECT sum(length(x)) FROM t29} } {1000009} do_test func-29.6 { set x [lindex [sqlite3_db_status db CACHE_MISS 1] 1] if {$x<5} {set x 1} set x } {1} finish_test |
Added test/incrblob4.test.
> > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 | # 2012 March 23 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #*********************************************************************** # # set testdir [file dirname $argv0] source $testdir/tester.tcl ifcapable {!incrblob} { finish_test ; return } set testprefix incrblob4 proc create_t1 {} { execsql { PRAGMA page_size = 1024; CREATE TABLE t1(k INTEGER PRIMARY KEY, v); } } proc populate_t1 {} { set data [list a b c d e f g h i j k l m n o p q r s t u v w x y z] foreach d $data { set blob [string repeat $d 900] execsql { INSERT INTO t1(v) VALUES($blob) } } } do_test 1.1 { create_t1 populate_t1 } {} do_test 1.2 { set blob [db incrblob t1 v 5] read $blob 10 } {eeeeeeeeee} do_test 1.3 { execsql { DELETE FROM t1 } populate_t1 } {} do_test 2.1 { reset_db create_t1 populate_t1 } {} do_test 2.2 { set blob [db incrblob t1 v 10] read $blob 10 } {jjjjjjjjjj} do_test 2.3 { set new [string repeat % 900] execsql { DELETE FROM t1 WHERE k=10 } execsql { DELETE FROM t1 WHERE k=9 } execsql { INSERT INTO t1(v) VALUES($new) } } {} do_test 3.1 { reset_db create_t1 populate_t1 } {} do_test 3.2 { set blob [db incrblob t1 v 20] read $blob 10 } {tttttttttt} do_test 3.3 { set new [string repeat % 900] execsql { UPDATE t1 SET v = $new WHERE k = 20 } execsql { DELETE FROM t1 WHERE k=19 } execsql { INSERT INTO t1(v) VALUES($new) } } {} finish_test |
Changes to test/pager1.test.
︙ | ︙ | |||
1764 1765 1766 1767 1768 1769 1770 | PRAGMA writable_schema = 1; UPDATE sqlite_master SET rootpage = $lockingpage; } sqlite3 db2 test.db catchsql { SELECT count(*) FROM t1 } db2 } {1 {database disk image is malformed}} db2 close | | > > > > > > > | > > > > > > > > > > > > > > > > > > | | 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 | PRAGMA writable_schema = 1; UPDATE sqlite_master SET rootpage = $lockingpage; } sqlite3 db2 test.db catchsql { SELECT count(*) FROM t1 } db2 } {1 {database disk image is malformed}} db2 close do_test pager1-18.3.1 { execsql { CREATE TABLE t2(x); INSERT INTO t2 VALUES(a_string(5000)); } set pgno [expr ([file size test.db] / 1024)-2] hexio_write test.db [expr ($pgno-1)*1024] 00000000 sqlite3 db2 test.db # even though x is malformed, because typeof() does # not load the content of x, the error is not noticed. catchsql { SELECT typeof(x) FROM t2 } db2 } {0 text} do_test pager1-18.3.2 { # in this case, the value of x is loaded and so the error is # detected catchsql { SELECT length(x||'') FROM t2 } db2 } {1 {database disk image is malformed}} db2 close do_test pager1-18.3.3 { execsql { DELETE FROM t2; INSERT INTO t2 VALUES(randomblob(5000)); } set pgno [expr ([file size test.db] / 1024)-2] hexio_write test.db [expr ($pgno-1)*1024] 00000000 sqlite3 db2 test.db # even though x is malformed, because length() and typeof() do # not load the content of x, the error is not noticed. catchsql { SELECT length(x), typeof(x) FROM t2 } db2 } {0 {5000 blob}} do_test pager1-18.3.4 { # in this case, the value of x is loaded and so the error is # detected catchsql { SELECT length(x||'') FROM t2 } db2 } {1 {database disk image is malformed}} db2 close do_test pager1-18.4 { hexio_write test.db [expr ($pgno-1)*1024] 90000000 sqlite3 db2 test.db catchsql { SELECT length(x||'') FROM t2 } db2 } {1 {database disk image is malformed}} db2 close do_test pager1-18.5 { sqlite3 db "" execsql { CREATE TABLE t1(a, b); CREATE TABLE t2(a, b); |
︙ | ︙ |
Changes to test/permutations.test.
︙ | ︙ | |||
183 184 185 186 187 188 189 | fts3defer.test fts3defer2.test fts3e.test fts3expr.test fts3expr2.test fts3near.test fts3query.test fts3shared.test fts3snippet.test fts3sort.test fts3fault.test fts3malloc.test fts3matchinfo.test fts3aux1.test fts3comp1.test fts3auto.test fts4aa.test fts4content.test fts3conf.test fts3prefix.test fts3fault2.test fts3corrupt.test | | > | 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 | fts3defer.test fts3defer2.test fts3e.test fts3expr.test fts3expr2.test fts3near.test fts3query.test fts3shared.test fts3snippet.test fts3sort.test fts3fault.test fts3malloc.test fts3matchinfo.test fts3aux1.test fts3comp1.test fts3auto.test fts4aa.test fts4content.test fts3conf.test fts3prefix.test fts3fault2.test fts3corrupt.test fts3corrupt2.test fts3first.test fts4langid.test fts4merge.test fts4check.test } lappend ::testsuitelist xxx #------------------------------------------------------------------------- # Define the coverage related test suites: # |
︙ | ︙ |
Changes to test/trace2.test.
︙ | ︙ | |||
126 127 128 129 130 131 132 | do_trace_test 2.2 { INSERT INTO x1 VALUES('North northwest wind between 8 and 14 mph'); } { "INSERT INTO x1 VALUES('North northwest wind between 8 and 14 mph');" "-- INSERT INTO 'main'.'x1_content' VALUES(?,(?))" "-- REPLACE INTO 'main'.'x1_docsize' VALUES(?,?)" | | | | | | 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 | do_trace_test 2.2 { INSERT INTO x1 VALUES('North northwest wind between 8 and 14 mph'); } { "INSERT INTO x1 VALUES('North northwest wind between 8 and 14 mph');" "-- INSERT INTO 'main'.'x1_content' VALUES(?,(?))" "-- REPLACE INTO 'main'.'x1_docsize' VALUES(?,?)" "-- SELECT value FROM 'main'.'x1_stat' WHERE id=?" "-- REPLACE INTO 'main'.'x1_stat' VALUES(?,?)" "-- SELECT (SELECT max(idx) FROM 'main'.'x1_segdir' WHERE level = ?) + 1" "-- SELECT coalesce((SELECT max(blockid) FROM 'main'.'x1_segments') + 1, 1)" "-- REPLACE INTO 'main'.'x1_segdir' VALUES(?,?,?,?,?,?)" } do_trace_test 2.3 { INSERT INTO x1(x1) VALUES('optimize'); } { "INSERT INTO x1(x1) VALUES('optimize');" "-- SELECT DISTINCT level / (1024 * ?) FROM 'main'.'x1_segdir'" "-- SELECT idx, start_block, leaves_end_block, end_block, root FROM 'main'.'x1_segdir' WHERE level BETWEEN ? AND ?ORDER BY level DESC, idx ASC" "-- SELECT max(level) FROM 'main'.'x1_segdir' WHERE level BETWEEN ? AND ?" "-- SELECT coalesce((SELECT max(blockid) FROM 'main'.'x1_segments') + 1, 1)" "-- DELETE FROM 'main'.'x1_segdir' WHERE level BETWEEN ? AND ?" "-- REPLACE INTO 'main'.'x1_segdir' VALUES(?,?,?,?,?,?)" } } finish_test |
Changes to test/vtab1.test.
︙ | ︙ | |||
11 12 13 14 15 16 17 18 19 20 21 22 23 24 | # This file implements regression tests for SQLite library. The # focus of this file is creating and dropping virtual tables. # # $Id: vtab1.test,v 1.57 2008/08/01 17:51:47 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl ifcapable !vtab||!schema_pragmas { finish_test return } #---------------------------------------------------------------------- | > | 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 | # This file implements regression tests for SQLite library. The # focus of this file is creating and dropping virtual tables. # # $Id: vtab1.test,v 1.57 2008/08/01 17:51:47 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix vtab1 ifcapable !vtab||!schema_pragmas { finish_test return } #---------------------------------------------------------------------- |
︙ | ︙ | |||
38 39 40 41 42 43 44 45 46 47 48 49 50 51 | # in that file for the special behaviour of the Tcl $echo_module variable. # # TODO: # * How to test the sqlite3_index_constraint_usage.omit field? # * vtab1-5.* # # vtab1-14.*: Test 'IN' constraints - i.e. "SELECT * FROM t1 WHERE id IN(...)" # #---------------------------------------------------------------------- # Test cases vtab1.1.* # | > > > | 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 | # in that file for the special behaviour of the Tcl $echo_module variable. # # TODO: # * How to test the sqlite3_index_constraint_usage.omit field? # * vtab1-5.* # # vtab1-14.*: Test 'IN' constraints - i.e. "SELECT * FROM t1 WHERE id IN(...)" # # vtab1-18.*: Check that the LIKE optimization is not applied when the lhs # is a virtual table column. # #---------------------------------------------------------------------- # Test cases vtab1.1.* # |
︙ | ︙ | |||
1214 1215 1216 1217 1218 1219 1220 1221 1222 | DROP TABLE e5; SAVEPOINT one; ROLLBACK TO one; COMMIT; } } {} unset -nocomplain echo_module_begin_fail finish_test | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 | DROP TABLE e5; SAVEPOINT one; ROLLBACK TO one; COMMIT; } } {} #------------------------------------------------------------------------- # The following tests - vtab1-18.* - test that the optimization of LIKE # constraints in where.c plays well with virtual tables. # # 18.1.*: Case-insensitive LIKE. # 18.2.*: Case-sensitive LIKE. # unset -nocomplain echo_module_begin_fail do_execsql_test 18.1.0 { CREATE TABLE t6(a, b TEXT); CREATE INDEX i6 ON t6(b, a); INSERT INTO t6 VALUES(1, 'Peter'); INSERT INTO t6 VALUES(2, 'Andrew'); INSERT INTO t6 VALUES(3, 'James'); INSERT INTO t6 VALUES(4, 'John'); INSERT INTO t6 VALUES(5, 'Phillip'); INSERT INTO t6 VALUES(6, 'Bartholomew'); CREATE VIRTUAL TABLE e6 USING echo(t6); } foreach {tn sql res filter} { 1.1 "SELECT a FROM e6 WHERE b>'James'" {4 1 5} {xFilter {SELECT rowid, * FROM 't6' WHERE b > ?} James} 1.2 "SELECT a FROM e6 WHERE b>='J' AND b<'K'" {3 4} {xFilter {SELECT rowid, * FROM 't6' WHERE b >= ? AND b < ?} J K} 1.3 "SELECT a FROM e6 WHERE b LIKE 'J%'" {3 4} {xFilter {SELECT rowid, * FROM 't6'}} 1.4 "SELECT a FROM e6 WHERE b LIKE 'j%'" {3 4} {xFilter {SELECT rowid, * FROM 't6'}} } { set echo_module {} do_execsql_test 18.$tn.1 $sql $res do_test 18.$tn.2 { lrange $::echo_module 2 end } $filter } do_execsql_test 18.2.0 { PRAGMA case_sensitive_like = ON } foreach {tn sql res filter} { 2.1 "SELECT a FROM e6 WHERE b LIKE 'J%'" {3 4} {xFilter {SELECT rowid, * FROM 't6'}} 2.2 "SELECT a FROM e6 WHERE b LIKE 'j%'" {} {xFilter {SELECT rowid, * FROM 't6'}} } { set echo_module {} do_execsql_test 18.$tn.1 $sql $res do_test 18.$tn.2 { lrange $::echo_module 2 end } $filter } do_execsql_test 18.2.x { PRAGMA case_sensitive_like = OFF } finish_test |