Overview
Comment: | Merge updates from trunk - FTS5 fixes and enhancements to the test scripts so that they work with SEE. |
Downloads: | Tarball | ZIP archive |
Timelines: | family | ancestors | descendants | both | apple-osx |
Files: | files | file ages | folders |
SHA1: | f41a7361a1c59d0d681f2a81c2f70c1a |
User & Date: | drh 2016-03-15 12:45:43.061 |
Context
2016-03-16
01:09 | Merge the SQLITE_OMIT_CODEC_FROM_TCL compile-time option from trunk. (check-in: 161d91e485 user: drh tags: apple-osx) | |
2016-03-15
12:45 | Merge updates from trunk - FTS5 fixes and enhancements to the test scripts so that they work with SEE. (check-in: f41a7361a1 user: drh tags: apple-osx) | |
12:37 | More test-case changes so that everything works when the reserved_bytes value in the header is non-zero. (check-in: 2fd095b14b user: drh tags: trunk) | |
2016-03-08
16:35 | Merge changes from trunk, especially the SQLITE_DEFAULT_SYNCHRONOUS enhancements. (check-in: 2974194123 user: drh tags: apple-osx) | |
Changes
Changes to autoconf/Makefile.am.
1 2 3 4 5 6 7 8 | AM_CFLAGS = @THREADSAFE_FLAGS@ @DYNAMIC_EXTENSION_FLAGS@ @FTS5_FLAGS@ @JSON1_FLAGS@ -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_RTREE lib_LTLIBRARIES = libsqlite3.la libsqlite3_la_SOURCES = sqlite3.c libsqlite3_la_LDFLAGS = -no-undefined -version-info 8:6:8 bin_PROGRAMS = sqlite3 | | > | | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 | AM_CFLAGS = @THREADSAFE_FLAGS@ @DYNAMIC_EXTENSION_FLAGS@ @FTS5_FLAGS@ @JSON1_FLAGS@ -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_RTREE lib_LTLIBRARIES = libsqlite3.la libsqlite3_la_SOURCES = sqlite3.c libsqlite3_la_LDFLAGS = -no-undefined -version-info 8:6:8 bin_PROGRAMS = sqlite3 sqlite3_SOURCES = shell.c sqlite3.h EXTRA_sqlite3_SOURCES = sqlite3.c sqlite3_LDADD = @EXTRA_SHELL_OBJ@ @READLINE_LIBS@ sqlite3_DEPENDENCIES = @EXTRA_SHELL_OBJ@ sqlite3_CFLAGS = $(AM_CFLAGS) -DSQLITE_ENABLE_EXPLAIN_COMMENTS include_HEADERS = sqlite3.h sqlite3ext.h EXTRA_DIST = sqlite3.1 tea Makefile.msc sqlite3.rc README.txt Replace.cs pkgconfigdir = ${libdir}/pkgconfig |
︙ | ︙ |
Changes to autoconf/configure.ac.
︙ | ︙ | |||
126 127 128 129 130 131 132 | # --enable-static-shell # AC_ARG_ENABLE(static-shell, [AS_HELP_STRING( [--enable-static-shell], [statically link libsqlite3 into shell tool [default=yes]])], [], [enable_static_shell=yes]) if test x"$enable_static_shell" == "xyes"; then | | | 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 | # --enable-static-shell # AC_ARG_ENABLE(static-shell, [AS_HELP_STRING( [--enable-static-shell], [statically link libsqlite3 into shell tool [default=yes]])], [], [enable_static_shell=yes]) if test x"$enable_static_shell" == "xyes"; then EXTRA_SHELL_OBJ=sqlite3-sqlite3.$OBJEXT else EXTRA_SHELL_OBJ=libsqlite3.la fi AC_SUBST(EXTRA_SHELL_OBJ) #----------------------------------------------------------------------- AC_CHECK_FUNCS(posix_fallocate) |
︙ | ︙ |
Changes to ext/fts3/fts3_write.c.
︙ | ︙ | |||
3191 3192 3193 3194 3195 3196 3197 | } if( iLevel==FTS3_SEGCURSOR_ALL ){ /* This call is to merge all segments in the database to a single ** segment. The level of the new segment is equal to the numerically ** greatest segment level currently present in the database for this ** index. The idx of the new segment is always 0. */ | | | 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 | } if( iLevel==FTS3_SEGCURSOR_ALL ){ /* This call is to merge all segments in the database to a single ** segment. The level of the new segment is equal to the numerically ** greatest segment level currently present in the database for this ** index. The idx of the new segment is always 0. */ if( csr.nSegment==1 && 0==fts3SegReaderIsPending(csr.apSegment[0]) ){ rc = SQLITE_DONE; goto finished; } iNewLevel = iMaxLevel; bIgnoreEmpty = 1; }else{ |
︙ | ︙ |
Changes to ext/fts5/fts5Int.h.
︙ | ︙ | |||
168 169 170 171 172 173 174 175 176 177 178 179 180 181 | fts5_tokenizer *pTokApi; /* Values loaded from the %_config table */ int iCookie; /* Incremented when %_config is modified */ int pgsz; /* Approximate page size used in %_data */ int nAutomerge; /* 'automerge' setting */ int nCrisisMerge; /* Maximum allowed segments per level */ int nHashSize; /* Bytes of memory for in-memory hash */ char *zRank; /* Name of rank function */ char *zRankArgs; /* Arguments to rank function */ /* If non-NULL, points to sqlite3_vtab.base.zErrmsg. Often NULL. */ char **pzErrmsg; | > | 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 | fts5_tokenizer *pTokApi; /* Values loaded from the %_config table */ int iCookie; /* Incremented when %_config is modified */ int pgsz; /* Approximate page size used in %_data */ int nAutomerge; /* 'automerge' setting */ int nCrisisMerge; /* Maximum allowed segments per level */ int nUsermerge; /* 'usermerge' setting */ int nHashSize; /* Bytes of memory for in-memory hash */ char *zRank; /* Name of rank function */ char *zRankArgs; /* Arguments to rank function */ /* If non-NULL, points to sqlite3_vtab.base.zErrmsg. Often NULL. */ char **pzErrmsg; |
︙ | ︙ | |||
695 696 697 698 699 700 701 702 703 704 705 706 707 708 | Fts5ExprNode *sqlite3Fts5ParseNode( Fts5Parse *pParse, int eType, Fts5ExprNode *pLeft, Fts5ExprNode *pRight, Fts5ExprNearset *pNear ); Fts5ExprPhrase *sqlite3Fts5ParseTerm( Fts5Parse *pParse, Fts5ExprPhrase *pPhrase, Fts5Token *pToken, int bPrefix ); | > > > > > > | 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 | Fts5ExprNode *sqlite3Fts5ParseNode( Fts5Parse *pParse, int eType, Fts5ExprNode *pLeft, Fts5ExprNode *pRight, Fts5ExprNearset *pNear ); Fts5ExprNode *sqlite3Fts5ParseImplicitAnd( Fts5Parse *pParse, Fts5ExprNode *pLeft, Fts5ExprNode *pRight ); Fts5ExprPhrase *sqlite3Fts5ParseTerm( Fts5Parse *pParse, Fts5ExprPhrase *pPhrase, Fts5Token *pToken, int bPrefix ); |
︙ | ︙ |
Changes to ext/fts5/fts5_config.c.
︙ | ︙ | |||
14 15 16 17 18 19 20 21 22 23 24 25 26 27 | */ #include "fts5Int.h" #define FTS5_DEFAULT_PAGE_SIZE 4050 #define FTS5_DEFAULT_AUTOMERGE 4 #define FTS5_DEFAULT_CRISISMERGE 16 #define FTS5_DEFAULT_HASHSIZE (1024*1024) /* Maximum allowed page size */ #define FTS5_MAX_PAGE_SIZE (128*1024) static int fts5_iswhitespace(char x){ | > | 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 | */ #include "fts5Int.h" #define FTS5_DEFAULT_PAGE_SIZE 4050 #define FTS5_DEFAULT_AUTOMERGE 4 #define FTS5_DEFAULT_USERMERGE 4 #define FTS5_DEFAULT_CRISISMERGE 16 #define FTS5_DEFAULT_HASHSIZE (1024*1024) /* Maximum allowed page size */ #define FTS5_MAX_PAGE_SIZE (128*1024) static int fts5_iswhitespace(char x){ |
︙ | ︙ | |||
437 438 439 440 441 442 443 | memcpy(zOut, zIn, nIn+1); if( fts5_isopenquote(zOut[0]) ){ int ii = fts5Dequote(zOut); zRet = &zIn[ii]; *pbQuoted = 1; }else{ zRet = fts5ConfigSkipBareword(zIn); | > | > | 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 | memcpy(zOut, zIn, nIn+1); if( fts5_isopenquote(zOut[0]) ){ int ii = fts5Dequote(zOut); zRet = &zIn[ii]; *pbQuoted = 1; }else{ zRet = fts5ConfigSkipBareword(zIn); if( zRet ){ zOut[zRet-zIn] = '\0'; } } } if( zRet==0 ){ sqlite3_free(zOut); }else{ *pzOut = zOut; |
︙ | ︙ | |||
852 853 854 855 856 857 858 859 860 861 862 863 864 865 | if( nAutomerge<0 || nAutomerge>64 ){ *pbBadkey = 1; }else{ if( nAutomerge==1 ) nAutomerge = FTS5_DEFAULT_AUTOMERGE; pConfig->nAutomerge = nAutomerge; } } else if( 0==sqlite3_stricmp(zKey, "crisismerge") ){ int nCrisisMerge = -1; if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){ nCrisisMerge = sqlite3_value_int(pVal); } if( nCrisisMerge<0 ){ | > > > > > > > > > > > > | 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 | if( nAutomerge<0 || nAutomerge>64 ){ *pbBadkey = 1; }else{ if( nAutomerge==1 ) nAutomerge = FTS5_DEFAULT_AUTOMERGE; pConfig->nAutomerge = nAutomerge; } } else if( 0==sqlite3_stricmp(zKey, "usermerge") ){ int nUsermerge = -1; if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){ nUsermerge = sqlite3_value_int(pVal); } if( nUsermerge<2 || nUsermerge>16 ){ *pbBadkey = 1; }else{ pConfig->nUsermerge = nUsermerge; } } else if( 0==sqlite3_stricmp(zKey, "crisismerge") ){ int nCrisisMerge = -1; if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){ nCrisisMerge = sqlite3_value_int(pVal); } if( nCrisisMerge<0 ){ |
︙ | ︙ | |||
899 900 901 902 903 904 905 906 907 908 909 910 911 912 | sqlite3_stmt *p = 0; int rc = SQLITE_OK; int iVersion = 0; /* Set default values */ pConfig->pgsz = FTS5_DEFAULT_PAGE_SIZE; pConfig->nAutomerge = FTS5_DEFAULT_AUTOMERGE; pConfig->nCrisisMerge = FTS5_DEFAULT_CRISISMERGE; pConfig->nHashSize = FTS5_DEFAULT_HASHSIZE; zSql = sqlite3Fts5Mprintf(&rc, zSelect, pConfig->zDb, pConfig->zName); if( zSql ){ rc = sqlite3_prepare_v2(pConfig->db, zSql, -1, &p, 0); sqlite3_free(zSql); | > | 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 | sqlite3_stmt *p = 0; int rc = SQLITE_OK; int iVersion = 0; /* Set default values */ pConfig->pgsz = FTS5_DEFAULT_PAGE_SIZE; pConfig->nAutomerge = FTS5_DEFAULT_AUTOMERGE; pConfig->nUsermerge = FTS5_DEFAULT_USERMERGE; pConfig->nCrisisMerge = FTS5_DEFAULT_CRISISMERGE; pConfig->nHashSize = FTS5_DEFAULT_HASHSIZE; zSql = sqlite3Fts5Mprintf(&rc, zSelect, pConfig->zDb, pConfig->zName); if( zSql ){ rc = sqlite3_prepare_v2(pConfig->db, zSql, -1, &p, 0); sqlite3_free(zSql); |
︙ | ︙ |
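The fts5_config.c changes above add the new 'usermerge' setting (default 4, accepted values 2 through 16), which controls how many segments a user-requested 'merge' command will combine at a time. A minimal sketch of how it is set, following the tests added elsewhere in this check-in:

    CREATE VIRTUAL TABLE ft USING fts5(x);
    -- Values outside the 2..16 range are rejected as errors.
    INSERT INTO ft(ft, rank) VALUES('usermerge', 2);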
Changes to ext/fts5/fts5_expr.c.
︙ | ︙ | |||
254 255 256 257 258 259 260 261 262 263 264 265 266 267 | } pNew->pIndex = 0; pNew->pConfig = pConfig; pNew->apExprPhrase = sParse.apPhrase; pNew->nPhrase = sParse.nPhrase; sParse.apPhrase = 0; } } sqlite3_free(sParse.apPhrase); *pzErr = sParse.zErr; return sParse.rc; } | > > | 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 | } pNew->pIndex = 0; pNew->pConfig = pConfig; pNew->apExprPhrase = sParse.apPhrase; pNew->nPhrase = sParse.nPhrase; sParse.apPhrase = 0; } }else{ sqlite3Fts5ParseNodeFree(sParse.pExpr); } sqlite3_free(sParse.apPhrase); *pzErr = sParse.zErr; return sParse.rc; } |
︙ | ︙ | |||
1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 | int rc = SQLITE_OK; pNode->bEof = 0; pNode->bNomatch = 0; if( Fts5NodeIsString(pNode) ){ /* Initialize all term iterators in the NEAR object. */ rc = fts5ExprNearInitAll(pExpr, pNode); }else{ int i; int nEof = 0; for(i=0; i<pNode->nChild && rc==SQLITE_OK; i++){ Fts5ExprNode *pChild = pNode->apChild[i]; rc = fts5ExprNodeFirst(pExpr, pNode->apChild[i]); assert( pChild->bEof==0 || pChild->bEof==1 ); | > > | 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 | int rc = SQLITE_OK; pNode->bEof = 0; pNode->bNomatch = 0; if( Fts5NodeIsString(pNode) ){ /* Initialize all term iterators in the NEAR object. */ rc = fts5ExprNearInitAll(pExpr, pNode); }else if( pNode->xNext==0 ){ pNode->bEof = 1; }else{ int i; int nEof = 0; for(i=0; i<pNode->nChild && rc==SQLITE_OK; i++){ Fts5ExprNode *pChild = pNode->apChild[i]; rc = fts5ExprNodeFirst(pExpr, pNode->apChild[i]); assert( pChild->bEof==0 || pChild->bEof==1 ); |
︙ | ︙ | |||
1315 1316 1317 1318 1319 1320 1321 | ** equal to iFirst. ** ** Return SQLITE_OK if successful, or an SQLite error code otherwise. It ** is not considered an error if the query does not match any documents. */ int sqlite3Fts5ExprFirst(Fts5Expr *p, Fts5Index *pIdx, i64 iFirst, int bDesc){ Fts5ExprNode *pRoot = p->pRoot; | | | | | | | | | | | | | | | < | 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 | ** equal to iFirst. ** ** Return SQLITE_OK if successful, or an SQLite error code otherwise. It ** is not considered an error if the query does not match any documents. */ int sqlite3Fts5ExprFirst(Fts5Expr *p, Fts5Index *pIdx, i64 iFirst, int bDesc){ Fts5ExprNode *pRoot = p->pRoot; int rc; /* Return code */ p->pIndex = pIdx; p->bDesc = bDesc; rc = fts5ExprNodeFirst(p, pRoot); /* If not at EOF but the current rowid occurs earlier than iFirst in ** the iteration order, move to document iFirst or later. */ if( pRoot->bEof==0 && fts5RowidCmp(p, pRoot->iRowid, iFirst)<0 ){ rc = fts5ExprNodeNext(p, pRoot, 1, iFirst); } /* If the iterator is not at a real match, skip forward until it is. */ while( pRoot->bNomatch ){ assert( pRoot->bEof==0 && rc==SQLITE_OK ); rc = fts5ExprNodeNext(p, pRoot, 0, 0); } return rc; } /* ** Move to the next document ** |
︙ | ︙ | |||
1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 | } if( pRet==0 ){ assert( pParse->rc!=SQLITE_OK ); sqlite3Fts5ParseNearsetFree(pNear); sqlite3Fts5ParsePhraseFree(pPhrase); }else{ pRet->apPhrase[pRet->nPhrase++] = pPhrase; } return pRet; } typedef struct TokenCtx TokenCtx; struct TokenCtx { | > > > > > > > > > > > > > > > | 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 | } if( pRet==0 ){ assert( pParse->rc!=SQLITE_OK ); sqlite3Fts5ParseNearsetFree(pNear); sqlite3Fts5ParsePhraseFree(pPhrase); }else{ if( pRet->nPhrase>0 ){ Fts5ExprPhrase *pLast = pRet->apPhrase[pRet->nPhrase-1]; assert( pLast==pParse->apPhrase[pParse->nPhrase-2] ); if( pPhrase->nTerm==0 ){ fts5ExprPhraseFree(pPhrase); pRet->nPhrase--; pParse->nPhrase--; pPhrase = pLast; }else if( pLast->nTerm==0 ){ fts5ExprPhraseFree(pLast); pParse->apPhrase[pParse->nPhrase-2] = pPhrase; pParse->nPhrase--; pRet->nPhrase--; } } pRet->apPhrase[pRet->nPhrase++] = pPhrase; } return pRet; } typedef struct TokenCtx TokenCtx; struct TokenCtx { |
︙ | ︙ | |||
1472 1473 1474 1475 1476 1477 1478 | Fts5ExprPhrase *pPhrase = pCtx->pPhrase; UNUSED_PARAM2(iUnused1, iUnused2); /* If an error has already occurred, this is a no-op */ if( pCtx->rc!=SQLITE_OK ) return pCtx->rc; | < | | 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 | Fts5ExprPhrase *pPhrase = pCtx->pPhrase; UNUSED_PARAM2(iUnused1, iUnused2); /* If an error has already occurred, this is a no-op */ if( pCtx->rc!=SQLITE_OK ) return pCtx->rc; if( pPhrase && pPhrase->nTerm>0 && (tflags & FTS5_TOKEN_COLOCATED) ){ Fts5ExprTerm *pSyn; int nByte = sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer) + nToken+1; pSyn = (Fts5ExprTerm*)sqlite3_malloc(nByte); if( pSyn==0 ){ rc = SQLITE_NOMEM; }else{ memset(pSyn, 0, nByte); |
︙ | ︙ | |||
1574 1575 1576 1577 1578 1579 1580 | rc = sqlite3Fts5Tokenize(pConfig, flags, z, n, &sCtx, fts5ParseTokenize); } sqlite3_free(z); if( rc || (rc = sCtx.rc) ){ pParse->rc = rc; fts5ExprPhraseFree(sCtx.pPhrase); sCtx.pPhrase = 0; | | | > > > | | > > | 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 | rc = sqlite3Fts5Tokenize(pConfig, flags, z, n, &sCtx, fts5ParseTokenize); } sqlite3_free(z); if( rc || (rc = sCtx.rc) ){ pParse->rc = rc; fts5ExprPhraseFree(sCtx.pPhrase); sCtx.pPhrase = 0; }else{ if( pAppend==0 ){ if( (pParse->nPhrase % 8)==0 ){ int nByte = sizeof(Fts5ExprPhrase*) * (pParse->nPhrase + 8); Fts5ExprPhrase **apNew; apNew = (Fts5ExprPhrase**)sqlite3_realloc(pParse->apPhrase, nByte); if( apNew==0 ){ pParse->rc = SQLITE_NOMEM; fts5ExprPhraseFree(sCtx.pPhrase); return 0; } pParse->apPhrase = apNew; } pParse->nPhrase++; } if( sCtx.pPhrase==0 ){ /* This happens when parsing a token or quoted phrase that contains ** no token characters at all. (e.g ... MATCH '""'). */ sCtx.pPhrase = sqlite3Fts5MallocZero(&pParse->rc, sizeof(Fts5ExprPhrase)); }else if( sCtx.pPhrase->nTerm ){ sCtx.pPhrase->aTerm[sCtx.pPhrase->nTerm-1].bPrefix = bPrefix; } pParse->apPhrase[pParse->nPhrase-1] = sCtx.pPhrase; } return sCtx.pPhrase; } /* ** Create a new FTS5 expression by cloning phrase iPhrase of the |
︙ | ︙ | |||
1689 1690 1691 1692 1693 1694 1695 | } void sqlite3Fts5ParseSetDistance( Fts5Parse *pParse, Fts5ExprNearset *pNear, Fts5Token *p ){ | > | | | | | | | | | | | | | | | | | > | 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 | } void sqlite3Fts5ParseSetDistance( Fts5Parse *pParse, Fts5ExprNearset *pNear, Fts5Token *p ){ if( pNear ){ int nNear = 0; int i; if( p->n ){ for(i=0; i<p->n; i++){ char c = (char)p->p[i]; if( c<'0' || c>'9' ){ sqlite3Fts5ParseError( pParse, "expected integer, got \"%.*s\"", p->n, p->p ); return; } nNear = nNear * 10 + (p->p[i] - '0'); } }else{ nNear = FTS5_DEFAULT_NEARDIST; } pNear->nNear = nNear; } } /* ** The second argument passed to this function may be NULL, or it may be ** an existing Fts5Colset object. This function returns a pointer to ** a new colset object containing the contents of (p) with new value column ** number iCol appended. |
︙ | ︙ | |||
1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 | pRet->eType = eType; pRet->pNear = pNear; fts5ExprAssignXNext(pRet); if( eType==FTS5_STRING ){ int iPhrase; for(iPhrase=0; iPhrase<pNear->nPhrase; iPhrase++){ pNear->apPhrase[iPhrase]->pNode = pRet; } if( pParse->pConfig->eDetail!=FTS5_DETAIL_FULL | > > > > | | 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 | pRet->eType = eType; pRet->pNear = pNear; fts5ExprAssignXNext(pRet); if( eType==FTS5_STRING ){ int iPhrase; for(iPhrase=0; iPhrase<pNear->nPhrase; iPhrase++){ pNear->apPhrase[iPhrase]->pNode = pRet; if( pNear->apPhrase[iPhrase]->nTerm==0 ){ pRet->xNext = 0; pRet->eType = FTS5_EOF; } } if( pParse->pConfig->eDetail!=FTS5_DETAIL_FULL && (pNear->nPhrase!=1 || pNear->apPhrase[0]->nTerm>1) ){ assert( pParse->rc==SQLITE_OK ); pParse->rc = SQLITE_ERROR; assert( pParse->zErr==0 ); pParse->zErr = sqlite3_mprintf( "fts5: %s queries are not supported (detail!=full)", pNear->nPhrase==1 ? "phrase": "NEAR" |
︙ | ︙ | |||
1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 | if( pRet==0 ){ assert( pParse->rc!=SQLITE_OK ); sqlite3Fts5ParseNodeFree(pLeft); sqlite3Fts5ParseNodeFree(pRight); sqlite3Fts5ParseNearsetFree(pNear); } return pRet; } static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){ int nByte = 0; Fts5ExprTerm *p; char *zQuoted; | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 | if( pRet==0 ){ assert( pParse->rc!=SQLITE_OK ); sqlite3Fts5ParseNodeFree(pLeft); sqlite3Fts5ParseNodeFree(pRight); sqlite3Fts5ParseNearsetFree(pNear); } return pRet; } Fts5ExprNode *sqlite3Fts5ParseImplicitAnd( Fts5Parse *pParse, /* Parse context */ Fts5ExprNode *pLeft, /* Left hand child expression */ Fts5ExprNode *pRight /* Right hand child expression */ ){ Fts5ExprNode *pRet = 0; Fts5ExprNode *pPrev; if( pParse->rc ){ sqlite3Fts5ParseNodeFree(pLeft); sqlite3Fts5ParseNodeFree(pRight); }else{ assert( pLeft->eType==FTS5_STRING || pLeft->eType==FTS5_TERM || pLeft->eType==FTS5_EOF || pLeft->eType==FTS5_AND ); assert( pRight->eType==FTS5_STRING || pRight->eType==FTS5_TERM || pRight->eType==FTS5_EOF ); if( pLeft->eType==FTS5_AND ){ pPrev = pLeft->apChild[pLeft->nChild-1]; }else{ pPrev = pLeft; } assert( pPrev->eType==FTS5_STRING || pPrev->eType==FTS5_TERM || pPrev->eType==FTS5_EOF ); if( pRight->eType==FTS5_EOF ){ assert( pParse->apPhrase[pParse->nPhrase-1]==pRight->pNear->apPhrase[0] ); sqlite3Fts5ParseNodeFree(pRight); pRet = pLeft; pParse->nPhrase--; } else if( pPrev->eType==FTS5_EOF ){ Fts5ExprPhrase **ap; if( pPrev==pLeft ){ pRet = pRight; }else{ pLeft->apChild[pLeft->nChild-1] = pRight; pRet = pLeft; } ap = &pParse->apPhrase[pParse->nPhrase-1-pRight->pNear->nPhrase]; assert( ap[0]==pPrev->pNear->apPhrase[0] ); memmove(ap, &ap[1], sizeof(Fts5ExprPhrase*)*pRight->pNear->nPhrase); pParse->nPhrase--; sqlite3Fts5ParseNodeFree(pPrev); } else{ pRet = sqlite3Fts5ParseNode(pParse, FTS5_AND, pLeft, pRight, 0); } } return pRet; } static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){ int nByte = 0; Fts5ExprTerm *p; char *zQuoted; |
︙ | ︙ | |||
2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 | } return zRet; } static char *fts5ExprPrint(Fts5Config *pConfig, Fts5ExprNode *pExpr){ char *zRet = 0; if( pExpr->eType==FTS5_STRING || pExpr->eType==FTS5_TERM ){ Fts5ExprNearset *pNear = pExpr->pNear; int i; int iTerm; if( pNear->pColset ){ int iCol = pNear->pColset->aiCol[0]; | > > > | 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 | } return zRet; } static char *fts5ExprPrint(Fts5Config *pConfig, Fts5ExprNode *pExpr){ char *zRet = 0; if( pExpr->eType==0 ){ return sqlite3_mprintf("\"\""); }else if( pExpr->eType==FTS5_STRING || pExpr->eType==FTS5_TERM ){ Fts5ExprNearset *pNear = pExpr->pNear; int i; int iTerm; if( pNear->pColset ){ int iCol = pNear->pColset->aiCol[0]; |
︙ | ︙ | |||
2118 2119 2120 2121 2122 2123 2124 | for(i=0; i<pExpr->nChild; i++){ char *z = fts5ExprPrint(pConfig, pExpr->apChild[i]); if( z==0 ){ sqlite3_free(zRet); zRet = 0; }else{ int e = pExpr->apChild[i]->eType; | | | 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 | for(i=0; i<pExpr->nChild; i++){ char *z = fts5ExprPrint(pConfig, pExpr->apChild[i]); if( z==0 ){ sqlite3_free(zRet); zRet = 0; }else{ int e = pExpr->apChild[i]->eType; int b = (e!=FTS5_STRING && e!=FTS5_TERM && e!=FTS5_EOF); zRet = fts5PrintfAppend(zRet, "%s%s%z%s", (i==0 ? "" : zOp), (b?"(":""), z, (b?")":"") ); } if( zRet==0 ) break; } |
︙ | ︙ |
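Several of the fts5_expr.c changes above handle phrases that contain no tokens at all (for example MATCH '""'): such phrases are now turned into FTS5_EOF nodes and either dropped or made to match nothing, instead of misbehaving. The new fts5fuzz1.test file added by this check-in exercises the intended behaviour; a short excerpt in SQL form:

    CREATE VIRTUAL TABLE f1 USING fts5(a, b);
    INSERT INTO f1 VALUES('a b', 'c d');
    INSERT INTO f1 VALUES('e f', 'a b');
    SELECT rowid FROM f1('""');        -- no rows: an empty phrase matches nothing
    SELECT rowid FROM f1('"" a');      -- rows 1 and 2: the empty phrase is ignored
    SELECT rowid FROM f1('"" AND a');  -- no rows: explicit AND keeps the empty phrase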
Changes to ext/fts5/fts5_index.c.
︙ | ︙ | |||
4175 4176 4177 4178 4179 4180 4181 4182 | fts5MultiIterFree(pIter); fts5BufferFree(&term); if( pnRem ) *pnRem -= writer.nLeafWritten; } /* ** Do up to nPg pages of automerge work on the index. */ | > > | | > > | 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 | fts5MultiIterFree(pIter); fts5BufferFree(&term); if( pnRem ) *pnRem -= writer.nLeafWritten; } /* ** Do up to nPg pages of automerge work on the index. ** ** Return true if any changes were actually made, or false otherwise. */ static int fts5IndexMerge( Fts5Index *p, /* FTS5 backend object */ Fts5Structure **ppStruct, /* IN/OUT: Current structure of index */ int nPg, /* Pages of work to do */ int nMin /* Minimum number of segments to merge */ ){ int nRem = nPg; int bRet = 0; Fts5Structure *pStruct = *ppStruct; while( nRem>0 && p->rc==SQLITE_OK ){ int iLvl; /* To iterate through levels */ int iBestLvl = 0; /* Level offering the most input segments */ int nBest = 0; /* Number of input segments on best level */ /* Set iBestLvl to the level to read input segments from. */ |
︙ | ︙ | |||
4212 4213 4214 4215 4216 4217 4218 | /* If nBest is still 0, then the index must be empty. */ #ifdef SQLITE_DEBUG for(iLvl=0; nBest==0 && iLvl<pStruct->nLevel; iLvl++){ assert( pStruct->aLevel[iLvl].nSeg==0 ); } #endif | < | < > > | 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 | /* If nBest is still 0, then the index must be empty. */ #ifdef SQLITE_DEBUG for(iLvl=0; nBest==0 && iLvl<pStruct->nLevel; iLvl++){ assert( pStruct->aLevel[iLvl].nSeg==0 ); } #endif if( nBest<nMin && pStruct->aLevel[iBestLvl].nMerge==0 ){ break; } bRet = 1; fts5IndexMergeLevel(p, &pStruct, iBestLvl, &nRem); if( p->rc==SQLITE_OK && pStruct->aLevel[iBestLvl].nMerge==0 ){ fts5StructurePromote(p, iBestLvl+1, pStruct); } } *ppStruct = pStruct; return bRet; } /* ** A total of nLeaf leaf pages of data has just been flushed to a level-0 ** segment. This function updates the write-counter accordingly and, if ** necessary, performs incremental merge work. ** |
︙ | ︙ | |||
4250 4251 4252 4253 4254 4255 4256 | /* Update the write-counter. While doing so, set nWork. */ nWrite = pStruct->nWriteCounter; nWork = (int)(((nWrite + nLeaf) / p->nWorkUnit) - (nWrite / p->nWorkUnit)); pStruct->nWriteCounter += nLeaf; nRem = (int)(p->nWorkUnit * nWork * pStruct->nLevel); | | | 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 | /* Update the write-counter. While doing so, set nWork. */ nWrite = pStruct->nWriteCounter; nWork = (int)(((nWrite + nLeaf) / p->nWorkUnit) - (nWrite / p->nWorkUnit)); pStruct->nWriteCounter += nLeaf; nRem = (int)(p->nWorkUnit * nWork * pStruct->nLevel); fts5IndexMerge(p, ppStruct, nRem, p->pConfig->nAutomerge); } } static void fts5IndexCrisismerge( Fts5Index *p, /* FTS5 backend object */ Fts5Structure **ppStruct /* IN/OUT: Current structure of index */ ){ |
︙ | ︙ | |||
4470 4471 4472 4473 4474 4475 4476 | if( p->nPendingData ){ assert( p->pHash ); p->nPendingData = 0; fts5FlushOneHash(p); } } | | | | > > | > < < > > > > > > > > > > > > > > | > | | > | < < < | | | < | 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 | if( p->nPendingData ){ assert( p->pHash ); p->nPendingData = 0; fts5FlushOneHash(p); } } static Fts5Structure *fts5IndexOptimizeStruct( Fts5Index *p, Fts5Structure *pStruct ){ Fts5Structure *pNew = 0; int nByte = sizeof(Fts5Structure); int nSeg = pStruct->nSegment; int i; /* Figure out if this structure requires optimization. A structure does ** not require optimization if either: ** ** + it consists of fewer than two segments, or ** + all segments are on the same level, or ** + all segments except one are currently inputs to a merge operation. ** ** In the first case, return NULL. In the second, increment the ref-count ** on *pStruct and return a copy of the pointer to it. */ if( nSeg<2 ) return 0; for(i=0; i<pStruct->nLevel; i++){ int nThis = pStruct->aLevel[i].nSeg; if( nThis==nSeg || (nThis==nSeg-1 && pStruct->aLevel[i].nMerge==nThis) ){ fts5StructureRef(pStruct); return pStruct; } assert( pStruct->aLevel[i].nMerge<=nThis ); } nByte += (pStruct->nLevel+1) * sizeof(Fts5StructureLevel); pNew = (Fts5Structure*)sqlite3Fts5MallocZero(&p->rc, nByte); if( pNew ){ Fts5StructureLevel *pLvl; int nByte = nSeg * sizeof(Fts5StructureSegment); pNew->nLevel = pStruct->nLevel+1; pNew->nRef = 1; pNew->nWriteCounter = pStruct->nWriteCounter; pLvl = &pNew->aLevel[pStruct->nLevel]; |
︙ | ︙ | |||
4516 4517 4518 4519 4520 4521 4522 4523 | pNew->nSegment = pLvl->nSeg = nSeg; }else{ sqlite3_free(pNew); pNew = 0; } } if( pNew ){ | > > > > > > > > > > > > > > > > > | > < > > > > | > > > > > > > > | < | | | | > | | | 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 | pNew->nSegment = pLvl->nSeg = nSeg; }else{ sqlite3_free(pNew); pNew = 0; } } return pNew; } int sqlite3Fts5IndexOptimize(Fts5Index *p){ Fts5Structure *pStruct; Fts5Structure *pNew = 0; assert( p->rc==SQLITE_OK ); fts5IndexFlush(p); pStruct = fts5StructureRead(p); if( pStruct ){ pNew = fts5IndexOptimizeStruct(p, pStruct); } fts5StructureRelease(pStruct); assert( pNew==0 || pNew->nSegment>0 ); if( pNew ){ int iLvl; for(iLvl=0; pNew->aLevel[iLvl].nSeg==0; iLvl++){} while( p->rc==SQLITE_OK && pNew->aLevel[iLvl].nSeg>0 ){ int nRem = FTS5_OPT_WORK_UNIT; fts5IndexMergeLevel(p, &pNew, iLvl, &nRem); } fts5StructureWrite(p, pNew); fts5StructureRelease(pNew); } return fts5IndexReturn(p); } /* ** This is called to implement the special "VALUES('merge', $nMerge)" ** INSERT command. */ int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){ Fts5Structure *pStruct = fts5StructureRead(p); if( pStruct ){ int nMin = p->pConfig->nUsermerge; if( nMerge<0 ){ Fts5Structure *pNew = fts5IndexOptimizeStruct(p, pStruct); fts5StructureRelease(pStruct); pStruct = pNew; nMin = 2; nMerge = nMerge*-1; } if( pStruct && pStruct->nLevel ){ if( fts5IndexMerge(p, &pStruct, nMerge, nMin) ){ fts5StructureWrite(p, pStruct); } } fts5StructureRelease(pStruct); } return fts5IndexReturn(p); } static void fts5AppendRowid( Fts5Index *p, i64 iDelta, Fts5Iter *pUnused, |
︙ | ︙ |
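With the reworked fts5IndexMerge()/sqlite3Fts5IndexMerge() above, the special "VALUES('merge', N)" command merges at least 'usermerge' segments per step and reports whether it actually changed anything, and a negative N first builds the same single-level target structure as 'optimize', so repeated small merges eventually collapse the index to one segment. A hedged sketch of the incremental pattern, mirroring the fts5optimize.test changes in this check-in:

    -- Incrementally optimize an FTS5 table named t1 in small units of work:
    INSERT INTO t1(t1, rank) VALUES('merge', -1);  -- start an optimize-style merge
    INSERT INTO t1(t1, rank) VALUES('merge', 1);   -- repeat until no further changes are made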
Changes to ext/fts5/fts5_main.c.
︙ | ︙ | |||
1507 1508 1509 1510 1511 1512 1513 | pTab->base.zErrMsg = sqlite3_mprintf( "cannot %s contentless fts5 table: %s", (nArg>1 ? "UPDATE" : "DELETE from"), pConfig->zName ); rc = SQLITE_ERROR; } | | | | | 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 | pTab->base.zErrMsg = sqlite3_mprintf( "cannot %s contentless fts5 table: %s", (nArg>1 ? "UPDATE" : "DELETE from"), pConfig->zName ); rc = SQLITE_ERROR; } /* DELETE */ else if( nArg==1 ){ i64 iDel = sqlite3_value_int64(apVal[0]); /* Rowid to delete */ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, 0); } /* INSERT */ else if( eType0!=SQLITE_INTEGER ){ /* If this is a REPLACE, first remove the current entry (if any) */ if( eConflict==SQLITE_REPLACE && sqlite3_value_type(apVal[1])==SQLITE_INTEGER ){ i64 iNew = sqlite3_value_int64(apVal[1]); /* Rowid to delete */ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0); } fts5StorageInsert(&rc, pTab, apVal, pRowid); } /* UPDATE */ else{ i64 iOld = sqlite3_value_int64(apVal[0]); /* Old rowid */ i64 iNew = sqlite3_value_int64(apVal[1]); /* New rowid */ if( iOld!=iNew ){ if( eConflict==SQLITE_REPLACE ){ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); if( rc==SQLITE_OK ){ |
︙ | ︙ |
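The fts5_main.c hunk above relabels the branches of the update method, documenting the dispatch: one argument means DELETE, a non-integer (NULL) first argument means INSERT (with REPLACE first removing any existing row), and anything else is an UPDATE, including a rowid change handled as delete-plus-insert. For illustration, assuming a table created with CREATE VIRTUAL TABLE ft USING fts5(a):

    DELETE FROM ft WHERE rowid = 1;                       -- DELETE path
    INSERT OR REPLACE INTO ft(rowid, a) VALUES(2, 'two'); -- REPLACE removes any old row 2 first
    UPDATE ft SET rowid = 3 WHERE rowid = 2;              -- rowid change: delete then re-insert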
Changes to ext/fts5/fts5_test_mi.c.
︙ | ︙ | |||
64 65 66 67 68 69 70 | /* ** Return a pointer to the fts5_api pointer for database connection db. ** If an error occurs, return NULL and leave an error in the database ** handle (accessible using sqlite3_errcode()/errmsg()). */ | | < > > | > | | | | | | > > | | 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 | /* ** Return a pointer to the fts5_api pointer for database connection db. ** If an error occurs, return NULL and leave an error in the database ** handle (accessible using sqlite3_errcode()/errmsg()). */ static int fts5_api_from_db(sqlite3 *db, fts5_api **ppApi){ sqlite3_stmt *pStmt = 0; int rc; *ppApi = 0; rc = sqlite3_prepare(db, "SELECT fts5()", -1, &pStmt, 0); if( rc==SQLITE_OK ){ if( SQLITE_ROW==sqlite3_step(pStmt) && sizeof(fts5_api*)==sqlite3_column_bytes(pStmt, 0) ){ memcpy(ppApi, sqlite3_column_blob(pStmt, 0), sizeof(fts5_api*)); } rc = sqlite3_finalize(pStmt); } return rc; } /* ** Argument f should be a flag accepted by matchinfo() (a valid character ** in the string passed as the second argument). If it is not, -1 is ** returned. Otherwise, if f is a valid matchinfo flag, the value returned |
︙ | ︙ | |||
395 396 397 398 399 400 401 | int sqlite3Fts5TestRegisterMatchinfo(sqlite3 *db){ int rc; /* Return code */ fts5_api *pApi; /* FTS5 API functions */ /* Extract the FTS5 API pointer from the database handle. The ** fts5_api_from_db() function above is copied verbatim from the ** FTS5 documentation. Refer there for details. */ | | > | 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 | int sqlite3Fts5TestRegisterMatchinfo(sqlite3 *db){ int rc; /* Return code */ fts5_api *pApi; /* FTS5 API functions */ /* Extract the FTS5 API pointer from the database handle. The ** fts5_api_from_db() function above is copied verbatim from the ** FTS5 documentation. Refer there for details. */ rc = fts5_api_from_db(db, &pApi); if( rc!=SQLITE_OK ) return rc; /* If fts5_api_from_db() returns NULL, then either FTS5 is not registered ** with this database handle, or an error (OOM perhaps?) has occurred. ** ** Also check that the fts5_api object is version 2 or newer. */ if( pApi==0 || pApi->iVersion<2 ){ |
︙ | ︙ |
Changes to ext/fts5/fts5parse.y.
︙ | ︙ | |||
100 101 102 103 104 105 106 | } expr(A) ::= LP expr(X) RP. {A = X;} expr(A) ::= exprlist(X). {A = X;} exprlist(A) ::= cnearset(X). {A = X;} exprlist(A) ::= exprlist(X) cnearset(Y). { | | | 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 | } expr(A) ::= LP expr(X) RP. {A = X;} expr(A) ::= exprlist(X). {A = X;} exprlist(A) ::= cnearset(X). {A = X;} exprlist(A) ::= exprlist(X) cnearset(Y). { A = sqlite3Fts5ParseImplicitAnd(pParse, X, Y); } cnearset(A) ::= nearset(X). { A = sqlite3Fts5ParseNode(pParse, FTS5_STRING, 0, 0, X); } cnearset(A) ::= colset(X) COLON nearset(Y). { sqlite3Fts5ParseSetColset(pParse, Y, X); |
︙ | ︙ |
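The grammar change routes the implicit-AND case (two query terms written side by side with no operator) through the new sqlite3Fts5ParseImplicitAnd(), which also folds away empty phrases. The fts5eb.test expectations updated by this check-in show the effect; for example, using the fts5_expr() debugging function available in test builds:

    SELECT fts5_expr('abc def');      -- "abc" AND "def"
    SELECT fts5_expr('abc "" def');   -- "abc" AND "def"   (empty phrase dropped)
    SELECT fts5_expr('abc OR ""');    -- "abc" OR ""       (explicit operators keep it)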
Changes to ext/fts5/test/fts5_common.tcl.
︙ | ︙ | |||
154 155 156 157 158 159 160 161 162 163 164 165 166 167 | fts5_test_queryphrase fts5_test_phrasecount } { sqlite3_fts5_create_function $db $f $f } } proc fts5_level_segs {tbl} { set sql "SELECT fts5_decode(rowid,block) aS r FROM ${tbl}_data WHERE rowid=10" set ret [list] foreach L [lrange [db one $sql] 1 end] { lappend ret [expr [llength $L] - 3] } | > > > > > > | 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 | fts5_test_queryphrase fts5_test_phrasecount } { sqlite3_fts5_create_function $db $f $f } } proc fts5_segcount {tbl} { set N 0 foreach n [fts5_level_segs $tbl] { incr N $n } set N } proc fts5_level_segs {tbl} { set sql "SELECT fts5_decode(rowid,block) aS r FROM ${tbl}_data WHERE rowid=10" set ret [list] foreach L [lrange [db one $sql] 1 end] { lappend ret [expr [llength $L] - 3] } |
︙ | ︙ |
Changes to ext/fts5/test/fts5config.test.
︙ | ︙ | |||
242 243 244 245 246 247 248 249 250 251 | set res [list 1 {malformed detail=... directive}] do_catchsql_test 11.$tn "CREATE VIRTUAL TABLE f1 USING fts5(x, $opt)" $res } do_catchsql_test 12.1 { INSERT INTO t1(t1, rank) VALUES('rank', NULL);; } {1 {SQL logic error or missing database}} finish_test | > > > > > > > > > > > > > > > > | 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 | set res [list 1 {malformed detail=... directive}] do_catchsql_test 11.$tn "CREATE VIRTUAL TABLE f1 USING fts5(x, $opt)" $res } do_catchsql_test 12.1 { INSERT INTO t1(t1, rank) VALUES('rank', NULL);; } {1 {SQL logic error or missing database}} #------------------------------------------------------------------------- # errors in the 'usermerge' option # do_execsql_test 13.0 { CREATE VIRTUAL TABLE tt USING fts5(ttt); } foreach {tn val} { 1 -1 2 4.2 3 17 4 1 } { set sql "INSERT INTO tt(tt, rank) VALUES('usermerge', $val)" do_catchsql_test 13.$tn $sql {1 {SQL logic error or missing database}} } finish_test |
Changes to ext/fts5/test/fts5eb.test.
︙ | ︙ | |||
29 30 31 32 33 34 35 | do_execsql_test $tn {SELECT fts5_expr($se_expr)} [list $res] } foreach {tn expr res} { 1 {abc} {"abc"} 2 {abc ""} {"abc"} 3 {""} {} | | | | | | | | 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 | do_execsql_test $tn {SELECT fts5_expr($se_expr)} [list $res] } foreach {tn expr res} { 1 {abc} {"abc"} 2 {abc ""} {"abc"} 3 {""} {} 4 {abc OR ""} {"abc" OR ""} 5 {abc NOT ""} {"abc" NOT ""} 6 {abc AND ""} {"abc" AND ""} 7 {"" OR abc} {"" OR "abc"} 8 {"" NOT abc} {"" NOT "abc"} 9 {"" AND abc} {"" AND "abc"} 10 {abc + "" + def} {"abc" + "def"} 11 {abc "" def} {"abc" AND "def"} 12 {r+e OR w} {"r" + "e" OR "w"} 13 {a AND b NOT c} {"a" AND ("b" NOT "c")} 14 {a OR b NOT c} {"a" OR ("b" NOT "c")} 15 {a NOT b AND c} {("a" NOT "b") AND "c"} |
︙ | ︙ |
Changes to ext/fts5/test/fts5fault8.test.
︙ | ︙ | |||
50 51 52 53 54 55 56 57 58 59 60 | if {[detail_is_none]==0} { do_faultsim_test 3 -faults oom-* -body { execsql { SELECT rowid FROM t1('b:2') } } -test { faultsim_test_result {0 {1 3}} {1 SQLITE_NOMEM} } } } ;# foreach_detail_mode... finish_test | > > > > > > > > > > > > > > > > > > > > > > > > > | 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 | if {[detail_is_none]==0} { do_faultsim_test 3 -faults oom-* -body { execsql { SELECT rowid FROM t1('b:2') } } -test { faultsim_test_result {0 {1 3}} {1 SQLITE_NOMEM} } } } ;# foreach_detail_mode... do_execsql_test 4.0 { CREATE VIRTUAL TABLE x2 USING fts5(a); INSERT INTO x2(x2, rank) VALUES('crisismerge', 2); INSERT INTO x2(x2, rank) VALUES('pgsz', 32); INSERT INTO x2 VALUES('a b c d'); INSERT INTO x2 VALUES('e f g h'); INSERT INTO x2 VALUES('i j k l'); INSERT INTO x2 VALUES('m n o p'); INSERT INTO x2 VALUES('q r s t'); INSERT INTO x2 VALUES('u v w x'); INSERT INTO x2 VALUES('y z a b'); } faultsim_save_and_close do_faultsim_test 4 -faults oom-* -prep { faultsim_restore_and_reopen } -body { execsql { INSERT INTO x2(x2) VALUES('optimize') } } -test { faultsim_test_result {0 {}} {1 SQLITE_NOMEM} } finish_test |
Added ext/fts5/test/fts5fuzz1.test.
> > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 | # 2014 June 17 # # The author disclaims copyright to this source code. In place of # a legal notice, here is a blessing: # # May you do good and not evil. # May you find forgiveness for yourself and forgive others. # May you share freely, never taking more than you give. # #************************************************************************* # This file implements regression tests for SQLite library. The # focus of this script is testing the FTS5 module. # source [file join [file dirname [info script]] fts5_common.tcl] return_if_no_fts5 set testprefix fts5fuzz1 #------------------------------------------------------------------------- reset_db do_catchsql_test 1.1 { CREATE VIRTUAL TABLE f1 USING fts5(a b); } {/1 {parse error in.*}/} #------------------------------------------------------------------------- reset_db do_execsql_test 2.1 { CREATE VIRTUAL TABLE f1 USING fts5(a, b); INSERT INTO f1 VALUES('a b', 'c d'); INSERT INTO f1 VALUES('e f', 'a b'); } do_execsql_test 2.2.1 { SELECT rowid FROM f1('""'); } {} do_execsql_test 2.2.2 { SELECT rowid FROM f1('"" AND a'); } {} do_execsql_test 2.2.3 { SELECT rowid FROM f1('"" a'); } {1 2} do_execsql_test 2.2.4 { SELECT rowid FROM f1('"" OR a'); } {1 2} do_execsql_test 2.3 { SELECT a, b FROM f1('NEAR("")'); } {} do_execsql_test 2.4 { SELECT a, b FROM f1('NEAR("", 5)'); } {} do_execsql_test 2.5 { SELECT a, b FROM f1('NEAR("" c, 5)'); } {{a b} {c d}} do_execsql_test 2.6 { SELECT a, b FROM f1('NEAR("" c d, 5)'); } {{a b} {c d}} do_execsql_test 2.7 { SELECT a, b FROM f1('NEAR(c d, 5)'); } {{a b} {c d}} do_execsql_test 2.8 { SELECT rowid FROM f1('NEAR("a" "b", 5)'); } {1 2} #------------------------------------------------------------------------- reset_db do_execsql_test 3.2 { CREATE VIRTUAL TABLE f2 USING fts5(o, t, tokenize="ascii separators abc"); SELECT * FROM f2('a+4'); } {} #------------------------------------------------------------------------- reset_db do_catchsql_test 4.1 { CREATE VIRTUAL TABLE f2 USING fts5(o, t); SELECT * FROM f2('(8 AND 9)`AND 10'); } {1 {fts5: syntax error near "`"}} finish_test |
Changes to ext/fts5/test/fts5merge.test.
︙ | ︙ | |||
41 42 43 44 45 46 47 | WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<$::nRowPerSeg) INSERT INTO x8 SELECT repeat('x y ', i % 16) FROM ii; WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<$::nRowPerSeg) INSERT INTO x8 SELECT repeat('x y ', i % 16) FROM ii; | | | 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 | WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<$::nRowPerSeg) INSERT INTO x8 SELECT repeat('x y ', i % 16) FROM ii; WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<$::nRowPerSeg) INSERT INTO x8 SELECT repeat('x y ', i % 16) FROM ii; INSERT INTO x8(x8, rank) VALUES('usermerge', 2); } for {set tn 1} {[lindex [fts5_level_segs x8] 0]>0} {incr tn} { do_execsql_test $testname.$tn { INSERT INTO x8(x8, rank) VALUES('merge', 1); INSERT INTO x8(x8) VALUES('integrity-check'); } |
︙ | ︙ | |||
80 81 82 83 84 85 86 | set ::nRow $nRow do_test $testname.1 { for {set i 0} {$i < $::nRow} {incr i} { execsql { INSERT INTO x8 VALUES( rnddoc(($i%16) + 5) ) } while {[not_merged x8]} { execsql { | | | | | | | | | 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 | set ::nRow $nRow do_test $testname.1 { for {set i 0} {$i < $::nRow} {incr i} { execsql { INSERT INTO x8 VALUES( rnddoc(($i%16) + 5) ) } while {[not_merged x8]} { execsql { INSERT INTO x8(x8, rank) VALUES('usermerge', 2); INSERT INTO x8(x8, rank) VALUES('merge', 1); INSERT INTO x8(x8, rank) VALUES('usermerge', 16); INSERT INTO x8(x8) VALUES('integrity-check'); } } } } {} } proc not_merged {tbl} { set segs [fts5_level_segs $tbl] foreach s $segs { if {$s>1} { return 1 } } return 0 } do_merge2_test 2.1 5 do_merge2_test 2.2 10 do_merge2_test 2.3 20 #------------------------------------------------------------------------- # Test that a merge will complete any merge that has already been # started, even if the number of input segments is less than the current # value of the 'usermerge' configuration parameter. # db func rnddoc fts5_rnddoc do_execsql_test 3.1 { DROP TABLE IF EXISTS x8; CREATE VIRTUAL TABLE x8 USING fts5(i); INSERT INTO x8(x8, rank) VALUES('pgsz', 32); INSERT INTO x8 VALUES(rnddoc(100)); INSERT INTO x8 VALUES(rnddoc(100)); } do_test 3.2 { execsql { INSERT INTO x8(x8, rank) VALUES('usermerge', 4); INSERT INTO x8(x8, rank) VALUES('merge', 1); } fts5_level_segs x8 } {2} do_test 3.3 { execsql { INSERT INTO x8(x8, rank) VALUES('usermerge', 2); INSERT INTO x8(x8, rank) VALUES('merge', 1); } fts5_level_segs x8 } {2 1} do_test 3.4 { execsql { INSERT INTO x8(x8, rank) VALUES('usermerge', 4) } while {[not_merged x8]} { execsql { INSERT INTO x8(x8, rank) VALUES('merge', 1) } } fts5_level_segs x8 } {0 1} #------------------------------------------------------------------------- |
︙ | ︙ | |||
172 173 174 175 176 177 178 | } do_execsql_test 4.$tn.3 { WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<100) INSERT INTO x8 SELECT mydoc() FROM ii; WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<100) INSERT INTO x8 SELECT mydoc() FROM ii; | | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 | } do_execsql_test 4.$tn.3 { WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<100) INSERT INTO x8 SELECT mydoc() FROM ii; WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<100) INSERT INTO x8 SELECT mydoc() FROM ii; INSERT INTO x8(x8, rank) VALUES('usermerge', 2); } set expect [mycount] for {set i 0} {$i < 20} {incr i} { do_test 4.$tn.4.$i { execsql { INSERT INTO x8(x8, rank) VALUES('merge', 1); } mycount } $expect break } # db eval {SELECT fts5_decode(rowid, block) AS r FROM x8_data} { puts $r } } #------------------------------------------------------------------------- # Test that the 'merge' command does not modify the database if there is # no work to do. do_execsql_test 5.1 { CREATE VIRTUAL TABLE x9 USING fts5(one, two); INSERT INTO x9(x9, rank) VALUES('pgsz', 32); INSERT INTO x9(x9, rank) VALUES('automerge', 2); INSERT INTO x9(x9, rank) VALUES('usermerge', 2); INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100)); INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100)); INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100)); INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100)); INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100)); INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100)); INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100)); INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100)); } do_test 5.2 { while 1 { set nChange [db total_changes] execsql { INSERT INTO x9(x9, rank) VALUES('merge', 1); } set nChange [expr [db total_changes] - $nChange] #puts $nChange if {$nChange<2} break } } {} #-------------------------------------------------------------------------- # Test that running 'merge' on an empty database does not cause a # problem. # reset_db do_execsql_test 6.0 { CREATE VIRTUAL TABLE g1 USING fts5(a, b); } do_execsql_test 6.1 { INSERT INTO g1(g1, rank) VALUES('merge', 10); } do_execsql_test 6.2 { INSERT INTO g1(g1, rank) VALUES('merge', -10); } do_execsql_test 6.3 { INSERT INTO g1(g1) VALUES('integrity-check'); } finish_test |
Changes to ext/fts5/test/fts5optimize.test.
︙ | ︙ | |||
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 | set testprefix fts5optimize # If SQLITE_ENABLE_FTS5 is defined, omit this file. ifcapable !fts5 { finish_test return } proc rnddoc {nWord} { set vocab {a b c d e f g h i j k l m n o p q r s t u v w x y z} set nVocab [llength $vocab] set ret [list] for {set i 0} {$i < $nWord} {incr i} { lappend ret [lindex $vocab [expr {int(rand() * $nVocab)}]] } return $ret } | > > > > > > < < | 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 | set testprefix fts5optimize # If SQLITE_ENABLE_FTS5 is defined, omit this file. ifcapable !fts5 { finish_test return } # # 1.* - Warm body tests for index optimization using ('optimize') # # 2.* - Warm body tests for index optimization using ('merge', -1) # proc rnddoc {nWord} { set vocab {a b c d e f g h i j k l m n o p q r s t u v w x y z} set nVocab [llength $vocab] set ret [list] for {set i 0} {$i < $nWord} {incr i} { lappend ret [lindex $vocab [expr {int(rand() * $nVocab)}]] } return $ret } foreach {tn nStep} { 1 2 2 10 3 50 4 500 } { reset_db db func rnddoc rnddoc do_execsql_test 1.$tn.1 { CREATE VIRTUAL TABLE t1 USING fts5(x, y); } do_test 1.$tn.2 { for {set i 0} {$i < $nStep} {incr i} { |
︙ | ︙ | |||
56 57 58 59 60 61 62 | do_execsql_test 1.$tn.4 { INSERT INTO t1(t1) VALUES('optimize'); } do_execsql_test 1.$tn.5 { INSERT INTO t1(t1) VALUES('integrity-check'); } | | > | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 | do_execsql_test 1.$tn.4 { INSERT INTO t1(t1) VALUES('optimize'); } do_execsql_test 1.$tn.5 { INSERT INTO t1(t1) VALUES('integrity-check'); } do_test 1.$tn.6 { fts5_segcount t1 } 1 } foreach {tn nStep} { 1 2 2 10 3 50 4 500 } { reset_db db func rnddoc rnddoc do_execsql_test 1.$tn.1 { CREATE VIRTUAL TABLE t1 USING fts5(x, y); } do_test 2.$tn.2 { for {set i 0} {$i < $nStep} {incr i} { execsql { INSERT INTO t1 VALUES( rnddoc(5), rnddoc(5) ) } } } {} do_execsql_test 2.$tn.3 { INSERT INTO t1(t1) VALUES('integrity-check'); } do_test 2.$tn.4 { execsql { INSERT INTO t1(t1, rank) VALUES('merge', -1) } while 1 { set c [db total_changes] execsql { INSERT INTO t1(t1, rank) VALUES('merge', 1) } set c [expr [db total_changes]-$c] if {$c<2} break } } {} do_execsql_test 2.$tn.5 { INSERT INTO t1(t1) VALUES('integrity-check'); } do_test 2.$tn.6 { fts5_segcount t1 } 1 } finish_test |
Changes to ext/misc/spellfix.c.
︙ | ︙ | |||
1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 | sqlite3_value **argv ){ const unsigned char *zIn = sqlite3_value_text(argv[0]); int nIn = sqlite3_value_bytes(argv[0]); int c, sz; int scriptMask = 0; int res; # define SCRIPT_LATIN 0x0001 # define SCRIPT_CYRILLIC 0x0002 # define SCRIPT_GREEK 0x0004 # define SCRIPT_HEBREW 0x0008 # define SCRIPT_ARABIC 0x0010 while( nIn>0 ){ c = utf8Read(zIn, nIn, &sz); zIn += sz; nIn -= sz; | > > | | > > > > | 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 | sqlite3_value **argv ){ const unsigned char *zIn = sqlite3_value_text(argv[0]); int nIn = sqlite3_value_bytes(argv[0]); int c, sz; int scriptMask = 0; int res; int seenDigit = 0; # define SCRIPT_LATIN 0x0001 # define SCRIPT_CYRILLIC 0x0002 # define SCRIPT_GREEK 0x0004 # define SCRIPT_HEBREW 0x0008 # define SCRIPT_ARABIC 0x0010 while( nIn>0 ){ c = utf8Read(zIn, nIn, &sz); zIn += sz; nIn -= sz; if( c<0x02af ){ if( c>=0x80 || midClass[c&0x7f]<CCLASS_DIGIT ){ scriptMask |= SCRIPT_LATIN; }else if( c>='0' && c<='9' ){ seenDigit = 1; } }else if( c>=0x0400 && c<=0x04ff ){ scriptMask |= SCRIPT_CYRILLIC; }else if( c>=0x0386 && c<=0x03ce ){ scriptMask |= SCRIPT_GREEK; }else if( c>=0x0590 && c<=0x05ff ){ scriptMask |= SCRIPT_HEBREW; }else if( c>=0x0600 && c<=0x06ff ){ scriptMask |= SCRIPT_ARABIC; } } if( scriptMask==0 && seenDigit ) scriptMask = SCRIPT_LATIN; switch( scriptMask ){ case 0: res = 999; break; case SCRIPT_LATIN: res = 215; break; case SCRIPT_CYRILLIC: res = 220; break; case SCRIPT_GREEK: res = 200; break; case SCRIPT_HEBREW: res = 125; break; case SCRIPT_ARABIC: res = 160; break; |
︙ | ︙ |
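The spellfix change tracks whether any ASCII digit was seen and, if no script was otherwise detected, classifies the string as Latin instead of returning the "undetermined" code 999. A hedged example, assuming the spellfix extension is loaded and its script-detection function is exposed under its usual name spellfix1_scriptcode():

    SELECT spellfix1_scriptcode('abc');   -- 215 (Latin)
    SELECT spellfix1_scriptcode('2016');  -- 215 after this change (previously 999)
    SELECT spellfix1_scriptcode('αβγ');   -- 200 (Greek)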
Changes to src/btree.c.
︙ | ︙ | |||
7574 7575 7576 7577 7578 7579 7580 | /* Obscure case for non-leaf-data trees: If the cell at pCell was ** previously stored on a leaf node, and its reported size was 4 ** bytes, then it may actually be smaller than this ** (see btreeParseCellPtr(), 4 bytes is the minimum size of ** any cell). But it is important to pass the correct size to ** insertCell(), so reparse the cell now. ** | < < | > > | 7574 7575 7576 7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 | /* Obscure case for non-leaf-data trees: If the cell at pCell was ** previously stored on a leaf node, and its reported size was 4 ** bytes, then it may actually be smaller than this ** (see btreeParseCellPtr(), 4 bytes is the minimum size of ** any cell). But it is important to pass the correct size to ** insertCell(), so reparse the cell now. ** ** This can only happen for b-trees used to evaluate "IN (SELECT ...)" ** and WITHOUT ROWID tables with exactly one column which is the ** primary key. */ if( b.szCell[j]==4 ){ assert(leafCorrection==4); sz = pParent->xCellSize(pParent, pCell); } } iOvflSpace += sz; |
︙ | ︙ |
Changes to src/build.c.
︙ | ︙ | |||
1131 1132 1133 1134 1135 1136 1137 | ** SQLITE_AFF_NUMERIC is returned. */ char sqlite3AffinityType(const char *zIn, u8 *pszEst){ u32 h = 0; char aff = SQLITE_AFF_NUMERIC; const char *zChar = 0; | | | 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 | ** SQLITE_AFF_NUMERIC is returned. */ char sqlite3AffinityType(const char *zIn, u8 *pszEst){ u32 h = 0; char aff = SQLITE_AFF_NUMERIC; const char *zChar = 0; assert( zIn!=0 ); while( zIn[0] ){ h = (h<<8) + sqlite3UpperToLower[(*zIn)&0xff]; zIn++; if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ){ /* CHAR */ aff = SQLITE_AFF_TEXT; zChar = zIn; }else if( h==(('c'<<24)+('l'<<16)+('o'<<8)+'b') ){ /* CLOB */ |
︙ | ︙ |
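The build.c hunk only tightens sqlite3AffinityType() to assert that the declared-type string is never NULL, but the surrounding code is the substring matching that implements SQLite's documented type-affinity rules. As a reminder of that mapping (documented behaviour, not part of this change):

    CREATE TABLE t(
      a CHAR(10),   -- contains "CHAR"  -> TEXT affinity
      b CLOB,       -- contains "CLOB"  -> TEXT affinity
      c BIGINT,     -- contains "INT"   -> INTEGER affinity
      d DOUBLE,     -- contains "DOUB"  -> REAL affinity
      e BLOB,       -- "BLOB"           -> no affinity (BLOB)
      f             -- no declared type -> BLOB affinity
    );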
Changes to src/expr.c.
︙ | ︙ | |||
1564 1565 1566 1567 1568 1569 1570 | if( sqlite3StrICmp(z, "_ROWID_")==0 ) return 1; if( sqlite3StrICmp(z, "ROWID")==0 ) return 1; if( sqlite3StrICmp(z, "OID")==0 ) return 1; return 0; } /* | | | < | < | | < < < | > > | > > | > | > | | 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 | if( sqlite3StrICmp(z, "_ROWID_")==0 ) return 1; if( sqlite3StrICmp(z, "ROWID")==0 ) return 1; if( sqlite3StrICmp(z, "OID")==0 ) return 1; return 0; } /* ** pX is the RHS of an IN operator. If pX is a SELECT statement ** that can be simplified to a direct table access, then return ** a pointer to the SELECT statement. If pX is not a SELECT statement, ** or if the SELECT statement needs to be manifested into a transient ** table, then return NULL. */ #ifndef SQLITE_OMIT_SUBQUERY static Select *isCandidateForInOpt(Expr *pX){ Select *p; SrcList *pSrc; ExprList *pEList; Expr *pRes; Table *pTab; if( !ExprHasProperty(pX, EP_xIsSelect) ) return 0; /* Not a subquery */ if( ExprHasProperty(pX, EP_VarSelect) ) return 0; /* Correlated subq */ p = pX->x.pSelect; if( p->pPrior ) return 0; /* Not a compound SELECT */ if( p->selFlags & (SF_Distinct|SF_Aggregate) ){ testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct ); testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate ); return 0; /* No DISTINCT keyword and no aggregate functions */ } assert( p->pGroupBy==0 ); /* Has no GROUP BY clause */ if( p->pLimit ) return 0; /* Has no LIMIT clause */ assert( p->pOffset==0 ); /* No LIMIT means no OFFSET */ if( p->pWhere ) return 0; /* Has no WHERE clause */ pSrc = p->pSrc; assert( pSrc!=0 ); if( pSrc->nSrc!=1 ) return 0; /* Single term in FROM clause */ if( pSrc->a[0].pSelect ) return 0; /* FROM is not a subquery or view */ pTab = pSrc->a[0].pTab; assert( pTab!=0 ); assert( pTab->pSelect==0 ); /* FROM clause is not a view */ if( IsVirtual(pTab) ) return 0; /* FROM clause not a virtual table */ pEList = p->pEList; if( pEList->nExpr!=1 ) return 0; /* One column in the result set */ pRes = pEList->a[0].pExpr; if( pRes->op!=TK_COLUMN ) return 0; /* Result is a column */ assert( pRes->iTable==pSrc->a[0].iCursor ); /* Not a correlated subquery */ return p; } #endif /* SQLITE_OMIT_SUBQUERY */ /* ** Code an OP_Once instruction and allocate space for its flag. Return the ** address of the new instruction. */ |
︙ | ︙ | |||
1734 1735 1736 1737 1738 1739 1740 | assert( pX->op==TK_IN ); mustBeUnique = (inFlags & IN_INDEX_LOOP)!=0; /* Check to see if an existing table or index can be used to ** satisfy the query. This is preferable to generating a new ** ephemeral table. */ | < | < | 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 | assert( pX->op==TK_IN ); mustBeUnique = (inFlags & IN_INDEX_LOOP)!=0; /* Check to see if an existing table or index can be used to ** satisfy the query. This is preferable to generating a new ** ephemeral table. */ if( pParse->nErr==0 && (p = isCandidateForInOpt(pX))!=0 ){ sqlite3 *db = pParse->db; /* Database connection */ Table *pTab; /* Table <table>. */ Expr *pExpr; /* Expression <column> */ i16 iCol; /* Index of column <column> */ i16 iDb; /* Database idx for pTab */ assert( p->pEList!=0 ); /* Because of isCandidateForInOpt(p) */ assert( p->pEList->a[0].pExpr!=0 ); /* Because of isCandidateForInOpt(p) */ assert( p->pSrc!=0 ); /* Because of isCandidateForInOpt(p) */ pTab = p->pSrc->a[0].pTab; pExpr = p->pEList->a[0].pExpr; iCol = (i16)pExpr->iColumn; |
︙ | ︙ |
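The expr.c change turns isCandidateForInOpt() into a function that returns the candidate SELECT itself (or NULL), but the conditions are unchanged: the RHS of an IN may be evaluated against an existing table or index only if it is a non-correlated, non-compound, single-table, single-column SELECT with no WHERE, LIMIT, DISTINCT or aggregate. A sketch of a query that satisfies those conditions:

    CREATE TABLE t1(a INTEGER PRIMARY KEY, b);
    CREATE TABLE t2(x INTEGER PRIMARY KEY, y);
    -- The subquery is a plain single-column read of t2, so the IN operator can
    -- probe t2 directly instead of materializing an ephemeral table.
    SELECT b FROM t1 WHERE a IN (SELECT x FROM t2);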
Changes to src/memjournal.c.
︙ | ︙ | |||
65 66 67 68 69 70 71 | FileChunk *pFirst; /* Head of in-memory chunk-list */ FilePoint endpoint; /* Pointer to the end of the file */ FilePoint readpoint; /* Pointer to the end of the last xRead() */ int flags; /* xOpen flags */ sqlite3_vfs *pVfs; /* The "real" underlying VFS */ const char *zJournal; /* Name of the journal file */ | < < < < < < | | | | > > > > > > > | | | | | | | | | | | | | | | | | | | | | | < | 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 | FileChunk *pFirst; /* Head of in-memory chunk-list */ FilePoint endpoint; /* Pointer to the end of the file */ FilePoint readpoint; /* Pointer to the end of the last xRead() */ int flags; /* xOpen flags */ sqlite3_vfs *pVfs; /* The "real" underlying VFS */ const char *zJournal; /* Name of the journal file */ }; /* ** Read data from the in-memory journal file. This is the implementation ** of the sqlite3_vfs.xRead method. */ static int memjrnlRead( sqlite3_file *pJfd, /* The journal file from which to read */ void *zBuf, /* Put the results here */ int iAmt, /* Number of bytes to read */ sqlite_int64 iOfst /* Begin reading at this offset */ ){ MemJournal *p = (MemJournal *)pJfd; u8 *zOut = zBuf; int nRead = iAmt; int iChunkOffset; FileChunk *pChunk; #ifdef SQLITE_ENABLE_ATOMIC_WRITE if( (iAmt+iOfst)>p->endpoint.iOffset ){ return SQLITE_IOERR_SHORT_READ; } #endif assert( (iAmt+iOfst)<=p->endpoint.iOffset ); if( p->readpoint.iOffset!=iOfst || iOfst==0 ){ sqlite3_int64 iOff = 0; for(pChunk=p->pFirst; ALWAYS(pChunk) && (iOff+p->nChunkSize)<=iOfst; pChunk=pChunk->pNext ){ iOff += p->nChunkSize; } }else{ pChunk = p->readpoint.pChunk; } iChunkOffset = (int)(iOfst%p->nChunkSize); do { int iSpace = p->nChunkSize - iChunkOffset; int nCopy = MIN(nRead, (p->nChunkSize - iChunkOffset)); memcpy(zOut, (u8*)pChunk->zChunk + iChunkOffset, nCopy); zOut += nCopy; nRead -= iSpace; iChunkOffset = 0; } while( nRead>=0 && (pChunk=pChunk->pNext)!=0 && nRead>0 ); p->readpoint.iOffset = iOfst+iAmt; p->readpoint.pChunk = pChunk; return SQLITE_OK; } /* ** Free the list of FileChunk structures headed at MemJournal.pFirst. */ |
︙ | ︙ | |||
134 135 136 137 138 139 140 | p->pFirst = 0; } /* ** Flush the contents of memory to a real file on disk. */ static int memjrnlCreateFile(MemJournal *p){ | | < | > | > > | | | | < | < | | < | | > | | > > > > > | | | | | | | < < < < < < < < < < | > | | > | > > > > | 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 | p->pFirst = 0; } /* ** Flush the contents of memory to a real file on disk. */ static int memjrnlCreateFile(MemJournal *p){ int rc; sqlite3_file *pReal = (sqlite3_file*)p; MemJournal copy = *p; memset(p, 0, sizeof(MemJournal)); rc = sqlite3OsOpen(copy.pVfs, copy.zJournal, pReal, copy.flags, 0); if( rc==SQLITE_OK ){ int nChunk = copy.nChunkSize; i64 iOff = 0; FileChunk *pIter; for(pIter=copy.pFirst; pIter; pIter=pIter->pNext){ if( iOff + nChunk > copy.endpoint.iOffset ){ nChunk = copy.endpoint.iOffset - iOff; } rc = sqlite3OsWrite(pReal, (u8*)pIter->zChunk, nChunk, iOff); if( rc ) break; iOff += nChunk; } if( rc==SQLITE_OK ){ /* No error has occurred. Free the in-memory buffers. */ memjrnlFreeChunks(©); } } if( rc!=SQLITE_OK ){ /* If an error occurred while creating or writing to the file, restore ** the original before returning. This way, SQLite uses the in-memory ** journal data to roll back changes made to the internal page-cache ** before this function was called. */ sqlite3OsClose(pReal); *p = copy; } return rc; } /* ** Write data to the file. */ static int memjrnlWrite( sqlite3_file *pJfd, /* The journal file into which to write */ const void *zBuf, /* Take data to be written from here */ int iAmt, /* Number of bytes to write */ sqlite_int64 iOfst /* Begin writing at this offset into the file */ ){ MemJournal *p = (MemJournal *)pJfd; int nWrite = iAmt; u8 *zWrite = (u8 *)zBuf; /* If the file should be created now, create it and write the new data ** into the file on disk. */ if( p->nSpill>0 && (iAmt+iOfst)>p->nSpill ){ int rc = memjrnlCreateFile(p); if( rc==SQLITE_OK ){ rc = sqlite3OsWrite(pJfd, zBuf, iAmt, iOfst); } return rc; } /* If the contents of this write should be stored in memory */ else{ /* An in-memory journal file should only ever be appended to. Random ** access writes are not required. The only exception to this is when ** the in-memory journal is being used by a connection using the ** atomic-write optimization. In this case the first 28 bytes of the ** journal file may be written as part of committing the transaction. */ assert( iOfst==p->endpoint.iOffset || iOfst==0 ); #ifdef SQLITE_ENABLE_ATOMIC_WRITE if( iOfst==0 && p->pFirst ){ assert( p->nChunkSize>iAmt ); memcpy((u8*)p->pFirst->zChunk, zBuf, iAmt); }else #else assert( iOfst>0 || p->pFirst==0 ); #endif { while( nWrite>0 ){ FileChunk *pChunk = p->endpoint.pChunk; int iChunkOffset = (int)(p->endpoint.iOffset%p->nChunkSize); int iSpace = MIN(nWrite, p->nChunkSize - iChunkOffset); if( iChunkOffset==0 ){ /* New chunk is required to extend the file. */ |
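The new memjrnlCreateFile() takes a private copy of the MemJournal object, opens the real journal file over the same sqlite3_file slot, replays the chunk list into it, and restores the copy if anything fails, so an unsuccessful spill still leaves a usable in-memory journal for rollback. The underlying "append to fixed-size chunks, spill to a real file past a threshold" pattern looks roughly like the sketch below; this is not SQLite's internal API, just an illustration with invented names and a deliberately tiny chunk size.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK_SIZE 8               /* tiny on purpose so the example spills */

typedef struct Chunk Chunk;
struct Chunk { Chunk *pNext; char z[CHUNK_SIZE]; };
typedef struct MemBuf { Chunk *pFirst, *pLast; long nByte; } MemBuf;

/* Append n bytes, adding a new chunk whenever the last one fills up. */
static void membuf_append(MemBuf *p, const char *z, int n){
  while( n>0 ){
    int iOff = (int)(p->nByte % CHUNK_SIZE);
    if( iOff==0 ){
      Chunk *pNew = calloc(1, sizeof(Chunk));
      if( pNew==0 ) return;
      if( p->pLast ) p->pLast->pNext = pNew; else p->pFirst = pNew;
      p->pLast = pNew;
    }
    int nCopy = (n < CHUNK_SIZE-iOff) ? n : CHUNK_SIZE-iOff;
    memcpy(&p->pLast->z[iOff], z, nCopy);
    p->nByte += nCopy; z += nCopy; n -= nCopy;
  }
}

/* Once the buffer exceeds nSpill bytes, flush every chunk to a real file. */
static int membuf_spill(MemBuf *p, const char *zFile, long nSpill){
  if( p->nByte<=nSpill ) return 0;          /* still happy in memory */
  FILE *f = fopen(zFile, "wb");
  if( f==0 ) return 1;                      /* on failure keep the memory copy */
  long nLeft = p->nByte;
  for(Chunk *pC=p->pFirst; pC && nLeft>0; pC=pC->pNext){
    long nWrite = (nLeft<CHUNK_SIZE) ? nLeft : CHUNK_SIZE;
    fwrite(pC->z, 1, (size_t)nWrite, f);
    nLeft -= nWrite;
  }
  fclose(f);
  return 0;
}

int main(void){
  MemBuf b = {0, 0, 0};
  membuf_append(&b, "hello, journal!", 15);
  membuf_spill(&b, "spill.bin", 10);        /* 15 > 10, so this writes a file */
  printf("buffered %ld bytes\n", b.nByte);
  return 0;
}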
︙ | ︙ | |||
251 252 253 254 255 256 257 | ** ** If the journal file is already on disk, truncate it there. Or, if it ** is still in main memory but is being truncated to zero bytes in size, ** ignore */ static int memjrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){ MemJournal *p = (MemJournal *)pJfd; | < < | < < < | < < < < | 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 | ** ** If the journal file is already on disk, truncate it there. Or, if it ** is still in main memory but is being truncated to zero bytes in size, ** ignore */ static int memjrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){ MemJournal *p = (MemJournal *)pJfd; if( ALWAYS(size==0) ){ memjrnlFreeChunks(p); p->nSize = 0; p->endpoint.pChunk = 0; p->endpoint.iOffset = 0; p->readpoint.pChunk = 0; p->readpoint.iOffset = 0; } return SQLITE_OK; } /* ** Close the file. */ static int memjrnlClose(sqlite3_file *pJfd){ MemJournal *p = (MemJournal *)pJfd; memjrnlFreeChunks(p); return SQLITE_OK; } /* ** Sync the file. ** ** If the real file has been created, call its xSync method. Otherwise, ** syncing an in-memory journal is a no-op. */ static int memjrnlSync(sqlite3_file *pJfd, int flags){ UNUSED_PARAMETER2(pJfd, flags); return SQLITE_OK; } /* ** Query the size of the file in bytes. */ static int memjrnlFileSize(sqlite3_file *pJfd, sqlite_int64 *pSize){ MemJournal *p = (MemJournal *)pJfd; *pSize = (sqlite_int64) p->endpoint.iOffset; return SQLITE_OK; } /* ** Table of methods for MemJournal sqlite3_file object. */ |
︙ | ︙ | |||
350 351 352 353 354 355 356 | ){ MemJournal *p = (MemJournal*)pJfd; /* Zero the file-handle object. If nSpill was passed zero, initialize ** it using the sqlite3OsOpen() function of the underlying VFS. In this ** case none of the code in this module is executed as a result of calls ** made on the journal file-handle. */ | | | 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 | ){ MemJournal *p = (MemJournal*)pJfd; /* Zero the file-handle object. If nSpill was passed zero, initialize ** it using the sqlite3OsOpen() function of the underlying VFS. In this ** case none of the code in this module is executed as a result of calls ** made on the journal file-handle. */ memset(p, 0, sizeof(MemJournal)); if( nSpill==0 ){ return sqlite3OsOpen(pVfs, zName, pJfd, flags, 0); } if( nSpill>0 ){ p->nChunkSize = nSpill; }else{ |
︙ | ︙ | |||
399 400 401 402 403 404 405 | /* ** The file-handle passed as the only argument is open on a journal file. ** Return true if this "journal file" is currently stored in heap memory, ** or false otherwise. */ int sqlite3JournalIsInMemory(sqlite3_file *p){ | | | | 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 | /* ** The file-handle passed as the only argument is open on a journal file. ** Return true if this "journal file" is currently stored in heap memory, ** or false otherwise. */ int sqlite3JournalIsInMemory(sqlite3_file *p){ return p->pMethods==&MemJournalMethods; } /* ** Return the number of bytes required to store a JournalFile that uses vfs ** pVfs to create the underlying on-disk files. */ int sqlite3JournalSize(sqlite3_vfs *pVfs){ return MAX(pVfs->szOsFile, sizeof(MemJournal)); } |
Changes to src/pager.c.
︙ | ︙ | |||
7201 7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 | /* ** Return true if the underlying VFS for the given pager supports the ** primitives necessary for write-ahead logging. */ int sqlite3PagerWalSupported(Pager *pPager){ const sqlite3_io_methods *pMethods = pPager->fd->pMethods; return pPager->exclusiveMode || (pMethods->iVersion>=2 && pMethods->xShmMap); } /* ** Attempt to take an exclusive lock on the database file. If a PENDING lock ** is obtained instead, immediately release it. */ | > | 7201 7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 | /* ** Return true if the underlying VFS for the given pager supports the ** primitives necessary for write-ahead logging. */ int sqlite3PagerWalSupported(Pager *pPager){ const sqlite3_io_methods *pMethods = pPager->fd->pMethods; if( pPager->noLock ) return 0; return pPager->exclusiveMode || (pMethods->iVersion>=2 && pMethods->xShmMap); } /* ** Attempt to take an exclusive lock on the database file. If a PENDING lock ** is obtained instead, immediately release it. */ |
︙ | ︙ |
Changes to src/sqliteInt.h.
︙ | ︙ | |||
447 448 449 450 451 452 453 454 455 456 457 458 459 460 | #if defined(SQLITE_HAVE_OS_TRACE) || defined(SQLITE_TEST) || \ (defined(SQLITE_DEBUG) && SQLITE_OS_WIN) # define SQLITE_NEED_ERR_NAME #else # undef SQLITE_NEED_ERR_NAME #endif /* ** Return true (non-zero) if the input is an integer that is too large ** to fit in 32-bits. This macro is used inside of various testcase() ** macros to verify that we have tested SQLite for large-file support. */ #define IS_BIG_INT(X) (((X)&~(i64)0xffffffff)!=0) | > > > > > > > | 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 | #if defined(SQLITE_HAVE_OS_TRACE) || defined(SQLITE_TEST) || \ (defined(SQLITE_DEBUG) && SQLITE_OS_WIN) # define SQLITE_NEED_ERR_NAME #else # undef SQLITE_NEED_ERR_NAME #endif /* ** SQLITE_ENABLE_EXPLAIN_COMMENTS is incompatible with SQLITE_OMIT_EXPLAIN */ #ifdef SQLITE_OMIT_EXPLAIN # undef SQLITE_ENABLE_EXPLAIN_COMMENTS #endif /* ** Return true (non-zero) if the input is an integer that is too large ** to fit in 32-bits. This macro is used inside of various testcase() ** macros to verify that we have tested SQLite for large-file support. */ #define IS_BIG_INT(X) (((X)&~(i64)0xffffffff)!=0) |
︙ | ︙ |
Changes to src/util.c.
︙ | ︙ | |||
1420 1421 1422 1423 1424 1425 1426 | u64 sqlite3LogEstToInt(LogEst x){ u64 n; if( x<10 ) return 1; n = x%10; x /= 10; if( n>=5 ) n -= 2; else if( n>=1 ) n -= 1; | | > | < > > > > > | | 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 | u64 sqlite3LogEstToInt(LogEst x){ u64 n; if( x<10 ) return 1; n = x%10; x /= 10; if( n>=5 ) n -= 2; else if( n>=1 ) n -= 1; #if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || \ defined(SQLITE_EXPLAIN_ESTIMATED_ROWS) if( x>60 ) return (u64)LARGEST_INT64; #else /* If only SQLITE_ENABLE_STAT3_OR_STAT4 is on, then the largest input ** possible to this routine is 310, resulting in a maximum x of 31 */ assert( x<=60 ); #endif return x>=3 ? (n+8)<<(x-3) : (n+8)>>(3-x); } #endif /* defined SCANSTAT or STAT4 or ESTIMATED_ROWS */ |
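sqlite3LogEstToInt() decodes a LogEst, which stores roughly 10*log2(N) as a small integer. With scan-status or estimated-rows output enabled, inputs above about 600 (that is, estimates beyond 2^60 rows) are now clamped to the largest 64-bit value instead of relying on an assert. A stand-alone copy of the decoder with a few sample values follows; the clamp is applied unconditionally here for simplicity.

#include <stdio.h>
#include <stdint.h>

/* Stand-alone copy of the LogEst decoder: a LogEst stores ~10*log2(N),
** so 0 -> 1, 10 -> 2, 33 -> 10, 40 -> 16, 99 -> ~1000, 200 -> 2^20. */
static uint64_t logEstToInt(int x){
  uint64_t n;
  if( x<10 ) return 1;
  n = x%10;
  x /= 10;
  if( n>=5 ) n -= 2;
  else if( n>=1 ) n -= 1;
  if( x>60 ) return (uint64_t)0x7fffffffffffffffLL;   /* the new clamp */
  return x>=3 ? (n+8)<<(x-3) : (n+8)>>(3-x);
}

int main(void){
  int aSample[] = { 0, 10, 20, 33, 40, 99, 200, 610 };
  unsigned int i;
  for(i=0; i<sizeof(aSample)/sizeof(aSample[0]); i++){
    printf("LogEst %3d  ->  %llu\n", aSample[i],
           (unsigned long long)logEstToInt(aSample[i]));
  }
  return 0;
}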
Changes to src/where.c.
︙ | ︙ | |||
1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 | ** (3) The template has same or fewer dependencies than the current loop ** (4) The template has the same or lower cost than the current loop */ static int whereLoopInsert(WhereLoopBuilder *pBuilder, WhereLoop *pTemplate){ WhereLoop **ppPrev, *p; WhereInfo *pWInfo = pBuilder->pWInfo; sqlite3 *db = pWInfo->pParse->db; /* If pBuilder->pOrSet is defined, then only keep track of the costs ** and prereqs. */ if( pBuilder->pOrSet!=0 ){ if( pTemplate->nLTerm ){ #if WHERETRACE_ENABLED | > | 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 | ** (3) The template has same or fewer dependencies than the current loop ** (4) The template has the same or lower cost than the current loop */ static int whereLoopInsert(WhereLoopBuilder *pBuilder, WhereLoop *pTemplate){ WhereLoop **ppPrev, *p; WhereInfo *pWInfo = pBuilder->pWInfo; sqlite3 *db = pWInfo->pParse->db; int rc; /* If pBuilder->pOrSet is defined, then only keep track of the costs ** and prereqs. */ if( pBuilder->pOrSet!=0 ){ if( pTemplate->nLTerm ){ #if WHERETRACE_ENABLED |
︙ | ︙ | |||
2057 2058 2059 2060 2061 2062 2063 | sqlite3DebugPrintf(" delete: "); whereLoopPrint(pToDel, pBuilder->pWC); } #endif whereLoopDelete(db, pToDel); } } | | | | 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 | sqlite3DebugPrintf(" delete: "); whereLoopPrint(pToDel, pBuilder->pWC); } #endif whereLoopDelete(db, pToDel); } } rc = whereLoopXfer(db, p, pTemplate); if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){ Index *pIndex = p->u.btree.pIndex; if( pIndex && pIndex->tnum==0 ){ p->u.btree.pIndex = 0; } } return rc; } /* ** Adjust the WhereLoop.nOut value downward to account for terms of the ** WHERE clause that reference the loop but which are not used by an ** index. * |
︙ | ︙ | |||
2806 2807 2808 2809 2810 2811 2812 | ){ pIdxCons->usable = 1; } } /* Initialize the output fields of the sqlite3_index_info structure */ memset(pUsage, 0, sizeof(pUsage[0])*nConstraint); | | < | 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 | ){ pIdxCons->usable = 1; } } /* Initialize the output fields of the sqlite3_index_info structure */ memset(pUsage, 0, sizeof(pUsage[0])*nConstraint); assert( pIdxInfo->needToFreeIdxStr==0 ); pIdxInfo->idxStr = 0; pIdxInfo->idxNum = 0; pIdxInfo->orderByConsumed = 0; pIdxInfo->estimatedCost = SQLITE_BIG_DBL / (double)2; pIdxInfo->estimatedRows = 25; pIdxInfo->idxFlags = 0; pIdxInfo->colUsed = (sqlite3_int64)pSrc->colUsed; /* Invoke the virtual table xBestIndex() method */ |
︙ | ︙ | |||
2883 2884 2885 2886 2887 2888 2889 | /* Set the WHERE_ONEROW flag if the xBestIndex() method indicated ** that the scan will visit at most one row. Clear it otherwise. */ if( pIdxInfo->idxFlags & SQLITE_INDEX_SCAN_UNIQUE ){ pNew->wsFlags |= WHERE_ONEROW; }else{ pNew->wsFlags &= ~WHERE_ONEROW; } | | > > > | | 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 | /* Set the WHERE_ONEROW flag if the xBestIndex() method indicated ** that the scan will visit at most one row. Clear it otherwise. */ if( pIdxInfo->idxFlags & SQLITE_INDEX_SCAN_UNIQUE ){ pNew->wsFlags |= WHERE_ONEROW; }else{ pNew->wsFlags &= ~WHERE_ONEROW; } rc = whereLoopInsert(pBuilder, pNew); if( pNew->u.vtab.needFree ){ sqlite3_free(pNew->u.vtab.idxStr); pNew->u.vtab.needFree = 0; } WHERETRACE(0xffff, (" bIn=%d prereqIn=%04llx prereqOut=%04llx\n", *pbIn, (sqlite3_uint64)mPrereq, (sqlite3_uint64)(pNew->prereq & ~mPrereq))); return rc; } /* ** Add all WhereLoop objects for a table of the join identified by ** pBuilder->pNew->iTab. That table is guaranteed to be a virtual table. ** |
︙ | ︙ | |||
2954 2955 2956 2957 2958 2959 2960 2961 | nConstraint = p->nConstraint; if( whereLoopResize(pParse->db, pNew, nConstraint) ){ sqlite3DbFree(pParse->db, p); return SQLITE_NOMEM_BKPT; } /* First call xBestIndex() with all constraints usable. */ rc = whereLoopAddVirtualOne(pBuilder, mPrereq, ALLBITS, 0, p, &bIn); | > < | | | | | | > | 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 | nConstraint = p->nConstraint; if( whereLoopResize(pParse->db, pNew, nConstraint) ){ sqlite3DbFree(pParse->db, p); return SQLITE_NOMEM_BKPT; } /* First call xBestIndex() with all constraints usable. */ WHERETRACE(0x40, (" VirtualOne: all usable\n")); rc = whereLoopAddVirtualOne(pBuilder, mPrereq, ALLBITS, 0, p, &bIn); /* If the call to xBestIndex() with all terms enabled produced a plan ** that does not require any source tables (IOW: a plan with mBest==0), ** then there is no point in making any further calls to xBestIndex() ** since they will all return the same result (if the xBestIndex() ** implementation is sane). */ if( rc==SQLITE_OK && (mBest = (pNew->prereq & ~mPrereq))!=0 ){ int seenZero = 0; /* True if a plan with no prereqs seen */ int seenZeroNoIN = 0; /* Plan with no prereqs and no IN(...) seen */ Bitmask mPrev = 0; Bitmask mBestNoIn = 0; /* If the plan produced by the earlier call uses an IN(...) term, call ** xBestIndex again, this time with IN(...) terms disabled. */ if( bIn ){ WHERETRACE(0x40, (" VirtualOne: all usable w/o IN\n")); rc = whereLoopAddVirtualOne(pBuilder, mPrereq, ALLBITS, WO_IN, p, &bIn); assert( bIn==0 ); mBestNoIn = pNew->prereq & ~mPrereq; if( mBestNoIn==0 ){ seenZero = 1; seenZeroNoIN = 1; } |
︙ | ︙ | |||
2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 | pWC->a[p->aConstraint[i].iTermOffset].prereqRight & ~mPrereq ); if( mThis>mPrev && mThis<mNext ) mNext = mThis; } mPrev = mNext; if( mNext==ALLBITS ) break; if( mNext==mBest || mNext==mBestNoIn ) continue; rc = whereLoopAddVirtualOne(pBuilder, mPrereq, mNext|mPrereq, 0, p, &bIn); if( pNew->prereq==mPrereq ){ seenZero = 1; if( bIn==0 ) seenZeroNoIN = 1; } } /* If the calls to xBestIndex() in the above loop did not find a plan ** that requires no source tables at all (i.e. one guaranteed to be ** usable), make a call here with all source tables disabled */ if( rc==SQLITE_OK && seenZero==0 ){ rc = whereLoopAddVirtualOne(pBuilder, mPrereq, mPrereq, 0, p, &bIn); if( bIn==0 ) seenZeroNoIN = 1; } /* If the calls to xBestIndex() have so far failed to find a plan ** that requires no source tables at all and does not use an IN(...) ** operator, make a final call to obtain one here. */ if( rc==SQLITE_OK && seenZeroNoIN==0 ){ rc = whereLoopAddVirtualOne(pBuilder, mPrereq, mPrereq, WO_IN, p, &bIn); } } if( p->needToFreeIdxStr ) sqlite3_free(p->idxStr); sqlite3DbFree(pParse->db, p); return rc; | > > > > | 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 | pWC->a[p->aConstraint[i].iTermOffset].prereqRight & ~mPrereq ); if( mThis>mPrev && mThis<mNext ) mNext = mThis; } mPrev = mNext; if( mNext==ALLBITS ) break; if( mNext==mBest || mNext==mBestNoIn ) continue; WHERETRACE(0x40, (" VirtualOne: mPrev=%04llx mNext=%04llx\n", (sqlite3_uint64)mPrev, (sqlite3_uint64)mNext)); rc = whereLoopAddVirtualOne(pBuilder, mPrereq, mNext|mPrereq, 0, p, &bIn); if( pNew->prereq==mPrereq ){ seenZero = 1; if( bIn==0 ) seenZeroNoIN = 1; } } /* If the calls to xBestIndex() in the above loop did not find a plan ** that requires no source tables at all (i.e. one guaranteed to be ** usable), make a call here with all source tables disabled */ if( rc==SQLITE_OK && seenZero==0 ){ WHERETRACE(0x40, (" VirtualOne: all disabled\n")); rc = whereLoopAddVirtualOne(pBuilder, mPrereq, mPrereq, 0, p, &bIn); if( bIn==0 ) seenZeroNoIN = 1; } /* If the calls to xBestIndex() have so far failed to find a plan ** that requires no source tables at all and does not use an IN(...) ** operator, make a final call to obtain one here. */ if( rc==SQLITE_OK && seenZeroNoIN==0 ){ WHERETRACE(0x40, (" VirtualOne: all disabled and w/o IN\n")); rc = whereLoopAddVirtualOne(pBuilder, mPrereq, mPrereq, WO_IN, p, &bIn); } } if( p->needToFreeIdxStr ) sqlite3_free(p->idxStr); sqlite3DbFree(pParse->db, p); return rc; |
︙ | ︙ |
Changes to test/analyzer1.test.
︙ | ︙ | |||
21 22 23 24 25 26 27 | if {$tcl_platform(platform)=="windows"} { set PROG "sqlite3_analyzer.exe" } else { set PROG "./sqlite3_analyzer" } if {![file exe $PROG]} { | > > | | | > | 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 | if {$tcl_platform(platform)=="windows"} { set PROG "sqlite3_analyzer.exe" } else { set PROG "./sqlite3_analyzer" } if {![file exe $PROG]} { set PROG [file normalize [file join $::cmdlinearg(TESTFIXTURE_HOME) $PROG]] if {![file exe $PROG]} { puts "analyzer1 cannot run because $PROG is not available" finish_test return } } db close forcedelete test.db test.db-journal test.db-wal sqlite3 db test.db do_test analyzer1-1.0 { db eval { |
︙ | ︙ |
Changes to test/autovacuum.test.
︙ | ︙ | |||
265 266 267 268 269 270 271 | do_test autovacuum-2.4.3 { execsql { SELECT rootpage FROM sqlite_master ORDER by rootpage } } {3 4 5 6 7 8 9 10} # Right now there are 5 free pages in the database. Consume and then free | | > > > > > > > > > > > > | < | 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 | do_test autovacuum-2.4.3 { execsql { SELECT rootpage FROM sqlite_master ORDER by rootpage } } {3 4 5 6 7 8 9 10} # Right now there are 5 free pages in the database. Consume and then free # all 520 pages. Then create 520 tables. This ensures that at least some of the # desired root-pages reside on the second free-list trunk page, and that the # trunk itself is required at some point. do_test autovacuum-2.4.4 { execsql " INSERT INTO av3 VALUES ('[make_str abcde [expr 1020*520 + 500]]'); DELETE FROM av3; " } {} set root_page_list [list] set pending_byte_page [expr ($::sqlite_pending_byte / 1024) + 1] # unusable_pages # These are either the pending_byte page or the pointer map pages # unset -nocomplain unusable_page if {[sqlite3 -has-codec]} { array set unusable_page {205 1 408 1} } else { array set unusable_page {207 1 412 1} } set unusable_page($pending_byte_page) 1 for {set i 3} {$i<=532} {incr i} { if {![info exists unusable_page($i)]} { lappend root_page_list $i } } if {$i >= $pending_byte_page} { lappend root_page_list $i } do_test autovacuum-2.4.5 { |
︙ | ︙ |
Changes to test/backcompat.test.
︙ | ︙ | |||
81 82 83 84 85 86 87 | array set ::incompatible [list] proc do_allbackcompat_test {script} { foreach bin $::BC(binaries) { set nErr [set_test_counter errors] foreach dir {0 1} { | | > | 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 | array set ::incompatible [list] proc do_allbackcompat_test {script} { foreach bin $::BC(binaries) { set nErr [set_test_counter errors] foreach dir {0 1} { set bintag $bin regsub {.*testfixture\.} $bintag {} bintag set bintag [string map {\.exe {}} $bintag] if {$bintag == ""} {set bintag self} set ::bcname ".$bintag.$dir." rename do_test _do_test proc do_test {nm sql res} { set nm [regsub {\.} $nm $::bcname] |
︙ | ︙ |
Changes to test/backup4.test.
︙ | ︙ | |||
18 19 20 21 22 23 24 25 26 27 28 29 30 31 | # schema cookie and change counter. Doing that could cause other clients # to become confused and continue using out-of-date cache data. # set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix backup4 #------------------------------------------------------------------------- # At one point this test was failing because [db] was using an out of # date schema in test case 1.2. # do_execsql_test 1.0 { CREATE TABLE t1(x, y, UNIQUE(x, y)); | > > > > > | 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 | # schema cookie and change counter. Doing that could cause other clients # to become confused and continue using out-of-date cache data. # set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix backup4 # The codec logic does not work for zero-length database files. A database # file must contain at least one page in order to be recognized as an # encrypted database. do_not_use_codec #------------------------------------------------------------------------- # At one point this test was failing because [db] was using an out of # date schema in test case 1.2. # do_execsql_test 1.0 { CREATE TABLE t1(x, y, UNIQUE(x, y)); |
︙ | ︙ |
Changes to test/bc_common.tcl.
1 2 3 4 5 6 7 8 9 | proc bc_find_binaries {zCaption} { # Search for binaries to test against. Any executable files that match # our naming convention are assumed to be testfixture binaries to test # against. # set binaries [list] | | | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 | proc bc_find_binaries {zCaption} { # Search for binaries to test against. Any executable files that match # our naming convention are assumed to be testfixture binaries to test # against. # set binaries [list] set self [info nameofexec] set pattern "$self?*" if {$::tcl_platform(platform)=="windows"} { set pattern [string map {\.exe {}} $pattern] } foreach file [glob -nocomplain $pattern] { if {$file==$self} continue if {[file executable $file] && [file isfile $file]} {lappend binaries $file} |
︙ | ︙ | |||
48 49 50 51 52 53 54 | proc code2 {tcl} { testfixture $::bc_chan $tcl } proc sql1 sql { code1 [list db eval $sql] } proc sql2 sql { code2 [list db eval $sql] } code1 { sqlite3 db test.db } code2 { sqlite3 db test.db } | | > | 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 | proc code2 {tcl} { testfixture $::bc_chan $tcl } proc sql1 sql { code1 [list db eval $sql] } proc sql2 sql { code2 [list db eval $sql] } code1 { sqlite3 db test.db } code2 { sqlite3 db test.db } set bintag $bin regsub {.*testfixture\.} $bintag {} bintag set bintag [string map {\.exe {}} $bintag] if {$bintag == ""} {set bintag self} set saved_prefix $::testprefix append ::testprefix ".$bintag" uplevel $script |
︙ | ︙ |
Changes to test/bestindex1.test.
︙ | ︙ | |||
10 11 12 13 14 15 16 17 18 19 20 21 22 23 | #*********************************************************************** # # set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix bestindex1 register_tcl_module db proc vtab_command {method args} { switch -- $method { xConnect { return "CREATE TABLE t1(a, b, c)" | > > > > > | 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 | #*********************************************************************** # # set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix bestindex1 ifcapable !vtab { finish_test return } register_tcl_module db proc vtab_command {method args} { switch -- $method { xConnect { return "CREATE TABLE t1(a, b, c)" |
︙ | ︙ | |||
157 158 159 160 161 162 163 | do_eqp_test 2.2.$mode.6 { SELECT rowid FROM t1 WHERE a IN ('one', 'four') ORDER BY +rowid } $plan($mode) } finish_test | < < | 162 163 164 165 166 167 168 | do_eqp_test 2.2.$mode.6 { SELECT rowid FROM t1 WHERE a IN ('one', 'four') ORDER BY +rowid } $plan($mode) } finish_test |
Changes to test/bestindex2.test.
︙ | ︙ | |||
9 10 11 12 13 14 15 16 17 18 19 20 21 22 | # #*********************************************************************** set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix bestindex2 #------------------------------------------------------------------------- # Virtual table callback for table named $tbl, with the columns specified # by list argument $cols. e.g. if the function is invoked as: # # vtab_cmd t1 {a b c} ... # | > > > > | 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 | # #*********************************************************************** set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix bestindex2 ifcapable !vtab { finish_test return } #------------------------------------------------------------------------- # Virtual table callback for table named $tbl, with the columns specified # by list argument $cols. e.g. if the function is invoked as: # # vtab_cmd t1 {a b c} ... # |
︙ | ︙ | |||
131 132 133 134 135 136 137 | 0 0 0 {SCAN TABLE x1} 0 1 1 {SCAN TABLE t1 VIRTUAL TABLE INDEX 0:} 0 2 2 {SCAN TABLE t2 VIRTUAL TABLE INDEX 0:indexed(c=?)} 0 3 3 {SCAN TABLE t3 VIRTUAL TABLE INDEX 0:indexed(e=?)} } finish_test | < | 135 136 137 138 139 140 141 | 0 0 0 {SCAN TABLE x1} 0 1 1 {SCAN TABLE t1 VIRTUAL TABLE INDEX 0:} 0 2 2 {SCAN TABLE t2 VIRTUAL TABLE INDEX 0:indexed(c=?)} 0 3 3 {SCAN TABLE t3 VIRTUAL TABLE INDEX 0:indexed(e=?)} } finish_test |
Changes to test/close.test.
︙ | ︙ | |||
12 13 14 15 16 17 18 19 20 21 22 23 24 25 | # Test some specific circumstances to do with shared cache mode. # set testdir [file dirname $argv0] source $testdir/tester.tcl set ::testprefix close do_execsql_test 1.0 { CREATE TABLE t1(x); INSERT INTO t1 VALUES('one'); INSERT INTO t1 VALUES('two'); INSERT INTO t1 VALUES('three'); } | > > > > | 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 | # Test some specific circumstances to do with shared cache mode. # set testdir [file dirname $argv0] source $testdir/tester.tcl set ::testprefix close # This module bypasses the "-key" logic in tester.tcl, so it cannot run # with the codec enabled. do_not_use_codec do_execsql_test 1.0 { CREATE TABLE t1(x); INSERT INTO t1 VALUES('one'); INSERT INTO t1 VALUES('two'); INSERT INTO t1 VALUES('three'); } |
︙ | ︙ |
Changes to test/corrupt2.test.
︙ | ︙ | |||
342 343 344 345 346 347 348 | hexio_write corrupt.db [expr 1024 + ($nPage-3)*5] 010000000 } -test { do_test corrupt2-6.3 { catchsql " $::presql pragma incremental_vacuum = 1 " } {1 {database disk image is malformed}} } | > | | | | | | | | | | | | | | | | | | | | | > | 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 | hexio_write corrupt.db [expr 1024 + ($nPage-3)*5] 010000000 } -test { do_test corrupt2-6.3 { catchsql " $::presql pragma incremental_vacuum = 1 " } {1 {database disk image is malformed}} } if {![nonzero_reserved_bytes]} { corruption_test -sqlprep { PRAGMA auto_vacuum = 1; PRAGMA page_size = 1024; CREATE TABLE t1(a INTEGER PRIMARY KEY, b); INSERT INTO t1 VALUES(1, randomblob(2500)); DELETE FROM t1 WHERE a = 1; } -corrupt { set nAppend [expr 1024*207 - [file size corrupt.db]] set fd [open corrupt.db r+] seek $fd 0 end puts -nonewline $fd [string repeat x $nAppend] close $fd hexio_write corrupt.db 28 00000000 } -test { do_test corrupt2-6.4 { catchsql " $::presql BEGIN EXCLUSIVE; COMMIT; " } {1 {database disk image is malformed}} } } } set sqlprep { PRAGMA auto_vacuum = 0; PRAGMA page_size = 1024; |
︙ | ︙ |
Changes to test/corrupt3.test.
︙ | ︙ | |||
14 15 16 17 18 19 20 | # segfault if it sees a corrupt database file. # # $Id: corrupt3.test,v 1.2 2007/04/06 21:42:22 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl | | < < | > | 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 | # segfault if it sees a corrupt database file. # # $Id: corrupt3.test,v 1.2 2007/04/06 21:42:22 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # This module uses hard-coded offsets which do not work if the reserved_bytes # value is nonzero. if {[nonzero_reserved_bytes]} {finish_test; return;} # These tests deal with corrupt database files # database_may_be_corrupt # We must have the page_size pragma for these tests to work. # |
︙ | ︙ |
Changes to test/corrupt4.test.
︙ | ︙ | |||
14 15 16 17 18 19 20 | # segfault if it sees a corrupt database file. # # $Id: corrupt4.test,v 1.1 2007/09/07 14:32:07 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl | | < < | > | 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 | # segfault if it sees a corrupt database file. # # $Id: corrupt4.test,v 1.1 2007/09/07 14:32:07 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # This module uses hard-coded offsets which do not work if the reserved_bytes # value is nonzero. if {[nonzero_reserved_bytes]} {finish_test; return;} # These tests deal with corrupt database files # database_may_be_corrupt # We must have the page_size pragma for these tests to work. # |
︙ | ︙ |
Changes to test/corrupt6.test.
︙ | ︙ | |||
15 16 17 18 19 20 21 | # on corrupt SerialTypeLen values. # # $Id: corrupt6.test,v 1.2 2008/05/19 15:37:10 shane Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl | | < < | > | 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 | # on corrupt SerialTypeLen values. # # $Id: corrupt6.test,v 1.2 2008/05/19 15:37:10 shane Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # This module uses hard-coded offsets which do not work if the reserved_bytes # value is nonzero. if {[nonzero_reserved_bytes]} {finish_test; return;} # These tests deal with corrupt database files # database_may_be_corrupt # We must have the page_size pragma for these tests to work. # |
︙ | ︙ |
Changes to test/corrupt7.test.
︙ | ︙ | |||
15 16 17 18 19 20 21 | # on corrupt cell offsets in a btree page. # # $Id: corrupt7.test,v 1.8 2009/08/10 10:18:08 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl | | < < | > | 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 | # on corrupt cell offsets in a btree page. # # $Id: corrupt7.test,v 1.8 2009/08/10 10:18:08 danielk1977 Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # This module uses hard-coded offsets which do not work if the reserved_bytes # value is nonzero. if {[nonzero_reserved_bytes]} {finish_test; return;} # These tests deal with corrupt database files # database_may_be_corrupt # We must have the page_size pragma for these tests to work. # |
︙ | ︙ |
Changes to test/corruptE.test.
︙ | ︙ | |||
14 15 16 17 18 19 20 | # segfault if it sees a corrupt database file. It specifically # focuses on rowid order corruption. # set testdir [file dirname $argv0] source $testdir/tester.tcl | | < < | > | 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 | # segfault if it sees a corrupt database file. It specifically # focuses on rowid order corruption. # set testdir [file dirname $argv0] source $testdir/tester.tcl # This module uses hard-coded offsets which do not work if the reserved_bytes # value is nonzero. if {[nonzero_reserved_bytes]} {finish_test; return;} # These tests deal with corrupt database files # database_may_be_corrupt # Do not run the tests in this file if ENABLE_OVERSIZE_CELL_CHECK is on. #
︙ | ︙ |
Changes to test/corruptG.test.
︙ | ︙ | |||
10 11 12 13 14 15 16 | #*********************************************************************** # set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix corruptG | | < < | > | 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 | #*********************************************************************** # set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix corruptG # This module uses hard-coded offsets which do not work if the reserved_bytes # value is nonzero. if {[nonzero_reserved_bytes]} {finish_test; return;} # These tests deal with corrupt database files # database_may_be_corrupt # Create a simple database with a single entry. Then corrupt the # header-size varint on the index payload so that it maps into a |
︙ | ︙ |
Changes to test/corruptH.test.
︙ | ︙ | |||
10 11 12 13 14 15 16 | #*********************************************************************** # set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix corruptH | | | > | < | 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 | #*********************************************************************** # set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix corruptH # This module uses hard-coded offsets which do not work if the reserved_bytes # value is nonzero. if {[nonzero_reserved_bytes]} {finish_test; return;} database_may_be_corrupt # The corruption migrations tested by the code in this file are not detected # mmap mode. # # The reason is that in mmap mode, the different queries may use different # PgHdr objects for the same page (same data, but different PgHdr container |
︙ | ︙ |
Changes to test/corruptI.test.
︙ | ︙ | |||
15 16 17 18 19 20 21 | set testprefix corruptI if {[permutation]=="mmap"} { finish_test return } | | | > | < | 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 | set testprefix corruptI if {[permutation]=="mmap"} { finish_test return } # This module uses hard-coded offsets which do not work if the reserved_bytes # value is nonzero. if {[nonzero_reserved_bytes]} {finish_test; return;} database_may_be_corrupt # Initialize the database. # do_execsql_test 1.1 { PRAGMA page_size=1024; PRAGMA auto_vacuum=0; |
︙ | ︙ |
Changes to test/corruptJ.test.
︙ | ︙ | |||
18 19 20 21 22 23 24 | set testprefix corruptJ if {[permutation]=="mmap"} { finish_test return } | | | > | < | 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 | set testprefix corruptJ if {[permutation]=="mmap"} { finish_test return } # This module uses hard-coded offsets which do not work if the reserved_bytes # value is nonzero. if {[nonzero_reserved_bytes]} {finish_test; return;} database_may_be_corrupt # Initialize the database. # do_execsql_test 1.1 { PRAGMA page_size=1024; PRAGMA auto_vacuum=0; |
︙ | ︙ |
Changes to test/crash8.test.
︙ | ︙ | |||
21 22 23 24 25 26 27 28 29 30 31 32 33 34 | set testdir [file dirname $argv0] source $testdir/tester.tcl ifcapable !crashtest { finish_test return } do_test crash8-1.1 { execsql { PRAGMA auto_vacuum=OFF; CREATE TABLE t1(a, b); CREATE INDEX i1 ON t1(a, b); INSERT INTO t1 VALUES(1, randstr(1000,1000)); | > | 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 | set testdir [file dirname $argv0] source $testdir/tester.tcl ifcapable !crashtest { finish_test return } do_not_use_codec do_test crash8-1.1 { execsql { PRAGMA auto_vacuum=OFF; CREATE TABLE t1(a, b); CREATE INDEX i1 ON t1(a, b); INSERT INTO t1 VALUES(1, randstr(1000,1000)); |
︙ | ︙ |
Changes to test/e_uri.test.
︙ | ︙ | |||
9 10 11 12 13 14 15 | # #*********************************************************************** # set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix e_uri | | | 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 | # #*********************************************************************** # set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix e_uri do_not_use_codec db close proc parse_uri {uri} { testvfs tvfs2 testvfs tvfs tvfs filter xOpen tvfs script parse_uri_open_cb |
︙ | ︙ |
Changes to test/e_vacuum.test.
︙ | ︙ | |||
155 156 157 158 159 160 161 | } {1024 1} do_test e_vacuum-1.3.1.2 { execsql { PRAGMA page_size = 2048 } execsql { PRAGMA auto_vacuum = NONE } execsql { PRAGMA page_size ; PRAGMA auto_vacuum } } {1024 1} | > | | | | | | | | | | | | | | | | | | | | | | | | | | | | | > | 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 | } {1024 1} do_test e_vacuum-1.3.1.2 { execsql { PRAGMA page_size = 2048 } execsql { PRAGMA auto_vacuum = NONE } execsql { PRAGMA page_size ; PRAGMA auto_vacuum } } {1024 1} if {![nonzero_reserved_bytes]} { # EVIDENCE-OF: R-08570-19916 However, when not in write-ahead log mode, # the page_size and/or auto_vacuum properties of an existing database # may be changed by using the page_size and/or pragma auto_vacuum # pragmas and then immediately VACUUMing the database. # do_test e_vacuum-1.3.2.1 { execsql { PRAGMA journal_mode = delete } execsql { PRAGMA page_size = 2048 } execsql { PRAGMA auto_vacuum = NONE } execsql VACUUM execsql { PRAGMA page_size ; PRAGMA auto_vacuum } } {2048 0} # EVIDENCE-OF: R-48521-51450 When in write-ahead log mode, only the # auto_vacuum support property can be changed using VACUUM. # ifcapable wal { do_test e_vacuum-1.3.3.1 { execsql { PRAGMA journal_mode = wal } execsql { PRAGMA page_size ; PRAGMA auto_vacuum } } {2048 0} do_test e_vacuum-1.3.3.2 { execsql { PRAGMA page_size = 1024 } execsql { PRAGMA auto_vacuum = FULL } execsql VACUUM execsql { PRAGMA page_size ; PRAGMA auto_vacuum } } {2048 1} } } # EVIDENCE-OF: R-38001-03952 VACUUM only works on the main database. It # is not possible to VACUUM an attached database file. forcedelete test.db2 create_db { PRAGMA auto_vacuum = NONE } do_execsql_test e_vacuum-2.1.1 { ATTACH 'test.db2' AS aux; PRAGMA aux.page_size = 1024; |
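The surviving assertions in this hunk are that, outside of WAL mode, page_size and auto_vacuum changes only take effect when followed immediately by VACUUM, and that with a codec (nonzero reserved bytes) the page-size half is skipped because VACUUM cannot resize the pages. A short sketch of the documented PRAGMA-then-VACUUM sequence through the C API; it uses the empty-filename form to open a private temporary database so nothing is left behind.

#include <stdio.h>
#include <sqlite3.h>

/* Evaluate a one-row, one-column query and return its integer result. */
static int one_int(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  int v = -1;
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK
   && sqlite3_step(pStmt)==SQLITE_ROW ){
    v = sqlite3_column_int(pStmt, 0);
  }
  sqlite3_finalize(pStmt);
  return v;
}

int main(void){
  sqlite3 *db;
  sqlite3_open("", &db);                 /* "" = private temporary database */
  sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES(1);", 0, 0, 0);
  printf("page_size before: %d\n", one_int(db, "PRAGMA page_size"));
  /* The pragma alone does nothing once the file already has content; the
  ** VACUUM that follows rebuilds the database with the new page size. */
  sqlite3_exec(db, "PRAGMA page_size=2048; VACUUM;", 0, 0, 0);
  printf("page_size after:  %d\n", one_int(db, "PRAGMA page_size"));
  sqlite3_close(db);
  return 0;
}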
︙ | ︙ |
Changes to test/e_walauto.test.
︙ | ︙ | |||
19 20 21 22 23 24 25 26 27 28 29 30 31 32 | # accessing the same coherent view of the "test.db-shm" file. This doesn't # work on OpenBSD. # if {$tcl_platform(os) == "OpenBSD"} { finish_test return } proc read_nbackfill {} { seek $::shmfd 96 binary scan [read $::shmfd 4] n nBackfill set nBackfill } proc read_mxframe {} { | > > > > > | 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 | # accessing the same coherent view of the "test.db-shm" file. This doesn't # work on OpenBSD. # if {$tcl_platform(os) == "OpenBSD"} { finish_test return } # This module uses hard-coded offsets which do not work if the reserved_bytes # value is nonzero. if {[nonzero_reserved_bytes]} {finish_test; return;} proc read_nbackfill {} { seek $::shmfd 96 binary scan [read $::shmfd 4] n nBackfill set nBackfill } proc read_mxframe {} { |
︙ | ︙ |
Changes to test/eqp.test.
︙ | ︙ | |||
512 513 514 515 516 517 518 | 1 0 0 {SCAN TABLE t1 USING COVERING INDEX i1} 2 0 0 {SCAN TABLE t2} 2 0 0 {USE TEMP B-TREE FOR ORDER BY} 0 0 0 {COMPOUND SUBQUERIES 1 AND 2 (EXCEPT)} } | > | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | > | 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 | 1 0 0 {SCAN TABLE t1 USING COVERING INDEX i1} 2 0 0 {SCAN TABLE t2} 2 0 0 {USE TEMP B-TREE FOR ORDER BY} 0 0 0 {COMPOUND SUBQUERIES 1 AND 2 (EXCEPT)} } if {![nonzero_reserved_bytes]} { #------------------------------------------------------------------------- # The following tests - eqp-6.* - test that the example C code on # documentation page eqp.html works. The C code is duplicated in test1.c # and wrapped in Tcl command [print_explain_query_plan] # set boilerplate { proc explain_query_plan {db sql} { set stmt [sqlite3_prepare_v2 db $sql -1 DUMMY] print_explain_query_plan $stmt sqlite3_finalize $stmt } sqlite3 db test.db explain_query_plan db {%SQL%} db close exit } # Do a "Print Explain Query Plan" test. proc do_peqp_test {tn sql res} { set fd [open script.tcl w] puts $fd [string map [list %SQL% $sql] $::boilerplate] close $fd uplevel do_test $tn [list { set fd [open "|[info nameofexec] script.tcl"] set data [read $fd] close $fd set data }] [list $res] } do_peqp_test 6.1 { SELECT a, b FROM t1 EXCEPT SELECT d, 99 FROM t2 ORDER BY 1 } [string trimleft { 1 0 0 SCAN TABLE t1 USING COVERING INDEX i2 2 0 0 SCAN TABLE t2 2 0 0 USE TEMP B-TREE FOR ORDER BY 0 0 0 COMPOUND SUBQUERIES 1 AND 2 (EXCEPT) }] } #------------------------------------------------------------------------- # The following tests - eqp-7.* - test that queries that use the OP_Count # optimization return something sensible with EQP. # drop_all_tables |
︙ | ︙ |
Changes to test/filefmt.test.
︙ | ︙ | |||
140 141 142 143 144 145 146 | PRAGMA auto_vacuum = 0; CREATE TABLE t1(a); CREATE INDEX i1 ON t1(a); INSERT INTO t1 VALUES(a_string(3000)); CREATE TABLE t2(a); INSERT INTO t2 VALUES(1); } {} | > | | | > | 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 | PRAGMA auto_vacuum = 0; CREATE TABLE t1(a); CREATE INDEX i1 ON t1(a); INSERT INTO t1 VALUES(a_string(3000)); CREATE TABLE t2(a); INSERT INTO t2 VALUES(1); } {} if {![nonzero_reserved_bytes]} { do_test filefmt-2.1.2 { hexio_read test.db 28 4 } {00000009} } do_test filefmt-2.1.3 { sql36231 { INSERT INTO t1 VALUES(a_string(3000)) } } {} do_execsql_test filefmt-2.1.4 { INSERT INTO t2 VALUES(2) } {} integrity_check filefmt-2.1.5 |
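The hexio_read calls above inspect byte offset 28 of the database header, which holds the in-header database size as a 4-byte big-endian page count (trusted only when the change counter at offset 24 matches the version-valid-for number at offset 92); the test expects 9 pages once the schema exists. A small reader for that field follows; the file name is just an example.

#include <stdio.h>

int main(void){
  unsigned char aHdr[32];
  FILE *f = fopen("test.db", "rb");
  if( f==0 ){ fprintf(stderr, "cannot open test.db\n"); return 1; }
  if( fread(aHdr, 1, sizeof(aHdr), f)!=sizeof(aHdr) ){
    fprintf(stderr, "short read\n"); fclose(f); return 1;
  }
  fclose(f);
  /* Bytes 28..31 of the header: database size in pages, big-endian. */
  unsigned long nPage = ((unsigned long)aHdr[28]<<24)
                      | ((unsigned long)aHdr[29]<<16)
                      | ((unsigned long)aHdr[30]<<8)
                      |  (unsigned long)aHdr[31];
  printf("in-header database size: %lu pages\n", nPage);
  return 0;
}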
︙ | ︙ | |||
166 167 168 169 170 171 172 | PRAGMA auto_vacuum = 0; CREATE TABLE t1(a); CREATE INDEX i1 ON t1(a); INSERT INTO t1 VALUES(a_string(3000)); CREATE TABLE t2(a); INSERT INTO t2 VALUES(1); } {} | > | | | > | 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 | PRAGMA auto_vacuum = 0; CREATE TABLE t1(a); CREATE INDEX i1 ON t1(a); INSERT INTO t1 VALUES(a_string(3000)); CREATE TABLE t2(a); INSERT INTO t2 VALUES(1); } {} if {![nonzero_reserved_bytes]} { do_test filefmt-2.2.2 { hexio_read test.db 28 4 } {00000009} } do_test filefmt-2.2.3 { sql36231 { INSERT INTO t1 VALUES(a_string(3000)) } } {} do_execsql_test filefmt-2.2.4 { PRAGMA integrity_check; |
︙ | ︙ |
Changes to test/fts4opt.test.
︙ | ︙ | |||
161 162 163 164 165 166 167 168 169 | } {33 1 1057 1 2081 1 3105 1} do_execsql_test 2.7 { INSERT INTO t2(t2) VALUES('integrity-check') } do_execsql_test 2.8 { INSERT INTO t2(words) SELECT words FROM t1; SELECT level, count(*) FROM t2_segdir GROUP BY level; } {0 2 1024 2 2048 2 3072 2} finish_test | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 | } {33 1 1057 1 2081 1 3105 1} do_execsql_test 2.7 { INSERT INTO t2(t2) VALUES('integrity-check') } do_execsql_test 2.8 { INSERT INTO t2(words) SELECT words FROM t1; SELECT level, count(*) FROM t2_segdir GROUP BY level; } {0 2 1024 2 2048 2 3072 2} #------------------------------------------------------------------------- # Check that 'optimize' works when there is data in the in-memory hash # table, but no segments at all on disk. # do_execsql_test 3.1 { CREATE VIRTUAL TABLE fts USING fts4 (t); INSERT INTO fts (fts) VALUES ('optimize'); } do_execsql_test 3.2 { INSERT INTO fts(fts) VALUES('integrity-check'); SELECT count(*) FROM fts_segdir; } {0} do_execsql_test 3.3 { BEGIN; INSERT INTO fts (rowid, t) VALUES (2, 'test'); INSERT INTO fts (fts) VALUES ('optimize'); COMMIT; SELECT level, idx FROM fts_segdir; } {0 0} do_execsql_test 3.4 { INSERT INTO fts(fts) VALUES('integrity-check'); SELECT rowid FROM fts WHERE fts MATCH 'test'; } {2} do_execsql_test 3.5 { INSERT INTO fts (fts) VALUES ('optimize'); INSERT INTO fts(fts) VALUES('integrity-check'); } do_test 3.6 { set c1 [db total_changes] execsql { INSERT INTO fts (fts) VALUES ('optimize') } expr {[db total_changes] - $c1} } {1} do_test 3.7 { execsql { INSERT INTO fts (rowid, t) VALUES (3, 'xyz') } set c1 [db total_changes] execsql { INSERT INTO fts (fts) VALUES ('optimize') } expr {([db total_changes] - $c1) > 1} } {1} do_test 3.8 { set c1 [db total_changes] execsql { INSERT INTO fts (fts) VALUES ('optimize') } expr {[db total_changes] - $c1} } {1} finish_test |
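The new cases 3.1-3.8 exercise the FTS4 'optimize' command when the only pending data lives in the in-memory hash table (inside an open transaction) or when there are no segments on disk at all. The same special INSERT commands can be issued through the C API; this sketch assumes the library was built with FTS4 available (for example with SQLITE_ENABLE_FTS4), and the table name is invented.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  sqlite3_open(":memory:", &db);
  int rc = sqlite3_exec(db,
    "CREATE VIRTUAL TABLE ft USING fts4(t);"
    "BEGIN;"
    "INSERT INTO ft(rowid, t) VALUES(2, 'test');"
    /* The special 'optimize' insert merges all segments; at this point the */
    /* pending data is still only in the transaction's in-memory hash table. */
    "INSERT INTO ft(ft) VALUES('optimize');"
    "COMMIT;"
    "INSERT INTO ft(ft) VALUES('integrity-check');",
    0, 0, &zErr);
  printf("rc=%d %s\n", rc, zErr ? zErr : "ok");
  sqlite3_free(zErr);
  sqlite3_close(db);
  return 0;
}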
Changes to test/in5.test.
︙ | ︙ | |||
178 179 180 181 182 183 184 185 186 | do_execsql_test 6.3.1 { CREATE TABLE x1(a); CREATE TABLE x2(b); INSERT INTO x1 VALUES(1), (1), (2); INSERT INTO x2 VALUES(1), (2); SELECT count(*) FROM x2 WHERE b IN (SELECT DISTINCT a FROM x1 LIMIT 2); } {2} finish_test | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 | do_execsql_test 6.3.1 { CREATE TABLE x1(a); CREATE TABLE x2(b); INSERT INTO x1 VALUES(1), (1), (2); INSERT INTO x2 VALUES(1), (2); SELECT count(*) FROM x2 WHERE b IN (SELECT DISTINCT a FROM x1 LIMIT 2); } {2} #------------------------------------------------------------------------- # Test to confirm that bug [5e3c886796e5] is fixed. # do_execsql_test 7.1 { CREATE TABLE y1(a, b); CREATE TABLE y2(c); INSERT INTO y1 VALUES(1, 'one'); INSERT INTO y1 VALUES('two', 'two'); INSERT INTO y1 VALUES(3, 'three'); INSERT INTO y2 VALUES('one'); INSERT INTO y2 VALUES('two'); INSERT INTO y2 VALUES('three'); } {} do_execsql_test 7.2.1 { SELECT a FROM y1 WHERE b NOT IN (SELECT a FROM y2); } {1 3} do_execsql_test 7.2.2 { SELECT a FROM y1 WHERE b IN (SELECT a FROM y2); } {two} do_execsql_test 7.3.1 { CREATE INDEX y2c ON y2(c); SELECT a FROM y1 WHERE b NOT IN (SELECT a FROM y2); } {1 3} do_execsql_test 7.3.2 { SELECT a FROM y1 WHERE b IN (SELECT a FROM y2); } {two} finish_test finish_test |
Changes to test/incrblob.test.
︙ | ︙ | |||
122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 | db close forcedelete test.db test.db-journal sqlite3 db test.db execsql "PRAGMA mmap_size = 0" execsql "PRAGMA auto_vacuum = $AutoVacuumMode" do_test incrblob-2.$AutoVacuumMode.1 { set ::str [string repeat abcdefghij 2900] execsql { BEGIN; CREATE TABLE blobs(k PRIMARY KEY, v BLOB, i INTEGER); DELETE FROM blobs; INSERT INTO blobs VALUES('one', $::str || randstr(500,500), 45); COMMIT; } expr [file size test.db]/1024 | > > > > > | | 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 | db close forcedelete test.db test.db-journal sqlite3 db test.db execsql "PRAGMA mmap_size = 0" execsql "PRAGMA auto_vacuum = $AutoVacuumMode" # Extra value added to size answers set ib2_extra 0 if {$AutoVacuumMode} {incr ib2_extra} if {[nonzero_reserved_bytes]} {incr ib2_extra} do_test incrblob-2.$AutoVacuumMode.1 { set ::str [string repeat abcdefghij 2900] execsql { BEGIN; CREATE TABLE blobs(k PRIMARY KEY, v BLOB, i INTEGER); DELETE FROM blobs; INSERT INTO blobs VALUES('one', $::str || randstr(500,500), 45); COMMIT; } expr [file size test.db]/1024 } [expr 31 + $ib2_extra] ifcapable autovacuum { do_test incrblob-2.$AutoVacuumMode.2 { execsql { PRAGMA auto_vacuum; } } $AutoVacuumMode |
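These tests drive incremental blob I/O through the Tcl wrapper [db incrblob]; the C-level equivalent is sqlite3_blob_open()/sqlite3_blob_read()/sqlite3_blob_write() over a column pre-sized with zeroblob(), which is what lets a large value be touched without loading the whole overflow chain. A minimal sketch with invented table and column names:

#include <stdio.h>
#include <string.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_blob *pBlob = 0;
  char aBuf[12];
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE blobs(k PRIMARY KEY, v BLOB);"
    "INSERT INTO blobs VALUES(1, zeroblob(12));", 0, 0, 0);
  /* Open row 1 of blobs.v for read/write (last flag argument: 1 = writable). */
  if( sqlite3_blob_open(db, "main", "blobs", "v", 1, 1, &pBlob)==SQLITE_OK ){
    sqlite3_blob_write(pBlob, "hello, blob", 11, 0);   /* write at offset 0 */
    sqlite3_blob_read(pBlob, aBuf, 11, 0);             /* read it back */
    aBuf[11] = 0;
    printf("%s (%d bytes)\n", aBuf, sqlite3_blob_bytes(pBlob));
  }
  sqlite3_blob_close(pBlob);
  sqlite3_close(db);
  return 0;
}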
︙ | ︙ | |||
159 160 161 162 163 164 165 | close $::blob # If the database is not in auto-vacuum mode, the whole of # the overflow-chain must be scanned. In auto-vacuum mode, # sqlite uses the ptrmap pages to avoid reading the other pages. # nRead db | | | 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 | close $::blob # If the database is not in auto-vacuum mode, the whole of # the overflow-chain must be scanned. In auto-vacuum mode, # sqlite uses the ptrmap pages to avoid reading the other pages. # nRead db } [expr $AutoVacuumMode ? 4 : 30+$ib2_extra] do_test incrblob-2.$AutoVacuumMode.4 { string range [db one {SELECT v FROM blobs}] end-19 end } $::fragment do_test incrblob-2.$AutoVacuumMode.5 { # Open and close the db to make sure the page cache is empty. |
︙ | ︙ | |||
183 184 185 186 187 188 189 | flush $::blob # If the database is not in auto-vacuum mode, the whole of # the overflow-chain must be scanned. In auto-vacuum mode, # sqlite uses the ptrmap pages to avoid reading the other pages. # nRead db | | | 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 | flush $::blob # If the database is not in auto-vacuum mode, the whole of # the overflow-chain must be scanned. In auto-vacuum mode, # sqlite uses the ptrmap pages to avoid reading the other pages. # nRead db } [expr $AutoVacuumMode ? 4 : 30 + $ib2_extra] # Pages 1 (the write-counter) and 32 (the blob data) were written. do_test incrblob-2.$AutoVacuumMode.6 { close $::blob nWrite db } 2 |
︙ | ︙ | |||
206 207 208 209 210 211 212 | execsql { PRAGMA mmap_size = 0 } execsql { SELECT i FROM blobs } } {45} do_test incrblob-2.$AutoVacuumMode.9 { nRead db | | | 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 | execsql { PRAGMA mmap_size = 0 } execsql { SELECT i FROM blobs } } {45} do_test incrblob-2.$AutoVacuumMode.9 { nRead db } [expr $AutoVacuumMode ? 4 : 30 + $ib2_extra] } sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit) #------------------------------------------------------------------------ # incrblob-3.*: # # Test the outcome of trying to write to a read-only blob handle. |
︙ | ︙ | |||
380 381 382 383 384 385 386 | # incrblob-5.*: # # Test that opening a blob in an attached database works. # ifcapable attach { do_test incrblob-5.1 { forcedelete test2.db test2.db-journal | | | | | | 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 | # incrblob-5.*: # # Test that opening a blob in an attached database works. # ifcapable attach { do_test incrblob-5.1 { forcedelete test2.db test2.db-journal set ::size [expr [file size $::cmdlinearg(INFO_SCRIPT)]] execsql { ATTACH 'test2.db' AS aux; CREATE TABLE aux.files(name, text); INSERT INTO aux.files VALUES('this one', zeroblob($::size)); } set fd [db incrblob aux files text 1] fconfigure $fd -translation binary set fd2 [open $::cmdlinearg(INFO_SCRIPT)] fconfigure $fd2 -translation binary puts -nonewline $fd [read $fd2] close $fd close $fd2 set ::text [db one {select text from aux.files}] string length $::text } [file size $::cmdlinearg(INFO_SCRIPT)] do_test incrblob-5.2 { set fd2 [open $::cmdlinearg(INFO_SCRIPT)] fconfigure $fd2 -translation binary set ::data [read $fd2] close $fd2 set ::data } $::text } |
︙ | ︙ | |||
572 573 574 575 576 577 578 | execsql { SELECT d FROM t1; } } {15} } | | | 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 | execsql { SELECT d FROM t1; } } {15} } set fd [open $::cmdlinearg(INFO_SCRIPT)] fconfigure $fd -translation binary set ::data [read $fd 14000] close $fd db close forcedelete test.db test.db-journal sqlite3 db test.db |
︙ | ︙ |
Changes to test/incrblob_err.test.
︙ | ︙ | |||
20 21 22 23 24 25 26 | finish_test return } source $testdir/malloc_common.tcl unset -nocomplain ::fd ::data | | | | 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 | finish_test return } source $testdir/malloc_common.tcl unset -nocomplain ::fd ::data set ::fd [open $::cmdlinearg(INFO_SCRIPT)] set ::data [read $::fd] close $::fd do_malloc_test 1 -tclprep { set bytes [file size $::cmdlinearg(INFO_SCRIPT)] execsql { CREATE TABLE blobs(k, v BLOB); INSERT INTO blobs VALUES(1, zeroblob($::bytes)); } } -tclbody { set ::blob [db incrblob blobs v 1] fconfigure $::blob -translation binary |
︙ | ︙ |
Changes to test/io.test.
︙ | ︙ | |||
420 421 422 423 424 425 426 | # that the file is now greater than 20000 bytes in size. list [expr [file size test.db]>20000] [nSync] } {1 0} do_test io-3.3 { # The COMMIT requires a single fsync() - to the database file. execsql { COMMIT } list [file size test.db] [nSync] | | | 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 | # that the file is now greater than 20000 bytes in size. list [expr [file size test.db]>20000] [nSync] } {1 0} do_test io-3.3 { # The COMMIT requires a single fsync() - to the database file. execsql { COMMIT } list [file size test.db] [nSync] } "[expr {[nonzero_reserved_bytes]?40960:39936}] 1" } #---------------------------------------------------------------------- # Test cases io-4.* test the IOCAP_SAFE_APPEND optimization. # sqlite3_simulate_device -char safe_append |
︙ | ︙ |
Changes to test/memsubsys1.test.
︙ | ︙ | |||
261 262 263 264 265 266 267 | } 1 if !$::sqlite_options(enable_purgeable_pcache) { do_test memsubsys1-7.4 { set pg_ovfl [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 0] 2] } 0 do_test memsubsys1-7.5 { set maxreq [lindex [sqlite3_status SQLITE_STATUS_MALLOC_SIZE 0] 2] | | | 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 | } 1 if !$::sqlite_options(enable_purgeable_pcache) { do_test memsubsys1-7.4 { set pg_ovfl [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 0] 2] } 0 do_test memsubsys1-7.5 { set maxreq [lindex [sqlite3_status SQLITE_STATUS_MALLOC_SIZE 0] 2] expr {$maxreq<4100 + 4200*[nonzero_reserved_bytes]} } 1 } do_test memsubsys1-7.6 { set s_used [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_USED 0] 2] } 1 do_test memsubsys1-7.7 { set s_ovfl [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_OVERFLOW 0] 2] |
︙ | ︙ |
Changes to test/mmap1.test.
︙ | ︙ | |||
84 85 86 87 88 89 90 | sql1 "SELECT count(*) FROM t1; PRAGMA integrity_check ; PRAGMA page_count" } {32 ok 77} # Have connection 2 shrink the file. Check connection 1 can still read it. sql2 { DELETE FROM t1 WHERE rowid%2; } do_test $t.$tn.2 { sql1 "SELECT count(*) FROM t1; PRAGMA integrity_check ; PRAGMA page_count" | | > | > | 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 | sql1 "SELECT count(*) FROM t1; PRAGMA integrity_check ; PRAGMA page_count" } {32 ok 77} # Have connection 2 shrink the file. Check connection 1 can still read it. sql2 { DELETE FROM t1 WHERE rowid%2; } do_test $t.$tn.2 { sql1 "SELECT count(*) FROM t1; PRAGMA integrity_check ; PRAGMA page_count" } "16 ok [expr {42+[nonzero_reserved_bytes]}]" # Have connection 2 grow the file. Check connection 1 can still read it. sql2 { INSERT INTO t1 SELECT rblob(500), rblob(500) FROM t1 } do_test $t.$tn.3 { sql1 "SELECT count(*) FROM t1; PRAGMA integrity_check ; PRAGMA page_count" } {32 ok 79} # Have connection 2 grow the file again. Check connection 1 is still ok. sql2 { INSERT INTO t1 SELECT rblob(500), rblob(500) FROM t1 } do_test $t.$tn.4 { sql1 "SELECT count(*) FROM t1; PRAGMA integrity_check ; PRAGMA page_count" } {64 ok 149} # Check that the number of pages read by connection 1 indicates that the # "PRAGMA mmap_size" command worked. if {[nonzero_reserved_bytes]==0} { do_test $t.$tn.5 { nRead db } $nRead } } } set ::rcnt 0 proc rblob {n} { set ::rcnt [expr (($::rcnt << 3) + $::rcnt + 456) & 0xFFFFFFFF] set str [format %.8x [expr $::rcnt ^ 0xbdf20da3]] |
︙ | ︙ |
Changes to test/mmap3.test.
︙ | ︙ | |||
15 16 17 18 19 20 21 22 23 24 25 26 27 28 | ifcapable !mmap||!vtab { finish_test return } source $testdir/lock_common.tcl set testprefix mmap3 do_test mmap3-1.0 { load_static_extension db wholenumber db eval { PRAGMA mmap_size=100000; CREATE TABLE t1(x, y); CREATE VIRTUAL TABLE nums USING wholenumber; INSERT INTO t1 SELECT value, randomblob(value) FROM nums | > > > | 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 | ifcapable !mmap||!vtab { finish_test return } source $testdir/lock_common.tcl set testprefix mmap3 # A codec shuts down memory-mapped I/O if {[nonzero_reserved_bytes]} {finish_test; return;} do_test mmap3-1.0 { load_static_extension db wholenumber db eval { PRAGMA mmap_size=100000; CREATE TABLE t1(x, y); CREATE VIRTUAL TABLE nums USING wholenumber; INSERT INTO t1 SELECT value, randomblob(value) FROM nums |
︙ | ︙ |
Changes to test/nan.test.
︙ | ︙ | |||
147 148 149 150 151 152 153 | # SQLite always converts NaN into NULL so it is not possible to write # a NaN value into the database file using SQLite. The following series # of tests writes a normal floating point value (0.5) into the database, # then writes directly into the database file to change the 0.5 into NaN. # Then it reads the value of the database to verify it is converted into # NULL. # | > | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | > | 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 | # SQLite always converts NaN into NULL so it is not possible to write # a NaN value into the database file using SQLite. The following series # of tests writes a normal floating point value (0.5) into the database, # then writes directly into the database file to change the 0.5 into NaN. # Then it reads the value of the database to verify it is converted into # NULL. # if {![nonzero_reserved_bytes]} { do_test nan-3.1 { db eval { DELETE FROM t1; INSERT INTO t1 VALUES(0.5); PRAGMA auto_vacuum=OFF; PRAGMA page_size=1024; VACUUM; } hexio_read test.db 2040 8 } {3FE0000000000000} do_test nan-3.2 { db eval { SELECT x, typeof(x) FROM t1 } } {0.5 real} do_test nan-3.3 { db close hexio_write test.db 2040 FFF8000000000000 sqlite3 db test.db db eval {SELECT x, typeof(x) FROM t1} } {{} null} do_test nan-3.4 { db close hexio_write test.db 2040 7FF8000000000000 sqlite3 db test.db db eval {SELECT x, typeof(x) FROM t1} } {{} null} do_test nan-3.5 { db close hexio_write test.db 2040 FFFFFFFFFFFFFFFF sqlite3 db test.db db eval {SELECT x, typeof(x) FROM t1} } {{} null} do_test nan-3.6 { db close hexio_write test.db 2040 7FFFFFFFFFFFFFFF sqlite3 db test.db db eval {SELECT x, typeof(x) FROM t1} } {{} null} } # Verify that the sqlite3AtoF routine is able to handle extreme # numbers. # do_test nan-4.1 { db eval {DELETE FROM t1} db eval "INSERT INTO t1 VALUES([string repeat 9 307].0)" |
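The direct file pokes above are skipped when a codec reserves bytes, but the behavior under test is unchanged: SQLite has no SQL-level representation for NaN, so a NaN arriving through the API or found in a record is treated as NULL. A small demonstration with sqlite3_bind_double(); NAN comes from <math.h> and is just one convenient way to produce a NaN.

#include <stdio.h>
#include <math.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pIns, *pSel;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t1(x);", 0, 0, 0);

  sqlite3_prepare_v2(db, "INSERT INTO t1 VALUES(?)", -1, &pIns, 0);
  sqlite3_bind_double(pIns, 1, NAN);      /* NaN is stored as NULL */
  sqlite3_step(pIns);
  sqlite3_finalize(pIns);

  sqlite3_prepare_v2(db, "SELECT typeof(x) FROM t1", -1, &pSel, 0);
  while( sqlite3_step(pSel)==SQLITE_ROW ){
    printf("type=%s\n", (const char*)sqlite3_column_text(pSel, 0)); /* "null" */
  }
  sqlite3_finalize(pSel);
  sqlite3_close(db);
  return 0;
}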
︙ | ︙ |
Changes to test/nolock.test.
︙ | ︙ | |||
     xCheckReservedLock $::tvfs_calls(xCheckReservedLock) \
     xAccess $::tvfs_calls(xAccess)
 } {xLock 0 xUnlock 0 xCheckReservedLock 0 xAccess 0}

 db2 close
 db close
 tvfs delete

+# 2016-03-11:  Make sure all works when transitioning to WAL mode under nolock.
+#
+do_test nolock-4.1 {
+  forcedelete test.db
+  sqlite3 db file:test.db?nolock=1 -uri 1
+  db eval {
+    PRAGMA journal_mode=WAL;
+    CREATE TABLE t1(x);
+    INSERT INTO t1 VALUES('youngling');
+    SELECT * FROM t1;
+  }
+} {delete youngling}
+db close
+
+do_test nolock-4.2 {
+  forcedelete test.db
+  sqlite3 db test.db
+  db eval {
+    PRAGMA journal_mode=WAL;
+    CREATE TABLE t1(x);
+    INSERT INTO t1 VALUES('catbird');
+    SELECT * FROM t1;
+  }
+} {wal catbird}
+do_test nolock-4.3 {
+  db close
+  sqlite3 db file:test.db?nolock=1 -uri 1
+  set rc [catch {db eval {SELECT * FROM t1}} msg]
+  lappend rc $msg
+} {1 {unable to open database file}}
+
 finish_test
Changes to test/pager1.test.
︙ | ︙ | |||
   testvfs tv -default 1
   tv sectorsize 4096
   faultsim_delete_and_reopen
   execsql { PRAGMA page_size = 1024 }
   for {set ii 0} {$ii < 4} {incr ii} {
     execsql "CREATE TABLE t${ii}(a, b)"
   }
 } {}
+if {[nonzero_reserved_bytes]} {
+  # backup with a page size changes is not possible with the codec
+  #
+  do_test pager1-9.3.2codec {
+    sqlite3 db2 test.db2
+    execsql {
+      PRAGMA page_size = 4096;
+      PRAGMA synchronous = OFF;
+      CREATE TABLE t1(a, b);
+      CREATE TABLE t2(a, b);
+    } db2
+    sqlite3_backup B db2 main db main
+    B step 30
+    list [B step 10000] [B finish]
+  } {SQLITE_READONLY SQLITE_READONLY}
+  do_test pager1-9.3.3codec {
+    db2 close
+    db close
+    tv delete
+    file size test.db2
+  } [file size test.db2]
+} else {
   do_test pager1-9.3.2 {
     sqlite3 db2 test.db2
     execsql {
       PRAGMA page_size = 4096;
       PRAGMA synchronous = OFF;
       CREATE TABLE t1(a, b);
       CREATE TABLE t2(a, b);
     } db2
     sqlite3_backup B db2 main db main
     B step 30
     list [B step 10000] [B finish]
   } {SQLITE_DONE SQLITE_OK}
   do_test pager1-9.3.3 {
     db2 close
     db close
     tv delete
     file size test.db2
   } [file size test.db]
+}

 do_test pager1-9.4.1 {
   faultsim_delete_and_reopen
   sqlite3 db2 test.db2
   execsql {
     PRAGMA page_size = 4096;
     CREATE TABLE t1(a, b);
︙ | ︙ | |||
     PRAGMA auto_vacuum = full;
     PRAGMA locking_mode=exclusive;
     CREATE TABLE t1(a, b);
     INSERT INTO t1 VALUES(1, 2);
   }
   file size test.db
 } [expr 1024*3]
+if {[nonzero_reserved_bytes]} {
+  # VACUUM with size changes is not possible with the codec.
+  do_test pager1-29.2 {
+    catchsql {
+      PRAGMA page_size = 4096;
+      VACUUM;
+    }
+  } {1 {attempt to write a readonly database}}
+} else {
   do_test pager1-29.2 {
     execsql {
       PRAGMA page_size = 4096;
       VACUUM;
     }
     file size test.db
   } [expr 4096*3]
+}

 #-------------------------------------------------------------------------
 # Test that if an empty database file (size 0 bytes) is opened in
 # exclusive-locking mode, any journal file is deleted from the file-system
 # without being rolled back. And that the RESERVED lock obtained while
 # doing this is not released.
 #
︙ | ︙ |
Changes to test/pageropt.test.
︙ | ︙ | |||
 # The focus of the tests in this file are to verify that the
 # pager optimizations implemented in version 3.3.14 work.
 #
 # $Id: pageropt.test,v 1.5 2008/08/20 14:49:25 danielk1977 Exp $

 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
+do_not_use_codec

 ifcapable {!pager_pragmas||secure_delete||direct_read} {
   finish_test
   return
 }

+# A non-zero reserved_bytes value changes the number of pages in the
+# database file, which messes up the results in this test.
+if {[nonzero_reserved_bytes]} {finish_test; return;}
+
 # Run the SQL statement supplied by the argument and return
 # the results.  Prepend four integers to the beginning of the
 # result which are
 #
 #   (1)  The number of page reads from the database
 #   (2)  The number of page writes to the database
︙ | ︙ |
Changes to test/permutations.test.
︙ | ︙ | |||
     puts "    $d"
     puts ""
   }
 }
 exit -1
 }

+if {[file tail $argv0] == "permutations.test"} {
   proc main {argv} {
     if {[llength $argv]==0} {
       help
     } else {
       set suite [file tail [lindex $argv 0]]
       if {[info exists ::testspec($suite)]==0} help
       set extra ""
       if {[llength $argv]>1} { set extra [list -files [lrange $argv 1 end]] }
       eval run_tests $suite $::testspec($suite) $extra
     }
   }
   main $argv
︙ | ︙ |
Changes to test/pragma.test.
︙ | ︙ | |||
   catchsql {PRAGMA data_store_directory}
 } {0 {}}

 forcedelete data_dir
 } ;# endif windows

 database_may_be_corrupt

+if {![nonzero_reserved_bytes]} {
   do_test 21.1 {
     # Create a corrupt database in testerr.db. And a non-corrupt at test.db.
     #
     db close
     forcedelete test.db
     sqlite3 db test.db
     execsql {
       PRAGMA page_size = 1024;
       PRAGMA auto_vacuum = 0;
       CREATE TABLE t1(a PRIMARY KEY, b);
       INSERT INTO t1 VALUES(1, 1);
     }
     for {set i 0} {$i < 10} {incr i} {
       execsql { INSERT INTO t1 SELECT a + (1 << $i), b + (1 << $i) FROM t1 }
     }
     db close
     forcecopy test.db testerr.db
     hexio_write testerr.db 15000 [string repeat 55 100]
   } {100}

   set mainerr {*** in database main ***
 Multiple uses for byte 672 of page 15}
   set auxerr {*** in database aux ***
 Multiple uses for byte 672 of page 15}
   set mainerr {/{\*\*\* in database main \*\*\*
 Multiple uses for byte 672 of page 15}.*/}
   set auxerr {/{\*\*\* in database aux \*\*\*
 Multiple uses for byte 672 of page 15}.*/}

   do_test 22.2 {
     catch { db close }
     sqlite3 db testerr.db
     execsql { PRAGMA integrity_check }
   } $mainerr

   do_test 22.3.1 {
     catch { db close }
     sqlite3 db test.db
     execsql {
       ATTACH 'testerr.db' AS 'aux';
       PRAGMA integrity_check;
     }
   } $auxerr
   do_test 22.3.2 {
     execsql { PRAGMA main.integrity_check; }
   } {ok}
   do_test 22.3.3 {
     execsql { PRAGMA aux.integrity_check; }
   } $auxerr

   do_test 22.4.1 {
     catch { db close }
     sqlite3 db testerr.db
     execsql {
       ATTACH 'test.db' AS 'aux';
       PRAGMA integrity_check;
     }
   } $mainerr
   do_test 22.4.2 {
     execsql { PRAGMA main.integrity_check; }
   } $mainerr
   do_test 22.4.3 {
     execsql { PRAGMA aux.integrity_check; }
   } {ok}
+}

 db close
 forcedelete test.db test.db-wal test.db-journal
 sqlite3 db test.db
 sqlite3 db2 test.db
 do_test 23.1 {
   db eval {
     CREATE TABLE t1(a INTEGER PRIMARY KEY,b,c,d);
︙ | ︙ |
Changes to test/pragma3.test.
︙ | ︙ | |||
 # This file implements regression tests for SQLite library.
 #
 # This file implements tests for PRAGMA data_version command.
 #

 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
+do_not_use_codec

 do_execsql_test pragma3-100 {
   PRAGMA data_version;
 } {1}
 do_execsql_test pragma3-101 {
   PRAGMA temp.data_version;
 } {1}
︙ | ︙ |
Changes to test/shell1.test.
︙ | ︙ | |||
 #
 #   shell1-1.*: Basic command line option handling.
 #   shell1-2.*: Basic "dot" command token parsing.
 #   shell1-3.*: Basic test that "dot" command can be called.
 #
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
+set CLI [test_find_cli]
 db close
 forcedelete test.db test.db-journal test.db-wal
 sqlite3 db test.db

 #----------------------------------------------------------------------------
 # Test cases shell1-1.*: Basic command line option handling.
 #
︙ | ︙ |
Changes to test/shell2.test.
︙ | ︙ | |||
 # Test plan:
 #
 #   shell2-1.*: Misc. test of various tickets and reported errors.
 #
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
+set CLI [test_find_cli]
 db close
 forcedelete test.db test.db-journal test.db-wal
 sqlite3 db test.db

 #----------------------------------------------------------------------------
 # shell2-1.*: Misc. test of various tickets and reported errors.
︙ | ︙ |
Changes to test/shell3.test.
︙ | ︙ | |||
 # Test plan:
 #
 #   shell3-1.*: Basic tests for running SQL statments from command line.
 #   shell3-2.*: Basic tests for running SQL file from command line.
 #
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
+set CLI [test_find_cli]
 db close
 forcedelete test.db test.db-journal test.db-wal
 sqlite3 db test.db

 #----------------------------------------------------------------------------
 # shell3-1.*: Basic tests for running SQL statments from command line.
 #
︙ | ︙ |
Changes to test/shell4.test.
︙ | ︙ | |||
 # Test plan:
 #
 #   shell4-1.*: Basic tests specific to the "stats" command.
 #   shell4-2.*: Basic tests for ".trace"
 #
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
+set CLI [test_find_cli]
 db close
 forcedelete test.db test.db-journal test.db-wal
 sqlite3 db test.db

 #----------------------------------------------------------------------------
 # Test cases shell4-1.*: Tests specific to the "stats" command.
 #
︙ | ︙ |
Changes to test/shell5.test.
︙ | ︙ | |||
 # Test plan:
 #
 #   shell5-1.*: Basic tests specific to the ".import" command.
 #
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
+set CLI [test_find_cli]
 db close
 forcedelete test.db test.db-journal test.db-wal

 #----------------------------------------------------------------------------
 # Test cases shell5-1.*: Basic handling of the .import and .separator commands.
 #
︙ | ︙ |
Changes to test/spellfix3.test.
︙ | ︙ | |||
   SELECT spellfix1_scriptcode('וַיֹּ֥אמֶר אֱלֹהִ֖ים יְהִ֣י א֑וֹר וַֽיְהִי־אֽוֹר׃');
 } {125}
 do_execsql_test 140 {
   SELECT spellfix1_scriptcode('فِي ذَلِكَ الوَقتِ، قالَ اللهُ: لِيَكُنْ نُورٌ. فَصَارَ نُورٌ.');
 } {160}
 do_execsql_test 200 {
   SELECT spellfix1_scriptcode('+3.14159');
+} {215}
+do_execsql_test 210 {
+  SELECT spellfix1_scriptcode('And God said: "Да будет свет"');
+} {998}
+do_execsql_test 220 {
+  SELECT spellfix1_scriptcode('+3.14159 light');
+} {215}
+do_execsql_test 230 {
+  SELECT spellfix1_scriptcode('+3.14159 свет');
+} {220}
+do_execsql_test 240 {
+  SELECT spellfix1_scriptcode('וַיֹּ֥אמֶר +3.14159');
+} {125}

 finish_test
Changes to test/stat.test.
︙ | ︙ | |||
 set testprefix stat

 ifcapable !vtab||!compound {
   finish_test
   return
 }

+# This module uses hard-coded results that depend on exact measurements of
+# pages sizes at the byte level, and hence will not work if the reserved_bytes
+# value is nonzero.
+if {[nonzero_reserved_bytes]} {finish_test; return;}

 set ::asc 1
 proc a_string {n} { string range [string repeat [incr ::asc]. $n] 1 $n }
 db func a_string a_string

 register_dbstat_vtab db
 do_execsql_test stat-0.0 {
︙ | ︙ |
Changes to test/superlock.test.
︙ | ︙ | |||
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
 source $testdir/lock_common.tcl
 source $testdir/wal_common.tcl
 set testprefix superlock
+do_not_use_codec

 # Test organization:
 #
 #   1.*: Test superlock on a rollback database. Test that once the db is
 #        superlocked, it is not possible for a second client to read from
 #        it.
 #
︙ | ︙ | |||
 do_catchsql_test 6.7  { SELECT * FROM t1 } {1 {no such table: t1}}
 do_catchsql_test 6.8  { SELECT * FROM t2 } {0 {a b}}

 db_swap test.db2 test.db
 do_catchsql_test 6.9  { SELECT * FROM t1 } {0 {1 2 3 4}}
 do_catchsql_test 6.10 { SELECT * FROM t2 } {1 {no such table: t2}}

+if {[nonzero_reserved_bytes]} {
+  # Vacuum with a size change is not allowed with the codec
+  do_execsql_test 6.11codec {
+    PRAGMA journal_mode = delete;
+    VACUUM;
+    PRAGMA journal_mode = wal;
+    INSERT INTO t1 VALUES(5, 6);
+  } {delete wal}
+} else {
   do_execsql_test 6.11 {
     PRAGMA journal_mode = delete;
     PRAGMA page_size = 512;
     VACUUM;
     PRAGMA journal_mode = wal;
     INSERT INTO t1 VALUES(5, 6);
   } {delete wal}
+}

 db_swap test.db2 test.db
 do_catchsql_test 6.12 { SELECT * FROM t1 } {1 {no such table: t1}}
 do_catchsql_test 6.13 { SELECT * FROM t2 } {0 {a b}}

 db_swap test.db2 test.db
 do_catchsql_test 6.14 { SELECT * FROM t1 } {0 {1 2 3 4 5 6}}
 do_catchsql_test 6.15 { SELECT * FROM t2 } {1 {no such table: t2}}

 finish_test
Changes to test/tclsqlite.test.
︙ | ︙ | |||
 # $Id: tclsqlite.test,v 1.73 2009/03/16 13:19:36 danielk1977 Exp $

 set testdir [file dirname $argv0]
 source $testdir/tester.tcl

 # Check the error messages generated by tclsqlite
 #
+set r "sqlite_orig HANDLE FILENAME ?-vfs VFSNAME? ?-readonly BOOLEAN? ?-create BOOLEAN? ?-nomutex BOOLEAN? ?-fullmutex BOOLEAN? ?-uri BOOLEAN?"
 if {[sqlite3 -has-codec]} {
+  append r " ?-key CODECKEY?"
 }
 do_test tcl-1.1 {
   set v [catch {sqlite3 bogus} msg]
   regsub {really_sqlite3} $msg {sqlite3} msg
   lappend v $msg
 } [list 1 "wrong # args: should be \"$r\""]
 do_test tcl-1.2 {
︙ | ︙ |
Changes to test/tester.tcl.
︙ | ︙ | |||
 # This command should be called after loading tester.tcl from within
 # all test scripts that are incompatible with encryption codecs.
 #
 proc do_not_use_codec {} {
   set ::do_not_use_codec 1
   reset_db
 }

+# Return true if the "reserved_bytes" integer on database files is non-zero.
+#
+proc nonzero_reserved_bytes {} {
+  return [sqlite3 -has-codec]
+}
+
 # Print a HELP message and exit
 #
 proc print_help_and_quit {} {
   puts {Options:
   --pause              Wait for user input before continuing
   --soft-heap-limit=N  Set the soft-heap-limit to N
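The guard pattern used throughout the test-file changes in this check-in builds directly on these two helpers. As a point of reference, here is a minimal sketch, not part of the check-in itself, of how a results-sensitive test file can skip itself when a codec is active; the file and test names (example-1.0) are illustrative only:

  set testdir [file dirname $argv0]
  source $testdir/tester.tcl

  # The expected results below assume exact page counts, which a codec's
  # non-zero reserved_bytes value would change, so skip the whole file.
  if {[nonzero_reserved_bytes]} {finish_test; return;}

  do_test example-1.0 {
    execsql { CREATE TABLE t1(x); INSERT INTO t1 VALUES(1); SELECT x FROM t1 }
  } {1}

  finish_test

Files that must never run against a codec use the existing do_not_use_codec call instead, as several of the diffs above show.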
︙ | ︙ | |||
 #   --soak=N
 #   --file-retries=N
 #   --file-retry-delay=N
 #   --start=[$permutation:]$testfile
 #   --match=$pattern
 #   --verbose=$val
 #   --output=$filename
+#   -q                              Reduce output
+#   --testdir=$dir                  Run tests in subdirectory $dir
 #   --help
 #
 set cmdlinearg(soft-heap-limit)    0
 set cmdlinearg(maxerror)        1000
 set cmdlinearg(malloctrace)        0
 set cmdlinearg(backtrace)         10
 set cmdlinearg(binarylog)          0
 set cmdlinearg(soak)               0
 set cmdlinearg(file-retries)       0
 set cmdlinearg(file-retry-delay)   0
 set cmdlinearg(start)             ""
 set cmdlinearg(match)             ""
 set cmdlinearg(verbose)           ""
 set cmdlinearg(output)            ""
+set cmdlinearg(testdir)    "testdir"

 set leftover [list]
 foreach a $argv {
   switch -regexp -- $a {
     {^-+pause$} {
       # Wait for user input before continuing. This is to give the user an
       # opportunity to connect profiling tools to the process.
︙ | ︙ | |||
     }
     {^-+backtrace=.+$} {
       foreach {dummy cmdlinearg(backtrace)} [split $a =] break
       sqlite3_memdebug_backtrace $value
     }
     {^-+binarylog=.+$} {
       foreach {dummy cmdlinearg(binarylog)} [split $a =] break
+      set cmdlinearg(binarylog) [file normalize $cmdlinearg(binarylog)]
     }
     {^-+soak=.+$} {
       foreach {dummy cmdlinearg(soak)} [split $a =] break
       set ::G(issoak) $cmdlinearg(soak)
     }
     {^-+file-retries=.+$} {
       foreach {dummy cmdlinearg(file-retries)} [split $a =] break
︙ | ︙ | |||
       set ::G(match) $cmdlinearg(match)
       if {$::G(match) == ""} {unset ::G(match)}
     }
     {^-+output=.+$} {
       foreach {dummy cmdlinearg(output)} [split $a =] break
+      set cmdlinearg(output) [file normalize $cmdlinearg(output)]
       if {$cmdlinearg(verbose)==""} {
         set cmdlinearg(verbose) 2
       }
     }
     {^-+verbose=.+$} {
       foreach {dummy cmdlinearg(verbose)} [split $a =] break
       if {$cmdlinearg(verbose)=="file"} {
         set cmdlinearg(verbose) 2
       } elseif {[string is boolean -strict $cmdlinearg(verbose)]==0} {
         error "option --verbose= must be set to a boolean or to \"file\""
       }
     }
+    {^-+testdir=.*$} {
+      foreach {dummy cmdlinearg(testdir)} [split $a =] break
+    }
     {.*help.*} {
       print_help_and_quit
     }
     {^-q$} {
       set cmdlinearg(output) test-out.txt
       set cmdlinearg(verbose) 2
     }
     default {
+      lappend leftover [file normalize $a]
     }
   }
 }

 set testdir [file normalize $testdir]
+set cmdlinearg(TESTFIXTURE_HOME) [pwd]
+set cmdlinearg(INFO_SCRIPT) [file normalize [info script]]
+set argv0 [file normalize $argv0]
+if {$cmdlinearg(testdir)!=""} {
+  file mkdir $cmdlinearg(testdir)
+  cd $cmdlinearg(testdir)
+}
 set argv $leftover

 # Install the malloc layer used to inject OOM errors. And the 'automatic'
 # extensions. This only needs to be done once for the process.
 #
 sqlite3_shutdown
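For context, the new -q and --testdir= switches are consumed from the testfixture command line by the option loop above. A hypothetical invocation sketch follows; the ./testfixture binary name and the scratch directory name are illustrative assumptions, not part of this check-in:

  # Run the "veryquick" permutation with reduced output, using the default ./testdir
  ./testfixture permutations.test veryquick -q

  # Run a single test file, keeping all scratch databases under ./scratch
  ./testfixture test/pager1.test --testdir=scratch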
︙ | ︙ | |||
   sqlite3_shutdown
   eval sqlite3_config_pagecache $::old_pagecache_config
   unset ::old_pagecache_config
   sqlite3_initialize
   autoinstall_test_functions
   sqlite3 db test.db
 }

+# Find the name of the 'shell' executable (e.g. "sqlite3.exe") to use for
+# the tests in shell[1-5].test. If no such executable can be found, invoke
+# [finish_test ; return] in the callers context.
+#
+proc test_find_cli {} {
+  if {$::tcl_platform(platform)=="windows"} {
+    set ret "sqlite3.exe"
+  } else {
+    set ret "sqlite3"
+  }
+  set ret [file normalize [file join $::cmdlinearg(TESTFIXTURE_HOME) $ret]]
+  if {![file executable $ret]} {
+    finish_test
+    return -code return
+  }
+  return $ret
+}
+
 # If the library is compiled with the SQLITE_DEFAULT_AUTOVACUUM macro set
 # to non-zero, then set the global variable $AUTOVACUUM to 1.
 set AUTOVACUUM $sqlite_options(default_autovacuum)

 # Make sure the FTS enhanced query syntax is disabled.
 set sqlite_fts3_enable_parentheses 0
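With test_find_cli in place, each of the shell[1-5].test files above reduces its preamble to a single call. A short sketch of the usage pattern under those assumptions; the test name shellx-0.0 is illustrative, and the real files drive the shell through helper procs rather than a bare exec:

  set CLI [test_find_cli]   ;# finishes the test file early if no shell binary exists

  do_test shellx-0.0 {
    # Invoke the command-line shell located by test_find_cli on a scratch database.
    exec $CLI test.db {SELECT 'works';}
  } {works}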
︙ | ︙ |
Changes to test/tkt4018.test.
︙ | ︙ | |||
 #
 # This file implements tests to verify that ticket #4018 has been
 # fixed.
 #
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
+do_not_use_codec

 proc testsql {sql} {
   set fd [open tf_main.tcl w]
   puts $fd [subst -nocommands {
     sqlite3_test_control_pending_byte 0x0010000
     sqlite3 db test.db
     set rc [catch { db eval {$sql} } msg]
︙ | ︙ |
Changes to test/wal.test.
︙ | ︙ | |||
 do_test wal-21.3 { execsql { PRAGMA integrity_check } } {ok}

 #-------------------------------------------------------------------------
 # Test reading and writing of databases with different page-sizes.
 #
+incr ::do_not_use_codec
 foreach pgsz {512 1024 2048 4096 8192 16384 32768 65536} {
   do_multiclient_test tn [string map [list %PGSZ% $pgsz] {
     do_test wal-22.%PGSZ%.$tn.1 {
       sql1 {
         PRAGMA main.page_size = %PGSZ%;
         PRAGMA auto_vacuum = 0;
         PRAGMA journal_mode = WAL;
         CREATE TABLE t1(x UNIQUE);
         INSERT INTO t1 SELECT randomblob(800);
         INSERT INTO t1 SELECT randomblob(800);
         INSERT INTO t1 SELECT randomblob(800);
       }
     } {wal}

     do_test wal-22.%PGSZ%.$tn.2 { sql2 { PRAGMA integrity_check } } {ok}

     do_test wal-22.%PGSZ%.$tn.3 {
       sql1 {PRAGMA wal_checkpoint}
       expr {[file size test.db] % %PGSZ%}
     } {0}
   }]
 }
+incr ::do_not_use_codec -1

 #-------------------------------------------------------------------------
 # Test that when 1 or more pages are recovered from a WAL file,
 # sqlite3_log() is invoked to report this to the user.
 #
 ifcapable curdir {
   set walfile [file nativename [file join [get_pwd] test.db-wal]]
︙ | ︙ |
Changes to test/wal5.test.
︙ | ︙ | |||
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
 source $testdir/lock_common.tcl
 source $testdir/wal_common.tcl

 ifcapable !wal {finish_test ; return }
 if ![wal_is_ok] { finish_test; return }
+do_not_use_codec
 set testprefix wal5

 proc db_page_count  {{file test.db}} { expr [file size $file] / 1024 }
 proc wal_page_count {{file test.db}} { wal_frame_count ${file}-wal 1024 }
︙ | ︙ | |||
     sql1 { INSERT INTO t1 VALUES(5, zeroblob(1200)) }
     list [db_page_count] [wal_page_count] $::nBusyHandler
   } {6 12 0}

   do_test 1.$tn.7 {
     reopen_all
     list [db_page_count] [wal_page_count] $::nBusyHandler
+  } [expr {[nonzero_reserved_bytes]?"/# # 0/":"7 0 0"}]

   do_test 1.$tn.8  { sql2 { BEGIN ; SELECT x FROM t1 } } {1 2 3 4 5}

   do_test 1.$tn.9 {
     sql1 { INSERT INTO t1 VALUES(6, zeroblob(1200)) }
     list [db_page_count] [wal_page_count] $::nBusyHandler
+  } [expr {[nonzero_reserved_bytes]?"/# # #/":"7 5 0"}]
   do_test 1.$tn.10 { sql3 { BEGIN ; SELECT x FROM t1 } } {1 2 3 4 5 6}

   set ::busy_handler_script {
     if {$n==5} { sql2 COMMIT }
     if {$n==6} { set ::db_file_size [db_page_count] }
     if {$n==7} { sql3 COMMIT }
   }
   do_test 1.$tn.11 {
     code1 { do_wal_checkpoint db -mode restart }
     list [db_page_count] [wal_page_count] $::nBusyHandler
+  } [expr {[nonzero_reserved_bytes]?"/# # #/":"10 5 8"}]
   do_test 1.$tn.12 { set ::db_file_size } 10
 }

 #-------------------------------------------------------------------------
 # This block of tests explores checkpoint operations on more than one
 # database file.
 #
︙ | ︙ |
Changes to test/wal8.test.
︙ | ︙ | |||
 #

 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
 set ::testprefix wal8
 ifcapable !wal {finish_test ; return }
 if ![wal_is_ok] { finish_test; return }
+do_not_use_codec

 db close
 forcedelete test.db test.db-wal

 sqlite3 db test.db
 sqlite3 db2 test.db
︙ | ︙ |
Changes to test/walbak.test.
︙ | ︙ | |||
       INSERT INTO t1 SELECT randomblob(500), randomblob(500) FROM t1; /* 16 */
       INSERT INTO t1 SELECT randomblob(500), randomblob(500) FROM t1; /* 32 */
       INSERT INTO t1 SELECT randomblob(500), randomblob(500) FROM t1; /* 64 */
     COMMIT;
   }
 } {}
 do_test walbak-2.2 {
+  forcedelete abc.db
   db backup abc.db
   sqlite3 db2 abc.db
   string compare [sig db] [sig db2]
 } {0}

 do_test walbak-2.3 {
   sqlite3_backup B db2 main db main
︙ | ︙ | |||
       PRAGMA page_size = 2048;
       PRAGMA journal_mode = PERSIST;
       CREATE TABLE xx(x);
     }
   }
 } {
+  if {$tn==4 && [sqlite3 -has-codec]} continue
   foreach f [glob -nocomplain test.db*] { forcedelete $f }
   eval $setup

   do_test walbak-3.$tn.1 {
     execsql {
       CREATE TABLE t1(a, b);
︙ | ︙ |
Changes to test/walro.test.
︙ | ︙ | |||
       INSERT INTO t2 SELECT x||y, y||x FROM t2;
       INSERT INTO t2 SELECT x||y, y||x FROM t2;
       INSERT INTO t2 SELECT x||y, y||x FROM t2;
       INSERT INTO t2 SELECT x||y, y||x FROM t2;
       INSERT INTO t2 SELECT x||y, y||x FROM t2;
     }
     file size test.db-wal
+  } [expr {[nonzero_reserved_bytes]?148848:147800}]
   do_test 1.4.4.2 {
     csql1 { SELECT * FROM t1 }
   } {0 {a b c d e f g h i j k l 1 2 3 4 5 6}}
   do_test 1.4.4.3 {
     csql2 COMMIT
     csql1 { SELECT count(*) FROM t2 }
   } {0 512}
︙ | ︙ |
Changes to test/where2.test.
︙ | ︙ | |||
 #
 do_execsql_test where2-13.1 {
   CREATE TABLE t13(a,b);
   CREATE INDEX t13a ON t13(a);
   INSERT INTO t13 VALUES(4,5);
   SELECT * FROM t13 WHERE (1=2 AND a=3) OR a=4;
 } {4 5}

+# https://www.sqlite.org/src/info/5e3c886796e5512e (2016-03-09)
+# Correlated subquery on the RHS of an IN operator
+#
+do_execsql_test where2-14.1 {
+  CREATE TABLE t14a(x INTEGER PRIMARY KEY);
+  INSERT INTO t14a(x) VALUES(1),(2),(3),(4);
+  CREATE TABLE t14b(y INTEGER PRIMARY KEY);
+  INSERT INTO t14b(y) VALUES(1);
+  SELECT x FROM t14a WHERE x NOT IN (SELECT x FROM t14b);
+} {}
+
 finish_test
Changes to tool/build-all-msvc.bat.
︙ | ︙ | |||
       REM
       REM NOTE: Copy the "sqlite3.pdb" file to the appropriate directory for
       REM       the build and platform beneath the binary directory unless we
       REM       are prevented from doing so.
       REM
       IF NOT DEFINED NOSYMBOLS (
+        IF EXIST "%DLL_PDB_FILE_NAME%" (
           %__ECHO% XCOPY "%DLL_PDB_FILE_NAME%" "%BINARYDIRECTORY%\%%B\%%D\" %FFLAGS% %DFLAGS%

           IF ERRORLEVEL 1 (
             ECHO Failed to copy "%DLL_PDB_FILE_NAME%" to "%BINARYDIRECTORY%\%%B\%%D\".
             GOTO errors
           )
+        )
       )

       REM
       REM NOTE: If requested, also build the shell executable.
       REM
       IF DEFINED BUILD_ALL_SHELL (
︙ | ︙ | |||
         REM
         REM NOTE: Copy the "sqlite3sh.pdb" file to the appropriate directory
         REM       for the build and platform beneath the binary directory
         REM       unless we are prevented from doing so.
         REM
         IF NOT DEFINED NOSYMBOLS (
+          IF EXIST "%EXE_PDB_FILE_NAME%" (
             %__ECHO% XCOPY "%EXE_PDB_FILE_NAME%" "%BINARYDIRECTORY%\%%B\%%D\" %FFLAGS% %DFLAGS%

             IF ERRORLEVEL 1 (
               ECHO Failed to copy "%EXE_PDB_FILE_NAME%" to "%BINARYDIRECTORY%\%%B\%%D\".
               GOTO errors
             )
+          )
         )
       )
     )
   )
 )
︙ | ︙ |