SQLite

Check-in [6a7ee04b0d]

Overview
Comment: Merge all recent enhancements from trunk.
SHA1: 6a7ee04b0ddac36a87d5ed2ac89a53e537f4d5a3
User & Date: drh 2016-03-16 01:16:30.929
Context
2016-03-21 15:32  Merge 3.12.0 beta changes from trunk. (check-in: 3296a0ceed user: drh tags: sessions)
2016-03-16 01:16  Merge all recent enhancements from trunk. (check-in: 6a7ee04b0d user: drh tags: sessions)
2016-03-16 01:03  Add the SQLITE_OMIT_CODEC_FROM_TCL compile-time option. (check-in: 45f7f0c80b user: drh tags: trunk)
2016-03-07 17:49  Merge the virtual table query planner enhancement, the RTREE cost estimate fix, and the statement journal spill delay enhancement from trunk. (check-in: 17fd8f3cf0 user: drh tags: sessions)
Changes
Changes to autoconf/Makefile.am.

Old version (lines 1-17):
AM_CFLAGS = @THREADSAFE_FLAGS@ @DYNAMIC_EXTENSION_FLAGS@ @FTS5_FLAGS@ @JSON1_FLAGS@ -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_RTREE

lib_LTLIBRARIES = libsqlite3.la
libsqlite3_la_SOURCES = sqlite3.c
libsqlite3_la_LDFLAGS = -no-undefined -version-info 8:6:8

bin_PROGRAMS = sqlite3
sqlite3_SOURCES = shell.c sqlite3.c sqlite3.h

sqlite3_LDADD = @READLINE_LIBS@
sqlite3_DEPENDENCIES = @EXTRA_SHELL_OBJ@
sqlite3_CFLAGS = $(AM_CFLAGS) -DSQLITE_ENABLE_EXPLAIN_COMMENTS

include_HEADERS = sqlite3.h sqlite3ext.h

EXTRA_DIST = sqlite3.1 tea Makefile.msc sqlite3.rc README.txt Replace.cs
pkgconfigdir = ${libdir}/pkgconfig

New version (lines 1-18):
AM_CFLAGS = @THREADSAFE_FLAGS@ @DYNAMIC_EXTENSION_FLAGS@ @FTS5_FLAGS@ @JSON1_FLAGS@ -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_RTREE

lib_LTLIBRARIES = libsqlite3.la
libsqlite3_la_SOURCES = sqlite3.c
libsqlite3_la_LDFLAGS = -no-undefined -version-info 8:6:8

bin_PROGRAMS = sqlite3
sqlite3_SOURCES = shell.c sqlite3.h
EXTRA_sqlite3_SOURCES = sqlite3.c
sqlite3_LDADD = @EXTRA_SHELL_OBJ@ @READLINE_LIBS@
sqlite3_DEPENDENCIES = @EXTRA_SHELL_OBJ@
sqlite3_CFLAGS = $(AM_CFLAGS) -DSQLITE_ENABLE_EXPLAIN_COMMENTS

include_HEADERS = sqlite3.h sqlite3ext.h

EXTRA_DIST = sqlite3.1 tea Makefile.msc sqlite3.rc README.txt Replace.cs
pkgconfigdir = ${libdir}/pkgconfig
Changes to autoconf/configure.ac.

Old version (lines 126-140):
#   --enable-static-shell
#
AC_ARG_ENABLE(static-shell, [AS_HELP_STRING(
  [--enable-static-shell], 
  [statically link libsqlite3 into shell tool [default=yes]])], 
  [], [enable_static_shell=yes])
if test x"$enable_static_shell" == "xyes"; then
  EXTRA_SHELL_OBJ=sqlite3.$OBJEXT
else
  EXTRA_SHELL_OBJ=libsqlite3.la
fi
AC_SUBST(EXTRA_SHELL_OBJ)
#-----------------------------------------------------------------------

AC_CHECK_FUNCS(posix_fallocate)

New version (lines 126-140):
#   --enable-static-shell
#
AC_ARG_ENABLE(static-shell, [AS_HELP_STRING(
  [--enable-static-shell], 
  [statically link libsqlite3 into shell tool [default=yes]])], 
  [], [enable_static_shell=yes])
if test x"$enable_static_shell" == "xyes"; then
  EXTRA_SHELL_OBJ=sqlite3-sqlite3.$OBJEXT
else
  EXTRA_SHELL_OBJ=libsqlite3.la
fi
AC_SUBST(EXTRA_SHELL_OBJ)
#-----------------------------------------------------------------------

AC_CHECK_FUNCS(posix_fallocate)
Changes to ext/fts3/fts3_write.c.

Old version (lines 329-343):
/* 27 */ "SELECT ? UNION SELECT level / (1024 * ?) FROM %Q.'%q_segdir'",

/* This statement is used to determine which level to read the input from
** when performing an incremental merge. It returns the absolute level number
** of the oldest level in the db that contains at least ? segments. Or,
** if no level in the FTS index contains more than ? segments, the statement
** returns zero rows.  */

/* 28 */ "SELECT level FROM %Q.'%q_segdir' GROUP BY level HAVING count(*)>=?"
         "  ORDER BY (level %% 1024) ASC LIMIT 1",

/* Estimate the upper limit on the number of leaf nodes in a new segment
** created by merging the oldest :2 segments from absolute level :1. See 
** function sqlite3Fts3Incrmerge() for details.  */
/* 29 */ "SELECT 2 * total(1 + leaves_end_block - start_block) "
         "  FROM %Q.'%q_segdir' WHERE level = ? AND idx < ?",

New version (lines 329-344):
/* 27 */ "SELECT ? UNION SELECT level / (1024 * ?) FROM %Q.'%q_segdir'",

/* This statement is used to determine which level to read the input from
** when performing an incremental merge. It returns the absolute level number
** of the oldest level in the db that contains at least ? segments. Or,
** if no level in the FTS index contains more than ? segments, the statement
** returns zero rows.  */
/* 28 */ "SELECT level, count(*) AS cnt FROM %Q.'%q_segdir' "
         "  GROUP BY level HAVING cnt>=?"
         "  ORDER BY (level %% 1024) ASC LIMIT 1",

/* Estimate the upper limit on the number of leaf nodes in a new segment
** created by merging the oldest :2 segments from absolute level :1. See 
** function sqlite3Fts3Incrmerge() for details.  */
/* 29 */ "SELECT 2 * total(1 + leaves_end_block - start_block) "
         "  FROM %Q.'%q_segdir' WHERE level = ? AND idx < ?",

Old version (lines 3190-3204):
  }

  if( iLevel==FTS3_SEGCURSOR_ALL ){
    /* This call is to merge all segments in the database to a single
    ** segment. The level of the new segment is equal to the numerically
    ** greatest segment level currently present in the database for this
    ** index. The idx of the new segment is always 0.  */
    if( csr.nSegment==1 ){
      rc = SQLITE_DONE;
      goto finished;
    }
    iNewLevel = iMaxLevel;
    bIgnoreEmpty = 1;

  }else{

New version (lines 3191-3205):
  }

  if( iLevel==FTS3_SEGCURSOR_ALL ){
    /* This call is to merge all segments in the database to a single
    ** segment. The level of the new segment is equal to the numerically
    ** greatest segment level currently present in the database for this
    ** index. The idx of the new segment is always 0.  */
    if( csr.nSegment==1 && 0==fts3SegReaderIsPending(csr.apSegment[0]) ){
      rc = SQLITE_DONE;
      goto finished;
    }
    iNewLevel = iMaxLevel;
    bIgnoreEmpty = 1;

  }else{

Old version (lines 4832-4849):
    /* Search the %_segdir table for the absolute level with the smallest
    ** relative level number that contains at least nMin segments, if any.
    ** If one is found, set iAbsLevel to the absolute level number and
    ** nSeg to nMin. If no level with at least nMin segments can be found, 
    ** set nSeg to -1.
    */
    rc = fts3SqlStmt(p, SQL_FIND_MERGE_LEVEL, &pFindLevel, 0);
    sqlite3_bind_int(pFindLevel, 1, nMin);
    if( sqlite3_step(pFindLevel)==SQLITE_ROW ){
      iAbsLevel = sqlite3_column_int64(pFindLevel, 0);
      nSeg = nMin;

    }else{
      nSeg = -1;
    }
    rc = sqlite3_reset(pFindLevel);

    /* If the hint read from the %_stat table is not empty, check if the
    ** last entry in it specifies a relative level smaller than or equal

New version (lines 4833-4851):
    /* Search the %_segdir table for the absolute level with the smallest
    ** relative level number that contains at least nMin segments, if any.
    ** If one is found, set iAbsLevel to the absolute level number and
    ** nSeg to nMin. If no level with at least nMin segments can be found, 
    ** set nSeg to -1.
    */
    rc = fts3SqlStmt(p, SQL_FIND_MERGE_LEVEL, &pFindLevel, 0);
    sqlite3_bind_int(pFindLevel, 1, MAX(2, nMin));
    if( sqlite3_step(pFindLevel)==SQLITE_ROW ){
      iAbsLevel = sqlite3_column_int64(pFindLevel, 0);
      nSeg = sqlite3_column_int(pFindLevel, 1);
      assert( nSeg>=2 );
    }else{
      nSeg = -1;
    }
    rc = sqlite3_reset(pFindLevel);

    /* If the hint read from the %_stat table is not empty, check if the
    ** last entry in it specifies a relative level smaller than or equal
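
Note on the fts3 change above: statement 28 now returns both the level and its segment count, and the caller binds MAX(2, nMin) and reads nSeg from the result rather than assuming exactly nMin segments. As a rough illustration, the query can be run directly against the shadow table of a hypothetical FTS3 table named ft (the table name and the threshold value 2 are assumptions, not part of this check-in):

  -- Oldest absolute level holding at least 2 segments, with its segment count.
  SELECT level, count(*) AS cnt FROM main.'ft_segdir'
    GROUP BY level HAVING cnt>=2
    ORDER BY (level % 1024) ASC LIMIT 1;
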
Changes to ext/fts5/fts5Int.h.

Old version (lines 168-181):
  fts5_tokenizer *pTokApi;

  /* Values loaded from the %_config table */
  int iCookie;                    /* Incremented when %_config is modified */
  int pgsz;                       /* Approximate page size used in %_data */
  int nAutomerge;                 /* 'automerge' setting */
  int nCrisisMerge;               /* Maximum allowed segments per level */

  int nHashSize;                  /* Bytes of memory for in-memory hash */
  char *zRank;                    /* Name of rank function */
  char *zRankArgs;                /* Arguments to rank function */

  /* If non-NULL, points to sqlite3_vtab.base.zErrmsg. Often NULL. */
  char **pzErrmsg;


New version (lines 168-182):
  fts5_tokenizer *pTokApi;

  /* Values loaded from the %_config table */
  int iCookie;                    /* Incremented when %_config is modified */
  int pgsz;                       /* Approximate page size used in %_data */
  int nAutomerge;                 /* 'automerge' setting */
  int nCrisisMerge;               /* Maximum allowed segments per level */
  int nUsermerge;                 /* 'usermerge' setting */
  int nHashSize;                  /* Bytes of memory for in-memory hash */
  char *zRank;                    /* Name of rank function */
  char *zRankArgs;                /* Arguments to rank function */

  /* If non-NULL, points to sqlite3_vtab.base.zErrmsg. Often NULL. */
  char **pzErrmsg;


Old version (lines 695-708):
Fts5ExprNode *sqlite3Fts5ParseNode(
  Fts5Parse *pParse,
  int eType,
  Fts5ExprNode *pLeft,
  Fts5ExprNode *pRight,
  Fts5ExprNearset *pNear
);







Fts5ExprPhrase *sqlite3Fts5ParseTerm(
  Fts5Parse *pParse, 
  Fts5ExprPhrase *pPhrase, 
  Fts5Token *pToken,
  int bPrefix
);

New version (lines 696-715):
Fts5ExprNode *sqlite3Fts5ParseNode(
  Fts5Parse *pParse,
  int eType,
  Fts5ExprNode *pLeft,
  Fts5ExprNode *pRight,
  Fts5ExprNearset *pNear
);

Fts5ExprNode *sqlite3Fts5ParseImplicitAnd(
  Fts5Parse *pParse,
  Fts5ExprNode *pLeft,
  Fts5ExprNode *pRight
);

Fts5ExprPhrase *sqlite3Fts5ParseTerm(
  Fts5Parse *pParse, 
  Fts5ExprPhrase *pPhrase, 
  Fts5Token *pToken,
  int bPrefix
);
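
For orientation, the three merge-related fields of the config object correspond to FTS5 configuration commands written through the table's rank column. A minimal sketch (ft is a hypothetical table name; the values shown are the defaults defined in fts5_config.c below):

  INSERT INTO ft(ft, rank) VALUES('automerge',   4);   -- nAutomerge
  INSERT INTO ft(ft, rank) VALUES('usermerge',   4);   -- nUsermerge (new in this check-in)
  INSERT INTO ft(ft, rank) VALUES('crisismerge', 16);  -- nCrisisMerge
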
Changes to ext/fts5/fts5_config.c.

Old version (lines 14-27):
*/


#include "fts5Int.h"

#define FTS5_DEFAULT_PAGE_SIZE   4050
#define FTS5_DEFAULT_AUTOMERGE      4

#define FTS5_DEFAULT_CRISISMERGE   16
#define FTS5_DEFAULT_HASHSIZE    (1024*1024)

/* Maximum allowed page size */
#define FTS5_MAX_PAGE_SIZE (128*1024)

static int fts5_iswhitespace(char x){

New version (lines 14-28):
*/


#include "fts5Int.h"

#define FTS5_DEFAULT_PAGE_SIZE   4050
#define FTS5_DEFAULT_AUTOMERGE      4
#define FTS5_DEFAULT_USERMERGE      4
#define FTS5_DEFAULT_CRISISMERGE   16
#define FTS5_DEFAULT_HASHSIZE    (1024*1024)

/* Maximum allowed page size */
#define FTS5_MAX_PAGE_SIZE (128*1024)

static int fts5_iswhitespace(char x){

Old version (lines 437-451):
    memcpy(zOut, zIn, nIn+1);
    if( fts5_isopenquote(zOut[0]) ){
      int ii = fts5Dequote(zOut);
      zRet = &zIn[ii];
      *pbQuoted = 1;
    }else{
      zRet = fts5ConfigSkipBareword(zIn);

      zOut[zRet-zIn] = '\0';

    }
  }

  if( zRet==0 ){
    sqlite3_free(zOut);
  }else{
    *pzOut = zOut;

New version (lines 438-454):
    memcpy(zOut, zIn, nIn+1);
    if( fts5_isopenquote(zOut[0]) ){
      int ii = fts5Dequote(zOut);
      zRet = &zIn[ii];
      *pbQuoted = 1;
    }else{
      zRet = fts5ConfigSkipBareword(zIn);
      if( zRet ){
        zOut[zRet-zIn] = '\0';
      }
    }
  }

  if( zRet==0 ){
    sqlite3_free(zOut);
  }else{
    *pzOut = zOut;

Old version (lines 852-865):
    if( nAutomerge<0 || nAutomerge>64 ){
      *pbBadkey = 1;
    }else{
      if( nAutomerge==1 ) nAutomerge = FTS5_DEFAULT_AUTOMERGE;
      pConfig->nAutomerge = nAutomerge;
    }
  }













  else if( 0==sqlite3_stricmp(zKey, "crisismerge") ){
    int nCrisisMerge = -1;
    if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){
      nCrisisMerge = sqlite3_value_int(pVal);
    }
    if( nCrisisMerge<0 ){

New version (lines 855-880):
    if( nAutomerge<0 || nAutomerge>64 ){
      *pbBadkey = 1;
    }else{
      if( nAutomerge==1 ) nAutomerge = FTS5_DEFAULT_AUTOMERGE;
      pConfig->nAutomerge = nAutomerge;
    }
  }

  else if( 0==sqlite3_stricmp(zKey, "usermerge") ){
    int nUsermerge = -1;
    if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){
      nUsermerge = sqlite3_value_int(pVal);
    }
    if( nUsermerge<2 || nUsermerge>16 ){
      *pbBadkey = 1;
    }else{
      pConfig->nUsermerge = nUsermerge;
    }
  }

  else if( 0==sqlite3_stricmp(zKey, "crisismerge") ){
    int nCrisisMerge = -1;
    if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){
      nCrisisMerge = sqlite3_value_int(pVal);
    }
    if( nCrisisMerge<0 ){

Old version (lines 899-912):
  sqlite3_stmt *p = 0;
  int rc = SQLITE_OK;
  int iVersion = 0;

  /* Set default values */
  pConfig->pgsz = FTS5_DEFAULT_PAGE_SIZE;
  pConfig->nAutomerge = FTS5_DEFAULT_AUTOMERGE;

  pConfig->nCrisisMerge = FTS5_DEFAULT_CRISISMERGE;
  pConfig->nHashSize = FTS5_DEFAULT_HASHSIZE;

  zSql = sqlite3Fts5Mprintf(&rc, zSelect, pConfig->zDb, pConfig->zName);
  if( zSql ){
    rc = sqlite3_prepare_v2(pConfig->db, zSql, -1, &p, 0);
    sqlite3_free(zSql);

New version (lines 914-928):
  sqlite3_stmt *p = 0;
  int rc = SQLITE_OK;
  int iVersion = 0;

  /* Set default values */
  pConfig->pgsz = FTS5_DEFAULT_PAGE_SIZE;
  pConfig->nAutomerge = FTS5_DEFAULT_AUTOMERGE;
  pConfig->nUsermerge = FTS5_DEFAULT_USERMERGE;
  pConfig->nCrisisMerge = FTS5_DEFAULT_CRISISMERGE;
  pConfig->nHashSize = FTS5_DEFAULT_HASHSIZE;

  zSql = sqlite3Fts5Mprintf(&rc, zSelect, pConfig->zDb, pConfig->zName);
  if( zSql ){
    rc = sqlite3_prepare_v2(pConfig->db, zSql, -1, &p, 0);
    sqlite3_free(zSql);
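
The 'usermerge' handling above accepts integers from 2 to 16 and flags anything else as a bad key, mirroring the existing 'automerge' and 'crisismerge' branches. A hedged usage sketch (tt matches the table created in fts5config.test below):

  INSERT INTO tt(tt, rank) VALUES('usermerge', 4);   -- accepted
  INSERT INTO tt(tt, rank) VALUES('usermerge', 17);  -- rejected: out of range
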
Changes to ext/fts5/fts5_expr.c.

Old version (lines 254-267):
      }
      pNew->pIndex = 0;
      pNew->pConfig = pConfig;
      pNew->apExprPhrase = sParse.apPhrase;
      pNew->nPhrase = sParse.nPhrase;
      sParse.apPhrase = 0;
    }


  }

  sqlite3_free(sParse.apPhrase);
  *pzErr = sParse.zErr;
  return sParse.rc;
}


New version (lines 254-269):
      }
      pNew->pIndex = 0;
      pNew->pConfig = pConfig;
      pNew->apExprPhrase = sParse.apPhrase;
      pNew->nPhrase = sParse.nPhrase;
      sParse.apPhrase = 0;
    }
  }else{
    sqlite3Fts5ParseNodeFree(sParse.pExpr);
  }

  sqlite3_free(sParse.apPhrase);
  *pzErr = sParse.zErr;
  return sParse.rc;
}


Old version (lines 1264-1277):
  int rc = SQLITE_OK;
  pNode->bEof = 0;
  pNode->bNomatch = 0;

  if( Fts5NodeIsString(pNode) ){
    /* Initialize all term iterators in the NEAR object. */
    rc = fts5ExprNearInitAll(pExpr, pNode);


  }else{
    int i;
    int nEof = 0;
    for(i=0; i<pNode->nChild && rc==SQLITE_OK; i++){
      Fts5ExprNode *pChild = pNode->apChild[i];
      rc = fts5ExprNodeFirst(pExpr, pNode->apChild[i]);
      assert( pChild->bEof==0 || pChild->bEof==1 );

New version (lines 1266-1281):
  int rc = SQLITE_OK;
  pNode->bEof = 0;
  pNode->bNomatch = 0;

  if( Fts5NodeIsString(pNode) ){
    /* Initialize all term iterators in the NEAR object. */
    rc = fts5ExprNearInitAll(pExpr, pNode);
  }else if( pNode->xNext==0 ){
    pNode->bEof = 1;
  }else{
    int i;
    int nEof = 0;
    for(i=0; i<pNode->nChild && rc==SQLITE_OK; i++){
      Fts5ExprNode *pChild = pNode->apChild[i];
      rc = fts5ExprNodeFirst(pExpr, pNode->apChild[i]);
      assert( pChild->bEof==0 || pChild->bEof==1 );

Old version (lines 1315-1345):
** equal to iFirst.
**
** Return SQLITE_OK if successful, or an SQLite error code otherwise. It
** is not considered an error if the query does not match any documents.
*/
int sqlite3Fts5ExprFirst(Fts5Expr *p, Fts5Index *pIdx, i64 iFirst, int bDesc){
  Fts5ExprNode *pRoot = p->pRoot;
  int rc = SQLITE_OK;
  if( pRoot->xNext ){
    p->pIndex = pIdx;
    p->bDesc = bDesc;
    rc = fts5ExprNodeFirst(p, pRoot);

    /* If not at EOF but the current rowid occurs earlier than iFirst in
    ** the iteration order, move to document iFirst or later. */
    if( pRoot->bEof==0 && fts5RowidCmp(p, pRoot->iRowid, iFirst)<0 ){
      rc = fts5ExprNodeNext(p, pRoot, 1, iFirst);
    }

    /* If the iterator is not at a real match, skip forward until it is. */
    while( pRoot->bNomatch ){
      assert( pRoot->bEof==0 && rc==SQLITE_OK );
      rc = fts5ExprNodeNext(p, pRoot, 0, 0);
    }
  }
  return rc;
}

/*
** Move to the next document 
**

New version (lines 1319-1348):
** equal to iFirst.
**
** Return SQLITE_OK if successful, or an SQLite error code otherwise. It
** is not considered an error if the query does not match any documents.
*/
int sqlite3Fts5ExprFirst(Fts5Expr *p, Fts5Index *pIdx, i64 iFirst, int bDesc){
  Fts5ExprNode *pRoot = p->pRoot;
  int rc;                         /* Return code */

  p->pIndex = pIdx;
  p->bDesc = bDesc;
  rc = fts5ExprNodeFirst(p, pRoot);

  /* If not at EOF but the current rowid occurs earlier than iFirst in
  ** the iteration order, move to document iFirst or later. */
  if( pRoot->bEof==0 && fts5RowidCmp(p, pRoot->iRowid, iFirst)<0 ){
    rc = fts5ExprNodeNext(p, pRoot, 1, iFirst);
  }

  /* If the iterator is not at a real match, skip forward until it is. */
  while( pRoot->bNomatch ){
    assert( pRoot->bEof==0 && rc==SQLITE_OK );
    rc = fts5ExprNodeNext(p, pRoot, 0, 0);

  }
  return rc;
}

/*
** Move to the next document 
**

Old version (lines 1440-1453):
  }

  if( pRet==0 ){
    assert( pParse->rc!=SQLITE_OK );
    sqlite3Fts5ParseNearsetFree(pNear);
    sqlite3Fts5ParsePhraseFree(pPhrase);
  }else{















    pRet->apPhrase[pRet->nPhrase++] = pPhrase;
  }
  return pRet;
}

typedef struct TokenCtx TokenCtx;
struct TokenCtx {

New version (lines 1443-1471):
  }

  if( pRet==0 ){
    assert( pParse->rc!=SQLITE_OK );
    sqlite3Fts5ParseNearsetFree(pNear);
    sqlite3Fts5ParsePhraseFree(pPhrase);
  }else{
    if( pRet->nPhrase>0 ){
      Fts5ExprPhrase *pLast = pRet->apPhrase[pRet->nPhrase-1];
      assert( pLast==pParse->apPhrase[pParse->nPhrase-2] );
      if( pPhrase->nTerm==0 ){
        fts5ExprPhraseFree(pPhrase);
        pRet->nPhrase--;
        pParse->nPhrase--;
        pPhrase = pLast;
      }else if( pLast->nTerm==0 ){
        fts5ExprPhraseFree(pLast);
        pParse->apPhrase[pParse->nPhrase-2] = pPhrase;
        pParse->nPhrase--;
        pRet->nPhrase--;
      }
    }
    pRet->apPhrase[pRet->nPhrase++] = pPhrase;
  }
  return pRet;
}

typedef struct TokenCtx TokenCtx;
struct TokenCtx {

Old version (lines 1472-1487):
  Fts5ExprPhrase *pPhrase = pCtx->pPhrase;

  UNUSED_PARAM2(iUnused1, iUnused2);

  /* If an error has already occurred, this is a no-op */
  if( pCtx->rc!=SQLITE_OK ) return pCtx->rc;

  assert( pPhrase==0 || pPhrase->nTerm>0 );
  if( pPhrase && (tflags & FTS5_TOKEN_COLOCATED) ){
    Fts5ExprTerm *pSyn;
    int nByte = sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer) + nToken+1;
    pSyn = (Fts5ExprTerm*)sqlite3_malloc(nByte);
    if( pSyn==0 ){
      rc = SQLITE_NOMEM;
    }else{
      memset(pSyn, 0, nByte);

New version (lines 1490-1504):
  Fts5ExprPhrase *pPhrase = pCtx->pPhrase;

  UNUSED_PARAM2(iUnused1, iUnused2);

  /* If an error has already occurred, this is a no-op */
  if( pCtx->rc!=SQLITE_OK ) return pCtx->rc;


  if( pPhrase && pPhrase->nTerm>0 && (tflags & FTS5_TOKEN_COLOCATED) ){
    Fts5ExprTerm *pSyn;
    int nByte = sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer) + nToken+1;
    pSyn = (Fts5ExprTerm*)sqlite3_malloc(nByte);
    if( pSyn==0 ){
      rc = SQLITE_NOMEM;
    }else{
      memset(pSyn, 0, nByte);

Old version (lines 1574-1607):
    rc = sqlite3Fts5Tokenize(pConfig, flags, z, n, &sCtx, fts5ParseTokenize);
  }
  sqlite3_free(z);
  if( rc || (rc = sCtx.rc) ){
    pParse->rc = rc;
    fts5ExprPhraseFree(sCtx.pPhrase);
    sCtx.pPhrase = 0;
  }else if( sCtx.pPhrase ){

    if( pAppend==0 ){
      if( (pParse->nPhrase % 8)==0 ){
        int nByte = sizeof(Fts5ExprPhrase*) * (pParse->nPhrase + 8);
        Fts5ExprPhrase **apNew;
        apNew = (Fts5ExprPhrase**)sqlite3_realloc(pParse->apPhrase, nByte);
        if( apNew==0 ){
          pParse->rc = SQLITE_NOMEM;
          fts5ExprPhraseFree(sCtx.pPhrase);
          return 0;
        }
        pParse->apPhrase = apNew;
      }
      pParse->nPhrase++;
    }

    pParse->apPhrase[pParse->nPhrase-1] = sCtx.pPhrase;



    assert( sCtx.pPhrase->nTerm>0 );
    sCtx.pPhrase->aTerm[sCtx.pPhrase->nTerm-1].bPrefix = bPrefix;


  }

  return sCtx.pPhrase;
}

/*
** Create a new FTS5 expression by cloning phrase iPhrase of the

New version (lines 1591-1629):
    rc = sqlite3Fts5Tokenize(pConfig, flags, z, n, &sCtx, fts5ParseTokenize);
  }
  sqlite3_free(z);
  if( rc || (rc = sCtx.rc) ){
    pParse->rc = rc;
    fts5ExprPhraseFree(sCtx.pPhrase);
    sCtx.pPhrase = 0;
  }else{

    if( pAppend==0 ){
      if( (pParse->nPhrase % 8)==0 ){
        int nByte = sizeof(Fts5ExprPhrase*) * (pParse->nPhrase + 8);
        Fts5ExprPhrase **apNew;
        apNew = (Fts5ExprPhrase**)sqlite3_realloc(pParse->apPhrase, nByte);
        if( apNew==0 ){
          pParse->rc = SQLITE_NOMEM;
          fts5ExprPhraseFree(sCtx.pPhrase);
          return 0;
        }
        pParse->apPhrase = apNew;
      }
      pParse->nPhrase++;
    }

    if( sCtx.pPhrase==0 ){
      /* This happens when parsing a token or quoted phrase that contains
      ** no token characters at all. (e.g ... MATCH '""'). */
      sCtx.pPhrase = sqlite3Fts5MallocZero(&pParse->rc, sizeof(Fts5ExprPhrase));
    }else if( sCtx.pPhrase->nTerm ){
      sCtx.pPhrase->aTerm[sCtx.pPhrase->nTerm-1].bPrefix = bPrefix;
    }
    pParse->apPhrase[pParse->nPhrase-1] = sCtx.pPhrase;
  }

  return sCtx.pPhrase;
}

/*
** Create a new FTS5 expression by cloning phrase iPhrase of the

Old version (lines 1689-1719):
}

void sqlite3Fts5ParseSetDistance(
  Fts5Parse *pParse, 
  Fts5ExprNearset *pNear,
  Fts5Token *p
){

  int nNear = 0;
  int i;
  if( p->n ){
    for(i=0; i<p->n; i++){
      char c = (char)p->p[i];
      if( c<'0' || c>'9' ){
        sqlite3Fts5ParseError(
            pParse, "expected integer, got \"%.*s\"", p->n, p->p
        );
        return;
      }
      nNear = nNear * 10 + (p->p[i] - '0');
    }
  }else{
    nNear = FTS5_DEFAULT_NEARDIST;
  }
  pNear->nNear = nNear;

}

/*
** The second argument passed to this function may be NULL, or it may be
** an existing Fts5Colset object. This function returns a pointer to
** a new colset object containing the contents of (p) with new value column
** number iCol appended. 

New version (lines 1711-1743):
}

void sqlite3Fts5ParseSetDistance(
  Fts5Parse *pParse, 
  Fts5ExprNearset *pNear,
  Fts5Token *p
){
  if( pNear ){
    int nNear = 0;
    int i;
    if( p->n ){
      for(i=0; i<p->n; i++){
        char c = (char)p->p[i];
        if( c<'0' || c>'9' ){
          sqlite3Fts5ParseError(
              pParse, "expected integer, got \"%.*s\"", p->n, p->p
              );
          return;
        }
        nNear = nNear * 10 + (p->p[i] - '0');
      }
    }else{
      nNear = FTS5_DEFAULT_NEARDIST;
    }
    pNear->nNear = nNear;
  }
}

/*
** The second argument passed to this function may be NULL, or it may be
** an existing Fts5Colset object. This function returns a pointer to
** a new colset object containing the contents of (p) with new value column
** number iCol appended. 

Old version (lines 1892-1909):
      pRet->eType = eType;
      pRet->pNear = pNear;
      fts5ExprAssignXNext(pRet);
      if( eType==FTS5_STRING ){
        int iPhrase;
        for(iPhrase=0; iPhrase<pNear->nPhrase; iPhrase++){
          pNear->apPhrase[iPhrase]->pNode = pRet;




        }

        if( pParse->pConfig->eDetail!=FTS5_DETAIL_FULL 
         && (pNear->nPhrase!=1 || pNear->apPhrase[0]->nTerm!=1)
        ){
          assert( pParse->rc==SQLITE_OK );
          pParse->rc = SQLITE_ERROR;
          assert( pParse->zErr==0 );
          pParse->zErr = sqlite3_mprintf(
              "fts5: %s queries are not supported (detail!=full)", 
              pNear->nPhrase==1 ? "phrase": "NEAR"

New version (lines 1916-1937):
      pRet->eType = eType;
      pRet->pNear = pNear;
      fts5ExprAssignXNext(pRet);
      if( eType==FTS5_STRING ){
        int iPhrase;
        for(iPhrase=0; iPhrase<pNear->nPhrase; iPhrase++){
          pNear->apPhrase[iPhrase]->pNode = pRet;
          if( pNear->apPhrase[iPhrase]->nTerm==0 ){
            pRet->xNext = 0;
            pRet->eType = FTS5_EOF;
          }
        }

        if( pParse->pConfig->eDetail!=FTS5_DETAIL_FULL 
         && (pNear->nPhrase!=1 || pNear->apPhrase[0]->nTerm>1)
        ){
          assert( pParse->rc==SQLITE_OK );
          pParse->rc = SQLITE_ERROR;
          assert( pParse->zErr==0 );
          pParse->zErr = sqlite3_mprintf(
              "fts5: %s queries are not supported (detail!=full)", 
              pNear->nPhrase==1 ? "phrase": "NEAR"

Old version (lines 1921-1934):

  if( pRet==0 ){
    assert( pParse->rc!=SQLITE_OK );
    sqlite3Fts5ParseNodeFree(pLeft);
    sqlite3Fts5ParseNodeFree(pRight);
    sqlite3Fts5ParseNearsetFree(pNear);
  }
































































  return pRet;
}

static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){
  int nByte = 0;
  Fts5ExprTerm *p;
  char *zQuoted;

New version (lines 1949-2026):

  if( pRet==0 ){
    assert( pParse->rc!=SQLITE_OK );
    sqlite3Fts5ParseNodeFree(pLeft);
    sqlite3Fts5ParseNodeFree(pRight);
    sqlite3Fts5ParseNearsetFree(pNear);
  }
  return pRet;
}

Fts5ExprNode *sqlite3Fts5ParseImplicitAnd(
  Fts5Parse *pParse,              /* Parse context */
  Fts5ExprNode *pLeft,            /* Left hand child expression */
  Fts5ExprNode *pRight            /* Right hand child expression */
){
  Fts5ExprNode *pRet = 0;
  Fts5ExprNode *pPrev;

  if( pParse->rc ){
    sqlite3Fts5ParseNodeFree(pLeft);
    sqlite3Fts5ParseNodeFree(pRight);
  }else{

    assert( pLeft->eType==FTS5_STRING 
        || pLeft->eType==FTS5_TERM
        || pLeft->eType==FTS5_EOF
        || pLeft->eType==FTS5_AND
    );
    assert( pRight->eType==FTS5_STRING 
        || pRight->eType==FTS5_TERM 
        || pRight->eType==FTS5_EOF 
    );

    if( pLeft->eType==FTS5_AND ){
      pPrev = pLeft->apChild[pLeft->nChild-1];
    }else{
      pPrev = pLeft;
    }
    assert( pPrev->eType==FTS5_STRING 
        || pPrev->eType==FTS5_TERM 
        || pPrev->eType==FTS5_EOF 
        );

    if( pRight->eType==FTS5_EOF ){
      assert( pParse->apPhrase[pParse->nPhrase-1]==pRight->pNear->apPhrase[0] );
      sqlite3Fts5ParseNodeFree(pRight);
      pRet = pLeft;
      pParse->nPhrase--;
    }
    else if( pPrev->eType==FTS5_EOF ){
      Fts5ExprPhrase **ap;

      if( pPrev==pLeft ){
        pRet = pRight;
      }else{
        pLeft->apChild[pLeft->nChild-1] = pRight;
        pRet = pLeft;
      }

      ap = &pParse->apPhrase[pParse->nPhrase-1-pRight->pNear->nPhrase];
      assert( ap[0]==pPrev->pNear->apPhrase[0] );
      memmove(ap, &ap[1], sizeof(Fts5ExprPhrase*)*pRight->pNear->nPhrase);
      pParse->nPhrase--;

      sqlite3Fts5ParseNodeFree(pPrev);
    }
    else{
      pRet = sqlite3Fts5ParseNode(pParse, FTS5_AND, pLeft, pRight, 0);
    }
  }

  return pRet;
}

static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){
  int nByte = 0;
  Fts5ExprTerm *p;
  char *zQuoted;

Old version (lines 2058-2071):
  }

  return zRet;
}

static char *fts5ExprPrint(Fts5Config *pConfig, Fts5ExprNode *pExpr){
  char *zRet = 0;



  if( pExpr->eType==FTS5_STRING || pExpr->eType==FTS5_TERM ){
    Fts5ExprNearset *pNear = pExpr->pNear;
    int i; 
    int iTerm;

    if( pNear->pColset ){
      int iCol = pNear->pColset->aiCol[0];

New version (lines 2150-2166):
  }

  return zRet;
}

static char *fts5ExprPrint(Fts5Config *pConfig, Fts5ExprNode *pExpr){
  char *zRet = 0;
  if( pExpr->eType==0 ){
    return sqlite3_mprintf("\"\"");
  }else
  if( pExpr->eType==FTS5_STRING || pExpr->eType==FTS5_TERM ){
    Fts5ExprNearset *pNear = pExpr->pNear;
    int i; 
    int iTerm;

    if( pNear->pColset ){
      int iCol = pNear->pColset->aiCol[0];

Old version (lines 2118-2132):
    for(i=0; i<pExpr->nChild; i++){
      char *z = fts5ExprPrint(pConfig, pExpr->apChild[i]);
      if( z==0 ){
        sqlite3_free(zRet);
        zRet = 0;
      }else{
        int e = pExpr->apChild[i]->eType;
        int b = (e!=FTS5_STRING && e!=FTS5_TERM);
        zRet = fts5PrintfAppend(zRet, "%s%s%z%s", 
            (i==0 ? "" : zOp),
            (b?"(":""), z, (b?")":"")
        );
      }
      if( zRet==0 ) break;
    }

New version (lines 2213-2227):
    for(i=0; i<pExpr->nChild; i++){
      char *z = fts5ExprPrint(pConfig, pExpr->apChild[i]);
      if( z==0 ){
        sqlite3_free(zRet);
        zRet = 0;
      }else{
        int e = pExpr->apChild[i]->eType;
        int b = (e!=FTS5_STRING && e!=FTS5_TERM && e!=FTS5_EOF);
        zRet = fts5PrintfAppend(zRet, "%s%s%z%s", 
            (i==0 ? "" : zOp),
            (b?"(":""), z, (b?")":"")
        );
      }
      if( zRet==0 ) break;
    }
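
Taken together, the fts5_expr.c changes let an empty phrase ("") participate in an expression without silently dropping its neighbours: a phrase with no terms becomes an FTS5_EOF node, implicit AND folds it away, and fts5ExprPrint() renders such a node as an empty string literal. The updated fts5eb.test expectations below illustrate the visible effect via the fts5_expr() debug function:

  SELECT fts5_expr('abc OR ""');   -- now prints: "abc" OR ""
  SELECT fts5_expr('abc "" def');  -- implicit AND: "abc" AND "def"
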
Changes to ext/fts5/fts5_index.c.

Old version (lines 4175-4195):
  fts5MultiIterFree(pIter);
  fts5BufferFree(&term);
  if( pnRem ) *pnRem -= writer.nLeafWritten;
}

/*
** Do up to nPg pages of automerge work on the index.


*/
static void fts5IndexMerge(
  Fts5Index *p,                   /* FTS5 backend object */
  Fts5Structure **ppStruct,       /* IN/OUT: Current structure of index */
  int nPg                         /* Pages of work to do */

){
  int nRem = nPg;

  Fts5Structure *pStruct = *ppStruct;
  while( nRem>0 && p->rc==SQLITE_OK ){
    int iLvl;                   /* To iterate through levels */
    int iBestLvl = 0;           /* Level offering the most input segments */
    int nBest = 0;              /* Number of input segments on best level */

    /* Set iBestLvl to the level to read input segments from. */

New version (lines 4175-4199):
  fts5MultiIterFree(pIter);
  fts5BufferFree(&term);
  if( pnRem ) *pnRem -= writer.nLeafWritten;
}

/*
** Do up to nPg pages of automerge work on the index.
**
** Return true if any changes were actually made, or false otherwise.
*/
static int fts5IndexMerge(
  Fts5Index *p,                   /* FTS5 backend object */
  Fts5Structure **ppStruct,       /* IN/OUT: Current structure of index */
  int nPg,                        /* Pages of work to do */
  int nMin                        /* Minimum number of segments to merge */
){
  int nRem = nPg;
  int bRet = 0;
  Fts5Structure *pStruct = *ppStruct;
  while( nRem>0 && p->rc==SQLITE_OK ){
    int iLvl;                   /* To iterate through levels */
    int iBestLvl = 0;           /* Level offering the most input segments */
    int nBest = 0;              /* Number of input segments on best level */

    /* Set iBestLvl to the level to read input segments from. */

Old version (lines 4212-4236):
    /* If nBest is still 0, then the index must be empty. */
#ifdef SQLITE_DEBUG
    for(iLvl=0; nBest==0 && iLvl<pStruct->nLevel; iLvl++){
      assert( pStruct->aLevel[iLvl].nSeg==0 );
    }
#endif

    if( nBest<p->pConfig->nAutomerge 
        && pStruct->aLevel[iBestLvl].nMerge==0 
      ){
      break;
    }

    fts5IndexMergeLevel(p, &pStruct, iBestLvl, &nRem);
    if( p->rc==SQLITE_OK && pStruct->aLevel[iBestLvl].nMerge==0 ){
      fts5StructurePromote(p, iBestLvl+1, pStruct);
    }
  }
  *ppStruct = pStruct;

}

/*
** A total of nLeaf leaf pages of data has just been flushed to a level-0
** segment. This function updates the write-counter accordingly and, if
** necessary, performs incremental merge work.
**

New version (lines 4216-4240):
    /* If nBest is still 0, then the index must be empty. */
#ifdef SQLITE_DEBUG
    for(iLvl=0; nBest==0 && iLvl<pStruct->nLevel; iLvl++){
      assert( pStruct->aLevel[iLvl].nSeg==0 );
    }
#endif


    if( nBest<nMin && pStruct->aLevel[iBestLvl].nMerge==0 ){

      break;
    }
    bRet = 1;
    fts5IndexMergeLevel(p, &pStruct, iBestLvl, &nRem);
    if( p->rc==SQLITE_OK && pStruct->aLevel[iBestLvl].nMerge==0 ){
      fts5StructurePromote(p, iBestLvl+1, pStruct);
    }
  }
  *ppStruct = pStruct;
  return bRet;
}

/*
** A total of nLeaf leaf pages of data has just been flushed to a level-0
** segment. This function updates the write-counter accordingly and, if
** necessary, performs incremental merge work.
**

Old version (lines 4250-4264):

    /* Update the write-counter. While doing so, set nWork. */
    nWrite = pStruct->nWriteCounter;
    nWork = (int)(((nWrite + nLeaf) / p->nWorkUnit) - (nWrite / p->nWorkUnit));
    pStruct->nWriteCounter += nLeaf;
    nRem = (int)(p->nWorkUnit * nWork * pStruct->nLevel);

    fts5IndexMerge(p, ppStruct, nRem);
  }
}

static void fts5IndexCrisismerge(
  Fts5Index *p,                   /* FTS5 backend object */
  Fts5Structure **ppStruct        /* IN/OUT: Current structure of index */
){

New version (lines 4254-4268):

    /* Update the write-counter. While doing so, set nWork. */
    nWrite = pStruct->nWriteCounter;
    nWork = (int)(((nWrite + nLeaf) / p->nWorkUnit) - (nWrite / p->nWorkUnit));
    pStruct->nWriteCounter += nLeaf;
    nRem = (int)(p->nWorkUnit * nWork * pStruct->nLevel);

    fts5IndexMerge(p, ppStruct, nRem, p->pConfig->nAutomerge);
  }
}

static void fts5IndexCrisismerge(
  Fts5Index *p,                   /* FTS5 backend object */
  Fts5Structure **ppStruct        /* IN/OUT: Current structure of index */
){

Old version (lines 4470-4502):
  if( p->nPendingData ){
    assert( p->pHash );
    p->nPendingData = 0;
    fts5FlushOneHash(p);
  }
}


int sqlite3Fts5IndexOptimize(Fts5Index *p){
  Fts5Structure *pStruct;

  Fts5Structure *pNew = 0;

  int nSeg = 0;


  assert( p->rc==SQLITE_OK );
  fts5IndexFlush(p);














  pStruct = fts5StructureRead(p);


  if( pStruct ){

    assert( pStruct->nSegment==fts5StructureCountSegments(pStruct) );
    nSeg = pStruct->nSegment;
    if( nSeg>1 ){
      int nByte = sizeof(Fts5Structure);
      nByte += (pStruct->nLevel+1) * sizeof(Fts5StructureLevel);
      pNew = (Fts5Structure*)sqlite3Fts5MallocZero(&p->rc, nByte);
    }
  }
  if( pNew ){
    Fts5StructureLevel *pLvl;
    int nByte = nSeg * sizeof(Fts5StructureSegment);
    pNew->nLevel = pStruct->nLevel+1;
    pNew->nRef = 1;
    pNew->nWriteCounter = pStruct->nWriteCounter;
    pLvl = &pNew->aLevel[pStruct->nLevel];

New version (lines 4474-4519):
  if( p->nPendingData ){
    assert( p->pHash );
    p->nPendingData = 0;
    fts5FlushOneHash(p);
  }
}

static Fts5Structure *fts5IndexOptimizeStruct(
  Fts5Index *p, 
  Fts5Structure *pStruct
){
  Fts5Structure *pNew = 0;
  int nByte = sizeof(Fts5Structure);
  int nSeg = pStruct->nSegment;
  int i;



  /* Figure out if this structure requires optimization. A structure does
  ** not require optimization if either:
  **
  **  + it consists of fewer than two segments, or 
  **  + all segments are on the same level, or
  **  + all segments except one are currently inputs to a merge operation.
  **
  ** In the first case, return NULL. In the second, increment the ref-count
  ** on *pStruct and return a copy of the pointer to it.
  */
  if( nSeg<2 ) return 0;
  for(i=0; i<pStruct->nLevel; i++){
    int nThis = pStruct->aLevel[i].nSeg;
    if( nThis==nSeg || (nThis==nSeg-1 && pStruct->aLevel[i].nMerge==nThis) ){
      fts5StructureRef(pStruct);
      return pStruct;
    }
    assert( pStruct->aLevel[i].nMerge<=nThis );
  }




  nByte += (pStruct->nLevel+1) * sizeof(Fts5StructureLevel);
  pNew = (Fts5Structure*)sqlite3Fts5MallocZero(&p->rc, nByte);


  if( pNew ){
    Fts5StructureLevel *pLvl;
    int nByte = nSeg * sizeof(Fts5StructureSegment);
    pNew->nLevel = pStruct->nLevel+1;
    pNew->nRef = 1;
    pNew->nWriteCounter = pStruct->nWriteCounter;
    pLvl = &pNew->aLevel[pStruct->nLevel];

Old version (lines 4516-4554):
      pNew->nSegment = pLvl->nSeg = nSeg;
    }else{
      sqlite3_free(pNew);
      pNew = 0;
    }
  }


















  if( pNew ){
    int iLvl = pNew->nLevel-1;

    while( p->rc==SQLITE_OK && pNew->aLevel[iLvl].nSeg>0 ){
      int nRem = FTS5_OPT_WORK_UNIT;
      fts5IndexMergeLevel(p, &pNew, iLvl, &nRem);
    }

    fts5StructureWrite(p, pNew);
    fts5StructureRelease(pNew);
  }

  fts5StructureRelease(pStruct);
  return fts5IndexReturn(p); 
}





int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){
  Fts5Structure *pStruct;









  pStruct = fts5StructureRead(p);
  if( pStruct && pStruct->nLevel ){
    fts5IndexMerge(p, &pStruct, nMerge);
    fts5StructureWrite(p, pStruct);
  }

  fts5StructureRelease(pStruct);

  return fts5IndexReturn(p);
}

static void fts5AppendRowid(
  Fts5Index *p,
  i64 iDelta,
  Fts5Iter *pUnused,

New version (lines 4533-4600):
      pNew->nSegment = pLvl->nSeg = nSeg;
    }else{
      sqlite3_free(pNew);
      pNew = 0;
    }
  }

  return pNew;
}

int sqlite3Fts5IndexOptimize(Fts5Index *p){
  Fts5Structure *pStruct;
  Fts5Structure *pNew = 0;

  assert( p->rc==SQLITE_OK );
  fts5IndexFlush(p);
  pStruct = fts5StructureRead(p);

  if( pStruct ){
    pNew = fts5IndexOptimizeStruct(p, pStruct);
  }
  fts5StructureRelease(pStruct);

  assert( pNew==0 || pNew->nSegment>0 );
  if( pNew ){
    int iLvl;
    for(iLvl=0; pNew->aLevel[iLvl].nSeg==0; iLvl++){}
    while( p->rc==SQLITE_OK && pNew->aLevel[iLvl].nSeg>0 ){
      int nRem = FTS5_OPT_WORK_UNIT;
      fts5IndexMergeLevel(p, &pNew, iLvl, &nRem);
    }

    fts5StructureWrite(p, pNew);
    fts5StructureRelease(pNew);
  }


  return fts5IndexReturn(p); 
}

/*
** This is called to implement the special "VALUES('merge', $nMerge)"
** INSERT command.
*/
int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){
  Fts5Structure *pStruct = fts5StructureRead(p);
  if( pStruct ){
    int nMin = p->pConfig->nUsermerge;
    if( nMerge<0 ){
      Fts5Structure *pNew = fts5IndexOptimizeStruct(p, pStruct);
      fts5StructureRelease(pStruct);
      pStruct = pNew;
      nMin = 2;
      nMerge = nMerge*-1;
    }

    if( pStruct && pStruct->nLevel ){
      if( fts5IndexMerge(p, &pStruct, nMerge, nMin) ){
        fts5StructureWrite(p, pStruct);
      }
    }
    fts5StructureRelease(pStruct);
  }
  return fts5IndexReturn(p);
}

static void fts5AppendRowid(
  Fts5Index *p,
  i64 iDelta,
  Fts5Iter *pUnused,
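
As the new comment in sqlite3Fts5IndexMerge() notes, this function backs the special 'merge' command. A hedged usage sketch (ft is a hypothetical table name and 500 is an arbitrary amount of work):

  -- Perform up to 500 pages of merge work, using the 'usermerge' threshold.
  INSERT INTO ft(ft, rank) VALUES('merge', 500);
  -- A negative argument first builds the "optimize" structure, then merges
  -- with the minimum segment count forced to 2.
  INSERT INTO ft(ft, rank) VALUES('merge', -500);
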
Changes to ext/fts5/fts5_main.c.

Old version (lines 1507-1539):
      pTab->base.zErrMsg = sqlite3_mprintf(
          "cannot %s contentless fts5 table: %s", 
          (nArg>1 ? "UPDATE" : "DELETE from"), pConfig->zName
      );
      rc = SQLITE_ERROR;
    }

    /* Case 1: DELETE */
    else if( nArg==1 ){
      i64 iDel = sqlite3_value_int64(apVal[0]);  /* Rowid to delete */
      rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, 0);
    }

    /* Case 2: INSERT */
    else if( eType0!=SQLITE_INTEGER ){     
      /* If this is a REPLACE, first remove the current entry (if any) */
      if( eConflict==SQLITE_REPLACE 
       && sqlite3_value_type(apVal[1])==SQLITE_INTEGER 
      ){
        i64 iNew = sqlite3_value_int64(apVal[1]);  /* Rowid to delete */
        rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0);
      }
      fts5StorageInsert(&rc, pTab, apVal, pRowid);
    }

    /* Case 2: UPDATE */
    else{
      i64 iOld = sqlite3_value_int64(apVal[0]);  /* Old rowid */
      i64 iNew = sqlite3_value_int64(apVal[1]);  /* New rowid */
      if( iOld!=iNew ){
        if( eConflict==SQLITE_REPLACE ){
          rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0);
          if( rc==SQLITE_OK ){

New version (lines 1507-1539):
      pTab->base.zErrMsg = sqlite3_mprintf(
          "cannot %s contentless fts5 table: %s", 
          (nArg>1 ? "UPDATE" : "DELETE from"), pConfig->zName
      );
      rc = SQLITE_ERROR;
    }

    /* DELETE */
    else if( nArg==1 ){
      i64 iDel = sqlite3_value_int64(apVal[0]);  /* Rowid to delete */
      rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, 0);
    }

    /* INSERT */
    else if( eType0!=SQLITE_INTEGER ){     
      /* If this is a REPLACE, first remove the current entry (if any) */
      if( eConflict==SQLITE_REPLACE 
       && sqlite3_value_type(apVal[1])==SQLITE_INTEGER 
      ){
        i64 iNew = sqlite3_value_int64(apVal[1]);  /* Rowid to delete */
        rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0);
      }
      fts5StorageInsert(&rc, pTab, apVal, pRowid);
    }

    /* UPDATE */
    else{
      i64 iOld = sqlite3_value_int64(apVal[0]);  /* Old rowid */
      i64 iNew = sqlite3_value_int64(apVal[1]);  /* New rowid */
      if( iOld!=iNew ){
        if( eConflict==SQLITE_REPLACE ){
          rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0);
          if( rc==SQLITE_OK ){
Changes to ext/fts5/fts5_test_mi.c.

Old version (lines 64-89):
/*
** Return a pointer to the fts5_api pointer for database connection db.
** If an error occurs, return NULL and leave an error in the database 
** handle (accessible using sqlite3_errcode()/errmsg()).
*/
static fts5_api *fts5_api_from_db(sqlite3 *db){
  fts5_api *pRet = 0;
  sqlite3_stmt *pStmt = 0;



  if( SQLITE_OK==sqlite3_prepare(db, "SELECT fts5()", -1, &pStmt, 0)

   && SQLITE_ROW==sqlite3_step(pStmt) 
   && sizeof(pRet)==sqlite3_column_bytes(pStmt, 0)
  ){
    memcpy(&pRet, sqlite3_column_blob(pStmt, 0), sizeof(pRet));
  }
  sqlite3_finalize(pStmt);


  return pRet;
}


/*
** Argument f should be a flag accepted by matchinfo() (a valid character
** in the string passed as the second argument). If it is not, -1 is 
** returned. Otherwise, if f is a valid matchinfo flag, the value returned

New version (lines 64-93):
/*
** Return a pointer to the fts5_api pointer for database connection db.
** If an error occurs, return NULL and leave an error in the database 
** handle (accessible using sqlite3_errcode()/errmsg()).
*/
static int fts5_api_from_db(sqlite3 *db, fts5_api **ppApi){

  sqlite3_stmt *pStmt = 0;
  int rc;

  *ppApi = 0;
  rc = sqlite3_prepare(db, "SELECT fts5()", -1, &pStmt, 0);
  if( rc==SQLITE_OK ){
    if( SQLITE_ROW==sqlite3_step(pStmt) 
        && sizeof(fts5_api*)==sqlite3_column_bytes(pStmt, 0)
      ){
      memcpy(ppApi, sqlite3_column_blob(pStmt, 0), sizeof(fts5_api*));
    }
    rc = sqlite3_finalize(pStmt);
  }

  return rc;
}


/*
** Argument f should be a flag accepted by matchinfo() (a valid character
** in the string passed as the second argument). If it is not, -1 is 
** returned. Otherwise, if f is a valid matchinfo flag, the value returned

Old version (lines 395-409):
int sqlite3Fts5TestRegisterMatchinfo(sqlite3 *db){
  int rc;                         /* Return code */
  fts5_api *pApi;                 /* FTS5 API functions */

  /* Extract the FTS5 API pointer from the database handle. The 
  ** fts5_api_from_db() function above is copied verbatim from the 
  ** FTS5 documentation. Refer there for details. */
  pApi = fts5_api_from_db(db);


  /* If fts5_api_from_db() returns NULL, then either FTS5 is not registered
  ** with this database handle, or an error (OOM perhaps?) has occurred.
  **
  ** Also check that the fts5_api object is version 2 or newer.  
  */ 
  if( pApi==0 || pApi->iVersion<2 ){

New version (lines 399-414):
int sqlite3Fts5TestRegisterMatchinfo(sqlite3 *db){
  int rc;                         /* Return code */
  fts5_api *pApi;                 /* FTS5 API functions */

  /* Extract the FTS5 API pointer from the database handle. The 
  ** fts5_api_from_db() function above is copied verbatim from the 
  ** FTS5 documentation. Refer there for details. */
  rc = fts5_api_from_db(db, &pApi);
  if( rc!=SQLITE_OK ) return rc;

  /* If fts5_api_from_db() returns NULL, then either FTS5 is not registered
  ** with this database handle, or an error (OOM perhaps?) has occurred.
  **
  ** Also check that the fts5_api object is version 2 or newer.  
  */ 
  if( pApi==0 || pApi->iVersion<2 ){
Changes to ext/fts5/fts5parse.y.

Old version (lines 100-114):
}

expr(A) ::= LP expr(X) RP. {A = X;}
expr(A) ::= exprlist(X).   {A = X;}

exprlist(A) ::= cnearset(X). {A = X;}
exprlist(A) ::= exprlist(X) cnearset(Y). {
  A = sqlite3Fts5ParseNode(pParse, FTS5_AND, X, Y, 0);
}

cnearset(A) ::= nearset(X). { 
  A = sqlite3Fts5ParseNode(pParse, FTS5_STRING, 0, 0, X); 
}
cnearset(A) ::= colset(X) COLON nearset(Y). { 
  sqlite3Fts5ParseSetColset(pParse, Y, X);

New version (lines 100-114):
}

expr(A) ::= LP expr(X) RP. {A = X;}
expr(A) ::= exprlist(X).   {A = X;}

exprlist(A) ::= cnearset(X). {A = X;}
exprlist(A) ::= exprlist(X) cnearset(Y). {
  A = sqlite3Fts5ParseImplicitAnd(pParse, X, Y);
}

cnearset(A) ::= nearset(X). { 
  A = sqlite3Fts5ParseNode(pParse, FTS5_STRING, 0, 0, X); 
}
cnearset(A) ::= colset(X) COLON nearset(Y). { 
  sqlite3Fts5ParseSetColset(pParse, Y, X);
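
With this grammar change, adjacent terms are combined through sqlite3Fts5ParseImplicitAnd() rather than a plain FTS5_AND node, which is what allows an empty phrase to drop out of the implicit conjunction. The added fts5fuzz1.test below depends on this, e.g.:

  SELECT rowid FROM f1('"" a');   -- matches rows 1 and 2; the empty phrase is ignored
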
Changes to ext/fts5/test/fts5_common.tcl.

Old version (lines 154-167):

    fts5_test_queryphrase
    fts5_test_phrasecount
  } {
    sqlite3_fts5_create_function $db $f $f
  }
}







proc fts5_level_segs {tbl} {
  set sql "SELECT fts5_decode(rowid,block) aS r FROM ${tbl}_data WHERE rowid=10"
  set ret [list]
  foreach L [lrange [db one $sql] 1 end] {
    lappend ret [expr [llength $L] - 3]
  }

New version (lines 154-173):

    fts5_test_queryphrase
    fts5_test_phrasecount
  } {
    sqlite3_fts5_create_function $db $f $f
  }
}

proc fts5_segcount {tbl} {
  set N 0
  foreach n [fts5_level_segs $tbl] { incr N $n }
  set N
}

proc fts5_level_segs {tbl} {
  set sql "SELECT fts5_decode(rowid,block) aS r FROM ${tbl}_data WHERE rowid=10"
  set ret [list]
  foreach L [lrange [db one $sql] 1 end] {
    lappend ret [expr [llength $L] - 3]
  }
Changes to ext/fts5/test/fts5config.test.

Old version (lines 242-251):
  set res [list 1 {malformed detail=... directive}]
  do_catchsql_test 11.$tn "CREATE VIRTUAL TABLE f1 USING fts5(x, $opt)" $res
}

do_catchsql_test 12.1 {
  INSERT INTO t1(t1, rank) VALUES('rank', NULL);;
} {1 {SQL logic error or missing database}}

















finish_test


New version (lines 242-267):
  set res [list 1 {malformed detail=... directive}]
  do_catchsql_test 11.$tn "CREATE VIRTUAL TABLE f1 USING fts5(x, $opt)" $res
}

do_catchsql_test 12.1 {
  INSERT INTO t1(t1, rank) VALUES('rank', NULL);;
} {1 {SQL logic error or missing database}}

#-------------------------------------------------------------------------
# errors in the 'usermerge' option
#
do_execsql_test 13.0 {
  CREATE VIRTUAL TABLE tt USING fts5(ttt);
}
foreach {tn val} {
  1     -1
  2     4.2
  3     17
  4     1
} {
  set sql "INSERT INTO tt(tt, rank) VALUES('usermerge', $val)"
  do_catchsql_test 13.$tn $sql {1 {SQL logic error or missing database}}
}

finish_test

Changes to ext/fts5/test/fts5eb.test.

Old version (lines 29-48):
  do_execsql_test $tn {SELECT fts5_expr($se_expr)} [list $res]
}

foreach {tn expr res} {
  1  {abc}                            {"abc"}
  2  {abc ""}                         {"abc"}
  3  {""}                             {}
  4  {abc OR ""}                      {"abc"}
  5  {abc NOT ""}                     {"abc"}
  6  {abc AND ""}                     {"abc"}
  7  {"" OR abc}                      {"abc"}
  8  {"" NOT abc}                     {"abc"}
  9  {"" AND abc}                     {"abc"}
  10 {abc + "" + def}                 {"abc" + "def"}
  11 {abc "" def}                     {"abc" AND "def"}
  12 {r+e OR w}                       {"r" + "e" OR "w"}

  13 {a AND b NOT c}                  {"a" AND ("b" NOT "c")}
  14 {a OR b NOT c}                   {"a" OR ("b" NOT "c")}
  15 {a NOT b AND c}                  {("a" NOT "b") AND "c"}

New version (lines 29-48):
  do_execsql_test $tn {SELECT fts5_expr($se_expr)} [list $res]
}

foreach {tn expr res} {
  1  {abc}                            {"abc"}
  2  {abc ""}                         {"abc"}
  3  {""}                             {}
  4  {abc OR ""}                      {"abc" OR ""}
  5  {abc NOT ""}                     {"abc" NOT ""}
  6  {abc AND ""}                     {"abc" AND ""}
  7  {"" OR abc}                      {"" OR "abc"}
  8  {"" NOT abc}                     {"" NOT "abc"}
  9  {"" AND abc}                     {"" AND "abc"}
  10 {abc + "" + def}                 {"abc" + "def"}
  11 {abc "" def}                     {"abc" AND "def"}
  12 {r+e OR w}                       {"r" + "e" OR "w"}

  13 {a AND b NOT c}                  {"a" AND ("b" NOT "c")}
  14 {a OR b NOT c}                   {"a" OR ("b" NOT "c")}
  15 {a NOT b AND c}                  {("a" NOT "b") AND "c"}
Changes to ext/fts5/test/fts5fault8.test.

Old version (lines 50-60):
  if {[detail_is_none]==0} {
    do_faultsim_test 3 -faults oom-* -body {
      execsql { SELECT rowid FROM t1('b:2') }
    } -test {
      faultsim_test_result {0 {1 3}} {1 SQLITE_NOMEM}
    }
  }

} ;# foreach_detail_mode...

























finish_test


New version (lines 50-85):
  if {[detail_is_none]==0} {
    do_faultsim_test 3 -faults oom-* -body {
      execsql { SELECT rowid FROM t1('b:2') }
    } -test {
      faultsim_test_result {0 {1 3}} {1 SQLITE_NOMEM}
    }
  }

} ;# foreach_detail_mode...


do_execsql_test 4.0 {
  CREATE VIRTUAL TABLE x2 USING fts5(a);
  INSERT INTO x2(x2, rank) VALUES('crisismerge', 2);
  INSERT INTO x2(x2, rank) VALUES('pgsz', 32);
  INSERT INTO x2 VALUES('a b c d');
  INSERT INTO x2 VALUES('e f g h');
  INSERT INTO x2 VALUES('i j k l');
  INSERT INTO x2 VALUES('m n o p');
  INSERT INTO x2 VALUES('q r s t');
  INSERT INTO x2 VALUES('u v w x');
  INSERT INTO x2 VALUES('y z a b');
}
faultsim_save_and_close

do_faultsim_test 4 -faults oom-* -prep {
  faultsim_restore_and_reopen
} -body {
  execsql { INSERT INTO x2(x2) VALUES('optimize') }
} -test {
  faultsim_test_result {0 {}} {1 SQLITE_NOMEM}
}


finish_test

Added ext/fts5/test/fts5fuzz1.test.

New file (lines 1-93):
# 2014 June 17
#
# The author disclaims copyright to this source code.  In place of
# a legal notice, here is a blessing:
#
#    May you do good and not evil.
#    May you find forgiveness for yourself and forgive others.
#    May you share freely, never taking more than you give.
#
#*************************************************************************
# This file implements regression tests for SQLite library.  The
# focus of this script is testing the FTS5 module.
#

source [file join [file dirname [info script]] fts5_common.tcl]
return_if_no_fts5
set testprefix fts5fuzz1


#-------------------------------------------------------------------------
reset_db
do_catchsql_test 1.1 {
  CREATE VIRTUAL TABLE f1 USING fts5(a b);
} {/1 {parse error in.*}/}


#-------------------------------------------------------------------------
reset_db
do_execsql_test 2.1 {
  CREATE VIRTUAL TABLE f1 USING fts5(a, b);
  INSERT INTO f1 VALUES('a b', 'c d');
  INSERT INTO f1 VALUES('e f', 'a b');
}

do_execsql_test 2.2.1 {
  SELECT rowid FROM f1('""');
} {}

do_execsql_test 2.2.2 {
  SELECT rowid FROM f1('"" AND a');
} {}


do_execsql_test 2.2.3 {
  SELECT rowid FROM f1('"" a');
} {1 2}

do_execsql_test 2.2.4 {
  SELECT rowid FROM f1('"" OR a');
} {1 2}

do_execsql_test 2.3 {
  SELECT a, b FROM f1('NEAR("")');
} {}

do_execsql_test 2.4 {
  SELECT a, b FROM f1('NEAR("", 5)');
} {}

do_execsql_test 2.5 {
  SELECT a, b FROM f1('NEAR("" c, 5)');
} {{a b} {c d}}

do_execsql_test 2.6 {
  SELECT a, b FROM f1('NEAR("" c d, 5)');
} {{a b} {c d}}

do_execsql_test 2.7 {
  SELECT a, b FROM f1('NEAR(c d, 5)');
} {{a b} {c d}}

do_execsql_test 2.8 {
  SELECT rowid FROM f1('NEAR("a" "b", 5)');
} {1 2}

#-------------------------------------------------------------------------
reset_db
do_execsql_test 3.2 {
  CREATE VIRTUAL TABLE f2 USING fts5(o, t, tokenize="ascii separators abc");
  SELECT * FROM f2('a+4');
} {}



#-------------------------------------------------------------------------
reset_db
do_catchsql_test 4.1 {
  CREATE VIRTUAL TABLE f2 USING fts5(o, t);
  SELECT * FROM f2('(8 AND 9)`AND 10');
} {1 {fts5: syntax error near "`"}}

finish_test

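The fts5fuzz1.test cases above exercise corner cases of the FTS5 query grammar, in particular empty phrases and NEAR groups. As a rough companion, here is a minimal C sketch (not part of the check-in) that runs the same kind of NEAR query through sqlite3_exec(), assuming a library built with -DSQLITE_ENABLE_FTS5:

/* Minimal sketch: run the NEAR query from test case 2.7 above from C. */
#include <stdio.h>
#include <sqlite3.h>

static int print_row(void *pArg, int argc, char **argv, char **azCol){
  int i;
  (void)pArg; (void)azCol;
  for(i=0; i<argc; i++) printf("%s%s", argv[i], i==argc-1 ? "\n" : " | ");
  return 0;
}

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db,
    "CREATE VIRTUAL TABLE f1 USING fts5(a, b);"
    "INSERT INTO f1 VALUES('a b', 'c d');"
    "INSERT INTO f1 VALUES('e f', 'a b');"
    /* Both phrases must occur within 5 tokens, as in test 2.7. */
    "SELECT a, b FROM f1('NEAR(c d, 5)');",
    print_row, 0, &zErr);
  if( zErr ){ fprintf(stderr, "error: %s\n", zErr); sqlite3_free(zErr); }
  sqlite3_close(db);
  return 0;
}
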
Changes to ext/fts5/test/fts5merge.test.

    WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<$::nRowPerSeg)
      INSERT INTO x8 SELECT repeat('x y ', i % 16) FROM ii;

    WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<$::nRowPerSeg)
      INSERT INTO x8 SELECT repeat('x y ', i % 16) FROM ii;

    INSERT INTO x8(x8, rank) VALUES('automerge', 2);
  }

  for {set tn 1} {[lindex [fts5_level_segs x8] 0]>0} {incr tn} {
    do_execsql_test $testname.$tn {
      INSERT INTO x8(x8, rank) VALUES('merge', 1);
      INSERT INTO x8(x8) VALUES('integrity-check');
    }

    WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<$::nRowPerSeg)
      INSERT INTO x8 SELECT repeat('x y ', i % 16) FROM ii;

    WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<$::nRowPerSeg)
      INSERT INTO x8 SELECT repeat('x y ', i % 16) FROM ii;

    INSERT INTO x8(x8, rank) VALUES('usermerge', 2);
  }

  for {set tn 1} {[lindex [fts5_level_segs x8] 0]>0} {incr tn} {
    do_execsql_test $testname.$tn {
      INSERT INTO x8(x8, rank) VALUES('merge', 1);
      INSERT INTO x8(x8) VALUES('integrity-check');
    }

  set ::nRow $nRow
  do_test $testname.1 {
    for {set i 0} {$i < $::nRow} {incr i} {
      execsql { INSERT INTO x8 VALUES( rnddoc(($i%16) + 5) ) }
      while {[not_merged x8]} {
        execsql {
          INSERT INTO x8(x8, rank) VALUES('automerge', 2);
          INSERT INTO x8(x8, rank) VALUES('merge', 1);
          INSERT INTO x8(x8, rank) VALUES('automerge', 16);
          INSERT INTO x8(x8) VALUES('integrity-check');
        }
      }
    }
  } {}
}
proc not_merged {tbl} {
  set segs [fts5_level_segs $tbl]
  foreach s $segs { if {$s>1} { return 1 } }
  return 0
}

do_merge2_test 2.1    5
do_merge2_test 2.2   10
do_merge2_test 2.3   20

#-------------------------------------------------------------------------
# Test that an auto-merge will complete any merge that has already been
# started, even if the number of input segments is less than the current
# value of the 'automerge' configuration parameter.
#
db func rnddoc fts5_rnddoc

do_execsql_test 3.1 {
  DROP TABLE IF EXISTS x8;
  CREATE VIRTUAL TABLE x8 USING fts5(i);
  INSERT INTO x8(x8, rank) VALUES('pgsz', 32);
  INSERT INTO x8 VALUES(rnddoc(100));
  INSERT INTO x8 VALUES(rnddoc(100));
}
do_test 3.2 {
  execsql {
    INSERT INTO x8(x8, rank) VALUES('automerge', 4);
    INSERT INTO x8(x8, rank) VALUES('merge', 1);
  }
  fts5_level_segs x8
} {2}

do_test 3.3 {
  execsql {
    INSERT INTO x8(x8, rank) VALUES('automerge', 2);
    INSERT INTO x8(x8, rank) VALUES('merge', 1);
  }
  fts5_level_segs x8
} {2 1}

do_test 3.4 {
  execsql { INSERT INTO x8(x8, rank) VALUES('automerge', 4) }
  while {[not_merged x8]} {
    execsql { INSERT INTO x8(x8, rank) VALUES('merge', 1) }
  }
  fts5_level_segs x8
} {0 1}

#-------------------------------------------------------------------------

  set ::nRow $nRow
  do_test $testname.1 {
    for {set i 0} {$i < $::nRow} {incr i} {
      execsql { INSERT INTO x8 VALUES( rnddoc(($i%16) + 5) ) }
      while {[not_merged x8]} {
        execsql {
          INSERT INTO x8(x8, rank) VALUES('usermerge', 2);
          INSERT INTO x8(x8, rank) VALUES('merge', 1);
          INSERT INTO x8(x8, rank) VALUES('usermerge', 16);
          INSERT INTO x8(x8) VALUES('integrity-check');
        }
      }
    }
  } {}
}
proc not_merged {tbl} {
  set segs [fts5_level_segs $tbl]
  foreach s $segs { if {$s>1} { return 1 } }
  return 0
}

do_merge2_test 2.1    5
do_merge2_test 2.2   10
do_merge2_test 2.3   20

#-------------------------------------------------------------------------
# Test that a merge will complete any merge that has already been
# started, even if the number of input segments is less than the current
# value of the 'usermerge' configuration parameter.
#
db func rnddoc fts5_rnddoc

do_execsql_test 3.1 {
  DROP TABLE IF EXISTS x8;
  CREATE VIRTUAL TABLE x8 USING fts5(i);
  INSERT INTO x8(x8, rank) VALUES('pgsz', 32);
  INSERT INTO x8 VALUES(rnddoc(100));
  INSERT INTO x8 VALUES(rnddoc(100));
}
do_test 3.2 {
  execsql {
    INSERT INTO x8(x8, rank) VALUES('usermerge', 4);
    INSERT INTO x8(x8, rank) VALUES('merge', 1);
  }
  fts5_level_segs x8
} {2}

do_test 3.3 {
  execsql {
    INSERT INTO x8(x8, rank) VALUES('usermerge', 2);
    INSERT INTO x8(x8, rank) VALUES('merge', 1);
  }
  fts5_level_segs x8
} {2 1}

do_test 3.4 {
  execsql { INSERT INTO x8(x8, rank) VALUES('usermerge', 4) }
  while {[not_merged x8]} {
    execsql { INSERT INTO x8(x8, rank) VALUES('merge', 1) }
  }
  fts5_level_segs x8
} {0 1}

#-------------------------------------------------------------------------
  }

  do_execsql_test 4.$tn.3 {
    WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<100)
      INSERT INTO x8 SELECT mydoc() FROM ii;
    WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<100)
      INSERT INTO x8 SELECT mydoc() FROM ii;
    INSERT INTO x8(x8, rank) VALUES('automerge', 2);
  }

  set expect [mycount]
    for {set i 0} {$i < 20} {incr i} {
      do_test 4.$tn.4.$i {
        execsql { INSERT INTO x8(x8, rank) VALUES('merge', 1); }
        mycount
      } $expect
      break
    }
#  db eval {SELECT fts5_decode(rowid, block) AS r FROM x8_data} { puts $r }
}
finish_test
  }

  do_execsql_test 4.$tn.3 {
    WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<100)
      INSERT INTO x8 SELECT mydoc() FROM ii;
    WITH ii(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM ii WHERE i<100)
      INSERT INTO x8 SELECT mydoc() FROM ii;
    INSERT INTO x8(x8, rank) VALUES('usermerge', 2);
  }

  set expect [mycount]
    for {set i 0} {$i < 20} {incr i} {
      do_test 4.$tn.4.$i {
        execsql { INSERT INTO x8(x8, rank) VALUES('merge', 1); }
        mycount
      } $expect
      break
    }
#  db eval {SELECT fts5_decode(rowid, block) AS r FROM x8_data} { puts $r }
}

#-------------------------------------------------------------------------
# Test that the 'merge' command does not modify the database if there is
# no work to do. 

do_execsql_test 5.1 {
  CREATE VIRTUAL TABLE x9 USING fts5(one, two);
  INSERT INTO x9(x9, rank) VALUES('pgsz', 32);
  INSERT INTO x9(x9, rank) VALUES('automerge', 2);
  INSERT INTO x9(x9, rank) VALUES('usermerge', 2);
  INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100));
  INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100));
  INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100));
  INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100));
  INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100));
  INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100));
  INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100));
  INSERT INTO x9 VALUES(rnddoc(100), rnddoc(100));
}

do_test 5.2 {
  while 1 {
    set nChange [db total_changes]
    execsql { INSERT INTO x9(x9, rank) VALUES('merge', 1); }
    set nChange [expr [db total_changes] - $nChange]
    #puts $nChange
    if {$nChange<2} break
  }
} {}


#--------------------------------------------------------------------------
# Test that running 'merge' on an empty database does not cause a 
# problem.
#
reset_db
do_execsql_test 6.0 {
  CREATE VIRTUAL TABLE g1 USING fts5(a, b);
}
do_execsql_test 6.1 {
  INSERT INTO g1(g1, rank) VALUES('merge', 10);
}
do_execsql_test 6.2 {
  INSERT INTO g1(g1, rank) VALUES('merge', -10);
}
do_execsql_test 6.3 {
  INSERT INTO g1(g1) VALUES('integrity-check');
}



finish_test

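The fts5merge.test hunks above track the rename of the user-driven merge parameter from 'automerge' to 'usermerge', and the new tests 5 and 6 check that a 'merge' step on an already-merged or empty index writes nothing. A hedged C sketch of the same drive loop, using only the commands that appear in the script and the x8 table name it uses:

/* Sketch only: drive FTS5 incremental merging the way fts5merge.test does. */
#include <sqlite3.h>

static int fts5_incremental_merge(sqlite3 *db){
  /* Allow merging to start once a level holds at least 2 segments. */
  int rc = sqlite3_exec(db,
      "INSERT INTO x8(x8, rank) VALUES('usermerge', 2);", 0, 0, 0);
  /* Do a little merge work per step until a step stops changing the
  ** database, the same stopping rule used by test 5.2 above. */
  while( rc==SQLITE_OK ){
    int nBefore = sqlite3_total_changes(db);
    rc = sqlite3_exec(db,
        "INSERT INTO x8(x8, rank) VALUES('merge', 1);", 0, 0, 0);
    if( sqlite3_total_changes(db)-nBefore<2 ) break;
  }
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(db,
        "INSERT INTO x8(x8) VALUES('integrity-check');", 0, 0, 0);
  }
  return rc;
}
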
Changes to ext/fts5/test/fts5optimize.test.
set testprefix fts5optimize

# If SQLITE_ENABLE_FTS5 is defined, omit this file.
ifcapable !fts5 {
  finish_test
  return
}







proc rnddoc {nWord} {
  set vocab {a b c d e f g h i j k l m n o p q r s t u v w x y z}
  set nVocab [llength $vocab]
  set ret [list]
  for {set i 0} {$i < $nWord} {incr i} {
    lappend ret [lindex $vocab [expr {int(rand() * $nVocab)}]]
  }
  return $ret
}


foreach {tn nStep} {
  1 2
  2 10
  3 50
  4 500
} {
if {$tn!=4} continue
  reset_db
  db func rnddoc rnddoc
  do_execsql_test 1.$tn.1 {
    CREATE VIRTUAL TABLE t1 USING fts5(x, y);
  }
  do_test 1.$tn.2 {
    for {set i 0} {$i < $nStep} {incr i} {
set testprefix fts5optimize

# If SQLITE_ENABLE_FTS5 is defined, omit this file.
ifcapable !fts5 {
  finish_test
  return
}

#
# 1.* - Warm body tests for index optimization using ('optimize')
#
# 2.* - Warm body tests for index optimization using ('merge', -1)
#

proc rnddoc {nWord} {
  set vocab {a b c d e f g h i j k l m n o p q r s t u v w x y z}
  set nVocab [llength $vocab]
  set ret [list]
  for {set i 0} {$i < $nWord} {incr i} {
    lappend ret [lindex $vocab [expr {int(rand() * $nVocab)}]]
  }
  return $ret
}


foreach {tn nStep} {
  1 2
  2 10
  3 50
  4 500
} {

  reset_db
  db func rnddoc rnddoc
  do_execsql_test 1.$tn.1 {
    CREATE VIRTUAL TABLE t1 USING fts5(x, y);
  }
  do_test 1.$tn.2 {
    for {set i 0} {$i < $nStep} {incr i} {
  do_execsql_test 1.$tn.4 {
    INSERT INTO t1(t1) VALUES('optimize');
  }

  do_execsql_test 1.$tn.5 {
    INSERT INTO t1(t1) VALUES('integrity-check');
  }
}

finish_test
  do_execsql_test 1.$tn.4 {
    INSERT INTO t1(t1) VALUES('optimize');
  }

  do_execsql_test 1.$tn.5 {
    INSERT INTO t1(t1) VALUES('integrity-check');
  }

  do_test 1.$tn.6 { fts5_segcount t1 } 1
}

foreach {tn nStep} {
  1 2
  2 10
  3 50
  4 500
} {
  reset_db
  db func rnddoc rnddoc
  do_execsql_test 1.$tn.1 {
    CREATE VIRTUAL TABLE t1 USING fts5(x, y);
  }
  do_test 2.$tn.2 {
    for {set i 0} {$i < $nStep} {incr i} {
      execsql { INSERT INTO t1 VALUES( rnddoc(5), rnddoc(5) ) }
    }
  } {}

  do_execsql_test 2.$tn.3 {
    INSERT INTO t1(t1) VALUES('integrity-check');
  }

  do_test 2.$tn.4 {
    execsql { INSERT INTO t1(t1, rank) VALUES('merge', -1) }
    while 1 {
      set c [db total_changes]
      execsql { INSERT INTO t1(t1, rank) VALUES('merge', 1) }
      set c [expr [db total_changes]-$c]
      if {$c<2} break
    }
  } {}

  do_execsql_test 2.$tn.5 {
    INSERT INTO t1(t1) VALUES('integrity-check');
  }

  do_test 2.$tn.6 { fts5_segcount t1 } 1
}
finish_test

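The second foreach loop added to fts5optimize.test above reaches a single-segment index through an initial ('merge', -1) followed by repeated ('merge', 1) steps. For contrast, a one-shot sketch of the 'optimize' command used by the 1.* tests (table name t1 as in the script; illustration only):

/* Sketch only: merge the whole FTS5 index for t1 into one segment in a
** single statement, the state the tests verify with fts5_segcount. */
#include <sqlite3.h>

static int fts5_optimize_once(sqlite3 *db, char **pzErr){
  return sqlite3_exec(db, "INSERT INTO t1(t1) VALUES('optimize');",
                      0, 0, pzErr);
}
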
Changes to ext/misc/spellfix.c.
  sqlite3_value **argv
){
  const unsigned char *zIn = sqlite3_value_text(argv[0]);
  int nIn = sqlite3_value_bytes(argv[0]);
  int c, sz;
  int scriptMask = 0;
  int res;

# define SCRIPT_LATIN       0x0001
# define SCRIPT_CYRILLIC    0x0002
# define SCRIPT_GREEK       0x0004
# define SCRIPT_HEBREW      0x0008
# define SCRIPT_ARABIC      0x0010

  while( nIn>0 ){
    c = utf8Read(zIn, nIn, &sz);
    zIn += sz;
    nIn -= sz;

    if( c<0x02af && (c>=0x80 || midClass[c&0x7f]<CCLASS_DIGIT) ){
      scriptMask |= SCRIPT_LATIN;



    }else if( c>=0x0400 && c<=0x04ff ){
      scriptMask |= SCRIPT_CYRILLIC;
    }else if( c>=0x0386 && c<=0x03ce ){
      scriptMask |= SCRIPT_GREEK;
    }else if( c>=0x0590 && c<=0x05ff ){
      scriptMask |= SCRIPT_HEBREW;
    }else if( c>=0x0600 && c<=0x06ff ){
      scriptMask |= SCRIPT_ARABIC;
    }
  }

  switch( scriptMask ){
    case 0:                res = 999; break;
    case SCRIPT_LATIN:     res = 215; break;
    case SCRIPT_CYRILLIC:  res = 220; break;
    case SCRIPT_GREEK:     res = 200; break;
    case SCRIPT_HEBREW:    res = 125; break;
    case SCRIPT_ARABIC:    res = 160; break;
  sqlite3_value **argv
){
  const unsigned char *zIn = sqlite3_value_text(argv[0]);
  int nIn = sqlite3_value_bytes(argv[0]);
  int c, sz;
  int scriptMask = 0;
  int res;
  int seenDigit = 0;
# define SCRIPT_LATIN       0x0001
# define SCRIPT_CYRILLIC    0x0002
# define SCRIPT_GREEK       0x0004
# define SCRIPT_HEBREW      0x0008
# define SCRIPT_ARABIC      0x0010

  while( nIn>0 ){
    c = utf8Read(zIn, nIn, &sz);
    zIn += sz;
    nIn -= sz;
    if( c<0x02af ){
      if( c>=0x80 || midClass[c&0x7f]<CCLASS_DIGIT ){
        scriptMask |= SCRIPT_LATIN;
      }else if( c>='0' && c<='9' ){
        seenDigit = 1;
      }
    }else if( c>=0x0400 && c<=0x04ff ){
      scriptMask |= SCRIPT_CYRILLIC;
    }else if( c>=0x0386 && c<=0x03ce ){
      scriptMask |= SCRIPT_GREEK;
    }else if( c>=0x0590 && c<=0x05ff ){
      scriptMask |= SCRIPT_HEBREW;
    }else if( c>=0x0600 && c<=0x06ff ){
      scriptMask |= SCRIPT_ARABIC;
    }
  }
  if( scriptMask==0 && seenDigit ) scriptMask = SCRIPT_LATIN;
  switch( scriptMask ){
    case 0:                res = 999; break;
    case SCRIPT_LATIN:     res = 215; break;
    case SCRIPT_CYRILLIC:  res = 220; break;
    case SCRIPT_GREEK:     res = 200; break;
    case SCRIPT_HEBREW:    res = 125; break;
    case SCRIPT_ARABIC:    res = 160; break;
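
The spellfix.c change above adds a seenDigit flag so that input consisting only of ASCII digits falls back to the Latin result code instead of the catch-all 999. The following standalone sketch restates that rule for plain ASCII input only; the helper name and the letters-only test are simplifications for illustration, not the extension's actual code:

/* Simplified illustration of the digits-fall-back-to-Latin rule above. */
#define DEMO_SCRIPT_LATIN 0x0001

static int demo_script_result(const char *zIn){
  int scriptMask = 0;
  int seenDigit = 0;
  int i;
  for(i=0; zIn[i]; i++){
    char c = zIn[i];
    if( c>='0' && c<='9' ){
      seenDigit = 1;                   /* digits alone set no script bit */
    }else if( (c>='a' && c<='z') || (c>='A' && c<='Z') ){
      scriptMask |= DEMO_SCRIPT_LATIN; /* Latin letters */
    }
  }
  if( scriptMask==0 && seenDigit ) scriptMask = DEMO_SCRIPT_LATIN;
  return scriptMask==DEMO_SCRIPT_LATIN ? 215 : 999; /* codes from the switch above */
}
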
Added ext/rbu/rbuC.test.
# 2016 March 7
#
# The author disclaims copyright to this source code.  In place of
# a legal notice, here is a blessing:
#
#    May you do good and not evil.
#    May you find forgiveness for yourself and forgive others.
#    May you share freely, never taking more than you give.
#
#***********************************************************************
# Tests for RBU focused on the REPLACE operation (rbu_control column
# contains integer value 2).
#

source [file join [file dirname [info script]] rbu_common.tcl]
set ::testprefix rbuC

#-------------------------------------------------------------------------
# This test is actually of an UPDATE directive. Just to establish that
# these work with UNIQUE indexes before proceeding to REPLACE.
#
do_execsql_test 1.0 {
  CREATE TABLE t1(i INTEGER PRIMARY KEY, a, b, c UNIQUE);
  INSERT INTO t1 VALUES(1, 'a', 'b', 'c');
}

forcedelete rbu.db
do_execsql_test 1.1 {
  ATTACH 'rbu.db' AS rbu;
  CREATE TABLE rbu.data_t1(i, a, b, c, rbu_control);
  INSERT INTO data_t1 VALUES(1, 'a', 'b', 'c', '.xxx');
}

do_test 1.2 {
  step_rbu test.db rbu.db
} {SQLITE_DONE}

do_execsql_test 1.3 {
  SELECT * FROM t1
} {
  1 a b c
}

#-------------------------------------------------------------------------
#
foreach {tn schema} {
  1 {
    CREATE TABLE t1(i INTEGER PRIMARY KEY, a, b, c UNIQUE);
    CREATE INDEX t1a ON t1(a);
  }
  2 {
    CREATE TABLE t1(i PRIMARY KEY, a, b, c UNIQUE);
    CREATE INDEX t1a ON t1(a);
  }
  3 {
    CREATE TABLE t1(i PRIMARY KEY, a, b, c UNIQUE) WITHOUT ROWID;
    CREATE INDEX t1a ON t1(a);
  }
} {
  reset_db
  forcedelete rbu.db
  execsql $schema

  do_execsql_test 2.$tn.0 {
    INSERT INTO t1 VALUES(1, 'a', 'b', 'c');
    INSERT INTO t1 VALUES(2, 'b', 'c', 'd');
    INSERT INTO t1 VALUES(3, 'c', 'd', 'e');
  }
  
  do_execsql_test 2.$tn.1 {
    ATTACH 'rbu.db' AS rbu;
    CREATE TABLE rbu.data_t1(i, a, b, c, rbu_control);
    INSERT INTO data_t1 VALUES(1, 1, 2, 3, 2);
    INSERT INTO data_t1 VALUES(3, 'c', 'd', 'e', 2);
    INSERT INTO data_t1 VALUES(4, 'd', 'e', 'f', 2);
  }
  
  do_test 2.$tn.2 {
    step_rbu test.db rbu.db
  } {SQLITE_DONE}
  
  do_execsql_test 2.$tn.3 {
    SELECT * FROM t1 ORDER BY i
  } {
    1 1 2 3
    2 b c d
    3 c d e
    4 d e f
  }
  
  integrity_check 2.$tn.4
}

foreach {tn schema} {
  1 {
    CREATE TABLE t1(a, b, c UNIQUE);
    CREATE INDEX t1a ON t1(a);
  }

  2 {
    CREATE VIRTUAL TABLE t1 USING fts5(a, b, c);
  }
} {
  if {$tn==2} { ifcapable !fts5 break }
  reset_db
  forcedelete rbu.db
  execsql $schema

  do_execsql_test 3.$tn.0 {
    INSERT INTO t1 VALUES('a', 'b', 'c');
    INSERT INTO t1 VALUES('b', 'c', 'd');
    INSERT INTO t1 VALUES('c', 'd', 'e');
  }
  
  do_execsql_test 3.$tn.1 {
    ATTACH 'rbu.db' AS rbu;
    CREATE TABLE rbu.data_t1(rbu_rowid, a, b, c, rbu_control);
    INSERT INTO data_t1 VALUES(1, 1, 2, 3, 2);
    INSERT INTO data_t1 VALUES(3, 'c', 'd', 'e', 2);
    INSERT INTO data_t1 VALUES(4, 'd', 'e', 'f', 2);
  }
  
  do_test 3.$tn.2 {
    step_rbu test.db rbu.db
  } {SQLITE_DONE}
  
  do_execsql_test 3.$tn.3 {
    SELECT rowid, * FROM t1 ORDER BY 1
  } {
    1 1 2 3
    2 b c d
    3 c d e
    4 d e f
  }
  
  integrity_check 3.$tn.4
}



finish_test

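The new rbuC.test above drives REPLACE rows (rbu_control value 2) through the Tcl step_rbu helper. A hedged C sketch of the same stepping loop against the public RBU API declared in sqlite3rbu.h, reusing the test's database names:

/* Sketch only: apply the update in rbu.db to test.db one step at a time.
** data_* rows whose rbu_control column holds 2 ask for delete-then-insert. */
#include <stdio.h>
#include "sqlite3rbu.h"

int main(void){
  sqlite3rbu *pRbu = sqlite3rbu_open("test.db", "rbu.db", 0);
  char *zErr = 0;
  int rc;
  while( sqlite3rbu_step(pRbu)==SQLITE_OK ){ /* keep stepping */ }
  rc = sqlite3rbu_close(pRbu, &zErr);
  if( rc!=SQLITE_DONE ){
    fprintf(stderr, "rbu error %d: %s\n", rc, zErr ? zErr : "?");
  }
  sqlite3_free(zErr);
  return rc==SQLITE_DONE ? 0 : 1;
}
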
Changes to ext/rbu/sqlite3rbu.c.

/*
** Within the RBU_STAGE_OAL stage, each call to sqlite3rbu_step() performs
** one of the following operations.
*/
#define RBU_INSERT     1          /* Insert on a main table b-tree */
#define RBU_DELETE     2          /* Delete a row from a main table b-tree */

#define RBU_IDX_DELETE 3          /* Delete a row from an aux. index b-tree */
#define RBU_IDX_INSERT 4          /* Insert on an aux. index b-tree */
#define RBU_UPDATE     5          /* Update a row in a main table b-tree */



/*
** A single step of an incremental checkpoint - frame iWalFrame of the wal
** file should be copied to page iDbPage of the database file.
*/
struct RbuFrame {
  u32 iDbPage;

/*
** Within the RBU_STAGE_OAL stage, each call to sqlite3rbu_step() performs
** one of the following operations.
*/
#define RBU_INSERT     1          /* Insert on a main table b-tree */
#define RBU_DELETE     2          /* Delete a row from a main table b-tree */
#define RBU_REPLACE    3          /* Delete and then insert a row */
#define RBU_IDX_DELETE 4          /* Delete a row from an aux. index b-tree */
#define RBU_IDX_INSERT 5          /* Insert on an aux. index b-tree */


#define RBU_UPDATE     6          /* Update a row in a main table b-tree */

/*
** A single step of an incremental checkpoint - frame iWalFrame of the wal
** file should be copied to page iDbPage of the database file.
*/
struct RbuFrame {
  u32 iDbPage;
          zSql = sqlite3_mprintf(
              "SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' ORDER BY %s%s",
              zCollist, p->zStateDb, pIter->zDataTbl,
              zCollist, zLimit
          );
        }else{
          zSql = sqlite3_mprintf(
              "SELECT %s, rbu_control FROM '%q' "
              "WHERE typeof(rbu_control)='integer' AND rbu_control!=1 "
              "UNION ALL "
              "SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' "

              "ORDER BY %s%s",
              zCollist, pIter->zDataTbl, 
              zCollist, p->zStateDb, pIter->zDataTbl, 

              zCollist, zLimit
          );
        }
        p->rc = prepareFreeAndCollectError(p->dbRbu, &pIter->pSelect, pz, zSql);
      }

      sqlite3_free(zImposterCols);

              "SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' ORDER BY %s%s",
              zCollist, p->zStateDb, pIter->zDataTbl,
              zCollist, zLimit
          );
        }else{
          zSql = sqlite3_mprintf(
              "SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' "

              "UNION ALL "
              "SELECT %s, rbu_control FROM '%q' "
              "WHERE typeof(rbu_control)='integer' AND rbu_control!=1 "
              "ORDER BY %s%s",

              zCollist, p->zStateDb, pIter->zDataTbl, 
              zCollist, pIter->zDataTbl, 
              zCollist, zLimit
          );
        }
        p->rc = prepareFreeAndCollectError(p->dbRbu, &pIter->pSelect, pz, zSql);
      }

      sqlite3_free(zImposterCols);
            , (pIter->eType==RBU_PK_EXTERNAL ? ", 0 AS rbu_rowid" : "")
            , pIter->zDataTbl
        );

        rbuMPrintfExec(p, p->dbMain,
            "CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" "
            "BEGIN "
            "  SELECT rbu_tmp_insert(2, %s);"
            "END;"

            "CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" "
            "BEGIN "
            "  SELECT rbu_tmp_insert(2, %s);"
            "END;"

            "CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" "
            "BEGIN "
            "  SELECT rbu_tmp_insert(3, %s);"
            "END;",
            zWrite, zTbl, zOldlist,
            zWrite, zTbl, zOldlist,
            zWrite, zTbl, zNewlist
        );

        if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){
            , (pIter->eType==RBU_PK_EXTERNAL ? ", 0 AS rbu_rowid" : "")
            , pIter->zDataTbl
        );

        rbuMPrintfExec(p, p->dbMain,
            "CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" "
            "BEGIN "
            "  SELECT rbu_tmp_insert(3, %s);"
            "END;"

            "CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" "
            "BEGIN "
            "  SELECT rbu_tmp_insert(3, %s);"
            "END;"

            "CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" "
            "BEGIN "
            "  SELECT rbu_tmp_insert(4, %s);"
            "END;",
            zWrite, zTbl, zOldlist,
            zWrite, zTbl, zOldlist,
            zWrite, zTbl, zNewlist
        );

        if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){
static int rbuStepType(sqlite3rbu *p, const char **pzMask){
  int iCol = p->objiter.nCol;     /* Index of rbu_control column */
  int res = 0;                    /* Return value */

  switch( sqlite3_column_type(p->objiter.pSelect, iCol) ){
    case SQLITE_INTEGER: {
      int iVal = sqlite3_column_int(p->objiter.pSelect, iCol);
      if( iVal==0 ){
        res = RBU_INSERT;
      }else if( iVal==1 ){
        res = RBU_DELETE;
      }else if( iVal==2 ){
        res = RBU_IDX_DELETE;
      }else if( iVal==3 ){
        res = RBU_IDX_INSERT;
      }
      break;
    }

    case SQLITE_TEXT: {
      const unsigned char *z = sqlite3_column_text(p->objiter.pSelect, iCol);
      if( z==0 ){
static int rbuStepType(sqlite3rbu *p, const char **pzMask){
  int iCol = p->objiter.nCol;     /* Index of rbu_control column */
  int res = 0;                    /* Return value */

  switch( sqlite3_column_type(p->objiter.pSelect, iCol) ){
    case SQLITE_INTEGER: {
      int iVal = sqlite3_column_int(p->objiter.pSelect, iCol);
      switch( iVal ){
        case 0: res = RBU_INSERT;     break;

        case 1: res = RBU_DELETE;     break;
        case 2: res = RBU_REPLACE;    break;
        case 3: res = RBU_IDX_DELETE; break;

        case 4: res = RBU_IDX_INSERT; break;
      }
      break;
    }

    case SQLITE_TEXT: {
      const unsigned char *z = sqlite3_column_text(p->objiter.pSelect, iCol);
      if( z==0 ){
static void assertColumnName(sqlite3_stmt *pStmt, int iCol, const char *zName){
  const char *zCol = sqlite3_column_name(pStmt, iCol);
  assert( 0==sqlite3_stricmp(zName, zCol) );
}
#else
# define assertColumnName(x,y,z)
#endif

/*
** This function does the work for an sqlite3rbu_step() call.
**
** The object-iterator (p->objiter) currently points to a valid object,
** and the input cursor (p->objiter.pSelect) currently points to a valid
** input row. Perform whatever processing is required and return.
**
** If no  error occurs, SQLITE_OK is returned. Otherwise, an error code
** and message is left in the RBU handle and a copy of the error code
** returned.
*/
static int rbuStep(sqlite3rbu *p){
  RbuObjIter *pIter = &p->objiter;
  const char *zMask = 0;
  int i;
  int eType = rbuStepType(p, &zMask);

  if( eType ){
    assert( eType!=RBU_UPDATE || pIter->zIdx==0 );

    if( pIter->zIdx==0 && eType==RBU_IDX_DELETE ){
      rbuBadControlError(p);
    }
    else if( 
        eType==RBU_INSERT 
     || eType==RBU_DELETE
     || eType==RBU_IDX_DELETE 
     || eType==RBU_IDX_INSERT
    ){
      sqlite3_value *pVal;
      sqlite3_stmt *pWriter;

      assert( eType!=RBU_UPDATE );
      assert( eType!=RBU_DELETE || pIter->zIdx==0 );

      if( eType==RBU_IDX_DELETE || eType==RBU_DELETE ){
        pWriter = pIter->pDelete;
      }else{
        pWriter = pIter->pInsert;
      }

      for(i=0; i<pIter->nCol; i++){
        /* If this is an INSERT into a table b-tree and the table has an
        ** explicit INTEGER PRIMARY KEY, check that this is not an attempt
        ** to write a NULL into the IPK column. That is not permitted.  */
        if( eType==RBU_INSERT 
         && pIter->zIdx==0 && pIter->eType==RBU_PK_IPK && pIter->abTblPk[i] 
         && sqlite3_column_type(pIter->pSelect, i)==SQLITE_NULL
        ){
          p->rc = SQLITE_MISMATCH;
          p->zErrmsg = sqlite3_mprintf("datatype mismatch");
          goto step_out;
        }

        if( eType==RBU_DELETE && pIter->abTblPk[i]==0 ){
          continue;
        }

        pVal = sqlite3_column_value(pIter->pSelect, i);
        p->rc = sqlite3_bind_value(pWriter, i+1, pVal);
        if( p->rc ) goto step_out;
      }
      if( pIter->zIdx==0
       && (pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE) 
      ){
        /* For a virtual table, or a table with no primary key, the 
        ** SELECT statement is:
        **
        **   SELECT <cols>, rbu_control, rbu_rowid FROM ....
        **
        ** Hence column_value(pIter->nCol+1).
        */
        assertColumnName(pIter->pSelect, pIter->nCol+1, "rbu_rowid");
        pVal = sqlite3_column_value(pIter->pSelect, pIter->nCol+1);
        p->rc = sqlite3_bind_value(pWriter, pIter->nCol+1, pVal);

      }
      if( p->rc==SQLITE_OK ){
        sqlite3_step(pWriter);
        p->rc = resetAndCollectError(pWriter, &p->zErrmsg);
      }
    }else{
      sqlite3_value *pVal;
      sqlite3_stmt *pUpdate = 0;
      assert( eType==RBU_UPDATE );
      rbuGetUpdateStmt(p, pIter, zMask, &pUpdate);
      if( pUpdate ){

        for(i=0; p->rc==SQLITE_OK && i<pIter->nCol; i++){
          char c = zMask[pIter->aiSrcOrder[i]];
          pVal = sqlite3_column_value(pIter->pSelect, i);
          if( pIter->abTblPk[i] || c!='.' ){
            p->rc = sqlite3_bind_value(pUpdate, i+1, pVal);
          }
        }

static void assertColumnName(sqlite3_stmt *pStmt, int iCol, const char *zName){
  const char *zCol = sqlite3_column_name(pStmt, iCol);
  assert( 0==sqlite3_stricmp(zName, zCol) );
}
#else
# define assertColumnName(x,y,z)
#endif

/*
** Argument eType must be one of RBU_INSERT, RBU_DELETE, RBU_IDX_INSERT or
** RBU_IDX_DELETE. This function performs the work of a single
** sqlite3rbu_step() call for the type of operation specified by eType.
*/
static void rbuStepOneOp(sqlite3rbu *p, int eType){
  RbuObjIter *pIter = &p->objiter;
  sqlite3_value *pVal;
  sqlite3_stmt *pWriter;
  int i;

  assert( p->rc==SQLITE_OK );
  assert( eType!=RBU_DELETE || pIter->zIdx==0 );

  if( eType==RBU_IDX_DELETE || eType==RBU_DELETE ){
    pWriter = pIter->pDelete;
  }else{
    pWriter = pIter->pInsert;
  }

  for(i=0; i<pIter->nCol; i++){
    /* If this is an INSERT into a table b-tree and the table has an
    ** explicit INTEGER PRIMARY KEY, check that this is not an attempt
    ** to write a NULL into the IPK column. That is not permitted.  */
    if( eType==RBU_INSERT 
     && pIter->zIdx==0 && pIter->eType==RBU_PK_IPK && pIter->abTblPk[i] 
     && sqlite3_column_type(pIter->pSelect, i)==SQLITE_NULL
    ){
      p->rc = SQLITE_MISMATCH;
      p->zErrmsg = sqlite3_mprintf("datatype mismatch");
      return;
    }

    if( eType==RBU_DELETE && pIter->abTblPk[i]==0 ){
      continue;
    }

    pVal = sqlite3_column_value(pIter->pSelect, i);
    p->rc = sqlite3_bind_value(pWriter, i+1, pVal);
    if( p->rc ) return;
  }
  if( pIter->zIdx==0
   && (pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE) 
  ){
    /* For a virtual table, or a table with no primary key, the 
    ** SELECT statement is:
    **
    **   SELECT <cols>, rbu_control, rbu_rowid FROM ....
    **
    ** Hence column_value(pIter->nCol+1).
    */
    assertColumnName(pIter->pSelect, pIter->nCol+1, "rbu_rowid");
    pVal = sqlite3_column_value(pIter->pSelect, pIter->nCol+1);
    p->rc = sqlite3_bind_value(pWriter, pIter->nCol+1, pVal);
  }
  if( p->rc==SQLITE_OK ){
    sqlite3_step(pWriter);
    p->rc = resetAndCollectError(pWriter, &p->zErrmsg);
  }
}

/*
** This function does the work for an sqlite3rbu_step() call.
**
** The object-iterator (p->objiter) currently points to a valid object,
** and the input cursor (p->objiter.pSelect) currently points to a valid
** input row. Perform whatever processing is required and return.
**
** If no  error occurs, SQLITE_OK is returned. Otherwise, an error code
** and message is left in the RBU handle and a copy of the error code
** returned.
*/
static int rbuStep(sqlite3rbu *p){
  RbuObjIter *pIter = &p->objiter;
  const char *zMask = 0;

  int eType = rbuStepType(p, &zMask);

  if( eType ){







    assert( eType==RBU_INSERT     || eType==RBU_DELETE
         || eType==RBU_REPLACE    || eType==RBU_IDX_DELETE
         || eType==RBU_IDX_INSERT || eType==RBU_UPDATE
    );



    assert( eType!=RBU_UPDATE || pIter->zIdx==0 );


    if( pIter->zIdx==0 && eType==RBU_IDX_DELETE ){




      rbuBadControlError(p);











    }

    else if( eType==RBU_REPLACE ){







      if( pIter->zIdx==0 ) rbuStepOneOp(p, RBU_DELETE);












      if( p->rc==SQLITE_OK ) rbuStepOneOp(p, RBU_INSERT);
    }
    else if( eType!=RBU_UPDATE ){
      rbuStepOneOp(p, eType);

    }
    else{
      sqlite3_value *pVal;
      sqlite3_stmt *pUpdate = 0;
      assert( eType==RBU_UPDATE );
      rbuGetUpdateStmt(p, pIter, zMask, &pUpdate);
      if( pUpdate ){
        int i;
        for(i=0; p->rc==SQLITE_OK && i<pIter->nCol; i++){
          char c = zMask[pIter->aiSrcOrder[i]];
          pVal = sqlite3_column_value(pIter->pSelect, i);
          if( pIter->abTblPk[i] || c!='.' ){
            p->rc = sqlite3_bind_value(pUpdate, i+1, pVal);
          }
        }
        if( p->rc==SQLITE_OK ){
          sqlite3_step(pUpdate);
          p->rc = resetAndCollectError(pUpdate, &p->zErrmsg);
        }
      }
    }
  }

 step_out:
  return p->rc;
}

/*
** Increment the schema cookie of the main database opened by p->dbMain.
*/
static void rbuIncrSchemaCookie(sqlite3rbu *p){
        if( p->rc==SQLITE_OK ){
          sqlite3_step(pUpdate);
          p->rc = resetAndCollectError(pUpdate, &p->zErrmsg);
        }
      }
    }
  }


  return p->rc;
}

/*
** Increment the schema cookie of the main database opened by p->dbMain.
*/
static void rbuIncrSchemaCookie(sqlite3rbu *p){
Changes to src/attach.c.
                             sqlite3BtreeSecureDelete(db->aDb[0].pBt,-1) );
#ifndef SQLITE_OMIT_PAGER_PRAGMAS
    sqlite3BtreeSetPagerFlags(aNew->pBt,
                      PAGER_SYNCHRONOUS_FULL | (db->flags & PAGER_FLAGS_MASK));
#endif
    sqlite3BtreeLeave(aNew->pBt);
  }
  aNew->safety_level = 3;
  aNew->zName = sqlite3DbStrDup(db, zName);
  if( rc==SQLITE_OK && aNew->zName==0 ){
    rc = SQLITE_NOMEM_BKPT;
  }


#ifdef SQLITE_HAS_CODEC
                             sqlite3BtreeSecureDelete(db->aDb[0].pBt,-1) );
#ifndef SQLITE_OMIT_PAGER_PRAGMAS
    sqlite3BtreeSetPagerFlags(aNew->pBt,
                      PAGER_SYNCHRONOUS_FULL | (db->flags & PAGER_FLAGS_MASK));
#endif
    sqlite3BtreeLeave(aNew->pBt);
  }
  aNew->safety_level = SQLITE_DEFAULT_SYNCHRONOUS+1;
  aNew->zName = sqlite3DbStrDup(db, zName);
  if( rc==SQLITE_OK && aNew->zName==0 ){
    rc = SQLITE_NOMEM_BKPT;
  }


#ifdef SQLITE_HAS_CODEC
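
The attach.c hunk above replaces the hard-coded FULL safety level for newly attached databases with a value derived from the compile-time SQLITE_DEFAULT_SYNCHRONOUS symbol. Whatever default the build chooses, a connection can still pin its own level explicitly (which is what the bSyncSet test in the btree.c change below protects); a minimal sketch:

/* Sketch only: pin the synchronous level for this connection regardless of
** the compile-time default chosen for the build. */
#include <sqlite3.h>

static int pin_synchronous(sqlite3 *db){
  return sqlite3_exec(db, "PRAGMA main.synchronous=FULL;", 0, 0, 0);
}
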
Changes to src/btree.c.
    ** file.
    */
    if( page1[19]==2 && (pBt->btsFlags & BTS_NO_WAL)==0 ){
      int isOpen = 0;
      rc = sqlite3PagerOpenWal(pBt->pPager, &isOpen);
      if( rc!=SQLITE_OK ){
        goto page1_init_failed;

      }else if( isOpen==0 ){
        releasePage(pPage1);
        return SQLITE_OK;

      }
      rc = SQLITE_NOTADB;
    }
#endif

    /* EVIDENCE-OF: R-15465-20813 The maximum and minimum embedded payload
    ** fractions and the leaf payload fraction values must be 64, 32, and 32.
    ** file.
    */
    if( page1[19]==2 && (pBt->btsFlags & BTS_NO_WAL)==0 ){
      int isOpen = 0;
      rc = sqlite3PagerOpenWal(pBt->pPager, &isOpen);
      if( rc!=SQLITE_OK ){
        goto page1_init_failed;
      }else{
#if SQLITE_DEFAULT_SYNCHRONOUS!=SQLITE_DEFAULT_WAL_SYNCHRONOUS
        sqlite3 *db;
        Db *pDb;
        if( (db=pBt->db)!=0 && (pDb=db->aDb)!=0 ){
          while( pDb->pBt==0 || pDb->pBt->pBt!=pBt ){ pDb++; }
          if( pDb->bSyncSet==0
           && pDb->safety_level==SQLITE_DEFAULT_SYNCHRONOUS+1
          ){
            pDb->safety_level = SQLITE_DEFAULT_WAL_SYNCHRONOUS+1;
            sqlite3PagerSetFlags(pBt->pPager,
               pDb->safety_level | (db->flags & PAGER_FLAGS_MASK));
          }
        }
#endif
        if( isOpen==0 ){
          releasePage(pPage1);
          return SQLITE_OK;
        }
      }
      rc = SQLITE_NOTADB;
    }
#endif

    /* EVIDENCE-OF: R-15465-20813 The maximum and minimum embedded payload
    ** fractions and the leaf payload fraction values must be 64, 32, and 32.
      /* Obscure case for non-leaf-data trees: If the cell at pCell was
      ** previously stored on a leaf node, and its reported size was 4
      ** bytes, then it may actually be smaller than this 
      ** (see btreeParseCellPtr(), 4 bytes is the minimum size of
      ** any cell). But it is important to pass the correct size to 
      ** insertCell(), so reparse the cell now.
      **
      ** Note that this can never happen in an SQLite data file, as all
      ** cells are at least 4 bytes. It only happens in b-trees used
      ** to evaluate "IN (SELECT ...)" and similar clauses.


      */
      if( b.szCell[j]==4 ){
        assert(leafCorrection==4);
        sz = pParent->xCellSize(pParent, pCell);
      }
    }
    iOvflSpace += sz;
      /* Obscure case for non-leaf-data trees: If the cell at pCell was
      ** previously stored on a leaf node, and its reported size was 4
      ** bytes, then it may actually be smaller than this 
      ** (see btreeParseCellPtr(), 4 bytes is the minimum size of
      ** any cell). But it is important to pass the correct size to 
      ** insertCell(), so reparse the cell now.
      **


      ** This can only happen for b-trees used to evaluate "IN (SELECT ...)"
      ** and WITHOUT ROWID tables with exactly one column which is the
      ** primary key.
      */
      if( b.szCell[j]==4 ){
        assert(leafCorrection==4);
        sz = pParent->xCellSize(pParent, pCell);
      }
    }
    iOvflSpace += sz;
Changes to src/build.c.
** SQLITE_AFF_NUMERIC is returned.
*/
char sqlite3AffinityType(const char *zIn, u8 *pszEst){
  u32 h = 0;
  char aff = SQLITE_AFF_NUMERIC;
  const char *zChar = 0;

  if( zIn==0 ) return aff;
  while( zIn[0] ){
    h = (h<<8) + sqlite3UpperToLower[(*zIn)&0xff];
    zIn++;
    if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ){             /* CHAR */
      aff = SQLITE_AFF_TEXT;
      zChar = zIn;
    }else if( h==(('c'<<24)+('l'<<16)+('o'<<8)+'b') ){       /* CLOB */
** SQLITE_AFF_NUMERIC is returned.
*/
char sqlite3AffinityType(const char *zIn, u8 *pszEst){
  u32 h = 0;
  char aff = SQLITE_AFF_NUMERIC;
  const char *zChar = 0;

  assert( zIn!=0 );
  while( zIn[0] ){
    h = (h<<8) + sqlite3UpperToLower[(*zIn)&0xff];
    zIn++;
    if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ){             /* CHAR */
      aff = SQLITE_AFF_TEXT;
      zChar = zIn;
    }else if( h==(('c'<<24)+('l'<<16)+('o'<<8)+'b') ){       /* CLOB */
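
The keyword scan in sqlite3AffinityType() above is what maps declared column types onto affinities. A short sketch, purely as illustration, of type names and the affinity each one ends up with under that matching:

/* Sketch only: declared types and the affinities the scan above assigns. */
#include <sqlite3.h>

static int make_demo_table(sqlite3 *db){
  return sqlite3_exec(db,
    "CREATE TABLE demo("
    "  a VARCHAR(10),"    /* contains 'CHAR'  -> TEXT    */
    "  b CLOB,"           /* contains 'CLOB'  -> TEXT    */
    "  c INT,"            /* contains 'INT'   -> INTEGER */
    "  d DOUBLE,"         /* contains 'DOUB'  -> REAL    */
    "  e DECIMAL(10,2)"   /* no keyword match -> NUMERIC */
    ");", 0, 0, 0);
}
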
Changes to src/expr.c.
  if( sqlite3StrICmp(z, "_ROWID_")==0 ) return 1;
  if( sqlite3StrICmp(z, "ROWID")==0 ) return 1;
  if( sqlite3StrICmp(z, "OID")==0 ) return 1;
  return 0;
}

/*
** Return true if we are able to the IN operator optimization on a
** query of the form
**
**       x IN (SELECT ...)
**
** Where the SELECT... clause is as specified by the parameter to this
** routine.
**
** The Select object passed in has already been preprocessed and no
** errors have been found.
*/
#ifndef SQLITE_OMIT_SUBQUERY
static int isCandidateForInOpt(Select *p){

  SrcList *pSrc;
  ExprList *pEList;

  Table *pTab;
  if( p==0 ) return 0;                   /* right-hand side of IN is SELECT */


  if( p->pPrior ) return 0;              /* Not a compound SELECT */
  if( p->selFlags & (SF_Distinct|SF_Aggregate) ){
    testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct );
    testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate );
    return 0; /* No DISTINCT keyword and no aggregate functions */
  }
  assert( p->pGroupBy==0 );              /* Has no GROUP BY clause */
  if( p->pLimit ) return 0;              /* Has no LIMIT clause */
  assert( p->pOffset==0 );               /* No LIMIT means no OFFSET */
  if( p->pWhere ) return 0;              /* Has no WHERE clause */
  pSrc = p->pSrc;
  assert( pSrc!=0 );
  if( pSrc->nSrc!=1 ) return 0;          /* Single term in FROM clause */
  if( pSrc->a[0].pSelect ) return 0;     /* FROM is not a subquery or view */
  pTab = pSrc->a[0].pTab;
  if( NEVER(pTab==0) ) return 0;
  assert( pTab->pSelect==0 );            /* FROM clause is not a view */
  if( IsVirtual(pTab) ) return 0;        /* FROM clause not a virtual table */
  pEList = p->pEList;
  if( pEList->nExpr!=1 ) return 0;       /* One column in the result set */

  if( pEList->a[0].pExpr->op!=TK_COLUMN ) return 0; /* Result is a column */

  return 1;
}
#endif /* SQLITE_OMIT_SUBQUERY */

/*
** Code an OP_Once instruction and allocate space for its flag. Return the 
** address of the new instruction.
*/
  if( sqlite3StrICmp(z, "_ROWID_")==0 ) return 1;
  if( sqlite3StrICmp(z, "ROWID")==0 ) return 1;
  if( sqlite3StrICmp(z, "OID")==0 ) return 1;
  return 0;
}

/*
** pX is the RHS of an IN operator.  If pX is a SELECT statement 
** that can be simplified to a direct table access, then return

** a pointer to the SELECT statement.  If pX is not a SELECT statement,

** or if the SELECT statement needs to be manifested into a transient
** table, then return NULL.



*/
#ifndef SQLITE_OMIT_SUBQUERY
static Select *isCandidateForInOpt(Expr *pX){
  Select *p;
  SrcList *pSrc;
  ExprList *pEList;
  Expr *pRes;
  Table *pTab;
  if( !ExprHasProperty(pX, EP_xIsSelect) ) return 0;  /* Not a subquery */
  if( ExprHasProperty(pX, EP_VarSelect)  ) return 0;  /* Correlated subq */
  p = pX->x.pSelect;
  if( p->pPrior ) return 0;              /* Not a compound SELECT */
  if( p->selFlags & (SF_Distinct|SF_Aggregate) ){
    testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct );
    testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate );
    return 0; /* No DISTINCT keyword and no aggregate functions */
  }
  assert( p->pGroupBy==0 );              /* Has no GROUP BY clause */
  if( p->pLimit ) return 0;              /* Has no LIMIT clause */
  assert( p->pOffset==0 );               /* No LIMIT means no OFFSET */
  if( p->pWhere ) return 0;              /* Has no WHERE clause */
  pSrc = p->pSrc;
  assert( pSrc!=0 );
  if( pSrc->nSrc!=1 ) return 0;          /* Single term in FROM clause */
  if( pSrc->a[0].pSelect ) return 0;     /* FROM is not a subquery or view */
  pTab = pSrc->a[0].pTab;
  assert( pTab!=0 );
  assert( pTab->pSelect==0 );            /* FROM clause is not a view */
  if( IsVirtual(pTab) ) return 0;        /* FROM clause not a virtual table */
  pEList = p->pEList;
  if( pEList->nExpr!=1 ) return 0;       /* One column in the result set */
  pRes = pEList->a[0].pExpr;
  if( pRes->op!=TK_COLUMN ) return 0;    /* Result is a column */
  assert( pRes->iTable==pSrc->a[0].iCursor );  /* Not a correlated subquery */
  return p;
}
#endif /* SQLITE_OMIT_SUBQUERY */

/*
** Code an OP_Once instruction and allocate space for its flag. Return the 
** address of the new instruction.
*/
  assert( pX->op==TK_IN );
  mustBeUnique = (inFlags & IN_INDEX_LOOP)!=0;

  /* Check to see if an existing table or index can be used to
  ** satisfy the query.  This is preferable to generating a new 
  ** ephemeral table.
  */
  p = (ExprHasProperty(pX, EP_xIsSelect) ? pX->x.pSelect : 0);
  if( pParse->nErr==0 && isCandidateForInOpt(p) ){
    sqlite3 *db = pParse->db;              /* Database connection */
    Table *pTab;                           /* Table <table>. */
    Expr *pExpr;                           /* Expression <column> */
    i16 iCol;                              /* Index of column <column> */
    i16 iDb;                               /* Database idx for pTab */

    assert( p );                        /* Because of isCandidateForInOpt(p) */
    assert( p->pEList!=0 );             /* Because of isCandidateForInOpt(p) */
    assert( p->pEList->a[0].pExpr!=0 ); /* Because of isCandidateForInOpt(p) */
    assert( p->pSrc!=0 );               /* Because of isCandidateForInOpt(p) */
    pTab = p->pSrc->a[0].pTab;
    pExpr = p->pEList->a[0].pExpr;
    iCol = (i16)pExpr->iColumn;
   

  assert( pX->op==TK_IN );
  mustBeUnique = (inFlags & IN_INDEX_LOOP)!=0;

  /* Check to see if an existing table or index can be used to
  ** satisfy the query.  This is preferable to generating a new 
  ** ephemeral table.
  */

  if( pParse->nErr==0 && (p = isCandidateForInOpt(pX))!=0 ){
    sqlite3 *db = pParse->db;              /* Database connection */
    Table *pTab;                           /* Table <table>. */
    Expr *pExpr;                           /* Expression <column> */
    i16 iCol;                              /* Index of column <column> */
    i16 iDb;                               /* Database idx for pTab */


    assert( p->pEList!=0 );             /* Because of isCandidateForInOpt(p) */
    assert( p->pEList->a[0].pExpr!=0 ); /* Because of isCandidateForInOpt(p) */
    assert( p->pSrc!=0 );               /* Because of isCandidateForInOpt(p) */
    pTab = p->pSrc->a[0].pTab;
    pExpr = p->pEList->a[0].pExpr;
    iCol = (i16)pExpr->iColumn;
   
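
The rewritten isCandidateForInOpt() above takes the IN operator's right-hand side directly and returns the SELECT only when it can be answered from an existing table rather than a manifested ephemeral table. A hedged sketch of a query shape that passes those checks:

/* Sketch only: the IN(...) subquery below is a single-table, single-column
** SELECT with no WHERE, LIMIT, DISTINCT or aggregates, so it is a candidate
** for being satisfied directly by t2 or its index. */
#include <sqlite3.h>

static int run_in_query(sqlite3 *db){
  return sqlite3_exec(db,
    "CREATE TABLE t1(x INTEGER PRIMARY KEY, y);"
    "CREATE TABLE t2(a, b);"
    "CREATE INDEX t2a ON t2(a);"
    "SELECT y FROM t1 WHERE x IN (SELECT a FROM t2);",
    0, 0, 0);
}
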
Changes to src/main.c.
  sqlite3BtreeLeave(db->aDb[0].pBt);
  db->aDb[1].pSchema = sqlite3SchemaGet(db, 0);

  /* The default safety_level for the main database is FULL; for the temp
  ** database it is OFF. This matches the pager layer defaults.  
  */
  db->aDb[0].zName = "main";
  db->aDb[0].safety_level = PAGER_SYNCHRONOUS_FULL;
  db->aDb[1].zName = "temp";
  db->aDb[1].safety_level = PAGER_SYNCHRONOUS_OFF;

  db->magic = SQLITE_MAGIC_OPEN;
  if( db->mallocFailed ){
    goto opendb_out;
  }
  sqlite3BtreeLeave(db->aDb[0].pBt);
  db->aDb[1].pSchema = sqlite3SchemaGet(db, 0);

  /* The default safety_level for the main database is FULL; for the temp
  ** database it is OFF. This matches the pager layer defaults.  
  */
  db->aDb[0].zName = "main";
  db->aDb[0].safety_level = SQLITE_DEFAULT_SYNCHRONOUS+1;
  db->aDb[1].zName = "temp";
  db->aDb[1].safety_level = PAGER_SYNCHRONOUS_OFF;

  db->magic = SQLITE_MAGIC_OPEN;
  if( db->mallocFailed ){
    goto opendb_out;
  }
Changes to src/memjournal.c.
  FileChunk *pFirst;              /* Head of in-memory chunk-list */
  FilePoint endpoint;             /* Pointer to the end of the file */
  FilePoint readpoint;            /* Pointer to the end of the last xRead() */

  int flags;                      /* xOpen flags */
  sqlite3_vfs *pVfs;              /* The "real" underlying VFS */
  const char *zJournal;           /* Name of the journal file */
  sqlite3_file *pReal;            /* The "real" underlying file descriptor */
};

/*
** Read data from the in-memory journal file.  This is the implementation
** of the sqlite3_vfs.xRead method.
*/
static int memjrnlRead(
  sqlite3_file *pJfd,    /* The journal file from which to read */
  void *zBuf,            /* Put the results here */
  int iAmt,              /* Number of bytes to read */
  sqlite_int64 iOfst     /* Begin reading at this offset */
){
  MemJournal *p = (MemJournal *)pJfd;
  if( p->pReal ){
    return sqlite3OsRead(p->pReal, zBuf, iAmt, iOfst);
  }else if( (iAmt+iOfst)>p->endpoint.iOffset ){
    return SQLITE_IOERR_SHORT_READ;
  }else{
    u8 *zOut = zBuf;
    int nRead = iAmt;
    int iChunkOffset;
    FileChunk *pChunk;








    if( p->readpoint.iOffset!=iOfst || iOfst==0 ){
      sqlite3_int64 iOff = 0;
      for(pChunk=p->pFirst; 
          ALWAYS(pChunk) && (iOff+p->nChunkSize)<=iOfst;
          pChunk=pChunk->pNext
      ){
        iOff += p->nChunkSize;
      }
    }else{
      pChunk = p->readpoint.pChunk;
    }

    iChunkOffset = (int)(iOfst%p->nChunkSize);
    do {
      int iSpace = p->nChunkSize - iChunkOffset;
      int nCopy = MIN(nRead, (p->nChunkSize - iChunkOffset));
      memcpy(zOut, (u8*)pChunk->zChunk + iChunkOffset, nCopy);
      zOut += nCopy;
      nRead -= iSpace;
      iChunkOffset = 0;
    } while( nRead>=0 && (pChunk=pChunk->pNext)!=0 && nRead>0 );
    p->readpoint.iOffset = iOfst+iAmt;
    p->readpoint.pChunk = pChunk;
  }

  return SQLITE_OK;
}

/*
** Free the list of FileChunk structures headed at MemJournal.pFirst.
*/
  FileChunk *pFirst;              /* Head of in-memory chunk-list */
  FilePoint endpoint;             /* Pointer to the end of the file */
  FilePoint readpoint;            /* Pointer to the end of the last xRead() */

  int flags;                      /* xOpen flags */
  sqlite3_vfs *pVfs;              /* The "real" underlying VFS */
  const char *zJournal;           /* Name of the journal file */

};

/*
** Read data from the in-memory journal file.  This is the implementation
** of the sqlite3_vfs.xRead method.
*/
static int memjrnlRead(
  sqlite3_file *pJfd,    /* The journal file from which to read */
  void *zBuf,            /* Put the results here */
  int iAmt,              /* Number of bytes to read */
  sqlite_int64 iOfst     /* Begin reading at this offset */
){
  MemJournal *p = (MemJournal *)pJfd;





  u8 *zOut = zBuf;
  int nRead = iAmt;
  int iChunkOffset;
  FileChunk *pChunk;

#ifdef SQLITE_ENABLE_ATOMIC_WRITE
  if( (iAmt+iOfst)>p->endpoint.iOffset ){
    return SQLITE_IOERR_SHORT_READ;
  }
#endif

  assert( (iAmt+iOfst)<=p->endpoint.iOffset );
  if( p->readpoint.iOffset!=iOfst || iOfst==0 ){
    sqlite3_int64 iOff = 0;
    for(pChunk=p->pFirst; 
        ALWAYS(pChunk) && (iOff+p->nChunkSize)<=iOfst;
        pChunk=pChunk->pNext
    ){
      iOff += p->nChunkSize;
    }
  }else{
    pChunk = p->readpoint.pChunk;
  }

  iChunkOffset = (int)(iOfst%p->nChunkSize);
  do {
    int iSpace = p->nChunkSize - iChunkOffset;
    int nCopy = MIN(nRead, (p->nChunkSize - iChunkOffset));
    memcpy(zOut, (u8*)pChunk->zChunk + iChunkOffset, nCopy);
    zOut += nCopy;
    nRead -= iSpace;
    iChunkOffset = 0;
  } while( nRead>=0 && (pChunk=pChunk->pNext)!=0 && nRead>0 );
  p->readpoint.iOffset = iOfst+iAmt;
  p->readpoint.pChunk = pChunk;


  return SQLITE_OK;
}

/*
** Free the list of FileChunk structures headed at MemJournal.pFirst.
*/
  p->pFirst = 0;
}

/*
** Flush the contents of memory to a real file on disk.
*/
static int memjrnlCreateFile(MemJournal *p){
  int rc = SQLITE_OK;
  if( !p->pReal ){
    sqlite3_file *pReal = (sqlite3_file *)&p[1];

    rc = sqlite3OsOpen(p->pVfs, p->zJournal, pReal, p->flags, 0);


    if( rc==SQLITE_OK ){
      int nChunk = p->nChunkSize;
      i64 iOff = 0;
      FileChunk *pIter;
      p->pReal = pReal;
      for(pIter=p->pFirst; pIter && rc==SQLITE_OK; pIter=pIter->pNext){
        int nWrite = nChunk;
        if( pIter==p->endpoint.pChunk ){
          nWrite = p->endpoint.iOffset % p->nChunkSize;
          if( nWrite==0 ) nWrite = p->nChunkSize;
        }
        rc = sqlite3OsWrite(pReal, (u8*)pIter->zChunk, nWrite, iOff);

        iOff += nWrite;
      }





      if( rc!=SQLITE_OK ){
        /* If an error occurred while writing to the file, close it before
        ** returning. This way, SQLite uses the in-memory journal data to 
        ** roll back changes made to the internal page-cache before this
        ** function was called.  */
        sqlite3OsClose(pReal);
        p->pReal = 0;
      }else{
        /* No error has occurred. Free the in-memory buffers. */
        memjrnlFreeChunks(p);
      }
    }
  }
  return rc;
}


/*
** Write data to the file.
*/
static int memjrnlWrite(
  sqlite3_file *pJfd,    /* The journal file into which to write */
  const void *zBuf,      /* Take data to be written from here */
  int iAmt,              /* Number of bytes to write */
  sqlite_int64 iOfst     /* Begin writing at this offset into the file */
){
  MemJournal *p = (MemJournal *)pJfd;
  int nWrite = iAmt;
  u8 *zWrite = (u8 *)zBuf;

  /* If the file has already been created on disk. */
  if( p->pReal ){
    return sqlite3OsWrite(p->pReal, zBuf, iAmt, iOfst);
  }

  /* If the file should be created now. */

  else if( p->nSpill>0 && (iAmt+iOfst)>p->nSpill ){
    int rc = memjrnlCreateFile(p);
    if( rc==SQLITE_OK ){
      rc = memjrnlWrite(pJfd, zBuf, iAmt, iOfst);
    }
    return rc;
  }

  /* If the contents of this write should be stored in memory */
  else{
    /* An in-memory journal file should only ever be appended to. Random
    ** access writes are not required. The only exception to this is when
    ** the in-memory journal is being used by a connection using the
    ** atomic-write optimization. In this case the first 28 bytes of the
    ** journal file may be written as part of committing the transaction. */ 
    assert( iOfst==p->endpoint.iOffset || iOfst==0 );

    if( iOfst==0 && p->pFirst ){
      assert( p->nChunkSize>iAmt );
      memcpy((u8*)p->pFirst->zChunk, zBuf, iAmt);
    }else{




      while( nWrite>0 ){
        FileChunk *pChunk = p->endpoint.pChunk;
        int iChunkOffset = (int)(p->endpoint.iOffset%p->nChunkSize);
        int iSpace = MIN(nWrite, p->nChunkSize - iChunkOffset);

        if( iChunkOffset==0 ){
          /* New chunk is required to extend the file. */







  p->pFirst = 0;
}

/*
** Flush the contents of memory to a real file on disk.
*/
static int memjrnlCreateFile(MemJournal *p){
  int rc;

  sqlite3_file *pReal = (sqlite3_file*)p;
  MemJournal copy = *p;

  memset(p, 0, sizeof(MemJournal));
  rc = sqlite3OsOpen(copy.pVfs, copy.zJournal, pReal, copy.flags, 0);
  if( rc==SQLITE_OK ){
    int nChunk = copy.nChunkSize;
    i64 iOff = 0;
    FileChunk *pIter;

    for(pIter=copy.pFirst; pIter; pIter=pIter->pNext){

      if( iOff + nChunk > copy.endpoint.iOffset ){
        nChunk = copy.endpoint.iOffset - iOff;

      }
      rc = sqlite3OsWrite(pReal, (u8*)pIter->zChunk, nChunk, iOff);
      if( rc ) break;
      iOff += nChunk;
    }
    if( rc==SQLITE_OK ){
      /* No error has occurred. Free the in-memory buffers. */
      memjrnlFreeChunks(&copy);
    }
  }
  if( rc!=SQLITE_OK ){
    /* If an error occurred while creating or writing to the file, restore
    ** the original before returning. This way, SQLite uses the in-memory
    ** journal data to roll back changes made to the internal page-cache
    ** before this function was called.  */
    sqlite3OsClose(pReal);
    *p = copy;





  }
  return rc;
}


/*
** Write data to the file.
*/
static int memjrnlWrite(
  sqlite3_file *pJfd,    /* The journal file into which to write */
  const void *zBuf,      /* Take data to be written from here */
  int iAmt,              /* Number of bytes to write */
  sqlite_int64 iOfst     /* Begin writing at this offset into the file */
){
  MemJournal *p = (MemJournal *)pJfd;
  int nWrite = iAmt;
  u8 *zWrite = (u8 *)zBuf;






  /* If the file should be created now, create it and write the new data
  ** into the file on disk. */
  if( p->nSpill>0 && (iAmt+iOfst)>p->nSpill ){
    int rc = memjrnlCreateFile(p);
    if( rc==SQLITE_OK ){
      rc = sqlite3OsWrite(pJfd, zBuf, iAmt, iOfst);
    }
    return rc;
  }

  /* If the contents of this write should be stored in memory */
  else{
    /* An in-memory journal file should only ever be appended to. Random
    ** access writes are not required. The only exception to this is when
    ** the in-memory journal is being used by a connection using the
    ** atomic-write optimization. In this case the first 28 bytes of the
    ** journal file may be written as part of committing the transaction. */ 
    assert( iOfst==p->endpoint.iOffset || iOfst==0 );
#ifdef SQLITE_ENABLE_ATOMIC_WRITE
    if( iOfst==0 && p->pFirst ){
      assert( p->nChunkSize>iAmt );
      memcpy((u8*)p->pFirst->zChunk, zBuf, iAmt);
    }else
#else
    assert( iOfst>0 || p->pFirst==0 );
#endif
    {
      while( nWrite>0 ){
        FileChunk *pChunk = p->endpoint.pChunk;
        int iChunkOffset = (int)(p->endpoint.iOffset%p->nChunkSize);
        int iSpace = MIN(nWrite, p->nChunkSize - iChunkOffset);

        if( iChunkOffset==0 ){
          /* New chunk is required to extend the file. */
**
** If the journal file is already on disk, truncate it there. Or, if it
** is still in main memory but is being truncated to zero bytes in size,
** ignore 
*/
static int memjrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){
  MemJournal *p = (MemJournal *)pJfd;
  if( p->pReal ){
    return sqlite3OsTruncate(p->pReal, size);
  }else if( size==0 ){
    memjrnlFreeChunks(p);
    p->nSize = 0;
    p->endpoint.pChunk = 0;
    p->endpoint.iOffset = 0;
    p->readpoint.pChunk = 0;
    p->readpoint.iOffset = 0;
  }
  return SQLITE_OK;
}

/*
** Close the file.
*/
static int memjrnlClose(sqlite3_file *pJfd){
  MemJournal *p = (MemJournal *)pJfd;
  memjrnlFreeChunks(p);
  if( p->pReal ) sqlite3OsClose(p->pReal);
  return SQLITE_OK;
}

/*
** Sync the file.
**
** If the real file has been created, call its xSync method. Otherwise, 
** syncing an in-memory journal is a no-op. 
*/
static int memjrnlSync(sqlite3_file *pJfd, int flags){
  MemJournal *p = (MemJournal *)pJfd;
  if( p->pReal ){
    return sqlite3OsSync(p->pReal, flags);
  }
  return SQLITE_OK;
}

/*
** Query the size of the file in bytes.
*/
static int memjrnlFileSize(sqlite3_file *pJfd, sqlite_int64 *pSize){
  MemJournal *p = (MemJournal *)pJfd;
  if( p->pReal ){
    return sqlite3OsFileSize(p->pReal, pSize);
  }
  *pSize = (sqlite_int64) p->endpoint.iOffset;
  return SQLITE_OK;
}

/*
** Table of methods for MemJournal sqlite3_file object.
*/







**
** If the journal file is already on disk, truncate it there. Or, if it
** is still in main memory but is being truncated to zero bytes in size,
** ignore 
*/
static int memjrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){
  MemJournal *p = (MemJournal *)pJfd;


  if( ALWAYS(size==0) ){
    memjrnlFreeChunks(p);
    p->nSize = 0;
    p->endpoint.pChunk = 0;
    p->endpoint.iOffset = 0;
    p->readpoint.pChunk = 0;
    p->readpoint.iOffset = 0;
  }
  return SQLITE_OK;
}

/*
** Close the file.
*/
static int memjrnlClose(sqlite3_file *pJfd){
  MemJournal *p = (MemJournal *)pJfd;
  memjrnlFreeChunks(p);

  return SQLITE_OK;
}

/*
** Sync the file.
**
** If the real file has been created, call its xSync method. Otherwise, 
** syncing an in-memory journal is a no-op. 
*/
static int memjrnlSync(sqlite3_file *pJfd, int flags){


  UNUSED_PARAMETER2(pJfd, flags);

  return SQLITE_OK;
}

/*
** Query the size of the file in bytes.
*/
static int memjrnlFileSize(sqlite3_file *pJfd, sqlite_int64 *pSize){
  MemJournal *p = (MemJournal *)pJfd;



  *pSize = (sqlite_int64) p->endpoint.iOffset;
  return SQLITE_OK;
}

/*
** Table of methods for MemJournal sqlite3_file object.
*/
){
  MemJournal *p = (MemJournal*)pJfd;

  /* Zero the file-handle object. If nSpill was passed zero, initialize
  ** it using the sqlite3OsOpen() function of the underlying VFS. In this
  ** case none of the code in this module is executed as a result of calls
  ** made on the journal file-handle.  */
  memset(p, 0, sizeof(MemJournal) + (pVfs ? pVfs->szOsFile : 0));
  if( nSpill==0 ){
    return sqlite3OsOpen(pVfs, zName, pJfd, flags, 0);
  }

  if( nSpill>0 ){
    p->nChunkSize = nSpill;
  }else{







){
  MemJournal *p = (MemJournal*)pJfd;

  /* Zero the file-handle object. If nSpill was passed zero, initialize
  ** it using the sqlite3OsOpen() function of the underlying VFS. In this
  ** case none of the code in this module is executed as a result of calls
  ** made on the journal file-handle.  */
  memset(p, 0, sizeof(MemJournal));
  if( nSpill==0 ){
    return sqlite3OsOpen(pVfs, zName, pJfd, flags, 0);
  }

  if( nSpill>0 ){
    p->nChunkSize = nSpill;
  }else{

/*
** The file-handle passed as the only argument is open on a journal file.
** Return true if this "journal file" is currently stored in heap memory,
** or false otherwise.
*/
int sqlite3JournalIsInMemory(sqlite3_file *p){
  return p->pMethods==&MemJournalMethods && ((MemJournal*)p)->pReal==0;
}

/* 
** Return the number of bytes required to store a JournalFile that uses vfs
** pVfs to create the underlying on-disk files.
*/
int sqlite3JournalSize(sqlite3_vfs *pVfs){
  return pVfs->szOsFile + sizeof(MemJournal);
}








/*
** The file-handle passed as the only argument is open on a journal file.
** Return true if this "journal file" is currently stored in heap memory,
** or false otherwise.
*/
int sqlite3JournalIsInMemory(sqlite3_file *p){
  return p->pMethods==&MemJournalMethods;
}

/* 
** Return the number of bytes required to store a JournalFile that uses vfs
** pVfs to create the underlying on-disk files.
*/
int sqlite3JournalSize(sqlite3_vfs *pVfs){
  return MAX(pVfs->szOsFile, sizeof(MemJournal));
}
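Note on the sqlite3JournalSize() change above: because memjrnlCreateFile() now re-opens the real file handle in place over the same allocation that held the MemJournal, the caller's buffer must be large enough for whichever representation is bigger, hence MAX(pVfs->szOsFile, sizeof(MemJournal)). A hedged standalone sketch of that sizing rule, with made-up MemState/OsState structs standing in for the two forms:

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for the two states one journal handle can be in. */
typedef struct MemState { char zTag[4]; char aChunkList[96]; } MemState;
typedef struct OsState  { char zTag[4]; char aVfsPrivate[160]; } OsState;

#define MAXSZ(a,b) ((a)>(b) ? (a) : (b))

int main(void){
  /* One allocation big enough for either representation, mirroring
  ** MAX(pVfs->szOsFile, sizeof(MemJournal)) in the new code. */
  void *pHandle = malloc(MAXSZ(sizeof(MemState), sizeof(OsState)));
  if( pHandle==0 ) return 1;

  /* The handle starts life as the in-memory form... */
  memset(pHandle, 0, sizeof(MemState));
  memcpy(((MemState*)pHandle)->zTag, "mem", 4);

  /* ...and is later re-initialized in place as the on-disk form, which is
  ** what memjrnlCreateFile() does once the spill threshold is crossed. */
  memset(pHandle, 0, sizeof(OsState));
  memcpy(((OsState*)pHandle)->zTag, "os", 3);

  free(pHandle);
  return 0;
}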
Changes to src/os_win.c.
  OSTRACE(("FCNTL file=%p, op=%d, pArg=%p\n", pFile->h, op, pArg));
  switch( op ){
    case SQLITE_FCNTL_LOCKSTATE: {
      *(int*)pArg = pFile->locktype;
      OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
      return SQLITE_OK;
    }
    case SQLITE_LAST_ERRNO: {
      *(int*)pArg = (int)pFile->lastErrno;
      OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
      return SQLITE_OK;
    }
    case SQLITE_FCNTL_CHUNK_SIZE: {
      pFile->szChunk = *(int *)pArg;
      OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));







  OSTRACE(("FCNTL file=%p, op=%d, pArg=%p\n", pFile->h, op, pArg));
  switch( op ){
    case SQLITE_FCNTL_LOCKSTATE: {
      *(int*)pArg = pFile->locktype;
      OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
      return SQLITE_OK;
    }
    case SQLITE_FCNTL_LAST_ERRNO: {
      *(int*)pArg = (int)pFile->lastErrno;
      OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
      return SQLITE_OK;
    }
    case SQLITE_FCNTL_CHUNK_SIZE: {
      pFile->szChunk = *(int *)pArg;
      OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
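The file-control verb renamed above can be exercised from application code with sqlite3_file_control(); a small sketch (assumes an open connection db and a VFS that implements the verb; SQLITE_LAST_ERRNO remains available as a legacy alias for SQLITE_FCNTL_LAST_ERRNO):

#include <stdio.h>
#include "sqlite3.h"

/* Ask the VFS layer of the "main" database for the last OS-level error code. */
static void showLastOsErrno(sqlite3 *db){
  int iErrno = 0;
  int rc = sqlite3_file_control(db, "main", SQLITE_FCNTL_LAST_ERRNO, &iErrno);
  if( rc==SQLITE_OK ){
    printf("last OS errno: %d\n", iErrno);
  }else{
    printf("file control not supported here (rc=%d)\n", rc);
  }
}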
Changes to src/pager.c.
** The maximum allowed sector size. 64KiB. If the xSectorsize() method 
** returns a value larger than this, then MAX_SECTOR_SIZE is used instead.
** This could conceivably cause corruption following a power failure on
** such a system. This is currently an undocumented limit.
*/
#define MAX_SECTOR_SIZE 0x10000

/*
** If the option SQLITE_EXTRA_DURABLE option is set at compile-time, then
** SQLite will do extra fsync() operations when synchronous==FULL to help
** ensure that transactions are durable across a power failure.  Most
** applications are happy as long as transactions are consistent across
** a power failure, and are perfectly willing to lose the last transaction
** in exchange for the extra performance of avoiding directory syncs.
** And so the default SQLITE_EXTRA_DURABLE setting is off.
*/
#ifndef SQLITE_EXTRA_DURABLE
# define SQLITE_EXTRA_DURABLE 0
#endif


/*
** An instance of the following structure is allocated for each active
** savepoint and statement transaction in the system. All such structures
** are stored in the Pager.aSavepoint[] array, which is allocated and
** resized using sqlite3Realloc().
**







** The maximum allowed sector size. 64KiB. If the xSectorsize() method 
** returns a value larger than this, then MAX_SECTOR_SIZE is used instead.
** This could conceivably cause corruption following a power failure on
** such a system. This is currently an undocumented limit.
*/
#define MAX_SECTOR_SIZE 0x10000















/*
** An instance of the following structure is allocated for each active
** savepoint and statement transaction in the system. All such structures
** are stored in the Pager.aSavepoint[] array, which is allocated and
** resized using sqlite3Realloc().
**

/*
** Adjust settings of the pager to those specified in the pgFlags parameter.
**
** The "level" in pgFlags & PAGER_SYNCHRONOUS_MASK sets the robustness
** of the database to damage due to OS crashes or power failures by
** changing the number of syncs()s when writing the journals.
** There are three levels:
**
**    OFF       sqlite3OsSync() is never called.  This is the default
**              for temporary and transient files.
**
**    NORMAL    The journal is synced once before writes begin on the
**              database.  This is normally adequate protection, but
**              it is theoretically possible, though very unlikely,
**              that an inopertune power failure could leave the journal
**              in a state which would cause damage to the database
**              when it is rolled back.
**
**    FULL      The journal is synced twice before writes begin on the
**              database (with some additional information - the nRec field
**              of the journal header - being written in between the two
**              syncs).  If we assume that writing a
**              single disk sector is atomic, then this mode provides
**              assurance that the journal will not be corrupted to the
**              point of causing damage to the database during rollback.




**
** The above is for a rollback-journal mode.  For WAL mode, OFF continues
** to mean that no syncs ever occur.  NORMAL means that the WAL is synced
** prior to the start of checkpoint and that the database file is synced
** at the conclusion of the checkpoint if the entire content of the WAL
** was written back into the database.  But no sync operations occur for
** an ordinary commit in NORMAL mode with WAL.  FULL means that the WAL
** file is synced following each commit operation, in addition to the
** syncs associated with NORMAL.

**
** Do not confuse synchronous=FULL with SQLITE_SYNC_FULL.  The
** SQLITE_SYNC_FULL macro means to use the MacOSX-style full-fsync
** using fcntl(F_FULLFSYNC).  SQLITE_SYNC_NORMAL means to do an
** ordinary fsync() call.  There is no difference between SQLITE_SYNC_FULL
** and SQLITE_SYNC_NORMAL on platforms other than MacOSX.  But the
** synchronous=FULL versus synchronous=NORMAL setting determines when








/*
** Adjust settings of the pager to those specified in the pgFlags parameter.
**
** The "level" in pgFlags & PAGER_SYNCHRONOUS_MASK sets the robustness
** of the database to damage due to OS crashes or power failures by
** changing the number of syncs()s when writing the journals.
** There are four levels:
**
**    OFF       sqlite3OsSync() is never called.  This is the default
**              for temporary and transient files.
**
**    NORMAL    The journal is synced once before writes begin on the
**              database.  This is normally adequate protection, but
**              it is theoretically possible, though very unlikely,
**              that an inopertune power failure could leave the journal
**              in a state which would cause damage to the database
**              when it is rolled back.
**
**    FULL      The journal is synced twice before writes begin on the
**              database (with some additional information - the nRec field
**              of the journal header - being written in between the two
**              syncs).  If we assume that writing a
**              single disk sector is atomic, then this mode provides
**              assurance that the journal will not be corrupted to the
**              point of causing damage to the database during rollback.
**
**    EXTRA     This is like FULL except that is also syncs the directory
**              that contains the rollback journal after the rollback
**              journal is unlinked.
**
** The above is for a rollback-journal mode.  For WAL mode, OFF continues
** to mean that no syncs ever occur.  NORMAL means that the WAL is synced
** prior to the start of checkpoint and that the database file is synced
** at the conclusion of the checkpoint if the entire content of the WAL
** was written back into the database.  But no sync operations occur for
** an ordinary commit in NORMAL mode with WAL.  FULL means that the WAL
** file is synced following each commit operation, in addition to the
** syncs associated with NORMAL.  There is no difference between FULL
** and EXTRA for WAL mode.
**
** Do not confuse synchronous=FULL with SQLITE_SYNC_FULL.  The
** SQLITE_SYNC_FULL macro means to use the MacOSX-style full-fsync
** using fcntl(F_FULLFSYNC).  SQLITE_SYNC_NORMAL means to do an
** ordinary fsync() call.  There is no difference between SQLITE_SYNC_FULL
** and SQLITE_SYNC_NORMAL on platforms other than MacOSX.  But the
** synchronous=FULL versus synchronous=NORMAL setting determines when
    assert( pPager->fullSync==0 );
    assert( pPager->extraSync==0 );
    assert( pPager->syncFlags==0 );
    assert( pPager->walSyncFlags==0 );
    assert( pPager->ckptSyncFlags==0 );
  }else{
    pPager->fullSync = 1;
#if SQLITE_EXTRA_DURABLE
    pPager->extraSync = 1;
#else
    pPager->extraSync = 0;
#endif
    pPager->syncFlags = SQLITE_SYNC_NORMAL;
    pPager->walSyncFlags = SQLITE_SYNC_NORMAL | WAL_SYNC_TRANSACTIONS;
    pPager->ckptSyncFlags = SQLITE_SYNC_NORMAL;
  }
  /* pPager->pFirst = 0; */
  /* pPager->pFirstSynced = 0; */
  /* pPager->pLast = 0; */







    assert( pPager->fullSync==0 );
    assert( pPager->extraSync==0 );
    assert( pPager->syncFlags==0 );
    assert( pPager->walSyncFlags==0 );
    assert( pPager->ckptSyncFlags==0 );
  }else{
    pPager->fullSync = 1;



    pPager->extraSync = 0;

    pPager->syncFlags = SQLITE_SYNC_NORMAL;
    pPager->walSyncFlags = SQLITE_SYNC_NORMAL | WAL_SYNC_TRANSACTIONS;
    pPager->ckptSyncFlags = SQLITE_SYNC_NORMAL;
  }
  /* pPager->pFirst = 0; */
  /* pPager->pFirstSynced = 0; */
  /* pPager->pLast = 0; */

/*
** Return true if the underlying VFS for the given pager supports the
** primitives necessary for write-ahead logging.
*/
int sqlite3PagerWalSupported(Pager *pPager){
  const sqlite3_io_methods *pMethods = pPager->fd->pMethods;

  return pPager->exclusiveMode || (pMethods->iVersion>=2 && pMethods->xShmMap);
}

/*
** Attempt to take an exclusive lock on the database file. If a PENDING lock
** is obtained instead, immediately release it.
*/








/*
** Return true if the underlying VFS for the given pager supports the
** primitives necessary for write-ahead logging.
*/
int sqlite3PagerWalSupported(Pager *pPager){
  const sqlite3_io_methods *pMethods = pPager->fd->pMethods;
  if( pPager->noLock ) return 0;
  return pPager->exclusiveMode || (pMethods->iVersion>=2 && pMethods->xShmMap);
}

/*
** Attempt to take an exclusive lock on the database file. If a PENDING lock
** is obtained instead, immediately release it.
*/
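The rollback-journal sync levels described in the pager comments above (OFF, NORMAL, FULL, and the new EXTRA) are selected at run time with PRAGMA synchronous. A minimal sketch of requesting the new level from C, using the numeric value 3 which corresponds to EXTRA (assumes an open connection db):

#include <stdio.h>
#include "sqlite3.h"

/* Request the most durable rollback-journal behaviour: like FULL, plus a
** sync of the containing directory after the journal is unlinked. */
static int useExtraSync(sqlite3 *db){
  char *zErr = 0;
  int rc = sqlite3_exec(db, "PRAGMA synchronous=3;", 0, 0, &zErr);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "PRAGMA failed: %s\n", zErr ? zErr : "unknown error");
    sqlite3_free(zErr);
  }
  return rc;
}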
Changes to src/pragma.c.
      if( !db->autoCommit ){
        sqlite3ErrorMsg(pParse, 
            "Safety level may not be changed inside a transaction");
      }else{
        int iLevel = (getSafetyLevel(zRight,0,1)+1) & PAGER_SYNCHRONOUS_MASK;
        if( iLevel==0 ) iLevel = 1;
        pDb->safety_level = iLevel;

        setAllPagerFlags(db);
      }
    }
    break;
  }
#endif /* SQLITE_OMIT_PAGER_PRAGMAS */








      if( !db->autoCommit ){
        sqlite3ErrorMsg(pParse, 
            "Safety level may not be changed inside a transaction");
      }else{
        int iLevel = (getSafetyLevel(zRight,0,1)+1) & PAGER_SYNCHRONOUS_MASK;
        if( iLevel==0 ) iLevel = 1;
        pDb->safety_level = iLevel;
        pDb->bSyncSet = 1;
        setAllPagerFlags(db);
      }
    }
    break;
  }
#endif /* SQLITE_OMIT_PAGER_PRAGMAS */

    }
    sqlite3VdbeAddOp2(v, OP_Integer, mxErr, 1);  /* reg[1] holds errors left */

    /* Do an integrity check on each database file */
    for(i=0; i<db->nDb; i++){
      HashElem *x;
      Hash *pTbls;

      int cnt = 0;

      if( OMIT_TEMPDB && i==1 ) continue;
      if( iDb>=0 && i!=iDb ) continue;

      sqlite3CodeVerifySchema(pParse, i);
      addr = sqlite3VdbeAddOp1(v, OP_IfPos, 1); /* Halt if out of errors */
      VdbeCoverage(v);
      sqlite3VdbeAddOp2(v, OP_Halt, 0, 0);
      sqlite3VdbeJumpHere(v, addr);

      /* Do an integrity check of the B-Tree
      **
      ** Begin by filling registers 2, 3, ... with the root pages numbers
      ** for all tables and indices in the database.
      */
      assert( sqlite3SchemaMutexHeld(db, i, 0) );
      pTbls = &db->aDb[i].pSchema->tblHash;
      for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){
        Table *pTab = sqliteHashData(x);
        Index *pIdx;
        if( HasRowid(pTab) ){
          sqlite3VdbeAddOp2(v, OP_Integer, pTab->tnum, 2+cnt);
          VdbeComment((v, "%s", pTab->zName));
          cnt++;
        }






        for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
          sqlite3VdbeAddOp2(v, OP_Integer, pIdx->tnum, 2+cnt);
          VdbeComment((v, "%s", pIdx->zName));
          cnt++;
        }
      }


      /* Make sure sufficient number of registers have been allocated */
      pParse->nMem = MAX( pParse->nMem, cnt+8 );

      /* Do the b-tree integrity checks */
      sqlite3VdbeAddOp3(v, OP_IntegrityCk, 2, cnt, 1);
      sqlite3VdbeChangeP5(v, (u8)i);
      addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); VdbeCoverage(v);
      sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0,
         sqlite3MPrintf(db, "*** in database %s ***\n", db->aDb[i].zName),
         P4_DYNAMIC);
      sqlite3VdbeAddOp3(v, OP_Move, 2, 4, 1);
      sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 2);







    }
    sqlite3VdbeAddOp2(v, OP_Integer, mxErr, 1);  /* reg[1] holds errors left */

    /* Do an integrity check on each database file */
    for(i=0; i<db->nDb; i++){
      HashElem *x;
      Hash *pTbls;
      int *aRoot;
      int cnt = 0;

      if( OMIT_TEMPDB && i==1 ) continue;
      if( iDb>=0 && i!=iDb ) continue;

      sqlite3CodeVerifySchema(pParse, i);
      addr = sqlite3VdbeAddOp1(v, OP_IfPos, 1); /* Halt if out of errors */
      VdbeCoverage(v);
      sqlite3VdbeAddOp2(v, OP_Halt, 0, 0);
      sqlite3VdbeJumpHere(v, addr);

      /* Do an integrity check of the B-Tree
      **
      ** Begin by finding the root pages numbers
      ** for all tables and indices in the database.
      */
      assert( sqlite3SchemaMutexHeld(db, i, 0) );
      pTbls = &db->aDb[i].pSchema->tblHash;
      for(cnt=0, x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){
        Table *pTab = sqliteHashData(x);
        Index *pIdx;
        if( HasRowid(pTab) ) cnt++;


        for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ cnt++; }
      }
      aRoot = sqlite3DbMallocRawNN(db, sizeof(int)*(cnt+1));
      if( aRoot==0 ) break;
      for(cnt=0, x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){
        Table *pTab = sqliteHashData(x);
        Index *pIdx;
        if( HasRowid(pTab) ) aRoot[cnt++] = pTab->tnum;
        for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){


          aRoot[cnt++] = pIdx->tnum;
        }
      }
      aRoot[cnt] = 0;

      /* Make sure sufficient number of registers have been allocated */
      pParse->nMem = MAX( pParse->nMem, 14 );

      /* Do the b-tree integrity checks */
      sqlite3VdbeAddOp4(v, OP_IntegrityCk, 2, cnt, 1, (char*)aRoot,P4_INTARRAY);
      sqlite3VdbeChangeP5(v, (u8)i);
      addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); VdbeCoverage(v);
      sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0,
         sqlite3MPrintf(db, "*** in database %s ***\n", db->aDb[i].zName),
         P4_DYNAMIC);
      sqlite3VdbeAddOp3(v, OP_Move, 2, 4, 1);
      sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 2);
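The rewritten integrity_check code above gathers root pages with a count pass, one exact-sized allocation, and a fill pass that ends with a 0 terminator (the array is then handed to OP_IntegrityCk as a P4_INTARRAY). A generic sketch of that count-then-fill pattern, using a hypothetical Item list and plain malloc() in place of sqlite3DbMallocRawNN():

#include <stdlib.h>

typedef struct Item Item;
struct Item {
  int rootPage;
  Item *pNext;
};

/* Build a 0-terminated array of root pages from a linked list.
** Returns NULL on allocation failure; the caller frees the result. */
static int *collectRoots(Item *pList){
  int cnt = 0;
  Item *p;
  int *aRoot;

  /* Pass 1: count entries so a single exact-sized allocation suffices. */
  for(p=pList; p; p=p->pNext) cnt++;

  /* One extra slot for the terminator, mirroring aRoot[cnt] = 0 above. */
  aRoot = malloc(sizeof(int)*(cnt+1));
  if( aRoot==0 ) return 0;

  /* Pass 2: fill the array in list order and terminate it. */
  cnt = 0;
  for(p=pList; p; p=p->pNext) aRoot[cnt++] = p->rootPage;
  aRoot[cnt] = 0;
  return aRoot;
}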
Changes to src/select.c.
        SELECTTRACE(0x100,pParse,p,("After WHERE-clause push-down:\n"));
        sqlite3TreeViewSelect(0, p, 0);
      }
#endif
    }

    /* Generate code to implement the subquery












    */

    if( pTabList->nSrc==1

     && (p->selFlags & SF_All)==0
     && OptimizationEnabled(db, SQLITE_SubqCoroutine)
    ){
      /* Implement a co-routine that will return a single row of the result
      ** set on each invocation.
      */
      int addrTop = sqlite3VdbeCurrentAddr(v)+1;
      pItem->regReturn = ++pParse->nMem;
      sqlite3VdbeAddOp3(v, OP_InitCoroutine, pItem->regReturn, 0, addrTop);







        SELECTTRACE(0x100,pParse,p,("After WHERE-clause push-down:\n"));
        sqlite3TreeViewSelect(0, p, 0);
      }
#endif
    }

    /* Generate code to implement the subquery
    **
    ** The subquery is implemented as a co-routine if all of these are true:
    **   (1)  The subquery is guaranteed to be the outer loop (so that it
    **        does not need to be computed more than once)
    **   (2)  The ALL keyword after SELECT is omitted.  (Applications are
    **        allowed to say "SELECT ALL" instead of just "SELECT" to disable
    **        the use of co-routines.)
    **   (3)  Co-routines are not disabled using sqlite3_test_control()
    **        with SQLITE_TESTCTRL_OPTIMIZATIONS.
    **
    ** TODO: Are there other reasons beside (1) to use a co-routine
    ** implementation?
    */
    if( i==0
     && (pTabList->nSrc==1
            || (pTabList->a[1].fg.jointype&(JT_LEFT|JT_CROSS))!=0)  /* (1) */
     && (p->selFlags & SF_All)==0                                   /* (2) */
     && OptimizationEnabled(db, SQLITE_SubqCoroutine)               /* (3) */
    ){
      /* Implement a co-routine that will return a single row of the result
      ** set on each invocation.
      */
      int addrTop = sqlite3VdbeCurrentAddr(v)+1;
      pItem->regReturn = ++pParse->nMem;
      sqlite3VdbeAddOp3(v, OP_InitCoroutine, pItem->regReturn, 0, addrTop);
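As the new comment block above explains, spelling a FROM-clause subquery with SELECT ALL is the documented way to opt out of the co-routine strategy. Purely as an illustration (table and column names are invented):

/* With a single-table FROM clause the subquery satisfies condition (1),
** so it may be run as a co-routine, producing one row per invocation. */
static const char *zCoroutineEligible =
  "SELECT x+1 FROM (SELECT a AS x FROM t1 WHERE a>10);";

/* Writing SELECT ALL clears condition (2), so the same subquery is instead
** computed up front and its result stored before the outer query runs. */
static const char *zMaterialized =
  "SELECT x+1 FROM (SELECT ALL a AS x FROM t1 WHERE a>10);";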
Changes to src/sqliteInt.h.
#if defined(SQLITE_HAVE_OS_TRACE) || defined(SQLITE_TEST) || \
    (defined(SQLITE_DEBUG) && SQLITE_OS_WIN)
# define SQLITE_NEED_ERR_NAME
#else
# undef  SQLITE_NEED_ERR_NAME
#endif








/*
** Return true (non-zero) if the input is an integer that is too large
** to fit in 32-bits.  This macro is used inside of various testcase()
** macros to verify that we have tested SQLite for large-file support.
*/
#define IS_BIG_INT(X)  (((X)&~(i64)0xffffffff)!=0)








#if defined(SQLITE_HAVE_OS_TRACE) || defined(SQLITE_TEST) || \
    (defined(SQLITE_DEBUG) && SQLITE_OS_WIN)
# define SQLITE_NEED_ERR_NAME
#else
# undef  SQLITE_NEED_ERR_NAME
#endif

/*
** SQLITE_ENABLE_EXPLAIN_COMMENTS is incompatible with SQLITE_OMIT_EXPLAIN
*/
#ifdef SQLITE_OMIT_EXPLAIN
# undef SQLITE_ENABLE_EXPLAIN_COMMENTS
#endif

/*
** Return true (non-zero) if the input is an integer that is too large
** to fit in 32-bits.  This macro is used inside of various testcase()
** macros to verify that we have tested SQLite for large-file support.
*/
#define IS_BIG_INT(X)  (((X)&~(i64)0xffffffff)!=0)

** "BusyHandler" typedefs. vdbe.h also requires a few of the opaque
** pointer types (i.e. FuncDef) defined above.
*/
#include "btree.h"
#include "vdbe.h"
#include "pager.h"
#include "pcache.h"

#include "os.h"
#include "mutex.h"
































/*
** Each database file to be accessed by the system is an instance
** of the following structure.  There are normally two of these structures
** in the sqlite.aDb[] array.  aDb[0] is the main database file and
** aDb[1] is the database file used to hold temporary tables.  Additional
** databases may be attached.
*/
struct Db {
  char *zName;         /* Name of this database */
  Btree *pBt;          /* The B*Tree structure for this database file */
  u8 safety_level;     /* How aggressive at syncing data to disk */

  Schema *pSchema;     /* Pointer to database schema (possibly shared) */
};

/*
** An instance of the following structure stores a database schema.
**
** Most Schema objects are associated with a Btree.  The exception is







** "BusyHandler" typedefs. vdbe.h also requires a few of the opaque
** pointer types (i.e. FuncDef) defined above.
*/
#include "btree.h"
#include "vdbe.h"
#include "pager.h"
#include "pcache.h"

#include "os.h"
#include "mutex.h"

/* The SQLITE_EXTRA_DURABLE compile-time option used to set the default
** synchronous setting to EXTRA.  It is no longer supported.
*/
#ifdef SQLITE_EXTRA_DURABLE
# warning Use SQLITE_DEFAULT_SYNCHRONOUS=3 instead of SQLITE_EXTRA_DURABLE
# define SQLITE_DEFAULT_SYNCHRONOUS 3
#endif

/*
** Default synchronous levels.
**
** Note that (for historcal reasons) the PAGER_SYNCHRONOUS_* macros differ
** from the SQLITE_DEFAULT_SYNCHRONOUS value by 1.
**
**           PAGER_SYNCHRONOUS       DEFAULT_SYNCHRONOUS
**   OFF           1                         0
**   NORMAL        2                         1
**   FULL          3                         2
**   EXTRA         4                         3
**
** The "PRAGMA synchronous" statement also uses the zero-based numbers.
** In other words, the zero-based numbers are used for all external interfaces
** and the one-based values are used internally.
*/
#ifndef SQLITE_DEFAULT_SYNCHRONOUS
# define SQLITE_DEFAULT_SYNCHRONOUS (PAGER_SYNCHRONOUS_FULL-1)
#endif
#ifndef SQLITE_DEFAULT_WAL_SYNCHRONOUS
# define SQLITE_DEFAULT_WAL_SYNCHRONOUS SQLITE_DEFAULT_SYNCHRONOUS
#endif

/*
** Each database file to be accessed by the system is an instance
** of the following structure.  There are normally two of these structures
** in the sqlite.aDb[] array.  aDb[0] is the main database file and
** aDb[1] is the database file used to hold temporary tables.  Additional
** databases may be attached.
*/
struct Db {
  char *zName;         /* Name of this database */
  Btree *pBt;          /* The B*Tree structure for this database file */
  u8 safety_level;     /* How aggressive at syncing data to disk */
  u8 bSyncSet;         /* True if "PRAGMA synchronous=N" has been run */
  Schema *pSchema;     /* Pointer to database schema (possibly shared) */
};

/*
** An instance of the following structure stores a database schema.
**
** Most Schema objects are associated with a Btree.  The exception is
#define BMS  ((int)(sizeof(Bitmask)*8))

/*
** A bit in a Bitmask
*/
#define MASKBIT(n)   (((Bitmask)1)<<(n))
#define MASKBIT32(n) (((unsigned int)1)<<(n))


/*
** The following structure describes the FROM clause of a SELECT statement.
** Each table or subquery in the FROM clause is a separate element of
** the SrcList.a[] array.
**
** With the addition of multiple database support, the following structure







#define BMS  ((int)(sizeof(Bitmask)*8))

/*
** A bit in a Bitmask
*/
#define MASKBIT(n)   (((Bitmask)1)<<(n))
#define MASKBIT32(n) (((unsigned int)1)<<(n))
#define ALLBITS      ((Bitmask)-1)

/*
** The following structure describes the FROM clause of a SELECT statement.
** Each table or subquery in the FROM clause is a separate element of
** the SrcList.a[] array.
**
** With the addition of multiple database support, the following structure
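A small sketch of the off-by-one numbering documented in the new sqliteInt.h comment above: the external, compile-time default is zero based while the internal PAGER_SYNCHRONOUS_* levels are one based, so converting between them is a single +1. The macro values below merely repeat the table above for illustration:

#include <stdio.h>

/* One-based internal pager levels, as listed in the comment above. */
#define PAGER_SYNCHRONOUS_OFF    1
#define PAGER_SYNCHRONOUS_NORMAL 2
#define PAGER_SYNCHRONOUS_FULL   3
#define PAGER_SYNCHRONOUS_EXTRA  4

/* Zero-based external default, e.g. set with -DSQLITE_DEFAULT_SYNCHRONOUS=3. */
#ifndef SQLITE_DEFAULT_SYNCHRONOUS
# define SQLITE_DEFAULT_SYNCHRONOUS (PAGER_SYNCHRONOUS_FULL-1)
#endif

int main(void){
  int internalLevel = SQLITE_DEFAULT_SYNCHRONOUS + 1;   /* back to one-based */
  printf("default PRAGMA synchronous = %d (internal pager level %d)\n",
         SQLITE_DEFAULT_SYNCHRONOUS, internalLevel);
  return 0;
}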
Changes to src/tclsqlite.c.

  /*
  **     $db rekey KEY
  **
  ** Change the encryption key on the currently open database.
  */
  case DB_REKEY: {
#ifdef SQLITE_HAS_CODEC
    int nKey;
    void *pKey;
#endif
    if( objc!=3 ){
      Tcl_WrongNumArgs(interp, 2, objv, "KEY");
      return TCL_ERROR;
    }
#ifdef SQLITE_HAS_CODEC
    pKey = Tcl_GetByteArrayFromObj(objv[2], &nKey);
    rc = sqlite3_rekey(pDb->db, pKey, nKey);
    if( rc ){
      Tcl_AppendResult(interp, sqlite3_errstr(rc), (char*)0);
      rc = TCL_ERROR;
    }
#endif








  /*
  **     $db rekey KEY
  **
  ** Change the encryption key on the currently open database.
  */
  case DB_REKEY: {
#if defined(SQLITE_HAS_CODEC) && !defined(SQLITE_OMIT_CODEC_FROM_TCL)
    int nKey;
    void *pKey;
#endif
    if( objc!=3 ){
      Tcl_WrongNumArgs(interp, 2, objv, "KEY");
      return TCL_ERROR;
    }
#if defined(SQLITE_HAS_CODEC) && !defined(SQLITE_OMIT_CODEC_FROM_TCL)
    pKey = Tcl_GetByteArrayFromObj(objv[2], &nKey);
    rc = sqlite3_rekey(pDb->db, pKey, nKey);
    if( rc ){
      Tcl_AppendResult(interp, sqlite3_errstr(rc), (char*)0);
      rc = TCL_ERROR;
    }
#endif
  const char *zArg;
  char *zErrMsg;
  int i;
  const char *zFile;
  const char *zVfs = 0;
  int flags;
  Tcl_DString translatedFilename;
#ifdef SQLITE_HAS_CODEC
  void *pKey = 0;
  int nKey = 0;
#endif
  int rc;

  /* In normal use, each TCL interpreter runs in a single thread.  So
  ** by default, we can turn of mutexing on SQLite database connections.







  const char *zArg;
  char *zErrMsg;
  int i;
  const char *zFile;
  const char *zVfs = 0;
  int flags;
  Tcl_DString translatedFilename;
#if defined(SQLITE_HAS_CODEC) && !defined(SQLITE_OMIT_CODEC_FROM_TCL)
  void *pKey = 0;
  int nKey = 0;
#endif
  int rc;

  /* In normal use, each TCL interpreter runs in a single thread.  So
  ** by default, we can turn of mutexing on SQLite database connections.
      return TCL_OK;
    }
    if( strcmp(zArg,"-sourceid")==0 ){
      Tcl_AppendResult(interp,sqlite3_sourceid(), (char*)0);
      return TCL_OK;
    }
    if( strcmp(zArg,"-has-codec")==0 ){
#ifdef SQLITE_HAS_CODEC
      Tcl_AppendResult(interp,"1",(char*)0);
#else
      Tcl_AppendResult(interp,"0",(char*)0);
#endif
      return TCL_OK;
    }
  }
  for(i=3; i+1<objc; i+=2){
    zArg = Tcl_GetString(objv[i]);
    if( strcmp(zArg,"-key")==0 ){
#ifdef SQLITE_HAS_CODEC
      pKey = Tcl_GetByteArrayFromObj(objv[i+1], &nKey);
#endif
    }else if( strcmp(zArg, "-vfs")==0 ){
      zVfs = Tcl_GetString(objv[i+1]);
    }else if( strcmp(zArg, "-readonly")==0 ){
      int b;
      if( Tcl_GetBooleanFromObj(interp, objv[i+1], &b) ) return TCL_ERROR;







|










|







3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
      return TCL_OK;
    }
    if( strcmp(zArg,"-sourceid")==0 ){
      Tcl_AppendResult(interp,sqlite3_sourceid(), (char*)0);
      return TCL_OK;
    }
    if( strcmp(zArg,"-has-codec")==0 ){
#if defined(SQLITE_HAS_CODEC) && !defined(SQLITE_OMIT_CODEC_FROM_TCL)
      Tcl_AppendResult(interp,"1",(char*)0);
#else
      Tcl_AppendResult(interp,"0",(char*)0);
#endif
      return TCL_OK;
    }
  }
  for(i=3; i+1<objc; i+=2){
    zArg = Tcl_GetString(objv[i]);
    if( strcmp(zArg,"-key")==0 ){
#if defined(SQLITE_HAS_CODEC) && !defined(SQLITE_OMIT_CODEC_FROM_TCL)
      pKey = Tcl_GetByteArrayFromObj(objv[i+1], &nKey);
#endif
    }else if( strcmp(zArg, "-vfs")==0 ){
      zVfs = Tcl_GetString(objv[i+1]);
    }else if( strcmp(zArg, "-readonly")==0 ){
      int b;
      if( Tcl_GetBooleanFromObj(interp, objv[i+1], &b) ) return TCL_ERROR;
      return TCL_ERROR;
    }
  }
  if( objc<3 || (objc&1)!=1 ){
    Tcl_WrongNumArgs(interp, 1, objv, 
      "HANDLE FILENAME ?-vfs VFSNAME? ?-readonly BOOLEAN? ?-create BOOLEAN?"
      " ?-nomutex BOOLEAN? ?-fullmutex BOOLEAN? ?-uri BOOLEAN?"
#ifdef SQLITE_HAS_CODEC
      " ?-key CODECKEY?"
#endif
    );
    return TCL_ERROR;
  }
  zErrMsg = 0;
  p = (SqliteDb*)Tcl_Alloc( sizeof(*p) );







      return TCL_ERROR;
    }
  }
  if( objc<3 || (objc&1)!=1 ){
    Tcl_WrongNumArgs(interp, 1, objv, 
      "HANDLE FILENAME ?-vfs VFSNAME? ?-readonly BOOLEAN? ?-create BOOLEAN?"
      " ?-nomutex BOOLEAN? ?-fullmutex BOOLEAN? ?-uri BOOLEAN?"
#if defined(SQLITE_HAS_CODEC) && !defined(SQLITE_OMIT_CODEC_FROM_TCL)
      " ?-key CODECKEY?"
#endif
    );
    return TCL_ERROR;
  }
  zErrMsg = 0;
  p = (SqliteDb*)Tcl_Alloc( sizeof(*p) );
      zErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(p->db));
      sqlite3_close(p->db);
      p->db = 0;
    }
  }else{
    zErrMsg = sqlite3_mprintf("%s", sqlite3_errstr(rc));
  }
#ifdef SQLITE_HAS_CODEC
  if( p->db ){
    sqlite3_key(p->db, pKey, nKey);
  }
#endif
  if( p->db==0 ){
    Tcl_SetResult(interp, zErrMsg, TCL_VOLATILE);
    Tcl_Free((char*)p);







      zErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(p->db));
      sqlite3_close(p->db);
      p->db = 0;
    }
  }else{
    zErrMsg = sqlite3_mprintf("%s", sqlite3_errstr(rc));
  }
#if defined(SQLITE_HAS_CODEC) && !defined(SQLITE_OMIT_CODEC_FROM_TCL)
  if( p->db ){
    sqlite3_key(p->db, pKey, nKey);
  }
#endif
  if( p->db==0 ){
    Tcl_SetResult(interp, zErrMsg, TCL_VOLATILE);
    Tcl_Free((char*)p);
Changes to src/test1.c.
*/
static int test_key(
  void *NotUsed,
  Tcl_Interp *interp,    /* The TCL interpreter that invoked this command */
  int argc,              /* Number of arguments */
  char **argv            /* Text of each argument */
){
#ifdef SQLITE_HAS_CODEC
  sqlite3 *db;
  const char *zKey;
  int nKey;
  if( argc!=3 ){
    Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
       " FILENAME\"", 0);
    return TCL_ERROR;







*/
static int test_key(
  void *NotUsed,
  Tcl_Interp *interp,    /* The TCL interpreter that invoked this command */
  int argc,              /* Number of arguments */
  char **argv            /* Text of each argument */
){
#if defined(SQLITE_HAS_CODEC) && !defined(SQLITE_OMIT_CODEC_FROM_TCL)
  sqlite3 *db;
  const char *zKey;
  int nKey;
  if( argc!=3 ){
    Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
       " FILENAME\"", 0);
    return TCL_ERROR;
Changes to src/update.c.
  chngKey = chngRowid + chngPk;

  /* The SET expressions are not actually used inside the WHERE loop.  
  ** So reset the colUsed mask. Unless this is a virtual table. In that
  ** case, set all bits of the colUsed mask (to ensure that the virtual
  ** table implementation makes all columns available).
  */
  pTabList->a[0].colUsed = IsVirtual(pTab) ? (Bitmask)-1 : 0;

  hasFK = sqlite3FkRequired(pParse, pTab, aXRef, chngKey);

  /* There is one entry in the aRegIdx[] array for each index on the table
  ** being updated.  Fill in aRegIdx[] with a register number that will hold
  ** the key for accessing each index.
  **







  chngKey = chngRowid + chngPk;

  /* The SET expressions are not actually used inside the WHERE loop.  
  ** So reset the colUsed mask. Unless this is a virtual table. In that
  ** case, set all bits of the colUsed mask (to ensure that the virtual
  ** table implementation makes all columns available).
  */
  pTabList->a[0].colUsed = IsVirtual(pTab) ? ALLBITS : 0;

  hasFK = sqlite3FkRequired(pParse, pTab, aXRef, chngKey);

  /* There is one entry in the aRegIdx[] array for each index on the table
  ** being updated.  Fill in aRegIdx[] with a register number that will hold
  ** the key for accessing each index.
  **
Changes to src/util.c.
u64 sqlite3LogEstToInt(LogEst x){
  u64 n;
  if( x<10 ) return 1;
  n = x%10;
  x /= 10;
  if( n>=5 ) n -= 2;
  else if( n>=1 ) n -= 1;
  if( x>=3 ){

    return x>60 ? (u64)LARGEST_INT64 : (n+8)<<(x-3);
  }





  return (n+8)>>(3-x);
}
#endif /* defined SCANSTAT or STAT4 or ESTIMATED_ROWS */







u64 sqlite3LogEstToInt(LogEst x){
  u64 n;
  if( x<10 ) return 1;
  n = x%10;
  x /= 10;
  if( n>=5 ) n -= 2;
  else if( n>=1 ) n -= 1;
#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || \
    defined(SQLITE_EXPLAIN_ESTIMATED_ROWS)
  if( x>60 ) return (u64)LARGEST_INT64;

#else
  /* If only SQLITE_ENABLE_STAT3_OR_STAT4 is on, then the largest input
  ** possible to this routine is 310, resulting in a maximum x of 31 */
  assert( x<=60 );
#endif
  return x>=3 ? (n+8)<<(x-3) : (n+8)>>(3-x);
}
#endif /* defined SCANSTAT or STAT4 or ESTIMATED_ROWS */
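A worked example of the conversion above: a LogEst is roughly 10*log2(N), so an input of 40 gives n = 40%10 = 0 and x = 40/10 = 4, and the result is (0+8)<<(4-3) = 16 = 2^4. A standalone re-implementation of the same arithmetic for experimentation (the function name and clamp constant are local to this sketch):

#include <stdio.h>
#include <stdint.h>

/* Convert a LogEst (approximately 10*log2(N)) back to an ordinary integer
** estimate, following the arithmetic of sqlite3LogEstToInt() above. */
static uint64_t logEstToInt(int x){
  uint64_t n;
  if( x<10 ) return 1;
  n = x%10;
  x /= 10;
  if( n>=5 ) n -= 2;
  else if( n>=1 ) n -= 1;
  if( x>60 ) return (uint64_t)0x7fffffffffffffffULL;   /* clamp large inputs */
  return x>=3 ? (n+8)<<(x-3) : (n+8)>>(3-x);
}

int main(void){
  /* 40 ~ 10*log2(16), 33 ~ 10*log2(10), 99 ~ 10*log2(1000). */
  printf("%llu %llu %llu\n",
         (unsigned long long)logEstToInt(40),    /* 16  */
         (unsigned long long)logEstToInt(33),    /* 10  */
         (unsigned long long)logEstToInt(99));   /* 960 */
  return 0;
}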
Changes to src/vdbe.c.
case OP_DropTrigger: {
  sqlite3UnlinkAndDeleteTrigger(db, pOp->p1, pOp->p4.z);
  break;
}


#ifndef SQLITE_OMIT_INTEGRITY_CHECK
/* Opcode: IntegrityCk P1 P2 P3 * P5
**
** Do an analysis of the currently open database.  Store in
** register P1 the text of an error message describing any problems.
** If no problems are found, store a NULL in register P1.
**
** The register P3 contains the maximum number of allowed errors.
** At most reg(P3) errors will be reported.
** In other words, the analysis stops as soon as reg(P1) errors are 
** seen.  Reg(P1) is updated with the number of errors remaining.
**
** The root page numbers of all tables in the database are integer
** stored in reg(P1), reg(P1+1), reg(P1+2), ....  There are P2 tables
** total.
**
** If P5 is not zero, the check is done on the auxiliary database
** file, not the main database file.
**
** This opcode is used to implement the integrity_check pragma.
*/
case OP_IntegrityCk: {
  int nRoot;      /* Number of tables to check.  (Number of root pages.) */
  int *aRoot;     /* Array of rootpage numbers for tables to be checked */
  int j;          /* Loop counter */
  int nErr;       /* Number of errors reported */
  char *z;        /* Text of the error report */
  Mem *pnErr;     /* Register keeping track of errors remaining */

  assert( p->bIsReader );
  nRoot = pOp->p2;

  assert( nRoot>0 );
  aRoot = sqlite3DbMallocRawNN(db, sizeof(int)*(nRoot+1) );
  if( aRoot==0 ) goto no_mem;
  assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) );
  pnErr = &aMem[pOp->p3];
  assert( (pnErr->flags & MEM_Int)!=0 );
  assert( (pnErr->flags & (MEM_Str|MEM_Blob))==0 );
  pIn1 = &aMem[pOp->p1];
  for(j=0; j<nRoot; j++){
    aRoot[j] = (int)sqlite3VdbeIntValue(&pIn1[j]);
  }
  aRoot[j] = 0;
  assert( pOp->p5<db->nDb );
  assert( DbMaskTest(p->btreeMask, pOp->p5) );
  z = sqlite3BtreeIntegrityCheck(db->aDb[pOp->p5].pBt, aRoot, nRoot,
                                 (int)pnErr->u.i, &nErr);
  sqlite3DbFree(db, aRoot);
  pnErr->u.i -= nErr;
  sqlite3VdbeMemSetNull(pIn1);
  if( nErr==0 ){
    assert( z==0 );
  }else if( z==0 ){
    goto no_mem;
  }else{







case OP_DropTrigger: {
  sqlite3UnlinkAndDeleteTrigger(db, pOp->p1, pOp->p4.z);
  break;
}


#ifndef SQLITE_OMIT_INTEGRITY_CHECK
/* Opcode: IntegrityCk P1 P2 P3 P4 P5
**
** Do an analysis of the currently open database.  Store in
** register P1 the text of an error message describing any problems.
** If no problems are found, store a NULL in register P1.
**
** The register P3 contains the maximum number of allowed errors.
** At most reg(P3) errors will be reported.
** In other words, the analysis stops as soon as reg(P1) errors are 
** seen.  Reg(P1) is updated with the number of errors remaining.
**
** The root page numbers of all tables in the database are integers
** stored in P4_INTARRAY argument.

**
** If P5 is not zero, the check is done on the auxiliary database
** file, not the main database file.
**
** This opcode is used to implement the integrity_check pragma.
*/
case OP_IntegrityCk: {
  int nRoot;      /* Number of tables to check.  (Number of root pages.) */
  int *aRoot;     /* Array of rootpage numbers for tables to be checked */

  int nErr;       /* Number of errors reported */
  char *z;        /* Text of the error report */
  Mem *pnErr;     /* Register keeping track of errors remaining */

  assert( p->bIsReader );
  nRoot = pOp->p2;
  aRoot = pOp->p4.ai;
  assert( nRoot>0 );

  assert( aRoot[nRoot]==0 );
  assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) );
  pnErr = &aMem[pOp->p3];
  assert( (pnErr->flags & MEM_Int)!=0 );
  assert( (pnErr->flags & (MEM_Str|MEM_Blob))==0 );
  pIn1 = &aMem[pOp->p1];




  assert( pOp->p5<db->nDb );
  assert( DbMaskTest(p->btreeMask, pOp->p5) );
  z = sqlite3BtreeIntegrityCheck(db->aDb[pOp->p5].pBt, aRoot, nRoot,
                                 (int)pnErr->u.i, &nErr);

  pnErr->u.i -= nErr;
  sqlite3VdbeMemSetNull(pIn1);
  if( nErr==0 ){
    assert( z==0 );
  }else if( z==0 ){
    goto no_mem;
  }else{
Changes to src/vdbeInt.h.
  int pc;                 /* The program counter */
  int rc;                 /* Value to return */
#ifdef SQLITE_DEBUG
  int rcApp;              /* errcode set by sqlite3_result_error_code() */
#endif
  u16 nResColumn;         /* Number of columns in one row of the result set */
  u8 errorAction;         /* Recovery action to do in case of an error */


  u8 minWriteFileFormat;  /* Minimum file format for writable database files */
  bft explain:2;          /* True if EXPLAIN present on SQL command */
  bft changeCntOn:1;      /* True to update the change-counter */
  bft expired:1;          /* True if the VM needs to be recompiled */
  bft runOnlyOnce:1;      /* Automatically expire on reset */
  bft usesStmtJournal:1;  /* True if uses a statement journal */
  bft readOnly:1;         /* True for statements that do not write */
  bft bIsReader:1;        /* True for statements that read */
  bft isPrepareV2:1;      /* True if prepared with prepare_v2() */
  bft doingRerun:1;       /* True if rerunning after an auto-reprepare */
  int nChange;            /* Number of db changes made since last reset */
  yDbMask btreeMask;      /* Bitmask of db->aDb[] entries referenced */
  yDbMask lockMask;       /* Subset of btreeMask that requires a lock */
  int iStatement;         /* Statement number (or 0 if has not opened stmt) */
  u32 aCounter[5];        /* Counters used by sqlite3_stmt_status() */
#ifndef SQLITE_OMIT_TRACE
  i64 startTime;          /* Time when query started - used for profiling */







  int pc;                 /* The program counter */
  int rc;                 /* Value to return */
#ifdef SQLITE_DEBUG
  int rcApp;              /* errcode set by sqlite3_result_error_code() */
#endif
  u16 nResColumn;         /* Number of columns in one row of the result set */
  u8 errorAction;         /* Recovery action to do in case of an error */
  bft expired:1;          /* True if the VM needs to be recompiled */
  bft doingRerun:1;       /* True if rerunning after an auto-reprepare */
  u8 minWriteFileFormat;  /* Minimum file format for writable database files */
  bft explain:2;          /* True if EXPLAIN present on SQL command */
  bft changeCntOn:1;      /* True to update the change-counter */

  bft runOnlyOnce:1;      /* Automatically expire on reset */
  bft usesStmtJournal:1;  /* True if uses a statement journal */
  bft readOnly:1;         /* True for statements that do not write */
  bft bIsReader:1;        /* True for statements that read */
  bft isPrepareV2:1;      /* True if prepared with prepare_v2() */

  int nChange;            /* Number of db changes made since last reset */
  yDbMask btreeMask;      /* Bitmask of db->aDb[] entries referenced */
  yDbMask lockMask;       /* Subset of btreeMask that requires a lock */
  int iStatement;         /* Statement number (or 0 if has not opened stmt) */
  u32 aCounter[5];        /* Counters used by sqlite3_stmt_status() */
#ifndef SQLITE_OMIT_TRACE
  i64 startTime;          /* Time when query started - used for profiling */
Changes to src/where.c.
**
** Whether or not an error is returned, it is the responsibility of the
** caller to eventually free p->idxStr if p->needToFreeIdxStr indicates
** that this is required.
*/
static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){
  sqlite3_vtab *pVtab = sqlite3GetVTable(pParse->db, pTab)->pVtab;
  int i;
  int rc;

  TRACE_IDX_INPUTS(p);
  rc = pVtab->pModule->xBestIndex(pVtab, p);
  TRACE_IDX_OUTPUTS(p);

  if( rc!=SQLITE_OK ){
    if( rc==SQLITE_NOMEM ){
      sqlite3OomFault(pParse->db);
    }else if( !pVtab->zErrMsg ){
      sqlite3ErrorMsg(pParse, "%s", sqlite3ErrStr(rc));
    }else{
      sqlite3ErrorMsg(pParse, "%s", pVtab->zErrMsg);
    }
  }
  sqlite3_free(pVtab->zErrMsg);
  pVtab->zErrMsg = 0;




  for(i=0; i<p->nConstraint; i++){
    if( !p->aConstraint[i].usable && p->aConstraintUsage[i].argvIndex>0 ){
      sqlite3ErrorMsg(pParse, 
          "table %s: xBestIndex returned an invalid plan", pTab->zName);
    }
  }


  return pParse->nErr;
}
#endif /* !defined(SQLITE_OMIT_VIRTUALTABLE) */

#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
/*







**
** Whether or not an error is returned, it is the responsibility of the
** caller to eventually free p->idxStr if p->needToFreeIdxStr indicates
** that this is required.
*/
static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){
  sqlite3_vtab *pVtab = sqlite3GetVTable(pParse->db, pTab)->pVtab;

  int rc;

  TRACE_IDX_INPUTS(p);
  rc = pVtab->pModule->xBestIndex(pVtab, p);
  TRACE_IDX_OUTPUTS(p);

  if( rc!=SQLITE_OK ){
    if( rc==SQLITE_NOMEM ){
      sqlite3OomFault(pParse->db);
    }else if( !pVtab->zErrMsg ){
      sqlite3ErrorMsg(pParse, "%s", sqlite3ErrStr(rc));
    }else{
      sqlite3ErrorMsg(pParse, "%s", pVtab->zErrMsg);
    }
  }
  sqlite3_free(pVtab->zErrMsg);
  pVtab->zErrMsg = 0;

#if 0
  /* This error is now caught by the caller.
  ** Search for "xBestIndex malfunction" below */
  for(i=0; i<p->nConstraint; i++){
    if( !p->aConstraint[i].usable && p->aConstraintUsage[i].argvIndex>0 ){
      sqlite3ErrorMsg(pParse, 
          "table %s: xBestIndex returned an invalid plan", pTab->zName);
    }
  }
#endif

  return pParse->nErr;
}
#endif /* !defined(SQLITE_OMIT_VIRTUALTABLE) */

#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
/*
**    (3)  The template has same or fewer dependencies than the current loop
**    (4)  The template has the same or lower cost than the current loop
*/
static int whereLoopInsert(WhereLoopBuilder *pBuilder, WhereLoop *pTemplate){
  WhereLoop **ppPrev, *p;
  WhereInfo *pWInfo = pBuilder->pWInfo;
  sqlite3 *db = pWInfo->pParse->db;


  /* If pBuilder->pOrSet is defined, then only keep track of the costs
  ** and prereqs.
  */
  if( pBuilder->pOrSet!=0 ){
    if( pTemplate->nLTerm ){
#if WHERETRACE_ENABLED







**    (3)  The template has same or fewer dependencies than the current loop
**    (4)  The template has the same or lower cost than the current loop
*/
static int whereLoopInsert(WhereLoopBuilder *pBuilder, WhereLoop *pTemplate){
  WhereLoop **ppPrev, *p;
  WhereInfo *pWInfo = pBuilder->pWInfo;
  sqlite3 *db = pWInfo->pParse->db;
  int rc;

  /* If pBuilder->pOrSet is defined, then only keep track of the costs
  ** and prereqs.
  */
  if( pBuilder->pOrSet!=0 ){
    if( pTemplate->nLTerm ){
#if WHERETRACE_ENABLED
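Rules (3) and (4) quoted at the top of this hunk describe when a candidate loop template may supplant an existing loop: it must not depend on any extra tables and must not cost more. A deliberately simplified, stand-alone sketch of that dominance test, using placeholder types rather than the real WhereLoop (the real comparison also weighs rSetup and nOut):

typedef struct MiniLoop MiniLoop;
struct MiniLoop {
  unsigned long long prereq;   /* bitmask of tables this loop depends on */
  int rRun;                    /* estimated run cost */
};

/* True if pNew may supplant pOld under rules (3) and (4). */
static int miniSupersedes(const MiniLoop *pNew, const MiniLoop *pOld){
  if( (pNew->prereq & ~pOld->prereq)!=0 ) return 0;   /* rule (3) fails */
  if( pNew->rRun > pOld->rRun ) return 0;             /* rule (4) fails */
  return 1;
}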
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
        sqlite3DebugPrintf(" delete: ");
        whereLoopPrint(pToDel, pBuilder->pWC);
      }
#endif
      whereLoopDelete(db, pToDel);
    }
  }
  whereLoopXfer(db, p, pTemplate);
  if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){
    Index *pIndex = p->u.btree.pIndex;
    if( pIndex && pIndex->tnum==0 ){
      p->u.btree.pIndex = 0;
    }
  }
  return SQLITE_OK;
}

/*
** Adjust the WhereLoop.nOut value downward to account for terms of the
** WHERE clause that reference the loop but which are not used by an
** index.
*







|






|







2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
        sqlite3DebugPrintf(" delete: ");
        whereLoopPrint(pToDel, pBuilder->pWC);
      }
#endif
      whereLoopDelete(db, pToDel);
    }
  }
  rc = whereLoopXfer(db, p, pTemplate);
  if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){
    Index *pIndex = p->u.btree.pIndex;
    if( pIndex && pIndex->tnum==0 ){
      p->u.btree.pIndex = 0;
    }
  }
  return rc;
}

/*
** Adjust the WhereLoop.nOut value downward to account for terms of the
** WHERE clause that reference the loop but which are not used by an
** index.
*
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
** log(nRow) factor is omitted from a non-covering index scan in order to
** bias the scoring in favor of using an index, since the worst-case
** performance of using an index is far better than the worst-case performance
** of a full table scan.
*/
static int whereLoopAddBtree(
  WhereLoopBuilder *pBuilder, /* WHERE clause information */
  Bitmask mExtra              /* Extra prerequisites for using this table */
){
  WhereInfo *pWInfo;          /* WHERE analysis context */
  Index *pProbe;              /* An index we are evaluating */
  Index sPk;                  /* A fake index object for the primary key */
  LogEst aiRowEstPk[2];       /* The aiRowLogEst[] value for the sPk index */
  i16 aiColumnPk = -1;        /* The aColumn[] value for the sPk index */
  SrcList *pTabList;          /* The FROM clause */







|







2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
** log(nRow) factor is omitted from a non-covering index scan in order to
** bias the scoring in favor of using an index, since the worst-case
** performance of using an index is far better than the worst-case performance
** of a full table scan.
*/
static int whereLoopAddBtree(
  WhereLoopBuilder *pBuilder, /* WHERE clause information */
  Bitmask mPrereq             /* Extra prerequisites for using this table */
){
  WhereInfo *pWInfo;          /* WHERE analysis context */
  Index *pProbe;              /* An index we are evaluating */
  Index sPk;                  /* A fake index object for the primary key */
  LogEst aiRowEstPk[2];       /* The aiRowLogEst[] value for the sPk index */
  i16 aiColumnPk = -1;        /* The aColumn[] value for the sPk index */
  SrcList *pTabList;          /* The FROM clause */
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
        /* TUNING: Each index lookup yields 20 rows in the table.  This
        ** is more than the usual guess of 10 rows, since we have no way
        ** of knowing how selective the index will ultimately be.  It would
        ** not be unreasonable to make this value much larger. */
        pNew->nOut = 43;  assert( 43==sqlite3LogEst(20) );
        pNew->rRun = sqlite3LogEstAdd(rLogSize,pNew->nOut);
        pNew->wsFlags = WHERE_AUTO_INDEX;
        pNew->prereq = mExtra | pTerm->prereqRight;
        rc = whereLoopInsert(pBuilder, pNew);
      }
    }
  }
#endif /* SQLITE_OMIT_AUTOMATIC_INDEX */

  /* Loop over all indices
  */
  for(; rc==SQLITE_OK && pProbe; pProbe=pProbe->pNext, iSortIdx++){
    if( pProbe->pPartIdxWhere!=0
     && !whereUsablePartialIndex(pSrc->iCursor, pWC, pProbe->pPartIdxWhere) ){
      testcase( pNew->iTab!=pSrc->iCursor );  /* See ticket [98d973b8f5] */
      continue;  /* Partial index inappropriate for this query */
    }
    rSize = pProbe->aiRowLogEst[0];
    pNew->u.btree.nEq = 0;
    pNew->nSkip = 0;
    pNew->nLTerm = 0;
    pNew->iSortIdx = 0;
    pNew->rSetup = 0;
    pNew->prereq = mExtra;
    pNew->nOut = rSize;
    pNew->u.btree.pIndex = pProbe;
    b = indexMightHelpWithOrderBy(pBuilder, pProbe, pSrc->iCursor);
    /* The ONEPASS_DESIRED flag never occurs together with ORDER BY */
    assert( (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0 || b==0 );
    if( pProbe->tnum<=0 ){
      /* Integer primary key index */







|




















|







2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
        /* TUNING: Each index lookup yields 20 rows in the table.  This
        ** is more than the usual guess of 10 rows, since we have no way
        ** of knowing how selective the index will ultimately be.  It would
        ** not be unreasonable to make this value much larger. */
        pNew->nOut = 43;  assert( 43==sqlite3LogEst(20) );
        pNew->rRun = sqlite3LogEstAdd(rLogSize,pNew->nOut);
        pNew->wsFlags = WHERE_AUTO_INDEX;
        pNew->prereq = mPrereq | pTerm->prereqRight;
        rc = whereLoopInsert(pBuilder, pNew);
      }
    }
  }
#endif /* SQLITE_OMIT_AUTOMATIC_INDEX */

  /* Loop over all indices
  */
  for(; rc==SQLITE_OK && pProbe; pProbe=pProbe->pNext, iSortIdx++){
    if( pProbe->pPartIdxWhere!=0
     && !whereUsablePartialIndex(pSrc->iCursor, pWC, pProbe->pPartIdxWhere) ){
      testcase( pNew->iTab!=pSrc->iCursor );  /* See ticket [98d973b8f5] */
      continue;  /* Partial index inappropriate for this query */
    }
    rSize = pProbe->aiRowLogEst[0];
    pNew->u.btree.nEq = 0;
    pNew->nSkip = 0;
    pNew->nLTerm = 0;
    pNew->iSortIdx = 0;
    pNew->rSetup = 0;
    pNew->prereq = mPrereq;
    pNew->nOut = rSize;
    pNew->u.btree.pIndex = pProbe;
    b = indexMightHelpWithOrderBy(pBuilder, pProbe, pSrc->iCursor);
    /* The ONEPASS_DESIRED flag never occurs together with ORDER BY */
    assert( (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0 || b==0 );
    if( pProbe->tnum<=0 ){
      /* Integer primary key index */
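The TUNING comment above leans on the LogEst encoding: size and cost estimates are stored as 10*log2(x) rounded to an integer, which is why a guess of 20 rows is written as 43 and checked with assert( 43==sqlite3LogEst(20) ). A floating-point stand-in written purely for illustration (the library's sqlite3LogEst() computes the same values without floating point):

#include <math.h>

/* Illustrative encoder: 10*log2(x), rounded to nearest integer. */
static int miniLogEst(double x){
  return (int)(10.0*log2(x) + 0.5);   /* miniLogEst(20) == 43, miniLogEst(10) == 33 */
}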
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
** A constraint is marked usable if:
**
**   * Argument mUsable indicates that its prerequisites are available, and
**
**   * It is not one of the operators specified in the mExclude mask passed
**     as the fourth argument (which in practice is either WO_IN or 0).
**
** Argument mExtra is a mask of tables that must be scanned before the
** virtual table in question. These are added to the plan's prerequisites
** before it is added to pBuilder.
**
** Output parameter *pbIn is set to true if the plan added to pBuilder
** uses one or more WO_IN terms, or false otherwise.
*/
static int whereLoopAddVirtualOne(
  WhereLoopBuilder *pBuilder,
  Bitmask mExtra,                 /* Mask of tables that must be used. */
  Bitmask mUsable,                /* Mask of usable prereqs */
  u16 mExclude,                   /* Exclude terms for this operator */
  sqlite3_index_info *pIdxInfo,   /* Populated object for xBestIndex */
  int *pbIn                       /* OUT: True if plan uses an IN(...) op */
){
  WhereClause *pWC = pBuilder->pWC;
  struct sqlite3_index_constraint *pIdxCons;
  struct sqlite3_index_constraint_usage *pUsage = pIdxInfo->aConstraintUsage;
  int i;
  int mxTerm;
  int rc = SQLITE_OK;
  WhereLoop *pNew = pBuilder->pNew;
  Parse *pParse = pBuilder->pWInfo->pParse;
  struct SrcList_item *pSrc = &pBuilder->pWInfo->pTabList->a[pNew->iTab];
  int nConstraint = pIdxInfo->nConstraint;

  assert( (mUsable & mExtra)==mExtra );
  *pbIn = 0;
  pNew->prereq = mExtra;

  /* Set the usable flag on the subset of constraints identified by 
  ** arguments mUsable and mExclude. */
  pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint;
  for(i=0; i<nConstraint; i++, pIdxCons++){
    WhereTerm *pTerm = &pWC->a[pIdxCons->iTermOffset];
    pIdxCons->usable = 0;
    if( (pTerm->prereqRight & mUsable)==pTerm->prereqRight 
     && (pTerm->eOperator & mExclude)==0
    ){
      pIdxCons->usable = 1;
    }
  }

  /* Initialize the output fields of the sqlite3_index_info structure */
  memset(pUsage, 0, sizeof(pUsage[0])*nConstraint);
  if( pIdxInfo->needToFreeIdxStr ) sqlite3_free(pIdxInfo->idxStr);
  pIdxInfo->idxStr = 0;
  pIdxInfo->idxNum = 0;
  pIdxInfo->needToFreeIdxStr = 0;
  pIdxInfo->orderByConsumed = 0;
  pIdxInfo->estimatedCost = SQLITE_BIG_DBL / (double)2;
  pIdxInfo->estimatedRows = 25;
  pIdxInfo->idxFlags = 0;
  pIdxInfo->colUsed = (sqlite3_int64)pSrc->colUsed;

  /* Invoke the virtual table xBestIndex() method */







|








|
|
|














|

|
















|


<







2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816

2817
2818
2819
2820
2821
2822
2823
** A constraint is marked usable if:
**
**   * Argument mUsable indicates that its prerequisites are available, and
**
**   * It is not one of the operators specified in the mExclude mask passed
**     as the fourth argument (which in practice is either WO_IN or 0).
**
** Argument mPrereq is a mask of tables that must be scanned before the
** virtual table in question. These are added to the plan's prerequisites
** before it is added to pBuilder.
**
** Output parameter *pbIn is set to true if the plan added to pBuilder
** uses one or more WO_IN terms, or false otherwise.
*/
static int whereLoopAddVirtualOne(
  WhereLoopBuilder *pBuilder,
  Bitmask mPrereq,                /* Mask of tables that must be used. */
  Bitmask mUsable,                /* Mask of usable tables */
  u16 mExclude,                   /* Exclude terms using these operators */
  sqlite3_index_info *pIdxInfo,   /* Populated object for xBestIndex */
  int *pbIn                       /* OUT: True if plan uses an IN(...) op */
){
  WhereClause *pWC = pBuilder->pWC;
  struct sqlite3_index_constraint *pIdxCons;
  struct sqlite3_index_constraint_usage *pUsage = pIdxInfo->aConstraintUsage;
  int i;
  int mxTerm;
  int rc = SQLITE_OK;
  WhereLoop *pNew = pBuilder->pNew;
  Parse *pParse = pBuilder->pWInfo->pParse;
  struct SrcList_item *pSrc = &pBuilder->pWInfo->pTabList->a[pNew->iTab];
  int nConstraint = pIdxInfo->nConstraint;

  assert( (mUsable & mPrereq)==mPrereq );
  *pbIn = 0;
  pNew->prereq = mPrereq;

  /* Set the usable flag on the subset of constraints identified by 
  ** arguments mUsable and mExclude. */
  pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint;
  for(i=0; i<nConstraint; i++, pIdxCons++){
    WhereTerm *pTerm = &pWC->a[pIdxCons->iTermOffset];
    pIdxCons->usable = 0;
    if( (pTerm->prereqRight & mUsable)==pTerm->prereqRight 
     && (pTerm->eOperator & mExclude)==0
    ){
      pIdxCons->usable = 1;
    }
  }

  /* Initialize the output fields of the sqlite3_index_info structure */
  memset(pUsage, 0, sizeof(pUsage[0])*nConstraint);
  assert( pIdxInfo->needToFreeIdxStr==0 );
  pIdxInfo->idxStr = 0;
  pIdxInfo->idxNum = 0;

  pIdxInfo->orderByConsumed = 0;
  pIdxInfo->estimatedCost = SQLITE_BIG_DBL / (double)2;
  pIdxInfo->estimatedRows = 25;
  pIdxInfo->idxFlags = 0;
  pIdxInfo->colUsed = (sqlite3_int64)pSrc->colUsed;

  /* Invoke the virtual table xBestIndex() method */
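The loop above clears aConstraint[].usable for any constraint whose prerequisites are not covered by mUsable; the matching obligation on the virtual-table side is that xBestIndex must never assign an argvIndex to an unusable constraint, otherwise the core reports an "xBestIndex malfunction" (the check appears below). A hedged sketch of a conforming implementation, assuming a hypothetical table whose column 0 is the only indexable column:

static int demovtabBestIndexEq(sqlite3_vtab *pVtab, sqlite3_index_info *p){
  int i;
  (void)pVtab;
  for(i=0; i<p->nConstraint; i++){
    const struct sqlite3_index_constraint *pCons = &p->aConstraint[i];
    if( !pCons->usable ) continue;            /* honor the flag set above */
    if( pCons->iColumn==0 && pCons->op==SQLITE_INDEX_CONSTRAINT_EQ ){
      p->aConstraintUsage[i].argvIndex = 1;   /* hand this value to xFilter */
      p->aConstraintUsage[i].omit = 1;
      p->idxNum = 1;
      p->estimatedCost = 10.0;
      return SQLITE_OK;
    }
  }
  p->idxNum = 0;                              /* fall back to a full scan */
  p->estimatedCost = 1e6;
  return SQLITE_OK;
}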
2831
2832
2833
2834
2835
2836
2837

2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
    if( (iTerm = pUsage[i].argvIndex - 1)>=0 ){
      WhereTerm *pTerm;
      int j = pIdxCons->iTermOffset;
      if( iTerm>=nConstraint
       || j<0
       || j>=pWC->nTerm
       || pNew->aLTerm[iTerm]!=0

      ){
        rc = SQLITE_ERROR;
        sqlite3ErrorMsg(pParse,"%s.xBestIndex() malfunction",pSrc->pTab->zName);
        return rc;
      }
      testcase( iTerm==nConstraint-1 );
      testcase( j==0 );
      testcase( j==pWC->nTerm-1 );
      pTerm = &pWC->a[j];
      pNew->prereq |= pTerm->prereqRight;







>


|







2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
    if( (iTerm = pUsage[i].argvIndex - 1)>=0 ){
      WhereTerm *pTerm;
      int j = pIdxCons->iTermOffset;
      if( iTerm>=nConstraint
       || j<0
       || j>=pWC->nTerm
       || pNew->aLTerm[iTerm]!=0
       || pIdxCons->usable==0
      ){
        rc = SQLITE_ERROR;
        sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pTab->zName);
        return rc;
      }
      testcase( iTerm==nConstraint-1 );
      testcase( j==0 );
      testcase( j==pWC->nTerm-1 );
      pTerm = &pWC->a[j];
      pNew->prereq |= pTerm->prereqRight;
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
        /* A virtual table that is constrained by an IN clause may not
        ** consume the ORDER BY clause because (1) the order of IN terms
        ** is not necessarily related to the order of output terms and
        ** (2) Multiple outputs from a single IN value will not merge
        ** together.  */
        pIdxInfo->orderByConsumed = 0;
        pIdxInfo->idxFlags &= ~SQLITE_INDEX_SCAN_UNIQUE;
        *pbIn = 1;
      }
    }
  }

  pNew->nLTerm = mxTerm+1;
  assert( pNew->nLTerm<=pNew->nLSlot );
  pNew->u.vtab.idxNum = pIdxInfo->idxNum;







|







2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
        /* A virtual table that is constrained by an IN clause may not
        ** consume the ORDER BY clause because (1) the order of IN terms
        ** is not necessarily related to the order of output terms and
        ** (2) Multiple outputs from a single IN value will not merge
        ** together.  */
        pIdxInfo->orderByConsumed = 0;
        pIdxInfo->idxFlags &= ~SQLITE_INDEX_SCAN_UNIQUE;
        *pbIn = 1; assert( (mExclude & WO_IN)==0 );
      }
    }
  }

  pNew->nLTerm = mxTerm+1;
  assert( pNew->nLTerm<=pNew->nLSlot );
  pNew->u.vtab.idxNum = pIdxInfo->idxNum;
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890



2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956

2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973

2974

2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996


2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007

3008
3009
3010
3011
3012
3013
3014
3015

3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
  /* Set the WHERE_ONEROW flag if the xBestIndex() method indicated
  ** that the scan will visit at most one row. Clear it otherwise. */
  if( pIdxInfo->idxFlags & SQLITE_INDEX_SCAN_UNIQUE ){
    pNew->wsFlags |= WHERE_ONEROW;
  }else{
    pNew->wsFlags &= ~WHERE_ONEROW;
  }
  whereLoopInsert(pBuilder, pNew);
  if( pNew->u.vtab.needFree ){
    sqlite3_free(pNew->u.vtab.idxStr);
    pNew->u.vtab.needFree = 0;
  }




  return SQLITE_OK;
}


/*
** Add all WhereLoop objects for a table of the join identified by
** pBuilder->pNew->iTab.  That table is guaranteed to be a virtual table.
**
** If there are no LEFT or CROSS JOIN joins in the query, both mExtra and
** mUnusable are set to 0. Otherwise, mExtra is a mask of all FROM clause
** entries that occur before the virtual table in the FROM clause and are
** separated from it by at least one LEFT or CROSS JOIN. Similarly, the
** mUnusable mask contains all FROM clause entries that occur after the
** virtual table and are separated from it by at least one LEFT or 
** CROSS JOIN. 
**
** For example, if the query were:
**
**   ... FROM t1, t2 LEFT JOIN t3, t4, vt CROSS JOIN t5, t6;
**
** then mExtra corresponds to (t1, t2) and mUnusable to (t5, t6).
**
** All the tables in mExtra must be scanned before the current virtual 
** table. So any terms for which all prerequisites are satisfied by 
** mExtra may be specified as "usable" in all calls to xBestIndex. 
** Conversely, all tables in mUnusable must be scanned after the current
** virtual table, so any terms for which the prerequisites overlap with
** mUnusable should always be configured as "not-usable" for xBestIndex.
*/
static int whereLoopAddVirtual(
  WhereLoopBuilder *pBuilder,  /* WHERE clause information */
  Bitmask mExtra,              /* Tables that must be scanned before this one */
  Bitmask mUnusable            /* Tables that must be scanned after this one */
){
  int rc = SQLITE_OK;          /* Return code */
  WhereInfo *pWInfo;           /* WHERE analysis context */
  Parse *pParse;               /* The parsing context */
  WhereClause *pWC;            /* The WHERE clause */
  struct SrcList_item *pSrc;   /* The FROM clause term to search */
  sqlite3_index_info *p;       /* Object to pass to xBestIndex() */
  int nConstraint;             /* Number of constraints in p */
  int bIn;                     /* True if plan uses IN(...) operator */
  WhereLoop *pNew;
  Bitmask mBest;               /* Tables used by best possible plan */

  assert( (mExtra & mUnusable)==0 );
  pWInfo = pBuilder->pWInfo;
  pParse = pWInfo->pParse;
  pWC = pBuilder->pWC;
  pNew = pBuilder->pNew;
  pSrc = &pWInfo->pTabList->a[pNew->iTab];
  assert( IsVirtual(pSrc->pTab) );
  p = allocateIndexInfo(pParse, pWC, mUnusable, pSrc,pBuilder->pOrderBy);
  if( p==0 ) return SQLITE_NOMEM_BKPT;
  pNew->rSetup = 0;
  pNew->wsFlags = WHERE_VIRTUALTABLE;
  pNew->nLTerm = 0;
  pNew->u.vtab.needFree = 0;
  nConstraint = p->nConstraint;
  if( whereLoopResize(pParse->db, pNew, nConstraint) ){
    sqlite3DbFree(pParse->db, p);
    return SQLITE_NOMEM_BKPT;
  }

  /* First call xBestIndex() with all constraints usable. */

  rc = whereLoopAddVirtualOne(pBuilder, mExtra, (Bitmask)(-1), 0, p, &bIn);
  mBest = pNew->prereq & ~mExtra;

  /* If the call to xBestIndex() with all terms enabled produced a plan
  ** that does not require any source tables, there is no point in making
  ** any further calls - if the xBestIndex() method is sane they will all
  ** return the same plan anyway.
  */
  if( mBest ){
    int seenZero = 0;             /* True if a plan with no prereqs seen */
    int seenZeroNoIN = 0;         /* Plan with no prereqs and no IN(...) seen */
    Bitmask mPrev = 0;
    Bitmask mBestNoIn = 0;

    /* If the plan produced by the earlier call uses an IN(...) term, call
    ** xBestIndex again, this time with IN(...) terms disabled. */
    if( rc==SQLITE_OK && bIn ){

      rc = whereLoopAddVirtualOne(pBuilder, mExtra, (Bitmask)-1, WO_IN, p,&bIn);

      mBestNoIn = pNew->prereq & ~mExtra;
      if( mBestNoIn==0 ){
        seenZero = 1;
        if( bIn==0 ) seenZeroNoIN = 1;
      }
    }

    /* Call xBestIndex once for each distinct value of (prereqRight & ~mExtra) 
    ** in the set of terms that apply to the current virtual table.  */
    while( rc==SQLITE_OK ){
      int i;
      Bitmask mNext = (Bitmask)(-1);
      assert( mNext>0 );
      for(i=0; i<nConstraint; i++){
        Bitmask mThis = (
            pWC->a[p->aConstraint[i].iTermOffset].prereqRight & ~mExtra
        );
        if( mThis>mPrev && mThis<mNext ) mNext = mThis;
      }
      mPrev = mNext;
      if( mNext==(Bitmask)(-1) ) break;
      if( mNext==mBest || mNext==mBestNoIn ) continue;


      rc = whereLoopAddVirtualOne(pBuilder, mExtra, mNext|mExtra, 0, p, &bIn);
      if( pNew->prereq==mExtra ){
        seenZero = 1;
        if( bIn==0 ) seenZeroNoIN = 1;
      }
    }

    /* If the calls to xBestIndex() in the above loop did not find a plan
    ** that requires no source tables at all (i.e. one guaranteed to be
    ** usable), make a call here with all source tables disabled */
    if( rc==SQLITE_OK && seenZero==0 ){

      rc = whereLoopAddVirtualOne(pBuilder, mExtra, mExtra, 0, p, &bIn);
      if( bIn==0 ) seenZeroNoIN = 1;
    }

    /* If the calls to xBestIndex() have so far failed to find a plan
    ** that requires no source tables at all and does not use an IN(...)
    ** operator, make a final call to obtain one here.  */
    if( rc==SQLITE_OK && seenZeroNoIN==0 ){

      rc = whereLoopAddVirtualOne(pBuilder, mExtra, mExtra, WO_IN, p, &bIn);
    }
  }

  if( p->needToFreeIdxStr ) sqlite3_free(p->idxStr);
  sqlite3DbFree(pParse->db, p);
  return rc;
}
#endif /* SQLITE_OMIT_VIRTUALTABLE */

/*
** Add WhereLoop entries to handle OR terms.  This works for either
** btrees or virtual tables.
*/
static int whereLoopAddOr(
  WhereLoopBuilder *pBuilder, 
  Bitmask mExtra, 
  Bitmask mUnusable
){
  WhereInfo *pWInfo = pBuilder->pWInfo;
  WhereClause *pWC;
  WhereLoop *pNew;
  WhereTerm *pTerm, *pWCEnd;
  int rc = SQLITE_OK;







|




>
>
>

|







|
|










|

|

|






|













|






|












>
|
<


|
|
|
|
|







|
>
|
>
|


|



|



|



|




|

>
>
|
|









>
|







>
|















|







2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965

2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
  /* Set the WHERE_ONEROW flag if the xBestIndex() method indicated
  ** that the scan will visit at most one row. Clear it otherwise. */
  if( pIdxInfo->idxFlags & SQLITE_INDEX_SCAN_UNIQUE ){
    pNew->wsFlags |= WHERE_ONEROW;
  }else{
    pNew->wsFlags &= ~WHERE_ONEROW;
  }
  rc = whereLoopInsert(pBuilder, pNew);
  if( pNew->u.vtab.needFree ){
    sqlite3_free(pNew->u.vtab.idxStr);
    pNew->u.vtab.needFree = 0;
  }
  WHERETRACE(0xffff, ("  bIn=%d prereqIn=%04llx prereqOut=%04llx\n",
                      *pbIn, (sqlite3_uint64)mPrereq,
                      (sqlite3_uint64)(pNew->prereq & ~mPrereq)));

  return rc;
}


/*
** Add all WhereLoop objects for a table of the join identified by
** pBuilder->pNew->iTab.  That table is guaranteed to be a virtual table.
**
** If there are no LEFT or CROSS JOIN joins in the query, both mPrereq and
** mUnusable are set to 0. Otherwise, mPrereq is a mask of all FROM clause
** entries that occur before the virtual table in the FROM clause and are
** separated from it by at least one LEFT or CROSS JOIN. Similarly, the
** mUnusable mask contains all FROM clause entries that occur after the
** virtual table and are separated from it by at least one LEFT or 
** CROSS JOIN. 
**
** For example, if the query were:
**
**   ... FROM t1, t2 LEFT JOIN t3, t4, vt CROSS JOIN t5, t6;
**
** then mPrereq corresponds to (t1, t2) and mUnusable to (t5, t6).
**
** All the tables in mPrereq must be scanned before the current virtual 
** table. So any terms for which all prerequisites are satisfied by 
** mPrereq may be specified as "usable" in all calls to xBestIndex. 
** Conversely, all tables in mUnusable must be scanned after the current
** virtual table, so any terms for which the prerequisites overlap with
** mUnusable should always be configured as "not-usable" for xBestIndex.
*/
static int whereLoopAddVirtual(
  WhereLoopBuilder *pBuilder,  /* WHERE clause information */
  Bitmask mPrereq,             /* Tables that must be scanned before this one */
  Bitmask mUnusable            /* Tables that must be scanned after this one */
){
  int rc = SQLITE_OK;          /* Return code */
  WhereInfo *pWInfo;           /* WHERE analysis context */
  Parse *pParse;               /* The parsing context */
  WhereClause *pWC;            /* The WHERE clause */
  struct SrcList_item *pSrc;   /* The FROM clause term to search */
  sqlite3_index_info *p;       /* Object to pass to xBestIndex() */
  int nConstraint;             /* Number of constraints in p */
  int bIn;                     /* True if plan uses IN(...) operator */
  WhereLoop *pNew;
  Bitmask mBest;               /* Tables used by best possible plan */

  assert( (mPrereq & mUnusable)==0 );
  pWInfo = pBuilder->pWInfo;
  pParse = pWInfo->pParse;
  pWC = pBuilder->pWC;
  pNew = pBuilder->pNew;
  pSrc = &pWInfo->pTabList->a[pNew->iTab];
  assert( IsVirtual(pSrc->pTab) );
  p = allocateIndexInfo(pParse, pWC, mUnusable, pSrc, pBuilder->pOrderBy);
  if( p==0 ) return SQLITE_NOMEM_BKPT;
  pNew->rSetup = 0;
  pNew->wsFlags = WHERE_VIRTUALTABLE;
  pNew->nLTerm = 0;
  pNew->u.vtab.needFree = 0;
  nConstraint = p->nConstraint;
  if( whereLoopResize(pParse->db, pNew, nConstraint) ){
    sqlite3DbFree(pParse->db, p);
    return SQLITE_NOMEM_BKPT;
  }

  /* First call xBestIndex() with all constraints usable. */
  WHERETRACE(0x40, ("  VirtualOne: all usable\n"));
  rc = whereLoopAddVirtualOne(pBuilder, mPrereq, ALLBITS, 0, p, &bIn);


  /* If the call to xBestIndex() with all terms enabled produced a plan
  ** that does not require any source tables (IOW: a plan with mBest==0),
  ** then there is no point in making any further calls to xBestIndex() 
  ** since they will all return the same result (if the xBestIndex()
  ** implementation is sane). */
  if( rc==SQLITE_OK && (mBest = (pNew->prereq & ~mPrereq))!=0 ){
    int seenZero = 0;             /* True if a plan with no prereqs seen */
    int seenZeroNoIN = 0;         /* Plan with no prereqs and no IN(...) seen */
    Bitmask mPrev = 0;
    Bitmask mBestNoIn = 0;

    /* If the plan produced by the earlier call uses an IN(...) term, call
    ** xBestIndex again, this time with IN(...) terms disabled. */
    if( bIn ){
      WHERETRACE(0x40, ("  VirtualOne: all usable w/o IN\n"));
      rc = whereLoopAddVirtualOne(pBuilder, mPrereq, ALLBITS, WO_IN, p, &bIn);
      assert( bIn==0 );
      mBestNoIn = pNew->prereq & ~mPrereq;
      if( mBestNoIn==0 ){
        seenZero = 1;
        seenZeroNoIN = 1;
      }
    }

    /* Call xBestIndex once for each distinct value of (prereqRight & ~mPrereq) 
    ** in the set of terms that apply to the current virtual table.  */
    while( rc==SQLITE_OK ){
      int i;
      Bitmask mNext = ALLBITS;
      assert( mNext>0 );
      for(i=0; i<nConstraint; i++){
        Bitmask mThis = (
            pWC->a[p->aConstraint[i].iTermOffset].prereqRight & ~mPrereq
        );
        if( mThis>mPrev && mThis<mNext ) mNext = mThis;
      }
      mPrev = mNext;
      if( mNext==ALLBITS ) break;
      if( mNext==mBest || mNext==mBestNoIn ) continue;
      WHERETRACE(0x40, ("  VirtualOne: mPrev=%04llx mNext=%04llx\n",
                       (sqlite3_uint64)mPrev, (sqlite3_uint64)mNext));
      rc = whereLoopAddVirtualOne(pBuilder, mPrereq, mNext|mPrereq, 0, p, &bIn);
      if( pNew->prereq==mPrereq ){
        seenZero = 1;
        if( bIn==0 ) seenZeroNoIN = 1;
      }
    }

    /* If the calls to xBestIndex() in the above loop did not find a plan
    ** that requires no source tables at all (i.e. one guaranteed to be
    ** usable), make a call here with all source tables disabled */
    if( rc==SQLITE_OK && seenZero==0 ){
      WHERETRACE(0x40, ("  VirtualOne: all disabled\n"));
      rc = whereLoopAddVirtualOne(pBuilder, mPrereq, mPrereq, 0, p, &bIn);
      if( bIn==0 ) seenZeroNoIN = 1;
    }

    /* If the calls to xBestIndex() have so far failed to find a plan
    ** that requires no source tables at all and does not use an IN(...)
    ** operator, make a final call to obtain one here.  */
    if( rc==SQLITE_OK && seenZeroNoIN==0 ){
      WHERETRACE(0x40, ("  VirtualOne: all disabled and w/o IN\n"));
      rc = whereLoopAddVirtualOne(pBuilder, mPrereq, mPrereq, WO_IN, p, &bIn);
    }
  }

  if( p->needToFreeIdxStr ) sqlite3_free(p->idxStr);
  sqlite3DbFree(pParse->db, p);
  return rc;
}
#endif /* SQLITE_OMIT_VIRTUALTABLE */

/*
** Add WhereLoop entries to handle OR terms.  This works for either
** btrees or virtual tables.
*/
static int whereLoopAddOr(
  WhereLoopBuilder *pBuilder, 
  Bitmask mPrereq, 
  Bitmask mUnusable
){
  WhereInfo *pWInfo = pBuilder->pWInfo;
  WhereClause *pWC;
  WhereLoop *pNew;
  WhereTerm *pTerm, *pWCEnd;
  int rc = SQLITE_OK;
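The comments in the hunk above describe the probing strategy: call xBestIndex once with everything usable, then once per distinct prerequisite mask found among the applicable terms, and finally with nothing usable so that at least one returned plan is valid in any join order. A deliberately simplified, stand-alone sketch of that enumeration; the callback type, the names, and the unconditional final probe are illustrative assumptions (the real code also handles the IN-operator variants and skips the final calls when a zero-prerequisite plan was already seen):

typedef unsigned long long Mask;
typedef Mask (*ProbeFn)(void *pCtx, Mask mUsable);  /* returns the plan's prereqs */

static void probeAllMasks(void *pCtx, ProbeFn xProbe,
                          const Mask *aTermPrereq, int nTerm){
  Mask mBest = xProbe(pCtx, ~(Mask)0);        /* step 1: everything usable */
  Mask mPrev = 0;
  for(;;){
    Mask mNext = ~(Mask)0;
    int i;
    for(i=0; i<nTerm; i++){                   /* smallest distinct mask > mPrev */
      if( aTermPrereq[i]>mPrev && aTermPrereq[i]<mNext ) mNext = aTermPrereq[i];
    }
    if( mNext==~(Mask)0 ) break;
    mPrev = mNext;
    if( mNext!=mBest ) xProbe(pCtx, mNext);   /* step 2: one call per distinct mask */
  }
  xProbe(pCtx, 0);                            /* step 3: nothing usable, always valid */
}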
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
          for(i=0; i<sSubBuild.pWC->nTerm; i++){
            whereTermPrint(&sSubBuild.pWC->a[i], i);
          }
        }
#endif
#ifndef SQLITE_OMIT_VIRTUALTABLE
        if( IsVirtual(pItem->pTab) ){
          rc = whereLoopAddVirtual(&sSubBuild, mExtra, mUnusable);
        }else
#endif
        {
          rc = whereLoopAddBtree(&sSubBuild, mExtra);
        }
        if( rc==SQLITE_OK ){
          rc = whereLoopAddOr(&sSubBuild, mExtra, mUnusable);
        }
        assert( rc==SQLITE_OK || sCur.n==0 );
        if( sCur.n==0 ){
          sSum.n = 0;
          break;
        }else if( once ){
          whereOrMove(&sSum, &sCur);







|



|


|







3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
          for(i=0; i<sSubBuild.pWC->nTerm; i++){
            whereTermPrint(&sSubBuild.pWC->a[i], i);
          }
        }
#endif
#ifndef SQLITE_OMIT_VIRTUALTABLE
        if( IsVirtual(pItem->pTab) ){
          rc = whereLoopAddVirtual(&sSubBuild, mPrereq, mUnusable);
        }else
#endif
        {
          rc = whereLoopAddBtree(&sSubBuild, mPrereq);
        }
        if( rc==SQLITE_OK ){
          rc = whereLoopAddOr(&sSubBuild, mPrereq, mUnusable);
        }
        assert( rc==SQLITE_OK || sCur.n==0 );
        if( sCur.n==0 ){
          sSum.n = 0;
          break;
        }else if( once ){
          whereOrMove(&sSum, &sCur);
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
}

/*
** Add all WhereLoop objects for all tables 
*/
static int whereLoopAddAll(WhereLoopBuilder *pBuilder){
  WhereInfo *pWInfo = pBuilder->pWInfo;
  Bitmask mExtra = 0;
  Bitmask mPrior = 0;
  int iTab;
  SrcList *pTabList = pWInfo->pTabList;
  struct SrcList_item *pItem;
  struct SrcList_item *pEnd = &pTabList->a[pWInfo->nLevel];
  sqlite3 *db = pWInfo->pParse->db;
  int rc = SQLITE_OK;
  WhereLoop *pNew;
  u8 priorJointype = 0;

  /* Loop over the tables in the join, from left to right */
  pNew = pBuilder->pNew;
  whereLoopInit(pNew);
  for(iTab=0, pItem=pTabList->a; pItem<pEnd; iTab++, pItem++){
    Bitmask mUnusable = 0;
    pNew->iTab = iTab;
    pNew->maskSelf = sqlite3WhereGetMask(&pWInfo->sMaskSet, pItem->iCursor);
    if( ((pItem->fg.jointype|priorJointype) & (JT_LEFT|JT_CROSS))!=0 ){
      /* This condition is true when pItem is the FROM clause term on the
      ** right-hand-side of a LEFT or CROSS JOIN.  */
      mExtra = mPrior;
    }
    priorJointype = pItem->fg.jointype;
    if( IsVirtual(pItem->pTab) ){
      struct SrcList_item *p;
      for(p=&pItem[1]; p<pEnd; p++){
        if( mUnusable || (p->fg.jointype & (JT_LEFT|JT_CROSS)) ){
          mUnusable |= sqlite3WhereGetMask(&pWInfo->sMaskSet, p->iCursor);
        }
      }
      rc = whereLoopAddVirtual(pBuilder, mExtra, mUnusable);
    }else{
      rc = whereLoopAddBtree(pBuilder, mExtra);
    }
    if( rc==SQLITE_OK ){
      rc = whereLoopAddOr(pBuilder, mExtra, mUnusable);
    }
    mPrior |= pNew->maskSelf;
    if( rc || db->mallocFailed ) break;
  }

  whereLoopClear(db, pNew);
  return rc;







|




















|









|

|


|







3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
}

/*
** Add all WhereLoop objects for all tables 
*/
static int whereLoopAddAll(WhereLoopBuilder *pBuilder){
  WhereInfo *pWInfo = pBuilder->pWInfo;
  Bitmask mPrereq = 0;
  Bitmask mPrior = 0;
  int iTab;
  SrcList *pTabList = pWInfo->pTabList;
  struct SrcList_item *pItem;
  struct SrcList_item *pEnd = &pTabList->a[pWInfo->nLevel];
  sqlite3 *db = pWInfo->pParse->db;
  int rc = SQLITE_OK;
  WhereLoop *pNew;
  u8 priorJointype = 0;

  /* Loop over the tables in the join, from left to right */
  pNew = pBuilder->pNew;
  whereLoopInit(pNew);
  for(iTab=0, pItem=pTabList->a; pItem<pEnd; iTab++, pItem++){
    Bitmask mUnusable = 0;
    pNew->iTab = iTab;
    pNew->maskSelf = sqlite3WhereGetMask(&pWInfo->sMaskSet, pItem->iCursor);
    if( ((pItem->fg.jointype|priorJointype) & (JT_LEFT|JT_CROSS))!=0 ){
      /* This condition is true when pItem is the FROM clause term on the
      ** right-hand-side of a LEFT or CROSS JOIN.  */
      mPrereq = mPrior;
    }
    priorJointype = pItem->fg.jointype;
    if( IsVirtual(pItem->pTab) ){
      struct SrcList_item *p;
      for(p=&pItem[1]; p<pEnd; p++){
        if( mUnusable || (p->fg.jointype & (JT_LEFT|JT_CROSS)) ){
          mUnusable |= sqlite3WhereGetMask(&pWInfo->sMaskSet, p->iCursor);
        }
      }
      rc = whereLoopAddVirtual(pBuilder, mPrereq, mUnusable);
    }else{
      rc = whereLoopAddBtree(pBuilder, mPrereq);
    }
    if( rc==SQLITE_OK ){
      rc = whereLoopAddOr(pBuilder, mPrereq, mUnusable);
    }
    mPrior |= pNew->maskSelf;
    if( rc || db->mallocFailed ) break;
  }

  whereLoopClear(db, pNew);
  return rc;
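The loop above is where mPrereq and mUnusable are actually computed for each FROM clause term. For the example join quoted earlier in this check-in, ... FROM t1, t2 LEFT JOIN t3, t4, vt CROSS JOIN t5, t6;, with vt as the current table it leaves mPrereq covering (t1, t2) and mUnusable covering (t5, t6). Spelled out with one illustrative bit per FROM term (the bit values are assumptions for the example only):

#define T1 0x01
#define T2 0x02
#define T3 0x04
#define T4 0x08
#define VT 0x10
#define T5 0x20
#define T6 0x40

/* vt sits to the right of the LEFT JOIN and to the left of the CROSS JOIN: */
static const unsigned int exampleMPrereq   = T1|T2;   /* must be scanned before vt */
static const unsigned int exampleMUnusable = T5|T6;   /* must be scanned after vt  */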
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
    if( db->mallocFailed ) goto whereBeginError;
    if( pWInfo->pOrderBy ){
       wherePathSolver(pWInfo, pWInfo->nRowOut+1);
       if( db->mallocFailed ) goto whereBeginError;
    }
  }
  if( pWInfo->pOrderBy==0 && (db->flags & SQLITE_ReverseOrder)!=0 ){
     pWInfo->revMask = (Bitmask)(-1);
  }
  if( pParse->nErr || NEVER(db->mallocFailed) ){
    goto whereBeginError;
  }
#ifdef WHERETRACE_ENABLED
  if( sqlite3WhereTrace ){
    sqlite3DebugPrintf("---- Solution nRow=%d", pWInfo->nRowOut);







|







4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299
4300
4301
4302
4303
    if( db->mallocFailed ) goto whereBeginError;
    if( pWInfo->pOrderBy ){
       wherePathSolver(pWInfo, pWInfo->nRowOut+1);
       if( db->mallocFailed ) goto whereBeginError;
    }
  }
  if( pWInfo->pOrderBy==0 && (db->flags & SQLITE_ReverseOrder)!=0 ){
     pWInfo->revMask = ALLBITS;
  }
  if( pParse->nErr || NEVER(db->mallocFailed) ){
    goto whereBeginError;
  }
#ifdef WHERETRACE_ENABLED
  if( sqlite3WhereTrace ){
    sqlite3DebugPrintf("---- Solution nRow=%d", pWInfo->nRowOut);
Changes to src/wherecode.c.
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892

    sqlite3ExprCachePush(pParse);
    iReg = sqlite3GetTempRange(pParse, nConstraint+2);
    addrNotFound = pLevel->addrBrk;
    for(j=0; j<nConstraint; j++){
      int iTarget = iReg+j+2;
      pTerm = pLoop->aLTerm[j];
      if( pTerm==0 ) continue;
      if( pTerm->eOperator & WO_IN ){
        codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, iTarget);
        addrNotFound = pLevel->addrNxt;
      }else{
        sqlite3ExprCode(pParse, pTerm->pExpr->pRight, iTarget);
      }
    }







|







878
879
880
881
882
883
884
885
886
887
888
889
890
891
892

    sqlite3ExprCachePush(pParse);
    iReg = sqlite3GetTempRange(pParse, nConstraint+2);
    addrNotFound = pLevel->addrBrk;
    for(j=0; j<nConstraint; j++){
      int iTarget = iReg+j+2;
      pTerm = pLoop->aLTerm[j];
      if( NEVER(pTerm==0) ) continue;
      if( pTerm->eOperator & WO_IN ){
        codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, iTarget);
        addrNotFound = pLevel->addrNxt;
      }else{
        sqlite3ExprCode(pParse, pTerm->pExpr->pRight, iTarget);
      }
    }
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934

935
936

937
938
939
940
941
942
943
        VdbeOp *pOp;     /* Opcode to access the value of the IN constraint */

        /* Reload the constraint value into reg[iReg+j+2].  The same value
        ** was loaded into the same register prior to the OP_VFilter, but
        ** the xFilter implementation might have changed the datatype or
        ** encoding of the value in the register, so it *must* be reloaded. */
        assert( pLevel->u.in.aInLoop!=0 || db->mallocFailed );
        if( pLevel->u.in.aInLoop!=0 ){
          assert( iIn>0 );
          pOp = sqlite3VdbeGetOp(v, pLevel->u.in.aInLoop[--iIn].addrInTop);
          assert( pOp->opcode==OP_Column || pOp->opcode==OP_Rowid );
          assert( pOp->opcode!=OP_Column || pOp->p3==iReg+j+2 );
          assert( pOp->opcode!=OP_Rowid || pOp->p2==iReg+j+2 );
          testcase( pOp->opcode==OP_Rowid );
          sqlite3VdbeAddOp3(v, pOp->opcode, pOp->p1, pOp->p2, pOp->p3);
        }

        /* Generate code that will continue to the next row if 
        ** the IN constraint is not satisfied */
        pCompare = sqlite3PExpr(pParse, TK_EQ, 0, 0, 0);
        assert( pCompare!=0 || db->mallocFailed );
        if( pCompare ){
          pCompare->pLeft = pTerm->pExpr->pLeft;
          pCompare->pRight = pRight = sqlite3Expr(db, TK_REGISTER, 0);

          if( pRight ) pRight->iTable = iReg+j+2;
          sqlite3ExprIfFalse(pParse, pCompare, pLevel->addrCont, 0);

          pCompare->pLeft = 0;
          sqlite3ExprDelete(db, pCompare);
        }
      }
    }
    sqlite3ReleaseTempRange(pParse, iReg, nConstraint+2);
    sqlite3ExprCachePop(pParse);







|
















>
|
|
>







911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
        VdbeOp *pOp;     /* Opcode to access the value of the IN constraint */

        /* Reload the constraint value into reg[iReg+j+2].  The same value
        ** was loaded into the same register prior to the OP_VFilter, but
        ** the xFilter implementation might have changed the datatype or
        ** encoding of the value in the register, so it *must* be reloaded. */
        assert( pLevel->u.in.aInLoop!=0 || db->mallocFailed );
        if( !db->mallocFailed ){
          assert( iIn>0 );
          pOp = sqlite3VdbeGetOp(v, pLevel->u.in.aInLoop[--iIn].addrInTop);
          assert( pOp->opcode==OP_Column || pOp->opcode==OP_Rowid );
          assert( pOp->opcode!=OP_Column || pOp->p3==iReg+j+2 );
          assert( pOp->opcode!=OP_Rowid || pOp->p2==iReg+j+2 );
          testcase( pOp->opcode==OP_Rowid );
          sqlite3VdbeAddOp3(v, pOp->opcode, pOp->p1, pOp->p2, pOp->p3);
        }

        /* Generate code that will continue to the next row if 
        ** the IN constraint is not satisfied */
        pCompare = sqlite3PExpr(pParse, TK_EQ, 0, 0, 0);
        assert( pCompare!=0 || db->mallocFailed );
        if( pCompare ){
          pCompare->pLeft = pTerm->pExpr->pLeft;
          pCompare->pRight = pRight = sqlite3Expr(db, TK_REGISTER, 0);
          if( pRight ){
            pRight->iTable = iReg+j+2;
            sqlite3ExprIfFalse(pParse, pCompare, pLevel->addrCont, 0);
          }
          pCompare->pLeft = 0;
          sqlite3ExprDelete(db, pCompare);
        }
      }
    }
    sqlite3ReleaseTempRange(pParse, iReg, nConstraint+2);
    sqlite3ExprCachePop(pParse);
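The reload described in the comment above is needed because the sqlite3_value objects passed to xFilter are the very registers the later comparison reuses, and the sqlite3_value interface may convert them in place. A hedged illustration of one legitimate way an xFilter can change a value's encoding (the cursor and function names are assumptions):

static int demovtabFilter(
  sqlite3_vtab_cursor *pCur,
  int idxNum, const char *idxStr,
  int argc, sqlite3_value **argv
){
  (void)pCur; (void)idxNum; (void)idxStr;
  if( argc>0 ){
    /* Asking for text may convert the underlying value in place, so the
    ** register the core later compares against now holds the converted
    ** representation; hence the reload coded above. */
    const unsigned char *z = sqlite3_value_text(argv[0]);
    (void)z;
  }
  return SQLITE_OK;
}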
Changes to test/analyzer1.test.
21
22
23
24
25
26
27


28
29
30

31
32
33
34
35
36
37

if {$tcl_platform(platform)=="windows"} {
  set PROG "sqlite3_analyzer.exe"
} else {
  set PROG "./sqlite3_analyzer"
}
if {![file exe $PROG]} {


  puts "analyzer1 cannot run because $PROG is not available"
  finish_test
  return

}
db close
forcedelete test.db test.db-journal test.db-wal
sqlite3 db test.db

do_test analyzer1-1.0 {
  db eval {







>
>
|
|
|
>







21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40

if {$tcl_platform(platform)=="windows"} {
  set PROG "sqlite3_analyzer.exe"
} else {
  set PROG "./sqlite3_analyzer"
}
if {![file exe $PROG]} {
  set PROG [file normalize [file join $::cmdlinearg(TESTFIXTURE_HOME) $PROG]]
  if {![file exe $PROG]} {
    puts "analyzer1 cannot run because $PROG is not available"
    finish_test
    return
  }
}
db close
forcedelete test.db test.db-journal test.db-wal
sqlite3 db test.db

do_test analyzer1-1.0 {
  db eval {
Changes to test/autovacuum.test.
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282












283
284
285
286
287
288
289
290
291
292
do_test autovacuum-2.4.3 {
  execsql {
    SELECT rootpage FROM sqlite_master ORDER by rootpage
  }
} {3 4 5 6 7 8 9 10}

# Right now there are 5 free pages in the database. Consume and then free
# a 520 pages. Then create 520 tables. This ensures that at least some of the
# desired root-pages reside on the second free-list trunk page, and that the
# trunk itself is required at some point.
do_test autovacuum-2.4.4 {
  execsql "
    INSERT INTO av3 VALUES ('[make_str abcde [expr 1020*520 + 500]]');
    DELETE FROM av3;
  "
} {}
set root_page_list [list]
set pending_byte_page [expr ($::sqlite_pending_byte / 1024) + 1]












for {set i 3} {$i<=532} {incr i} {
  # 207 and 412 are pointer-map pages.
  if { $i!=207 && $i!=412 && $i != $pending_byte_page} {
    lappend root_page_list $i
  }
}
if {$i >= $pending_byte_page} {
  lappend root_page_list $i
}
do_test autovacuum-2.4.5 {







|










>
>
>
>
>
>
>
>
>
>
>
>

|
<







265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296

297
298
299
300
301
302
303
do_test autovacuum-2.4.3 {
  execsql {
    SELECT rootpage FROM sqlite_master ORDER by rootpage
  }
} {3 4 5 6 7 8 9 10}

# Right now there are 5 free pages in the database. Consume and then free
# all 520 pages. Then create 520 tables. This ensures that at least some of the
# desired root-pages reside on the second free-list trunk page, and that the
# trunk itself is required at some point.
do_test autovacuum-2.4.4 {
  execsql "
    INSERT INTO av3 VALUES ('[make_str abcde [expr 1020*520 + 500]]');
    DELETE FROM av3;
  "
} {}
set root_page_list [list]
set pending_byte_page [expr ($::sqlite_pending_byte / 1024) + 1]

# unusable_pages
# These are either the pending_byte page or the pointer map pages
#
unset -nocomplain unusable_page
if {[sqlite3 -has-codec]} {
  array set unusable_page {205 1 408 1}
} else {
  array set unusable_page {207 1 412 1}
}
set unusable_page($pending_byte_page) 1

for {set i 3} {$i<=532} {incr i} {
  if {![info exists unusable_page($i)]} {

    lappend root_page_list $i
  }
}
if {$i >= $pending_byte_page} {
  lappend root_page_list $i
}
do_test autovacuum-2.4.5 {
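The page numbers special-cased above follow from the pointer-map layout: in an auto-vacuum database each pointer-map entry is 5 bytes, the first map page is page 2, and each map page covers usable_size/5 following pages. With a fully usable 1024-byte page that puts map pages at 2, 207 and 412, matching the non-codec branch; reserving bytes per page shrinks the usable size and shifts them, which is what the codec branch accounts for. A small stand-alone check of that arithmetic, written for illustration rather than taken from the test suite:

#include <stdio.h>

/* True if pgno is a pointer-map page, given the usable bytes per page. */
static int isPtrmapPage(unsigned pgno, unsigned usableSize){
  unsigned nPerMap = usableSize/5;          /* 5-byte entries per map page */
  return pgno>=2 && (pgno-2) % (nPerMap+1) == 0;
}

int main(void){
  unsigned pgno;
  for(pgno=2; pgno<=532; pgno++){
    if( isPtrmapPage(pgno, 1024) ) printf("%u ", pgno);   /* prints: 2 207 412 */
  }
  printf("\n");
  return 0;
}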
Changes to test/backcompat.test.
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
  code2 { sqlite3 db test.db }

  foreach c {code1 code2} {
    $c {
      set v [split [db version] .]
      if {[llength $v]==3} {lappend v 0}
      set ::sqlite_libversion [format \
        "%d%.2d%.2d%2d" [lindex $v 0] [lindex $v 1] [lindex $v 2] [lindex $v 3]
      ]
    }
  }

  uplevel $script

  catch { code1 { db close } }







|







59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
  code2 { sqlite3 db test.db }

  foreach c {code1 code2} {
    $c {
      set v [split [db version] .]
      if {[llength $v]==3} {lappend v 0}
      set ::sqlite_libversion [format \
        "%d%.2d%.2d%.2d" [lindex $v 0] [lindex $v 1] [lindex $v 2] [lindex $v 3]
      ]
    }
  }

  uplevel $script

  catch { code1 { db close } }
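The one-character change above ("%2d" becomes "%.2d") matters because the four version fields are packed into a string that later >= comparisons treat as a number: zero padding turns 3.7.12.0 into 3071200, whereas space padding of the final field broke checks such as the 3071200 test later in this file. The same packing shown in C for illustration (the test itself is Tcl):

#include <stdio.h>

int main(void){
  char zVers[16];
  /* Version 3.7.12.0 packed with zero padding: */
  snprintf(zVers, sizeof(zVers), "%d%.2d%.2d%.2d", 3, 7, 12, 0);
  printf("%s\n", zVers);          /* prints 3071200 */
  return 0;
}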
81
82
83
84
85
86
87
88

89
90
91
92
93
94
95
array set ::incompatible [list]
proc do_allbackcompat_test {script} {

  foreach bin $::BC(binaries) {
    set nErr [set_test_counter errors]
    foreach dir {0 1} {

      set bintag [string map {testfixture {}} $bin]

      set bintag [string map {\.exe {}} $bintag]
      if {$bintag == ""} {set bintag self}
      set ::bcname ".$bintag.$dir."

      rename do_test _do_test
      proc do_test {nm sql res} {
        set nm [regsub {\.} $nm $::bcname]







|
>







81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
array set ::incompatible [list]
proc do_allbackcompat_test {script} {

  foreach bin $::BC(binaries) {
    set nErr [set_test_counter errors]
    foreach dir {0 1} {

      set bintag $bin
      regsub {.*testfixture\.} $bintag {} bintag
      set bintag [string map {\.exe {}} $bintag]
      if {$bintag == ""} {set bintag self}
      set ::bcname ".$bintag.$dir."

      rename do_test _do_test
      proc do_test {nm sql res} {
        set nm [regsub {\.} $nm $::bcname]
416
417
418
419
420
421
422






423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
          SELECT level, group_concat(idx, ' ') FROM t2_segdir GROUP BY level;
        }
      } {0 {0 1 2 3 4 5}}

      if {[code1 { set ::sqlite_libversion }] >=3071200 
       && [code2 { set ::sqlite_libversion }] >=3071200 
      } {






        do_test backcompat-3.9 {
          sql1 { INSERT INTO t2(t2) VALUES('merge=100,4'); }
          sql2 { INSERT INTO t2(t2) VALUES('merge=100,4'); }
          sql1 { INSERT INTO t2(t2) VALUES('merge=100,4'); }
          sql2 { INSERT INTO t2(t2) VALUES('merge=2500,4'); }
          sql2 {
            SELECT level, group_concat(idx, ' ') FROM t2_segdir GROUP BY level;
          }
        } {0 {0 1} 1 0}

        do_test backcompat-3.10 {
          sql1 { INSERT INTO t2(t2) VALUES('integrity-check') }
          sql2 { INSERT INTO t2(t2) VALUES('integrity-check') }
        } {}
      }
    }







>
>
>
>
>
>








|







417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
          SELECT level, group_concat(idx, ' ') FROM t2_segdir GROUP BY level;
        }
      } {0 {0 1 2 3 4 5}}

      if {[code1 { set ::sqlite_libversion }] >=3071200 
       && [code2 { set ::sqlite_libversion }] >=3071200 
      } {
        if {[code1 { set ::sqlite_libversion }]<3120000} {
          set res {0 {0 1} 1 0}
        } else {
          set res {1 0}
        }

        do_test backcompat-3.9 {
          sql1 { INSERT INTO t2(t2) VALUES('merge=100,4'); }
          sql2 { INSERT INTO t2(t2) VALUES('merge=100,4'); }
          sql1 { INSERT INTO t2(t2) VALUES('merge=100,4'); }
          sql2 { INSERT INTO t2(t2) VALUES('merge=2500,4'); }
          sql2 {
            SELECT level, group_concat(idx, ' ') FROM t2_segdir GROUP BY level;
          }
        } $res

        do_test backcompat-3.10 {
          sql1 { INSERT INTO t2(t2) VALUES('integrity-check') }
          sql2 { INSERT INTO t2(t2) VALUES('integrity-check') }
        } {}
      }
    }
Changes to test/backup4.test.
18
19
20
21
22
23
24





25
26
27
28
29
30
31
# schema cookie and change counter. Doing that could cause other clients
# to become confused and continue using out-of-date cache data.
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix backup4






#-------------------------------------------------------------------------
# At one point this test was failing because [db] was using an out of
# date schema in test case 1.2.
#
do_execsql_test 1.0 {
  CREATE TABLE t1(x, y, UNIQUE(x, y));







>
>
>
>
>







18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
# schema cookie and change counter. Doing that could cause other clients
# to become confused and continue using out-of-date cache data.
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix backup4

# The codec logic does not work for zero-length database files.  A database
# file must contain at least one page in order to be recognized as an
# encrypted database.
do_not_use_codec

#-------------------------------------------------------------------------
# At one point this test was failing because [db] was using an out of
# date schema in test case 1.2.
#
do_execsql_test 1.0 {
  CREATE TABLE t1(x, y, UNIQUE(x, y));
Changes to test/bc_common.tcl.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17



proc bc_find_binaries {zCaption} {
  # Search for binaries to test against. Any executable files that match
  # our naming convention are assumed to be testfixture binaries to test
  # against.
  #
  set binaries [list]
  set self [file tail [info nameofexec]]
  set pattern "$self?*"
  if {$::tcl_platform(platform)=="windows"} {
    set pattern [string map {\.exe {}} $pattern]
  }
  foreach file [glob -nocomplain $pattern] {
    if {$file==$self} continue
    if {[file executable $file] && [file isfile $file]} {lappend binaries $file}









|







1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17



proc bc_find_binaries {zCaption} {
  # Search for binaries to test against. Any executable files that match
  # our naming convention are assumed to be testfixture binaries to test
  # against.
  #
  set binaries [list]
  set self [info nameofexec]
  set pattern "$self?*"
  if {$::tcl_platform(platform)=="windows"} {
    set pattern [string map {\.exe {}} $pattern]
  }
  foreach file [glob -nocomplain $pattern] {
    if {$file==$self} continue
    if {[file executable $file] && [file isfile $file]} {lappend binaries $file}
48
49
50
51
52
53
54
55

56
57
58
59
60
61
62
  proc code2 {tcl} { testfixture $::bc_chan $tcl }
  proc sql1 sql { code1 [list db eval $sql] }
  proc sql2 sql { code2 [list db eval $sql] }

  code1 { sqlite3 db test.db }
  code2 { sqlite3 db test.db }

  set bintag [string map {testfixture {}} $bin]

  set bintag [string map {\.exe {}} $bintag]
  if {$bintag == ""} {set bintag self}
  set saved_prefix $::testprefix
  append ::testprefix ".$bintag"

  uplevel $script








|
>







48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
  proc code2 {tcl} { testfixture $::bc_chan $tcl }
  proc sql1 sql { code1 [list db eval $sql] }
  proc sql2 sql { code2 [list db eval $sql] }

  code1 { sqlite3 db test.db }
  code2 { sqlite3 db test.db }

  set bintag $bin
  regsub {.*testfixture\.} $bintag {} bintag
  set bintag [string map {\.exe {}} $bintag]
  if {$bintag == ""} {set bintag self}
  set saved_prefix $::testprefix
  append ::testprefix ".$bintag"

  uplevel $script

Changes to test/bestindex1.test.
10
11
12
13
14
15
16





17
18
19
20
21
22
23
#***********************************************************************
# 
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix bestindex1






register_tcl_module db

proc vtab_command {method args} {
  switch -- $method {
    xConnect {
      return "CREATE TABLE t1(a, b, c)"







>
>
>
>
>







10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
#***********************************************************************
# 
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix bestindex1

ifcapable !vtab {
  finish_test
  return
}

register_tcl_module db

proc vtab_command {method args} {
  switch -- $method {
    xConnect {
      return "CREATE TABLE t1(a, b, c)"
157
158
159
160
161
162
163
164
165

  do_eqp_test 2.2.$mode.6 { 
    SELECT rowid FROM t1 WHERE a IN ('one', 'four') ORDER BY +rowid
  } $plan($mode)
}

finish_test









<
<
162
163
164
165
166
167
168



  do_eqp_test 2.2.$mode.6 { 
    SELECT rowid FROM t1 WHERE a IN ('one', 'four') ORDER BY +rowid
  } $plan($mode)
}

finish_test


Changes to test/bestindex2.test.
9
10
11
12
13
14
15




16
17
18
19
20
21
22
#
#***********************************************************************

set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix bestindex2






#-------------------------------------------------------------------------
# Virtual table callback for table named $tbl, with the columns specified
# by list argument $cols. e.g. if the function is invoked as:
#
#   vtab_cmd t1 {a b c} ...
#







>
>
>
>







9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
#
#***********************************************************************

set testdir [file dirname $argv0]
source $testdir/tester.tcl
set testprefix bestindex2

ifcapable !vtab {
  finish_test
  return
}

#-------------------------------------------------------------------------
# Virtual table callback for table named $tbl, with the columns specified
# by list argument $cols. e.g. if the function is invoked as:
#
#   vtab_cmd t1 {a b c} ...
#
131
132
133
134
135
136
137
138
  0 0 0 {SCAN TABLE x1} 
  0 1 1 {SCAN TABLE t1 VIRTUAL TABLE INDEX 0:}
  0 2 2 {SCAN TABLE t2 VIRTUAL TABLE INDEX 0:indexed(c=?)} 
  0 3 3 {SCAN TABLE t3 VIRTUAL TABLE INDEX 0:indexed(e=?)}
}

finish_test








<
135
136
137
138
139
140
141

  0 0 0 {SCAN TABLE x1} 
  0 1 1 {SCAN TABLE t1 VIRTUAL TABLE INDEX 0:}
  0 2 2 {SCAN TABLE t2 VIRTUAL TABLE INDEX 0:indexed(c=?)} 
  0 3 3 {SCAN TABLE t3 VIRTUAL TABLE INDEX 0:indexed(e=?)}
}

finish_test

Changes to test/close.test.
12
13
14
15
16
17
18




19
20
21
22
23
24
25
# Test some specific circumstances to do with shared cache mode.
#


set testdir [file dirname $argv0]
source $testdir/tester.tcl
set ::testprefix close





do_execsql_test 1.0 {
  CREATE TABLE t1(x);
  INSERT INTO t1 VALUES('one');
  INSERT INTO t1 VALUES('two');
  INSERT INTO t1 VALUES('three');
}







>
>
>
>







12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
# Test some specific circumstances to do with shared cache mode.
#


set testdir [file dirname $argv0]
source $testdir/tester.tcl
set ::testprefix close

# This module bypasses the "-key" logic in tester.tcl, so it cannot run
# with the codec enabled.
do_not_use_codec

do_execsql_test 1.0 {
  CREATE TABLE t1(x);
  INSERT INTO t1 VALUES('one');
  INSERT INTO t1 VALUES('two');
  INSERT INTO t1 VALUES('three');
}
Changes to test/corrupt2.test.
@@ -342,35 +342,37 @@
     hexio_write corrupt.db [expr 1024 + ($nPage-3)*5] 010000000
   } -test {
     do_test corrupt2-6.3 {
       catchsql " $::presql pragma incremental_vacuum = 1 "
     } {1 {database disk image is malformed}}
   }
 
-  corruption_test -sqlprep {
-    PRAGMA auto_vacuum = 1;
-    PRAGMA page_size = 1024;
-    CREATE TABLE t1(a INTEGER PRIMARY KEY, b);
-    INSERT INTO t1 VALUES(1, randomblob(2500));
-    DELETE FROM t1 WHERE a = 1;
-  } -corrupt {
-    set nAppend [expr 1024*207 - [file size corrupt.db]]
-    set fd [open corrupt.db r+]
-    seek $fd 0 end
-    puts -nonewline $fd [string repeat x $nAppend]
-    close $fd
-    hexio_write corrupt.db 28 00000000
-  } -test {
-    do_test corrupt2-6.4 {
-      catchsql " 
-        $::presql 
-        BEGIN EXCLUSIVE;
-        COMMIT;
-      "
-    } {1 {database disk image is malformed}}
+  if {![nonzero_reserved_bytes]} {
+    corruption_test -sqlprep {
+      PRAGMA auto_vacuum = 1;
+      PRAGMA page_size = 1024;
+      CREATE TABLE t1(a INTEGER PRIMARY KEY, b);
+      INSERT INTO t1 VALUES(1, randomblob(2500));
+      DELETE FROM t1 WHERE a = 1;
+    } -corrupt {
+      set nAppend [expr 1024*207 - [file size corrupt.db]]
+      set fd [open corrupt.db r+]
+      seek $fd 0 end
+      puts -nonewline $fd [string repeat x $nAppend]
+      close $fd
+      hexio_write corrupt.db 28 00000000
+    } -test {
+      do_test corrupt2-6.4 {
+        catchsql " 
+          $::presql 
+          BEGIN EXCLUSIVE;
+          COMMIT;
+        "
+      } {1 {database disk image is malformed}}
+    }
   }
 }
 
 
 set sqlprep {
   PRAGMA auto_vacuum = 0;
   PRAGMA page_size = 1024;
Changes to test/corrupt3.test.
@@ -14,18 +14,17 @@
 # segfault if it sees a corrupt database file.
 #
 # $Id: corrupt3.test,v 1.2 2007/04/06 21:42:22 drh Exp $
 
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
 
-# Do not use a codec for tests in this file, as the database file is
-# manipulated directly using tcl scripts (using the [hexio_write] command).
-#
-do_not_use_codec
+# This module uses hard-coded offsets which do not work if the reserved_bytes
+# value is nonzero.
+if {[nonzero_reserved_bytes]} {finish_test; return;}
 
 # These tests deal with corrupt database files
 #
 database_may_be_corrupt
 
 # We must have the page_size pragma for these tests to work.
 #
Changes to test/corrupt4.test.
@@ -14,18 +14,17 @@
 # segfault if it sees a corrupt database file.
 #
 # $Id: corrupt4.test,v 1.1 2007/09/07 14:32:07 drh Exp $
 
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
 
-# Do not use a codec for tests in this file, as the database file is
-# manipulated directly using tcl scripts (using the [hexio_write] command).
-#
-do_not_use_codec
+# This module uses hard-coded offsets which do not work if the reserved_bytes
+# value is nonzero.
+if {[nonzero_reserved_bytes]} {finish_test; return;}
 
 # These tests deal with corrupt database files
 #
 database_may_be_corrupt
 
 # We must have the page_size pragma for these tests to work.
 #
Changes to test/corrupt6.test.
@@ -15,18 +15,17 @@
 # on corrupt SerialTypeLen values.
 #
 # $Id: corrupt6.test,v 1.2 2008/05/19 15:37:10 shane Exp $
 
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
 
-# Do not use a codec for tests in this file, as the database file is
-# manipulated directly using tcl scripts (using the [hexio_write] command).
-#
-do_not_use_codec
+# This module uses hard-coded offsets which do not work if the reserved_bytes
+# value is nonzero.
+if {[nonzero_reserved_bytes]} {finish_test; return;}
 
 # These tests deal with corrupt database files
 #
 database_may_be_corrupt
 
 # We must have the page_size pragma for these tests to work.
 #
Changes to test/corrupt7.test.
@@ -15,18 +15,17 @@
 # on corrupt cell offsets in a btree page.
 #
 # $Id: corrupt7.test,v 1.8 2009/08/10 10:18:08 danielk1977 Exp $
 
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
 
-# Do not use a codec for tests in this file, as the database file is
-# manipulated directly using tcl scripts (using the [hexio_write] command).
-#
-do_not_use_codec
+# This module uses hard-coded offsets which do not work if the reserved_bytes
+# value is nonzero.
+if {[nonzero_reserved_bytes]} {finish_test; return;}
 
 # These tests deal with corrupt database files
 #
 database_may_be_corrupt
 
 # We must have the page_size pragma for these tests to work.
 #
Changes to test/corruptE.test.
@@ -14,18 +14,17 @@
 # segfault if it sees a corrupt database file.  It specifcally
 # focuses on rowid order corruption.
 #
 
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
 
-# Do not use a codec for tests in this file, as the database file is
-# manipulated directly using tcl scripts (using the [hexio_write] command).
-#
-do_not_use_codec
+# This module uses hard-coded offsets which do not work if the reserved_bytes
+# value is nonzero.
+if {[nonzero_reserved_bytes]} {finish_test; return;}
 
 # These tests deal with corrupt database files
 #
 database_may_be_corrupt
 
 # Do not run the tests in this file if ENABLE_OVERSIZE_CELL_CHECK is on.
 #
Changes to test/corruptG.test.
@@ -10,18 +10,17 @@
 #***********************************************************************
 #
 
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
 set testprefix corruptG
 
-# Do not use a codec for tests in this file, as the database file is
-# manipulated directly using tcl scripts (using the [hexio_write] command).
-#
-do_not_use_codec
+# This module uses hard-coded offsets which do not work if the reserved_bytes
+# value is nonzero.
+if {[nonzero_reserved_bytes]} {finish_test; return;}
 
 # These tests deal with corrupt database files
 #
 database_may_be_corrupt
 
 # Create a simple database with a single entry.  Then corrupt the
 # header-size varint on the index payload so that it maps into a
Changes to test/corruptH.test.
@@ -10,18 +10,18 @@
 #***********************************************************************
 #
 
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
 set testprefix corruptH
 
-# Do not use a codec for tests in this file, as the database file is
-# manipulated directly using tcl scripts (using the [hexio_write] command).
-#
-do_not_use_codec
+# This module uses hard-coded offsets which do not work if the reserved_bytes
+# value is nonzero.
+if {[nonzero_reserved_bytes]} {finish_test; return;}
+
 database_may_be_corrupt
 
 # The corruption migrations tested by the code in this file are not detected
 # mmap mode.
 #
 # The reason is that in mmap mode, the different queries may use different
 # PgHdr objects for the same page (same data, but different PgHdr container 
Changes to test/corruptI.test.
@@ -15,18 +15,18 @@
 set testprefix corruptI
 
 if {[permutation]=="mmap"} {
   finish_test
   return
 }
 
-# Do not use a codec for tests in this file, as the database file is
-# manipulated directly using tcl scripts (using the [hexio_write] command).
-#
-do_not_use_codec
+# This module uses hard-coded offsets which do not work if the reserved_bytes
+# value is nonzero.
+if {[nonzero_reserved_bytes]} {finish_test; return;}
+
 database_may_be_corrupt
 
 # Initialize the database.
 #
 do_execsql_test 1.1 {
   PRAGMA page_size=1024;
   PRAGMA auto_vacuum=0;
Changes to test/corruptJ.test.
@@ -18,18 +18,18 @@
 set testprefix corruptJ
 
 if {[permutation]=="mmap"} {
   finish_test
   return
 }
 
-# Do not use a codec for tests in this file, as the database file is
-# manipulated directly using tcl scripts (using the [hexio_write] command).
-#
-do_not_use_codec
+# This module uses hard-coded offsets which do not work if the reserved_bytes
+# value is nonzero.
+if {[nonzero_reserved_bytes]} {finish_test; return;}
+
 database_may_be_corrupt
 
 # Initialize the database.
 #
 do_execsql_test 1.1 {
   PRAGMA page_size=1024;
   PRAGMA auto_vacuum=0;
Changes to test/crash8.test.
@@ -21,14 +21,15 @@
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
 
 ifcapable !crashtest {
   finish_test
   return
 }
+do_not_use_codec
 
 do_test crash8-1.1 {
   execsql {
     PRAGMA auto_vacuum=OFF;
     CREATE TABLE t1(a, b);
     CREATE INDEX i1 ON t1(a, b);
     INSERT INTO t1 VALUES(1, randstr(1000,1000));
Changes to test/e_uri.test.
@@ -9,15 +9,15 @@
 #
 #***********************************************************************
 #
 
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
 set testprefix e_uri
-
+do_not_use_codec
 db close
 
 proc parse_uri {uri} {
   testvfs tvfs2
   testvfs tvfs 
   tvfs filter xOpen
   tvfs script parse_uri_open_cb
Changes to test/e_vacuum.test.
@@ -155,43 +155,45 @@
 } {1024 1}
 do_test e_vacuum-1.3.1.2 {
   execsql { PRAGMA page_size = 2048 }
   execsql { PRAGMA auto_vacuum = NONE }
   execsql { PRAGMA page_size ; PRAGMA auto_vacuum }
 } {1024 1}
 
-# EVIDENCE-OF: R-08570-19916 However, when not in write-ahead log mode,
-# the page_size and/or auto_vacuum properties of an existing database
-# may be changed by using the page_size and/or pragma auto_vacuum
-# pragmas and then immediately VACUUMing the database.
-#
-do_test e_vacuum-1.3.2.1 {
-  execsql { PRAGMA journal_mode = delete }
-  execsql { PRAGMA page_size = 2048 }
-  execsql { PRAGMA auto_vacuum = NONE }
-  execsql VACUUM
-  execsql { PRAGMA page_size ; PRAGMA auto_vacuum }
-} {2048 0}
-
-# EVIDENCE-OF: R-48521-51450 When in write-ahead log mode, only the
-# auto_vacuum support property can be changed using VACUUM.
-#
-ifcapable wal {
-do_test e_vacuum-1.3.3.1 {
-  execsql { PRAGMA journal_mode = wal }
-  execsql { PRAGMA page_size ; PRAGMA auto_vacuum }
-} {2048 0}
-do_test e_vacuum-1.3.3.2 {
-  execsql { PRAGMA page_size = 1024 }
-  execsql { PRAGMA auto_vacuum = FULL }
-  execsql VACUUM
-  execsql { PRAGMA page_size ; PRAGMA auto_vacuum }
-} {2048 1}
-}
-
+if {![nonzero_reserved_bytes]} {
+  # EVIDENCE-OF: R-08570-19916 However, when not in write-ahead log mode,
+  # the page_size and/or auto_vacuum properties of an existing database
+  # may be changed by using the page_size and/or pragma auto_vacuum
+  # pragmas and then immediately VACUUMing the database.
+  #
+  do_test e_vacuum-1.3.2.1 {
+    execsql { PRAGMA journal_mode = delete }
+    execsql { PRAGMA page_size = 2048 }
+    execsql { PRAGMA auto_vacuum = NONE }
+    execsql VACUUM
+    execsql { PRAGMA page_size ; PRAGMA auto_vacuum }
+  } {2048 0}
+  
+  # EVIDENCE-OF: R-48521-51450 When in write-ahead log mode, only the
+  # auto_vacuum support property can be changed using VACUUM.
+  #
+  ifcapable wal {
+    do_test e_vacuum-1.3.3.1 {
+      execsql { PRAGMA journal_mode = wal }
+      execsql { PRAGMA page_size ; PRAGMA auto_vacuum }
+    } {2048 0}
+    do_test e_vacuum-1.3.3.2 {
+      execsql { PRAGMA page_size = 1024 }
+      execsql { PRAGMA auto_vacuum = FULL }
+      execsql VACUUM
+      execsql { PRAGMA page_size ; PRAGMA auto_vacuum }
+    } {2048 1}
+  }
+}
+  
 # EVIDENCE-OF: R-38001-03952 VACUUM only works on the main database. It
 # is not possible to VACUUM an attached database file.
 forcedelete test.db2
 create_db { PRAGMA auto_vacuum = NONE }
 do_execsql_test e_vacuum-2.1.1 {
   ATTACH 'test.db2' AS aux;
   PRAGMA aux.page_size = 1024;
Changes to test/e_walauto.test.
@@ -19,14 +19,19 @@
 # accessing the same coherent view of the "test.db-shm" file. This doesn't
 # work on OpenBSD.
 #
 if {$tcl_platform(os) == "OpenBSD"} {
   finish_test
   return
 }
+
+# This module uses hard-coded offsets which do not work if the reserved_bytes
+# value is nonzero.
+if {[nonzero_reserved_bytes]} {finish_test; return;}
+
 
 proc read_nbackfill {} {
   seek $::shmfd 96
   binary scan [read $::shmfd 4] n nBackfill
   set nBackfill
 }
 proc read_mxframe {} {
Changes to test/eqp.test.
@@ -512,53 +512,55 @@
   1 0 0 {SCAN TABLE t1 USING COVERING INDEX i1}
   2 0 0 {SCAN TABLE t2}
   2 0 0 {USE TEMP B-TREE FOR ORDER BY}
   0 0 0 {COMPOUND SUBQUERIES 1 AND 2 (EXCEPT)}
 }
 
 
-#-------------------------------------------------------------------------
-# The following tests - eqp-6.* - test that the example C code on 
-# documentation page eqp.html works. The C code is duplicated in test1.c
-# and wrapped in Tcl command [print_explain_query_plan] 
-#
-set boilerplate {
-  proc explain_query_plan {db sql} {
-    set stmt [sqlite3_prepare_v2 db $sql -1 DUMMY]
-    print_explain_query_plan $stmt
-    sqlite3_finalize $stmt
-  }
-  sqlite3 db test.db
-  explain_query_plan db {%SQL%}
-  db close
-  exit
-}
-
-# Do a "Print Explain Query Plan" test.
-proc do_peqp_test {tn sql res} {
-  set fd [open script.tcl w]
-  puts $fd [string map [list %SQL% $sql] $::boilerplate]
-  close $fd
-
-  uplevel do_test $tn [list {
-    set fd [open "|[info nameofexec] script.tcl"]
-    set data [read $fd]
-    close $fd
-    set data
-  }] [list $res]
-}
-
-do_peqp_test 6.1 {
-  SELECT a, b FROM t1 EXCEPT SELECT d, 99 FROM t2 ORDER BY 1
-} [string trimleft {
+if {![nonzero_reserved_bytes]} {
+  #-------------------------------------------------------------------------
+  # The following tests - eqp-6.* - test that the example C code on 
+  # documentation page eqp.html works. The C code is duplicated in test1.c
+  # and wrapped in Tcl command [print_explain_query_plan] 
+  #
+  set boilerplate {
+    proc explain_query_plan {db sql} {
+      set stmt [sqlite3_prepare_v2 db $sql -1 DUMMY]
+      print_explain_query_plan $stmt
+      sqlite3_finalize $stmt
+    }
+    sqlite3 db test.db
+    explain_query_plan db {%SQL%}
+    db close
+    exit
+  }
+  
+  # Do a "Print Explain Query Plan" test.
+  proc do_peqp_test {tn sql res} {
+    set fd [open script.tcl w]
+    puts $fd [string map [list %SQL% $sql] $::boilerplate]
+    close $fd
+  
+    uplevel do_test $tn [list {
+      set fd [open "|[info nameofexec] script.tcl"]
+      set data [read $fd]
+      close $fd
+      set data
+    }] [list $res]
+  }
+  
+  do_peqp_test 6.1 {
+    SELECT a, b FROM t1 EXCEPT SELECT d, 99 FROM t2 ORDER BY 1
+  } [string trimleft {
 1 0 0 SCAN TABLE t1 USING COVERING INDEX i2
 2 0 0 SCAN TABLE t2
 2 0 0 USE TEMP B-TREE FOR ORDER BY
 0 0 0 COMPOUND SUBQUERIES 1 AND 2 (EXCEPT)
 }]
+}
 
 #-------------------------------------------------------------------------
 # The following tests - eqp-7.* - test that queries that use the OP_Count
 # optimization return something sensible with EQP.
 #
 drop_all_tables
 
Changes to test/filefmt.test.
@@ -140,17 +140,19 @@
   PRAGMA auto_vacuum = 0;
   CREATE TABLE t1(a);
   CREATE INDEX i1 ON t1(a);
   INSERT INTO t1 VALUES(a_string(3000));
   CREATE TABLE t2(a);
   INSERT INTO t2 VALUES(1);
 } {}
-do_test filefmt-2.1.2 {
-  hexio_read test.db 28 4
-} {00000009}
+if {![nonzero_reserved_bytes]} {
+  do_test filefmt-2.1.2 {
+    hexio_read test.db 28 4
+  } {00000009}
+}
 
 do_test filefmt-2.1.3 {
   sql36231 { INSERT INTO t1 VALUES(a_string(3000)) }
 } {}
 
 do_execsql_test filefmt-2.1.4 { INSERT INTO t2 VALUES(2) } {}
 integrity_check filefmt-2.1.5
@@ -166,17 +168,19 @@
   PRAGMA auto_vacuum = 0;
   CREATE TABLE t1(a);
   CREATE INDEX i1 ON t1(a);
   INSERT INTO t1 VALUES(a_string(3000));
   CREATE TABLE t2(a);
   INSERT INTO t2 VALUES(1);
 } {}
-do_test filefmt-2.2.2 {
-  hexio_read test.db 28 4
-} {00000009}
+if {![nonzero_reserved_bytes]} {
+  do_test filefmt-2.2.2 {
+    hexio_read test.db 28 4
+  } {00000009}
+}
 
 do_test filefmt-2.2.3 {
   sql36231 { INSERT INTO t1 VALUES(a_string(3000)) }
 } {}
 
 do_execsql_test filefmt-2.2.4 { 
   PRAGMA integrity_check;
Changes to test/fts4growth.test.
@@ -55,47 +55,53 @@
   } {
     execsql { INSERT INTO x1 VALUES($L) }
   }
   execsql { 
     INSERT INTO x1(x1) VALUES('merge=4,4');
     SELECT level, end_block, length(root) FROM x1_segdir;
   }
-} {0 {0 110} 110 0 {0 132} 132 0 {0 129} 129 1 {128 658} 2}
+} {1 {224 921} 2}
 
 do_execsql_test 1.5 {
   SELECT length(block) FROM x1_segments;
-} {658 {}}
+} {921 {}}
 
 do_test 1.6 {
   foreach L {
     {'Twas Mulga Bill, from Eaglehawk, that sought his own abode,}
     {That perched above Dead Man's Creek, beside the mountain road.}
     {He turned the cycle down the hill and mounted for the fray,}
     {But 'ere he'd gone a dozen yards it bolted clean away.}
+
     {It left the track, and through the trees, just like a silver steak,}
     {It whistled down the awful slope towards the Dead Man's Creek.}
     {It shaved a stump by half an inch, it dodged a big white-box:}
     {The very wallaroos in fright went scrambling up the rocks,}
+
     {The wombats hiding in their caves dug deeper underground,}
     {As Mulga Bill, as white as chalk, sat tight to every bound.}
     {It struck a stone and gave a spring that cleared a fallen tree,}
     {It raced beside a precipice as close as close could be;}
+
     {And then as Mulga Bill let out one last despairing shriek}
     {It made a leap of twenty feet into the Dead Man's Creek.}
+    {It shaved a stump by half an inch, it dodged a big white-box:}
+    {The very wallaroos in fright went scrambling up the rocks,}
+    {The wombats hiding in their caves dug deeper underground,}
   } {
     execsql { INSERT INTO x1 VALUES($L) }
   }
   execsql { 
     SELECT level, end_block, length(root) FROM x1_segdir;
   }
-} {1 {128 658} 2 1 {130 1377} 6 0 {0 117} 117}
+} {1 {224 921} 2 1 {226 1230} 7 0 {0 98} 98}
 
 do_execsql_test 1.7 {
-  SELECT sum(length(block)) FROM x1_segments WHERE blockid IN (129, 130);
-} {1377}
+  SELECT sum(length(block)) FROM x1_segments WHERE blockid IN (224,225,226)
+} {1230}
 
 #-------------------------------------------------------------------------
 #
 do_execsql_test 2.1 { 
   CREATE TABLE t1(docid, words);
   CREATE VIRTUAL TABLE x2 USING fts4;
 }
@@ -127,32 +133,32 @@
 
 do_execsql_test 2.5 { 
   SELECT end_block FROM x2_segdir WHERE level=3;
   INSERT INTO x2(x2) VALUES('merge=4,4');
   SELECT end_block FROM x2_segdir WHERE level=3;
   INSERT INTO x2(x2) VALUES('merge=4,4');
   SELECT end_block FROM x2_segdir WHERE level=3;
-} {{3828 -3430} {3828 -10191} {3828 -14109}}
+} {{5588 -3950} {5588 -11766} {5588 -15541}}
 
 do_execsql_test 2.6 {
   SELECT sum(length(block)) FROM x2_segdir, x2_segments WHERE 
     blockid BETWEEN start_block AND leaves_end_block
     AND level=3
-} {14109}
+} {15541}
 
 do_execsql_test 2.7 { 
   INSERT INTO x2(x2) VALUES('merge=1000,4');
   SELECT end_block FROM x2_segdir WHERE level=3;
-} {{3828 86120}}
+} {{5588 127563}}
 
 do_execsql_test 2.8 {
   SELECT sum(length(block)) FROM x2_segdir, x2_segments WHERE 
     blockid BETWEEN start_block AND leaves_end_block
     AND level=3
-} {86120}
+} {127563}
 
 #--------------------------------------------------------------------------
 # Test that delete markers are removed from FTS segments when possible.
 # It is only possible to remove delete markers when the output of the
 # merge operation will become the oldest segment in the index.
 #
 #   3.1 - when the oldest segment is created by an 'optimize'.
@@ -387,50 +393,48 @@
 
 do_execsql_test 7.2 {
   INSERT INTO x6(x6) VALUES('merge=25,4');
   SELECT level, idx, end_block FROM x6_segdir;
 } {
   0 0 {118 117483} 0 1 {238 118006} 0 2 {358 118006} 
   0 3 {478 118006} 0 4 {598 118006} 0 5 {718 118006}
-  1 0 {16014 -51226}
+  1 0 {23694 -69477}
 }
 
 do_execsql_test 7.3 {
   UPDATE x6_segdir SET end_block = first(end_block) WHERE level=1;
   SELECT level, idx, end_block FROM x6_segdir;
 } {
   0 0 {118 117483} 0 1 {238 118006} 0 2 {358 118006} 
   0 3 {478 118006} 0 4 {598 118006} 0 5 {718 118006}
-  1 0 16014
+  1 0 23694
 }
 
 do_execsql_test 7.4 {
   INSERT INTO x6(x6) VALUES('merge=25,4');
   SELECT level, idx, end_block FROM x6_segdir;
 } {
   0 0 {118 117483} 0 1 {238 118006} 0 2 {358 118006} 
   0 3 {478 118006} 0 4 {598 118006} 0 5 {718 118006}
-  1 0 16014
+  1 0 23694
 }
 
 do_execsql_test 7.5 {
   INSERT INTO x6(x6) VALUES('merge=2500,4');
-  SELECT level, idx, end_block FROM x6_segdir;
+  SELECT level, idx, start_block, leaves_end_block, end_block FROM x6_segdir;
 } {
-  0 0 {598 118006} 0 1 {718 118006} 1 0 16014
+  1 0 719 1171 23694
 }
 
 do_execsql_test 7.6 {
   INSERT INTO x6(x6) VALUES('merge=2500,2');
   SELECT level, idx, start_block, leaves_end_block, end_block FROM x6_segdir;
 } {
-  2 0 23695 24147 {41262 633507}
+  1 0 719 1171 23694
 }
 
 do_execsql_test 7.7 {
   SELECT sum(length(block)) FROM x6_segments 
-  WHERE blockid BETWEEN 23695 AND 24147
-} {633507}
-
+} {635247}
 
 
 finish_test
Changes to test/fts4langid.test.
@@ -477,10 +477,10 @@
     INSERT INTO t6(t6) VALUES('merge=100,3');
     SELECT docid FROM t6 WHERE t6 MATCH '"zero zero"' AND lid=$lid;
   } {1 2 5}
 
   do_execsql_test 5.4.$lid.5 {
     SELECT count(*) FROM t6_segdir;
     SELECT count(*) FROM t6_segments;
-  } {4 4}
+  } {1 2}
 }
 finish_test
Changes to test/fts4merge.test.
@@ -51,49 +51,45 @@
       SELECT docid FROM t1 WHERE t1 MATCH 'zero one two three'
     } {123 132 213 231 312 321}
   }
   
   do_execsql_test 1.3 { 
     SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level 
   } {
-    0 {0 1 2 3} 
-    1 {0 1 2 3 4 5 6} 
     2 {0 1 2 3}
   }
   
   for {set i 0} {$i<100} {incr i} {
     do_execsql_test 1.4.$i { INSERT INTO t1(t1) VALUES('merge=1,4') }
     do_test 1.4.$i.2 { fts3_integrity_check t1 } ok
     do_execsql_test 1.4.$i.3 { 
       SELECT docid FROM t1 WHERE t1 MATCH 'zero one two three'
     } {123 132 213 231 312 321}
   }
   
   do_execsql_test 1.5 { 
     SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level 
   } {
-    2 {0 1}
     3 0
   }
   
   #-------------------------------------------------------------------------
   # Test cases 2.* test that errors in the xxx part of the 'merge=xxx' are
   # handled correctly.
   #
   do_execsql_test 2.0 "CREATE VIRTUAL TABLE t2 USING $mod"
   
   foreach {tn arg} {
     1   {merge=abc}
     2   {merge=%%%}
     3   {merge=,}
     4   {merge=5,}
     5   {merge=6,%}
     6   {merge=6,six}
     7   {merge=6,1}
-    8   {merge=6,0}
   } {
     do_catchsql_test 2.$tn { 
       INSERT INTO t2(t2) VALUES($arg);
     } {1 {SQL logic error or missing database}}
   }
   
   #-------------------------------------------------------------------------
@@ -115,19 +111,15 @@
     3 {0 1 2 3 4 5 6}
   }
   
   do_execsql_test 3.3 { 
     INSERT INTO t2(t2) VALUES('merge=1000000,2');
     SELECT level, group_concat(idx, ' ') FROM t2_segdir GROUP BY level 
   } {
-    0 0 
-    2 0
-    3 0 
     4 0
-    6 0
   }
   
   #-------------------------------------------------------------------------
   # Test cases 4.*
   #
   reset_db
   do_execsql_test 4.1 "
@@ -199,73 +191,71 @@
   }
   
   do_execsql_test 5.3 {
     INSERT INTO t1(t1) VALUES('merge=1,5');
     INSERT INTO t1(t1) VALUES('merge=1,5');
     SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level;
   } {
-    0 {0 1 2}
     1 {0 1 2 3 4 5 6 7 8 9 10 11 12 13 14} 
     2 {0 1 2 3}
   }
   
-  do_execsql_test 5.4 {SELECT quote(value) from t1_stat WHERE rowid=1} {X'0105'}
+  do_execsql_test 5.4 {SELECT quote(value) from t1_stat WHERE rowid=1} {X'010F'}
   do_test 5.5 {
     foreach docid [execsql {SELECT docid FROM t1}] {
       execsql {INSERT INTO t1 SELECT * FROM t1 WHERE docid=$docid}
     }
   } {}
   
-  do_execsql_test 5.6 {SELECT quote(value) from t1_stat WHERE rowid=1} {X'0105'}
+  do_execsql_test 5.6 {SELECT quote(value) from t1_stat WHERE rowid=1} {X'010F'}
   
   do_execsql_test 5.7 {
     SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level;
     SELECT quote(value) from t1_stat WHERE rowid=1;
   } {
-    0 {0 1 2 3 4 5 6 7 8 9 10} 
+    0 {0 1 2 3 4 5 6 7} 
     1 {0 1 2 3 4 5 6 7 8 9 10 11 12} 
-    2 {0 1 2 3 4 5 6 7}
-    X'0105'
+    2 {0 1 2 3 4 5 6 7} 
+    X'010F'
   }
   
   do_execsql_test 5.8 {
     INSERT INTO t1(t1) VALUES('merge=1,6');
     INSERT INTO t1(t1) VALUES('merge=1,6');
     SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level;
     SELECT quote(value) from t1_stat WHERE rowid=1;
   } {
-    0 {0 1 2 3 4} 
     1 {0 1 2 3 4 5 6 7 8 9 10 11 12 13} 
-    2 {0 1 2 3 4 5 6 7 8} X'0106'
+    2 {0 1 2 3 4 5 6 7 8} X'010E'
   }
   
   do_test 5.8.1 { fts3_integrity_check t1 } ok
   
   do_test 5.9 {
     set L [expr 16*16*7 + 16*3 + 12]
     foreach docid [execsql {
         SELECT docid FROM t1 UNION ALL SELECT docid FROM t1 LIMIT $L
     }] {
       execsql {INSERT INTO t1 SELECT * FROM t1 WHERE docid=$docid}
     }
   } {}
   
   do_execsql_test 5.10 {
     SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level;
     SELECT quote(value) from t1_stat WHERE rowid=1;
   } {
-    0 0 1 {0 1} 2 0 3 0 X'0106'
+    0 {0 1 2 3 4 5 6 7 8 9 10 11} 1 0 2 0 3 0 X'010E'
   }
   
   do_execsql_test 5.11 {
     INSERT INTO t1(t1) VALUES('merge=1,6');
     SELECT level, group_concat(idx, ' ') FROM t1_segdir GROUP BY level;
     SELECT quote(value) from t1_stat WHERE rowid=1;
   } {
-    0 0 1 {0 1} 2 0 3 0 X''
+    1 {0 1} 2 0 3 0 X'010E'
   }
   
   #-------------------------------------------------------------------------
   # Test cases 6.*
   #
   # At one point the following test caused an assert() to fail (because the
   # second 'merge=1,2' operation below actually "merges" a single input
Changes to test/fts4merge3.test.
@@ -58,26 +58,26 @@
       do_test 1.6.$i.2 { 
         sql2 "SELECT docid FROM t2 WHERE t2 MATCH 'abc'" 
       } {1485}
     }
 
     do_test 1.7 { sql2 { 
       SELECT level, count(*) FROM t2_segdir GROUP BY level ORDER BY 1
-    } } [list  0 1  2 18  3 5]
+    } } {2 15 3 5}
 
     # Using the old connection, insert many rows. 
     do_test 1.8 {
       for {set i 0} {$i < 1500} {incr i} {
         sql2 "INSERT INTO t2 SELECT content FROM t2 WHERE docid = $i"
       }
     } {}
 
     do_test 1.9 { sql2 { 
       SELECT level, count(*) FROM t2_segdir GROUP BY level ORDER BY 1
-    } } [list  0 13  1 13  2 5  3 6]
+    } } [list  0 12  1 13  2 4  3 6]
 
     # Run a big incr-merge operation on the db.
     do_test 1.10 { sql1 { INSERT INTO t2(t2) VALUES('merge=2000,2') } } {}
     do_test 1.11 { 
       sql2 "SELECT docid FROM t2 WHERE t2 MATCH 'abc'" 
     } {1485 21485}
 
@@ -93,13 +93,13 @@
     do_test 1.14 { 
       sql2 "INSERT INTO t2(t2) VALUES('optimize')"
       sql2 "SELECT docid FROM t2 WHERE t2 MATCH 'abc'" 
     } {1485 21485 22985}
 
     do_test 1.15 { sql2 { 
       SELECT level, count(*) FROM t2_segdir GROUP BY level ORDER BY 1
-    } } {6 1}
+    } } {4 1}
   }
 }
 
 
 finish_test
Added test/fts4opt.test.
# 2016 March 8
#
# The author disclaims copyright to this source code.  In place of
# a legal notice, here is a blessing:
#
#    May you do good and not evil.
#    May you find forgiveness for yourself and forgive others.
#    May you share freely, never taking more than you give.
#
#*************************************************************************
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/fts3_common.tcl
set ::testprefix fts4opt

# If SQLITE_ENABLE_FTS3 is defined, omit this file.
ifcapable !fts3 {
  finish_test
  return
}

# Create the fts_kjv_genesis procedure which fills and FTS3/4 table 
# with the complete text of the Book of Genesis.
#
source $testdir/genesis.tcl

do_execsql_test 1.0 { CREATE TABLE t1(docid, words) }
fts_kjv_genesis

#-------------------------------------------------------------------------
# Argument $db is an open database handle. $tbl is the name of an FTS3/4
# table with the database. This command rearranges the contents of the
# %_segdir table so that all segments within each index are on the same
# level. This means that the 'merge' command can then be used for an
# incremental optimize routine.
#
proc prepare_for_optimize {db tbl} {
  $db eval [string map [list % $tbl] {
    BEGIN;
      CREATE TEMP TABLE tmp_segdir(
        level, idx, start_block, leaves_end_block, end_block, root
      );

      INSERT INTO temp.tmp_segdir 
        SELECT 
        1024*(o.level / 1024) + 32,                                -- level
        sum(o.level<i.level OR (o.level=i.level AND o.idx>i.idx)), -- idx
        o.start_block, o.leaves_end_block, o.end_block, o.root     -- other
        FROM %_segdir o, %_segdir i 
        WHERE (o.level / 1024) = (i.level / 1024)
        GROUP BY o.level, o.idx;
  
      DELETE FROM %_segdir;
      INSERT INTO %_segdir SELECT * FROM temp.tmp_segdir;
      DROP TABLE temp.tmp_segdir;
  
    COMMIT;
  }]
}

do_test 1.1 {
  execsql { CREATE VIRTUAL TABLE t2 USING fts4(words, prefix="1,2,3") }
  foreach {docid words} [db eval { SELECT * FROM t1 }] {
    execsql { INSERT INTO t2(docid, words) VALUES($docid, $words) }
  }
} {}

do_execsql_test 1.2 {
  SELECT level, count(*) FROM t2_segdir GROUP BY level
} {
  0    13    1 15    2 5 
  1024 13 1025 15 1026 5 
  2048 13 2049 15 2050 5 
  3072 13 3073 15 3074 5
}

do_execsql_test 1.3 { INSERT INTO t2(t2) VALUES('integrity-check') }
prepare_for_optimize db t2
do_execsql_test 1.4 { INSERT INTO t2(t2) VALUES('integrity-check') }

do_execsql_test 1.5 {
  SELECT level, count(*) FROM t2_segdir GROUP BY level
} {
  32   33 
  1056 33 
  2080 33 
  3104 33
}

do_test 1.6 {
  while 1 {
    set tc1 [db total_changes]
    execsql { INSERT INTO t2(t2) VALUES('merge=5,2') }
    set tc2 [db total_changes]
    if {($tc2 - $tc1) < 2} break
  }
  execsql { SELECT level, count(*) FROM t2_segdir GROUP BY level }
} {33 1 1057 1 2081 1 3105 1}
do_execsql_test 1.7 { INSERT INTO t2(t2) VALUES('integrity-check') }

do_execsql_test 1.8 {
  INSERT INTO t2(words) SELECT words FROM t1;
  SELECT level, count(*) FROM t2_segdir GROUP BY level;
} {0 2 1024 2 2048 2 3072 2}

#-------------------------------------------------------------------------

do_execsql_test 2.0 {
  DELETE FROM t2;
}
do_test 2.1 {
  foreach {docid words} [db eval { SELECT * FROM t1 }] {
    execsql { INSERT INTO t2(docid, words) VALUES($docid, $words) }
  }

  set i 0
  foreach {docid words} [db eval { SELECT * FROM t1 }] {
    if {[incr i] % 2} { execsql { DELETE FROM t2 WHERE docid = $docid } }
  }

  set i 0
  foreach {docid words} [db eval { SELECT * FROM t1 }] {
    if {[incr i] % 3} {
      execsql { INSERT OR REPLACE INTO t2(docid, words) VALUES($docid, $words) }
    }
  }
} {}

do_execsql_test 2.2 {
  SELECT level, count(*) FROM t2_segdir GROUP BY level
} {
  0    10    1 15    2 12 
  1024 10 1025 15 1026 12 
  2048 10 2049 15 2050 12 
  3072 10 3073 15 3074 12
}

do_execsql_test 2.3 { INSERT INTO t2(t2) VALUES('integrity-check') }
prepare_for_optimize db t2
do_execsql_test 2.4 { INSERT INTO t2(t2) VALUES('integrity-check') }

do_execsql_test 2.5 {
  SELECT level, count(*) FROM t2_segdir GROUP BY level
} {
    32 37 
  1056 37 
  2080 37 
  3104 37
}

do_test 2.6 {
  while 1 {
    set tc1 [db total_changes]
    execsql { INSERT INTO t2(t2) VALUES('merge=5,2') }
    set tc2 [db total_changes]
    if {($tc2 - $tc1) < 2} break
  }
  execsql { SELECT level, count(*) FROM t2_segdir GROUP BY level }
} {33 1 1057 1 2081 1 3105 1}
do_execsql_test 2.7 { INSERT INTO t2(t2) VALUES('integrity-check') }

do_execsql_test 2.8 {
  INSERT INTO t2(words) SELECT words FROM t1;
  SELECT level, count(*) FROM t2_segdir GROUP BY level;
} {0 2 1024 2 2048 2 3072 2}

#-------------------------------------------------------------------------
# Check that 'optimize' works when there is data in the in-memory hash
# table, but no segments at all on disk.
#
do_execsql_test 3.1 {
  CREATE VIRTUAL TABLE fts USING fts4 (t);
  INSERT INTO fts (fts) VALUES ('optimize');
}
do_execsql_test 3.2 {
  INSERT INTO fts(fts) VALUES('integrity-check');
  SELECT count(*) FROM fts_segdir;
} {0}
do_execsql_test 3.3 {
  BEGIN;
  INSERT INTO fts (rowid, t) VALUES (2, 'test');
  INSERT INTO fts (fts) VALUES ('optimize');
  COMMIT;
  SELECT level, idx FROM fts_segdir;
} {0 0}
do_execsql_test 3.4 {
  INSERT INTO fts(fts) VALUES('integrity-check');
  SELECT rowid FROM fts WHERE fts MATCH 'test';
} {2}
do_execsql_test 3.5 {
  INSERT INTO fts (fts) VALUES ('optimize');
  INSERT INTO fts(fts) VALUES('integrity-check');
}
do_test 3.6 {
  set c1 [db total_changes]
  execsql { INSERT INTO fts (fts) VALUES ('optimize') }
  expr {[db total_changes] - $c1}
} {1}
do_test 3.7 {
  execsql { INSERT INTO fts (rowid, t) VALUES (3, 'xyz') }
  set c1 [db total_changes]
  execsql { INSERT INTO fts (fts) VALUES ('optimize') }
  expr {([db total_changes] - $c1) > 1}
} {1}
do_test 3.8 {
  set c1 [db total_changes]
  execsql { INSERT INTO fts (fts) VALUES ('optimize') }
  expr {[db total_changes] - $c1}
} {1}

finish_test
Changes to test/in5.test.
@@ -178,9 +178,44 @@
 do_execsql_test 6.3.1 {
   CREATE TABLE x1(a);
   CREATE TABLE x2(b);
   INSERT INTO x1 VALUES(1), (1), (2);
   INSERT INTO x2 VALUES(1), (2);
   SELECT count(*) FROM x2 WHERE b IN (SELECT DISTINCT a FROM x1 LIMIT 2);
 } {2}
+
+#-------------------------------------------------------------------------
+# Test to confirm that bug [5e3c886796e5] is fixed.
+#
+do_execsql_test 7.1 {
+  CREATE TABLE y1(a, b);
+  CREATE TABLE y2(c);
+
+  INSERT INTO y1 VALUES(1,     'one');
+  INSERT INTO y1 VALUES('two', 'two');
+  INSERT INTO y1 VALUES(3,     'three');
+
+  INSERT INTO y2 VALUES('one');
+  INSERT INTO y2 VALUES('two');
+  INSERT INTO y2 VALUES('three');
+} {}
+
+do_execsql_test 7.2.1 {
+  SELECT a FROM y1 WHERE b NOT IN (SELECT a FROM y2);
+} {1 3}
+do_execsql_test 7.2.2 {
+  SELECT a FROM y1 WHERE b IN (SELECT a FROM y2);
+} {two}
+
+do_execsql_test 7.3.1 {
+  CREATE INDEX y2c ON y2(c);
+  SELECT a FROM y1 WHERE b NOT IN (SELECT a FROM y2);
+} {1 3}
+do_execsql_test 7.3.2 {
+  SELECT a FROM y1 WHERE b IN (SELECT a FROM y2);
+} {two}
+
+finish_test
+
+
 
 finish_test
Changes to test/incrblob.test.
122
123
124
125
126
127
128





129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
  db close
  forcedelete test.db test.db-journal

  sqlite3 db test.db
  execsql "PRAGMA mmap_size = 0"
  execsql "PRAGMA auto_vacuum = $AutoVacuumMode"






  do_test incrblob-2.$AutoVacuumMode.1 {
    set ::str [string repeat abcdefghij 2900]
    execsql {
      BEGIN;
      CREATE TABLE blobs(k PRIMARY KEY, v BLOB, i INTEGER);
      DELETE FROM blobs;
      INSERT INTO blobs VALUES('one', $::str || randstr(500,500), 45);
      COMMIT;
    }
    expr [file size test.db]/1024
  } [expr 31 + $AutoVacuumMode]

  ifcapable autovacuum {
    do_test incrblob-2.$AutoVacuumMode.2 {
      execsql {
        PRAGMA auto_vacuum;
      }
    } $AutoVacuumMode







>
>
>
>
>










|







122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
  db close
  forcedelete test.db test.db-journal

  sqlite3 db test.db
  execsql "PRAGMA mmap_size = 0"
  execsql "PRAGMA auto_vacuum = $AutoVacuumMode"

  # Extra value added to size answers
  set ib2_extra 0
  if {$AutoVacuumMode} {incr ib2_extra}
  if {[nonzero_reserved_bytes]} {incr ib2_extra}

  do_test incrblob-2.$AutoVacuumMode.1 {
    set ::str [string repeat abcdefghij 2900]
    execsql {
      BEGIN;
      CREATE TABLE blobs(k PRIMARY KEY, v BLOB, i INTEGER);
      DELETE FROM blobs;
      INSERT INTO blobs VALUES('one', $::str || randstr(500,500), 45);
      COMMIT;
    }
    expr [file size test.db]/1024
  } [expr 31 + $ib2_extra]

  ifcapable autovacuum {
    do_test incrblob-2.$AutoVacuumMode.2 {
      execsql {
        PRAGMA auto_vacuum;
      }
    } $AutoVacuumMode
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
    close $::blob
  
    # If the database is not in auto-vacuum mode, the whole of
    # the overflow-chain must be scanned. In auto-vacuum mode,
    # sqlite uses the ptrmap pages to avoid reading the other pages.
    #
    nRead db
  } [expr $AutoVacuumMode ? 4 : 30]

  do_test incrblob-2.$AutoVacuumMode.4 {
    string range [db one {SELECT v FROM blobs}] end-19 end
  } $::fragment

  do_test incrblob-2.$AutoVacuumMode.5 {
    # Open and close the db to make sure the page cache is empty.







|







164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
    close $::blob
  
    # If the database is not in auto-vacuum mode, the whole of
    # the overflow-chain must be scanned. In auto-vacuum mode,
    # sqlite uses the ptrmap pages to avoid reading the other pages.
    #
    nRead db
  } [expr $AutoVacuumMode ? 4 : 30+$ib2_extra]

  do_test incrblob-2.$AutoVacuumMode.4 {
    string range [db one {SELECT v FROM blobs}] end-19 end
  } $::fragment

  do_test incrblob-2.$AutoVacuumMode.5 {
    # Open and close the db to make sure the page cache is empty.
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
    flush $::blob
  
    # If the database is not in auto-vacuum mode, the whole of
    # the overflow-chain must be scanned. In auto-vacuum mode,
    # sqlite uses the ptrmap pages to avoid reading the other pages.
    #
    nRead db
  } [expr $AutoVacuumMode ? 4 : 30]

  # Pages 1 (the write-counter) and 32 (the blob data) were written.
  do_test incrblob-2.$AutoVacuumMode.6 {
    close $::blob
    nWrite db
  } 2








|







188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
    flush $::blob
  
    # If the database is not in auto-vacuum mode, the whole of
    # the overflow-chain must be scanned. In auto-vacuum mode,
    # sqlite uses the ptrmap pages to avoid reading the other pages.
    #
    nRead db
  } [expr $AutoVacuumMode ? 4 : 30 + $ib2_extra]

  # Pages 1 (the write-counter) and 32 (the blob data) were written.
  do_test incrblob-2.$AutoVacuumMode.6 {
    close $::blob
    nWrite db
  } 2

206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
    execsql { PRAGMA mmap_size = 0 }

    execsql { SELECT i FROM blobs } 
  } {45}

  do_test incrblob-2.$AutoVacuumMode.9 {
    nRead db
  } [expr $AutoVacuumMode ? 4 : 30]
}
sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit)

#------------------------------------------------------------------------
# incrblob-3.*: 
#
# Test the outcome of trying to write to a read-only blob handle.







|







211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
    execsql { PRAGMA mmap_size = 0 }

    execsql { SELECT i FROM blobs } 
  } {45}

  do_test incrblob-2.$AutoVacuumMode.9 {
    nRead db
  } [expr $AutoVacuumMode ? 4 : 30 + $ib2_extra]
}
sqlite3_soft_heap_limit $cmdlinearg(soft-heap-limit)

#------------------------------------------------------------------------
# incrblob-3.*: 
#
# Test the outcome of trying to write to a read-only blob handle.
# incrblob-5.*: 
#
#     Test that opening a blob in an attached database works.
#
ifcapable attach {
  do_test incrblob-5.1 {
    forcedelete test2.db test2.db-journal
    set ::size [expr [file size [info script]]]
    execsql {
      ATTACH 'test2.db' AS aux;
      CREATE TABLE aux.files(name, text);
      INSERT INTO aux.files VALUES('this one', zeroblob($::size));
    }
    set fd  [db incrblob aux files text 1]
    fconfigure $fd -translation binary
    set fd2 [open [info script]]
    fconfigure $fd2 -translation binary
    puts -nonewline $fd [read $fd2]
    close $fd
    close $fd2
    set ::text [db one {select text from aux.files}]
    string length $::text
  } [file size [info script]]
  do_test incrblob-5.2 {
    set fd2 [open [info script]]
    fconfigure $fd2 -translation binary
    set ::data [read $fd2]
    close $fd2
    set ::data
  } $::text
}
# incrblob-5.*: 
#
#     Test that opening a blob in an attached database works.
#
ifcapable attach {
  do_test incrblob-5.1 {
    forcedelete test2.db test2.db-journal
    set ::size [expr [file size $::cmdlinearg(INFO_SCRIPT)]]
    execsql {
      ATTACH 'test2.db' AS aux;
      CREATE TABLE aux.files(name, text);
      INSERT INTO aux.files VALUES('this one', zeroblob($::size));
    }
    set fd  [db incrblob aux files text 1]
    fconfigure $fd -translation binary
    set fd2 [open $::cmdlinearg(INFO_SCRIPT)]
    fconfigure $fd2 -translation binary
    puts -nonewline $fd [read $fd2]
    close $fd
    close $fd2
    set ::text [db one {select text from aux.files}]
    string length $::text
  } [file size $::cmdlinearg(INFO_SCRIPT)]
  do_test incrblob-5.2 {
    set fd2 [open $::cmdlinearg(INFO_SCRIPT)]
    fconfigure $fd2 -translation binary
    set ::data [read $fd2]
    close $fd2
    set ::data
  } $::text
}

    execsql {
      SELECT d FROM t1;
    }
  } {15}

}

set fd [open [info script]]
fconfigure $fd -translation binary
set ::data [read $fd 14000]
close $fd

db close
forcedelete test.db test.db-journal
sqlite3 db test.db

    execsql {
      SELECT d FROM t1;
    }
  } {15}

}

set fd [open $::cmdlinearg(INFO_SCRIPT)]
fconfigure $fd -translation binary
set ::data [read $fd 14000]
close $fd

db close
forcedelete test.db test.db-journal
sqlite3 db test.db
Changes to test/incrblob_err.test.
  finish_test
  return
}

source $testdir/malloc_common.tcl

unset -nocomplain ::fd ::data
set ::fd [open [info script]]
set ::data [read $::fd]
close $::fd

do_malloc_test 1 -tclprep {
  set bytes [file size [info script]]
  execsql {
    CREATE TABLE blobs(k, v BLOB);
    INSERT INTO blobs VALUES(1, zeroblob($::bytes));
  }
} -tclbody {
  set ::blob [db incrblob blobs v 1]
  fconfigure $::blob -translation binary
  finish_test
  return
}

source $testdir/malloc_common.tcl

unset -nocomplain ::fd ::data
set ::fd [open $::cmdlinearg(INFO_SCRIPT)]
set ::data [read $::fd]
close $::fd

do_malloc_test 1 -tclprep {
  set bytes [file size $::cmdlinearg(INFO_SCRIPT)]
  execsql {
    CREATE TABLE blobs(k, v BLOB);
    INSERT INTO blobs VALUES(1, zeroblob($::bytes));
  }
} -tclbody {
  set ::blob [db incrblob blobs v 1]
  fconfigure $::blob -translation binary
Changes to test/io.test.
    # that the file is now greater than 20000 bytes in size.
    list [expr [file size test.db]>20000] [nSync]
  } {1 0}
  do_test io-3.3 {
    # The COMMIT requires a single fsync() - to the database file.
    execsql { COMMIT }
    list [file size test.db] [nSync]
  } {39936 1}
}

#----------------------------------------------------------------------
# Test cases io-4.* test the IOCAP_SAFE_APPEND optimization.
#
sqlite3_simulate_device -char safe_append

    # that the file is now greater than 20000 bytes in size.
    list [expr [file size test.db]>20000] [nSync]
  } {1 0}
  do_test io-3.3 {
    # The COMMIT requires a single fsync() - to the database file.
    execsql { COMMIT }
    list [file size test.db] [nSync]
  } "[expr {[nonzero_reserved_bytes]?40960:39936}] 1"
}

#----------------------------------------------------------------------
# Test cases io-4.* test the IOCAP_SAFE_APPEND optimization.
#
sqlite3_simulate_device -char safe_append

Changes to test/memsubsys1.test.
  expr {$pg_used<24}
} 1
do_test memsubsys1-7.4 {
  set pg_ovfl [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 0] 2]
} 0
do_test memsubsys1-7.5 {
  set maxreq [lindex [sqlite3_status SQLITE_STATUS_MALLOC_SIZE 0] 2]
  expr {$maxreq<4100}
} 1
do_test memsubsys1-7.6 {
  set s_used [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_USED 0] 2]
} 1
do_test memsubsys1-7.7 {
  set s_ovfl [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_OVERFLOW 0] 2]
} 0

  expr {$pg_used<24}
} 1
do_test memsubsys1-7.4 {
  set pg_ovfl [lindex [sqlite3_status SQLITE_STATUS_PAGECACHE_OVERFLOW 0] 2]
} 0
do_test memsubsys1-7.5 {
  set maxreq [lindex [sqlite3_status SQLITE_STATUS_MALLOC_SIZE 0] 2]
  expr {$maxreq<4100 + 4200*[nonzero_reserved_bytes]}
} 1
do_test memsubsys1-7.6 {
  set s_used [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_USED 0] 2]
} 1
do_test memsubsys1-7.7 {
  set s_ovfl [lindex [sqlite3_status SQLITE_STATUS_SCRATCH_OVERFLOW 0] 2]
} 0
Changes to test/mmap1.test.
      sql1 "SELECT count(*) FROM t1; PRAGMA integrity_check ; PRAGMA page_count"
    } {32 ok 77}

    # Have connection 2 shrink the file. Check connection 1 can still read it.
    sql2 { DELETE FROM t1 WHERE rowid%2; }
    do_test $t.$tn.2 {
      sql1 "SELECT count(*) FROM t1; PRAGMA integrity_check ; PRAGMA page_count"
    } {16 ok 42}

    # Have connection 2 grow the file. Check connection 1 can still read it.
    sql2 { INSERT INTO t1 SELECT rblob(500), rblob(500) FROM t1 }
    do_test $t.$tn.3 {
      sql1 "SELECT count(*) FROM t1; PRAGMA integrity_check ; PRAGMA page_count"
    } {32 ok 79}

    # Have connection 2 grow the file again. Check connection 1 is still ok.
    sql2 { INSERT INTO t1 SELECT rblob(500), rblob(500) FROM t1 }
    do_test $t.$tn.4 {
      sql1 "SELECT count(*) FROM t1; PRAGMA integrity_check ; PRAGMA page_count"
    } {64 ok 149}

    # Check that the number of pages read by connection 1 indicates that the
    # "PRAGMA mmap_size" command worked.

    do_test $t.$tn.5 { nRead db } $nRead

  }
}

set ::rcnt 0
proc rblob {n} {
  set ::rcnt [expr (($::rcnt << 3) + $::rcnt + 456) & 0xFFFFFFFF]
  set str [format %.8x [expr $::rcnt ^ 0xbdf20da3]]
      sql1 "SELECT count(*) FROM t1; PRAGMA integrity_check ; PRAGMA page_count"
    } {32 ok 77}

    # Have connection 2 shrink the file. Check connection 1 can still read it.
    sql2 { DELETE FROM t1 WHERE rowid%2; }
    do_test $t.$tn.2 {
      sql1 "SELECT count(*) FROM t1; PRAGMA integrity_check ; PRAGMA page_count"
    } "16 ok [expr {42+[nonzero_reserved_bytes]}]"

    # Have connection 2 grow the file. Check connection 1 can still read it.
    sql2 { INSERT INTO t1 SELECT rblob(500), rblob(500) FROM t1 }
    do_test $t.$tn.3 {
      sql1 "SELECT count(*) FROM t1; PRAGMA integrity_check ; PRAGMA page_count"
    } {32 ok 79}

    # Have connection 2 grow the file again. Check connection 1 is still ok.
    sql2 { INSERT INTO t1 SELECT rblob(500), rblob(500) FROM t1 }
    do_test $t.$tn.4 {
      sql1 "SELECT count(*) FROM t1; PRAGMA integrity_check ; PRAGMA page_count"
    } {64 ok 149}

    # Check that the number of pages read by connection 1 indicates that the
    # "PRAGMA mmap_size" command worked.
    if {[nonzero_reserved_bytes]==0} {
      do_test $t.$tn.5 { nRead db } $nRead
    }
  }
}

set ::rcnt 0
proc rblob {n} {
  set ::rcnt [expr (($::rcnt << 3) + $::rcnt + 456) & 0xFFFFFFFF]
  set str [format %.8x [expr $::rcnt ^ 0xbdf20da3]]
Changes to test/mmap3.test.
ifcapable !mmap||!vtab {
  finish_test
  return
}
source $testdir/lock_common.tcl
set testprefix mmap3




do_test mmap3-1.0 {
  load_static_extension db wholenumber
  db eval {
    PRAGMA mmap_size=100000;
    CREATE TABLE t1(x, y);
    CREATE VIRTUAL TABLE nums USING wholenumber;
    INSERT INTO t1 SELECT value, randomblob(value) FROM nums

ifcapable !mmap||!vtab {
  finish_test
  return
}
source $testdir/lock_common.tcl
set testprefix mmap3

# A codec shuts down memory-mapped I/O
if {[nonzero_reserved_bytes]} {finish_test; return;}

do_test mmap3-1.0 {
  load_static_extension db wholenumber
  db eval {
    PRAGMA mmap_size=100000;
    CREATE TABLE t1(x, y);
    CREATE VIRTUAL TABLE nums USING wholenumber;
    INSERT INTO t1 SELECT value, randomblob(value) FROM nums
Changes to test/nan.test.
# SQLite always converts NaN into NULL so it is not possible to write
# a NaN value into the database file using SQLite.  The following series
# of tests writes a normal floating point value (0.5) into the database,
# then writes directly into the database file to change the 0.5 into NaN.
# Then it reads the value of the database to verify it is converted into
# NULL.
#

do_test nan-3.1 {
  db eval {
    DELETE FROM t1;
    INSERT INTO t1 VALUES(0.5);
    PRAGMA auto_vacuum=OFF;
    PRAGMA page_size=1024;
    VACUUM;
  }
  hexio_read test.db 2040 8
} {3FE0000000000000}
do_test nan-3.2 {
  db eval {
    SELECT x, typeof(x) FROM t1
  }
} {0.5 real}
do_test nan-3.3 {
  db close
  hexio_write test.db 2040 FFF8000000000000
  sqlite3 db test.db
  db eval {SELECT x, typeof(x) FROM t1}
} {{} null}
do_test nan-3.4 {
  db close
  hexio_write test.db 2040 7FF8000000000000
  sqlite3 db test.db
  db eval {SELECT x, typeof(x) FROM t1}
} {{} null}
do_test nan-3.5 {
  db close
  hexio_write test.db 2040 FFFFFFFFFFFFFFFF
  sqlite3 db test.db
  db eval {SELECT x, typeof(x) FROM t1}
} {{} null}
do_test nan-3.6 {
  db close
  hexio_write test.db 2040 7FFFFFFFFFFFFFFF
  sqlite3 db test.db
  db eval {SELECT x, typeof(x) FROM t1}
} {{} null}


# Verify that the sqlite3AtoF routine is able to handle extreme
# numbers.
#
do_test nan-4.1 {
  db eval {DELETE FROM t1}
  db eval "INSERT INTO t1 VALUES([string repeat 9 307].0)"
# SQLite always converts NaN into NULL so it is not possible to write
# a NaN value into the database file using SQLite.  The following series
# of tests writes a normal floating point value (0.5) into the database,
# then writes directly into the database file to change the 0.5 into NaN.
# Then it reads the value of the database to verify it is converted into
# NULL.
#
if {![nonzero_reserved_bytes]} {
  do_test nan-3.1 {
    db eval {
      DELETE FROM t1;
      INSERT INTO t1 VALUES(0.5);
      PRAGMA auto_vacuum=OFF;
      PRAGMA page_size=1024;
      VACUUM;
    }
    hexio_read test.db 2040 8
  } {3FE0000000000000}
  do_test nan-3.2 {
    db eval {
      SELECT x, typeof(x) FROM t1
    }
  } {0.5 real}
  do_test nan-3.3 {
    db close
    hexio_write test.db 2040 FFF8000000000000
    sqlite3 db test.db
    db eval {SELECT x, typeof(x) FROM t1}
  } {{} null}
  do_test nan-3.4 {
    db close
    hexio_write test.db 2040 7FF8000000000000
    sqlite3 db test.db
    db eval {SELECT x, typeof(x) FROM t1}
  } {{} null}
  do_test nan-3.5 {
    db close
    hexio_write test.db 2040 FFFFFFFFFFFFFFFF
    sqlite3 db test.db
    db eval {SELECT x, typeof(x) FROM t1}
  } {{} null}
  do_test nan-3.6 {
    db close
    hexio_write test.db 2040 7FFFFFFFFFFFFFFF
    sqlite3 db test.db
    db eval {SELECT x, typeof(x) FROM t1}
  } {{} null}
}

# Verify that the sqlite3AtoF routine is able to handle extreme
# numbers.
#
do_test nan-4.1 {
  db eval {DELETE FROM t1}
  db eval "INSERT INTO t1 VALUES([string repeat 9 307].0)"
Changes to test/nolock.test.
       xCheckReservedLock $::tvfs_calls(xCheckReservedLock) \
       xAccess $::tvfs_calls(xAccess)
} {xLock 0 xUnlock 0 xCheckReservedLock 0 xAccess 0}

db2 close
db close
tvfs delete
finish_test
       xCheckReservedLock $::tvfs_calls(xCheckReservedLock) \
       xAccess $::tvfs_calls(xAccess)
} {xLock 0 xUnlock 0 xCheckReservedLock 0 xAccess 0}

db2 close
db close
tvfs delete

# 2016-03-11:  Make sure all works when transitioning to WAL mode under nolock.
#
do_test nolock-4.1 {
  forcedelete test.db
  sqlite3 db file:test.db?nolock=1 -uri 1
  db eval {
     PRAGMA journal_mode=WAL;
     CREATE TABLE t1(x);
     INSERT INTO t1 VALUES('youngling');
     SELECT * FROM t1;
  }
} {delete youngling}
db close

do_test nolock-4.2 {
  forcedelete test.db
  sqlite3 db test.db
  db eval {
    PRAGMA journal_mode=WAL;
    CREATE TABLE t1(x);
    INSERT INTO t1 VALUES('catbird');
    SELECT * FROM t1;
  }
} {wal catbird}
do_test nolock-4.3 {
  db close
  sqlite3 db file:test.db?nolock=1 -uri 1
  set rc [catch {db eval {SELECT * FROM t1}} msg]
  lappend rc $msg
} {1 {unable to open database file}}

finish_test
Changes to test/pager1.test.
  testvfs tv -default 1
  tv sectorsize 4096
  faultsim_delete_and_reopen

  execsql { PRAGMA page_size = 1024 }
  for {set ii 0} {$ii < 4} {incr ii} { execsql "CREATE TABLE t${ii}(a, b)" }
} {}



do_test pager1-9.3.2 {
  sqlite3 db2 test.db2

  execsql {
    PRAGMA page_size = 4096;
    PRAGMA synchronous = OFF;
    CREATE TABLE t1(a, b);
    CREATE TABLE t2(a, b);
  } db2
  sqlite3_backup B db2 main db main
  B step 30
  list [B step 10000] [B finish]
} {SQLITE_DONE SQLITE_OK}
do_test pager1-9.3.3 {
  db2 close
  db close
  tv delete
  file size test.db2
} [file size test.db]


do_test pager1-9.4.1 {
  faultsim_delete_and_reopen
  sqlite3 db2 test.db2
  execsql {
    PRAGMA page_size = 4096;
    CREATE TABLE t1(a, b);
  testvfs tv -default 1
  tv sectorsize 4096
  faultsim_delete_and_reopen

  execsql { PRAGMA page_size = 1024 }
  for {set ii 0} {$ii < 4} {incr ii} { execsql "CREATE TABLE t${ii}(a, b)" }
} {}
if {[nonzero_reserved_bytes]} {
  # backup with a page size changes is not possible with the codec
  #
  do_test pager1-9.3.2codec {
    sqlite3 db2 test.db2

    execsql {
      PRAGMA page_size = 4096;
      PRAGMA synchronous = OFF;
      CREATE TABLE t1(a, b);
      CREATE TABLE t2(a, b);
    } db2
    sqlite3_backup B db2 main db main
    B step 30
    list [B step 10000] [B finish]
  } {SQLITE_READONLY SQLITE_READONLY}
  do_test pager1-9.3.3codec {
    db2 close
    db close
    tv delete
    file size test.db2
  } [file size test.db2]
} else {
  do_test pager1-9.3.2 {
    sqlite3 db2 test.db2
    execsql {
      PRAGMA page_size = 4096;
      PRAGMA synchronous = OFF;
      CREATE TABLE t1(a, b);
      CREATE TABLE t2(a, b);
    } db2
    sqlite3_backup B db2 main db main
    B step 30
    list [B step 10000] [B finish]
  } {SQLITE_DONE SQLITE_OK}
  do_test pager1-9.3.3 {
    db2 close
    db close
    tv delete
    file size test.db2
  } [file size test.db]
}

do_test pager1-9.4.1 {
  faultsim_delete_and_reopen
  sqlite3 db2 test.db2
  execsql {
    PRAGMA page_size = 4096;
    CREATE TABLE t1(a, b);
    PRAGMA auto_vacuum = full;
    PRAGMA locking_mode=exclusive;
    CREATE TABLE t1(a, b);
    INSERT INTO t1 VALUES(1, 2);
  }
  file size test.db
} [expr 1024*3]


do_test pager1-29.2 {
  execsql {
    PRAGMA page_size = 4096;
    VACUUM;
  }
  file size test.db
} [expr 4096*3]


#-------------------------------------------------------------------------
# Test that if an empty database file (size 0 bytes) is opened in 
# exclusive-locking mode, any journal file is deleted from the file-system
# without being rolled back. And that the RESERVED lock obtained while
# doing this is not released.
#
    PRAGMA auto_vacuum = full;
    PRAGMA locking_mode=exclusive;
    CREATE TABLE t1(a, b);
    INSERT INTO t1 VALUES(1, 2);
  }
  file size test.db
} [expr 1024*3]
if {[nonzero_reserved_bytes]} {
  # VACUUM with size changes is not possible with the codec.
  do_test pager1-29.2 {
    catchsql {
      PRAGMA page_size = 4096;
      VACUUM;
    }
  } {1 {attempt to write a readonly database}}
} else {
  do_test pager1-29.2 {
    execsql {
      PRAGMA page_size = 4096;
      VACUUM;
    }
    file size test.db
  } [expr 4096*3]
}

#-------------------------------------------------------------------------
# Test that if an empty database file (size 0 bytes) is opened in 
# exclusive-locking mode, any journal file is deleted from the file-system
# without being rolled back. And that the RESERVED lock obtained while
# doing this is not released.
#
Changes to test/pageropt.test.
# The focus of the tests in this file are to verify that the
# pager optimizations implemented in version 3.3.14 work.
#
# $Id: pageropt.test,v 1.5 2008/08/20 14:49:25 danielk1977 Exp $

set testdir [file dirname $argv0]
source $testdir/tester.tcl


ifcapable {!pager_pragmas||secure_delete||direct_read} {
  finish_test
  return
}





# Run the SQL statement supplied by the argument and return
# the results.  Prepend four integers to the beginning of the
# result which are
#
#     (1)  The number of page reads from the database
#     (2)  The number of page writes to the database
# The focus of the tests in this file are to verify that the
# pager optimizations implemented in version 3.3.14 work.
#
# $Id: pageropt.test,v 1.5 2008/08/20 14:49:25 danielk1977 Exp $

set testdir [file dirname $argv0]
source $testdir/tester.tcl
do_not_use_codec

ifcapable {!pager_pragmas||secure_delete||direct_read} {
  finish_test
  return
}

# A non-zero reserved_bytes value changes the number of pages in the 
# database file, which messes up the results in this test.
if {[nonzero_reserved_bytes]} {finish_test; return;}

# Run the SQL statement supplied by the argument and return
# the results.  Prepend four integers to the beginning of the
# result which are
#
#     (1)  The number of page reads from the database
#     (2)  The number of page writes to the database
Changes to test/permutations.test.
      puts "  $d"
      puts ""
    }
  }
  exit -1
}

if {[info script] == $argv0} {
  proc main {argv} {
    if {[llength $argv]==0} {
      help
    } else {
      set suite [lindex $argv 0]
      if {[info exists ::testspec($suite)]==0} help
      set extra ""
      if {[llength $argv]>1} { set extra [list -files [lrange $argv 1 end]] }
      eval run_tests $suite $::testspec($suite) $extra
    }
  }
  main $argv

      puts "  $d"
      puts ""
    }
  }
  exit -1
}

if {[file tail $argv0] == "permutations.test"} {
  proc main {argv} {
    if {[llength $argv]==0} {
      help
    } else {
      set suite [file tail [lindex $argv 0]]
      if {[info exists ::testspec($suite)]==0} help
      set extra ""
      if {[llength $argv]>1} { set extra [list -files [lrange $argv 1 end]] }
      eval run_tests $suite $::testspec($suite) $extra
    }
  }
  main $argv
Changes to test/pragma.test.
  catchsql {PRAGMA data_store_directory}
} {0 {}}

forcedelete data_dir
} ;# endif windows

database_may_be_corrupt


do_test 21.1 {
  # Create a corrupt database in testerr.db. And a non-corrupt at test.db.
  #
  db close
  forcedelete test.db
  sqlite3 db test.db
  execsql { 
    PRAGMA page_size = 1024;
    PRAGMA auto_vacuum = 0;
    CREATE TABLE t1(a PRIMARY KEY, b);
    INSERT INTO t1 VALUES(1, 1);
  }
  for {set i 0} {$i < 10} {incr i} {
    execsql { INSERT INTO t1 SELECT a + (1 << $i), b + (1 << $i) FROM t1 }
  }
  db close
  forcecopy test.db testerr.db
  hexio_write testerr.db 15000 [string repeat 55 100]
} {100}

set mainerr {*** in database main ***
Multiple uses for byte 672 of page 15}
set auxerr {*** in database aux ***
Multiple uses for byte 672 of page 15}

set mainerr {/{\*\*\* in database main \*\*\*
Multiple uses for byte 672 of page 15}.*/}
set auxerr {/{\*\*\* in database aux \*\*\*
Multiple uses for byte 672 of page 15}.*/}

do_test 22.2 {
  catch { db close }
  sqlite3 db testerr.db
  execsql { PRAGMA integrity_check }
} $mainerr

do_test 22.3.1 {
  catch { db close }
  sqlite3 db test.db
  execsql { 
    ATTACH 'testerr.db' AS 'aux';
    PRAGMA integrity_check;
  }
} $auxerr
do_test 22.3.2 {
  execsql { PRAGMA main.integrity_check; }
} {ok}
do_test 22.3.3 {
  execsql { PRAGMA aux.integrity_check; }
} $auxerr

do_test 22.4.1 {
  catch { db close }
  sqlite3 db testerr.db
  execsql { 
    ATTACH 'test.db' AS 'aux';
    PRAGMA integrity_check;
  }
} $mainerr
do_test 22.4.2 {
  execsql { PRAGMA main.integrity_check; }
} $mainerr
do_test 22.4.3 {
  execsql { PRAGMA aux.integrity_check; }
} {ok}


db close
forcedelete test.db test.db-wal test.db-journal
sqlite3 db test.db
sqlite3 db2 test.db
do_test 23.1 {
  db eval {
    CREATE TABLE t1(a INTEGER PRIMARY KEY,b,c,d);

  catchsql {PRAGMA data_store_directory}
} {0 {}}

forcedelete data_dir
} ;# endif windows

database_may_be_corrupt
if {![nonzero_reserved_bytes]} {

  do_test 21.1 {
    # Create a corrupt database in testerr.db. And a non-corrupt at test.db.
    #
    db close
    forcedelete test.db
    sqlite3 db test.db
    execsql { 
      PRAGMA page_size = 1024;
      PRAGMA auto_vacuum = 0;
      CREATE TABLE t1(a PRIMARY KEY, b);
      INSERT INTO t1 VALUES(1, 1);
    }
    for {set i 0} {$i < 10} {incr i} {
      execsql { INSERT INTO t1 SELECT a + (1 << $i), b + (1 << $i) FROM t1 }
    }
    db close
    forcecopy test.db testerr.db
    hexio_write testerr.db 15000 [string repeat 55 100]
  } {100}
  
  set mainerr {*** in database main ***
Multiple uses for byte 672 of page 15}
  set auxerr {*** in database aux ***
Multiple uses for byte 672 of page 15}
  
  set mainerr {/{\*\*\* in database main \*\*\*
Multiple uses for byte 672 of page 15}.*/}
  set auxerr {/{\*\*\* in database aux \*\*\*
Multiple uses for byte 672 of page 15}.*/}
  
  do_test 22.2 {
    catch { db close }
    sqlite3 db testerr.db
    execsql { PRAGMA integrity_check }
  } $mainerr
  
  do_test 22.3.1 {
    catch { db close }
    sqlite3 db test.db
    execsql { 
      ATTACH 'testerr.db' AS 'aux';
      PRAGMA integrity_check;
    }
  } $auxerr
  do_test 22.3.2 {
    execsql { PRAGMA main.integrity_check; }
  } {ok}
  do_test 22.3.3 {
    execsql { PRAGMA aux.integrity_check; }
  } $auxerr
  
  do_test 22.4.1 {
    catch { db close }
    sqlite3 db testerr.db
    execsql { 
      ATTACH 'test.db' AS 'aux';
      PRAGMA integrity_check;
    }
  } $mainerr
  do_test 22.4.2 {
    execsql { PRAGMA main.integrity_check; }
  } $mainerr
  do_test 22.4.3 {
    execsql { PRAGMA aux.integrity_check; }
  } {ok}
}
  
db close
forcedelete test.db test.db-wal test.db-journal
sqlite3 db test.db
sqlite3 db2 test.db
do_test 23.1 {
  db eval {
    CREATE TABLE t1(a INTEGER PRIMARY KEY,b,c,d);
Changes to test/pragma3.test.
# This file implements regression tests for SQLite library.
#
# This file implements tests for PRAGMA data_version command.
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl


do_execsql_test pragma3-100 {
  PRAGMA data_version;
} {1}
do_execsql_test pragma3-101 {
  PRAGMA temp.data_version;
} {1}

# This file implements regression tests for SQLite library.
#
# This file implements tests for PRAGMA data_version command.
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl
do_not_use_codec

do_execsql_test pragma3-100 {
  PRAGMA data_version;
} {1}
do_execsql_test pragma3-101 {
  PRAGMA temp.data_version;
} {1}
Changes to test/select4.test.
#    May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.  The
# focus of this file is testing UNION, INTERSECT and EXCEPT operators
# in SELECT statements.
#
# $Id: select4.test,v 1.30 2009/04/16 00:24:24 drh Exp $

set testdir [file dirname $argv0]
source $testdir/tester.tcl

# Most tests in this file depend on compound-select. But there are a couple
# right at the end that test DISTINCT, so we cannot omit the entire file.
#
#    May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.  The
# focus of this file is testing UNION, INTERSECT and EXCEPT operators
# in SELECT statements.
#


set testdir [file dirname $argv0]
source $testdir/tester.tcl

# Most tests in this file depend on compound-select. But there are a couple
# right at the end that test DISTINCT, so we cannot omit the entire file.
#
   WHERE t0.a=t1.a AND t1.a=33 AND t0.b=456
  UNION
  SELECT DISTINCT t0.id, t0.a, t0.b
    FROM tx AS t0, tx AS t1
   WHERE t0.a=t1.a AND t1.a=33 AND t0.b=789
   ORDER BY 1;
} {1 33 456 2 33 789}
finish_test
   WHERE t0.a=t1.a AND t1.a=33 AND t0.b=456
  UNION
  SELECT DISTINCT t0.id, t0.a, t0.b
    FROM tx AS t0, tx AS t1
   WHERE t0.a=t1.a AND t1.a=33 AND t0.b=789
   ORDER BY 1;
} {1 33 456 2 33 789}

# Enhancement (2016-03-15):  Use a co-routine for subqueries if the
# subquery is guaranteed to be the outer-most query
#
do_execsql_test select4-16.1 {
  DROP TABLE IF EXISTS t1;
  CREATE TABLE t1(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,
  PRIMARY KEY(a,b DESC)) WITHOUT ROWID;

  WITH RECURSIVE c(x) AS (VALUES(1) UNION ALL SELECT x+1 FROM c WHERE x<100)
  INSERT INTO t1(a,b,c,d)
    SELECT x%10, x/10, x, printf('xyz%dabc',x) FROM c;

  SELECT t3.c FROM 
    (SELECT a,max(b) AS m FROM t1 WHERE a>=5 GROUP BY a) AS t2
    JOIN t1 AS t3
  WHERE t2.a=t3.a AND t2.m=t3.b
  ORDER BY t3.a;
} {95 96 97 98 99}
do_execsql_test select4-16.2 {
  SELECT t3.c FROM 
    (SELECT a,max(b) AS m FROM t1 WHERE a>=5 GROUP BY a) AS t2
    CROSS JOIN t1 AS t3
  WHERE t2.a=t3.a AND t2.m=t3.b
  ORDER BY t3.a;
} {95 96 97 98 99}
do_execsql_test select4-16.3 {
  SELECT t3.c FROM 
    (SELECT a,max(b) AS m FROM t1 WHERE a>=5 GROUP BY a) AS t2
    LEFT JOIN t1 AS t3
  WHERE t2.a=t3.a AND t2.m=t3.b
  ORDER BY t3.a;
} {95 96 97 98 99}
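
A quick way to see whether the new co-routine strategy is in play is to
inspect the query plan.  The following is an illustrative sketch only (it is
not part of this check-in); it reuses the t1 table built above, and the exact
wording of the EXPLAIN QUERY PLAN output depends on the SQLite version:

  # Hypothetical follow-up check: dump the plan for the first query above.
  db eval {
    EXPLAIN QUERY PLAN
    SELECT t3.c FROM
      (SELECT a,max(b) AS m FROM t1 WHERE a>=5 GROUP BY a) AS t2
      JOIN t1 AS t3
    WHERE t2.a=t3.a AND t2.m=t3.b;
  }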




finish_test
Changes to test/shell1.test.
#
#   shell1-1.*: Basic command line option handling.
#   shell1-2.*: Basic "dot" command token parsing.
#   shell1-3.*: Basic test that "dot" command can be called.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
if {$tcl_platform(platform)=="windows"} {
  set CLI "sqlite3.exe"
} else {
  set CLI "./sqlite3"
}
if {![file executable $CLI]} {
  finish_test
  return
}
db close
forcedelete test.db test.db-journal test.db-wal
sqlite3 db test.db

#----------------------------------------------------------------------------
# Test cases shell1-1.*: Basic command line option handling.
#
#
#   shell1-1.*: Basic command line option handling.
#   shell1-2.*: Basic "dot" command token parsing.
#   shell1-3.*: Basic test that "dot" command can be called.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl



set CLI [test_find_cli]





db close
forcedelete test.db test.db-journal test.db-wal
sqlite3 db test.db

#----------------------------------------------------------------------------
# Test cases shell1-1.*: Basic command line option handling.
#
Changes to test/shell2.test.

# Test plan:
#
#   shell2-1.*: Misc. test of various tickets and reported errors.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
if {$tcl_platform(platform)=="windows"} {
  set CLI "sqlite3.exe"
} else {
  set CLI "./sqlite3"
}
if {![file executable $CLI]} {
  finish_test
  return
}
db close
forcedelete test.db test.db-journal test.db-wal
sqlite3 db test.db


#----------------------------------------------------------------------------
#   shell2-1.*: Misc. test of various tickets and reported errors.

# Test plan:
#
#   shell2-1.*: Misc. test of various tickets and reported errors.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl



set CLI [test_find_cli]





db close
forcedelete test.db test.db-journal test.db-wal
sqlite3 db test.db


#----------------------------------------------------------------------------
#   shell2-1.*: Misc. test of various tickets and reported errors.
Changes to test/shell3.test.
# Test plan:
#
#   shell3-1.*: Basic tests for running SQL statments from command line.
#   shell3-2.*: Basic tests for running SQL file from command line.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
if {$tcl_platform(platform)=="windows"} {
  set CLI "sqlite3.exe"
} else {
  set CLI "./sqlite3"
}
if {![file executable $CLI]} {
  finish_test
  return
}
db close
forcedelete test.db test.db-journal test.db-wal
sqlite3 db test.db

#----------------------------------------------------------------------------
#   shell3-1.*: Basic tests for running SQL statments from command line.
#
# Test plan:
#
#   shell3-1.*: Basic tests for running SQL statments from command line.
#   shell3-2.*: Basic tests for running SQL file from command line.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl



set CLI [test_find_cli]





db close
forcedelete test.db test.db-journal test.db-wal
sqlite3 db test.db

#----------------------------------------------------------------------------
#   shell3-1.*: Basic tests for running SQL statments from command line.
#
Changes to test/shell4.test.
# Test plan:
#
#   shell4-1.*: Basic tests specific to the "stats" command.
#   shell4-2.*: Basic tests for ".trace"
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
if {$tcl_platform(platform)=="windows"} {
  set CLI "sqlite3.exe"
} else {
  set CLI "./sqlite3"
}
if {![file executable $CLI]} {
  finish_test
  return
}
db close
forcedelete test.db test.db-journal test.db-wal
sqlite3 db test.db

#----------------------------------------------------------------------------
# Test cases shell4-1.*: Tests specific to the "stats" command.
#
# Test plan:
#
#   shell4-1.*: Basic tests specific to the "stats" command.
#   shell4-2.*: Basic tests for ".trace"
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl



set CLI [test_find_cli]





db close
forcedelete test.db test.db-journal test.db-wal
sqlite3 db test.db

#----------------------------------------------------------------------------
# Test cases shell4-1.*: Tests specific to the "stats" command.
#
Changes to test/shell5.test.

# Test plan:
#
#   shell5-1.*: Basic tests specific to the ".import" command.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl
if {$tcl_platform(platform)=="windows"} {
  set CLI "sqlite3.exe"
} else {
  set CLI "./sqlite3"
}
if {![file executable $CLI]} {
  finish_test
  return
}
db close
forcedelete test.db test.db-journal test.db-wal

#----------------------------------------------------------------------------
# Test cases shell5-1.*: Basic handling of the .import and .separator commands.
#


# Test plan:
#
#   shell5-1.*: Basic tests specific to the ".import" command.
#
set testdir [file dirname $argv0]
source $testdir/tester.tcl



set CLI [test_find_cli]





db close
forcedelete test.db test.db-journal test.db-wal

#----------------------------------------------------------------------------
# Test cases shell5-1.*: Basic handling of the .import and .separator commands.
#

Changes to test/spellfix3.test.
  SELECT spellfix1_scriptcode('וַיֹּ֥אמֶר אֱלֹהִ֖ים יְהִ֣י א֑וֹר וַֽיְהִי־אֽוֹר׃');
} {125}
do_execsql_test 140 {
  SELECT spellfix1_scriptcode('فِي ذَلِكَ الوَقتِ، قالَ اللهُ: لِيَكُنْ نُورٌ. فَصَارَ نُورٌ.');
} {160}
do_execsql_test 200 {
  SELECT spellfix1_scriptcode('+3.14159');
} {999}
do_execsql_test 210 {
  SELECT spellfix1_scriptcode('And God said: "Да будет свет"');
} {998}
finish_test
  SELECT spellfix1_scriptcode('וַיֹּ֥אמֶר אֱלֹהִ֖ים יְהִ֣י א֑וֹר וַֽיְהִי־אֽוֹר׃');
} {125}
do_execsql_test 140 {
  SELECT spellfix1_scriptcode('فِي ذَلِكَ الوَقتِ، قالَ اللهُ: لِيَكُنْ نُورٌ. فَصَارَ نُورٌ.');
} {160}
do_execsql_test 200 {
  SELECT spellfix1_scriptcode('+3.14159');
} {215}
do_execsql_test 210 {
  SELECT spellfix1_scriptcode('And God said: "Да будет свет"');
} {998}
do_execsql_test 220 {
  SELECT spellfix1_scriptcode('+3.14159 light');
} {215}
do_execsql_test 230 {
  SELECT spellfix1_scriptcode('+3.14159 свет');
} {220}
do_execsql_test 240 {
  SELECT spellfix1_scriptcode('וַיֹּ֥אמֶר +3.14159');
} {125}
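
The expected values above follow the ISO 15924 numeric script codes (125
Hebrew, 160 Arabic, 215 Latin, 220 Cyrillic), with 998 apparently reserved
for text that mixes scripts; under the new behaviour digits and punctuation
alone fall back to the Latin code.  A minimal illustrative query, not part of
this check-in, assuming plain ASCII letters also map to Latin:

  do_execsql_test 250 {
    SELECT spellfix1_scriptcode('light');
  } {215}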

finish_test
Changes to test/stat.test.
set testprefix stat

ifcapable !vtab||!compound {
  finish_test
  return
}






set ::asc 1
proc a_string {n} { string range [string repeat [incr ::asc]. $n] 1 $n }
db func a_string a_string

register_dbstat_vtab db
do_execsql_test stat-0.0 {
set testprefix stat

ifcapable !vtab||!compound {
  finish_test
  return
}

# This module uses hard-coded results that depend on exact measurements of
# pages sizes at the byte level, and hence will not work if the reserved_bytes
# value is nonzero.
if {[nonzero_reserved_bytes]} {finish_test; return;}

set ::asc 1
proc a_string {n} { string range [string repeat [incr ::asc]. $n] 1 $n }
db func a_string a_string

register_dbstat_vtab db
do_execsql_test stat-0.0 {
Changes to test/superlock.test.
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/lock_common.tcl

set testprefix superlock


# Test organization:
#
#   1.*: Test superlock on a rollback database. Test that once the db is
#        superlocked, it is not possible for a second client to read from
#        it.
#

#

set testdir [file dirname $argv0]
source $testdir/tester.tcl
source $testdir/lock_common.tcl

set testprefix superlock
do_not_use_codec

# Test organization:
#
#   1.*: Test superlock on a rollback database. Test that once the db is
#        superlocked, it is not possible for a second client to read from
#        it.
#
do_catchsql_test 6.7 { SELECT * FROM t1 } {1 {no such table: t1}}
do_catchsql_test 6.8 { SELECT * FROM t2 } {0 {a b}}

db_swap test.db2 test.db
do_catchsql_test 6.9 { SELECT * FROM t1 } {0 {1 2 3 4}}
do_catchsql_test 6.10 { SELECT * FROM t2 } {1 {no such table: t2}}

do_execsql_test  6.11 { 
  PRAGMA journal_mode = delete;
  PRAGMA page_size = 512;
  VACUUM;
  PRAGMA journal_mode = wal;
  INSERT INTO t1 VALUES(5, 6);
} {delete wal}


db_swap test.db2 test.db
do_catchsql_test 6.12 { SELECT * FROM t1 } {1 {no such table: t1}}
do_catchsql_test 6.13 { SELECT * FROM t2 } {0 {a b}}

db_swap test.db2 test.db
do_catchsql_test 6.14 { SELECT * FROM t1 } {0 {1 2 3 4 5 6}}
do_catchsql_test 6.15 { SELECT * FROM t2 } {1 {no such table: t2}}

finish_test
do_catchsql_test 6.7 { SELECT * FROM t1 } {1 {no such table: t1}}
do_catchsql_test 6.8 { SELECT * FROM t2 } {0 {a b}}

db_swap test.db2 test.db
do_catchsql_test 6.9 { SELECT * FROM t1 } {0 {1 2 3 4}}
do_catchsql_test 6.10 { SELECT * FROM t2 } {1 {no such table: t2}}

if {[nonzero_reserved_bytes]} {
  # Vacuum with a size change is not allowed with the codec
  do_execsql_test  6.11codec { 
    PRAGMA journal_mode = delete;
    VACUUM;
    PRAGMA journal_mode = wal;
    INSERT INTO t1 VALUES(5, 6);
  } {delete wal}
} else {
  do_execsql_test  6.11 { 
    PRAGMA journal_mode = delete;
    PRAGMA page_size = 512;
    VACUUM;
    PRAGMA journal_mode = wal;
    INSERT INTO t1 VALUES(5, 6);
  } {delete wal}
}

db_swap test.db2 test.db
do_catchsql_test 6.12 { SELECT * FROM t1 } {1 {no such table: t1}}
do_catchsql_test 6.13 { SELECT * FROM t2 } {0 {a b}}

db_swap test.db2 test.db
do_catchsql_test 6.14 { SELECT * FROM t1 } {0 {1 2 3 4 5 6}}
do_catchsql_test 6.15 { SELECT * FROM t2 } {1 {no such table: t2}}

finish_test
Changes to test/tclsqlite.test.
# $Id: tclsqlite.test,v 1.73 2009/03/16 13:19:36 danielk1977 Exp $

set testdir [file dirname $argv0]
source $testdir/tester.tcl

# Check the error messages generated by tclsqlite
#

if {[sqlite3 -has-codec]} {
  set r "sqlite_orig HANDLE FILENAME ?-key CODEC-KEY?"
} else {
  set r "sqlite_orig HANDLE FILENAME ?-vfs VFSNAME? ?-readonly BOOLEAN? ?-create BOOLEAN? ?-nomutex BOOLEAN? ?-fullmutex BOOLEAN? ?-uri BOOLEAN?"
}
do_test tcl-1.1 {
  set v [catch {sqlite3 bogus} msg]
  regsub {really_sqlite3} $msg {sqlite3} msg
  lappend v $msg
} [list 1 "wrong # args: should be \"$r\""]
# $Id: tclsqlite.test,v 1.73 2009/03/16 13:19:36 danielk1977 Exp $

set testdir [file dirname $argv0]
source $testdir/tester.tcl

# Check the error messages generated by tclsqlite
#
set r "sqlite_orig HANDLE FILENAME ?-vfs VFSNAME? ?-readonly BOOLEAN? ?-create BOOLEAN? ?-nomutex BOOLEAN? ?-fullmutex BOOLEAN? ?-uri BOOLEAN?"
if {[sqlite3 -has-codec]} {
  append r " ?-key CODECKEY?"


}
do_test tcl-1.1 {
  set v [catch {sqlite3 bogus} msg]
  regsub {really_sqlite3} $msg {sqlite3} msg
  lappend v $msg
} [list 1 "wrong # args: should be \"$r\""]
do_test tcl-1.2 {
Changes to test/tester.tcl.
# This command should be called after loading tester.tcl from within
# all test scripts that are incompatible with encryption codecs.
#
proc do_not_use_codec {} {
  set ::do_not_use_codec 1
  reset_db
}







# Print a HELP message and exit
#
proc print_help_and_quit {} {
  puts {Options:
  --pause                  Wait for user input before continuing
  --soft-heap-limit=N      Set the soft-heap-limit to N

# This command should be called after loading tester.tcl from within
# all test scripts that are incompatible with encryption codecs.
#
proc do_not_use_codec {} {
  set ::do_not_use_codec 1
  reset_db
}

# Return true if the "reserved_bytes" integer on database files is non-zero.
#
proc nonzero_reserved_bytes {} {
  return [sqlite3 -has-codec]
}
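
Other hunks in this same check-in use the helper in two ways: to skip a test
file whose expected results are byte-exact (stat.test, pageropt.test and
mmap3.test above), or to fold the 0/1 result into an expected value
(incrblob.test).  A condensed sketch of both patterns, for illustration only:

  # Skip the whole file when a codec reserves bytes on each page.
  if {[nonzero_reserved_bytes]} {finish_test; return}

  # Or adjust an expected page count by the 0/1 result.
  set expected [expr {30 + [nonzero_reserved_bytes]}]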

# Print a HELP message and exit
#
proc print_help_and_quit {} {
  puts {Options:
  --pause                  Wait for user input before continuing
  --soft-heap-limit=N      Set the soft-heap-limit to N
  #   --soak=N
  #   --file-retries=N
  #   --file-retry-delay=N
  #   --start=[$permutation:]$testfile
  #   --match=$pattern
  #   --verbose=$val
  #   --output=$filename


  #   --help
  #
  set cmdlinearg(soft-heap-limit)    0
  set cmdlinearg(maxerror)        1000
  set cmdlinearg(malloctrace)        0
  set cmdlinearg(backtrace)         10
  set cmdlinearg(binarylog)          0
  set cmdlinearg(soak)               0
  set cmdlinearg(file-retries)       0
  set cmdlinearg(file-retry-delay)   0
  set cmdlinearg(start)             ""
  set cmdlinearg(match)             ""
  set cmdlinearg(verbose)           ""
  set cmdlinearg(output)            ""


  set leftover [list]
  foreach a $argv {
    switch -regexp -- $a {
      {^-+pause$} {
        # Wait for user input before continuing. This is to give the user an
        # opportunity to connect profiling tools to the process.

  #   --soak=N
  #   --file-retries=N
  #   --file-retry-delay=N
  #   --start=[$permutation:]$testfile
  #   --match=$pattern
  #   --verbose=$val
  #   --output=$filename
  #   -q                                      Reduce output
  #   --testdir=$dir                          Run tests in subdirectory $dir
  #   --help
  #
  set cmdlinearg(soft-heap-limit)    0
  set cmdlinearg(maxerror)        1000
  set cmdlinearg(malloctrace)        0
  set cmdlinearg(backtrace)         10
  set cmdlinearg(binarylog)          0
  set cmdlinearg(soak)               0
  set cmdlinearg(file-retries)       0
  set cmdlinearg(file-retry-delay)   0
  set cmdlinearg(start)             ""
  set cmdlinearg(match)             ""
  set cmdlinearg(verbose)           ""
  set cmdlinearg(output)            ""
  set cmdlinearg(testdir)           "testdir"

  set leftover [list]
  foreach a $argv {
    switch -regexp -- $a {
      {^-+pause$} {
        # Wait for user input before continuing. This is to give the user an
        # opportunity to connect profiling tools to the process.
      }
      {^-+backtrace=.+$} {
        foreach {dummy cmdlinearg(backtrace)} [split $a =] break
        sqlite3_memdebug_backtrace $value
      }
      {^-+binarylog=.+$} {
        foreach {dummy cmdlinearg(binarylog)} [split $a =] break

      }
      {^-+soak=.+$} {
        foreach {dummy cmdlinearg(soak)} [split $a =] break
        set ::G(issoak) $cmdlinearg(soak)
      }
      {^-+file-retries=.+$} {
        foreach {dummy cmdlinearg(file-retries)} [split $a =] break
      }
      {^-+backtrace=.+$} {
        foreach {dummy cmdlinearg(backtrace)} [split $a =] break
        sqlite3_memdebug_backtrace $value
      }
      {^-+binarylog=.+$} {
        foreach {dummy cmdlinearg(binarylog)} [split $a =] break
        set cmdlinearg(binarylog) [file normalize $cmdlinearg(binarylog)]
      }
      {^-+soak=.+$} {
        foreach {dummy cmdlinearg(soak)} [split $a =] break
        set ::G(issoak) $cmdlinearg(soak)
      }
      {^-+file-retries=.+$} {
        foreach {dummy cmdlinearg(file-retries)} [split $a =] break

        set ::G(match) $cmdlinearg(match)
        if {$::G(match) == ""} {unset ::G(match)}
      }

      {^-+output=.+$} {
        foreach {dummy cmdlinearg(output)} [split $a =] break

        if {$cmdlinearg(verbose)==""} {
          set cmdlinearg(verbose) 2
        }
      }
      {^-+verbose=.+$} {
        foreach {dummy cmdlinearg(verbose)} [split $a =] break
        if {$cmdlinearg(verbose)=="file"} {
          set cmdlinearg(verbose) 2
        } elseif {[string is boolean -strict $cmdlinearg(verbose)]==0} {
          error "option --verbose= must be set to a boolean or to \"file\""
        }
      }



      {.*help.*} {
         print_help_and_quit
      }
      {^-q$} {
        set cmdlinearg(output) test-out.txt
        set cmdlinearg(verbose) 2
      }

      default {
        lappend leftover $a
      }
    }








  }
  set argv $leftover

  # Install the malloc layer used to inject OOM errors. And the 'automatic'
  # extensions. This only needs to be done once for the process.
  #
  sqlite3_shutdown

        set ::G(match) $cmdlinearg(match)
        if {$::G(match) == ""} {unset ::G(match)}
      }

      {^-+output=.+$} {
        foreach {dummy cmdlinearg(output)} [split $a =] break
        set cmdlinearg(output) [file normalize $cmdlinearg(output)]
        if {$cmdlinearg(verbose)==""} {
          set cmdlinearg(verbose) 2
        }
      }
      {^-+verbose=.+$} {
        foreach {dummy cmdlinearg(verbose)} [split $a =] break
        if {$cmdlinearg(verbose)=="file"} {
          set cmdlinearg(verbose) 2
        } elseif {[string is boolean -strict $cmdlinearg(verbose)]==0} {
          error "option --verbose= must be set to a boolean or to \"file\""
        }
      }
      {^-+testdir=.*$} {
        foreach {dummy cmdlinearg(testdir)} [split $a =] break
      }
      {.*help.*} {
         print_help_and_quit
      }
      {^-q$} {
        set cmdlinearg(output) test-out.txt
        set cmdlinearg(verbose) 2
      }

      default {
        lappend leftover [file normalize $a]
      }
    }
  }
  set testdir [file normalize $testdir]
  set cmdlinearg(TESTFIXTURE_HOME) [pwd]
  set cmdlinearg(INFO_SCRIPT) [file normalize [info script]]
  set argv0 [file normalize $argv0]
  if {$cmdlinearg(testdir)!=""} {
    file mkdir $cmdlinearg(testdir)
    cd $cmdlinearg(testdir)
  }
  set argv $leftover

  # Install the malloc layer used to inject OOM errors. And the 'automatic'
  # extensions. This only needs to be done once for the process.
  #
  sqlite3_shutdown
  sqlite3_shutdown
  eval sqlite3_config_pagecache $::old_pagecache_config
  unset ::old_pagecache_config 
  sqlite3_initialize
  autoinstall_test_functions
  sqlite3 db test.db
}

# If the library is compiled with the SQLITE_DEFAULT_AUTOVACUUM macro set
# to non-zero, then set the global variable $AUTOVACUUM to 1.
set AUTOVACUUM $sqlite_options(default_autovacuum)

# Make sure the FTS enhanced query syntax is disabled.
set sqlite_fts3_enable_parentheses 0
  sqlite3_shutdown
  eval sqlite3_config_pagecache $::old_pagecache_config
  unset ::old_pagecache_config 
  sqlite3_initialize
  autoinstall_test_functions
  sqlite3 db test.db
}

# Find the name of the 'shell' executable (e.g. "sqlite3.exe") to use for
# the tests in shell[1-5].test. If no such executable can be found, invoke
# [finish_test ; return] in the callers context.
#
proc test_find_cli {} {
  if {$::tcl_platform(platform)=="windows"} {
    set ret "sqlite3.exe"
  } else {
    set ret "sqlite3"
  }
  set ret [file normalize [file join $::cmdlinearg(TESTFIXTURE_HOME) $ret]]
  if {![file executable $ret]} {
    finish_test
    return -code return
  }
  return $ret
}
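
The shell[1-5].test hunks above replace their hard-coded executable checks
with this helper; a minimal usage sketch of the same pattern:

  set CLI [test_find_cli]
  # If no shell binary was found, test_find_cli has already called
  # finish_test and forced a return, so $CLI is safe to use from here on.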

# If the library is compiled with the SQLITE_DEFAULT_AUTOVACUUM macro set
# to non-zero, then set the global variable $AUTOVACUUM to 1.
set AUTOVACUUM $sqlite_options(default_autovacuum)

# Make sure the FTS enhanced query syntax is disabled.
set sqlite_fts3_enable_parentheses 0
Changes to test/tkt4018.test.
#
# This file implements tests to verify that ticket #4018 has been
# fixed.  
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl


proc testsql {sql} {
  set fd [open tf_main.tcl w]
  puts $fd [subst -nocommands {
    sqlite3_test_control_pending_byte 0x0010000
    sqlite3 db test.db
    set rc [catch { db eval {$sql} } msg]

#
# This file implements tests to verify that ticket #4018 has been
# fixed.  
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl
do_not_use_codec

proc testsql {sql} {
  set fd [open tf_main.tcl w]
  puts $fd [subst -nocommands {
    sqlite3_test_control_pending_byte 0x0010000
    sqlite3 db test.db
    set rc [catch { db eval {$sql} } msg]
Changes to test/unixexcl.test.
do_multiclient_test tn {
  do_test unixexcl-3.$tn.1 {
    code1 { db close; sqlite3 db file:test.db?psow=0 -vfs unix-excl -uri 1 }
    code2 { db2 close; sqlite3 db2 file:test.db?psow=0 -vfs unix-excl -uri 1 }
    sql1 {
      PRAGMA auto_vacuum = 0;
      PRAGMA journal_mode = WAL;

      CREATE TABLE t1(a, b);
      INSERT INTO t1 VALUES(1, 2);
    }
  } {wal}

  if {$tn==1} {
    do_test unixexcl-3.$tn.1.multiproc {

do_multiclient_test tn {
  do_test unixexcl-3.$tn.1 {
    code1 { db close; sqlite3 db file:test.db?psow=0 -vfs unix-excl -uri 1 }
    code2 { db2 close; sqlite3 db2 file:test.db?psow=0 -vfs unix-excl -uri 1 }
    sql1 {
      PRAGMA auto_vacuum = 0;
      PRAGMA journal_mode = WAL;
      PRAGMA synchronous = FULL;
      CREATE TABLE t1(a, b);
      INSERT INTO t1 VALUES(1, 2);
    }
  } {wal}

  if {$tn==1} {
    do_test unixexcl-3.$tn.1.multiproc {
Changes to test/vtab6.test.
set ::echo_module_ignore_usable 1
db cache flush

do_test vtab6-11.4.1 {
  catchsql {
    SELECT a, b, c FROM ab NATURAL JOIN bc;
  }
} {1 {table ab: xBestIndex returned an invalid plan}}
do_test vtab6-11.4.2 {
  catchsql {
    SELECT a, b, c FROM bc NATURAL JOIN ab;
  }
} {1 {table bc: xBestIndex returned an invalid plan}}

unset ::echo_module_ignore_usable

finish_test
set ::echo_module_ignore_usable 1
db cache flush

do_test vtab6-11.4.1 {
  catchsql {
    SELECT a, b, c FROM ab NATURAL JOIN bc;
  }
} {1 {ab.xBestIndex malfunction}}
do_test vtab6-11.4.2 {
  catchsql {
    SELECT a, b, c FROM bc NATURAL JOIN ab;
  }
} {1 {bc.xBestIndex malfunction}}

unset ::echo_module_ignore_usable

finish_test
Changes to test/wal.test.
do_test wal-21.3 {
  execsql { PRAGMA integrity_check }
} {ok}

#-------------------------------------------------------------------------
# Test reading and writing of databases with different page-sizes.
#

foreach pgsz {512 1024 2048 4096 8192 16384 32768 65536} {
  do_multiclient_test tn [string map [list %PGSZ% $pgsz] {
    do_test wal-22.%PGSZ%.$tn.1 {
      sql1 {
        PRAGMA main.page_size = %PGSZ%;
        PRAGMA auto_vacuum = 0;
        PRAGMA journal_mode = WAL;
        CREATE TABLE t1(x UNIQUE);
        INSERT INTO t1 SELECT randomblob(800);
        INSERT INTO t1 SELECT randomblob(800);
        INSERT INTO t1 SELECT randomblob(800);
      }
    } {wal}
    do_test wal-22.%PGSZ%.$tn.2 { sql2 { PRAGMA integrity_check } } {ok}
    do_test wal-22.%PGSZ%.$tn.3 {
      sql1 {PRAGMA wal_checkpoint}
      expr {[file size test.db] % %PGSZ%}
    } {0}
  }]
}


#-------------------------------------------------------------------------
# Test that when 1 or more pages are recovered from a WAL file, 
# sqlite3_log() is invoked to report this to the user.
#
ifcapable curdir {
  set walfile [file nativename [file join [get_pwd] test.db-wal]]






do_test wal-21.3 {
  execsql { PRAGMA integrity_check }
} {ok}

#-------------------------------------------------------------------------
# Test reading and writing of databases with different page-sizes.
#
incr ::do_not_use_codec
foreach pgsz {512 1024 2048 4096 8192 16384 32768 65536} {
  do_multiclient_test tn [string map [list %PGSZ% $pgsz] {
    do_test wal-22.%PGSZ%.$tn.1 {
      sql1 {
        PRAGMA main.page_size = %PGSZ%;
        PRAGMA auto_vacuum = 0;
        PRAGMA journal_mode = WAL;
        CREATE TABLE t1(x UNIQUE);
        INSERT INTO t1 SELECT randomblob(800);
        INSERT INTO t1 SELECT randomblob(800);
        INSERT INTO t1 SELECT randomblob(800);
      }
    } {wal}
    do_test wal-22.%PGSZ%.$tn.2 { sql2 { PRAGMA integrity_check } } {ok}
    do_test wal-22.%PGSZ%.$tn.3 {
      sql1 {PRAGMA wal_checkpoint}
      expr {[file size test.db] % %PGSZ%}
    } {0}
  }]
}
incr ::do_not_use_codec -1

#-------------------------------------------------------------------------
# Test that when 1 or more pages are recovered from a WAL file, 
# sqlite3_log() is invoked to report this to the user.
#
ifcapable curdir {
  set walfile [file nativename [file join [get_pwd] test.db-wal]]
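The wal-22.* loop above runs the same schema at every supported page size and then checks that, once checkpointed, the database file is an exact multiple of the page size in use. A standalone Tcl sketch of that invariant, outside the multi-client harness (assumptions: the Tcl sqlite3 package is available and a scratch file named pgsz.db may be created and deleted):

package require sqlite3
foreach pgsz {512 4096 65536} {
  file delete -force pgsz.db pgsz.db-wal
  sqlite3 dbp pgsz.db
  dbp eval "
    PRAGMA page_size = $pgsz;
    PRAGMA journal_mode = WAL;
    CREATE TABLE t1(x UNIQUE);
    INSERT INTO t1 VALUES(randomblob(800));
    PRAGMA wal_checkpoint;
  "
  dbp close
  # After the checkpoint the main database holds only whole pages.
  puts "pgsz=$pgsz remainder=[expr {[file size pgsz.db] % $pgsz}]"  ;# expect 0
}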
Changes to test/wal2.test.
@@ -1192,15 +1192,15 @@
 foreach {tn sql reslist} {
   1 { }                                 {10 0 4 0 6 0}
   2 { PRAGMA checkpoint_fullfsync = 1 } {10 4 4 2 6 2}
   3 { PRAGMA checkpoint_fullfsync = 0 } {10 0 4 0 6 0}
 } {
   faultsim_delete_and_reopen
 
-  execsql {PRAGMA auto_vacuum = 0}
+  execsql {PRAGMA auto_vacuum = 0; PRAGMA synchronous = FULL;}
   execsql $sql
   do_execsql_test wal2-14.$tn.0 { PRAGMA page_size = 4096 }   {}
   do_execsql_test wal2-14.$tn.1 { PRAGMA journal_mode = WAL } {wal}
 
   set sqlite_sync_count 0
   set sqlite_fullsync_count 0
 
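The only change to wal2-14.* is that the setup now requests synchronous=FULL explicitly; the sync counts in each reslist are the ones expected in FULL mode, so the test should not depend on whatever default synchronous setting the build was compiled with. A small sketch of reading the effective settings back (not part of the check-in; sync.db is a hypothetical scratch file):

package require sqlite3
file delete -force sync.db sync.db-wal
sqlite3 dbs sync.db
dbs eval {
  PRAGMA journal_mode = WAL;
  PRAGMA synchronous = FULL;
  PRAGMA checkpoint_fullfsync = 1;
}
# PRAGMA synchronous reports FULL as 2; checkpoint_fullfsync reports 0 or 1.
puts [dbs eval { PRAGMA synchronous; PRAGMA checkpoint_fullfsync }]  ;# 2 1
dbs close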

Changes to test/wal5.test.
@@ -14,14 +14,15 @@
 #
 
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
 source $testdir/lock_common.tcl
 source $testdir/wal_common.tcl
 ifcapable !wal {finish_test ; return }
+do_not_use_codec
 
 set testprefix wal5
 
 proc db_page_count  {{file test.db}} { expr [file size $file] / 1024 }
 proc wal_page_count {{file test.db}} { wal_frame_count ${file}-wal 1024 }
 
 
@@ -136,32 +137,32 @@
       sql1 { INSERT INTO t1 VALUES(5, zeroblob(1200)) }
       list [db_page_count] [wal_page_count] $::nBusyHandler
     } {6 12 0}
 
     do_test 1.$tn.7 {
       reopen_all
       list [db_page_count] [wal_page_count] $::nBusyHandler
-    } {7 0 0}
+    } [expr {[nonzero_reserved_bytes]?"/# # 0/":"7 0 0"}]
 
     do_test 1.$tn.8  { sql2 { BEGIN ; SELECT x FROM t1 } } {1 2 3 4 5}
     do_test 1.$tn.9  {
       sql1 { INSERT INTO t1 VALUES(6, zeroblob(1200)) }
       list [db_page_count] [wal_page_count] $::nBusyHandler
-    } {7 5 0}
+    } [expr {[nonzero_reserved_bytes]?"/# # #/":"7 5 0"}]
     do_test 1.$tn.10 { sql3 { BEGIN ; SELECT x FROM t1 } } {1 2 3 4 5 6}
 
     set ::busy_handler_script { 
       if {$n==5} { sql2 COMMIT } 
       if {$n==6} { set ::db_file_size [db_page_count] }
       if {$n==7} { sql3 COMMIT }
     }
     do_test 1.$tn.11 {
       code1 { do_wal_checkpoint db -mode restart }
       list [db_page_count] [wal_page_count] $::nBusyHandler
-    } {10 5 8}
+    } [expr {[nonzero_reserved_bytes]?"/# # #/":"10 5 8"}]
     do_test 1.$tn.12 { set ::db_file_size } 10
   }
 
   #-------------------------------------------------------------------------
   # This block of tests explores checkpoint operations on more than one 
   # database file.
   #
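The wal5 tests above compare database page counts and WAL frame counts around checkpoints; the new [nonzero_reserved_bytes] guards appear to loosen the exact numbers for builds that reserve space at the end of each page. For reference, a hedged sketch of the checkpoint pragma that do_wal_checkpoint ultimately exercises (assumptions: Tcl sqlite3 package, scratch file ckpt.db); the pragma returns a busy flag, the number of frames in the WAL, and the number of frames checkpointed:

package require sqlite3
file delete -force ckpt.db ckpt.db-wal
sqlite3 dbc ckpt.db
dbc eval {
  PRAGMA journal_mode = WAL;
  CREATE TABLE t1(x);
  INSERT INTO t1 VALUES(zeroblob(1200));
}
# Three integers: busy-flag, frames in the WAL, frames checkpointed.
puts [dbc eval { PRAGMA wal_checkpoint(RESTART) }]
dbc close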
Changes to test/wal8.test.
@@ -23,14 +23,15 @@
 # first read transaction is executed), and the "PRAGMA page_size = XXX"
 # is a no-op.
 #
 set testdir [file dirname $argv0]
 source $testdir/tester.tcl
 set ::testprefix wal8
 ifcapable !wal {finish_test ; return }
+do_not_use_codec
 
 db close
 forcedelete test.db test.db-wal
 
 sqlite3 db test.db
 sqlite3 db2 test.db
 
Changes to test/walbak.test.
@@ -123,14 +123,15 @@
       INSERT INTO t1 SELECT randomblob(500), randomblob(500) FROM t1; /* 16 */
       INSERT INTO t1 SELECT randomblob(500), randomblob(500) FROM t1; /* 32 */
       INSERT INTO t1 SELECT randomblob(500), randomblob(500) FROM t1; /* 64 */
     COMMIT;
   }
 } {}
 do_test walbak-2.2 {
+  forcedelete abc.db
   db backup abc.db
   sqlite3 db2 abc.db
   string compare [sig db] [sig db2]
 } {0}
 
 do_test walbak-2.3 {
   sqlite3_backup B db2 main db main
@@ -235,14 +236,15 @@
       PRAGMA page_size = 2048;
       PRAGMA journal_mode = PERSIST;
       CREATE TABLE xx(x);
     }
   }
 
 } {
+  if {$tn==4 && [sqlite3 -has-codec]} continue
   foreach f [glob -nocomplain test.db*] { forcedelete $f }
 
   eval $setup
 
   do_test walbak-3.$tn.1 {
     execsql {
       CREATE TABLE t1(a, b);
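walbak-2.2 now removes any stale abc.db before invoking the Tcl interface's backup method, so the signature comparison always runs against a freshly written copy. A minimal sketch of the same "db backup" pattern (not part of the check-in; src.db and copy.db are hypothetical scratch files):

package require sqlite3
file delete -force src.db copy.db
sqlite3 src src.db
src eval { CREATE TABLE t1(a); INSERT INTO t1 VALUES(1), (2), (3); }
src backup copy.db            ;# online backup of the main database into copy.db
sqlite3 dst copy.db
puts [dst eval { SELECT count(*) FROM t1 }]   ;# 3
src close
dst close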
Changes to test/walro.test.
@@ -208,15 +208,15 @@
       INSERT INTO t2 SELECT x||y, y||x FROM t2;
       INSERT INTO t2 SELECT x||y, y||x FROM t2;
       INSERT INTO t2 SELECT x||y, y||x FROM t2;
       INSERT INTO t2 SELECT x||y, y||x FROM t2;
       INSERT INTO t2 SELECT x||y, y||x FROM t2;
     }
     file size test.db-wal
-  } {147800}
+  } [expr {[nonzero_reserved_bytes]?148848:147800}]
   do_test 1.4.4.2 {
     csql1 { SELECT * FROM t1 }
   } {0 {a b c d e f g h i j k l 1 2 3 4 5 6}}
   do_test 1.4.4.3 {
     csql2 COMMIT
     csql1 { SELECT count(*) FROM t2 }
   } {0 512}
Changes to test/where2.test.
@@ -760,9 +760,20 @@
 #
 do_execsql_test where2-13.1 {
   CREATE TABLE t13(a,b);
   CREATE INDEX t13a ON t13(a);
   INSERT INTO t13 VALUES(4,5);
   SELECT * FROM t13 WHERE (1=2 AND a=3) OR a=4;
 } {4 5}
+
+# https://www.sqlite.org/src/info/5e3c886796e5512e  (2016-03-09)
+# Correlated subquery on the RHS of an IN operator
+#
+do_execsql_test where2-14.1 {
+  CREATE TABLE t14a(x INTEGER PRIMARY KEY);
+  INSERT INTO t14a(x) VALUES(1),(2),(3),(4);
+  CREATE TABLE t14b(y INTEGER PRIMARY KEY);
+  INSERT INTO t14b(y) VALUES(1);
+  SELECT x FROM t14a WHERE x NOT IN (SELECT x FROM t14b);
+} {}
 
 finish_test
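The new where2-14.1 test covers the fix referenced by the ticket URL above: t14b has no column named x, so the x inside the subquery binds to the outer t14a.x, the subquery is correlated, and every outer row finds itself in its own IN-list, giving an empty result. An illustrative follow-on sketch (not part of the check-in; it reuses the t14a/t14b tables created by where2-14.1 and uses invented test names) contrasts the correlated and uncorrelated forms:

do_execsql_test where2-14.1-sketch {
  -- Correlated: x binds to the outer t14a.x, so NOT IN is never true.
  SELECT x FROM t14a WHERE x NOT IN (SELECT x FROM t14b);
} {}
do_execsql_test where2-14.2-sketch {
  -- Uncorrelated: the IN-list is just (1), so 2, 3 and 4 survive.
  SELECT x FROM t14a WHERE x NOT IN (SELECT y FROM t14b);
} {2 3 4}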
Changes to test/zerodamage.test.
@@ -108,14 +108,15 @@
   # Repeat the previous with POWERSAFE_OVERWRITE off.  Verify that the WAL file
   # is padded.
   #
   do_test zerodamage-3.1 {
     db close
     sqlite3 db file:test.db?psow=FALSE -uri 1
     db eval {
+       PRAGMA synchronous=FULL;
        UPDATE t1 SET y=randomblob(50) WHERE x=124;
     }
     file size test.db-wal
   } {16800}
 }
 
 finish_test
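The added PRAGMA synchronous=FULL makes the commit sync the WAL, which is the point at which the padding measured by zerodamage-3.1 is expected to appear when powersafe-overwrite is off. A rough sketch of the psow= URI parameter used here (assumptions: Tcl sqlite3 package, scratch file pad.db), comparing WAL sizes with the flag on and off:

package require sqlite3
foreach psow {TRUE FALSE} {
  file delete -force pad.db pad.db-wal
  sqlite3 dbz file:pad.db?psow=$psow -uri 1
  dbz eval {
    PRAGMA journal_mode = WAL;
    PRAGMA synchronous = FULL;
    CREATE TABLE t1(x);
    INSERT INTO t1 VALUES(randomblob(50));
  }
  # With psow=FALSE the committed frames are padded, so the WAL is
  # expected to be larger than in the psow=TRUE run.
  puts "psow=$psow wal size=[file size pad.db-wal]"
  dbz close
}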
Changes to tool/build-all-msvc.bat.
@@ -661,19 +661,21 @@
 
         REM
         REM NOTE: Copy the "sqlite3.pdb" file to the appropriate directory for
         REM       the build and platform beneath the binary directory unless we
         REM       are prevented from doing so.
         REM
         IF NOT DEFINED NOSYMBOLS (
-          %__ECHO% XCOPY "%DLL_PDB_FILE_NAME%" "%BINARYDIRECTORY%\%%B\%%D\" %FFLAGS% %DFLAGS%
+          IF EXIST "%DLL_PDB_FILE_NAME%" (
+            %__ECHO% XCOPY "%DLL_PDB_FILE_NAME%" "%BINARYDIRECTORY%\%%B\%%D\" %FFLAGS% %DFLAGS%
 
-          IF ERRORLEVEL 1 (
-            ECHO Failed to copy "%DLL_PDB_FILE_NAME%" to "%BINARYDIRECTORY%\%%B\%%D\".
-            GOTO errors
+            IF ERRORLEVEL 1 (
+              ECHO Failed to copy "%DLL_PDB_FILE_NAME%" to "%BINARYDIRECTORY%\%%B\%%D\".
+              GOTO errors
+            )
           )
         )
 
         REM
         REM NOTE: If requested, also build the shell executable.
         REM
         IF DEFINED BUILD_ALL_SHELL (
@@ -718,19 +720,21 @@
 
           REM
           REM NOTE: Copy the "sqlite3sh.pdb" file to the appropriate directory
          REM       for the build and platform beneath the binary directory
           REM       unless we are prevented from doing so.
           REM
           IF NOT DEFINED NOSYMBOLS (
-            %__ECHO% XCOPY "%EXE_PDB_FILE_NAME%" "%BINARYDIRECTORY%\%%B\%%D\" %FFLAGS% %DFLAGS%
+            IF EXIST "%EXE_PDB_FILE_NAME%" (
+              %__ECHO% XCOPY "%EXE_PDB_FILE_NAME%" "%BINARYDIRECTORY%\%%B\%%D\" %FFLAGS% %DFLAGS%
 
-            IF ERRORLEVEL 1 (
-              ECHO Failed to copy "%EXE_PDB_FILE_NAME%" to "%BINARYDIRECTORY%\%%B\%%D\".
-              GOTO errors
+              IF ERRORLEVEL 1 (
+                ECHO Failed to copy "%EXE_PDB_FILE_NAME%" to "%BINARYDIRECTORY%\%%B\%%D\".
+                GOTO errors
+              )
             )
           )
         )
       )
     )
   )
 