Index: VERSION ================================================================== --- VERSION +++ VERSION @@ -1,1 +1,1 @@ -2.8.13 +2.8.17 Index: main.mk ================================================================== --- main.mk +++ main.mk @@ -153,10 +153,24 @@ $(RANLIB) libsqlite.a sqlite$(EXE): $(TOP)/src/shell.c libsqlite.a sqlite.h $(TCCX) $(READLINE_FLAGS) -o sqlite$(EXE) $(TOP)/src/shell.c \ libsqlite.a $(LIBREADLINE) $(THREADLIB) + +sqlite_analyzer$(EXE): $(TOP)/src/tclsqlite.c libsqlite.a $(TESTSRC) \ + $(TOP)/tool/spaceanal.tcl + sed \ + -e '/^#/d' \ + -e 's,\\,\\\\,g' \ + -e 's,",\\",g' \ + -e 's,^,",' \ + -e 's,$$,\\n",' \ + $(TOP)/tool/spaceanal.tcl >spaceanal_tcl.h + $(TCCX) $(TCL_FLAGS) -DTCLSH=2 -DSQLITE_TEST=1 -static -o \ + sqlite_analyzer$(EXE) $(TESTSRC) $(TOP)/src/tclsqlite.c \ + libsqlite.a $(LIBTCL) + # This target creates a directory named "tsrc" and fills it with # copies of all of the C source code and header files needed to # build on the target system. Some of the C source code and header # files are automatically generated. This target takes care of Index: publish.sh ================================================================== --- publish.sh +++ publish.sh @@ -9,46 +9,53 @@ # Set srcdir to the name of the directory that contains the publish.sh # script. # srcdir=`echo "$0" | sed 's%\(^.*\)/[^/][^/]*$%\1%'` -# Get the makefile. +# Clear out the build directory "doc" +# +#rm -rf doc +#make doc + +# Get the current version number - needed to help build filenames # -cp $srcdir/Makefile.linux-gcc ./Makefile -chmod +x $srcdir/install-sh +VERS=`cat $srcdir/VERSION` +VERSW=`sed 's/\./_/g' $srcdir/VERSION` -# Start building stuff. +# Start by building an sqlite shell for linux. # make clean make sqlite strip sqlite -mv sqlite sqlite.bin +mv sqlite sqlite-$VERS.bin rm -f sqlite.bin.gz -gzip sqlite.bin +gzip sqlite-$VERS.bin +mv sqlite-$VERS.bin.gz doc # Build the tclsqlite.so shared library for import into tclsh or wish # under Linux # make target_source -rm sqlite_source.zip +rm -f doc/sqlite-source-$VERSW.zip cd tsrc -zip ../sqlite_source.zip * +zip ../doc/sqlite-source-$VERSW.zip * rm shell.c TCLDIR=/home/drh/tcltk/8.2linux TCLSTUBLIB=$TCLDIR/libtclstub8.2g.a OPTS='-DUSE_TCL_STUBS=1 -DNDEBUG=1' gcc -fPIC $OPTS -O2 -I. -I$TCLDIR -shared *.c $TCLSTUBLIB -o tclsqlite.so strip tclsqlite.so -mv tclsqlite.so .. +mv tclsqlite.so tclsqlite-$VERS.so +gzip tclsqlite-$VERS.so +mv tclsqlite-$VERS.so.gz ../doc rm tclsqlite.c gcc -fPIC -DNDEBUG=1 -O2 -I. -shared *.c -o sqlite.so strip sqlite.so -mv sqlite.so .. +mv sqlite.so sqlite-$VERS.so +gzip sqlite-$VERS.so +mv sqlite-$VERS.so.gz ../doc cd .. -rm -f tclsqlite.so.gz sqlite.so.gz -gzip tclsqlite.so -gzip sqlite.so # Build the tclsqlite.dll shared library that can be imported into tclsh # or wish on windows. # make target_source @@ -74,11 +81,11 @@ --dlltool-name i386-mingw32msvc-dlltool \ --as i386-mingw32msvc-as \ --target i386-mingw32 \ -dllname tclsqlite.dll -lmsvcrt *.o $TCLSTUBLIB i386-mingw32msvc-strip tclsqlite.dll -mv tclsqlite.dll .. +#mv tclsqlite.dll ../tclsqlite-$VERSW.dll rm tclsqlite.o cat >sqlite.def <<\END_OF_FILE EXPORTS sqlite_open sqlite_close @@ -131,39 +138,34 @@ --dlltool-name i386-mingw32msvc-dlltool \ --as i386-mingw32msvc-as \ --target i386-mingw32 \ -dllname sqlite.dll -lmsvcrt *.o i386-mingw32msvc-strip sqlite.dll -mv sqlite.dll sqlite.def .. +zip ../doc/tclsqlite-$VERSW.zip tclsqlite.dll +zip ../doc/sqlitedll-$VERSW.zip sqlite.dll sqlite.def cd .. 
-rm -f tclsqlite.zip sqlitedll.zip -zip tclsqlite.zip tclsqlite.dll -zip sqlitedll.zip sqlite.dll sqlite.def # Build the sqlite.exe executable for windows. # make target_source cd tsrc rm tclsqlite.c OPTS='-DSTATIC_BUILD=1 -DNDEBUG=1' i386-mingw32msvc-gcc -O2 $OPTS -I. -I$TCLDIR *.c -o sqlite.exe -mv sqlite.exe .. +zip ../doc/sqlite-$VERSW.zip sqlite.exe cd .. -rm -f sqlite.zip -zip sqlite.zip sqlite.exe # Construct a tarball of the source tree # ORIGIN=`pwd` cd $srcdir cd .. -EXCLUDE=`find sqlite -print | grep CVS | sed 's,sqlite/, --exclude sqlite/,'` -tar czf $ORIGIN/sqlite.tar.gz $EXCLUDE sqlite +mv sqlite_v2 sqlite-$VERS +EXCLUDE=`find sqlite-$VERS -print | grep CVS | sed 's,^, --exclude ,'` +tar czf $ORIGIN/doc/sqlite-$VERS.tar.gz $EXCLUDE sqlite-$VERS +mv sqlite-$VERS sqlite_v2 cd $ORIGIN -vers=`cat $srcdir/VERSION` -rm -f sqlite-$vers.tar.gz -ln sqlite.tar.gz sqlite-$vers.tar.gz # # Build RPMS (binary) and Source RPM # @@ -177,26 +179,16 @@ mkdir $HOME/rpm/RPMS mkdir $HOME/rpm/SRPMS mkdir $HOME/rpm/SPECS # create the spec file from the template -sed s/SQLITE_VERSION/$vers/g $srcdir/spec.template > $HOME/rpm/SPECS/sqlite.spec +sed s/SQLITE_VERSION/$VERS/g $srcdir/spec.template > $HOME/rpm/SPECS/sqlite.spec # copy the source tarball to the rpm directory -cp sqlite-$vers.tar.gz $HOME/rpm/SOURCES/. +cp doc/sqlite-$VERS.tar.gz $HOME/rpm/SOURCES/. # build all the rpms rpm -ba $HOME/rpm/SPECS/sqlite.spec >& rpm-$vers.log # copy the RPMs into the build directory. -ln $HOME/rpm/RPMS/i386/sqlite*-$vers*.rpm . -ln $HOME/rpm/SRPMS/sqlite-$vers*.rpm . - - -# Build the website -# -cp $srcdir/../historical/* . -rm -rf doc -make doc -ln sqlite.bin.gz sqlite.zip sqlite*.tar.gz tclsqlite.so.gz tclsqlite.zip doc -ln sqlitedll.zip sqlite.so.gz sqlite_source.zip doc -ln *.rpm doc +mv $HOME/rpm/RPMS/i386/sqlite*-$VERS*.rpm doc +mv $HOME/rpm/SRPMS/sqlite-$VERS*.rpm doc Index: src/attach.c ================================================================== --- src/attach.c +++ src/attach.c @@ -9,11 +9,11 @@ ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains code used to implement the ATTACH and DETACH commands. 
** -** $Id: attach.c,v 1.10 2004/02/12 18:46:39 drh Exp $ +** $Id: attach.c,v 1.10.2.1 2004/05/07 01:46:01 drh Exp $ */ #include "sqliteInt.h" /* ** This routine is called by the parser to process an ATTACH statement: @@ -135,19 +135,21 @@ */ void sqliteDetach(Parse *pParse, Token *pDbname){ int i; sqlite *db; Vdbe *v; + Db *pDb; v = sqliteGetVdbe(pParse); sqliteVdbeAddOp(v, OP_Halt, 0, 0); if( pParse->explain ) return; db = pParse->db; for(i=0; inDb; i++){ - if( db->aDb[i].pBt==0 || db->aDb[i].zName==0 ) continue; - if( strlen(db->aDb[i].zName)!=pDbname->n ) continue; - if( sqliteStrNICmp(db->aDb[i].zName, pDbname->z, pDbname->n)==0 ) break; + pDb = &db->aDb[i]; + if( pDb->pBt==0 || pDb->zName==0 ) continue; + if( strlen(pDb->zName)!=pDbname->n ) continue; + if( sqliteStrNICmp(pDb->zName, pDbname->z, pDbname->n)==0 ) break; } if( i>=db->nDb ){ sqliteErrorMsg(pParse, "no such database: %T", pDbname); return; } @@ -158,14 +160,15 @@ #ifndef SQLITE_OMIT_AUTHORIZATION if( sqliteAuthCheck(pParse,SQLITE_DETACH,db->aDb[i].zName,0,0)!=SQLITE_OK ){ return; } #endif /* SQLITE_OMIT_AUTHORIZATION */ - sqliteBtreeClose(db->aDb[i].pBt); - db->aDb[i].pBt = 0; - sqliteFree(db->aDb[i].zName); + sqliteBtreeClose(pDb->pBt); + pDb->pBt = 0; + sqliteFree(pDb->zName); sqliteResetInternalSchema(db, i); + if( pDb->pAux && pDb->xFreeAux ) pDb->xFreeAux(pDb->pAux); db->nDb--; if( inDb ){ db->aDb[i] = db->aDb[db->nDb]; memset(&db->aDb[db->nDb], 0, sizeof(db->aDb[0])); sqliteResetInternalSchema(db, i); Index: src/auth.c ================================================================== --- src/auth.c +++ src/auth.c @@ -12,11 +12,11 @@ ** This file contains code used to implement the sqlite_set_authorizer() ** API. This facility is an optional feature of the library. Embedded ** systems that do not need this facility may omit it by recompiling ** the library with -DSQLITE_OMIT_AUTHORIZATION=1 ** -** $Id: auth.c,v 1.12 2004/02/22 18:40:57 drh Exp $ +** $Id: auth.c,v 1.12.2.2 2004/09/09 13:54:30 drh Exp $ */ #include "sqliteInt.h" /* ** All of the code in this file may be omitted by defining a single @@ -109,27 +109,27 @@ int rc; Table *pTab; /* The table being read */ const char *zCol; /* Name of the column of the table */ int iSrc; /* Index in pTabList->a[] of table being read */ const char *zDBase; /* Name of database being accessed */ + TriggerStack *pStack; /* The stack of current triggers */ if( db->xAuth==0 ) return; assert( pExpr->op==TK_COLUMN ); for(iSrc=0; iSrcnSrc; iSrc++){ if( pExpr->iTable==pTabList->a[iSrc].iCursor ) break; } if( iSrc>=0 && iSrcnSrc ){ pTab = pTabList->a[iSrc].pTab; - }else{ + }else if( (pStack = pParse->trigStack)!=0 ){ /* This must be an attempt to read the NEW or OLD pseudo-tables ** of a trigger. 
*/ - TriggerStack *pStack; /* The stack of current triggers */ - pStack = pParse->trigStack; - assert( pStack!=0 ); assert( pExpr->iTable==pStack->newIdx || pExpr->iTable==pStack->oldIdx ); pTab = pStack->pTab; + }else{ + return; } if( pTab==0 ) return; if( pExpr->iColumn>=0 ){ assert( pExpr->iColumnnCol ); zCol = pTab->aCol[pExpr->iColumn].zName; @@ -172,11 +172,11 @@ const char *zArg3 ){ sqlite *db = pParse->db; int rc; - if( db->xAuth==0 ){ + if( db->init.busy || db->xAuth==0 ){ return SQLITE_OK; } rc = db->xAuth(db->pAuthArg, code, zArg1, zArg2, zArg3, pParse->zAuthContext); if( rc==SQLITE_DENY ){ sqliteErrorMsg(pParse, "not authorized"); Index: src/btree_rb.c ================================================================== --- src/btree_rb.c +++ src/btree_rb.c @@ -7,11 +7,11 @@ ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* -** $Id: btree_rb.c,v 1.24 2004/02/29 00:11:31 drh Exp $ +** $Id: btree_rb.c,v 1.24.2.1 2004/06/26 14:40:05 drh Exp $ ** ** This file implements an in-core database using Red-Black balanced ** binary trees. ** ** It was contributed to SQLite by anonymous on 2003-Feb-04 23:24:49 UTC. @@ -309,11 +309,11 @@ * is not called from anyplace in the code. */ static void print_node(BtRbNode *pNode) { char * str = append_node(0, pNode, 0); - printf(str); + printf("%s", str); /* Suppress a warning message about print_node() being unused */ (void)print_node; } Index: src/build.c ================================================================== --- src/build.c +++ src/build.c @@ -21,11 +21,11 @@ ** BEGIN TRANSACTION ** COMMIT ** ROLLBACK ** PRAGMA ** -** $Id: build.c,v 1.176 2004/04/23 17:04:44 drh Exp $ +** $Id: build.c,v 1.176.2.3 2004/08/28 14:53:34 drh Exp $ */ #include "sqliteInt.h" #include /* @@ -1032,11 +1032,11 @@ sEnd = pParse->sLastToken; if( sEnd.z[0]!=0 && sEnd.z[0]!=';' ){ sEnd.z += sEnd.n; } sEnd.n = 0; - n = ((int)sEnd.z) - (int)pBegin->z; + n = sEnd.z - pBegin->z; z = pBegin->z; while( n>0 && (z[n-1]==';' || isspace(z[n-1])) ){ n--; } sEnd.z = &z[n-1]; sEnd.n = 1; @@ -1535,11 +1535,11 @@ ** own name. */ if( pName && !db->init.busy ){ Index *pISameName; /* Another index with the same name */ Table *pTSameName; /* A table with same name as the index */ - zName = sqliteStrNDup(pName->z, pName->n); + zName = sqliteTableNameFromToken(pName); if( zName==0 ) goto exit_create_index; if( (pISameName = sqliteFindIndex(db, zName, 0))!=0 ){ sqliteErrorMsg(pParse, "index %s already exists", zName); goto exit_create_index; } @@ -1555,11 +1555,11 @@ sprintf(zBuf,"%d)",n); zName = 0; sqliteSetString(&zName, "(", pTab->zName, " autoindex ", zBuf, (char*)0); if( zName==0 ) goto exit_create_index; }else{ - zName = sqliteStrNDup(pName->z, pName->n); + zName = sqliteTableNameFromToken(pName); } /* Check for authorization to create an index. */ #ifndef SQLITE_OMIT_AUTHORIZATION Index: src/date.c ================================================================== --- src/date.c +++ src/date.c @@ -14,11 +14,11 @@ ** ** There is only one exported symbol in this file - the function ** sqliteRegisterDateTimeFunctions() found at the bottom of the file. ** All other code has file scope. ** -** $Id: date.c,v 1.16 2004/02/29 01:08:18 drh Exp $ +** $Id: date.c,v 1.16.2.4 2007/01/08 16:20:29 drh Exp $ ** ** NOTES: ** ** SQLite processes all times and dates as Julian Day numbers. 
The ** dates and times are stored as the number of days since noon @@ -798,22 +798,24 @@ break; } case 'H': sprintf(&z[j],"%02d",x.h); j+=2; break; case 'W': /* Fall thru */ case 'j': { - int n; + int n; /* Number of days since 1st day of year */ DateTime y = x; y.validJD = 0; y.M = 1; y.D = 1; computeJD(&y); - n = x.rJD - y.rJD + 1; + n = x.rJD - y.rJD + 0.5; if( zFmt[i]=='W' ){ - sprintf(&z[j],"%02d",(n+6)/7); + int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */ + wd = ((int)(x.rJD+0.5)) % 7; + sprintf(&z[j],"%02d",(n+7-wd)/7); j += 2; }else{ - sprintf(&z[j],"%03d",n); + sprintf(&z[j],"%03d",n+1); j += 3; } break; } case 'J': sprintf(&z[j],"%.16g",x.rJD); j+=strlen(&z[j]); break; @@ -822,11 +824,11 @@ case 's': { sprintf(&z[j],"%d",(int)((x.rJD-2440587.5)*86400.0 + 0.5)); j += strlen(&z[j]); break; } - case 'S': sprintf(&z[j],"%02d",(int)(x.s+0.5)); j+=2; break; + case 'S': sprintf(&z[j],"%02d",(int)x.s); j+=2; break; case 'w': z[j++] = (((int)(x.rJD+1.5)) % 7) + '0'; break; case 'Y': sprintf(&z[j],"%04d",x.Y); j+=strlen(&z[j]); break; case '%': z[j++] = '%'; break; } } @@ -845,23 +847,22 @@ ** This function registered all of the above C functions as SQL ** functions. This should be the only routine in this file with ** external linkage. */ void sqliteRegisterDateTimeFunctions(sqlite *db){ +#ifndef SQLITE_OMIT_DATETIME_FUNCS static struct { char *zName; int nArg; int dataType; void (*xFunc)(sqlite_func*,int,const char**); } aFuncs[] = { -#ifndef SQLITE_OMIT_DATETIME_FUNCS { "julianday", -1, SQLITE_NUMERIC, juliandayFunc }, { "date", -1, SQLITE_TEXT, dateFunc }, { "time", -1, SQLITE_TEXT, timeFunc }, { "datetime", -1, SQLITE_TEXT, datetimeFunc }, { "strftime", -1, SQLITE_TEXT, strftimeFunc }, -#endif }; int i; for(i=0; i /* @@ -122,11 +122,11 @@ if( p==0 ) return 0; pNew = sqliteMallocRaw( sizeof(*p) ); if( pNew==0 ) return 0; memcpy(pNew, p, sizeof(*pNew)); if( p->token.z!=0 ){ - pNew->token.z = sqliteStrDup(p->token.z); + pNew->token.z = sqliteStrNDup(p->token.z, p->token.n); pNew->token.dyn = 1; }else{ assert( pNew->token.z==0 ); } pNew->span.z = 0; @@ -153,11 +153,14 @@ if( p==0 ) return 0; pNew = sqliteMalloc( sizeof(*pNew) ); if( pNew==0 ) return 0; pNew->nExpr = pNew->nAlloc = p->nExpr; pNew->a = pItem = sqliteMalloc( p->nExpr*sizeof(p->a[0]) ); - if( pItem==0 ) return 0; /* Leaks memory after a malloc failure */ + if( pItem==0 ){ + sqliteFree(pNew); + return 0; + } for(i=0; inExpr; i++, pItem++){ Expr *pNewExpr, *pOldExpr; pItem->pExpr = pNewExpr = sqliteExprDup(pOldExpr = p->a[i].pExpr); if( pOldExpr->span.z!=0 && pNewExpr ){ /* Always make a copy of the span for top-level expressions in the Index: src/func.c ================================================================== --- src/func.c +++ src/func.c @@ -14,11 +14,11 @@ ** ** There is only one exported symbol in this file - the function ** sqliteRegisterBuildinFunctions() found at the bottom of the file. ** All other code has file scope. ** -** $Id: func.c,v 1.43 2004/02/25 22:51:06 rdc Exp $ +** $Id: func.c,v 1.43.2.3 2004/07/18 23:03:11 drh Exp $ */ #include #include #include #include @@ -155,24 +155,24 @@ /* ** Implementation of the upper() and lower() SQL functions. 
*/ static void upperFunc(sqlite_func *context, int argc, const char **argv){ - char *z; + unsigned char *z; int i; if( argc<1 || argv[0]==0 ) return; - z = sqlite_set_result_string(context, argv[0], -1); + z = (unsigned char*)sqlite_set_result_string(context, argv[0], -1); if( z==0 ) return; for(i=0; z[i]; i++){ if( islower(z[i]) ) z[i] = toupper(z[i]); } } static void lowerFunc(sqlite_func *context, int argc, const char **argv){ - char *z; + unsigned char *z; int i; if( argc<1 || argv[0]==0 ) return; - z = sqlite_set_result_string(context, argv[0], -1); + z = (unsigned char*)sqlite_set_result_string(context, argv[0], -1); if( z==0 ) return; for(i=0; z[i]; i++){ if( isupper(z[i]) ) z[i] = tolower(z[i]); } } @@ -515,42 +515,44 @@ MinMaxCtx *p; int (*xCompare)(const char*, const char*); int mask; /* 0 for min() or 0xffffffff for max() */ assert( argc==2 ); + if( argv[0]==0 ) return; /* Ignore NULL values */ if( argv[1][0]=='n' ){ xCompare = sqliteCompare; }else{ xCompare = strcmp; } mask = (int)sqlite_user_data(context); + assert( mask==0 || mask==-1 ); p = sqlite_aggregate_context(context, sizeof(*p)); - if( p==0 || argc<1 || argv[0]==0 ) return; + if( p==0 || argc<1 ) return; if( p->z==0 || (xCompare(argv[0],p->z)^mask)<0 ){ int len; - if( !p->zBuf[0] ){ + if( p->zBuf[0] ){ sqliteFree(p->z); } len = strlen(argv[0]); if( len < sizeof(p->zBuf)-1 ){ p->z = &p->zBuf[1]; - p->zBuf[0] = 1; + p->zBuf[0] = 0; }else{ p->z = sqliteMalloc( len+1 ); - p->zBuf[0] = 0; + p->zBuf[0] = 1; if( p->z==0 ) return; } strcpy(p->z, argv[0]); } } static void minMaxFinalize(sqlite_func *context){ MinMaxCtx *p; p = sqlite_aggregate_context(context, sizeof(*p)); - if( p && p->z ){ + if( p && p->z && p->zBuf[0]<2 ){ sqlite_set_result_string(context, p->z, strlen(p->z)); } - if( p && !p->zBuf[0] ){ + if( p && p->zBuf[0] ){ sqliteFree(p->z); } } /* @@ -619,19 +621,29 @@ }; static const char *azTypeFuncs[] = { "min", "max", "typeof" }; int i; for(i=0; i @@ -187,14 +187,17 @@ static int sqliteInitOne(sqlite *db, int iDb, char **pzErrMsg){ int rc; BtCursor *curMain; int size; Table *pTab; - char *azArg[6]; + char const *azArg[6]; char zDbNum[30]; int meta[SQLITE_N_BTREE_META]; InitData initData; + char const *zMasterSchema; + char const *zMasterName; + char *zSql = 0; /* ** The master database table has a structure like this */ static char master_schema[] = @@ -214,66 +217,42 @@ " rootpage integer,\n" " sql text\n" ")" ; - /* The following SQL will read the schema from the master tables. - ** The first version works with SQLite file formats 2 or greater. - ** The second version is for format 1 files. - ** - ** Beginning with file format 2, the rowid for new table entries - ** (including entries in sqlite_master) is an increasing integer. - ** So for file format 2 and later, we can play back sqlite_master - ** and all the CREATE statements will appear in the right order. - ** But with file format 1, table entries were random and so we - ** have to make sure the CREATE TABLEs occur before their corresponding - ** CREATE INDEXs. (We don't have to deal with CREATE VIEW or - ** CREATE TRIGGER in file format 1 because those constructs did - ** not exist then.) + assert( iDb>=0 && iDbnDb ); + + /* zMasterSchema and zInitScript are set to point at the master schema + ** and initialisation script appropriate for the database being + ** initialised. zMasterName is the name of the master table. 
*/ - static char init_script[] = - "SELECT type, name, rootpage, sql, 1 FROM sqlite_temp_master " - "UNION ALL " - "SELECT type, name, rootpage, sql, 0 FROM sqlite_master"; - static char older_init_script[] = - "SELECT type, name, rootpage, sql, 1 FROM sqlite_temp_master " - "UNION ALL " - "SELECT type, name, rootpage, sql, 0 FROM sqlite_master " - "WHERE type='table' " - "UNION ALL " - "SELECT type, name, rootpage, sql, 0 FROM sqlite_master " - "WHERE type='index'"; - - - assert( iDb>=0 && iDb!=1 && iDbnDb ); - - /* Construct the schema tables: sqlite_master and sqlite_temp_master + if( iDb==1 ){ + zMasterSchema = temp_master_schema; + zMasterName = TEMP_MASTER_NAME; + }else{ + zMasterSchema = master_schema; + zMasterName = MASTER_NAME; + } + + /* Construct the schema table. */ sqliteSafetyOff(db); azArg[0] = "table"; - azArg[1] = MASTER_NAME; + azArg[1] = zMasterName; azArg[2] = "2"; - azArg[3] = master_schema; + azArg[3] = zMasterSchema; sprintf(zDbNum, "%d", iDb); azArg[4] = zDbNum; azArg[5] = 0; initData.db = db; initData.pzErrMsg = pzErrMsg; - sqliteInitCallback(&initData, 5, azArg, 0); - pTab = sqliteFindTable(db, MASTER_NAME, "main"); + sqliteInitCallback(&initData, 5, (char **)azArg, 0); + pTab = sqliteFindTable(db, zMasterName, db->aDb[iDb].zName); if( pTab ){ pTab->readOnly = 1; - } - if( iDb==0 ){ - azArg[1] = TEMP_MASTER_NAME; - azArg[3] = temp_master_schema; - azArg[4] = "1"; - sqliteInitCallback(&initData, 5, azArg, 0); - pTab = sqliteFindTable(db, TEMP_MASTER_NAME, "temp"); - if( pTab ){ - pTab->readOnly = 1; - } + }else{ + return SQLITE_NOMEM; } sqliteSafetyOn(db); /* Create a cursor to hold the database open */ @@ -318,11 +297,11 @@ }else if( db->file_format>4 ){ sqliteBtreeCloseCursor(curMain); sqliteSetString(pzErrMsg, "unsupported file format", (char*)0); return SQLITE_ERROR; } - }else if( db->file_format!=meta[2] || db->file_format<4 ){ + }else if( iDb!=1 && (db->file_format!=meta[2] || db->file_format<4) ){ assert( db->file_format>=4 ); if( meta[2]==0 ){ sqliteSetString(pzErrMsg, "cannot attach empty database: ", db->aDb[iDb].zName, (char*)0); }else{ @@ -338,34 +317,48 @@ /* Read the schema information out of the schema tables */ assert( db->init.busy ); sqliteSafetyOff(db); - if( iDb==0 ){ - rc = sqlite_exec(db, - db->file_format>=2 ? init_script : older_init_script, - sqliteInitCallback, &initData, 0); + + /* The following SQL will read the schema from the master tables. + ** The first version works with SQLite file formats 2 or greater. + ** The second version is for format 1 files. + ** + ** Beginning with file format 2, the rowid for new table entries + ** (including entries in sqlite_master) is an increasing integer. + ** So for file format 2 and later, we can play back sqlite_master + ** and all the CREATE statements will appear in the right order. + ** But with file format 1, table entries were random and so we + ** have to make sure the CREATE TABLEs occur before their corresponding + ** CREATE INDEXs. (We don't have to deal with CREATE VIEW or + ** CREATE TRIGGER in file format 1 because those constructs did + ** not exist then.) 
+ */ + if( db->file_format>=2 ){ + sqliteSetString(&zSql, + "SELECT type, name, rootpage, sql, ", zDbNum, " FROM \"", + db->aDb[iDb].zName, "\".", zMasterName, (char*)0); }else{ - char *zSql = 0; sqliteSetString(&zSql, - "SELECT type, name, rootpage, sql, ", zDbNum, " FROM \"", - db->aDb[iDb].zName, "\".sqlite_master", (char*)0); - rc = sqlite_exec(db, zSql, sqliteInitCallback, &initData, 0); - sqliteFree(zSql); + "SELECT type, name, rootpage, sql, ", zDbNum, " FROM \"", + db->aDb[iDb].zName, "\".", zMasterName, + " WHERE type IN ('table', 'index')" + " ORDER BY CASE type WHEN 'table' THEN 0 ELSE 1 END", (char*)0); } + rc = sqlite_exec(db, zSql, sqliteInitCallback, &initData, 0); + + sqliteFree(zSql); sqliteSafetyOn(db); sqliteBtreeCloseCursor(curMain); if( sqlite_malloc_failed ){ sqliteSetString(pzErrMsg, "out of memory", (char*)0); rc = SQLITE_NOMEM; sqliteResetInternalSchema(db, 0); } if( rc==SQLITE_OK ){ DbSetProperty(db, iDb, DB_SchemaLoaded); - if( iDb==0 ){ - DbSetProperty(db, 1, DB_SchemaLoaded); - } }else{ sqliteResetInternalSchema(db, iDb); } return rc; } @@ -389,17 +382,28 @@ if( db->init.busy ) return SQLITE_OK; assert( (db->flags & SQLITE_Initialized)==0 ); rc = SQLITE_OK; db->init.busy = 1; for(i=0; rc==SQLITE_OK && inDb; i++){ - if( DbHasProperty(db, i, DB_SchemaLoaded) ) continue; - assert( i!=1 ); /* Should have been initialized together with 0 */ + if( DbHasProperty(db, i, DB_SchemaLoaded) || i==1 ) continue; rc = sqliteInitOne(db, i, pzErrMsg); if( rc ){ sqliteResetInternalSchema(db, i); } } + + /* Once all the other databases have been initialised, load the schema + ** for the TEMP database. This is loaded last, as the TEMP database + ** schema may contain references to objects in other databases. + */ + if( rc==SQLITE_OK && db->nDb>1 && !DbHasProperty(db, 1, DB_SchemaLoaded) ){ + rc = sqliteInitOne(db, 1, pzErrMsg); + if( rc ){ + sqliteResetInternalSchema(db, 1); + } + } + db->init.busy = 0; if( rc==SQLITE_OK ){ db->flags |= SQLITE_Initialized; sqliteCommitInternalChanges(db); } @@ -867,11 +871,11 @@ static const char delays[] = { 1, 2, 5, 10, 15, 20, 25, 25, 25, 50, 50, 50, 100}; static const short int totals[] = { 0, 1, 3, 8, 18, 33, 53, 78, 103, 128, 178, 228, 287}; # define NDELAY (sizeof(delays)/sizeof(delays[0])) - int timeout = (int)Timeout; + int timeout = (int)(long)Timeout; int delay, prior; if( count <= NDELAY ){ delay = delays[count-1]; prior = totals[count-1]; @@ -884,11 +888,11 @@ if( delay<=0 ) return 0; } sqliteOsSleep(delay); return 1; #else - int timeout = (int)Timeout; + int timeout = (int)(long)Timeout; if( (count+1)*1000 > timeout ){ return 0; } sqliteOsSleep(1000); return 1; @@ -937,11 +941,11 @@ ** This routine installs a default busy handler that waits for the ** specified number of milliseconds before returning 0. 
*/ void sqlite_busy_timeout(sqlite *db, int ms){ if( ms>0 ){ - sqlite_busy_handler(db, sqliteDefaultBusyCallback, (void*)ms); + sqlite_busy_handler(db, sqliteDefaultBusyCallback, (void*)(long)ms); }else{ sqlite_busy_handler(db, 0, 0); } } Index: src/os.c ================================================================== --- src/os.c +++ src/os.c @@ -462,10 +462,15 @@ #if OS_UNIX int rc; id->dirfd = -1; id->fd = open(zFilename, O_RDWR|O_CREAT|O_LARGEFILE|O_BINARY, 0644); if( id->fd<0 ){ +#ifdef EISDIR + if( errno==EISDIR ){ + return SQLITE_CANTOPEN; + } +#endif id->fd = open(zFilename, O_RDONLY|O_LARGEFILE|O_BINARY); if( id->fd<0 ){ return SQLITE_CANTOPEN; } *pReadonly = 1; @@ -771,17 +776,25 @@ TRACE3("OPENDIR %-3d %s\n", id->dirfd, zDirname); #endif return SQLITE_OK; } +/* +** If the following global variable points to a string which is the +** name of a directory, then that directory will be used to store +** temporary files. +*/ +const char *sqlite_temp_directory = 0; + /* ** Create a temporary file name in zBuf. zBuf must be big enough to ** hold at least SQLITE_TEMPNAME_SIZE characters. */ int sqliteOsTempFileName(char *zBuf){ #if OS_UNIX static const char *azDirs[] = { + 0, "/var/tmp", "/usr/tmp", "/tmp", ".", }; @@ -790,11 +803,13 @@ "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789"; int i, j; struct stat buf; const char *zDir = "."; + azDirs[0] = sqlite_temp_directory; for(i=0; i0 && zTempPath[i-1]=='\\'; i--){} - zTempPath[i] = 0; + if( sqlite_temp_directory==0 ){ + GetTempPath(SQLITE_TEMPNAME_SIZE-30, zTempPath); + for(i=strlen(zTempPath); i>0 && zTempPath[i-1]=='\\'; i--){} + zTempPath[i] = 0; + zDir = zTempPath; + }else{ + zDir = sqlite_temp_directory; + } for(;;){ - sprintf(zBuf, "%s\\"TEMP_FILE_PREFIX, zTempPath); + sprintf(zBuf, "%s\\"TEMP_FILE_PREFIX, zDir); j = strlen(zBuf); sqliteRandomness(15, &zBuf[j]); for(i=0; i<15; i++, j++){ zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; } @@ -834,17 +855,20 @@ static char zChars[] = "abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789"; int i, j; + char *zDir; char zTempPath[SQLITE_TEMPNAME_SIZE]; char zdirName[32]; CInfoPBRec infoRec; Str31 dirName; memset(&infoRec, 0, sizeof(infoRec)); memset(zTempPath, 0, SQLITE_TEMPNAME_SIZE); - if( FindFolder(kOnSystemDisk, kTemporaryFolderType, kCreateFolder, + if( sqlite_temp_directory!=0 ){ + zDir = sqlite_temp_directory; + }else if( FindFolder(kOnSystemDisk, kTemporaryFolderType, kCreateFolder, &(infoRec.dirInfo.ioVRefNum), &(infoRec.dirInfo.ioDrParID)) == noErr ){ infoRec.dirInfo.ioNamePtr = dirName; do{ infoRec.dirInfo.ioFDirIndex = -1; infoRec.dirInfo.ioDrDirID = infoRec.dirInfo.ioDrParID; @@ -857,15 +881,18 @@ }else{ *zTempPath = 0; break; } } while( infoRec.dirInfo.ioDrDirID != fsRtDirID ); + zDir = zTempPath; } - if( *zTempPath == 0 ) + if( zDir[0]==0 ){ getcwd(zTempPath, SQLITE_TEMPNAME_SIZE-24); + zDir = zTempPath; + } for(;;){ - sprintf(zBuf, "%s"TEMP_FILE_PREFIX, zTempPath); + sprintf(zBuf, "%s"TEMP_FILE_PREFIX, zDir); j = strlen(zBuf); sqliteRandomness(15, &zBuf[j]); for(i=0; i<15; i++, j++){ zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; } @@ -1086,10 +1113,14 @@ return SQLITE_OK; } } #endif } + +#ifdef SQLITE_NOSYNC +# define fsync(X) 0 +#endif /* ** Make sure all writes to a particular file are committed to disk. 
** ** Under Unix, also make sure that the directory entry for the file @@ -1745,10 +1776,11 @@ char *zFull = 0; if( zRelative[0]=='/' ){ sqliteSetString(&zFull, zRelative, (char*)0); }else{ char zBuf[5000]; + zBuf[0] = 0; sqliteSetString(&zFull, getcwd(zBuf, sizeof(zBuf)), "/", zRelative, (char*)0); } return zFull; #endif Index: src/pager.c ================================================================== --- src/pager.c +++ src/pager.c @@ -16,11 +16,11 @@ ** is separate from the database file. The pager also implements file ** locking to prevent two processes from writing the same database ** file simultaneously, or one process from reading the database while ** another is writing. ** -** @(#) $Id: pager.c,v 1.101 2004/02/25 02:20:41 drh Exp $ +** @(#) $Id: pager.c,v 1.101.2.1 2005/12/19 17:37:10 drh Exp $ */ #include "os.h" /* Must be first to enable large file support */ #include "sqliteInt.h" #include "pager.h" #include @@ -1927,11 +1927,11 @@ void sqlitepager_dont_write(Pager *pPager, Pgno pgno){ PgHdr *pPg; pPg = pager_lookup(pPager, pgno); pPg->alwaysRollback = 1; - if( pPg && pPg->dirty ){ + if( pPg && pPg->dirty && !pPager->ckptInUse ){ if( pPager->dbSize==(int)pPg->pgno && pPager->origDbSizedbSize ){ /* If this pages is the last page in the file and the file has grown ** during the current transaction, then do NOT mark the page as clean. ** When the database file grows, we must make sure that the last page ** gets written at least once so that the disk file will be the correct Index: src/printf.c ================================================================== --- src/printf.c +++ src/printf.c @@ -225,10 +225,11 @@ etByte flag_rtz; /* True if trailing zeros should be removed */ etByte flag_exp; /* True to force display of the exponent */ int nsd; /* Number of significant digits returned */ #endif + func(arg,"",0); count = length = 0; bufpt = 0; for(; (c=(*fmt))!=0; ++fmt){ if( c!='%' ){ int amt; @@ -671,13 +672,15 @@ }else{ pM->zText = pM->xRealloc(pM->zText, pM->nAlloc); } } } - if( pM->zText && nNewChar>0 ){ - memcpy(&pM->zText[pM->nChar], zNewText, nNewChar); - pM->nChar += nNewChar; + if( pM->zText ){ + if( nNewChar>0 ){ + memcpy(&pM->zText[pM->nChar], zNewText, nNewChar); + pM->nChar += nNewChar; + } pM->zText[pM->nChar] = 0; } } /* Index: src/select.c ================================================================== --- src/select.c +++ src/select.c @@ -10,11 +10,11 @@ ** ************************************************************************* ** This file contains C code routines that are called by the parser ** to handle SELECT statements in SQLite. ** -** $Id: select.c,v 1.161 2004/03/13 14:00:36 drh Exp $ +** $Id: select.c,v 1.161.2.4 2004/07/20 01:45:49 drh Exp $ */ #include "sqliteInt.h" /* @@ -361,10 +361,34 @@ zType[i] = sqliteExprType(pEList->a[i].pExpr)==SQLITE_SO_NUM ? 
'n' : 't'; } zType[i] = 0; sqliteVdbeChangeP3(v, -1, zType, P3_DYNAMIC); } + +/* +** Add code to implement the OFFSET and LIMIT +*/ +static void codeLimiter( + Vdbe *v, /* Generate code into this VM */ + Select *p, /* The SELECT statement being coded */ + int iContinue, /* Jump here to skip the current record */ + int iBreak, /* Jump here to end the loop */ + int nPop /* Number of times to pop stack when jumping */ +){ + if( p->iOffset>=0 ){ + int addr = sqliteVdbeCurrentAddr(v) + 2; + if( nPop>0 ) addr++; + sqliteVdbeAddOp(v, OP_MemIncr, p->iOffset, addr); + if( nPop>0 ){ + sqliteVdbeAddOp(v, OP_Pop, nPop, 0); + } + sqliteVdbeAddOp(v, OP_Goto, 0, iContinue); + } + if( p->iLimit>=0 ){ + sqliteVdbeAddOp(v, OP_MemIncr, p->iLimit, iBreak); + } +} /* ** This routine generates the code for the inside of the inner loop ** of a SELECT. ** @@ -386,26 +410,21 @@ int iContinue, /* Jump here to continue with next row */ int iBreak /* Jump here to break out of the inner loop */ ){ Vdbe *v = pParse->pVdbe; int i; + int hasDistinct; /* True if the DISTINCT keyword is present */ if( v==0 ) return 0; assert( pEList!=0 ); /* If there was a LIMIT clause on the SELECT statement, then do the check ** to see if this row should be output. */ - if( pOrderBy==0 ){ - if( p->iOffset>=0 ){ - int addr = sqliteVdbeCurrentAddr(v); - sqliteVdbeAddOp(v, OP_MemIncr, p->iOffset, addr+2); - sqliteVdbeAddOp(v, OP_Goto, 0, iContinue); - } - if( p->iLimit>=0 ){ - sqliteVdbeAddOp(v, OP_MemIncr, p->iLimit, iBreak); - } + hasDistinct = distinct>=0 && pEList && pEList->nExpr>0; + if( pOrderBy==0 && !hasDistinct ){ + codeLimiter(v, p, iContinue, iBreak, 0); } /* Pull the requested columns. */ if( nColumn>0 ){ @@ -421,11 +440,11 @@ /* If the DISTINCT keyword was present on the SELECT statement ** and this row has been seen before, then do not make this row ** part of the result. */ - if( distinct>=0 && pEList && pEList->nExpr>0 ){ + if( hasDistinct ){ #if NULL_ALWAYS_DISTINCT sqliteVdbeAddOp(v, OP_IsNull, -pEList->nExpr, sqliteVdbeCurrentAddr(v)+7); #endif sqliteVdbeAddOp(v, OP_MakeKey, pEList->nExpr, 1); if( pParse->db->file_format>=4 ) sqliteAddKeyType(v, pEList); @@ -432,10 +451,13 @@ sqliteVdbeAddOp(v, OP_Distinct, distinct, sqliteVdbeCurrentAddr(v)+3); sqliteVdbeAddOp(v, OP_Pop, pEList->nExpr+1, 0); sqliteVdbeAddOp(v, OP_Goto, 0, iContinue); sqliteVdbeAddOp(v, OP_String, 0, 0); sqliteVdbeAddOp(v, OP_PutStrKey, distinct, 0); + if( pOrderBy==0 ){ + codeLimiter(v, p, iContinue, iBreak, nColumn); + } } switch( eDest ){ /* In this mode, write each query result to the key of the temporary ** table iParm. 
@@ -568,18 +590,11 @@ int end2 = sqliteVdbeMakeLabel(v); int addr; if( eDest==SRT_Sorter ) return; sqliteVdbeAddOp(v, OP_Sort, 0, 0); addr = sqliteVdbeAddOp(v, OP_SortNext, 0, end1); - if( p->iOffset>=0 ){ - sqliteVdbeAddOp(v, OP_MemIncr, p->iOffset, addr+4); - sqliteVdbeAddOp(v, OP_Pop, 1, 0); - sqliteVdbeAddOp(v, OP_Goto, 0, addr); - } - if( p->iLimit>=0 ){ - sqliteVdbeAddOp(v, OP_MemIncr, p->iLimit, end2); - } + codeLimiter(v, p, addr, end2, 1); switch( eDest ){ case SRT_Callback: { sqliteVdbeAddOp(v, OP_SortCallback, nColumn, 0); break; } @@ -808,12 +823,13 @@ }else if( p->span.z && p->span.z[0] ){ sqliteSetNString(&pTab->aCol[i].zName, p->span.z, p->span.n, 0); }else{ char zBuf[30]; sprintf(zBuf, "column%d", i+1); - pTab->aCol[i].zName = sqliteStrDup(zBuf); + aCol[i].zName = sqliteStrDup(zBuf); } + sqliteDequote(aCol[i].zName); } pTab->iPKey = -1; return pTab; } @@ -941,25 +957,24 @@ a[k].zName = 0; }else{ /* This expression is a "*" or a "TABLE.*" and needs to be ** expanded. */ int tableSeen = 0; /* Set to 1 when TABLE matches */ - Token *pName; /* text of name of TABLE */ + char *zTName; /* text of name of TABLE */ if( pE->op==TK_DOT && pE->pLeft ){ - pName = &pE->pLeft->token; + zTName = sqliteTableNameFromToken(&pE->pLeft->token); }else{ - pName = 0; + zTName = 0; } for(i=0; inSrc; i++){ Table *pTab = pTabList->a[i].pTab; char *zTabName = pTabList->a[i].zAlias; if( zTabName==0 || zTabName[0]==0 ){ zTabName = pTab->zName; } - if( pName && (zTabName==0 || zTabName[0]==0 || - sqliteStrNICmp(pName->z, zTabName, pName->n)!=0 || - zTabName[pName->n]!=0) ){ + if( zTName && (zTabName==0 || zTabName[0]==0 || + sqliteStrICmp(zTName, zTabName)!=0) ){ continue; } tableSeen = 1; for(j=0; jnCol; j++){ Expr *pExpr, *pLeft, *pRight; @@ -1000,17 +1015,18 @@ } pNew = sqliteExprListAppend(pNew, pExpr, 0); } } if( !tableSeen ){ - if( pName ){ - sqliteErrorMsg(pParse, "no such table: %T", pName); + if( zTName ){ + sqliteErrorMsg(pParse, "no such table: %s", zTName); }else{ sqliteErrorMsg(pParse, "no tables specified"); } rc = 1; } + sqliteFree(zTName); } } sqliteExprListDelete(pEList); p->pEList = pNew; } @@ -1914,10 +1930,16 @@ if( pIdx==0 ){ sqliteVdbeAddOp(v, seekOp, base, 0); }else{ sqliteVdbeAddOp(v, OP_Integer, pIdx->iDb, 0); sqliteVdbeOp3(v, OP_OpenRead, base+1, pIdx->tnum, pIdx->zName, P3_STATIC); + if( seekOp==OP_Rewind ){ + sqliteVdbeAddOp(v, OP_String, 0, 0); + sqliteVdbeAddOp(v, OP_MakeKey, 1, 0); + sqliteVdbeAddOp(v, OP_IncrKey, 0, 0); + seekOp = OP_MoveTo; + } sqliteVdbeAddOp(v, seekOp, base+1, 0); sqliteVdbeAddOp(v, OP_IdxRecno, base+1, 0); sqliteVdbeAddOp(v, OP_Close, base+1, 0); sqliteVdbeAddOp(v, OP_MoveTo, base, 0); } Index: src/shell.c ================================================================== --- src/shell.c +++ src/shell.c @@ -10,11 +10,11 @@ ** ************************************************************************* ** This file contains code to implement the "sqlite" command line ** utility for accessing SQLite databases. 
** -** $Id: shell.c,v 1.93 2004/03/17 23:42:13 drh Exp $ +** $Id: shell.c,v 1.93.2.1 2005/02/16 03:53:11 drh Exp $ */ #include #include #include #include "sqlite.h" @@ -589,20 +589,20 @@ fprintf(p->out, "BEGIN TRANSACTION;\n"); if( nArg==1 ){ sqlite_exec(p->db, "SELECT name, type, sql FROM sqlite_master " "WHERE type!='meta' AND sql NOT NULL " - "ORDER BY substr(type,2,1), name", + "ORDER BY substr(type,2,1), rowid", dump_callback, p, &zErrMsg ); }else{ int i; for(i=1; idb, "SELECT name, type, sql FROM sqlite_master " "WHERE tbl_name LIKE '%q' AND type!='meta' AND sql NOT NULL " - "ORDER BY substr(type,2,1), name", + "ORDER BY substr(type,2,1), rowid", dump_callback, p, &zErrMsg, azArg[i] ); } } if( zErrMsg ){ Index: src/sqlite.h.in ================================================================== --- src/sqlite.h.in +++ src/sqlite.h.in @@ -10,11 +10,11 @@ ** ************************************************************************* ** This header file defines the interface that the SQLite library ** presents to client programs. ** -** @(#) $Id: sqlite.h.in,v 1.60 2004/03/14 22:12:35 drh Exp $ +** @(#) $Id: sqlite.h.in,v 1.60.2.1 2004/10/06 15:52:36 drh Exp $ */ #ifndef _SQLITE_H_ #define _SQLITE_H_ #include /* Needed for the definition of va_list */ @@ -26,11 +26,15 @@ #endif /* ** The version of the SQLite library. */ -#define SQLITE_VERSION "--VERS--" +#ifdef SQLITE_VERSION +# undef SQLITE_VERSION +#else +# define SQLITE_VERSION "--VERS--" +#endif /* ** The version string is also compiled into the library so that a program ** can check to make sure that the lib*.a file and the *.h file are from ** the same version. @@ -477,12 +481,26 @@ sqlite *db, /* The database there the function is registered */ const char *zName, /* Name of the function */ int datatype /* The datatype for this function */ ); #define SQLITE_NUMERIC (-1) -#define SQLITE_TEXT (-2) +/* #define SQLITE_TEXT (-2) // See below */ #define SQLITE_ARGS (-3) + +/* +** SQLite version 3 defines SQLITE_TEXT differently. To allow both +** version 2 and version 3 to be included, undefine them both if a +** conflict is seen. Define SQLITE2_TEXT to be the version 2 value. +*/ +#ifdef SQLITE_TEXT +# undef SQLITE_TEXT +#else +# define SQLITE_TEXT (-2) +#endif +#define SQLITE2_TEXT (-2) + + /* ** The user function implementations call one of the following four routines ** in order to return their results. The first parameter to each of these ** routines is a copy of the first argument to xFunc() or xFinialize(). Index: src/sqliteInt.h ================================================================== --- src/sqliteInt.h +++ src/sqliteInt.h @@ -9,11 +9,11 @@ ** May you share freely, never taking more than you give. ** ************************************************************************* ** Internal interface definitions for SQLite. 
** -** @(#) $Id: sqliteInt.h,v 1.220 2004/02/25 13:47:33 drh Exp $ +** @(#) $Id: sqliteInt.h,v 1.220.2.2 2005/06/06 15:07:03 drh Exp $ */ #include "config.h" #include "sqlite.h" #include "hash.h" #include "parse.h" @@ -100,10 +100,13 @@ # define UINT32_TYPE unsigned int #endif #ifndef UINT16_TYPE # define UINT16_TYPE unsigned short int #endif +#ifndef INT16_TYPE +# define INT16_TYPE short int +#endif #ifndef UINT8_TYPE # define UINT8_TYPE unsigned char #endif #ifndef INT8_TYPE # define INT8_TYPE signed char @@ -115,10 +118,11 @@ # define INTPTR_TYPE long long # endif #endif typedef UINT32_TYPE u32; /* 4-byte unsigned integer */ typedef UINT16_TYPE u16; /* 2-byte unsigned integer */ +typedef INT16_TYPE i16; /* 2-byte signed integer */ typedef UINT8_TYPE u8; /* 1-byte unsigned integer */ typedef UINT8_TYPE i8; /* 1-byte signed integer */ typedef INTPTR_TYPE ptr; /* Big enough to hold a pointer */ typedef unsigned INTPTR_TYPE uptr; /* Big enough to hold a pointer */ @@ -760,12 +764,12 @@ ** is modified by an INSERT, DELETE, or UPDATE statement. In standard SQL, ** such a table must be a simple name: ID. But in SQLite, the table can ** now be identified by a database name, a dot, then the table name: ID.ID. */ struct SrcList { - u16 nSrc; /* Number of tables or subqueries in the FROM clause */ - u16 nAlloc; /* Number of entries allocated in a[] below */ + i16 nSrc; /* Number of tables or subqueries in the FROM clause */ + i16 nAlloc; /* Number of entries allocated in a[] below */ struct SrcList_item { char *zDatabase; /* Name of database holding this table */ char *zName; /* Name of the table */ char *zAlias; /* The "B" part of a "A AS B" phrase. zName is the "A" */ Table *pTab; /* An SQL table corresponding to zName */ @@ -1114,11 +1118,11 @@ char *sqliteStrNDup(const char*, int); # define sqliteCheckMemory(a,b) #endif char *sqliteMPrintf(const char*, ...); char *sqliteVMPrintf(const char*, va_list); -void sqliteSetString(char **, const char *, ...); +void sqliteSetString(char **, ...); void sqliteSetNString(char **, ...); void sqliteErrorMsg(Parse*, const char*, ...); void sqliteDequote(char*); int sqliteKeywordCode(const char*, int); int sqliteRunParser(Parse*, const char*, char **); Index: src/table.c ================================================================== --- src/table.c +++ src/table.c @@ -29,11 +29,11 @@ char *zErrMsg; int nResult; int nAlloc; int nRow; int nColumn; - int nData; + long nData; int rc; } TabResult; /* ** This routine is called once for each row in the result table. Its job @@ -194,10 +194,10 @@ ){ if( azResult ){ int i, n; azResult--; if( azResult==0 ) return; - n = (int)azResult[0]; + n = (int)(long)azResult[0]; for(i=1; i0 /* ** If the macro TCLSH is defined and is one, then put in code for the -** "main" routine that will initialize Tcl. +** "main" routine that implement a interactive shell into which the user +** can type TCL commands. 
*/ -#if defined(TCLSH) && TCLSH==1 +#if TCLSH==1 static char zMainloop[] = "set line {}\n" "while {![eof stdin]} {\n" "if {$line!=\"\"} {\n" "puts -nonewline \"> \"\n" @@ -1191,31 +1197,43 @@ "} else {\n" "append line \\n\n" "}\n" "}\n" ; +#endif /* TCLSH==1 */ -#define TCLSH_MAIN main /* Needed to fake out mktclapp */ -int TCLSH_MAIN(int argc, char **argv){ - Tcl_Interp *interp; - Tcl_FindExecutable(argv[0]); - interp = Tcl_CreateInterp(); +int Libsqlite_Init( Tcl_Interp *interp) { +#ifdef TCL_THREADS + if (Thread_Init(interp) == TCL_ERROR) { + return TCL_ERROR; + } +#endif Sqlite_Init(interp); #ifdef SQLITE_TEST { extern int Sqlitetest1_Init(Tcl_Interp*); extern int Sqlitetest2_Init(Tcl_Interp*); extern int Sqlitetest3_Init(Tcl_Interp*); - extern int Sqlitetest4_Init(Tcl_Interp*); extern int Md5_Init(Tcl_Interp*); Sqlitetest1_Init(interp); Sqlitetest2_Init(interp); Sqlitetest3_Init(interp); - Sqlitetest4_Init(interp); Md5_Init(interp); + Tcl_StaticPackage(interp, "sqlite", Libsqlite_Init, Libsqlite_Init); } #endif + return TCL_OK; +} + +#define TCLSH_MAIN main /* Needed to fake out mktclapp */ +#if TCLSH==1 +int TCLSH_MAIN(int argc, char **argv){ +#ifndef TCL_THREADS + Tcl_Interp *interp; + Tcl_FindExecutable(argv[0]); + interp = Tcl_CreateInterp(); + Libsqlite_Init(interp); if( argc>=2 ){ int i; Tcl_SetVar(interp,"argv0",argv[1],TCL_GLOBAL_ONLY); Tcl_SetVar(interp,"argv", "", TCL_GLOBAL_ONLY); for(i=2; iresult; fprintf(stderr,"%s: %s\n", *argv, zInfo); - return 1; + return TCL_ERROR; } }else{ Tcl_GlobalEval(interp, zMainloop); } return 0; +#else + Tcl_Main(argc, argv, Libsqlite_Init); +#endif /* TCL_THREADS */ + return 0; +} +#endif /* TCLSH==1 */ + + +/* +** If the macro TCLSH is set to 2, then implement a space analysis tool. +*/ +#if TCLSH==2 +static char zAnalysis[] = +#include "spaceanal_tcl.h" +; + +int main(int argc, char **argv){ + Tcl_Interp *interp; + int i; + Tcl_FindExecutable(argv[0]); + interp = Tcl_CreateInterp(); + Libsqlite_Init(interp); + Tcl_SetVar(interp,"argv0",argv[0],TCL_GLOBAL_ONLY); + Tcl_SetVar(interp,"argv", "", TCL_GLOBAL_ONLY); + for(i=1; iresult; + fprintf(stderr,"%s: %s\n", *argv, zInfo); + return TCL_ERROR; + } + return 0; } +#endif /* TCLSH==2 */ + #endif /* TCLSH */ -#endif /* !defined(NO_TCL) */ +#endif /* NO_TCL */ Index: src/test1.c ================================================================== --- src/test1.c +++ src/test1.c @@ -11,11 +11,11 @@ ************************************************************************* ** Code for testing the printf() interface to SQLite. This code ** is not included in the SQLite library. It is used for automated ** testing of the SQLite library. 
** -** $Id: test1.c,v 1.36 2004/02/22 17:49:34 drh Exp $ +** $Id: test1.c,v 1.36.2.1 2004/05/07 00:57:06 drh Exp $ */ #include "sqliteInt.h" #include "tcl.h" #include "os.h" #include @@ -973,10 +973,11 @@ int Sqlitetest1_Init(Tcl_Interp *interp){ extern int sqlite_search_count; extern int sqlite_interrupt_count; extern int sqlite_open_file_count; extern int sqlite_current_time; + extern int sqlite_temp_directory; static struct { char *zName; Tcl_CmdProc *xProc; } aCmd[] = { { "sqlite_mprintf_int", (Tcl_CmdProc*)sqlite_mprintf_int }, @@ -1018,7 +1019,9 @@ (char*)&sqlite_open_file_count, TCL_LINK_INT); Tcl_LinkVar(interp, "sqlite_current_time", (char*)&sqlite_current_time, TCL_LINK_INT); Tcl_LinkVar(interp, "sqlite_static_bind_value", (char*)&sqlite_static_bind_value, TCL_LINK_STRING); + Tcl_LinkVar(interp, "sqlite_temp_directory", + (char*)&sqlite_temp_directory, TCL_LINK_STRING); return TCL_OK; } Index: src/update.c ================================================================== --- src/update.c +++ src/update.c @@ -10,11 +10,11 @@ ** ************************************************************************* ** This file contains C code routines that are called by the parser ** to handle UPDATE statements. ** -** $Id: update.c,v 1.70 2004/02/22 20:05:02 drh Exp $ +** $Id: update.c,v 1.70.2.1 2004/04/29 16:16:29 drh Exp $ */ #include "sqliteInt.h" /* ** Process an UPDATE statement. @@ -30,11 +30,12 @@ Expr *pWhere, /* The WHERE clause. May be null */ int onError /* How to handle constraint errors */ ){ int i, j; /* Loop counters */ Table *pTab; /* The table to be updated */ - int addr; /* VDBE instruction address of the start of the loop */ + int loopStart; /* VDBE instruction address of the start of the loop */ + int jumpInst; /* Addr of VDBE instruction to jump out of loop */ WhereInfo *pWInfo; /* Information about the WHERE clause */ Vdbe *v; /* The virtual database engine */ Index *pIdx; /* For looping over indices */ int nIdx; /* Number of indices that need updating */ int nIdxTotal; /* Total number of indices */ @@ -47,10 +48,11 @@ ** aXRef[i]==-1 if the i-th column is not changed. */ int chngRecno; /* True if the record number is being changed */ Expr *pRecnoExpr; /* Expression defining the new record number */ int openAll; /* True if all indices need to be opened */ int isView; /* Trying to update a view */ + int iStackDepth; /* Index of memory cell holding stack depth */ AuthContext sContext; /* The authorization context */ int before_triggers; /* True if there are any BEFORE triggers */ int after_triggers; /* True if there are any AFTER triggers */ int row_triggers_exist = 0; /* True if any row triggers exist */ @@ -60,10 +62,11 @@ sContext.pParse = 0; if( pParse->nErr || sqlite_malloc_failed ) goto update_cleanup; db = pParse->db; assert( pTabList->nSrc==1 ); + iStackDepth = pParse->nMem++; /* Locate the table which we want to update. */ pTab = sqliteSrcListLookup(pParse, pTabList); if( pTab==0 ) goto update_cleanup; @@ -246,11 +249,15 @@ sqliteVdbeAddOp(v, OP_OpenPseudo, newIdx, 0); /* The top of the update loop for when there are triggers. 
*/ sqliteVdbeAddOp(v, OP_ListRewind, 0, 0); - addr = sqliteVdbeAddOp(v, OP_ListRead, 0, 0); + sqliteVdbeAddOp(v, OP_StackDepth, 0, 0); + sqliteVdbeAddOp(v, OP_MemStore, iStackDepth, 1); + loopStart = sqliteVdbeAddOp(v, OP_MemLoad, iStackDepth, 0); + sqliteVdbeAddOp(v, OP_StackReset, 0, 0); + jumpInst = sqliteVdbeAddOp(v, OP_ListRead, 0, 0); sqliteVdbeAddOp(v, OP_Dup, 0, 0); /* Open a cursor and make it point to the record that is ** being updated. */ @@ -293,11 +300,11 @@ } /* Fire the BEFORE and INSTEAD OF triggers */ if( sqliteCodeRowTrigger(pParse, TK_UPDATE, pChanges, TK_BEFORE, pTab, - newIdx, oldIdx, onError, addr) ){ + newIdx, oldIdx, onError, loopStart) ){ goto update_cleanup; } } if( !isView ){ @@ -334,14 +341,14 @@ ** Also, the old data is needed to delete the old index entires. ** So make the cursor point at the old record. */ if( !row_triggers_exist ){ sqliteVdbeAddOp(v, OP_ListRewind, 0, 0); - addr = sqliteVdbeAddOp(v, OP_ListRead, 0, 0); + jumpInst = loopStart = sqliteVdbeAddOp(v, OP_ListRead, 0, 0); sqliteVdbeAddOp(v, OP_Dup, 0, 0); } - sqliteVdbeAddOp(v, OP_NotExists, iCur, addr); + sqliteVdbeAddOp(v, OP_NotExists, iCur, loopStart); /* If the record number will change, push the record number as it ** will be after the update. (The old record number is currently ** on top of the stack.) */ @@ -366,11 +373,11 @@ } /* Do constraint checks */ sqliteGenerateConstraintChecks(pParse, pTab, iCur, aIdxUsed, chngRecno, 1, - onError, addr); + onError, loopStart); /* Delete the old indices for the current record. */ sqliteGenerateRowIndexDelete(db, v, pTab, iCur, aIdxUsed); @@ -402,20 +409,20 @@ } sqliteVdbeAddOp(v, OP_Close, iCur, 0); pParse->nTab = iCur; } if( sqliteCodeRowTrigger(pParse, TK_UPDATE, pChanges, TK_AFTER, pTab, - newIdx, oldIdx, onError, addr) ){ + newIdx, oldIdx, onError, loopStart) ){ goto update_cleanup; } } /* Repeat the above with the next record to be updated, until ** all record selected by the WHERE clause have been updated. */ - sqliteVdbeAddOp(v, OP_Goto, 0, addr); - sqliteVdbeChangeP2(v, addr, sqliteVdbeCurrentAddr(v)); + sqliteVdbeAddOp(v, OP_Goto, 0, loopStart); + sqliteVdbeChangeP2(v, jumpInst, sqliteVdbeCurrentAddr(v)); sqliteVdbeAddOp(v, OP_ListReset, 0, 0); /* Close all tables if there were no FOR EACH ROW triggers */ if( !row_triggers_exist ){ for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ Index: src/util.c ================================================================== --- src/util.c +++ src/util.c @@ -12,11 +12,11 @@ ** Utility functions used throughout sqlite. ** ** This file contains functions for allocating memory, comparing ** strings, and stuff like that. ** -** $Id: util.c,v 1.74 2004/02/22 17:49:34 drh Exp $ +** $Id: util.c,v 1.74.2.2 2005/06/06 15:07:03 drh Exp $ */ #include "sqliteInt.h" #include #include @@ -328,31 +328,30 @@ ** first NULL argument), store the string in memory obtained from ** sqliteMalloc() and make the pointer indicated by the 1st argument ** point to that string. The 1st argument must either be NULL or ** point to memory obtained from sqliteMalloc(). 
*/ -void sqliteSetString(char **pz, const char *zFirst, ...){ +void sqliteSetString(char **pz, ...){ va_list ap; int nByte; const char *z; char *zResult; if( pz==0 ) return; - nByte = strlen(zFirst) + 1; - va_start(ap, zFirst); + nByte = 1; + va_start(ap, pz); while( (z = va_arg(ap, const char*))!=0 ){ nByte += strlen(z); } va_end(ap); sqliteFree(*pz); *pz = zResult = sqliteMallocRaw( nByte ); if( zResult==0 ){ return; } - strcpy(zResult, zFirst); - zResult += strlen(zResult); - va_start(ap, zFirst); + *zResult = 0; + va_start(ap, pz); while( (z = va_arg(ap, const char*))!=0 ){ strcpy(zResult, z); zResult += strlen(zResult); } va_end(ap); @@ -502,18 +501,18 @@ int sqliteStrICmp(const char *zLeft, const char *zRight){ register unsigned char *a, *b; a = (unsigned char *)zLeft; b = (unsigned char *)zRight; while( *a!=0 && UpperToLower[*a]==UpperToLower[*b]){ a++; b++; } - return *a - *b; + return UpperToLower[*a] - UpperToLower[*b]; } int sqliteStrNICmp(const char *zLeft, const char *zRight, int N){ register unsigned char *a, *b; a = (unsigned char *)zLeft; b = (unsigned char *)zRight; while( N-- > 0 && *a!=0 && UpperToLower[*a]==UpperToLower[*b]){ a++; b++; } - return N<0 ? 0 : *a - *b; + return N<0 ? 0 : UpperToLower[*a] - UpperToLower[*b]; } /* ** Return TRUE if z is a pure numeric string. Return FALSE if the ** string contains any character which is not part of a number. Index: src/vacuum.c ================================================================== --- src/vacuum.c +++ src/vacuum.c @@ -12,11 +12,11 @@ ** This file contains code used to implement the VACUUM command. ** ** Most of the code in this file may be omitted by defining the ** SQLITE_OMIT_VACUUM macro. ** -** $Id: vacuum.c,v 1.13 2004/03/10 18:57:32 drh Exp $ +** $Id: vacuum.c,v 1.13.2.5 2005/02/14 00:25:48 drh Exp $ */ #include "sqliteInt.h" #include "os.h" /* @@ -161,28 +161,10 @@ } if( rc!=SQLITE_ABORT ) p->rc = rc; return rc; } -/* -** This callback is used to transfer PRAGMA settings from one database -** to the other. The value in argv[0] should be passed to a pragma -** identified by ((vacuumStruct*)pArg)->zPragma. -*/ -static int vacuumCallback3(void *pArg, int argc, char **argv, char **NotUsed){ - vacuumStruct *p = (vacuumStruct*)pArg; - char zBuf[200]; - assert( argc==1 ); - if( argv==0 ) return 0; - assert( argv[0]!=0 ); - assert( strlen(p->zPragma)<100 ); - assert( strlen(argv[0])<30 ); - sprintf(zBuf,"PRAGMA %s=%s;", p->zPragma, argv[0]); - p->rc = execsql(p->pzErrMsg, p->dbNew, zBuf); - return p->rc; -} - /* ** Generate a random name of 20 character in length. */ static void randomName(unsigned char *zBuf){ static const unsigned char zChars[] = @@ -224,22 +206,17 @@ int rc = SQLITE_OK; /* Return code from service routines */ int i; /* Loop counter */ char *zErrMsg; /* Error message */ vacuumStruct sVac; /* Information passed to callbacks */ - /* These are all of the pragmas that need to be transferred over - ** to the new database */ - static const char *zPragma[] = { - "default_synchronous", - "default_cache_size", - /* "default_temp_store", */ - }; - if( db->flags & SQLITE_InTrans ){ sqliteSetString(pzErrMsg, "cannot VACUUM from within a transaction", (char*)0); return SQLITE_ERROR; + } + if( db->flags & SQLITE_Interrupt ){ + return SQLITE_INTERRUPT; } memset(&sVac, 0, sizeof(sVac)); /* Get the full pathname of the database file and create two ** temporary filenames in the same directory as the original file. 
@@ -278,26 +255,30 @@ } sVac.dbOld = db; sVac.dbNew = dbNew; sVac.pzErrMsg = pzErrMsg; - for(i=0; rc==SQLITE_OK && iaDb[0].pBt, meta1); + sqliteBtreeGetMeta(dbNew->aDb[0].pBt, meta2); + meta2[1] = meta1[1]+1; + meta2[3] = meta1[3]; + meta2[4] = meta1[4]; + meta2[6] = meta1[6]; + rc = sqliteBtreeUpdateMeta(dbNew->aDb[0].pBt, meta2); + } if( rc==SQLITE_OK ){ rc = sqliteBtreeCopyFile(db->aDb[0].pBt, dbNew->aDb[0].pBt); sqlite_exec(db, "COMMIT", 0, 0, 0); sqliteResetInternalSchema(db, 0); } @@ -306,15 +287,19 @@ if( rc && zErrMsg!=0 ){ sqliteSetString(pzErrMsg, "unable to vacuum database - ", zErrMsg, (char*)0); } sqlite_exec(db, "ROLLBACK", 0, 0, 0); + if( (dbNew && (dbNew->flags & SQLITE_Interrupt)) + || (db->flags & SQLITE_Interrupt) ){ + rc = SQLITE_INTERRUPT; + } if( dbNew ) sqlite_close(dbNew); sqliteOsDelete(zTemp); sqliteFree(zTemp); sqliteFree(sVac.s1.z); sqliteFree(sVac.s2.z); if( zErrMsg ) sqlite_freemem(zErrMsg); - if( rc==SQLITE_ABORT ) sVac.rc = SQLITE_ERROR; + if( rc==SQLITE_ABORT && sVac.rc!=SQLITE_INTERRUPT ) sVac.rc = SQLITE_ERROR; return sVac.rc; #endif } Index: src/vdbe.c ================================================================== --- src/vdbe.c +++ src/vdbe.c @@ -41,11 +41,11 @@ ** documentation, headers files, or other derived files. The formatting ** of the code in this file is, therefore, important. See other comments ** in this file for details. If in doubt, do not deviate from existing ** commenting and indentation practices when changing or adding code. ** -** $Id: vdbe.c,v 1.268 2004/03/03 01:51:25 drh Exp $ +** $Id: vdbe.c,v 1.268.2.5 2006/10/24 11:26:44 drh Exp $ */ #include "sqliteInt.h" #include "os.h" #include #include "vdbeInt.h" @@ -60,11 +60,11 @@ int sqlite_search_count = 0; /* ** When this global variable is positive, it gets decremented once before ** each instruction in the VDBE. When reaches zero, the SQLITE_Interrupt -** of the db.flags field is set in order to simulate and interrupt. +** of the db.flags field is set in order to simulate an interrupt. ** ** This facility is used for testing purposes only. It does not function ** in an ordinary build. */ int sqlite_interrupt_count = 0; @@ -2935,13 +2935,11 @@ iKey = intToKey(pNos->i); zKey = (char*)&iKey; if( pOp->p2 & OPFLAG_NCHANGE ) db->nChange++; if( pOp->p2 & OPFLAG_LASTROWID ) db->lastRowid = pNos->i; if( pOp->p2 & OPFLAG_CSCHANGE ) db->csChange++; - if( pC->nextRowidValid && pTos->i>=pC->nextRowid ){ - pC->nextRowidValid = 0; - } + pC->nextRowidValid = 0; } if( pTos->flags & MEM_Null ){ pTos->z = 0; pTos->n = 0; }else{ @@ -4543,10 +4541,14 @@ *pTos = *pMem; if( pTos->flags & MEM_Str ){ pTos->flags &= ~(MEM_Dyn|MEM_Static|MEM_Short); pTos->flags |= MEM_Ephem; } + if( pTos->flags & MEM_AggCtx ){ + Release(pTos); + pTos->flags = MEM_Null; + } break; } /* Opcode: AggNext * P2 * ** @@ -4693,12 +4695,13 @@ if( pSet->prev==0 ){ pc = pOp->p2 - 1; break; } }else{ - assert( pSet->prev ); - pSet->prev = sqliteHashNext(pSet->prev); + if( pSet->prev ){ + pSet->prev = sqliteHashNext(pSet->prev); + } if( pSet->prev==0 ){ break; }else{ pc = pOp->p2 - 1; } @@ -4720,10 +4723,40 @@ if( sqliteSafetyOff(db) ) goto abort_due_to_misuse; rc = sqliteRunVacuum(&p->zErrMsg, db); if( sqliteSafetyOn(db) ) goto abort_due_to_misuse; break; } + +/* Opcode: StackDepth * * * +** +** Push an integer onto the stack which is the depth of the stack prior +** to that integer being pushed. 
+*/ +case OP_StackDepth: { + int depth = (&pTos[1]) - p->aStack; + pTos++; + pTos->i = depth; + pTos->flags = MEM_Int; + break; +} + +/* Opcode: StackReset * * * +** +** Pop a single integer off of the stack. Then pop the stack +** as many times as necessary to get the depth of the stack down +** to the value of the integer that was popped. +*/ +case OP_StackReset: { + int depth, goal; + assert( pTos>=p->aStack ); + Integerify(pTos); + goal = pTos->i; + depth = (&pTos[1]) - p->aStack; + assert( goalopcode); @@ -4833,10 +4866,11 @@ } /* The end of the for(;;) loop the loops through opcodes */ /* If we reach this point, it means that execution is finished. */ vdbe_halt: + CHECK_FOR_INTERRUPT if( rc ){ p->rc = rc; rc = SQLITE_ERROR; }else{ rc = SQLITE_DONE; Index: src/vdbeaux.c ================================================================== --- src/vdbeaux.c +++ src/vdbeaux.c @@ -509,11 +509,11 @@ */ void sqliteVdbePrintOp(FILE *pOut, int pc, Op *pOp){ char *zP3; char zPtr[40]; if( pOp->p3type==P3_POINTER ){ - sprintf(zPtr, "ptr(%#x)", (int)pOp->p3); + sprintf(zPtr, "ptr(%#lx)", (long)pOp->p3); zP3 = zPtr; }else{ zP3 = pOp->p3; } if( pOut==0 ) pOut = stdout; @@ -563,11 +563,11 @@ }else{ sprintf(p->zArgv[0],"%d",i); sprintf(p->zArgv[2],"%d", p->aOp[i].p1); sprintf(p->zArgv[3],"%d", p->aOp[i].p2); if( p->aOp[i].p3type==P3_POINTER ){ - sprintf(p->aStack[4].zShort, "ptr(%#x)", (int)p->aOp[i].p3); + sprintf(p->aStack[4].zShort, "ptr(%#lx)", (long)p->aOp[i].p3); p->zArgv[4] = p->aStack[4].zShort; }else{ p->zArgv[4] = p->aOp[i].p3; } p->zArgv[1] = sqliteOpcodeNames[p->aOp[i].opcode]; Index: src/where.c ================================================================== --- src/where.c +++ src/where.c @@ -10,11 +10,11 @@ ** ************************************************************************* ** This module contains C code that generates VDBE code used to process ** the WHERE clause of SQL statements. ** -** $Id: where.c,v 1.89 2004/02/22 20:05:02 drh Exp $ +** $Id: where.c,v 1.89.2.2 2004/07/19 19:30:50 drh Exp $ */ #include "sqliteInt.h" /* ** The query generator uses an array of instances of this structure to @@ -44,11 +44,11 @@ ** cursors being used. */ typedef struct ExprMaskSet ExprMaskSet; struct ExprMaskSet { int n; /* Number of assigned cursor values */ - int ix[32]; /* Cursor assigned to each bit */ + int ix[31]; /* Cursor assigned to each bit */ }; /* ** Determine the number of elements in an array. */ @@ -121,11 +121,13 @@ */ static int exprTableUsage(ExprMaskSet *pMaskSet, Expr *p){ unsigned int mask = 0; if( p==0 ) return 0; if( p->op==TK_COLUMN ){ - return getMask(pMaskSet, p->iTable); + mask = getMask(pMaskSet, p->iTable); + if( mask==0 ) mask = -1; + return mask; } if( p->pRight ){ mask = exprTableUsage(pMaskSet, p->pRight); } if( p->pLeft ){ @@ -266,10 +268,39 @@ if( pMatch && pbRev ){ *pbRev = sortOrder==SQLITE_SO_DESC; } return pMatch; } + +/* +** Disable a term in the WHERE clause. Except, do not disable the term +** if it controls a LEFT OUTER JOIN and it did not originate in the ON +** or USING clause of that join. +** +** Consider the term t2.z='ok' in the following queries: +** +** (1) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x WHERE t2.z='ok' +** (2) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x AND t2.z='ok' +** (3) SELECT * FROM t1, t2 WHERE t1.a=t2.x AND t2.z='ok' +** +** The t2.z='ok' is disabled in the in (2) because it did not originate +** in the ON clause. The term is disabled in (3) because it is not part +** of a LEFT OUTER JOIN. 
In (1), the term is not disabled. +** +** Disabling a term causes that term to not be tested in the inner loop +** of the join. Disabling is an optimization. We would get the correct +** results if nothing were ever disabled, but joins might run a little +** slower. The trick is to disable as much as we can without disabling +** too much. If we disabled in (1), we'd get the wrong answer. +** See ticket #813. +*/ +static void disableTerm(WhereLevel *pLevel, Expr **ppExpr){ + Expr *pExpr = *ppExpr; + if( pLevel->iLeftJoin==0 || ExprHasProperty(pExpr, EP_FromJoin) ){ + *ppExpr = 0; + } +} /* ** Generate the beginning of the loop used for WHERE clause processing. ** The return value is a pointer to an (opaque) structure that contains ** information needed to terminate the loop. Later, the calling routine @@ -734,11 +765,11 @@ pLevel->inP1 = pX->iTable; } }else{ sqliteExprCode(pParse, aExpr[k].p->pLeft); } - aExpr[k].p = 0; + disableTerm(pLevel, &aExpr[k].p); cont = pLevel->cont = sqliteVdbeMakeLabel(v); sqliteVdbeAddOp(v, OP_MustBeInt, 1, brk); haveKey = 0; sqliteVdbeAddOp(v, OP_NotExists, iCur, brk); pLevel->op = OP_Noop; @@ -758,11 +789,11 @@ && (aExpr[k].prereqRight & loopMask)==aExpr[k].prereqRight && pX->pLeft->iColumn==pIdx->aiColumn[j] ){ if( pX->op==TK_EQ ){ sqliteExprCode(pParse, pX->pRight); - aExpr[k].p = 0; + disableTerm(pLevel, &aExpr[k].p); break; } if( pX->op==TK_IN && nColumn==1 ){ if( pX->pList ){ sqliteVdbeAddOp(v, OP_SetFirst, pX->iTable, brk); @@ -775,21 +806,21 @@ sqliteVdbeAddOp(v, OP_KeyAsData, pX->iTable, 1); pLevel->inP2 = sqliteVdbeAddOp(v, OP_FullKey, pX->iTable, 0); pLevel->inOp = OP_Next; pLevel->inP1 = pX->iTable; } - aExpr[k].p = 0; + disableTerm(pLevel, &aExpr[k].p); break; } } if( aExpr[k].idxRight==iCur && aExpr[k].p->op==TK_EQ && (aExpr[k].prereqLeft & loopMask)==aExpr[k].prereqLeft && aExpr[k].p->pRight->iColumn==pIdx->aiColumn[j] ){ sqliteExprCode(pParse, aExpr[k].p->pLeft); - aExpr[k].p = 0; + disableTerm(pLevel, &aExpr[k].p); break; } } } pLevel->iMem = pParse->nMem++; @@ -852,11 +883,11 @@ sqliteExprCode(pParse, aExpr[k].p->pLeft); } sqliteVdbeAddOp(v, OP_ForceInt, aExpr[k].p->op==TK_LT || aExpr[k].p->op==TK_GT, brk); sqliteVdbeAddOp(v, OP_MoveTo, iCur, brk); - aExpr[k].p = 0; + disableTerm(pLevel, &aExpr[k].p); }else{ sqliteVdbeAddOp(v, OP_Rewind, iCur, brk); } if( iDirectLt[i]>=0 ){ k = iDirectLt[i]; @@ -874,11 +905,11 @@ if( aExpr[k].p->op==TK_LT || aExpr[k].p->op==TK_GT ){ testOp = OP_Ge; }else{ testOp = OP_Gt; } - aExpr[k].p = 0; + disableTerm(pLevel, &aExpr[k].p); } start = sqliteVdbeCurrentAddr(v); pLevel->op = OP_Next; pLevel->p1 = iCur; pLevel->p2 = start; @@ -929,20 +960,20 @@ && aExpr[k].p->op==TK_EQ && (aExpr[k].prereqRight & loopMask)==aExpr[k].prereqRight && aExpr[k].p->pLeft->iColumn==pIdx->aiColumn[j] ){ sqliteExprCode(pParse, aExpr[k].p->pRight); - aExpr[k].p = 0; + disableTerm(pLevel, &aExpr[k].p); break; } if( aExpr[k].idxRight==iCur && aExpr[k].p->op==TK_EQ && (aExpr[k].prereqLeft & loopMask)==aExpr[k].prereqLeft && aExpr[k].p->pRight->iColumn==pIdx->aiColumn[j] ){ sqliteExprCode(pParse, aExpr[k].p->pLeft); - aExpr[k].p = 0; + disableTerm(pLevel, &aExpr[k].p); break; } } } @@ -975,21 +1006,21 @@ && (aExpr[k].prereqRight & loopMask)==aExpr[k].prereqRight && pExpr->pLeft->iColumn==pIdx->aiColumn[j] ){ sqliteExprCode(pParse, pExpr->pRight); leFlag = pExpr->op==TK_LE; - aExpr[k].p = 0; + disableTerm(pLevel, &aExpr[k].p); break; } if( aExpr[k].idxRight==iCur && (pExpr->op==TK_GT || pExpr->op==TK_GE) && (aExpr[k].prereqLeft & 
loopMask)==aExpr[k].prereqLeft && pExpr->pRight->iColumn==pIdx->aiColumn[j] ){ sqliteExprCode(pParse, pExpr->pLeft); leFlag = pExpr->op==TK_GE; - aExpr[k].p = 0; + disableTerm(pLevel, &aExpr[k].p); break; } } testOp = OP_IdxGE; }else{ @@ -1034,21 +1065,21 @@ && (aExpr[k].prereqRight & loopMask)==aExpr[k].prereqRight && pExpr->pLeft->iColumn==pIdx->aiColumn[j] ){ sqliteExprCode(pParse, pExpr->pRight); geFlag = pExpr->op==TK_GE; - aExpr[k].p = 0; + disableTerm(pLevel, &aExpr[k].p); break; } if( aExpr[k].idxRight==iCur && (pExpr->op==TK_LT || pExpr->op==TK_LE) && (aExpr[k].prereqLeft & loopMask)==aExpr[k].prereqLeft && pExpr->pRight->iColumn==pIdx->aiColumn[j] ){ sqliteExprCode(pParse, pExpr->pLeft); geFlag = pExpr->op==TK_LE; - aExpr[k].p = 0; + disableTerm(pLevel, &aExpr[k].p); break; } } }else{ geFlag = 1; Index: test/auth.test ================================================================== --- test/auth.test +++ test/auth.test @@ -10,11 +10,11 @@ #*********************************************************************** # This file implements regression tests for SQLite library. The # focus of this script is testing the ATTACH and DETACH commands # and related functionality. # -# $Id: auth.test,v 1.12 2003/12/07 00:24:35 drh Exp $ +# $Id: auth.test,v 1.12.2.1 2004/09/09 13:54:31 drh Exp $ # set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -34,10 +34,16 @@ catchsql {CREATE TABLE t1(a,b,c)} } {1 {not authorized}} do_test auth-1.1.2 { db errorcode } {23} +do_test auth-1.1.3 { + # Ticket #896. + catchsql { + SELECT x; + } +} {1 {no such column: x}} do_test auth-1.2 { execsql {SELECT name FROM sqlite_master} } {} do_test auth-1.3.1 { proc auth {code arg1 arg2 arg3 arg4} { Index: test/date.test ================================================================== --- test/date.test +++ test/date.test @@ -9,11 +9,11 @@ # #*********************************************************************** # This file implements regression tests for SQLite library. The # focus of this file is testing date and time functions. 
# -# $Id: date.test,v 1.7 2004/02/29 01:08:18 drh Exp $ +# $Id: date.test,v 1.7.2.1 2004/07/18 22:25:16 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl proc datetest {tnum expr result} { @@ -93,11 +93,24 @@ datetest 3.6 {strftime('%m','2003-10-31 12:34:56.432')} 10 datetest 3.7 {strftime('%M','2003-10-31 12:34:56.432')} 34 datetest 3.8 {strftime('%s','2003-10-31 12:34:56.432')} 1067603696 datetest 3.9 {strftime('%S','2003-10-31 12:34:56.432')} 56 datetest 3.10 {strftime('%w','2003-10-31 12:34:56.432')} 5 -datetest 3.11 {strftime('%W','2003-10-31 12:34:56.432')} 44 +datetest 3.11.1 {strftime('%W','2003-10-31 12:34:56.432')} 43 +datetest 3.11.2 {strftime('%W','2004-01-01')} 00 +datetest 3.11.3 {strftime('%W','2004-01-02')} 00 +datetest 3.11.4 {strftime('%W','2004-01-03')} 00 +datetest 3.11.5 {strftime('%W','2004-01-04')} 00 +datetest 3.11.6 {strftime('%W','2004-01-05')} 01 +datetest 3.11.7 {strftime('%W','2004-01-06')} 01 +datetest 3.11.8 {strftime('%W','2004-01-07')} 01 +datetest 3.11.9 {strftime('%W','2004-01-08')} 01 +datetest 3.11.10 {strftime('%W','2004-01-09')} 01 +datetest 3.11.11 {strftime('%W','2004-07-18')} 28 +datetest 3.11.12 {strftime('%W','2004-12-31')} 52 +datetest 3.11.13 {strftime('%W','2007-12-31')} 53 +datetest 3.11.14 {strftime('%W','2007-01-01')} 01 datetest 3.12 {strftime('%Y','2003-10-31 12:34:56.432')} 2003 datetest 3.13 {strftime('%%','2003-10-31 12:34:56.432')} % datetest 3.14 {strftime('%_','2003-10-31 12:34:56.432')} NULL datetest 3.15 {strftime('%Y-%m-%d','2003-10-31')} 2003-10-31 proc repeat {n txt} { Index: test/func.test ================================================================== --- test/func.test +++ test/func.test @@ -9,11 +9,11 @@ # #*********************************************************************** # This file implements regression tests for SQLite library. The # focus of this file is testing built-in functions. # -# $Id: func.test,v 1.16 2002/11/04 19:32:26 drh Exp $ +# $Id: func.test,v 1.16.2.2 2004/07/18 21:14:05 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # Create a table to work with. @@ -262,10 +262,15 @@ execsql { CREATE TEMP TABLE t3 AS SELECT a FROM t2 ORDER BY a DESC; SELECT min('z+'||a||'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP') FROM t3; } } {z+1abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP} +do_test func-8.4 { + execsql { + SELECT max('z+'||a||'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP') FROM t3; + } +} {z+67890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP} # How do you test the random() function in a meaningful, deterministic way? # do_test func-9.1 { execsql { Index: test/index.test ================================================================== --- test/index.test +++ test/index.test @@ -9,11 +9,11 @@ # #*********************************************************************** # This file implements regression tests for SQLite library. The # focus of this file is testing the CREATE INDEX statement. # -# $Id: index.test,v 1.24 2003/09/27 00:41:28 drh Exp $ +# $Id: index.test,v 1.24.2.1 2004/07/20 00:50:30 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # Create a basic index and verify it is added to sqlite_master @@ -512,7 +512,22 @@ INSERT INTO t1 VALUES('+12347E+02',11); SELECT b FROM t1 ORDER BY a; } } {8 5 2 1 3 6 11 9 10 4 7} integrity_check index-15.1 + +# Drop index with a quoted name. Ticket #695. 
+# +do_test index-16.1 { + execsql { + CREATE INDEX "t6i2" ON t6(c); + DROP INDEX "t6i2"; + } +} {} +do_test index-16.2 { + execsql { + DROP INDEX "t6i1"; + } +} {} + finish_test Index: test/interrupt.test ================================================================== --- test/interrupt.test +++ test/interrupt.test @@ -9,11 +9,11 @@ # #*********************************************************************** # This file implements regression tests for SQLite library. The # focus of this script is the sqlite_interrupt() API. # -# $Id: interrupt.test,v 1.4 2004/02/18 16:57:23 drh Exp $ +# $Id: interrupt.test,v 1.4.2.1 2004/05/10 20:27:42 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl @@ -35,42 +35,43 @@ # This routine attempts to execute the sql in $sql. It triggers an # interrupt a progressively later and later points during the processing # and checks to make sure SQLITE_INTERRUPT is returned. Eventually, # the routine completes successfully. # -proc interrupt_test {testid sql result {initcnt 0}} { +proc interrupt_test {testid sql result {initcnt 0} {maxcnt 1000000}} { set orig_sum [cksum] set i $initcnt - while 1 { + global sqlite_interrupt_count + while {$i<$maxcnt} { incr i - set ::sqlite_interrupt_count $i + set sqlite_interrupt_count $i do_test $testid.$i.1 [format { set ::r [catchsql %s] set ::code [db errorcode] expr {$::code==0 || $::code==9} } [list $sql]] 1 if {$::code==9} { do_test $testid.$i.2 { cksum } $orig_sum - } else { + } elseif {$sqlite_interrupt_count>0} { do_test $testid.$i.99 { set ::r } [list 0 $result] break } } - set ::sqlite_interrupt_count 0 + set sqlite_interrupt_count 0 } do_test interrupt-1.1 { execsql { CREATE TABLE t1(a,b); SELECT name FROM sqlite_master; } } {t1} -interrupt_test interrupt-1.2 {DROP TABLE t1} {} +interrupt_test interrupt-1.2 {DROP TABLE t1} {} 1 14 do_test interrupt-1.3 { execsql { SELECT name FROM sqlite_master; } } {} @@ -159,7 +160,8 @@ do_test interrupt-4.$i.1 { set ::sqlite_interrupt_count $::i catchsql $sql } {1 interrupted} } + finish_test ADDED test/join3_28.test Index: test/join3_28.test ================================================================== --- /dev/null +++ test/join3_28.test @@ -0,0 +1,34 @@ +# 2002 May 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for joins, including outer joins, where +# there are a large number of tables involved in the join. +# +# $Id: join3_28.test,v 1.1.2.1 2004/07/22 16:08:39 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +catch {unset result} +set result {} +for {set N 1} {$N<=40} {incr N} { + lappend result $N + do_test join3-1.$N { + execsql "CREATE TABLE t${N}(x);" + execsql "INSERT INTO t$N VALUES($N)" + set sql "SELECT * FROM t1" + for {set i 2} {$i<=$N} {incr i} {append sql ", t$i"} + execsql $sql + } $result +} + +finish_test ADDED test/join4_28.test Index: test/join4_28.test ================================================================== --- /dev/null +++ test/join4_28.test @@ -0,0 +1,77 @@ +# 2002 May 24 +# +# The author disclaims copyright to this source code. 
In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# This file implements regression tests for SQLite library. +# +# This file implements tests for left outer joins containing WHERE +# clauses that restrict the scope of the left term of the join. +# +# $Id: join4_28.test,v 1.1.2.1 2004/07/22 16:08:39 drh Exp $ + +set testdir [file dirname $argv0] +source $testdir/tester.tcl + +do_test join4-1.1 { + execsql { + create temp table t1(a integer, b varchar(10)); + insert into t1 values(1,'one'); + insert into t1 values(2,'two'); + insert into t1 values(3,'three'); + insert into t1 values(4,'four'); + + create temp table t2(x integer, y varchar(10), z varchar(10)); + insert into t2 values(2,'niban','ok'); + insert into t2 values(4,'yonban','err'); + } + execsql { + select * from t1 left outer join t2 on t1.a=t2.x where t2.z='ok' + } +} {2 two 2 niban ok} +do_test join4-1.2 { + execsql { + select * from t1 left outer join t2 on t1.a=t2.x and t2.z='ok' + } +} {1 one {} {} {} 2 two 2 niban ok 3 three {} {} {} 4 four {} {} {}} +do_test join4-1.3 { + execsql { + create index i2 on t2(z); + } + execsql { + select * from t1 left outer join t2 on t1.a=t2.x where t2.z='ok' + } +} {2 two 2 niban ok} +do_test join4-1.4 { + execsql { + select * from t1 left outer join t2 on t1.a=t2.x and t2.z='ok' + } +} {1 one {} {} {} 2 two 2 niban ok 3 three {} {} {} 4 four {} {} {}} +do_test join4-1.5 { + execsql { + select * from t1 left outer join t2 on t1.a=t2.x where t2.z>='ok' + } +} {2 two 2 niban ok} +do_test join4-1.4 { + execsql { + select * from t1 left outer join t2 on t1.a=t2.x and t2.z>='ok' + } +} {1 one {} {} {} 2 two 2 niban ok 3 three {} {} {} 4 four {} {} {}} +do_test join4-1.6 { + execsql { + select * from t1 left outer join t2 on t1.a=t2.x where t2.z IN ('ok') + } +} {2 two 2 niban ok} +do_test join4-1.7 { + execsql { + select * from t1 left outer join t2 on t1.a=t2.x and t2.z IN ('ok') + } +} {1 one {} {} {} 2 two 2 niban ok 3 three {} {} {} 4 four {} {} {}} + + +finish_test Index: test/limit.test ================================================================== --- test/limit.test +++ test/limit.test @@ -10,11 +10,11 @@ #*********************************************************************** # This file implements regression tests for SQLite library. The # focus of this file is testing the LIMIT ... OFFSET ... clause # of SELECT statements. # -# $Id: limit.test,v 1.11 2004/03/08 13:26:18 drh Exp $ +# $Id: limit.test,v 1.11.2.1 2004/07/19 23:33:04 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # Build some test data @@ -291,7 +291,27 @@ execsql { SELECT a+27 FROM t6 INTERSECT SELECT x FROM t2 ORDER BY 1 DESC LIMIT 1 OFFSET 1; } } {30} + +# Tests for limit in conjunction with distinct. The distinct should +# occur before both the limit and the offset. Ticket #749. 
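The limit-8.* tests that follow exercise the rule stated in the comment above: DISTINCT is applied before LIMIT and OFFSET, so OFFSET skips distinct values rather than raw rows. A small C-API sketch of the same behaviour (SQLite 2.x interface; the demo table, its values, and the helper names are hypothetical):

    #include <stdio.h>
    #include "sqlite.h"

    /* Print the first column of each result row. */
    static int printRow(void *NotUsed, int argc, char **argv, char **azCol){
      printf("%s\n", argv[0] ? argv[0] : "NULL");
      return 0;
    }

    /* DISTINCT runs before LIMIT/OFFSET, so the second query should skip
    ** the first two distinct values (1 and 2), not the first two rows. */
    static void limitDistinctDemo(sqlite *db){
      char *zErr = 0;
      sqlite_exec(db,
        "CREATE TABLE demo(x);"
        "INSERT INTO demo VALUES(1); INSERT INTO demo VALUES(1);"
        "INSERT INTO demo VALUES(2); INSERT INTO demo VALUES(2);"
        "INSERT INTO demo VALUES(3); INSERT INTO demo VALUES(4);",
        0, 0, &zErr);
      sqlite_exec(db, "SELECT DISTINCT x FROM demo LIMIT 2;",
                  printRow, 0, &zErr);             /* expected: 1 2 */
      sqlite_exec(db, "SELECT DISTINCT x FROM demo LIMIT 2 OFFSET 2;",
                  printRow, 0, &zErr);             /* expected: 3 4 */
    }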
+# +do_test limit-8.1 { + execsql { + SELECT DISTINCT round(x/100) FROM t3 LIMIT 5; + } +} {0 1 2 3 4} +do_test limit-8.2 { + execsql { + SELECT DISTINCT round(x/100) FROM t3 LIMIT 5 OFFSET 5; + } +} {5 6 7 8 9} +do_test limit-8.3 { + execsql { + SELECT DISTINCT round(x/100) FROM t3 LIMIT 5 OFFSET 25; + } +} {25 26 27 28 29} + finish_test Index: test/minmax.test ================================================================== --- test/minmax.test +++ test/minmax.test @@ -11,11 +11,11 @@ # This file implements regression tests for SQLite library. The # focus of this file is testing SELECT statements that contain # aggregate min() and max() functions and which are handled as # as a special case. # -# $Id: minmax.test,v 1.9 2004/03/13 14:00:37 drh Exp $ +# $Id: minmax.test,v 1.9.2.2 2004/07/18 21:14:05 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl do_test minmax-1.0 { @@ -66,11 +66,11 @@ set sqlite_search_count 0 execsql {SELECT min(x) FROM t1} } {1} do_test minmax-1.6 { set sqlite_search_count -} {1} +} {2} do_test minmax-1.7 { set sqlite_search_count 0 execsql {SELECT max(x) FROM t1} } {20} do_test minmax-1.8 { @@ -273,7 +273,87 @@ SELECT max(rowid) FROM ( SELECT max(rowid) FROM t4 EXCEPT SELECT max(rowid) FROM t5 ) } } {{}} + +# If there is a NULL in an aggregate max() or min(), ignore it. An +# aggregate min() or max() will only return NULL if all values are NULL. +# +do_test minmax-10.1 { + execsql { + CREATE TABLE t6(x); + INSERT INTO t6 VALUES(1); + INSERT INTO t6 VALUES(2); + INSERT INTO t6 VALUES(NULL); + SELECT coalesce(min(x),-1) FROM t6; + } +} {1} +do_test minmax-10.2 { + execsql { + SELECT max(x) FROM t6; + } +} {2} +do_test minmax-10.3 { + execsql { + CREATE INDEX i6 ON t6(x); + SELECT coalesce(min(x),-1) FROM t6; + } +} {1} +do_test minmax-10.4 { + execsql { + SELECT max(x) FROM t6; + } +} {2} +do_test minmax-10.5 { + execsql { + DELETE FROM t6 WHERE x NOT NULL; + SELECT count(*) FROM t6; + } +} 1 +do_test minmax-10.6 { + execsql { + SELECT count(x) FROM t6; + } +} 0 +do_test minmax-10.7 { + execsql { + SELECT (SELECT min(x) FROM t6), (SELECT max(x) FROM t6); + } +} {{} {}} +do_test minmax-10.8 { + execsql { + SELECT min(x), max(x) FROM t6; + } +} {{} {}} +do_test minmax-10.9 { + execsql { + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + INSERT INTO t6 SELECT * FROM t6; + SELECT count(*) FROM t6; + } +} 1024 +do_test minmax-10.10 { + execsql { + SELECT count(x) FROM t6; + } +} 0 +do_test minmax-10.11 { + execsql { + SELECT (SELECT min(x) FROM t6), (SELECT max(x) FROM t6); + } +} {{} {}} +do_test minmax-10.12 { + execsql { + SELECT min(x), max(x) FROM t6; + } +} {{} {}} finish_test Index: test/null.test ================================================================== --- test/null.test +++ test/null.test @@ -92,10 +92,11 @@ select ifnull(case c when b then 1 else 0 end, 99) from t1; } } {1 0 0 1 0 0 0} # Check to see that NULL values are ignored in aggregate functions. +# (except for min().) 
# do_test null-3.1 { execsql { select count(*), count(b), count(c), sum(b), sum(c), avg(b), avg(c), min(b), max(b) from t1; Index: test/select1.test ================================================================== --- test/select1.test +++ test/select1.test @@ -9,11 +9,11 @@ # #*********************************************************************** # This file implements regression tests for SQLite library. The # focus of this file is testing the SELECT statement. # -# $Id: select1.test,v 1.30 2002/06/02 16:09:03 drh Exp $ +# $Id: select1.test,v 1.30.2.3 2004/07/20 01:45:49 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # Try to select on a non-existant table. @@ -610,14 +610,19 @@ do_test select1-11.3 { execsql2 { SELECT * FROM t3 AS x, t4 AS y; } } {x.a 1 x.b 2 y.a 3 y.b 4} -do_test select1-11.4 { +do_test select1-11.4.1 { execsql { SELECT t3.*, t4.b FROM t3, t4; } +} {1 2 4} +do_test select1-11.4.2 { + execsql { + SELECT "t3".*, t4.b FROM t3, t4; + } } {1 2 4} do_test select1-11.5 { execsql2 { SELECT t3.*, t4.b FROM t3, t4; } Index: test/tclsqlite.test ================================================================== --- test/tclsqlite.test +++ test/tclsqlite.test @@ -13,11 +13,11 @@ # # Actually, all tests are based on the TCL interface, so the main # interface is pretty well tested. This file contains some addition # tests for fringe issues that the main test suite does not cover. # -# $Id: tclsqlite.test,v 1.20 2004/02/25 22:51:06 rdc Exp $ +# $Id: tclsqlite.test,v 1.20.2.2 2006/10/24 11:26:44 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl # Check the error messages generated by tclsqlite @@ -70,10 +70,11 @@ regsub {:.*$} $msg {} msg lappend v $msg } {1 {syntax error in expression "x*"}} if {[sqlite -encoding]=="UTF-8" && [sqlite -tcl-uses-utf]} { + catch {unset ::result} do_test tcl-2.1 { execsql "CREATE TABLE t\u0123x(a int, b\u1235 float)" execsql "PRAGMA table_info(t\u0123x)" } "0 a int 0 {} 0 1 b\u1235 float 0 {} 0" do_test tcl-2.2 { @@ -86,10 +87,11 @@ if {[sqlite -encoding]=="iso8859" && [sqlite -tcl-uses-utf]} { do_test tcl-2.1 { execsql "CREATE TABLE t\251x(a int, b\306 float)" execsql "PRAGMA table_info(t\251x)" } "0 a int 0 {} 0 1 b\306 float 0 {} 0" + unset -nocomplain result do_test tcl-2.2 { execsql "INSERT INTO t\251x VALUES(1,2.3)" db eval "SELECT * FROM t\251x" result break set result(*) } "a b\306" Index: test/trigger1.test ================================================================== --- test/trigger1.test +++ test/trigger1.test @@ -410,7 +410,118 @@ INSERT INTO t3 VALUES(1,3); SELECT * FROM t3 UNION ALL SELECT 99, 99 UNION ALL SELECT * FROM t4; } } {1 2 1 3 99 99 1 3} +execsql { + DROP TABLE t2; + DROP TABLE t3; + DROP TABLE t4; +} + +# Skip the remaining tests if we have a codec. The ATTACH command +# will not work right on these tests when there is a codec. +# +if {[sqlite -has-codec]} { + finish_test + return +} + +# Ticket #764. At one stage TEMP triggers would fail to re-install when the +# schema was reloaded. The following tests ensure that TEMP triggers are +# correctly re-installed. +# +# Also verify that references within trigger programs are resolved at +# statement compile time, not trigger installation time. This means, for +# example, that you can drop and re-create tables referenced by triggers. 
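The comment that closes the hunk above (names inside a trigger body are resolved when the triggering statement is compiled, not when the trigger is created) can also be observed through the C interface. A hedged sketch using the 2.x API; the database file name, the table names, and the function name triggerResolutionDemo are made up for illustration:

    #include "sqlite.h"

    /* A TEMP trigger keeps firing after the table it writes to is dropped
    ** and re-created, because the INSERT inside the trigger body is bound
    ** to names each time the triggering statement is compiled. */
    static void triggerResolutionDemo(void){
      char *zErr = 0;
      sqlite *db = sqlite_open("demo.db", 0666, &zErr);
      if( db==0 ) return;
      sqlite_exec(db,
        "CREATE TABLE t1(a);"
        "CREATE TABLE log(a);"
        "CREATE TEMP TRIGGER tr1 AFTER INSERT ON t1 BEGIN"
        "  INSERT INTO log VALUES(new.a);"
        "END;"
        "INSERT INTO t1 VALUES(1);"    /* logged into the original log table  */
        "DROP TABLE log;"
        "CREATE TABLE log(b);"         /* column name may change; the trigger */
        "INSERT INTO t1 VALUES(2);",   /* body names no columns explicitly    */
        0, 0, &zErr);
      sqlite_close(db);
    }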
+do_test trigger-10.0 { + file delete -force test2.db + file delete -force test2.db-journal + sqlite db2 test2.db + execsql {CREATE TABLE t3(a, b, c);} db2 + db2 close + execsql { + ATTACH 'test2.db' AS aux; + } +} {} +do_test trigger-10.1 { + execsql { + CREATE TABLE t1(a, b, c); + CREATE temp TABLE t2(a, b, c); + CREATE TABLE insert_log(db, a, b, c); + } +} {} +do_test trigger-10.2 { + execsql { + CREATE TEMP TRIGGER trig1 AFTER INSERT ON t1 BEGIN + INSERT INTO insert_log VALUES('main', new.a, new.b, new.c); + END; + CREATE TEMP TRIGGER trig2 AFTER INSERT ON t2 BEGIN + INSERT INTO insert_log VALUES('temp', new.a, new.b, new.c); + END; + CREATE TEMP TRIGGER trig3 AFTER INSERT ON t3 BEGIN + INSERT INTO insert_log VALUES('aux', new.a, new.b, new.c); + END; + } +} {} +do_test trigger-10.3 { + execsql { + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t2 VALUES(4, 5, 6); + INSERT INTO t3 VALUES(7, 8, 9); + } +} {} +do_test trigger-10.4 { + execsql { + SELECT * FROM insert_log; + } +} {main 1 2 3 temp 4 5 6 aux 7 8 9} +do_test trigger-10.5 { + execsql { + BEGIN; + INSERT INTO t1 VALUES(1, 2, 3); + INSERT INTO t2 VALUES(4, 5, 6); + INSERT INTO t3 VALUES(7, 8, 9); + ROLLBACK; + } +} {} +do_test trigger-10.6 { + execsql { + SELECT * FROM insert_log; + } +} {main 1 2 3 temp 4 5 6 aux 7 8 9} +do_test trigger-10.7 { + execsql { + DELETE FROM insert_log; + INSERT INTO t1 VALUES(11, 12, 13); + INSERT INTO t2 VALUES(14, 15, 16); + INSERT INTO t3 VALUES(17, 18, 19); + } +} {} +do_test trigger-10.8 { + execsql { + SELECT * FROM insert_log; + } +} {main 11 12 13 temp 14 15 16 aux 17 18 19} +do_test trigger-10.8 { +# Drop and re-create the insert_log table in a different database. Note +# that we can change the column names because the trigger programs don't +# use them explicitly. + execsql { + DROP TABLE insert_log; + CREATE temp TABLE insert_log(db, d, e, f); + } +} {} +do_test trigger-10.10 { + execsql { + INSERT INTO t1 VALUES(21, 22, 23); + INSERT INTO t2 VALUES(24, 25, 26); + INSERT INTO t3 VALUES(27, 28, 29); + } +} {} +do_test trigger-10.11 { + execsql { + SELECT * FROM insert_log; + } +} {main 21 22 23 temp 24 25 26 aux 27 28 29} finish_test Index: test/view.test ================================================================== --- test/view.test +++ test/view.test @@ -9,11 +9,11 @@ # #*********************************************************************** # This file implements regression tests for SQLite library. The # focus of this file is testing VIEW statements. # -# $Id: view.test,v 1.16 2003/05/31 16:21:13 drh Exp $ +# $Id: view.test,v 1.16.2.1 2004/07/20 00:20:47 drh Exp $ set testdir [file dirname $argv0] source $testdir/tester.tcl do_test view-1.0 { execsql { @@ -382,8 +382,26 @@ do_test view-9.6 { execsql { SELECT * FROM v10 ORDER BY 1; } } {3 4 4 2 5 1} + +# Tables with columns having peculiar quoted names used in views +# Ticket #756. 
+# +do_test view-10.1 { + execsql { + CREATE TABLE t3("9" integer, [4] text); + INSERT INTO t3 VALUES(1,2); + CREATE VIEW v_t3_a AS SELECT a.[9] FROM t3 AS a; + CREATE VIEW v_t3_b AS SELECT "4" FROM t3; + SELECT * FROM v_t3_a; + } +} {1} +do_test view-10.2 { + execsql { + SELECT * FROM v_t3_b; + } +} {2} finish_test Index: tool/lemon.c ================================================================== --- tool/lemon.c +++ tool/lemon.c @@ -10,17 +10,10 @@ #include #include #include #include -extern void qsort(); -extern double strtod(); -extern long strtol(); -extern void free(); -extern int access(); -extern int atoi(); - #ifndef __WIN32__ # if defined(_WIN32) || defined(WIN32) # define __WIN32__ # endif #endif @@ -38,11 +31,10 @@ extern void *malloc(); /******** From the file "action.h" *************************************/ struct action *Action_new(); struct action *Action_sort(); -void Action_add(); /********* From the file "assert.h" ************************************/ void myassert(); #ifndef NDEBUG # define assert(X) if(!(X))myassert(__FILE__,__LINE__) @@ -372,11 +364,11 @@ /* Sort parser actions */ struct action *Action_sort(ap) struct action *ap; { - ap = (struct action *)msort(ap,&ap->next,actioncmp); + ap = (struct action *)msort((char *)ap,(char **)&ap->next,actioncmp); return ap; } void Action_add(app,type,sp,arg) struct action **app; @@ -809,11 +801,11 @@ ** constructed in the preceding loop */ newstp = getstate(lemp); /* The state "newstp" is reached from the state "stp" by a shift action ** on the symbol "sp" */ - Action_add(&stp->ap,SHIFT,sp,newstp); + Action_add(&stp->ap,SHIFT,sp,(char *)newstp); } } /* ** Construct the propagation links @@ -910,11 +902,11 @@ if( cfp->rp->nrhs==cfp->dot ){ /* Is dot at extreme right? */ for(j=0; jnterminal; j++){ if( SetFind(cfp->fws,j) ){ /* Add a reduce action to the state "stp" which will reduce by the ** rule "cfp->rp" if the lookahead symbol is "lemp->symbols[j]" */ - Action_add(&stp->ap,REDUCE,lemp->symbols[j],cfp->rp); + Action_add(&stp->ap,REDUCE,lemp->symbols[j],(char *)cfp->rp); } } } } } @@ -1186,18 +1178,18 @@ return; } /* Sort the configuration list */ void Configlist_sort(){ - current = (struct config *)msort(current,&(current->next),Configcmp); + current = (struct config *)msort((char *)current,(char **)&(current->next),Configcmp); currentend = 0; return; } /* Sort the basis configuration list */ void Configlist_sortbasis(){ - basis = (struct config *)msort(current,&(current->bp),Configcmp); + basis = (struct config *)msort((char *)current,(char **)&(current->bp),Configcmp); basisend = 0; return; } /* Return a pointer to the head of the configuration list and @@ -1445,10 +1437,11 @@ } if( lem.nconflict ){ fprintf(stderr,"%d parsing conflicts.\n",lem.nconflict); } exit(lem.errorcnt + lem.nconflict); + return (lem.errorcnt + lem.nconflict); } /******************** From the file "msort.c" *******************************/ /* ** A generic merge-sort program. 
** @@ -2874,11 +2867,11 @@ FILE *out; struct symbol *sp; struct lemon *lemp; int *lineno; { - char *cp; + char *cp = 0; int linecnt = 0; if( sp->type==TERMINAL ){ cp = lemp->tokendest; if( cp==0 ) return; @@ -2888,10 +2881,12 @@ fprintf(out,"#line %d \"%s\"\n{",sp->destructorln,lemp->filename); }else if( lemp->vardest ){ cp = lemp->vardest; if( cp==0 ) return; fprintf(out,"#line %d \"%s\"\n{",lemp->vardestln,lemp->filename); + }else{ + assert( 0 ); /* Cannot happen */ } for(; *cp; cp++){ if( *cp=='$' && cp[1]=='$' ){ fprintf(out,"(yypminor->yy%d)",sp->dtnum); cp++; ADDED tool/spaceanal.tcl Index: tool/spaceanal.tcl ================================================================== --- /dev/null +++ tool/spaceanal.tcl @@ -0,0 +1,436 @@ +# Run this TCL script using "testfixture" in order get a report that shows +# how much disk space is used by a particular data to actually store data +# versus how much space is unused. +# + +# Get the name of the database to analyze +# +if {[llength $argv]!=1} { + puts stderr "Usage: $argv0 database-name" + exit 1 +} +set file_to_analyze [lindex $argv 0] +if {![file exists $file_to_analyze]} { + puts stderr "No such file: $file_to_analyze" + exit 1 +} +if {![file readable $file_to_analyze]} { + puts stderr "File is not readable: $file_to_analyze" + exit 1 +} +if {[file size $file_to_analyze]<2048} { + puts stderr "Empty or malformed database: $file_to_analyze" + exit 1 +} + +# Open the database +# +sqlite db [lindex $argv 0] +set DB [btree_open [lindex $argv 0]] + +# In-memory database for collecting statistics +# +sqlite mem :memory: +set tabledef\ +{CREATE TABLE space_used( + name clob, -- Name of a table or index in the database file + tblname clob, -- Name of associated table + is_index boolean, -- TRUE if it is an index, false for a table + nentry int, -- Number of entries in the BTree + payload int, -- Total amount of data stored in this table or index + mx_payload int, -- Maximum payload size + n_ovfl int, -- Number of entries that overflow + pri_pages int, -- Number of primary pages used + ovfl_pages int, -- Number of overflow pages used + pri_unused int, -- Number of unused bytes on primary pages + ovfl_unused int -- Number of unused bytes on overflow pages +);} +mem eval $tabledef + +# This query will be used to find the root page number for every index and +# table in the database. +# +set sql { + SELECT name, tbl_name, type, rootpage + FROM sqlite_master WHERE type IN ('table','index') + UNION ALL + SELECT 'sqlite_master', 'sqlite_master', 'table', 2 + ORDER BY 1 +} + +# Analyze every table in the database, one at a time. +# +foreach {name tblname type rootpage} [db eval $sql] { + puts stderr "Analyzing $name..." 
+ set cursor [btree_cursor $DB $rootpage 0] + set go [btree_first $cursor] + set size 0 + catch {unset pg_used} + set unused_ovfl 0 + set n_overflow 0 + set cnt_ovfl 0 + set n_entry 0 + set mx_size 0 + set pg_used($rootpage) 1016 + while {$go==0} { + incr n_entry + set payload [btree_payload_size $cursor] + incr size $payload + set stat [btree_cursor_dump $cursor] + set pgno [lindex $stat 0] + set freebytes [lindex $stat 4] + set pg_used($pgno) $freebytes + if {$payload>236} { + # if {[lindex $stat 8]==0} {error "overflow is empty with $payload"} + set n [expr {($payload-236+1019)/1020}] + incr n_overflow $n + incr cnt_ovfl + incr unused_ovfl [expr {$n*1020+236-$payload}] + } else { + # if {[lindex $stat 8]!=0} {error "overflow not empty with $payload"} + } + if {$payload>$mx_size} {set mx_size $payload} + set go [btree_next $cursor] + } + btree_close_cursor $cursor + set n_primary [llength [array names pg_used]] + set unused_primary 0 + foreach x [array names pg_used] {incr unused_primary $pg_used($x)} + regsub -all ' $name '' name + set sql "INSERT INTO space_used VALUES('$name'" + regsub -all ' $tblname '' tblname + append sql ",'$tblname',[expr {$type=="index"}],$n_entry" + append sql ",$size,$mx_size,$cnt_ovfl," + append sql "$n_primary,$n_overflow,$unused_primary,$unused_ovfl);" + mem eval $sql +} + +# Generate a single line of output in the statistics section of the +# report. +# +proc statline {title value {extra {}}} { + set len [string length $title] + set dots [string range {......................................} $len end] + set len [string length $value] + set sp2 [string range { } $len end] + if {$extra ne ""} { + set extra " $extra" + } + puts "$title$dots $value$sp2$extra" +} + +# Generate a formatted percentage value for $num/$denom +# +proc percent {num denom} { + if {$denom==0.0} {return ""} + set v [expr {$num*100.0/$denom}] + if {$v>1.0 && $v<99.0} { + return [format %4.1f%% $v] + } elseif {$v<0.1 || $v>99.9} { + return [format %6.3f%% $v] + } else { + return [format %5.2f%% $v] + } +} + +# Generate a subreport that covers some subset of the database. +# the $where clause determines which subset to analyze. 
+# +proc subreport {title where} { + set hit 0 + mem eval "SELECT sum(nentry) AS nentry, \ + sum(payload) AS payload, \ + sum(CASE is_index WHEN 1 THEN 0 ELSE payload-4*nentry END) \ + AS data, \ + max(mx_payload) AS mx_payload, \ + sum(n_ovfl) as n_ovfl, \ + sum(pri_pages) AS pri_pages, \ + sum(ovfl_pages) AS ovfl_pages, \ + sum(pri_unused) AS pri_unused, \ + sum(ovfl_unused) AS ovfl_unused \ + FROM space_used WHERE $where" {} {set hit 1} + if {!$hit} {return 0} + puts "" + set len [string length $title] + incr len 5 + set stars "***********************************" + append stars $stars + set stars [string range $stars $len end] + puts "*** $title $stars" + puts "" + statline "Percentage of total database" \ + [percent [expr {$pri_pages+$ovfl_pages}] $::file_pgcnt] + statline "Number of entries" $nentry + set storage [expr {($pri_pages+$ovfl_pages)*1024}] + statline "Bytes of storage consumed" $storage + statline "Bytes of payload" $payload [percent $payload $storage] + statline "Bytes of data" $data [percent $data $storage] + set key [expr {$payload-$data}] + statline "Bytes of key" $key [percent $key $storage] + set avgpay [expr {$nentry>0?$payload/$nentry:0}] + statline "Average payload per entry" $avgpay + set avgunused [expr {$nentry>0?($pri_unused+$ovfl_unused)/$nentry:0}] + statline "Average unused bytes per entry" $avgunused + statline "Average fanout" \ + [format %.2f [expr {$pri_pages==0?0:($nentry+0.0)/$pri_pages}]] + statline "Maximum payload per entry" $mx_payload + statline "Entries that use overflow" $n_ovfl [percent $n_ovfl $nentry] + statline "Total pages used" [set allpgs [expr {$pri_pages+$ovfl_pages}]] + statline "Primary pages used" $pri_pages ;# [percent $pri_pages $allpgs] + statline "Overflow pages used" $ovfl_pages ;# [percent $ovfl_pages $allpgs] + statline "Unused bytes on primary pages" $pri_unused \ + [percent $pri_unused [expr {$pri_pages*1024}]] + statline "Unused bytes on overflow pages" $ovfl_unused \ + [percent $ovfl_unused [expr {$ovfl_pages*1024}]] + set allunused [expr {$ovfl_unused+$pri_unused}] + statline "Unused bytes on all pages" $allunused \ + [percent $allunused [expr {$allpgs*1024}]] + return 1 +} + +# Output summary statistics: +# +puts "/** Disk-Space Utilization Report For $file_to_analyze" +puts "*** As of [clock format [clock seconds] -format {%Y-%b-%d %H:%M:%S}]" +puts "" +set fsize [file size [lindex $argv 0]] +set file_pgcnt [expr {$fsize/1024}] +set usedcnt [mem eval {SELECT sum(pri_pages+ovfl_pages) FROM space_used}] +set freecnt [expr {$file_pgcnt-$usedcnt-1}] +set freecnt2 [lindex [btree_get_meta $DB] 0] +statline {Pages in the whole file (measured)} $file_pgcnt +set file_pgcnt2 [expr {$usedcnt+$freecnt2+1}] +statline {Pages in the whole file (calculated)} $file_pgcnt2 +statline {Pages that store data} $usedcnt [percent $usedcnt $file_pgcnt] +statline {Pages on the freelist (per header)}\ + $freecnt2 [percent $freecnt2 $file_pgcnt] +statline {Pages on the freelist (calculated)}\ + $freecnt [percent $freecnt $file_pgcnt] +statline {Header pages} 1 [percent 1 $file_pgcnt] + +set ntable [db eval {SELECT count(*)+1 FROM sqlite_master WHERE type='table'}] +statline {Number of tables in the database} $ntable +set nindex [db eval {SELECT count(*) FROM sqlite_master WHERE type='index'}] +set autoindex [db eval {SELECT count(*) FROM sqlite_master + WHERE type='index' AND name LIKE '(% autoindex %)'}] +set manindex [expr {$nindex-$autoindex}] +statline {Number of indices} $nindex +statline {Number of named indices} $manindex [percent 
$manindex $nindex] +statline {Automatically generated indices} $autoindex \ + [percent $autoindex $nindex] + +set bytes_data [mem eval "SELECT sum(payload-4*nentry) FROM space_used + WHERE NOT is_index AND name!='sqlite_master'"] +set total_payload [mem eval "SELECT sum(payload) FROM space_used"] +statline "Size of the file in bytes" $fsize +statline "Bytes of payload stored" $total_payload \ + [percent $total_payload $fsize] +statline "Bytes of user data stored" $bytes_data \ + [percent $bytes_data $fsize] + +# Output table rankings +# +puts "" +puts "*** Page counts for all tables with their indices ********************" +puts "" +mem eval {SELECT tblname, count(*) AS cnt, sum(pri_pages+ovfl_pages) AS size + FROM space_used GROUP BY tblname ORDER BY size DESC, tblname} {} { + statline [string toupper $tblname] $size [percent $size $file_pgcnt] +} + +# Output subreports +# +if {$nindex>0} { + subreport {All tables and indices} 1 +} +subreport {All tables} {NOT is_index} +if {$nindex>0} { + subreport {All indices} {is_index} +} +foreach tbl [mem eval {SELECT name FROM space_used WHERE NOT is_index + ORDER BY name}] { + regsub ' $tbl '' qn + set name [string toupper $tbl] + set n [mem eval "SELECT count(*) FROM space_used WHERE tblname='$qn'"] + if {$n>1} { + subreport "Table $name and all its indices" "tblname='$qn'" + subreport "Table $name w/o any indices" "name='$qn'" + subreport "Indices of table $name" "tblname='$qn' AND is_index" + } else { + subreport "Table $name" "name='$qn'" + } +} + +# Output instructions on what the numbers above mean. +# +puts { +*** Definitions ****************************************************** + +Number of pages in the whole file + + The number of 1024-byte pages that go into forming the complete database + +Pages that store data + + The number of pages that store data, either as primary B*Tree pages or + as overflow pages. The number at the right is the data pages divided by + the total number of pages in the file. + +Pages on the freelist + + The number of pages that are not currently in use but are reserved for + future use. The percentage at the right is the number of freelist pages + divided by the total number of pages in the file. + +Header pages + + The number of pages of header overhead in the database. This value is + always 1. The percentage at the right is the number of header pages + divided by the total number of pages in the file. + +Number of tables in the database + + The number of tables in the database, including the SQLITE_MASTER table + used to store schema information. + +Number of indices + + The total number of indices in the database. + +Number of named indices + + The number of indices created using an explicit CREATE INDEX statement. + +Automatically generated indices + + The number of indices used to implement PRIMARY KEY or UNIQUE constraints + on tables. + +Size of the file in bytes + + The total amount of disk space used by the entire database files. + +Bytes of payload stored + + The total number of bytes of payload stored in the database. Payload + includes both key and data. The content of the SQLITE_MASTER table is + counted when computing this number. The percentage at the right shows + the payload divided by the total file size. + +Bytes of user data stored + + The total number of bytes of data stored in the database, not counting + the database schema information stored in the SQLITE_MASTER table. The + percentage at the right is the user data size divided by the total file + size. 
+ +Percentage of total database + + The amount of the complete database file that is devoted to storing + information described by this category. + +Number of entries + + The total number of B*Tree key/value pairs stored under this category. + +Bytes of storage consumed + + The total amount of disk space required to store all B*Tree entries + under this category. The is the total number of pages used times + the pages size (1024). + +Bytes of payload + + The amount of payload stored under this category. Payload is the sum + of keys and data. Each table entry has 4 bytes of key and an arbitrary + amount of data. Each index entry has 4 or more bytes of key and no + data. The percentage at the right is the bytes of payload divided by + the bytes of storage consumed. + +Bytes of data + + The amount of data stored under this category. The data space reported + includes formatting information such as nul-terminators and field-lengths + that are stored with the data. The percentage at the right is the bytes + of data divided by bytes of storage consumed. + +Bytes of key + + The sum of the sizes of all keys under this category. The percentage at + the right is the bytes of key divided by the bytes of storage consumed. + +Average payload per entry + + The average amount of payload on each entry. This is just the bytes of + payload divided by the number of entries. + +Average unused bytes per entry + + The average amount of free space remaining on all pages under this + category on a per-entry basis. This is the number of unused bytes on + all pages divided by the number of entries. + +Maximum payload per entry + + The largest payload size of any entry. + +Entries that use overflow + + Up to 236 bytes of payload for each entry are stored directly in the + primary B*Tree page. Any additional payload is stored on a linked list + of overflow pages. This is the number of entries that exceed 236 bytes + in size. The value to the right is the number of entries that overflow + divided by the total number of entries. + +Total pages used + + This is the number of 1024 byte pages used to hold all information in + the current category. This is the sum of primary and overflow pages. + +Primary pages used + + This is the number of primary B*Tree pages used. + +Overflow pages used + + The total number of overflow pages used for this category. + +Unused bytes on primary pages + + The total number of bytes of unused space on all primary pages. The + percentage at the right is the number of unused bytes divided by the + total number of bytes on primary pages. + +Unused bytes on overflow pages + + The total number of bytes of unused space on all overflow pages. The + percentage at the right is the number of unused bytes divided by the + total number of bytes on overflow pages. + +Unused bytes on all pages + + The total number of bytes of unused space on all primary and overflow + pages. The percentage at the right is the number of unused bytes + divided by the total number of bytes. +} + +# Output the database +# +puts "**********************************************************************" +puts "The entire text of this report can be sourced into any SQL database" +puts "engine for further analysis. All of the text above is an SQL comment." 
+puts "The data used to generate this report follows:" +puts "*/" +puts "BEGIN;" +puts $tabledef +unset -nocomplain x +mem eval {SELECT * FROM space_used} x { + puts -nonewline "INSERT INTO space_used VALUES(" + regsub ' $x(name) '' qn + regsub ' $x(tblname) '' qtn + puts -nonewline "'$qn','$qtn'," + puts -nonewline "$x(is_index),$x(nentry),$x(payload),$x(mx_payload)," + puts -nonewline "$x(n_ovfl),$x(pri_pages),$x(ovfl_pages),$x(pri_unused)," + puts "$x(ovfl_unused));" +} +puts "COMMIT;"